diff --git "a/SHOW/.vscode/PythonImportHelper-v2-Completion.json" "b/SHOW/.vscode/PythonImportHelper-v2-Completion.json" new file mode 100644--- /dev/null +++ "b/SHOW/.vscode/PythonImportHelper-v2-Completion.json" @@ -0,0 +1,31655 @@ +[ + { + "label": "sys", + "kind": 6, + "isExtraImport": true, + "importPath": "sys", + "description": "sys", + "detail": "sys", + "documentation": {} + }, + { + "label": "float_repr_style", + "importPath": "sys", + "description": "sys", + "isExtraImport": true, + "detail": "sys", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + 
"detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + 
"documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "Path", + "importPath": "pathlib", + "description": "pathlib", + "isExtraImport": true, + "detail": "pathlib", + "documentation": {} + }, + { + "label": "platform", + "kind": 6, + "isExtraImport": true, + "importPath": "platform", + "description": "platform", + "detail": "platform", + "documentation": {} + }, + { + "label": "SHOW", + "kind": 6, + "isExtraImport": true, + "importPath": "SHOW", + "description": "SHOW", + "detail": "SHOW", + "documentation": {} + }, + { + "label": "attr_dict", + "importPath": "SHOW", + "description": "SHOW", + "isExtraImport": true, + "detail": "SHOW", + "documentation": {} + }, + { + "label": "os", + "kind": 6, + "isExtraImport": true, + "importPath": "os", + "description": "os", + "detail": "os", + "documentation": {} + }, + { + "label": "pandas", + "kind": 6, + "isExtraImport": true, + "importPath": "pandas", + "description": "pandas", + "detail": "pandas", + "documentation": {} + }, + { + "label": "datetime", + "kind": 6, + "isExtraImport": true, + "importPath": "datetime", + "description": "datetime", + "detail": "datetime", + "documentation": {} + }, + { + "label": "datetime", + "importPath": "datetime", + "description": "datetime", + "isExtraImport": true, + "detail": "datetime", + "documentation": {} + }, + { + "label": "datetime", + "importPath": "datetime", + "description": "datetime", + "isExtraImport": true, + "detail": "datetime", + "documentation": {} + }, + { + "label": "datetime", + "importPath": "datetime", + "description": "datetime", + "isExtraImport": true, + "detail": "datetime", + "documentation": {} + }, + { + "label": "datetime", + "importPath": "datetime", + "description": "datetime", + "isExtraImport": true, + "detail": "datetime", + "documentation": {} + }, + { + "label": "datetime", + "importPath": "datetime", + "description": "datetime", + "isExtraImport": true, + "detail": "datetime", + "documentation": {} + }, + { + "label": "os.path", + "kind": 6, + "isExtraImport": true, + "importPath": 
"os.path", + "description": "os.path", + "detail": "os.path", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": 
true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + 
"documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "EasyDict", + "importPath": "easydict", + "description": "easydict", + "isExtraImport": true, + "detail": "easydict", + "documentation": {} + }, + { + "label": "speaker_info", + "importPath": "configs.configs.speaker_info", + "description": "configs.configs.speaker_info", + "isExtraImport": true, + "detail": "configs.configs.speaker_info", + "documentation": {} + }, + { + "label": "loguru", + "kind": 6, + "isExtraImport": true, + "importPath": "loguru", + "description": "loguru", + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": 
"loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": 
true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "logger", + "importPath": "loguru", + "description": "loguru", + "isExtraImport": true, + "detail": "loguru", + "documentation": {} + }, + { + "label": "warnings", + "kind": 6, + "isExtraImport": true, + "importPath": "warnings", + "description": "warnings", + "detail": "warnings", + "documentation": {} + }, + { + "label": "torch", + "kind": 6, + "isExtraImport": true, + "importPath": "torch", + "description": "torch", + "detail": "torch", + "documentation": {} + }, + { + "label": "nn", + "importPath": "torch", + "description": "torch", + "isExtraImport": true, + "detail": "torch", + "documentation": {} + }, + { + "label": "nn", + "importPath": "torch", + "description": "torch", + "isExtraImport": true, + "detail": "torch", + "documentation": {} + }, + { + "label": "distributed", + "importPath": "torch", + "description": "torch", + "isExtraImport": true, + "detail": "torch", + "documentation": {} + }, + { + "label": "distributed", + "importPath": "torch", + "description": "torch", + "isExtraImport": true, + "detail": "torch", + "documentation": {} + }, + { + "label": "distributed", + "importPath": "torch", + "description": "torch", + "isExtraImport": true, + "detail": "torch", + "documentation": {} + }, + { + "label": "distributed", + "importPath": "torch", + "description": "torch", + "isExtraImport": true, + "detail": "torch", + "documentation": {} + }, + { + "label": "distributed", + "importPath": "torch", + "description": "torch", + "isExtraImport": true, + "detail": "torch", + "documentation": {} + }, + { + "label": "distributed", + "importPath": "torch", + "description": "torch", + "isExtraImport": true, + "detail": "torch", + "documentation": {} + }, + { + "label": "nn", + "importPath": "torch", + "description": "torch", + "isExtraImport": true, + "detail": "torch", + "documentation": {} + }, + { + "label": "nn", + "importPath": "torch", + "description": "torch", + "isExtraImport": true, + "detail": "torch", + "documentation": {} + }, + { + "label": "nn", + "importPath": "torch", + "description": "torch", + "isExtraImport": true, + "detail": "torch", + "documentation": {} + 
}, + { + "label": "nn", + "importPath": "torch", + "description": "torch", + "isExtraImport": true, + "detail": "torch", + "documentation": {} + }, + { + "label": "nn", + "importPath": "torch", + "description": "torch", + "isExtraImport": true, + "detail": "torch", + "documentation": {} + }, + { + "label": "nn", + "importPath": "torch", + "description": "torch", + "isExtraImport": true, + "detail": "torch", + "documentation": {} + }, + { + "label": "autograd", + "importPath": "torch", + "description": "torch", + "isExtraImport": true, + "detail": "torch", + "documentation": {} + }, + { + "label": "checkpoint", + "importPath": "torch.utils.checkpoint", + "description": "torch.utils.checkpoint", + "isExtraImport": true, + "detail": "torch.utils.checkpoint", + "documentation": {} + }, + { + "label": "checkpoint_sequential", + "importPath": "torch.utils.checkpoint", + "description": "torch.utils.checkpoint", + "isExtraImport": true, + "detail": "torch.utils.checkpoint", + "documentation": {} + }, + { + "label": "torch.nn", + "kind": 6, + "isExtraImport": true, + "importPath": "torch.nn", + "description": "torch.nn", + "detail": "torch.nn", + "documentation": {} + }, + { + "label": "Linear", + "importPath": "torch.nn", + "description": "torch.nn", + "isExtraImport": true, + "detail": "torch.nn", + "documentation": {} + }, + { + "label": "Conv2d", + "importPath": "torch.nn", + "description": "torch.nn", + "isExtraImport": true, + "detail": "torch.nn", + "documentation": {} + }, + { + "label": "BatchNorm1d", + "importPath": "torch.nn", + "description": "torch.nn", + "isExtraImport": true, + "detail": "torch.nn", + "documentation": {} + }, + { + "label": "BatchNorm2d", + "importPath": "torch.nn", + "description": "torch.nn", + "isExtraImport": true, + "detail": "torch.nn", + "documentation": {} + }, + { + "label": "PReLU", + "importPath": "torch.nn", + "description": "torch.nn", + "isExtraImport": true, + "detail": "torch.nn", + "documentation": {} + }, + { + "label": "Sequential", + "importPath": "torch.nn", + "description": "torch.nn", + "isExtraImport": true, + "detail": "torch.nn", + "documentation": {} + }, + { + "label": "Module", + "importPath": "torch.nn", + "description": "torch.nn", + "isExtraImport": true, + "detail": "torch.nn", + "documentation": {} + }, + { + "label": "CrossEntropyLoss", + "importPath": "torch.nn", + "description": "torch.nn", + "isExtraImport": true, + "detail": "torch.nn", + "documentation": {} + }, + { + "label": "MSELoss", + "importPath": "torch.nn", + "description": "torch.nn", + "isExtraImport": true, + "detail": "torch.nn", + "documentation": {} + }, + { + "label": "CrossEntropyLoss", + "importPath": "torch.nn", + "description": "torch.nn", + "isExtraImport": true, + "detail": "torch.nn", + "documentation": {} + }, + { + "label": "functional", + "importPath": "torch.nn", + "description": "torch.nn", + "isExtraImport": true, + "detail": "torch.nn", + "documentation": {} + }, + { + "label": "functional", + "importPath": "torch.nn", + "description": "torch.nn", + "isExtraImport": true, + "detail": "torch.nn", + "documentation": {} + }, + { + "label": "DropPath", + "importPath": "timm.models.layers", + "description": "timm.models.layers", + "isExtraImport": true, + "detail": "timm.models.layers", + "documentation": {} + }, + { + "label": "to_2tuple", + "importPath": "timm.models.layers", + "description": "timm.models.layers", + "isExtraImport": true, + "detail": "timm.models.layers", + "documentation": {} + }, + { + "label": "trunc_normal_", + "importPath": 
"timm.models.layers", + "description": "timm.models.layers", + "isExtraImport": true, + "detail": "timm.models.layers", + "documentation": {} + }, + { + "label": "Optional", + "importPath": "typing", + "description": "typing", + "isExtraImport": true, + "detail": "typing", + "documentation": {} + }, + { + "label": "Callable", + "importPath": "typing", + "description": "typing", + "isExtraImport": true, + "detail": "typing", + "documentation": {} + }, + { + "label": "List", + "importPath": "typing", + "description": "typing", + "isExtraImport": true, + "detail": "typing", + "documentation": {} + }, + { + "label": "Iterable", + "importPath": "typing", + "description": "typing", + "isExtraImport": true, + "detail": "typing", + "documentation": {} + }, + { + "label": "Callable", + "importPath": "typing", + "description": "typing", + "isExtraImport": true, + "detail": "typing", + "documentation": {} + }, + { + "label": "Callable", + "importPath": "typing", + "description": "typing", + "isExtraImport": true, + "detail": "typing", + "documentation": {} + }, + { + "label": "List", + "importPath": "typing", + "description": "typing", + "isExtraImport": true, + "detail": "typing", + "documentation": {} + }, + { + "label": "Union", + "importPath": "typing", + "description": "typing", + "isExtraImport": true, + "detail": "typing", + "documentation": {} + }, + { + "label": "NewType", + "importPath": "typing", + "description": "typing", + "isExtraImport": true, + "detail": "typing", + "documentation": {} + }, + { + "label": "Optional", + "importPath": "typing", + "description": "typing", + "isExtraImport": true, + "detail": "typing", + "documentation": {} + }, + { + "label": "Optional", + "importPath": "typing", + "description": "typing", + "isExtraImport": true, + "detail": "typing", + "documentation": {} + }, + { + "label": "Union", + "importPath": "typing", + "description": "typing", + "isExtraImport": true, + "detail": "typing", + "documentation": {} + }, + { + "label": "Union", + "importPath": "typing", + "description": "typing", + "isExtraImport": true, + "detail": "typing", + "documentation": {} + }, + { + "label": "Union", + "importPath": "typing", + "description": "typing", + "isExtraImport": true, + "detail": "typing", + "documentation": {} + }, + { + "label": "Optional", + "importPath": "typing", + "description": "typing", + "isExtraImport": true, + "detail": "typing", + "documentation": {} + }, + { + "label": "Union", + "importPath": "typing", + "description": "typing", + "isExtraImport": true, + "detail": "typing", + "documentation": {} + }, + { + "label": "NewType", + "importPath": "typing", + "description": "typing", + "isExtraImport": true, + "detail": "typing", + "documentation": {} + }, + { + "label": "Iterable", + "importPath": "typing", + "description": "typing", + "isExtraImport": true, + "detail": "typing", + "documentation": {} + }, + { + "label": "List", + "importPath": "typing", + "description": "typing", + "isExtraImport": true, + "detail": "typing", + "documentation": {} + }, + { + "label": "Optional", + "importPath": "typing", + "description": "typing", + "isExtraImport": true, + "detail": "typing", + "documentation": {} + }, + { + "label": "Tuple", + "importPath": "typing", + "description": "typing", + "isExtraImport": true, + "detail": "typing", + "documentation": {} + }, + { + "label": "Union", + "importPath": "typing", + "description": "typing", + "isExtraImport": true, + "detail": "typing", + "documentation": {} + }, + { + "label": "Iterable", + "importPath": "typing", + 
"description": "typing", + "isExtraImport": true, + "detail": "typing", + "documentation": {} + }, + { + "label": "List", + "importPath": "typing", + "description": "typing", + "isExtraImport": true, + "detail": "typing", + "documentation": {} + }, + { + "label": "Optional", + "importPath": "typing", + "description": "typing", + "isExtraImport": true, + "detail": "typing", + "documentation": {} + }, + { + "label": "Tuple", + "importPath": "typing", + "description": "typing", + "isExtraImport": true, + "detail": "typing", + "documentation": {} + }, + { + "label": "Union", + "importPath": "typing", + "description": "typing", + "isExtraImport": true, + "detail": "typing", + "documentation": {} + }, + { + "label": "pickle", + "kind": 6, + "isExtraImport": true, + "importPath": "pickle", + "description": "pickle", + "detail": "pickle", + "documentation": {} + }, + { + "label": "NONE", + "importPath": "pickle", + "description": "pickle", + "isExtraImport": true, + "detail": "pickle", + "documentation": {} + }, + { + "label": "NONE", + "importPath": "pickle", + "description": "pickle", + "isExtraImport": true, + "detail": "pickle", + "documentation": {} + }, + { + "label": "mxnet", + "kind": 6, + "isExtraImport": true, + "importPath": "mxnet", + "description": "mxnet", + "detail": "mxnet", + "documentation": {} + }, + { + "label": "ndarray", + "importPath": "mxnet", + "description": "mxnet", + "isExtraImport": true, + "detail": "mxnet", + "documentation": {} + }, + { + "label": "numpy", + "kind": 6, + "isExtraImport": true, + "importPath": "numpy", + "description": "numpy", + "detail": "numpy", + "documentation": {} + }, + { + "label": "sklearn", + "kind": 6, + "isExtraImport": true, + "importPath": "sklearn", + "description": "sklearn", + "detail": "sklearn", + "documentation": {} + }, + { + "label": "scipy", + "kind": 6, + "isExtraImport": true, + "importPath": "scipy", + "description": "scipy", + "detail": "scipy", + "documentation": {} + }, + { + "label": "interpolate", + "importPath": "scipy", + "description": "scipy", + "isExtraImport": true, + "detail": "scipy", + "documentation": {} + }, + { + "label": "PCA", + "importPath": "sklearn.decomposition", + "description": "sklearn.decomposition", + "isExtraImport": true, + "detail": "sklearn.decomposition", + "documentation": {} + }, + { + "label": "KFold", + "importPath": "sklearn.model_selection", + "description": "sklearn.model_selection", + "isExtraImport": true, + "detail": "sklearn.model_selection", + "documentation": {} + }, + { + "label": "matplotlib.pyplot", + "kind": 6, + "isExtraImport": true, + "importPath": "matplotlib.pyplot", + "description": "matplotlib.pyplot", + "detail": "matplotlib.pyplot", + "documentation": {} + }, + { + "label": "angle_spectrum", + "importPath": "matplotlib.pyplot", + "description": "matplotlib.pyplot", + "isExtraImport": true, + "detail": "matplotlib.pyplot", + "documentation": {} + }, + { + "label": "sample_colours_from_colourmap", + "importPath": "menpo.visualize.viewmatplotlib", + "description": "menpo.visualize.viewmatplotlib", + "isExtraImport": true, + "detail": "menpo.visualize.viewmatplotlib", + "documentation": {} + }, + { + "label": "sample_colours_from_colourmap", + "importPath": "menpo.visualize.viewmatplotlib", + "description": "menpo.visualize.viewmatplotlib", + "isExtraImport": true, + "detail": "menpo.visualize.viewmatplotlib", + "documentation": {} + }, + { + "label": "prettytable", + "kind": 6, + "isExtraImport": true, + "importPath": "prettytable", + "description": "prettytable", + 
"detail": "prettytable", + "documentation": {} + }, + { + "label": "PrettyTable", + "importPath": "prettytable", + "description": "prettytable", + "isExtraImport": true, + "detail": "prettytable", + "documentation": {} + }, + { + "label": "PrettyTable", + "importPath": "prettytable", + "description": "prettytable", + "isExtraImport": true, + "detail": "prettytable", + "documentation": {} + }, + { + "label": "roc_curve", + "importPath": "sklearn.metrics", + "description": "sklearn.metrics", + "isExtraImport": true, + "detail": "sklearn.metrics", + "documentation": {} + }, + { + "label": "auc", + "importPath": "sklearn.metrics", + "description": "sklearn.metrics", + "isExtraImport": true, + "detail": "sklearn.metrics", + "documentation": {} + }, + { + "label": "roc_curve", + "importPath": "sklearn.metrics", + "description": "sklearn.metrics", + "isExtraImport": true, + "detail": "sklearn.metrics", + "documentation": {} + }, + { + "label": "auc", + "importPath": "sklearn.metrics", + "description": "sklearn.metrics", + "isExtraImport": true, + "detail": "sklearn.metrics", + "documentation": {} + }, + { + "label": "roc_curve", + "importPath": "sklearn.metrics", + "description": "sklearn.metrics", + "isExtraImport": true, + "detail": "sklearn.metrics", + "documentation": {} + }, + { + "label": "logging", + "kind": 6, + "isExtraImport": true, + "importPath": "logging", + "description": "logging", + "detail": "logging", + "documentation": {} + }, + { + "label": "time", + "kind": 6, + "isExtraImport": true, + "importPath": "time", + "description": "time", + "detail": "time", + "documentation": {} + }, + { + "label": "time", + "importPath": "time", + "description": "time", + "isExtraImport": true, + "detail": "time", + "documentation": {} + }, + { + "label": "time", + "importPath": "time", + "description": "time", + "isExtraImport": true, + "detail": "time", + "documentation": {} + }, + { + "label": "time", + "importPath": "time", + "description": "time", + "isExtraImport": true, + "detail": "time", + "documentation": {} + }, + { + "label": "time", + "importPath": "time", + "description": "time", + "isExtraImport": true, + "detail": "time", + "documentation": {} + }, + { + "label": "time", + "importPath": "time", + "description": "time", + "isExtraImport": true, + "detail": "time", + "documentation": {} + }, + { + "label": "time", + "importPath": "time", + "description": "time", + "isExtraImport": true, + "detail": "time", + "documentation": {} + }, + { + "label": "time", + "importPath": "time", + "description": "time", + "isExtraImport": true, + "detail": "time", + "documentation": {} + }, + { + "label": "time", + "importPath": "time", + "description": "time", + "isExtraImport": true, + "detail": "time", + "documentation": {} + }, + { + "label": "time", + "importPath": "time", + "description": "time", + "isExtraImport": true, + "detail": "time", + "documentation": {} + }, + { + "label": "verification", + "importPath": "eval", + "description": "eval", + "isExtraImport": true, + "detail": "eval", + "documentation": {} + }, + { + "label": "AverageMeter", + "importPath": "utils.utils_logging", + "description": "utils.utils_logging", + "isExtraImport": true, + "detail": "utils.utils_logging", + "documentation": {} + }, + { + "label": "AverageMeter", + "importPath": "utils.utils_logging", + "description": "utils.utils_logging", + "isExtraImport": true, + "detail": "utils.utils_logging", + "documentation": {} + }, + { + "label": "init_logging", + "importPath": "utils.utils_logging", + "description": 
"utils.utils_logging", + "isExtraImport": true, + "detail": "utils.utils_logging", + "documentation": {} + }, + { + "label": "AverageMeter", + "importPath": "utils.utils_logging", + "description": "utils.utils_logging", + "isExtraImport": true, + "detail": "utils.utils_logging", + "documentation": {} + }, + { + "label": "init_logging", + "importPath": "utils.utils_logging", + "description": "utils.utils_logging", + "isExtraImport": true, + "detail": "utils.utils_logging", + "documentation": {} + }, + { + "label": "SummaryWriter", + "importPath": "torch.utils.tensorboard", + "description": "torch.utils.tensorboard", + "isExtraImport": true, + "detail": "torch.utils.tensorboard", + "documentation": {} + }, + { + "label": "SummaryWriter", + "importPath": "torch.utils.tensorboard", + "description": "torch.utils.tensorboard", + "isExtraImport": true, + "detail": "torch.utils.tensorboard", + "documentation": {} + }, + { + "label": "SummaryWriter", + "importPath": "torch.utils.tensorboard", + "description": "torch.utils.tensorboard", + "isExtraImport": true, + "detail": "torch.utils.tensorboard", + "documentation": {} + }, + { + "label": "importlib", + "kind": 6, + "isExtraImport": true, + "importPath": "importlib", + "description": "importlib", + "detail": "importlib", + "documentation": {} + }, + { + "label": "math", + "kind": 6, + "isExtraImport": true, + "importPath": "math", + "description": "math", + "detail": "math", + "documentation": {} + }, + { + "label": "e", + "importPath": "math", + "description": "math", + "isExtraImport": true, + "detail": "math", + "documentation": {} + }, + { + "label": "random", + "kind": 6, + "isExtraImport": true, + "importPath": "random", + "description": "random", + "detail": "random", + "documentation": {} + }, + { + "label": "torch.distributed", + "kind": 6, + "isExtraImport": true, + "importPath": "torch.distributed", + "description": "torch.distributed", + "detail": "torch.distributed", + "documentation": {} + }, + { + "label": "DistributedSampler", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "DataLoader", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "Dataset", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "DataLoader", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "DataLoader", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "DataLoader", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "Dataset", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "DataLoader", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "ConcatDataset", + "importPath": "torch.utils.data", + 
"description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "Dataset", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "ConcatDataset", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "Dataset", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "DataLoader", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "Dataset", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "DataLoader", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "ConcatDataset", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "Dataset", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "DataLoader", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "ConcatDataset", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "Dataset", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "DataLoader", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "ConcatDataset", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "Dataset", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "DataLoader", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "ConcatDataset", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "Dataset", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "DataLoader", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "ConcatDataset", + "importPath": "torch.utils.data", + "description": 
"torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "DataLoader", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "DataLoader", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "Dataset", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "DataLoader", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "DataLoader", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "Dataset", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "DataLoader", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "ConcatDataset", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "Dataset", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "DataLoader", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "Dataset", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "DataLoader", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "DataLoader", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "Dataset", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "DataLoader", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "TensorDataset", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "DataLoader", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "DataLoader", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "Dataset", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + 
"isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "DataLoader", + "importPath": "torch.utils.data", + "description": "torch.utils.data", + "isExtraImport": true, + "detail": "torch.utils.data", + "documentation": {} + }, + { + "label": "numbers", + "kind": 6, + "isExtraImport": true, + "importPath": "numbers", + "description": "numbers", + "detail": "numbers", + "documentation": {} + }, + { + "label": "queue", + "kind": 6, + "isExtraImport": true, + "importPath": "queue", + "description": "queue", + "detail": "queue", + "documentation": {} + }, + { + "label": "threading", + "kind": 6, + "isExtraImport": true, + "importPath": "threading", + "description": "threading", + "detail": "threading", + "documentation": {} + }, + { + "label": "functools", + "kind": 6, + "isExtraImport": true, + "importPath": "functools", + "description": "functools", + "detail": "functools", + "documentation": {} + }, + { + "label": "partial", + "importPath": "functools", + "description": "functools", + "isExtraImport": true, + "detail": "functools", + "documentation": {} + }, + { + "label": "reduce", + "importPath": "functools", + "description": "functools", + "isExtraImport": true, + "detail": "functools", + "documentation": {} + }, + { + "label": "reduce", + "importPath": "functools", + "description": "functools", + "isExtraImport": true, + "detail": "functools", + "documentation": {} + }, + { + "label": "wraps", + "importPath": "functools", + "description": "functools", + "isExtraImport": true, + "detail": "functools", + "documentation": {} + }, + { + "label": "reduce", + "importPath": "functools", + "description": "functools", + "isExtraImport": true, + "detail": "functools", + "documentation": {} + }, + { + "label": "partial", + "importPath": "functools", + "description": "functools", + "isExtraImport": true, + "detail": "functools", + "documentation": {} + }, + { + "label": "reduce", + "importPath": "functools", + "description": "functools", + "isExtraImport": true, + "detail": "functools", + "documentation": {} + }, + { + "label": "partial", + "importPath": "functools", + "description": "functools", + "isExtraImport": true, + "detail": "functools", + "documentation": {} + }, + { + "label": "reduce", + "importPath": "functools", + "description": "functools", + "isExtraImport": true, + "detail": "functools", + "documentation": {} + }, + { + "label": "partial", + "importPath": "functools", + "description": "functools", + "isExtraImport": true, + "detail": "functools", + "documentation": {} + }, + { + "label": "partial", + "importPath": "functools", + "description": "functools", + "isExtraImport": true, + "detail": "functools", + "documentation": {} + }, + { + "label": "reduce", + "importPath": "functools", + "description": "functools", + "isExtraImport": true, + "detail": "functools", + "documentation": {} + }, + { + "label": "reduce", + "importPath": "functools", + "description": "functools", + "isExtraImport": true, + "detail": "functools", + "documentation": {} + }, + { + "label": "reduce", + "importPath": "functools", + "description": "functools", + "isExtraImport": true, + "detail": "functools", + "documentation": {} + }, + { + "label": "reduce", + "importPath": "functools", + "description": "functools", + "isExtraImport": true, + "detail": "functools", + "documentation": {} + }, + { + "label": "reduce", + "importPath": "functools", + "description": "functools", + "isExtraImport": true, + "detail": "functools", + "documentation": {} + }, + { + "label": 
"reduce", + "importPath": "functools", + "description": "functools", + "isExtraImport": true, + "detail": "functools", + "documentation": {} + }, + { + "label": "torchvision", + "kind": 6, + "isExtraImport": true, + "importPath": "torchvision", + "description": "torchvision", + "detail": "torchvision", + "documentation": {} + }, + { + "label": "transforms", + "importPath": "torchvision", + "description": "torchvision", + "isExtraImport": true, + "detail": "torchvision", + "documentation": {} + }, + { + "label": "transforms", + "importPath": "torchvision", + "description": "torchvision", + "isExtraImport": true, + "detail": "torchvision", + "documentation": {} + }, + { + "label": "transforms", + "importPath": "torchvision", + "description": "torchvision", + "isExtraImport": true, + "detail": "torchvision", + "documentation": {} + }, + { + "label": "ImageFolder", + "importPath": "torchvision.datasets", + "description": "torchvision.datasets", + "isExtraImport": true, + "detail": "torchvision.datasets", + "documentation": {} + }, + { + "label": "DistributedSampler", + "importPath": "utils.utils_distributed_sampler", + "description": "utils.utils_distributed_sampler", + "isExtraImport": true, + "detail": "utils.utils_distributed_sampler", + "documentation": {} + }, + { + "label": "get_dist_info", + "importPath": "utils.utils_distributed_sampler", + "description": "utils.utils_distributed_sampler", + "isExtraImport": true, + "detail": "utils.utils_distributed_sampler", + "documentation": {} + }, + { + "label": "worker_init_fn", + "importPath": "utils.utils_distributed_sampler", + "description": "utils.utils_distributed_sampler", + "isExtraImport": true, + "detail": "utils.utils_distributed_sampler", + "documentation": {} + }, + { + "label": "setup_seed", + "importPath": "utils.utils_distributed_sampler", + "description": "utils.utils_distributed_sampler", + "isExtraImport": true, + "detail": "utils.utils_distributed_sampler", + "documentation": {} + }, + { + "label": "setup_seed", + "importPath": "utils.utils_distributed_sampler", + "description": "utils.utils_distributed_sampler", + "isExtraImport": true, + "detail": "utils.utils_distributed_sampler", + "documentation": {} + }, + { + "label": "matplotlib", + "kind": 6, + "isExtraImport": true, + "importPath": "matplotlib", + "description": "matplotlib", + "detail": "matplotlib", + "documentation": {} + }, + { + "label": "timeit", + "kind": 6, + "isExtraImport": true, + "importPath": "timeit", + "description": "timeit", + "detail": "timeit", + "documentation": {} + }, + { + "label": "argparse", + "kind": 6, + "isExtraImport": true, + "importPath": "argparse", + "description": "argparse", + "detail": "argparse", + "documentation": {} + }, + { + "label": "cv2", + "kind": 6, + "isExtraImport": true, + "importPath": "cv2", + "description": "cv2", + "detail": "cv2", + "documentation": {} + }, + { + "label": "ellipse2Poly", + "importPath": "cv2", + "description": "cv2", + "isExtraImport": true, + "detail": "cv2", + "documentation": {} + }, + { + "label": "transform", + "importPath": "skimage", + "description": "skimage", + "isExtraImport": true, + "detail": "skimage", + "documentation": {} + }, + { + "label": "get_model", + "importPath": "backbones", + "description": "backbones", + "isExtraImport": true, + "detail": "backbones", + "documentation": {} + }, + { + "label": "get_model", + "importPath": "backbones", + "description": "backbones", + "isExtraImport": true, + "detail": "backbones", + "documentation": {} + }, + { + "label": "get_model", + 
"importPath": "backbones", + "description": "backbones", + "isExtraImport": true, + "detail": "backbones", + "documentation": {} + }, + { + "label": "get_model", + "importPath": "backbones", + "description": "backbones", + "isExtraImport": true, + "detail": "backbones", + "documentation": {} + }, + { + "label": "get_model", + "importPath": "backbones", + "description": "backbones", + "isExtraImport": true, + "detail": "backbones", + "documentation": {} + }, + { + "label": "get_model_complexity_info", + "importPath": "ptflops", + "description": "ptflops", + "isExtraImport": true, + "detail": "ptflops", + "documentation": {} + }, + { + "label": "_LRScheduler", + "importPath": "torch.optim.lr_scheduler", + "description": "torch.optim.lr_scheduler", + "isExtraImport": true, + "detail": "torch.optim.lr_scheduler", + "documentation": {} + }, + { + "label": "division", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "absolute_import", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "print_function", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "division", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "absolute_import", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "division", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "print_function", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "absolute_import", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "print_function", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "division", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "absolute_import", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "division", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "print_function", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "unicode_literals", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "absolute_import", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "division", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + 
"documentation": {} + }, + { + "label": "print_function", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "unicode_literals", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "absolute_import", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "division", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "print_function", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "unicode_literals", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "absolute_import", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "division", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "print_function", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "unicode_literals", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "absolute_import", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "division", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "print_function", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "unicode_literals", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "absolute_import", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "division", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "print_function", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "absolute_import", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "division", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "print_function", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "absolute_import", + "importPath": "__future__", + "description": "__future__", + 
"isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "division", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "print_function", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "unicode_literals", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "absolute_import", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "division", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "print_function", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "unicode_literals", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "absolute_import", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "division", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "print_function", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "unicode_literals", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "division", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "absolute_import", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "division", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "print_function", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "unicode_literals", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "absolute_import", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "division", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "print_function", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "unicode_literals", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "absolute_import", + "importPath": 
"__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "division", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "print_function", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "division", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "absolute_import", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "division", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "print_function", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "unicode_literals", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "absolute_import", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "division", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "print_function", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "absolute_import", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "division", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "print_function", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "unicode_literals", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "absolute_import", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "division", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "print_function", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "unicode_literals", + "importPath": "__future__", + "description": "__future__", + "isExtraImport": true, + "detail": "__future__", + "documentation": {} + }, + { + "label": "glob", + "kind": 6, + "isExtraImport": true, + "importPath": "glob", + "description": "glob", + "detail": "glob", + "documentation": {} + }, + { + "label": "glob", + "importPath": "glob", + "description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { + "label": "glob", + "importPath": "glob", + 
"description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { + "label": "glob", + "importPath": "glob", + "description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { + "label": "glob", + "importPath": "glob", + "description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { + "label": "glob", + "importPath": "glob", + "description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { + "label": "glob", + "importPath": "glob", + "description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { + "label": "glob", + "importPath": "glob", + "description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { + "label": "glob", + "importPath": "glob", + "description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { + "label": "glob", + "importPath": "glob", + "description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { + "label": "glob", + "importPath": "glob", + "description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { + "label": "glob", + "importPath": "glob", + "description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { + "label": "glob", + "importPath": "glob", + "description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { + "label": "glob", + "importPath": "glob", + "description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { + "label": "glob", + "importPath": "glob", + "description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { + "label": "glob", + "importPath": "glob", + "description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { + "label": "glob", + "importPath": "glob", + "description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { + "label": "glob", + "importPath": "glob", + "description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { + "label": "glob", + "importPath": "glob", + "description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { + "label": "glob", + "importPath": "glob", + "description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { + "label": "glob", + "importPath": "glob", + "description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { + "label": "glob", + "importPath": "glob", + "description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { + "label": "glob", + "importPath": "glob", + "description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { + "label": "glob", + "importPath": "glob", + "description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { + "label": "glob", + "importPath": "glob", + "description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { + "label": "glob", + "importPath": "glob", + "description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { + "label": "glob", + "importPath": "glob", + "description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { 
+ "label": "glob", + "importPath": "glob", + "description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { + "label": "glob", + "importPath": "glob", + "description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { + "label": "glob", + "importPath": "glob", + "description": "glob", + "isExtraImport": true, + "detail": "glob", + "documentation": {} + }, + { + "label": "onnxruntime", + "kind": 6, + "isExtraImport": true, + "importPath": "onnxruntime", + "description": "onnxruntime", + "detail": "onnxruntime", + "documentation": {} + }, + { + "label": "onnx", + "kind": 6, + "isExtraImport": true, + "importPath": "onnx", + "description": "onnx", + "detail": "onnx", + "documentation": {} + }, + { + "label": "numpy_helper", + "importPath": "onnx", + "description": "onnx", + "isExtraImport": true, + "detail": "onnx", + "documentation": {} + }, + { + "label": "get_image", + "importPath": "insightface.data", + "description": "insightface.data", + "isExtraImport": true, + "detail": "insightface.data", + "documentation": {} + }, + { + "label": "skimage.transform", + "kind": 6, + "isExtraImport": true, + "importPath": "skimage.transform", + "description": "skimage.transform", + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "estimate_transform", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "warp", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "resize", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "rescale", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "estimate_transform", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "warp", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "resize", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "rescale", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "estimate_transform", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "warp", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "resize", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "rescale", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + 
"documentation": {} + }, + { + "label": "estimate_transform", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "warp", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "resize", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "rescale", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "estimate_transform", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "warp", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "resize", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "rescale", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "estimate_transform", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "warp", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "resize", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "rescale", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "estimate_transform", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "warp", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "resize", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "rescale", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "estimate_transform", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "warp", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "resize", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": 
"skimage.transform", + "documentation": {} + }, + { + "label": "rescale", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "rescale", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "estimate_transform", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "warp", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "estimate_transform", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "warp", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "resize", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "rescale", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "estimate_transform", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "warp", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "resize", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "rescale", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "estimate_transform", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "warp", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "resize", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "rescale", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "estimate_transform", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "warp", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "resize", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": 
true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "rescale", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "resize", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "resize", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "resize", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "resize", + "importPath": "skimage.transform", + "description": "skimage.transform", + "isExtraImport": true, + "detail": "skimage.transform", + "documentation": {} + }, + { + "label": "normalize", + "importPath": "sklearn.preprocessing", + "description": "sklearn.preprocessing", + "isExtraImport": true, + "detail": "sklearn.preprocessing", + "documentation": {} + }, + { + "label": "ArcFaceORT", + "importPath": "onnx_helper", + "description": "onnx_helper", + "isExtraImport": true, + "detail": "onnx_helper", + "documentation": {} + }, + { + "label": "collections", + "kind": 6, + "isExtraImport": true, + "importPath": "collections", + "description": "collections", + "detail": "collections", + "documentation": {} + }, + { + "label": "OrderedDict", + "importPath": "collections", + "description": "collections", + "isExtraImport": true, + "detail": "collections", + "documentation": {} + }, + { + "label": "OrderedDict", + "importPath": "collections", + "description": "collections", + "isExtraImport": true, + "detail": "collections", + "documentation": {} + }, + { + "label": "OrderedDict", + "importPath": "collections", + "description": "collections", + "isExtraImport": true, + "detail": "collections", + "documentation": {} + }, + { + "label": "OrderedDict", + "importPath": "collections", + "description": "collections", + "isExtraImport": true, + "detail": "collections", + "documentation": {} + }, + { + "label": "namedtuple", + "importPath": "collections", + "description": "collections", + "isExtraImport": true, + "detail": "collections", + "documentation": {} + }, + { + "label": "OrderedDict", + "importPath": "collections", + "description": "collections", + "isExtraImport": true, + "detail": "collections", + "documentation": {} + }, + { + "label": "defaultdict", + "importPath": "collections", + "description": "collections", + "isExtraImport": true, + "detail": "collections", + "documentation": {} + }, + { + "label": "namedtuple", + "importPath": "collections", + "description": "collections", + "isExtraImport": true, + "detail": "collections", + "documentation": {} + }, + { + "label": "defaultdict", + "importPath": "collections", + "description": "collections", + "isExtraImport": true, + "detail": "collections", + "documentation": {} + }, + { + "label": "defaultdict", + "importPath": "collections", + "description": "collections", + "isExtraImport": true, + "detail": "collections", + "documentation": {} + }, + { + "label": "namedtuple", + "importPath": "collections", + "description": "collections", + "isExtraImport": true, + "detail": "collections", + "documentation": {} + }, + { + "label": "defaultdict", + "importPath": "collections", + "description": "collections", + "isExtraImport": true, 
+ "detail": "collections", + "documentation": {} + }, + { + "label": "namedtuple", + "importPath": "collections", + "description": "collections", + "isExtraImport": true, + "detail": "collections", + "documentation": {} + }, + { + "label": "defaultdict", + "importPath": "collections", + "description": "collections", + "isExtraImport": true, + "detail": "collections", + "documentation": {} + }, + { + "label": "deque", + "importPath": "collections", + "description": "collections", + "isExtraImport": true, + "detail": "collections", + "documentation": {} + }, + { + "label": "torch.nn.functional", + "kind": 6, + "isExtraImport": true, + "importPath": "torch.nn.functional", + "description": "torch.nn.functional", + "detail": "torch.nn.functional", + "documentation": {} + }, + { + "label": "linear", + "importPath": "torch.nn.functional", + "description": "torch.nn.functional", + "isExtraImport": true, + "detail": "torch.nn.functional", + "documentation": {} + }, + { + "label": "normalize", + "importPath": "torch.nn.functional", + "description": "torch.nn.functional", + "isExtraImport": true, + "detail": "torch.nn.functional", + "documentation": {} + }, + { + "label": "linear", + "importPath": "torch.nn.functional", + "description": "torch.nn.functional", + "isExtraImport": true, + "detail": "torch.nn.functional", + "documentation": {} + }, + { + "label": "normalize", + "importPath": "torch.nn.functional", + "description": "torch.nn.functional", + "isExtraImport": true, + "detail": "torch.nn.functional", + "documentation": {} + }, + { + "label": "get_dataloader", + "importPath": "dataset", + "description": "dataset", + "isExtraImport": true, + "detail": "dataset", + "documentation": {} + }, + { + "label": "get_dataloader", + "importPath": "dataset", + "description": "dataset", + "isExtraImport": true, + "detail": "dataset", + "documentation": {} + }, + { + "label": "CombinedMarginLoss", + "importPath": "losses", + "description": "losses", + "isExtraImport": true, + "detail": "losses", + "documentation": {} + }, + { + "label": "CombinedMarginLoss", + "importPath": "losses", + "description": "losses", + "isExtraImport": true, + "detail": "losses", + "documentation": {} + }, + { + "label": "PolyScheduler", + "importPath": "lr_scheduler", + "description": "lr_scheduler", + "isExtraImport": true, + "detail": "lr_scheduler", + "documentation": {} + }, + { + "label": "PolyScheduler", + "importPath": "lr_scheduler", + "description": "lr_scheduler", + "isExtraImport": true, + "detail": "lr_scheduler", + "documentation": {} + }, + { + "label": "PartialFC", + "importPath": "partial_fc", + "description": "partial_fc", + "isExtraImport": true, + "detail": "partial_fc", + "documentation": {} + }, + { + "label": "PartialFCAdamW", + "importPath": "partial_fc", + "description": "partial_fc", + "isExtraImport": true, + "detail": "partial_fc", + "documentation": {} + }, + { + "label": "CallBackLogging", + "importPath": "utils.utils_callbacks", + "description": "utils.utils_callbacks", + "isExtraImport": true, + "detail": "utils.utils_callbacks", + "documentation": {} + }, + { + "label": "CallBackVerification", + "importPath": "utils.utils_callbacks", + "description": "utils.utils_callbacks", + "isExtraImport": true, + "detail": "utils.utils_callbacks", + "documentation": {} + }, + { + "label": "CallBackLogging", + "importPath": "utils.utils_callbacks", + "description": "utils.utils_callbacks", + "isExtraImport": true, + "detail": "utils.utils_callbacks", + "documentation": {} + }, + { + "label": 
"CallBackVerification", + "importPath": "utils.utils_callbacks", + "description": "utils.utils_callbacks", + "isExtraImport": true, + "detail": "utils.utils_callbacks", + "documentation": {} + }, + { + "label": "get_config", + "importPath": "utils.utils_config", + "description": "utils.utils_config", + "isExtraImport": true, + "detail": "utils.utils_config", + "documentation": {} + }, + { + "label": "get_config", + "importPath": "utils.utils_config", + "description": "utils.utils_config", + "isExtraImport": true, + "detail": "utils.utils_config", + "documentation": {} + }, + { + "label": "PartialFC_V2", + "importPath": "partial_fc_v2", + "description": "partial_fc_v2", + "isExtraImport": true, + "detail": "partial_fc_v2", + "documentation": {} + }, + { + "label": "os,", + "kind": 6, + "isExtraImport": true, + "importPath": "os.", + "description": "os.", + "detail": "os.", + "documentation": {} + }, + { + "label": "torchvision.transforms", + "kind": 6, + "isExtraImport": true, + "importPath": "torchvision.transforms", + "description": "torchvision.transforms", + "detail": "torchvision.transforms", + "documentation": {} + }, + { + "label": "Normalize", + "importPath": "torchvision.transforms", + "description": "torchvision.transforms", + "isExtraImport": true, + "detail": "torchvision.transforms", + "documentation": {} + }, + { + "label": "Normalize", + "importPath": "torchvision.transforms", + "description": "torchvision.transforms", + "isExtraImport": true, + "detail": "torchvision.transforms", + "documentation": {} + }, + { + "label": "imread", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imsave", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imread", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imsave", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imread", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imsave", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imread", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imsave", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imread", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imsave", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imread", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imsave", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imread", + 
"importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imsave", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imread", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imsave", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imread", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imread", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imsave", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imread", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imread", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imread", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imread", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imread", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imread", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imread", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imsave", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imread", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imsave", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imread", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imsave", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imread", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imsave", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imread", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", 
+ "documentation": {} + }, + { + "label": "imread", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "imread", + "importPath": "skimage.io", + "description": "skimage.io", + "isExtraImport": true, + "detail": "skimage.io", + "documentation": {} + }, + { + "label": "scipy.io", + "kind": 6, + "isExtraImport": true, + "importPath": "scipy.io", + "description": "scipy.io", + "detail": "scipy.io", + "documentation": {} + }, + { + "label": "savemat", + "importPath": "scipy.io", + "description": "scipy.io", + "isExtraImport": true, + "detail": "scipy.io", + "documentation": {} + }, + { + "label": "loadmat", + "importPath": "scipy.io", + "description": "scipy.io", + "isExtraImport": true, + "detail": "scipy.io", + "documentation": {} + }, + { + "label": "savemat", + "importPath": "scipy.io", + "description": "scipy.io", + "isExtraImport": true, + "detail": "scipy.io", + "documentation": {} + }, + { + "label": "loadmat", + "importPath": "scipy.io", + "description": "scipy.io", + "isExtraImport": true, + "detail": "scipy.io", + "documentation": {} + }, + { + "label": "savemat", + "importPath": "scipy.io", + "description": "scipy.io", + "isExtraImport": true, + "detail": "scipy.io", + "documentation": {} + }, + { + "label": "savemat", + "importPath": "scipy.io", + "description": "scipy.io", + "isExtraImport": true, + "detail": "scipy.io", + "documentation": {} + }, + { + "label": "savemat", + "importPath": "scipy.io", + "description": "scipy.io", + "isExtraImport": true, + "detail": "scipy.io", + "documentation": {} + }, + { + "label": "loadmat", + "importPath": "scipy.io", + "description": "scipy.io", + "isExtraImport": true, + "detail": "scipy.io", + "documentation": {} + }, + { + "label": "savemat", + "importPath": "scipy.io", + "description": "scipy.io", + "isExtraImport": true, + "detail": "scipy.io", + "documentation": {} + }, + { + "label": "loadmat", + "importPath": "scipy.io", + "description": "scipy.io", + "isExtraImport": true, + "detail": "scipy.io", + "documentation": {} + }, + { + "label": "savemat", + "importPath": "scipy.io", + "description": "scipy.io", + "isExtraImport": true, + "detail": "scipy.io", + "documentation": {} + }, + { + "label": "loadmat", + "importPath": "scipy.io", + "description": "scipy.io", + "isExtraImport": true, + "detail": "scipy.io", + "documentation": {} + }, + { + "label": "savemat", + "importPath": "scipy.io", + "description": "scipy.io", + "isExtraImport": true, + "detail": "scipy.io", + "documentation": {} + }, + { + "label": "savemat", + "importPath": "scipy.io", + "description": "scipy.io", + "isExtraImport": true, + "detail": "scipy.io", + "documentation": {} + }, + { + "label": "loadmat", + "importPath": "scipy.io", + "description": "scipy.io", + "isExtraImport": true, + "detail": "scipy.io", + "documentation": {} + }, + { + "label": "loadmat", + "importPath": "scipy.io", + "description": "scipy.io", + "isExtraImport": true, + "detail": "scipy.io", + "documentation": {} + }, + { + "label": "savemat", + "importPath": "scipy.io", + "description": "scipy.io", + "isExtraImport": true, + "detail": "scipy.io", + "documentation": {} + }, + { + "label": "loadmat", + "importPath": "scipy.io", + "description": "scipy.io", + "isExtraImport": true, + "detail": "scipy.io", + "documentation": {} + }, + { + "label": "savemat", + "importPath": "scipy.io", + "description": "scipy.io", + "isExtraImport": true, + "detail": "scipy.io", + "documentation": {} + }, + { + 
"label": "loadmat", + "importPath": "scipy.io", + "description": "scipy.io", + "isExtraImport": true, + "detail": "scipy.io", + "documentation": {} + }, + { + "label": "savemat", + "importPath": "scipy.io", + "description": "scipy.io", + "isExtraImport": true, + "detail": "scipy.io", + "documentation": {} + }, + { + "label": "loadmat", + "importPath": "scipy.io", + "description": "scipy.io", + "isExtraImport": true, + "detail": "scipy.io", + "documentation": {} + }, + { + "label": "loadmat", + "importPath": "scipy.io", + "description": "scipy.io", + "isExtraImport": true, + "detail": "scipy.io", + "documentation": {} + }, + { + "label": "savemat", + "importPath": "scipy.io", + "description": "scipy.io", + "isExtraImport": true, + "detail": "scipy.io", + "documentation": {} + }, + { + "label": "torch.autograd", + "kind": 6, + "isExtraImport": true, + "importPath": "torch.autograd", + "description": "torch.autograd", + "detail": "torch.autograd", + "documentation": {} + }, + { + "label": "Variable", + "importPath": "torch.autograd", + "description": "torch.autograd", + "isExtraImport": true, + "detail": "torch.autograd", + "documentation": {} + }, + { + "label": "Variable", + "importPath": "torch.autograd", + "description": "torch.autograd", + "isExtraImport": true, + "detail": "torch.autograd", + "documentation": {} + }, + { + "label": "Parameter", + "importPath": "torch.nn.parameter", + "description": "torch.nn.parameter", + "isExtraImport": true, + "detail": "torch.nn.parameter", + "documentation": {} + }, + { + "label": "Parameter", + "importPath": "torch.nn.parameter", + "description": "torch.nn.parameter", + "isExtraImport": true, + "detail": "torch.nn.parameter", + "documentation": {} + }, + { + "label": "torch.optim", + "kind": 6, + "isExtraImport": true, + "importPath": "torch.optim", + "description": "torch.optim", + "detail": "torch.optim", + "documentation": {} + }, + { + "label": "setup", + "importPath": "setuptools", + "description": "setuptools", + "isExtraImport": true, + "detail": "setuptools", + "documentation": {} + }, + { + "label": "setup", + "importPath": "setuptools", + "description": "setuptools", + "isExtraImport": true, + "detail": "setuptools", + "documentation": {} + }, + { + "label": "setup", + "importPath": "setuptools", + "description": "setuptools", + "isExtraImport": true, + "detail": "setuptools", + "documentation": {} + }, + { + "label": "BuildExtension", + "importPath": "torch.utils.cpp_extension", + "description": "torch.utils.cpp_extension", + "isExtraImport": true, + "detail": "torch.utils.cpp_extension", + "documentation": {} + }, + { + "label": "CUDAExtension", + "importPath": "torch.utils.cpp_extension", + "description": "torch.utils.cpp_extension", + "isExtraImport": true, + "detail": "torch.utils.cpp_extension", + "documentation": {} + }, + { + "label": "BuildExtension", + "importPath": "torch.utils.cpp_extension", + "description": "torch.utils.cpp_extension", + "isExtraImport": true, + "detail": "torch.utils.cpp_extension", + "documentation": {} + }, + { + "label": "CUDAExtension", + "importPath": "torch.utils.cpp_extension", + "description": "torch.utils.cpp_extension", + "isExtraImport": true, + "detail": "torch.utils.cpp_extension", + "documentation": {} + }, + { + "label": "CfgNode", + "importPath": "yacs.config", + "description": "yacs.config", + "isExtraImport": true, + "detail": "yacs.config", + "documentation": {} + }, + { + "label": "CfgNode", + "importPath": "yacs.config", + "description": "yacs.config", + "isExtraImport": true, + 
"detail": "yacs.config", + "documentation": {} + }, + { + "label": "CfgNode", + "importPath": "yacs.config", + "description": "yacs.config", + "isExtraImport": true, + "detail": "yacs.config", + "documentation": {} + }, + { + "label": "CfgNode", + "importPath": "yacs.config", + "description": "yacs.config", + "isExtraImport": true, + "detail": "yacs.config", + "documentation": {} + }, + { + "label": "yaml", + "kind": 6, + "isExtraImport": true, + "importPath": "yaml", + "description": "yaml", + "detail": "yaml", + "documentation": {} + }, + { + "label": "torchvision.models", + "kind": 6, + "isExtraImport": true, + "importPath": "torchvision.models", + "description": "torchvision.models", + "detail": "torchvision.models", + "documentation": {} + }, + { + "label": "torchfile", + "kind": 6, + "isExtraImport": true, + "importPath": "torchfile", + "description": "torchfile", + "detail": "torchfile", + "documentation": {} + }, + { + "label": "imageio", + "kind": 6, + "isExtraImport": true, + "importPath": "imageio", + "description": "imageio", + "detail": "imageio", + "documentation": {} + }, + { + "label": "warp_perspective", + "importPath": "kornia.geometry.transform.imgwarp", + "description": "kornia.geometry.transform.imgwarp", + "isExtraImport": true, + "detail": "kornia.geometry.transform.imgwarp", + "documentation": {} + }, + { + "label": "get_perspective_transform", + "importPath": "kornia.geometry.transform.imgwarp", + "description": "kornia.geometry.transform.imgwarp", + "isExtraImport": true, + "detail": "kornia.geometry.transform.imgwarp", + "documentation": {} + }, + { + "label": "warp_affine", + "importPath": "kornia.geometry.transform.imgwarp", + "description": "kornia.geometry.transform.imgwarp", + "isExtraImport": true, + "detail": "kornia.geometry.transform.imgwarp", + "documentation": {} + }, + { + "label": "warp_perspective", + "importPath": "kornia.geometry.transform.imgwarp", + "description": "kornia.geometry.transform.imgwarp", + "isExtraImport": true, + "detail": "kornia.geometry.transform.imgwarp", + "documentation": {} + }, + { + "label": "get_perspective_transform", + "importPath": "kornia.geometry.transform.imgwarp", + "description": "kornia.geometry.transform.imgwarp", + "isExtraImport": true, + "detail": "kornia.geometry.transform.imgwarp", + "documentation": {} + }, + { + "label": "warp_affine", + "importPath": "kornia.geometry.transform.imgwarp", + "description": "kornia.geometry.transform.imgwarp", + "isExtraImport": true, + "detail": "kornia.geometry.transform.imgwarp", + "documentation": {} + }, + { + "label": "tqdm", + "kind": 6, + "isExtraImport": true, + "importPath": "tqdm", + "description": "tqdm", + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + 
"detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + 
"description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "tqdm", + "importPath": "tqdm", + "description": "tqdm", + "isExtraImport": true, + "detail": "tqdm", + "documentation": {} + }, + { + "label": "morphology", + "importPath": "scipy.ndimage", + "description": "scipy.ndimage", + "isExtraImport": true, + "detail": "scipy.ndimage", + "documentation": {} + }, + { + "label": "morphology", + "importPath": "scipy.ndimage", + "description": "scipy.ndimage", + "isExtraImport": true, + "detail": "scipy.ndimage", + "documentation": {} + }, + { + "label": "distance_transform_edt", + "importPath": "scipy.ndimage", + "description": "scipy.ndimage", + "isExtraImport": true, + "detail": "scipy.ndimage", + "documentation": {} + }, + { + "label": "mmcv", + "kind": 6, + "isExtraImport": true, + "importPath": "mmcv", + "description": "mmcv", + "detail": "mmcv", + "documentation": {} + }, + { + "label": "Timer", + "importPath": "mmcv", + "description": "mmcv", + "isExtraImport": true, + "detail": "mmcv", + "documentation": {} + }, + { + "label": "DECA", + "importPath": "decalib.deca", + "description": "decalib.deca", + "isExtraImport": true, + "detail": "decalib.deca", + "documentation": {} + }, + { + "label": "DECA", + "importPath": "decalib.deca", + "description": "decalib.deca", + "isExtraImport": true, + "detail": "decalib.deca", + "documentation": {} + }, + { + "label": "DECA", + "importPath": "decalib.deca", + "description": "decalib.deca", + "isExtraImport": true, + "detail": "decalib.deca", + "documentation": {} + }, + { + "label": "DECA", + "importPath": "decalib.deca", + "description": "decalib.deca", + "isExtraImport": true, + "detail": "decalib.deca", + "documentation": {} + }, + { + "label": "DECA", + "importPath": "decalib.deca", + "description": "decalib.deca", + "isExtraImport": true, + "detail": "decalib.deca", + "documentation": {} + }, + { + "label": "DECA", + "importPath": "decalib.deca", + "description": "decalib.deca", + "isExtraImport": true, + "detail": "decalib.deca", + "documentation": {} + }, + { + "label": "datasets", + "importPath": "decalib.datasets", + "description": "decalib.datasets", + "isExtraImport": true, + "detail": "decalib.datasets", + "documentation": {} + }, + { + "label": "datasets", + "importPath": "decalib.datasets", + "description": "decalib.datasets", + "isExtraImport": true, + "detail": "decalib.datasets", + "documentation": {} + }, + { + "label": "datasets", + "importPath": "decalib.datasets", + "description": "decalib.datasets", + "isExtraImport": true, + "detail": "decalib.datasets", + "documentation": {} + }, + { + "label": "datasets", + "importPath": "decalib.datasets", + "description": "decalib.datasets", + "isExtraImport": true, + "detail": "decalib.datasets", + "documentation": {} + }, + { + "label": "datasets", + "importPath": "decalib.datasets", + "description": "decalib.datasets", + "isExtraImport": true, + "detail": 
"decalib.datasets", + "documentation": {} + }, + { + "label": "datasets", + "importPath": "decalib.datasets", + "description": "decalib.datasets", + "isExtraImport": true, + "detail": "decalib.datasets", + "documentation": {} + }, + { + "label": "util", + "importPath": "decalib.utils", + "description": "decalib.utils", + "isExtraImport": true, + "detail": "decalib.utils", + "documentation": {} + }, + { + "label": "util", + "importPath": "decalib.utils", + "description": "decalib.utils", + "isExtraImport": true, + "detail": "decalib.utils", + "documentation": {} + }, + { + "label": "util", + "importPath": "decalib.utils", + "description": "decalib.utils", + "isExtraImport": true, + "detail": "decalib.utils", + "documentation": {} + }, + { + "label": "util", + "importPath": "decalib.utils", + "description": "decalib.utils", + "isExtraImport": true, + "detail": "decalib.utils", + "documentation": {} + }, + { + "label": "util", + "importPath": "decalib.utils", + "description": "decalib.utils", + "isExtraImport": true, + "detail": "decalib.utils", + "documentation": {} + }, + { + "label": "util", + "importPath": "decalib.utils", + "description": "decalib.utils", + "isExtraImport": true, + "detail": "decalib.utils", + "documentation": {} + }, + { + "label": "get_cfg_defaults", + "importPath": "decalib.utils.config", + "description": "decalib.utils.config", + "isExtraImport": true, + "detail": "decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg", + "importPath": "decalib.utils.config", + "description": "decalib.utils.config", + "isExtraImport": true, + "detail": "decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg", + "importPath": "decalib.utils.config", + "description": "decalib.utils.config", + "isExtraImport": true, + "detail": "decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg", + "importPath": "decalib.utils.config", + "description": "decalib.utils.config", + "isExtraImport": true, + "detail": "decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg", + "importPath": "decalib.utils.config", + "description": "decalib.utils.config", + "isExtraImport": true, + "detail": "decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg", + "importPath": "decalib.utils.config", + "description": "decalib.utils.config", + "isExtraImport": true, + "detail": "decalib.utils.config", + "documentation": {} + }, + { + "label": "transform_points", + "importPath": "decalib.utils.tensor_cropper", + "description": "decalib.utils.tensor_cropper", + "isExtraImport": true, + "detail": "decalib.utils.tensor_cropper", + "documentation": {} + }, + { + "label": "transform_points", + "importPath": "decalib.utils.tensor_cropper", + "description": "decalib.utils.tensor_cropper", + "isExtraImport": true, + "detail": "decalib.utils.tensor_cropper", + "documentation": {} + }, + { + "label": "transform_points", + "importPath": "decalib.utils.tensor_cropper", + "description": "decalib.utils.tensor_cropper", + "isExtraImport": true, + "detail": "decalib.utils.tensor_cropper", + "documentation": {} + }, + { + "label": "transform_points", + "importPath": "decalib.utils.tensor_cropper", + "description": "decalib.utils.tensor_cropper", + "isExtraImport": true, + "detail": "decalib.utils.tensor_cropper", + "documentation": {} + }, + { + "label": "batch_euler2axis", + "importPath": "decalib.utils.rotation_converter", + "description": "decalib.utils.rotation_converter", + "isExtraImport": true, + "detail": "decalib.utils.rotation_converter", + 
"documentation": {} + }, + { + "label": "deg2rad", + "importPath": "decalib.utils.rotation_converter", + "description": "decalib.utils.rotation_converter", + "isExtraImport": true, + "detail": "decalib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "torch.backends.cudnn", + "kind": 6, + "isExtraImport": true, + "importPath": "torch.backends.cudnn", + "description": "torch.backends.cudnn", + "detail": "torch.backends.cudnn", + "documentation": {} + }, + { + "label": "shutil", + "kind": 6, + "isExtraImport": true, + "importPath": "shutil", + "description": "shutil", + "detail": "shutil", + "documentation": {} + }, + { + "label": "copyfile", + "importPath": "shutil", + "description": "shutil", + "isExtraImport": true, + "detail": "shutil", + "documentation": {} + }, + { + "label": "copyfile", + "importPath": "shutil", + "description": "shutil", + "isExtraImport": true, + "detail": "shutil", + "documentation": {} + }, + { + "label": "copy", + "kind": 6, + "isExtraImport": true, + "importPath": "copy", + "description": "copy", + "detail": "copy", + "documentation": {} + }, + { + "label": "deepcopy", + "importPath": "copy", + "description": "copy", + "isExtraImport": true, + "detail": "copy", + "documentation": {} + }, + { + "label": "deepcopy", + "importPath": "copy", + "description": "copy", + "isExtraImport": true, + "detail": "copy", + "documentation": {} + }, + { + "label": "deepcopy", + "importPath": "copy", + "description": "copy", + "isExtraImport": true, + "detail": "copy", + "documentation": {} + }, + { + "label": "deepcopy", + "importPath": "copy", + "description": "copy", + "isExtraImport": true, + "detail": "copy", + "documentation": {} + }, + { + "label": "ABC", + "importPath": "abc", + "description": "abc", + "isExtraImport": true, + "detail": "abc", + "documentation": {} + }, + { + "label": "ABC", + "importPath": "abc", + "description": "abc", + "isExtraImport": true, + "detail": "abc", + "documentation": {} + }, + { + "label": "ABC", + "importPath": "abc", + "description": "abc", + "isExtraImport": true, + "detail": "abc", + "documentation": {} + }, + { + "label": "ABC", + "importPath": "abc", + "description": "abc", + "isExtraImport": true, + "detail": "abc", + "documentation": {} + }, + { + "label": "ABC", + "importPath": "abc", + "description": "abc", + "isExtraImport": true, + "detail": "abc", + "documentation": {} + }, + { + "label": "abstractmethod", + "importPath": "abc", + "description": "abc", + "isExtraImport": true, + "detail": "abc", + "documentation": {} + }, + { + "label": "ABC", + "importPath": "abc", + "description": "abc", + "isExtraImport": true, + "detail": "abc", + "documentation": {} + }, + { + "label": "ABC", + "importPath": "abc", + "description": "abc", + "isExtraImport": true, + "detail": "abc", + "documentation": {} + }, + { + "label": "ABC", + "importPath": "abc", + "description": "abc", + "isExtraImport": true, + "detail": "abc", + "documentation": {} + }, + { + "label": "ABC", + "importPath": "abc", + "description": "abc", + "isExtraImport": true, + "detail": "abc", + "documentation": {} + }, + { + "label": "abstractmethod", + "importPath": "abc", + "description": "abc", + "isExtraImport": true, + "detail": "abc", + "documentation": {} + }, + { + "label": "ABCMeta", + "importPath": "abc", + "description": "abc", + "isExtraImport": true, + "detail": "abc", + "documentation": {} + }, + { + "label": "abstractmethod", + "importPath": "abc", + "description": "abc", + "isExtraImport": true, + "detail": "abc", + "documentation": {} + 
}, + { + "label": "load_objs_as_meshes", + "importPath": "pytorch3d.io", + "description": "pytorch3d.io", + "isExtraImport": true, + "detail": "pytorch3d.io", + "documentation": {} + }, + { + "label": "load_objs_as_meshes", + "importPath": "pytorch3d.io", + "description": "pytorch3d.io", + "isExtraImport": true, + "detail": "pytorch3d.io", + "documentation": {} + }, + { + "label": "load_objs_as_meshes", + "importPath": "pytorch3d.io", + "description": "pytorch3d.io", + "isExtraImport": true, + "detail": "pytorch3d.io", + "documentation": {} + }, + { + "label": "load_objs_as_meshes", + "importPath": "pytorch3d.io", + "description": "pytorch3d.io", + "isExtraImport": true, + "detail": "pytorch3d.io", + "documentation": {} + }, + { + "label": "load_objs_as_meshes", + "importPath": "pytorch3d.io", + "description": "pytorch3d.io", + "isExtraImport": true, + "detail": "pytorch3d.io", + "documentation": {} + }, + { + "label": "load_obj", + "importPath": "pytorch3d.io", + "description": "pytorch3d.io", + "isExtraImport": true, + "detail": "pytorch3d.io", + "documentation": {} + }, + { + "label": "save_ply", + "importPath": "pytorch3d.io", + "description": "pytorch3d.io", + "isExtraImport": true, + "detail": "pytorch3d.io", + "documentation": {} + }, + { + "label": "save_ply", + "importPath": "pytorch3d.io", + "description": "pytorch3d.io", + "isExtraImport": true, + "detail": "pytorch3d.io", + "documentation": {} + }, + { + "label": "save_ply", + "importPath": "pytorch3d.io", + "description": "pytorch3d.io", + "isExtraImport": true, + "detail": "pytorch3d.io", + "documentation": {} + }, + { + "label": "load_obj", + "importPath": "pytorch3d.io", + "description": "pytorch3d.io", + "isExtraImport": true, + "detail": "pytorch3d.io", + "documentation": {} + }, + { + "label": "load_obj", + "importPath": "pytorch3d.io", + "description": "pytorch3d.io", + "isExtraImport": true, + "detail": "pytorch3d.io", + "documentation": {} + }, + { + "label": "load_obj", + "importPath": "pytorch3d.io", + "description": "pytorch3d.io", + "isExtraImport": true, + "detail": "pytorch3d.io", + "documentation": {} + }, + { + "label": "load_obj", + "importPath": "pytorch3d.io", + "description": "pytorch3d.io", + "isExtraImport": true, + "detail": "pytorch3d.io", + "documentation": {} + }, + { + "label": "Instance", + "importPath": "datasets.creation.instances.instance", + "description": "datasets.creation.instances.instance", + "isExtraImport": true, + "detail": "datasets.creation.instances.instance", + "documentation": {} + }, + { + "label": "Instance", + "importPath": "datasets.creation.instances.instance", + "description": "datasets.creation.instances.instance", + "isExtraImport": true, + "detail": "datasets.creation.instances.instance", + "documentation": {} + }, + { + "label": "Instance", + "importPath": "datasets.creation.instances.instance", + "description": "datasets.creation.instances.instance", + "isExtraImport": true, + "detail": "datasets.creation.instances.instance", + "documentation": {} + }, + { + "label": "Instance", + "importPath": "datasets.creation.instances.instance", + "description": "datasets.creation.instances.instance", + "isExtraImport": true, + "detail": "datasets.creation.instances.instance", + "documentation": {} + }, + { + "label": "Instance", + "importPath": "datasets.creation.instances.instance", + "description": "datasets.creation.instances.instance", + "isExtraImport": true, + "detail": "datasets.creation.instances.instance", + "documentation": {} + }, + { + "label": "Instance", + 
"importPath": "datasets.creation.instances.instance", + "description": "datasets.creation.instances.instance", + "isExtraImport": true, + "detail": "datasets.creation.instances.instance", + "documentation": {} + }, + { + "label": "Instance", + "importPath": "datasets.creation.instances.instance", + "description": "datasets.creation.instances.instance", + "isExtraImport": true, + "detail": "datasets.creation.instances.instance", + "documentation": {} + }, + { + "label": "Instance", + "importPath": "datasets.creation.instances.instance", + "description": "datasets.creation.instances.instance", + "isExtraImport": true, + "detail": "datasets.creation.instances.instance", + "documentation": {} + }, + { + "label": "Instance", + "importPath": "datasets.creation.instances.instance", + "description": "datasets.creation.instances.instance", + "isExtraImport": true, + "detail": "datasets.creation.instances.instance", + "documentation": {} + }, + { + "label": "RotateAxisAngle", + "importPath": "pytorch3d.transforms", + "description": "pytorch3d.transforms", + "isExtraImport": true, + "detail": "pytorch3d.transforms", + "documentation": {} + }, + { + "label": "RotateAxisAngle", + "importPath": "pytorch3d.transforms", + "description": "pytorch3d.transforms", + "isExtraImport": true, + "detail": "pytorch3d.transforms", + "documentation": {} + }, + { + "label": "rotation_6d_to_matrix", + "importPath": "pytorch3d.transforms", + "description": "pytorch3d.transforms", + "isExtraImport": true, + "detail": "pytorch3d.transforms", + "documentation": {} + }, + { + "label": "matrix_to_rotation_6d", + "importPath": "pytorch3d.transforms", + "description": "pytorch3d.transforms", + "isExtraImport": true, + "detail": "pytorch3d.transforms", + "documentation": {} + }, + { + "label": "rotation_6d_to_matrix", + "importPath": "pytorch3d.transforms", + "description": "pytorch3d.transforms", + "isExtraImport": true, + "detail": "pytorch3d.transforms", + "documentation": {} + }, + { + "label": "so3_exp_map", + "importPath": "pytorch3d.transforms", + "description": "pytorch3d.transforms", + "isExtraImport": true, + "detail": "pytorch3d.transforms", + "documentation": {} + }, + { + "label": "matrix_to_rotation_6d", + "importPath": "pytorch3d.transforms", + "description": "pytorch3d.transforms", + "isExtraImport": true, + "detail": "pytorch3d.transforms", + "documentation": {} + }, + { + "label": "rotation_6d_to_matrix", + "importPath": "pytorch3d.transforms", + "description": "pytorch3d.transforms", + "isExtraImport": true, + "detail": "pytorch3d.transforms", + "documentation": {} + }, + { + "label": "Transform3d", + "importPath": "pytorch3d.transforms", + "description": "pytorch3d.transforms", + "isExtraImport": true, + "detail": "pytorch3d.transforms", + "documentation": {} + }, + { + "label": "axis_angle_to_matrix", + "importPath": "pytorch3d.transforms", + "description": "pytorch3d.transforms", + "isExtraImport": true, + "detail": "pytorch3d.transforms", + "documentation": {} + }, + { + "label": "axis_angle_to_matrix", + "importPath": "pytorch3d.transforms", + "description": "pytorch3d.transforms", + "isExtraImport": true, + "detail": "pytorch3d.transforms", + "documentation": {} + }, + { + "label": "so3_exp_map", + "importPath": "pytorch3d.transforms", + "description": "pytorch3d.transforms", + "isExtraImport": true, + "detail": "pytorch3d.transforms", + "documentation": {} + }, + { + "label": "so3_exp_map", + "importPath": "pytorch3d.transforms", + "description": "pytorch3d.transforms", + "isExtraImport": true, + 
"detail": "pytorch3d.transforms", + "documentation": {} + }, + { + "label": "matrix_to_rotation_6d", + "importPath": "pytorch3d.transforms", + "description": "pytorch3d.transforms", + "isExtraImport": true, + "detail": "pytorch3d.transforms", + "documentation": {} + }, + { + "label": "rotation_6d_to_matrix", + "importPath": "pytorch3d.transforms", + "description": "pytorch3d.transforms", + "isExtraImport": true, + "detail": "pytorch3d.transforms", + "documentation": {} + }, + { + "label": "Transform3d", + "importPath": "pytorch3d.transforms", + "description": "pytorch3d.transforms", + "isExtraImport": true, + "detail": "pytorch3d.transforms", + "documentation": {} + }, + { + "label": "axis_angle_to_matrix", + "importPath": "pytorch3d.transforms", + "description": "pytorch3d.transforms", + "isExtraImport": true, + "detail": "pytorch3d.transforms", + "documentation": {} + }, + { + "label": "axis_angle_to_matrix", + "importPath": "pytorch3d.transforms", + "description": "pytorch3d.transforms", + "isExtraImport": true, + "detail": "pytorch3d.transforms", + "documentation": {} + }, + { + "label": "axis_angle_to_matrix", + "importPath": "pytorch3d.transforms", + "description": "pytorch3d.transforms", + "isExtraImport": true, + "detail": "pytorch3d.transforms", + "documentation": {} + }, + { + "label": "axis_angle_to_matrix", + "importPath": "pytorch3d.transforms", + "description": "pytorch3d.transforms", + "isExtraImport": true, + "detail": "pytorch3d.transforms", + "documentation": {} + }, + { + "label": "axis_angle_to_matrix", + "importPath": "pytorch3d.transforms", + "description": "pytorch3d.transforms", + "isExtraImport": true, + "detail": "pytorch3d.transforms", + "documentation": {} + }, + { + "label": "PIL", + "kind": 6, + "isExtraImport": true, + "importPath": "PIL", + "description": "PIL", + "detail": "PIL", + "documentation": {} + }, + { + "label": "ImageFile", + "importPath": "PIL", + "description": "PIL", + "isExtraImport": true, + "detail": "PIL", + "documentation": {} + }, + { + "label": "Image", + "importPath": "PIL", + "description": "PIL", + "isExtraImport": true, + "detail": "PIL", + "documentation": {} + }, + { + "label": "Image", + "importPath": "PIL", + "description": "PIL", + "isExtraImport": true, + "detail": "PIL", + "documentation": {} + }, + { + "label": "Image", + "importPath": "PIL", + "description": "PIL", + "isExtraImport": true, + "detail": "PIL", + "documentation": {} + }, + { + "label": "Pool", + "importPath": "multiprocessing", + "description": "multiprocessing", + "isExtraImport": true, + "detail": "multiprocessing", + "documentation": {} + }, + { + "label": "Pool", + "importPath": "multiprocessing", + "description": "multiprocessing", + "isExtraImport": true, + "detail": "multiprocessing", + "documentation": {} + }, + { + "label": "FaceAnalysis", + "importPath": "insightface.app", + "description": "insightface.app", + "isExtraImport": true, + "detail": "insightface.app", + "documentation": {} + }, + { + "label": "FaceAnalysis", + "importPath": "insightface.app", + "description": "insightface.app", + "isExtraImport": true, + "detail": "insightface.app", + "documentation": {} + }, + { + "label": "FaceAnalysis", + "importPath": "insightface.app", + "description": "insightface.app", + "isExtraImport": true, + "detail": "insightface.app", + "documentation": {} + }, + { + "label": "FaceAnalysis", + "importPath": "insightface.app", + "description": "insightface.app", + "isExtraImport": true, + "detail": "insightface.app", + "documentation": {} + }, + { + "label": 
"Face", + "importPath": "insightface.app.common", + "description": "insightface.app.common", + "isExtraImport": true, + "detail": "insightface.app.common", + "documentation": {} + }, + { + "label": "Face", + "importPath": "insightface.app.common", + "description": "insightface.app.common", + "isExtraImport": true, + "detail": "insightface.app.common", + "documentation": {} + }, + { + "label": "Face", + "importPath": "insightface.app.common", + "description": "insightface.app.common", + "isExtraImport": true, + "detail": "insightface.app.common", + "documentation": {} + }, + { + "label": "Face", + "importPath": "insightface.app.common", + "description": "insightface.app.common", + "isExtraImport": true, + "detail": "insightface.app.common", + "documentation": {} + }, + { + "label": "face_align", + "importPath": "insightface.utils", + "description": "insightface.utils", + "isExtraImport": true, + "detail": "insightface.utils", + "documentation": {} + }, + { + "label": "face_align", + "importPath": "insightface.utils", + "description": "insightface.utils", + "isExtraImport": true, + "detail": "insightface.utils", + "documentation": {} + }, + { + "label": "face_align", + "importPath": "insightface.utils", + "description": "insightface.utils", + "isExtraImport": true, + "detail": "insightface.utils", + "documentation": {} + }, + { + "label": "face_align", + "importPath": "insightface.utils", + "description": "insightface.utils", + "isExtraImport": true, + "detail": "insightface.utils", + "documentation": {} + }, + { + "label": "face_align", + "importPath": "insightface.utils", + "description": "insightface.utils", + "isExtraImport": true, + "detail": "insightface.utils", + "documentation": {} + }, + { + "label": "get_image", + "importPath": "datasets.creation.util", + "description": "datasets.creation.util", + "isExtraImport": true, + "detail": "datasets.creation.util", + "documentation": {} + }, + { + "label": "get_center", + "importPath": "datasets.creation.util", + "description": "datasets.creation.util", + "isExtraImport": true, + "detail": "datasets.creation.util", + "documentation": {} + }, + { + "label": "get_arcface_input", + "importPath": "datasets.creation.util", + "description": "datasets.creation.util", + "isExtraImport": true, + "detail": "datasets.creation.util", + "documentation": {} + }, + { + "label": "get_arcface_input", + "importPath": "datasets.creation.util", + "description": "datasets.creation.util", + "isExtraImport": true, + "detail": "datasets.creation.util", + "documentation": {} + }, + { + "label": "get_center", + "importPath": "datasets.creation.util", + "description": "datasets.creation.util", + "isExtraImport": true, + "detail": "datasets.creation.util", + "documentation": {} + }, + { + "label": "Generator", + "importPath": "datasets.creation.generator", + "description": "datasets.creation.generator", + "isExtraImport": true, + "detail": "datasets.creation.generator", + "documentation": {} + }, + { + "label": "BU3DFE", + "importPath": "datasets.creation.instances.bu3dfe", + "description": "datasets.creation.instances.bu3dfe", + "isExtraImport": true, + "detail": "datasets.creation.instances.bu3dfe", + "documentation": {} + }, + { + "label": "D3DFACS", + "importPath": "datasets.creation.instances.d3dfacs", + "description": "datasets.creation.instances.d3dfacs", + "isExtraImport": true, + "detail": "datasets.creation.instances.d3dfacs", + "documentation": {} + }, + { + "label": "FaceWarehouse", + "importPath": "datasets.creation.instances.facewarehouse", + 
"description": "datasets.creation.instances.facewarehouse", + "isExtraImport": true, + "detail": "datasets.creation.instances.facewarehouse", + "documentation": {} + }, + { + "label": "Florence", + "importPath": "datasets.creation.instances.florence", + "description": "datasets.creation.instances.florence", + "isExtraImport": true, + "detail": "datasets.creation.instances.florence", + "documentation": {} + }, + { + "label": "FRGC", + "importPath": "datasets.creation.instances.frgc", + "description": "datasets.creation.instances.frgc", + "isExtraImport": true, + "detail": "datasets.creation.instances.frgc", + "documentation": {} + }, + { + "label": "LYHM", + "importPath": "datasets.creation.instances.lyhm", + "description": "datasets.creation.instances.lyhm", + "isExtraImport": true, + "detail": "datasets.creation.instances.lyhm", + "documentation": {} + }, + { + "label": "PB4D", + "importPath": "datasets.creation.instances.pb4d", + "description": "datasets.creation.instances.pb4d", + "isExtraImport": true, + "detail": "datasets.creation.instances.pb4d", + "documentation": {} + }, + { + "label": "Stirling", + "importPath": "datasets.creation.instances.stirling", + "description": "datasets.creation.instances.stirling", + "isExtraImport": true, + "detail": "datasets.creation.instances.stirling", + "documentation": {} + }, + { + "label": "math", + "importPath": "numpy.lib", + "description": "numpy.lib", + "isExtraImport": true, + "detail": "numpy.lib", + "documentation": {} + }, + { + "label": "re", + "kind": 6, + "isExtraImport": true, + "importPath": "re", + "description": "re", + "detail": "re", + "documentation": {} + }, + { + "label": "trimesh", + "kind": 6, + "isExtraImport": true, + "importPath": "trimesh", + "description": "trimesh", + "detail": "trimesh", + "documentation": {} + }, + { + "label": "Trimesh", + "importPath": "trimesh", + "description": "trimesh", + "isExtraImport": true, + "detail": "trimesh", + "documentation": {} + }, + { + "label": "Trimesh", + "importPath": "trimesh", + "description": "trimesh", + "isExtraImport": true, + "detail": "trimesh", + "documentation": {} + }, + { + "label": "pytorch3d", + "kind": 6, + "isExtraImport": true, + "importPath": "pytorch3d", + "description": "pytorch3d", + "detail": "pytorch3d", + "documentation": {} + }, + { + "label": "FoVPerspectiveCameras", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "look_at_view_transform", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "RasterizationSettings", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "MeshRenderer", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "MeshRasterizer", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "SoftPhongShader", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "TexturesVertex", + "importPath": 
"pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "look_at_view_transform", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "FoVPerspectiveCameras", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "PerspectiveCameras", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "AmbientLights", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "PointLights", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "RasterizationSettings", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "BlendParams", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "MeshRenderer", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "MeshRasterizer", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "SoftPhongShader", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "SoftSilhouetteShader", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "HardPhongShader", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "HardGouraudShader", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "HardFlatShader", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "TexturesVertex", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "RasterizationSettings", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "PointLights", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "MeshRenderer", + 
"importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "MeshRasterizer", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "TexturesVertex", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "SoftPhongShader", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "look_at_view_transform", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "PerspectiveCameras", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "RasterizationSettings", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "PointLights", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "MeshRenderer", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "MeshRasterizer", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "TexturesVertex", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "SoftPhongShader", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "look_at_view_transform", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "PerspectiveCameras", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "RasterizationSettings", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "PointLights", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "MeshRenderer", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "MeshRasterizer", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": 
"TexturesVertex", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "SoftPhongShader", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "look_at_view_transform", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "PerspectiveCameras", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "RasterizationSettings", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "PointLights", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "MeshRenderer", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "MeshRasterizer", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "TexturesVertex", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "SoftPhongShader", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "look_at_view_transform", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "PerspectiveCameras", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "MeshRasterizer", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "RasterizationSettings", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "RasterizationSettings", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "MeshRenderer", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "MeshRasterizer", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "SoftSilhouetteShader", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} 
+ }, + { + "label": "RasterizationSettings", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "PointLights", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "MeshRenderer", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "MeshRasterizer", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "TexturesVertex", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "SoftPhongShader", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "look_at_view_transform", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "PerspectiveCameras", + "importPath": "pytorch3d.renderer", + "description": "pytorch3d.renderer", + "isExtraImport": true, + "detail": "pytorch3d.renderer", + "documentation": {} + }, + { + "label": "datasets", + "kind": 6, + "isExtraImport": true, + "importPath": "datasets", + "description": "datasets", + "detail": "datasets", + "documentation": {} + }, + { + "label": "cfg", + "importPath": "mica_configs.config", + "description": "mica_configs.config", + "isExtraImport": true, + "detail": "mica_configs.config", + "documentation": {} + }, + { + "label": "get_cfg_defaults", + "importPath": "mica_configs.config", + "description": "mica_configs.config", + "isExtraImport": true, + "detail": "mica_configs.config", + "documentation": {} + }, + { + "label": "util", + "importPath": "utils", + "description": "utils", + "isExtraImport": true, + "detail": "utils", + "documentation": {} + }, + { + "label": "util", + "importPath": "utils", + "description": "utils", + "isExtraImport": true, + "detail": "utils", + "documentation": {} + }, + { + "label": "util", + "importPath": "utils", + "description": "utils", + "isExtraImport": true, + "detail": "utils", + "documentation": {} + }, + { + "label": "util", + "importPath": "utils", + "description": "utils", + "isExtraImport": true, + "detail": "utils", + "documentation": {} + }, + { + "label": "pose_tracker", + "importPath": "utils", + "description": "utils", + "isExtraImport": true, + "detail": "utils", + "documentation": {} + }, + { + "label": "Validator", + "importPath": "validator", + "description": "validator", + "isExtraImport": true, + "detail": "validator", + "documentation": {} + }, + { + "label": "subprocess", + "kind": 6, + "isExtraImport": true, + "importPath": "subprocess", + "description": "subprocess", + "detail": "subprocess", + "documentation": {} + }, + { + "label": "call", + "importPath": "subprocess", + "description": "subprocess", + "isExtraImport": true, + "detail": "subprocess", + "documentation": {} + }, + { + "label": "BestModel", + "importPath": "utils.best_model", + "description": "utils.best_model", + "isExtraImport": true, + "detail": 
"utils.best_model", + "documentation": {} + }, + { + "label": "Tester", + "importPath": "micalib.tester", + "description": "micalib.tester", + "isExtraImport": true, + "detail": "micalib.tester", + "documentation": {} + }, + { + "label": "Trainer", + "importPath": "micalib.trainer", + "description": "micalib.trainer", + "isExtraImport": true, + "detail": "micalib.trainer", + "documentation": {} + }, + { + "label": "get_cfg_defaults", + "importPath": "mica_configs.tracker_config", + "description": "mica_configs.tracker_config", + "isExtraImport": true, + "detail": "mica_configs.tracker_config", + "documentation": {} + }, + { + "label": "MeshShapeRenderer", + "importPath": "micalib.renderer", + "description": "micalib.renderer", + "isExtraImport": true, + "detail": "micalib.renderer", + "documentation": {} + }, + { + "label": "FLAME", + "importPath": "models.flame", + "description": "models.flame", + "isExtraImport": true, + "detail": "models.flame", + "documentation": {} + }, + { + "label": "torch.multiprocessing", + "kind": 6, + "isExtraImport": true, + "importPath": "torch.multiprocessing", + "description": "torch.multiprocessing", + "detail": "torch.multiprocessing", + "documentation": {} + }, + { + "label": "test", + "importPath": "jobs", + "description": "jobs", + "isExtraImport": true, + "detail": "jobs", + "documentation": {} + }, + { + "label": "train", + "importPath": "jobs", + "description": "jobs", + "isExtraImport": true, + "detail": "jobs", + "documentation": {} + }, + { + "label": "PIXIE", + "importPath": "pixielib.pixie", + "description": "pixielib.pixie", + "isExtraImport": true, + "detail": "pixielib.pixie", + "documentation": {} + }, + { + "label": "PIXIE", + "importPath": "pixielib.pixie", + "description": "pixielib.pixie", + "isExtraImport": true, + "detail": "pixielib.pixie", + "documentation": {} + }, + { + "label": "PIXIE", + "importPath": "pixielib.pixie", + "description": "pixielib.pixie", + "isExtraImport": true, + "detail": "pixielib.pixie", + "documentation": {} + }, + { + "label": "PIXIE", + "importPath": "pixielib.pixie", + "description": "pixielib.pixie", + "isExtraImport": true, + "detail": "pixielib.pixie", + "documentation": {} + }, + { + "label": "PIXIE", + "importPath": "pixielib.pixie", + "description": "pixielib.pixie", + "isExtraImport": true, + "detail": "pixielib.pixie", + "documentation": {} + }, + { + "label": "PIXIE", + "importPath": "pixielib.pixie", + "description": "pixielib.pixie", + "isExtraImport": true, + "detail": "pixielib.pixie", + "documentation": {} + }, + { + "label": "PIXIE", + "importPath": "pixielib.pixie", + "description": "pixielib.pixie", + "isExtraImport": true, + "detail": "pixielib.pixie", + "documentation": {} + }, + { + "label": "Visualizer", + "importPath": "pixielib.visualizer", + "description": "pixielib.visualizer", + "isExtraImport": true, + "detail": "pixielib.visualizer", + "documentation": {} + }, + { + "label": "Visualizer", + "importPath": "pixielib.visualizer", + "description": "pixielib.visualizer", + "isExtraImport": true, + "detail": "pixielib.visualizer", + "documentation": {} + }, + { + "label": "Visualizer", + "importPath": "pixielib.visualizer", + "description": "pixielib.visualizer", + "isExtraImport": true, + "detail": "pixielib.visualizer", + "documentation": {} + }, + { + "label": "Visualizer", + "importPath": "pixielib.visualizer", + "description": "pixielib.visualizer", + "isExtraImport": true, + "detail": "pixielib.visualizer", + "documentation": {} + }, + { + "label": "Visualizer", + "importPath": 
"pixielib.visualizer", + "description": "pixielib.visualizer", + "isExtraImport": true, + "detail": "pixielib.visualizer", + "documentation": {} + }, + { + "label": "Visualizer", + "importPath": "pixielib.visualizer", + "description": "pixielib.visualizer", + "isExtraImport": true, + "detail": "pixielib.visualizer", + "documentation": {} + }, + { + "label": "Visualizer", + "importPath": "pixielib.visualizer", + "description": "pixielib.visualizer", + "isExtraImport": true, + "detail": "pixielib.visualizer", + "documentation": {} + }, + { + "label": "TestData", + "importPath": "pixielib.datasets.body_datasets", + "description": "pixielib.datasets.body_datasets", + "isExtraImport": true, + "detail": "pixielib.datasets.body_datasets", + "documentation": {} + }, + { + "label": "TestData", + "importPath": "pixielib.datasets.body_datasets", + "description": "pixielib.datasets.body_datasets", + "isExtraImport": true, + "detail": "pixielib.datasets.body_datasets", + "documentation": {} + }, + { + "label": "TestData", + "importPath": "pixielib.datasets.body_datasets", + "description": "pixielib.datasets.body_datasets", + "isExtraImport": true, + "detail": "pixielib.datasets.body_datasets", + "documentation": {} + }, + { + "label": "TestData", + "importPath": "pixielib.datasets.body_datasets", + "description": "pixielib.datasets.body_datasets", + "isExtraImport": true, + "detail": "pixielib.datasets.body_datasets", + "documentation": {} + }, + { + "label": "TestData", + "importPath": "pixielib.datasets.body_datasets", + "description": "pixielib.datasets.body_datasets", + "isExtraImport": true, + "detail": "pixielib.datasets.body_datasets", + "documentation": {} + }, + { + "label": "util", + "importPath": "pixielib.utils", + "description": "pixielib.utils", + "isExtraImport": true, + "detail": "pixielib.utils", + "documentation": {} + }, + { + "label": "util", + "importPath": "pixielib.utils", + "description": "pixielib.utils", + "isExtraImport": true, + "detail": "pixielib.utils", + "documentation": {} + }, + { + "label": "util", + "importPath": "pixielib.utils", + "description": "pixielib.utils", + "isExtraImport": true, + "detail": "pixielib.utils", + "documentation": {} + }, + { + "label": "util", + "importPath": "pixielib.utils", + "description": "pixielib.utils", + "isExtraImport": true, + "detail": "pixielib.utils", + "documentation": {} + }, + { + "label": "util", + "importPath": "pixielib.utils", + "description": "pixielib.utils", + "isExtraImport": true, + "detail": "pixielib.utils", + "documentation": {} + }, + { + "label": "util", + "importPath": "pixielib.utils", + "description": "pixielib.utils", + "isExtraImport": true, + "detail": "pixielib.utils", + "documentation": {} + }, + { + "label": "util", + "importPath": "pixielib.utils", + "description": "pixielib.utils", + "isExtraImport": true, + "detail": "pixielib.utils", + "documentation": {} + }, + { + "label": "get_cfg_defaults", + "importPath": "pixielib.utils.config", + "description": "pixielib.utils.config", + "isExtraImport": true, + "detail": "pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg", + "importPath": "pixielib.utils.config", + "description": "pixielib.utils.config", + "isExtraImport": true, + "detail": "pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg", + "importPath": "pixielib.utils.config", + "description": "pixielib.utils.config", + "isExtraImport": true, + "detail": "pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg", + "importPath": 
"pixielib.utils.config", + "description": "pixielib.utils.config", + "isExtraImport": true, + "detail": "pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg", + "importPath": "pixielib.utils.config", + "description": "pixielib.utils.config", + "isExtraImport": true, + "detail": "pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg", + "importPath": "pixielib.utils.config", + "description": "pixielib.utils.config", + "isExtraImport": true, + "detail": "pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg", + "importPath": "pixielib.utils.config", + "description": "pixielib.utils.config", + "isExtraImport": true, + "detail": "pixielib.utils.config", + "documentation": {} + }, + { + "label": "transform_points", + "importPath": "pixielib.utils.tensor_cropper", + "description": "pixielib.utils.tensor_cropper", + "isExtraImport": true, + "detail": "pixielib.utils.tensor_cropper", + "documentation": {} + }, + { + "label": "TestData", + "importPath": "pixielib.datasets.face_datasets", + "description": "pixielib.datasets.face_datasets", + "isExtraImport": true, + "detail": "pixielib.datasets.face_datasets", + "documentation": {} + }, + { + "label": "TestData", + "importPath": "pixielib.datasets.hand_datasets", + "description": "pixielib.datasets.hand_datasets", + "isExtraImport": true, + "detail": "pixielib.datasets.hand_datasets", + "documentation": {} + }, + { + "label": "median_blur", + "importPath": "kornia.filters", + "description": "kornia.filters", + "isExtraImport": true, + "detail": "kornia.filters", + "documentation": {} + }, + { + "label": "gaussian_blur2d", + "importPath": "kornia.filters", + "description": "kornia.filters", + "isExtraImport": true, + "detail": "kornia.filters", + "documentation": {} + }, + { + "label": "motion_blur", + "importPath": "kornia.filters", + "description": "kornia.filters", + "isExtraImport": true, + "detail": "kornia.filters", + "documentation": {} + }, + { + "label": "torchvision.models.resnet", + "kind": 6, + "isExtraImport": true, + "importPath": "torchvision.models.resnet", + "description": "torchvision.models.resnet", + "detail": "torchvision.models.resnet", + "documentation": {} + }, + { + "label": "Bottleneck", + "importPath": "torchvision.models.resnet", + "description": "torchvision.models.resnet", + "isExtraImport": true, + "detail": "torchvision.models.resnet", + "documentation": {} + }, + { + "label": "BasicBlock", + "importPath": "torchvision.models.resnet", + "description": "torchvision.models.resnet", + "isExtraImport": true, + "detail": "torchvision.models.resnet", + "documentation": {} + }, + { + "label": "PIL.Image", + "kind": 6, + "isExtraImport": true, + "importPath": "PIL.Image", + "description": "PIL.Image", + "detail": "PIL.Image", + "documentation": {} + }, + { + "label": "mpl_toolkits.mplot3d", + "kind": 6, + "isExtraImport": true, + "importPath": "mpl_toolkits.mplot3d", + "description": "mpl_toolkits.mplot3d", + "detail": "mpl_toolkits.mplot3d", + "documentation": {} + }, + { + "label": "Axes3D", + "importPath": "mpl_toolkits.mplot3d", + "description": "mpl_toolkits.mplot3d", + "isExtraImport": true, + "detail": "mpl_toolkits.mplot3d", + "documentation": {} + }, + { + "label": "Axes3D", + "importPath": "mpl_toolkits.mplot3d", + "description": "mpl_toolkits.mplot3d", + "isExtraImport": true, + "detail": "mpl_toolkits.mplot3d", + "documentation": {} + }, + { + "label": "Axes3D", + "importPath": "mpl_toolkits.mplot3d", + "description": "mpl_toolkits.mplot3d", + "isExtraImport": 
true, + "detail": "mpl_toolkits.mplot3d", + "documentation": {} + }, + { + "label": "matplotlib.cm", + "kind": 6, + "isExtraImport": true, + "importPath": "matplotlib.cm", + "description": "matplotlib.cm", + "detail": "matplotlib.cm", + "documentation": {} + }, + { + "label": "pickle", + "importPath": "copyreg", + "description": "copyreg", + "isExtraImport": true, + "detail": "copyreg", + "documentation": {} + }, + { + "label": "enum", + "kind": 6, + "isExtraImport": true, + "importPath": "enum", + "description": "enum", + "detail": "enum", + "documentation": {} + }, + { + "label": "Enum", + "importPath": "enum", + "description": "enum", + "isExtraImport": true, + "detail": "enum", + "documentation": {} + }, + { + "label": "Enum", + "importPath": "enum", + "description": "enum", + "isExtraImport": true, + "detail": "enum", + "documentation": {} + }, + { + "label": "json", + "kind": 6, + "isExtraImport": true, + "importPath": "json", + "description": "json", + "detail": "json", + "documentation": {} + }, + { + "label": "joblib", + "kind": 6, + "isExtraImport": true, + "importPath": "joblib", + "description": "joblib", + "detail": "joblib", + "documentation": {} + }, + { + "label": "imsave", + "importPath": "matplotlib.image", + "description": "matplotlib.image", + "isExtraImport": true, + "detail": "matplotlib.image", + "documentation": {} + }, + { + "label": "cfg", + "importPath": "pymaf_core.cfgs", + "description": "pymaf_core.cfgs", + "isExtraImport": true, + "detail": "pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "parse_args", + "importPath": "pymaf_core.cfgs", + "description": "pymaf_core.cfgs", + "isExtraImport": true, + "detail": "pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "cfg", + "importPath": "pymaf_core.cfgs", + "description": "pymaf_core.cfgs", + "isExtraImport": true, + "detail": "pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "cfg", + "importPath": "pymaf_core.cfgs", + "description": "pymaf_core.cfgs", + "isExtraImport": true, + "detail": "pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "cfg", + "importPath": "pymaf_core.cfgs", + "description": "pymaf_core.cfgs", + "isExtraImport": true, + "detail": "pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "cfg", + "importPath": "pymaf_core.cfgs", + "description": "pymaf_core.cfgs", + "isExtraImport": true, + "detail": "pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "path_config", + "importPath": "pymaf_core", + "description": "pymaf_core", + "isExtraImport": true, + "detail": "pymaf_core", + "documentation": {} + }, + { + "label": "constants", + "importPath": "pymaf_core", + "description": "pymaf_core", + "isExtraImport": true, + "detail": "pymaf_core", + "documentation": {} + }, + { + "label": "path_config", + "importPath": "pymaf_core", + "description": "pymaf_core", + "isExtraImport": true, + "detail": "pymaf_core", + "documentation": {} + }, + { + "label": "constants", + "importPath": "pymaf_core", + "description": "pymaf_core", + "isExtraImport": true, + "detail": "pymaf_core", + "documentation": {} + }, + { + "label": "constants", + "importPath": "pymaf_core", + "description": "pymaf_core", + "isExtraImport": true, + "detail": "pymaf_core", + "documentation": {} + }, + { + "label": "path_config", + "importPath": "pymaf_core", + "description": "pymaf_core", + "isExtraImport": true, + "detail": "pymaf_core", + "documentation": {} + }, + { + "label": "constants", + "importPath": "pymaf_core", + "description": "pymaf_core", + "isExtraImport": true, + 
"detail": "pymaf_core", + "documentation": {} + }, + { + "label": "path_config", + "importPath": "pymaf_core", + "description": "pymaf_core", + "isExtraImport": true, + "detail": "pymaf_core", + "documentation": {} + }, + { + "label": "constants", + "importPath": "pymaf_core", + "description": "pymaf_core", + "isExtraImport": true, + "detail": "pymaf_core", + "documentation": {} + }, + { + "label": "constants", + "importPath": "pymaf_core", + "description": "pymaf_core", + "isExtraImport": true, + "detail": "pymaf_core", + "documentation": {} + }, + { + "label": "path_config", + "importPath": "pymaf_core", + "description": "pymaf_core", + "isExtraImport": true, + "detail": "pymaf_core", + "documentation": {} + }, + { + "label": "constants", + "importPath": "pymaf_core", + "description": "pymaf_core", + "isExtraImport": true, + "detail": "pymaf_core", + "documentation": {} + }, + { + "label": "path_config", + "importPath": "pymaf_core", + "description": "pymaf_core", + "isExtraImport": true, + "detail": "pymaf_core", + "documentation": {} + }, + { + "label": "hmr", + "importPath": "models", + "description": "models", + "isExtraImport": true, + "detail": "models", + "documentation": {} + }, + { + "label": "pymaf_net", + "importPath": "models", + "description": "models", + "isExtraImport": true, + "detail": "models", + "documentation": {} + }, + { + "label": "SMPL", + "importPath": "models", + "description": "models", + "isExtraImport": true, + "detail": "models", + "documentation": {} + }, + { + "label": "SMPL", + "importPath": "models", + "description": "models", + "isExtraImport": true, + "detail": "models", + "documentation": {} + }, + { + "label": "get_partial_smpl", + "importPath": "models.smpl", + "description": "models.smpl", + "isExtraImport": true, + "detail": "models.smpl", + "documentation": {} + }, + { + "label": "get_smpl_faces", + "importPath": "models.smpl", + "description": "models.smpl", + "isExtraImport": true, + "detail": "models.smpl", + "documentation": {} + }, + { + "label": "get_model_faces", + "importPath": "models.smpl", + "description": "models.smpl", + "isExtraImport": true, + "detail": "models.smpl", + "documentation": {} + }, + { + "label": "get_model_tpose", + "importPath": "models.smpl", + "description": "models.smpl", + "isExtraImport": true, + "detail": "models.smpl", + "documentation": {} + }, + { + "label": "Inference", + "importPath": "datasets.inference", + "description": "datasets.inference", + "isExtraImport": true, + "detail": "datasets.inference", + "documentation": {} + }, + { + "label": "PyRenderer", + "importPath": "utils.renderer", + "description": "utils.renderer", + "isExtraImport": true, + "detail": "utils.renderer", + "documentation": {} + }, + { + "label": "crop", + "importPath": "utils.imutils", + "description": "utils.imutils", + "isExtraImport": true, + "detail": "utils.imutils", + "documentation": {} + }, + { + "label": "crop", + "importPath": "utils.imutils", + "description": "utils.imutils", + "isExtraImport": true, + "detail": "utils.imutils", + "documentation": {} + }, + { + "label": "flip_img", + "importPath": "utils.imutils", + "description": "utils.imutils", + "isExtraImport": true, + "detail": "utils.imutils", + "documentation": {} + }, + { + "label": "flip_pose", + "importPath": "utils.imutils", + "description": "utils.imutils", + "isExtraImport": true, + "detail": "utils.imutils", + "documentation": {} + }, + { + "label": "flip_aa", + "importPath": "utils.imutils", + "description": "utils.imutils", + "isExtraImport": true, + 
"detail": "utils.imutils", + "documentation": {} + }, + { + "label": "flip_kp", + "importPath": "utils.imutils", + "description": "utils.imutils", + "isExtraImport": true, + "detail": "utils.imutils", + "documentation": {} + }, + { + "label": "transform", + "importPath": "utils.imutils", + "description": "utils.imutils", + "isExtraImport": true, + "detail": "utils.imutils", + "documentation": {} + }, + { + "label": "get_transform", + "importPath": "utils.imutils", + "description": "utils.imutils", + "isExtraImport": true, + "detail": "utils.imutils", + "documentation": {} + }, + { + "label": "get_rot_transf", + "importPath": "utils.imutils", + "description": "utils.imutils", + "isExtraImport": true, + "detail": "utils.imutils", + "documentation": {} + }, + { + "label": "rot_aa", + "importPath": "utils.imutils", + "description": "utils.imutils", + "isExtraImport": true, + "detail": "utils.imutils", + "documentation": {} + }, + { + "label": "j2d_processing", + "importPath": "utils.imutils", + "description": "utils.imutils", + "isExtraImport": true, + "detail": "utils.imutils", + "documentation": {} + }, + { + "label": "j2d_processing", + "importPath": "utils.imutils", + "description": "utils.imutils", + "isExtraImport": true, + "detail": "utils.imutils", + "documentation": {} + }, + { + "label": "crop", + "importPath": "utils.imutils", + "description": "utils.imutils", + "isExtraImport": true, + "detail": "utils.imutils", + "documentation": {} + }, + { + "label": "download_url", + "importPath": "utils.demo_utils", + "description": "utils.demo_utils", + "isExtraImport": true, + "detail": "utils.demo_utils", + "documentation": {} + }, + { + "label": "convert_crop_cam_to_orig_img", + "importPath": "utils.demo_utils", + "description": "utils.demo_utils", + "isExtraImport": true, + "detail": "utils.demo_utils", + "documentation": {} + }, + { + "label": "video_to_images", + "importPath": "utils.demo_utils", + "description": "utils.demo_utils", + "isExtraImport": true, + "detail": "utils.demo_utils", + "documentation": {} + }, + { + "label": "images_to_video", + "importPath": "utils.demo_utils", + "description": "utils.demo_utils", + "isExtraImport": true, + "detail": "utils.demo_utils", + "documentation": {} + }, + { + "label": "convert_to_full_img_cam", + "importPath": "utils.geometry", + "description": "utils.geometry", + "isExtraImport": true, + "detail": "utils.geometry", + "documentation": {} + }, + { + "label": "rot6d_to_rotmat", + "importPath": "utils.geometry", + "description": "utils.geometry", + "isExtraImport": true, + "detail": "utils.geometry", + "documentation": {} + }, + { + "label": "projection", + "importPath": "utils.geometry", + "description": "utils.geometry", + "isExtraImport": true, + "detail": "utils.geometry", + "documentation": {} + }, + { + "label": "rot6d_to_rotmat", + "importPath": "utils.geometry", + "description": "utils.geometry", + "isExtraImport": true, + "detail": "utils.geometry", + "documentation": {} + }, + { + "label": "rotmat_to_rot6d", + "importPath": "utils.geometry", + "description": "utils.geometry", + "isExtraImport": true, + "detail": "utils.geometry", + "documentation": {} + }, + { + "label": "projection", + "importPath": "utils.geometry", + "description": "utils.geometry", + "isExtraImport": true, + "detail": "utils.geometry", + "documentation": {} + }, + { + "label": "rotation_matrix_to_angle_axis", + "importPath": "utils.geometry", + "description": "utils.geometry", + "isExtraImport": true, + "detail": "utils.geometry", + "documentation": {} + }, + 
{ + "label": "rotmat_to_angle", + "importPath": "utils.geometry", + "description": "utils.geometry", + "isExtraImport": true, + "detail": "utils.geometry", + "documentation": {} + }, + { + "label": "compute_twist_ratation", + "importPath": "utils.geometry", + "description": "utils.geometry", + "isExtraImport": true, + "detail": "utils.geometry", + "documentation": {} + }, + { + "label": "rotation_matrix_to_angle_axis", + "importPath": "utils.geometry", + "description": "utils.geometry", + "isExtraImport": true, + "detail": "utils.geometry", + "documentation": {} + }, + { + "label": "openpifpaf", + "kind": 6, + "isExtraImport": true, + "importPath": "openpifpaf", + "description": "openpifpaf", + "detail": "openpifpaf", + "documentation": {} + }, + { + "label": "decoder", + "importPath": "openpifpaf", + "description": "openpifpaf", + "isExtraImport": true, + "detail": "openpifpaf", + "documentation": {} + }, + { + "label": "network", + "importPath": "openpifpaf", + "description": "openpifpaf", + "isExtraImport": true, + "detail": "openpifpaf", + "documentation": {} + }, + { + "label": "Predictor", + "importPath": "openpifpaf.predictor", + "description": "openpifpaf.predictor", + "isExtraImport": true, + "detail": "openpifpaf.predictor", + "documentation": {} + }, + { + "label": "Stream", + "importPath": "openpifpaf.stream", + "description": "openpifpaf.stream", + "isExtraImport": true, + "detail": "openpifpaf.stream", + "documentation": {} + }, + { + "label": "view_as_windows", + "importPath": "skimage.util.shape", + "description": "skimage.util.shape", + "isExtraImport": true, + "detail": "skimage.util.shape", + "documentation": {} + }, + { + "label": "johnson_lindenstrauss_min_dim", + "importPath": "sklearn.random_projection", + "description": "sklearn.random_projection", + "isExtraImport": true, + "detail": "sklearn.random_projection", + "documentation": {} + }, + { + "label": "torchvision.transforms.functional", + "kind": 6, + "isExtraImport": true, + "importPath": "torchvision.transforms.functional", + "description": "torchvision.transforms.functional", + "detail": "torchvision.transforms.functional", + "documentation": {} + }, + { + "label": "to_tensor", + "importPath": "torchvision.transforms.functional", + "description": "torchvision.transforms.functional", + "isExtraImport": true, + "detail": "torchvision.transforms.functional", + "documentation": {} + }, + { + "label": "gaussian_blur", + "importPath": "torchvision.transforms.functional", + "description": "torchvision.transforms.functional", + "isExtraImport": true, + "detail": "torchvision.transforms.functional", + "documentation": {} + }, + { + "label": "gaussian_blur", + "importPath": "torchvision.transforms.functional", + "description": "torchvision.transforms.functional", + "isExtraImport": true, + "detail": "torchvision.transforms.functional", + "documentation": {} + }, + { + "label": "gaussian_blur", + "importPath": "torchvision.transforms.functional", + "description": "torchvision.transforms.functional", + "isExtraImport": true, + "detail": "torchvision.transforms.functional", + "documentation": {} + }, + { + "label": "gaussian_blur", + "importPath": "torchvision.transforms.functional", + "description": "torchvision.transforms.functional", + "isExtraImport": true, + "detail": "torchvision.transforms.functional", + "documentation": {} + }, + { + "label": "gaussian_blur", + "importPath": "torchvision.transforms.functional", + "description": "torchvision.transforms.functional", + "isExtraImport": true, + "detail": 
"torchvision.transforms.functional", + "documentation": {} + }, + { + "label": "get_all_bbox_params", + "importPath": "utils.smooth_bbox", + "description": "utils.smooth_bbox", + "isExtraImport": true, + "detail": "utils.smooth_bbox", + "documentation": {} + }, + { + "label": "get_smooth_bbox_params", + "importPath": "utils.smooth_bbox", + "description": "utils.smooth_bbox", + "isExtraImport": true, + "detail": "utils.smooth_bbox", + "documentation": {} + }, + { + "label": "get_all_bbox_params", + "importPath": "utils.smooth_bbox", + "description": "utils.smooth_bbox", + "isExtraImport": true, + "detail": "utils.smooth_bbox", + "documentation": {} + }, + { + "label": "read_cam_params", + "importPath": "utils.cam_params", + "description": "utils.cam_params", + "isExtraImport": true, + "detail": "utils.cam_params", + "documentation": {} + }, + { + "label": "homo_vector", + "importPath": "utils.cam_params", + "description": "utils.cam_params", + "isExtraImport": true, + "detail": "utils.cam_params", + "documentation": {} + }, + { + "label": "homo_vector", + "importPath": "utils.cam_params", + "description": "utils.cam_params", + "isExtraImport": true, + "detail": "utils.cam_params", + "documentation": {} + }, + { + "label": "src.modeling.data.config", + "kind": 6, + "isExtraImport": true, + "importPath": "src.modeling.data.config", + "description": "src.modeling.data.config", + "detail": "src.modeling.data.config", + "documentation": {} + }, + { + "label": "tempfile", + "kind": 6, + "isExtraImport": true, + "importPath": "tempfile", + "description": "tempfile", + "detail": "tempfile", + "documentation": {} + }, + { + "label": "fnmatch", + "kind": 6, + "isExtraImport": true, + "importPath": "fnmatch", + "description": "fnmatch", + "detail": "fnmatch", + "documentation": {} + }, + { + "label": "hashlib", + "kind": 6, + "isExtraImport": true, + "importPath": "hashlib", + "description": "hashlib", + "detail": "hashlib", + "documentation": {} + }, + { + "label": "sha256", + "importPath": "hashlib", + "description": "hashlib", + "isExtraImport": true, + "detail": "hashlib", + "documentation": {} + }, + { + "label": "open", + "importPath": "io", + "description": "io", + "isExtraImport": true, + "detail": "io", + "documentation": {} + }, + { + "label": "open", + "importPath": "io", + "description": "io", + "isExtraImport": true, + "detail": "io", + "documentation": {} + }, + { + "label": "open", + "importPath": "io", + "description": "io", + "isExtraImport": true, + "detail": "io", + "documentation": {} + }, + { + "label": "boto3", + "kind": 6, + "isExtraImport": true, + "importPath": "boto3", + "description": "boto3", + "detail": "boto3", + "documentation": {} + }, + { + "label": "requests", + "kind": 6, + "isExtraImport": true, + "importPath": "requests", + "description": "requests", + "detail": "requests", + "documentation": {} + }, + { + "label": "ClientError", + "importPath": "botocore.exceptions", + "description": "botocore.exceptions", + "isExtraImport": true, + "detail": "botocore.exceptions", + "documentation": {} + }, + { + "label": "code", + "kind": 6, + "isExtraImport": true, + "importPath": "code", + "description": "code", + "detail": "code", + "documentation": {} + }, + { + "label": "six", + "kind": 6, + "isExtraImport": true, + "importPath": "six", + "description": "six", + "detail": "six", + "documentation": {} + }, + { + "label": "torch._utils", + "kind": 6, + "isExtraImport": true, + "importPath": "torch._utils", + "description": "torch._utils", + "detail": "torch._utils", + 
"documentation": {} + }, + { + "label": "triu_indices_from", + "importPath": "numpy.lib.twodim_base", + "description": "numpy.lib.twodim_base", + "isExtraImport": true, + "detail": "numpy.lib.twodim_base", + "documentation": {} + }, + { + "label": "version", + "importPath": "packaging", + "description": "packaging", + "isExtraImport": true, + "detail": "packaging", + "documentation": {} + }, + { + "label": "iuv_img2map", + "importPath": "utils.iuvmap", + "description": "utils.iuvmap", + "isExtraImport": true, + "detail": "utils.iuvmap", + "documentation": {} + }, + { + "label": "iuv_map2img", + "importPath": "utils.iuvmap", + "description": "utils.iuvmap", + "isExtraImport": true, + "detail": "utils.iuvmap", + "documentation": {} + }, + { + "label": "seg_img2map", + "importPath": "utils.iuvmap", + "description": "utils.iuvmap", + "isExtraImport": true, + "detail": "utils.iuvmap", + "documentation": {} + }, + { + "label": "print_arguments", + "importPath": "cgi", + "description": "cgi", + "isExtraImport": true, + "detail": "cgi", + "documentation": {} + }, + { + "label": "pi", + "importPath": "cmath", + "description": "cmath", + "isExtraImport": true, + "detail": "cmath", + "documentation": {} + }, + { + "label": "pi", + "importPath": "cmath", + "description": "cmath", + "isExtraImport": true, + "detail": "cmath", + "documentation": {} + }, + { + "label": "pi", + "importPath": "cmath", + "description": "cmath", + "isExtraImport": true, + "detail": "cmath", + "documentation": {} + }, + { + "label": "base_repr", + "importPath": "numpy.core.numeric", + "description": "numpy.core.numeric", + "isExtraImport": true, + "detail": "numpy.core.numeric", + "documentation": {} + }, + { + "label": "rearrange", + "importPath": "einops", + "description": "einops", + "isExtraImport": true, + "detail": "einops", + "documentation": {} + }, + { + "label": "batch_rodrigues", + "importPath": "smplx.lbs", + "description": "smplx.lbs", + "isExtraImport": true, + "detail": "smplx.lbs", + "documentation": {} + }, + { + "label": "batch_rodrigues", + "importPath": "smplx.lbs", + "description": "smplx.lbs", + "isExtraImport": true, + "detail": "smplx.lbs", + "documentation": {} + }, + { + "label": "batch_rigid_transform", + "importPath": "smplx.lbs", + "description": "smplx.lbs", + "isExtraImport": true, + "detail": "smplx.lbs", + "documentation": {} + }, + { + "label": "transform_mat", + "importPath": "smplx.lbs", + "description": "smplx.lbs", + "isExtraImport": true, + "detail": "smplx.lbs", + "documentation": {} + }, + { + "label": "vertices2joints", + "importPath": "smplx.lbs", + "description": "smplx.lbs", + "isExtraImport": true, + "detail": "smplx.lbs", + "documentation": {} + }, + { + "label": "blend_shapes", + "importPath": "smplx.lbs", + "description": "smplx.lbs", + "isExtraImport": true, + "detail": "smplx.lbs", + "documentation": {} + }, + { + "label": "vertices2landmarks", + "importPath": "smplx.lbs", + "description": "smplx.lbs", + "isExtraImport": true, + "detail": "smplx.lbs", + "documentation": {} + }, + { + "label": "vertices2landmarks", + "importPath": "smplx.lbs", + "description": "smplx.lbs", + "isExtraImport": true, + "detail": "smplx.lbs", + "documentation": {} + }, + { + "label": "softmax_integral_tensor", + "importPath": "utils.keypoints", + "description": "utils.keypoints", + "isExtraImport": true, + "detail": "utils.keypoints", + "documentation": {} + }, + { + "label": "imp", + "kind": 6, + "isExtraImport": true, + "importPath": "imp", + "description": "imp", + "detail": "imp", + 
"documentation": {} + }, + { + "label": "dataclass", + "importPath": "dataclasses", + "description": "dataclasses", + "isExtraImport": true, + "detail": "dataclasses", + "documentation": {} + }, + { + "label": "dataclass", + "importPath": "dataclasses", + "description": "dataclasses", + "isExtraImport": true, + "detail": "dataclasses", + "documentation": {} + }, + { + "label": "dataclass", + "importPath": "dataclasses", + "description": "dataclasses", + "isExtraImport": true, + "detail": "dataclasses", + "documentation": {} + }, + { + "label": "smplx", + "kind": 6, + "isExtraImport": true, + "importPath": "smplx", + "description": "smplx", + "detail": "smplx", + "documentation": {} + }, + { + "label": "body_models", + "importPath": "smplx", + "description": "smplx", + "isExtraImport": true, + "detail": "smplx", + "documentation": {} + }, + { + "label": "SMPL", + "importPath": "smplx", + "description": "smplx", + "isExtraImport": true, + "detail": "smplx", + "documentation": {} + }, + { + "label": "MANO", + "importPath": "smplx", + "description": "smplx", + "isExtraImport": true, + "detail": "smplx", + "documentation": {} + }, + { + "label": "SMPLX", + "importPath": "smplx", + "description": "smplx", + "isExtraImport": true, + "detail": "smplx", + "documentation": {} + }, + { + "label": "SMPLXLayer", + "importPath": "smplx", + "description": "smplx", + "isExtraImport": true, + "detail": "smplx", + "documentation": {} + }, + { + "label": "MANOLayer", + "importPath": "smplx", + "description": "smplx", + "isExtraImport": true, + "detail": "smplx", + "documentation": {} + }, + { + "label": "FLAMELayer", + "importPath": "smplx", + "description": "smplx", + "isExtraImport": true, + "detail": "smplx", + "documentation": {} + }, + { + "label": "SMPLXOutput", + "importPath": "smplx.body_models", + "description": "smplx.body_models", + "isExtraImport": true, + "detail": "smplx.body_models", + "documentation": {} + }, + { + "label": "SMPLXLayer", + "importPath": "smplx.body_models", + "description": "smplx.body_models", + "isExtraImport": true, + "detail": "smplx.body_models", + "documentation": {} + }, + { + "label": "string", + "kind": 6, + "isExtraImport": true, + "importPath": "string", + "description": "string", + "detail": "string", + "documentation": {} + }, + { + "label": "smplx.joint_names", + "kind": 6, + "isExtraImport": true, + "importPath": "smplx.joint_names", + "description": "smplx.joint_names", + "detail": "smplx.joint_names", + "documentation": {} + }, + { + "label": "cPickle", + "importPath": "six.moves", + "description": "six.moves", + "isExtraImport": true, + "detail": "six.moves", + "documentation": {} + }, + { + "label": "cPickle", + "importPath": "six.moves", + "description": "six.moves", + "isExtraImport": true, + "detail": "six.moves", + "documentation": {} + }, + { + "label": "cfg", + "importPath": "models.core.config", + "description": "models.core.config", + "isExtraImport": true, + "detail": "models.core.config", + "documentation": {} + }, + { + "label": "print_assert_equal", + "importPath": "numpy.testing._private.utils", + "description": "numpy.testing._private.utils", + "isExtraImport": true, + "detail": "numpy.testing._private.utils", + "documentation": {} + }, + { + "label": "print_assert_equal", + "importPath": "numpy.testing._private.utils", + "description": "numpy.testing._private.utils", + "isExtraImport": true, + "detail": "numpy.testing._private.utils", + "documentation": {} + }, + { + "label": "print_assert_equal", + "importPath": 
"numpy.testing._private.utils", + "description": "numpy.testing._private.utils", + "isExtraImport": true, + "detail": "numpy.testing._private.utils", + "documentation": {} + }, + { + "label": "Sampler", + "importPath": "torch.utils.data.sampler", + "description": "torch.utils.data.sampler", + "isExtraImport": true, + "detail": "torch.utils.data.sampler", + "documentation": {} + }, + { + "label": "get_single_image_crop_demo", + "importPath": "datasets.data_utils.img_utils", + "description": "datasets.data_utils.img_utils", + "isExtraImport": true, + "detail": "datasets.data_utils.img_utils", + "documentation": {} + }, + { + "label": "scipy.spatial.distance", + "kind": 6, + "isExtraImport": true, + "importPath": "scipy.spatial.distance", + "description": "scipy.spatial.distance", + "detail": "scipy.spatial.distance", + "documentation": {} + }, + { + "label": "rearrange", + "importPath": "einops.einops", + "description": "einops.einops", + "isExtraImport": true, + "detail": "einops.einops", + "documentation": {} + }, + { + "label": "torch.cuda.comm", + "kind": 6, + "isExtraImport": true, + "importPath": "torch.cuda.comm", + "description": "torch.cuda.comm", + "detail": "torch.cuda.comm", + "documentation": {} + }, + { + "label": "neural_renderer", + "kind": 6, + "isExtraImport": true, + "importPath": "neural_renderer", + "description": "neural_renderer", + "detail": "neural_renderer", + "documentation": {} + }, + { + "label": "torchvision.utils", + "kind": 6, + "isExtraImport": true, + "importPath": "torchvision.utils", + "description": "torchvision.utils", + "detail": "torchvision.utils", + "documentation": {} + }, + { + "label": "make_grid", + "importPath": "torchvision.utils", + "description": "torchvision.utils", + "isExtraImport": true, + "detail": "torchvision.utils", + "documentation": {} + }, + { + "label": "DensePoseMethods", + "importPath": "utils.densepose_methods", + "description": "utils.densepose_methods", + "isExtraImport": true, + "detail": "utils.densepose_methods", + "documentation": {} + }, + { + "label": "pyrender", + "kind": 6, + "isExtraImport": true, + "importPath": "pyrender", + "description": "pyrender", + "detail": "pyrender", + "documentation": {} + }, + { + "label": "RenderFlags", + "importPath": "pyrender.constants", + "description": "pyrender.constants", + "isExtraImport": true, + "detail": "pyrender.constants", + "documentation": {} + }, + { + "label": "Meshes", + "importPath": "pytorch3d.structures.meshes", + "description": "pytorch3d.structures.meshes", + "isExtraImport": true, + "detail": "pytorch3d.structures.meshes", + "documentation": {} + }, + { + "label": "pycocotools.mask", + "kind": 6, + "isExtraImport": true, + "importPath": "pycocotools.mask", + "description": "pycocotools.mask", + "detail": "pycocotools.mask", + "documentation": {} + }, + { + "label": "scipy.signal", + "kind": 6, + "isExtraImport": true, + "importPath": "scipy.signal", + "description": "scipy.signal", + "detail": "scipy.signal", + "documentation": {} + }, + { + "label": "gaussian_filter1d", + "importPath": "scipy.ndimage.filters", + "description": "scipy.ndimage.filters", + "isExtraImport": true, + "detail": "scipy.ndimage.filters", + "documentation": {} + }, + { + "label": "Polygon", + "importPath": "matplotlib.patches", + "description": "matplotlib.patches", + "isExtraImport": true, + "detail": "matplotlib.patches", + "documentation": {} + }, + { + "label": "Image", + "importPath": "IPython.display", + "description": "IPython.display", + "isExtraImport": true, + "detail": 
"IPython.display", + "documentation": {} + }, + { + "label": "HTML", + "importPath": "IPython.display", + "description": "IPython.display", + "isExtraImport": true, + "detail": "IPython.display", + "documentation": {} + }, + { + "label": "HTML", + "importPath": "IPython.display", + "description": "IPython.display", + "isExtraImport": true, + "detail": "IPython.display", + "documentation": {} + }, + { + "label": "Image", + "importPath": "IPython.display", + "description": "IPython.display", + "isExtraImport": true, + "detail": "IPython.display", + "documentation": {} + }, + { + "label": "b64encode", + "importPath": "base64", + "description": "base64", + "isExtraImport": true, + "detail": "base64", + "documentation": {} + }, + { + "label": "b64encode", + "importPath": "base64", + "description": "base64", + "isExtraImport": true, + "detail": "base64", + "documentation": {} + }, + { + "label": "api_multi_body", + "importPath": "modules.PIXIE.demos.api_multi_pixie", + "description": "modules.PIXIE.demos.api_multi_pixie", + "isExtraImport": true, + "detail": "modules.PIXIE.demos.api_multi_pixie", + "documentation": {} + }, + { + "label": "api_multi_deca", + "importPath": "modules.DECA.demos.api_multi_deca", + "description": "modules.DECA.demos.api_multi_deca", + "isExtraImport": true, + "detail": "modules.DECA.demos.api_multi_deca", + "documentation": {} + }, + { + "label": "FaceDetector", + "importPath": "SHOW.detector.face_detector", + "description": "SHOW.detector.face_detector", + "isExtraImport": true, + "detail": "SHOW.detector.face_detector", + "documentation": {} + }, + { + "label": "FaceDetector", + "importPath": "SHOW.detector.face_detector", + "description": "SHOW.detector.face_detector", + "isExtraImport": true, + "detail": "SHOW.detector.face_detector", + "documentation": {} + }, + { + "label": "deeplab_seg", + "importPath": "SHOW.video_filter.deeplab_seg", + "description": "SHOW.video_filter.deeplab_seg", + "isExtraImport": true, + "detail": "SHOW.video_filter.deeplab_seg", + "documentation": {} + }, + { + "label": "images_to_video", + "importPath": "SHOW.utils.video", + "description": "SHOW.utils.video", + "isExtraImport": true, + "detail": "SHOW.utils.video", + "documentation": {} + }, + { + "label": "images_to_video", + "importPath": "SHOW.utils.video", + "description": "SHOW.utils.video", + "isExtraImport": true, + "detail": "SHOW.utils.video", + "documentation": {} + }, + { + "label": "images_to_video", + "importPath": "SHOW.utils.video", + "description": "SHOW.utils.video", + "isExtraImport": true, + "detail": "SHOW.utils.video", + "documentation": {} + }, + { + "label": "mediapipe", + "kind": 6, + "isExtraImport": true, + "importPath": "mediapipe", + "description": "mediapipe", + "detail": "mediapipe", + "documentation": {} + }, + { + "label": "Registry", + "importPath": "mmcv.utils", + "description": "mmcv.utils", + "isExtraImport": true, + "detail": "mmcv.utils", + "documentation": {} + }, + { + "label": "Registry", + "importPath": "mmcv.utils", + "description": "mmcv.utils", + "isExtraImport": true, + "detail": "mmcv.utils", + "documentation": {} + }, + { + "label": "Registry", + "importPath": "mmcv.utils", + "description": "mmcv.utils", + "isExtraImport": true, + "detail": "mmcv.utils", + "documentation": {} + }, + { + "label": "wandb", + "kind": 6, + "isExtraImport": true, + "importPath": "wandb", + "description": "wandb", + "detail": "wandb", + "documentation": {} + }, + { + "label": "inspect", + "kind": 6, + "isExtraImport": true, + "importPath": "inspect", + 
"description": "inspect", + "detail": "inspect", + "documentation": {} + }, + { + "label": "Parameter", + "importPath": "inspect", + "description": "inspect", + "isExtraImport": true, + "detail": "inspect", + "documentation": {} + }, + { + "label": "signature", + "importPath": "inspect", + "description": "inspect", + "isExtraImport": true, + "detail": "inspect", + "documentation": {} + }, + { + "label": "NeptuneLoggerHook", + "importPath": "mmcv.runner.hooks.logger", + "description": "mmcv.runner.hooks.logger", + "isExtraImport": true, + "detail": "mmcv.runner.hooks.logger", + "documentation": {} + }, + { + "label": "NeptuneLoggerHook", + "importPath": "mmcv.runner.hooks.logger", + "description": "mmcv.runner.hooks.logger", + "isExtraImport": true, + "detail": "mmcv.runner.hooks.logger", + "documentation": {} + }, + { + "label": "TensorboardLoggerHook", + "importPath": "mmcv.runner.hooks.logger", + "description": "mmcv.runner.hooks.logger", + "isExtraImport": true, + "detail": "mmcv.runner.hooks.logger", + "documentation": {} + }, + { + "label": "WandbLoggerHook", + "importPath": "mmcv.runner.hooks.logger", + "description": "mmcv.runner.hooks.logger", + "isExtraImport": true, + "detail": "mmcv.runner.hooks.logger", + "documentation": {} + }, + { + "label": "opencv_from_cameras_projection", + "importPath": "pytorch3d.utils", + "description": "pytorch3d.utils", + "isExtraImport": true, + "detail": "pytorch3d.utils", + "documentation": {} + }, + { + "label": "opencv_from_cameras_projection", + "importPath": "pytorch3d.utils", + "description": "pytorch3d.utils", + "isExtraImport": true, + "detail": "pytorch3d.utils", + "documentation": {} + }, + { + "label": "opencv_from_cameras_projection", + "importPath": "pytorch3d.utils", + "description": "pytorch3d.utils", + "isExtraImport": true, + "detail": "pytorch3d.utils", + "documentation": {} + }, + { + "label": "Meshes", + "importPath": "pytorch3d.structures", + "description": "pytorch3d.structures", + "isExtraImport": true, + "detail": "pytorch3d.structures", + "documentation": {} + }, + { + "label": "Meshes", + "importPath": "pytorch3d.structures", + "description": "pytorch3d.structures", + "isExtraImport": true, + "detail": "pytorch3d.structures", + "documentation": {} + }, + { + "label": "Meshes", + "importPath": "pytorch3d.structures", + "description": "pytorch3d.structures", + "isExtraImport": true, + "detail": "pytorch3d.structures", + "documentation": {} + }, + { + "label": "contextlib", + "kind": 6, + "isExtraImport": true, + "importPath": "contextlib", + "description": "contextlib", + "detail": "contextlib", + "documentation": {} + }, + { + "label": "cv2,", + "kind": 6, + "isExtraImport": true, + "importPath": "cv2.", + "description": "cv2.", + "detail": "cv2.", + "documentation": {} + }, + { + "label": "torch,", + "kind": 6, + "isExtraImport": true, + "importPath": "torch.", + "description": "torch.", + "detail": "torch.", + "documentation": {} + }, + { + "label": "shutil,", + "kind": 6, + "isExtraImport": true, + "importPath": "shutil.", + "description": "shutil.", + "detail": "shutil.", + "documentation": {} + }, + { + "label": "FigureCanvasAgg", + "importPath": "matplotlib.backends.backend_agg", + "description": "matplotlib.backends.backend_agg", + "isExtraImport": true, + "detail": "matplotlib.backends.backend_agg", + "documentation": {} + }, + { + "label": "Figure", + "importPath": "matplotlib.figure", + "description": "matplotlib.figure", + "isExtraImport": true, + "detail": "matplotlib.figure", + "documentation": {} + }, + { + 
"label": "distance_transform_edt", + "importPath": "scipy.ndimage.morphology", + "description": "scipy.ndimage.morphology", + "isExtraImport": true, + "detail": "scipy.ndimage.morphology", + "documentation": {} + }, + { + "label": "PlyData", + "importPath": "plyfile", + "description": "plyfile", + "isExtraImport": true, + "detail": "plyfile", + "documentation": {} + }, + { + "label": "api_MICA", + "importPath": "modules.MICA.api_MICA", + "description": "modules.MICA.api_MICA", + "isExtraImport": true, + "detail": "modules.MICA.api_MICA", + "documentation": {} + }, + { + "label": "MMPoseAnalyzer", + "importPath": "SHOW.video_filter.MMposer", + "description": "SHOW.video_filter.MMposer", + "isExtraImport": true, + "detail": "SHOW.video_filter.MMposer", + "documentation": {} + }, + { + "label": "load_vposer", + "importPath": "human_body_prior.tools.model_loader", + "description": "human_body_prior.tools.model_loader", + "isExtraImport": true, + "detail": "human_body_prior.tools.model_loader", + "documentation": {} + }, + { + "label": "SHOW.utils", + "kind": 6, + "isExtraImport": true, + "importPath": "SHOW.utils", + "description": "SHOW.utils", + "detail": "SHOW.utils", + "documentation": {} + }, + { + "label": "View", + "importPath": "SHOW.utils", + "description": "SHOW.utils", + "isExtraImport": true, + "detail": "SHOW.utils", + "documentation": {} + }, + { + "label": "default_timers", + "importPath": "SHOW.utils", + "description": "SHOW.utils", + "isExtraImport": true, + "detail": "SHOW.utils", + "documentation": {} + }, + { + "label": "default_timers", + "importPath": "SHOW.utils", + "description": "SHOW.utils", + "isExtraImport": true, + "detail": "SHOW.utils", + "documentation": {} + }, + { + "label": "is_valid_json", + "importPath": "SHOW.utils", + "description": "SHOW.utils", + "isExtraImport": true, + "detail": "SHOW.utils", + "documentation": {} + }, + { + "label": "default_timers", + "importPath": "SHOW.utils", + "description": "SHOW.utils", + "isExtraImport": true, + "detail": "SHOW.utils", + "documentation": {} + }, + { + "label": "is_valid_json", + "importPath": "SHOW.utils", + "description": "SHOW.utils", + "isExtraImport": true, + "detail": "SHOW.utils", + "documentation": {} + }, + { + "label": "face_alignment", + "kind": 6, + "isExtraImport": true, + "importPath": "face_alignment", + "description": "face_alignment", + "detail": "face_alignment", + "documentation": {} + }, + { + "label": "match_faces", + "importPath": "SHOW.face_iders", + "description": "SHOW.face_iders", + "isExtraImport": true, + "detail": "SHOW.face_iders", + "documentation": {} + }, + { + "label": "lmk2d_to_bbox", + "importPath": "SHOW.image", + "description": "SHOW.image", + "isExtraImport": true, + "detail": "SHOW.image", + "documentation": {} + }, + { + "label": "torchgeometry", + "kind": 6, + "isExtraImport": true, + "importPath": "torchgeometry", + "description": "torchgeometry", + "detail": "torchgeometry", + "documentation": {} + }, + { + "label": "SHOW_stage1", + "importPath": "stage1_main", + "description": "stage1_main", + "isExtraImport": true, + "detail": "stage1_main", + "documentation": {} + }, + { + "label": "SHOW_stage2", + "importPath": "stage2_main", + "description": "stage2_main", + "isExtraImport": true, + "detail": "stage2_main", + "documentation": {} + }, + { + "label": "gen_path_from_ours_root", + "importPath": "configs.csv_parser", + "description": "configs.csv_parser", + "isExtraImport": true, + "detail": "configs.csv_parser", + "documentation": {} + }, + { + "label": 
"MeterBuffer", + "importPath": "SHOW.utils.metric", + "description": "SHOW.utils.metric", + "isExtraImport": true, + "detail": "SHOW.utils.metric", + "documentation": {} + }, + { + "label": "MeterBuffer", + "importPath": "SHOW.utils.metric", + "description": "SHOW.utils.metric", + "isExtraImport": true, + "detail": "SHOW.utils.metric", + "documentation": {} + }, + { + "label": "load_save_pkl", + "importPath": "SHOW.load_models", + "description": "SHOW.load_models", + "isExtraImport": true, + "detail": "SHOW.load_models", + "documentation": {} + }, + { + "label": "load_smplx_model", + "importPath": "SHOW.load_models", + "description": "SHOW.load_models", + "isExtraImport": true, + "detail": "SHOW.load_models", + "documentation": {} + }, + { + "label": "load_vposer_model", + "importPath": "SHOW.load_models", + "description": "SHOW.load_models", + "isExtraImport": true, + "detail": "SHOW.load_models", + "documentation": {} + }, + { + "label": "load_save_pkl", + "importPath": "SHOW.load_models", + "description": "SHOW.load_models", + "isExtraImport": true, + "detail": "SHOW.load_models", + "documentation": {} + }, + { + "label": "load_save_pkl", + "importPath": "SHOW.load_models", + "description": "SHOW.load_models", + "isExtraImport": true, + "detail": "SHOW.load_models", + "documentation": {} + }, + { + "label": "load_save_pkl", + "importPath": "SHOW.load_models", + "description": "SHOW.load_models", + "isExtraImport": true, + "detail": "SHOW.load_models", + "documentation": {} + }, + { + "label": "load_smplx_model", + "importPath": "SHOW.load_models", + "description": "SHOW.load_models", + "isExtraImport": true, + "detail": "SHOW.load_models", + "documentation": {} + }, + { + "label": "load_vposer_model", + "importPath": "SHOW.load_models", + "description": "SHOW.load_models", + "isExtraImport": true, + "detail": "SHOW.load_models", + "documentation": {} + }, + { + "label": "load_smplx_model", + "importPath": "SHOW.load_models", + "description": "SHOW.load_models", + "isExtraImport": true, + "detail": "SHOW.load_models", + "documentation": {} + }, + { + "label": "load_vposer_model", + "importPath": "SHOW.load_models", + "description": "SHOW.load_models", + "isExtraImport": true, + "detail": "SHOW.load_models", + "documentation": {} + }, + { + "label": "load_save_pkl", + "importPath": "SHOW.load_models", + "description": "SHOW.load_models", + "isExtraImport": true, + "detail": "SHOW.load_models", + "documentation": {} + }, + { + "label": "save_one_results", + "importPath": "SHOW.save_results", + "description": "SHOW.save_results", + "isExtraImport": true, + "detail": "SHOW.save_results", + "documentation": {} + }, + { + "label": "save_one_results", + "importPath": "SHOW.save_results", + "description": "SHOW.save_results", + "isExtraImport": true, + "detail": "SHOW.save_results", + "documentation": {} + }, + { + "label": "op_base", + "importPath": "SHOW.datasets", + "description": "SHOW.datasets", + "isExtraImport": true, + "detail": "SHOW.datasets", + "documentation": {} + }, + { + "label": "op_dataset", + "importPath": "SHOW.datasets", + "description": "SHOW.datasets", + "isExtraImport": true, + "detail": "SHOW.datasets", + "documentation": {} + }, + { + "label": "op_base", + "importPath": "SHOW.datasets", + "description": "SHOW.datasets", + "isExtraImport": true, + "detail": "SHOW.datasets", + "documentation": {} + }, + { + "label": "images_to_video", + "importPath": "mmhuman3d.utils.ffmpeg_utils", + "description": "mmhuman3d.utils.ffmpeg_utils", + "isExtraImport": true, + "detail": 
"mmhuman3d.utils.ffmpeg_utils", + "documentation": {} + }, + { + "label": "setup_logger", + "importPath": "SHOW.loggers.logger", + "description": "SHOW.loggers.logger", + "isExtraImport": true, + "detail": "SHOW.loggers.logger", + "documentation": {} + }, + { + "label": "setup_logger", + "importPath": "SHOW.loggers.logger", + "description": "SHOW.loggers.logger", + "isExtraImport": true, + "detail": "SHOW.loggers.logger", + "documentation": {} + }, + { + "label": "build_prior", + "importPath": "SHOW.prior", + "description": "SHOW.prior", + "isExtraImport": true, + "detail": "SHOW.prior", + "documentation": {} + }, + { + "label": "load_assets", + "importPath": "SHOW.load_assets", + "description": "SHOW.load_assets", + "isExtraImport": true, + "detail": "SHOW.load_assets", + "documentation": {} + }, + { + "label": "load_assets", + "importPath": "SHOW.load_assets", + "description": "SHOW.load_assets", + "isExtraImport": true, + "detail": "SHOW.load_assets", + "documentation": {} + }, + { + "label": "parse_weight", + "importPath": "SHOW.parse_weight", + "description": "SHOW.parse_weight", + "isExtraImport": true, + "detail": "SHOW.parse_weight", + "documentation": {} + }, + { + "label": "*", + "importPath": "SHOW.losses", + "description": "SHOW.losses", + "isExtraImport": true, + "detail": "SHOW.losses", + "documentation": {} + }, + { + "label": "condor_cfg", + "importPath": "configs.cfg_ins", + "description": "configs.cfg_ins", + "isExtraImport": true, + "detail": "configs.cfg_ins", + "documentation": {} + }, + { + "label": "condor_cfg", + "importPath": "configs.cfg_ins", + "description": "configs.cfg_ins", + "isExtraImport": true, + "detail": "configs.cfg_ins", + "documentation": {} + }, + { + "label": "atach_model_func", + "importPath": "SHOW.datasets.model_func_atach", + "description": "SHOW.datasets.model_func_atach", + "isExtraImport": true, + "detail": "SHOW.datasets.model_func_atach", + "documentation": {} + }, + { + "label": "FLAMETex", + "importPath": "SHOW.flame.FLAME", + "description": "SHOW.flame.FLAME", + "isExtraImport": true, + "detail": "SHOW.flame.FLAME", + "documentation": {} + }, + { + "label": "ImagesDataset", + "importPath": "SHOW.smplx_dataset", + "description": "SHOW.smplx_dataset", + "isExtraImport": true, + "detail": "SHOW.smplx_dataset", + "documentation": {} + }, + { + "label": "Renderer", + "importPath": "SHOW.renderer", + "description": "SHOW.renderer", + "isExtraImport": true, + "detail": "SHOW.renderer", + "documentation": {} + }, + { + "label": "save_tracker", + "importPath": "SHOW.save_tracker", + "description": "SHOW.save_tracker", + "isExtraImport": true, + "detail": "SHOW.save_tracker", + "documentation": {} + }, + { + "label": "log_config_tracker_nep", + "kind": 5, + "importPath": "configs.base.log_config", + "description": "configs.base.log_config", + "peekOfCode": "log_config_tracker_nep = dict( \n interval=100, \n hooks=[ \n dict(type='MyNeptuneLogger', \n with_step=False,\n init_kwargs=dict(\n name='test',\n project='lithiumice/tracker',\n api_token=\"==\",\n )) ", + "detail": "configs.base.log_config", + "documentation": {} + }, + { + "label": "log_config_smplifyx_nep", + "kind": 5, + "importPath": "configs.base.log_config", + "description": "configs.base.log_config", + "peekOfCode": "log_config_smplifyx_nep = dict( \n interval=100, \n hooks=[ \n dict(type='MyNeptuneLogger', \n with_step=False,\n init_kwargs=dict(\n name='test',\n project='lithiumice/smplifyx',\n api_token=\"==\",\n )) ", + "detail": "configs.base.log_config", + "documentation": {} + 
}, + { + "label": "log_config_smplifyx_wandb", + "kind": 5, + "importPath": "configs.base.log_config", + "description": "configs.base.log_config", + "peekOfCode": "log_config_smplifyx_wandb = dict( \n interval=100, \n hooks=[ \n dict(type='MyWandbLogger', \n log_artifact=False,\n wandb_key='',\n wandb_name='NEED_TO_BE_FILLED',\n init_kwargs=dict(\n reinit=True,\n resume='allow',", + "detail": "configs.base.log_config", + "documentation": {} + }, + { + "label": "log_config_tracker_wandb", + "kind": 5, + "importPath": "configs.base.log_config", + "description": "configs.base.log_config", + "peekOfCode": "log_config_tracker_wandb = dict( \n interval=100, \n hooks=[ \n dict(type='MyWandbLogger', \n log_artifact=False,\n wandb_key='',\n wandb_name='NEED_TO_BE_FILLED',\n init_kwargs=dict(\n reinit=True,\n resume='allow',", + "detail": "configs.base.log_config", + "documentation": {} + }, + { + "label": "cfg_smoothnet_w8", + "kind": 5, + "importPath": "configs.base.smoothnet_cfg", + "description": "configs.base.smoothnet_cfg", + "peekOfCode": "cfg_smoothnet_w8 = dict(\n type='smoothnet',\n window_size=8,\n output_size=8,\n checkpoint='https://openmmlab-share.oss-cn-hangzhou.aliyuncs.com/'\n 'mmhuman3d/../models/smoothnet/smoothnet_windowsize8.pth.tar?versionId'\n '=CAEQPhiBgMDo0s7shhgiIDgzNTRmNWM2ZWEzYTQyYzRhNzUwYTkzZWZkMmU5MWEw',\n device='cpu')\ncfg_smoothnet_w16 = dict(\n type='smoothnet',", + "detail": "configs.base.smoothnet_cfg", + "documentation": {} + }, + { + "label": "cfg_smoothnet_w16", + "kind": 5, + "importPath": "configs.base.smoothnet_cfg", + "description": "configs.base.smoothnet_cfg", + "peekOfCode": "cfg_smoothnet_w16 = dict(\n type='smoothnet',\n window_size=16,\n output_size=16,\n checkpoint='https://openmmlab-share.oss-cn-hangzhou.aliyuncs.com/'\n 'mmhuman3d/../models/smoothnet/smoothnet_windowsize16.pth.tar?versionId'\n '=CAEQPhiBgMC.s87shhgiIGM3ZTI1ZGY1Y2NhNDQ2YzRiNmEyOGZhY2VjYWFiN2Zi',\n device='cpu')\ncfg_smoothnet_w32 = dict(\n type='smoothnet',", + "detail": "configs.base.smoothnet_cfg", + "documentation": {} + }, + { + "label": "cfg_smoothnet_w32", + "kind": 5, + "importPath": "configs.base.smoothnet_cfg", + "description": "configs.base.smoothnet_cfg", + "peekOfCode": "cfg_smoothnet_w32 = dict(\n type='smoothnet',\n window_size=32,\n output_size=32,\n checkpoint='https://openmmlab-share.oss-cn-hangzhou.aliyuncs.com/'\n 'mmhuman3d/../models/smoothnet/smoothnet_windowsize32.pth.tar?versionId'\n '=CAEQPhiBgIDf0s7shhgiIDhmYmM3YWQ0ZGI3NjRmZTc4NTk2NDE1MTA2MTUyMGRm',\n device='cpu')\ncfg_smoothnet_w64 = dict(\n type='smoothnet',", + "detail": "configs.base.smoothnet_cfg", + "documentation": {} + }, + { + "label": "cfg_smoothnet_w64", + "kind": 5, + "importPath": "configs.base.smoothnet_cfg", + "description": "configs.base.smoothnet_cfg", + "peekOfCode": "cfg_smoothnet_w64 = dict(\n type='smoothnet',\n window_size=64,\n output_size=64,\n checkpoint='https://openmmlab-share.oss-cn-hangzhou.aliyuncs.com/'\n 'mmhuman3d/../models/smoothnet/smoothnet_windowsize64.pth.tar?versionId'\n '=CAEQPhiBgMCyw87shhgiIGEwODI4ZjdiYmFkYTQ0NzZiNDVkODk3MDBlYzE1Y2Rh',\n device='cpu')", + "detail": "configs.base.smoothnet_cfg", + "documentation": {} + }, + { + "label": "num_gaussians", + "kind": 5, + "importPath": "configs.base.smplifyx_prior_config", + "description": "configs.base.smplifyx_prior_config", + "peekOfCode": "num_gaussians = 12\nhand_prior_type = 'l2'", + "detail": "configs.base.smplifyx_prior_config", + "documentation": {} + }, + { + "label": "hand_prior_type", + "kind": 5, + 
"importPath": "configs.base.smplifyx_prior_config", + "description": "configs.base.smplifyx_prior_config", + "peekOfCode": "hand_prior_type = 'l2'", + "detail": "configs.base.smplifyx_prior_config", + "documentation": {} + }, + { + "label": "opt_weights_dict", + "kind": 5, + "importPath": "configs.base.smplifyx_weights", + "description": "configs.base.smplifyx_weights", + "peekOfCode": "opt_weights_dict = dict(\n jaw_prior_weight=[[47.8, 478.0, 478.0],]*4,\n bending_prior_weight=[15.15,]*4,\n body_pose_weight=[4.78, ]*4,\n expr_prior_weight=[5.0, ]*4,\n hand_prior_weight=[4.78,]*4,\n coll_loss_weight=[1.0,]*4,\n shape_weight=[5.0,]*4,\n data_weight=[1.0,]*4,\n body_joints_weight=[2,],", + "detail": "configs.base.smplifyx_weights", + "documentation": {} + }, + { + "label": "ffmpeg_path", + "kind": 5, + "importPath": "configs.configs.machine_cfg", + "description": "configs.configs.machine_cfg", + "peekOfCode": "ffmpeg_path = '/usr/bin/ffmpeg'\nopenpose_root_path = '/content/openpose'\nopenpose_bin_path = 'build/examples/openpose/openpose.bin'\nvideo_out_base_path: str = '{{ fileDirname }}/../../../speech2gesture_dataset/crop4'\nintervals_csv_path: str = \"{{ fileDirname }}/../data_csv/intervals_sub4.csv\"\nintervals_csv_path_debug: str = \"{{ fileDirname }}/../data_csv/test.csv\"\nfolder_version: int = 1\nlow_res: int = 1\nfps=30\nbs_at_a_time=15", + "detail": "configs.configs.machine_cfg", + "documentation": {} + }, + { + "label": "openpose_root_path", + "kind": 5, + "importPath": "configs.configs.machine_cfg", + "description": "configs.configs.machine_cfg", + "peekOfCode": "openpose_root_path = '/content/openpose'\nopenpose_bin_path = 'build/examples/openpose/openpose.bin'\nvideo_out_base_path: str = '{{ fileDirname }}/../../../speech2gesture_dataset/crop4'\nintervals_csv_path: str = \"{{ fileDirname }}/../data_csv/intervals_sub4.csv\"\nintervals_csv_path_debug: str = \"{{ fileDirname }}/../data_csv/test.csv\"\nfolder_version: int = 1\nlow_res: int = 1\nfps=30\nbs_at_a_time=15\ncoap_bs_at_a_time=400", + "detail": "configs.configs.machine_cfg", + "documentation": {} + }, + { + "label": "openpose_bin_path", + "kind": 5, + "importPath": "configs.configs.machine_cfg", + "description": "configs.configs.machine_cfg", + "peekOfCode": "openpose_bin_path = 'build/examples/openpose/openpose.bin'\nvideo_out_base_path: str = '{{ fileDirname }}/../../../speech2gesture_dataset/crop4'\nintervals_csv_path: str = \"{{ fileDirname }}/../data_csv/intervals_sub4.csv\"\nintervals_csv_path_debug: str = \"{{ fileDirname }}/../data_csv/test.csv\"\nfolder_version: int = 1\nlow_res: int = 1\nfps=30\nbs_at_a_time=15\ncoap_bs_at_a_time=400\no3d_opt_bs_at_a_time=400", + "detail": "configs.configs.machine_cfg", + "documentation": {} + }, + { + "label": "request_gpus", + "kind": 5, + "importPath": "configs.configs.machine_cfg", + "description": "configs.configs.machine_cfg", + "peekOfCode": "request_gpus = 1", + "detail": "configs.configs.machine_cfg", + "documentation": {} + }, + { + "label": "condor_cfg", + "kind": 5, + "importPath": "configs.cfg_ins", + "description": "configs.cfg_ins", + "peekOfCode": "condor_cfg = SHOW.from_rela_path(\n __file__, \n './configs/condor_mmcv_cfg.py')\ncondor_cfg.is_linux = 1 if platform.system() == \"Linux\" else 0\ngpu_info = SHOW.get_gpu_info()\ncondor_cfg.merge_from_dict(SHOW.from_rela_path(\n __file__,\n './configs/machine_cfg.py'))", + "detail": "configs.cfg_ins", + "documentation": {} + }, + { + "label": "condor_cfg.is_linux", + "kind": 5, + "importPath": "configs.cfg_ins", + 
"description": "configs.cfg_ins", + "peekOfCode": "condor_cfg.is_linux = 1 if platform.system() == \"Linux\" else 0\ngpu_info = SHOW.get_gpu_info()\ncondor_cfg.merge_from_dict(SHOW.from_rela_path(\n __file__,\n './configs/machine_cfg.py'))", + "detail": "configs.cfg_ins", + "documentation": {} + }, + { + "label": "gpu_info", + "kind": 5, + "importPath": "configs.cfg_ins", + "description": "configs.cfg_ins", + "peekOfCode": "gpu_info = SHOW.get_gpu_info()\ncondor_cfg.merge_from_dict(SHOW.from_rela_path(\n __file__,\n './configs/machine_cfg.py'))", + "detail": "configs.cfg_ins", + "documentation": {} + }, + { + "label": "gen_path_from_ours_root", + "kind": 2, + "importPath": "configs.csv_parser", + "description": "configs.csv_parser", + "peekOfCode": "def gen_path_from_ours_root(\n speaker_name,\n all_top_dir,\n ours_name='ours',\n mica_name='ours_exp',\n **kwargs\n):\n ###########################################\n ours_output_folder = osp.join(\n all_top_dir, ours_name)", + "detail": "configs.csv_parser", + "documentation": {} + }, + { + "label": "_base_", + "kind": 5, + "importPath": "configs.mmcv_smplifyx_config", + "description": "configs.mmcv_smplifyx_config", + "peekOfCode": "_base_ = [\n './base/face_ider.py',\n './base/log_config.py',\n './base/optimizer_config.py',\n './base/model_smplx_config.py',\n './base/model_flame_config.py',\n './base/smplifyx_weights.py',\n './base/smplifyx_loss_configs.py',\n './base/smplifyx_prior_config.py',\n './base/betas_generate.py',", + "detail": "configs.mmcv_smplifyx_config", + "documentation": {} + }, + { + "label": "batch_size", + "kind": 5, + "importPath": "configs.mmcv_smplifyx_config", + "description": "configs.mmcv_smplifyx_config", + "peekOfCode": "batch_size = -1\nsave_betas=False\nload_betas=False\nshape_path = ''\nsave_objs = False\nsave_template = False\nsave_smplpix = True\nsave_ours_images = True\nsave_pkl_file=True\nfocal_length = 5000", + "detail": "configs.mmcv_smplifyx_config", + "documentation": {} + }, + { + "label": "shape_path", + "kind": 5, + "importPath": "configs.mmcv_smplifyx_config", + "description": "configs.mmcv_smplifyx_config", + "peekOfCode": "shape_path = ''\nsave_objs = False\nsave_template = False\nsave_smplpix = True\nsave_ours_images = True\nsave_pkl_file=True\nfocal_length = 5000\nuse_vposer = True\ndevice = 'cuda'\ndtype = \"float32\"", + "detail": "configs.mmcv_smplifyx_config", + "documentation": {} + }, + { + "label": "save_objs", + "kind": 5, + "importPath": "configs.mmcv_smplifyx_config", + "description": "configs.mmcv_smplifyx_config", + "peekOfCode": "save_objs = False\nsave_template = False\nsave_smplpix = True\nsave_ours_images = True\nsave_pkl_file=True\nfocal_length = 5000\nuse_vposer = True\ndevice = 'cuda'\ndtype = \"float32\"\noutput_img_ext = 'png'", + "detail": "configs.mmcv_smplifyx_config", + "documentation": {} + }, + { + "label": "save_template", + "kind": 5, + "importPath": "configs.mmcv_smplifyx_config", + "description": "configs.mmcv_smplifyx_config", + "peekOfCode": "save_template = False\nsave_smplpix = True\nsave_ours_images = True\nsave_pkl_file=True\nfocal_length = 5000\nuse_vposer = True\ndevice = 'cuda'\ndtype = \"float32\"\noutput_img_ext = 'png'\nimg_save_mode='origin'", + "detail": "configs.mmcv_smplifyx_config", + "documentation": {} + }, + { + "label": "save_smplpix", + "kind": 5, + "importPath": "configs.mmcv_smplifyx_config", + "description": "configs.mmcv_smplifyx_config", + "peekOfCode": "save_smplpix = True\nsave_ours_images = True\nsave_pkl_file=True\nfocal_length = 
5000\nuse_vposer = True\ndevice = 'cuda'\ndtype = \"float32\"\noutput_img_ext = 'png'\nimg_save_mode='origin'\nload_tracker_checkpoint=False", + "detail": "configs.mmcv_smplifyx_config", + "documentation": {} + }, + { + "label": "save_ours_images", + "kind": 5, + "importPath": "configs.mmcv_smplifyx_config", + "description": "configs.mmcv_smplifyx_config", + "peekOfCode": "save_ours_images = True\nsave_pkl_file=True\nfocal_length = 5000\nuse_vposer = True\ndevice = 'cuda'\ndtype = \"float32\"\noutput_img_ext = 'png'\nimg_save_mode='origin'\nload_tracker_checkpoint=False\ntracker_checkpoint_root=''", + "detail": "configs.mmcv_smplifyx_config", + "documentation": {} + }, + { + "label": "focal_length", + "kind": 5, + "importPath": "configs.mmcv_smplifyx_config", + "description": "configs.mmcv_smplifyx_config", + "peekOfCode": "focal_length = 5000\nuse_vposer = True\ndevice = 'cuda'\ndtype = \"float32\"\noutput_img_ext = 'png'\nimg_save_mode='origin'\nload_tracker_checkpoint=False\ntracker_checkpoint_root=''\nstart_stage=0\nend_stage=None", + "detail": "configs.mmcv_smplifyx_config", + "documentation": {} + }, + { + "label": "use_vposer", + "kind": 5, + "importPath": "configs.mmcv_smplifyx_config", + "description": "configs.mmcv_smplifyx_config", + "peekOfCode": "use_vposer = True\ndevice = 'cuda'\ndtype = \"float32\"\noutput_img_ext = 'png'\nimg_save_mode='origin'\nload_tracker_checkpoint=False\ntracker_checkpoint_root=''\nstart_stage=0\nend_stage=None\nload_checkpoint=False", + "detail": "configs.mmcv_smplifyx_config", + "documentation": {} + }, + { + "label": "device", + "kind": 5, + "importPath": "configs.mmcv_smplifyx_config", + "description": "configs.mmcv_smplifyx_config", + "peekOfCode": "device = 'cuda'\ndtype = \"float32\"\noutput_img_ext = 'png'\nimg_save_mode='origin'\nload_tracker_checkpoint=False\ntracker_checkpoint_root=''\nstart_stage=0\nend_stage=None\nload_checkpoint=False\ncheck_pkl_metric=False", + "detail": "configs.mmcv_smplifyx_config", + "documentation": {} + }, + { + "label": "dtype", + "kind": 5, + "importPath": "configs.mmcv_smplifyx_config", + "description": "configs.mmcv_smplifyx_config", + "peekOfCode": "dtype = \"float32\"\noutput_img_ext = 'png'\nimg_save_mode='origin'\nload_tracker_checkpoint=False\ntracker_checkpoint_root=''\nstart_stage=0\nend_stage=None\nload_checkpoint=False\ncheck_pkl_metric=False\nload_ckpt_st_stage=-1", + "detail": "configs.mmcv_smplifyx_config", + "documentation": {} + }, + { + "label": "output_img_ext", + "kind": 5, + "importPath": "configs.mmcv_smplifyx_config", + "description": "configs.mmcv_smplifyx_config", + "peekOfCode": "output_img_ext = 'png'\nimg_save_mode='origin'\nload_tracker_checkpoint=False\ntracker_checkpoint_root=''\nstart_stage=0\nend_stage=None\nload_checkpoint=False\ncheck_pkl_metric=False\nload_ckpt_st_stage=-1\nload_ckpt_ed_stage=None", + "detail": "configs.mmcv_smplifyx_config", + "documentation": {} + }, + { + "label": "_base_", + "kind": 5, + "importPath": "configs.mmcv_tracker_config", + "description": "configs.mmcv_tracker_config", + "peekOfCode": "_base_ = [\n './base/model_smplx_config.py',\n './base/model_flame_config.py',\n './base/log_config.py',\n './base/face_ider.py',\n]\nbatch_size = -1\nshape_path = ''\ndevice = 'cuda'\ndtype = \"float32\"", + "detail": "configs.mmcv_tracker_config", + "documentation": {} + }, + { + "label": "batch_size", + "kind": 5, + "importPath": "configs.mmcv_tracker_config", + "description": "configs.mmcv_tracker_config", + "peekOfCode": "batch_size = -1\nshape_path = 
''\ndevice = 'cuda'\ndtype = \"float32\"\noutput_img_ext = 'png'\nexp_weight = 0.02\nbatch_size = 1\nw_pho = 10.\nw_lmks = 2.\nsampling = 0", + "detail": "configs.mmcv_tracker_config", + "documentation": {} + }, + { + "label": "shape_path", + "kind": 5, + "importPath": "configs.mmcv_tracker_config", + "description": "configs.mmcv_tracker_config", + "peekOfCode": "shape_path = ''\ndevice = 'cuda'\ndtype = \"float32\"\noutput_img_ext = 'png'\nexp_weight = 0.02\nbatch_size = 1\nw_pho = 10.\nw_lmks = 2.\nsampling = 0\nkeyframes = []", + "detail": "configs.mmcv_tracker_config", + "documentation": {} + }, + { + "label": "device", + "kind": 5, + "importPath": "configs.mmcv_tracker_config", + "description": "configs.mmcv_tracker_config", + "peekOfCode": "device = 'cuda'\ndtype = \"float32\"\noutput_img_ext = 'png'\nexp_weight = 0.02\nbatch_size = 1\nw_pho = 10.\nw_lmks = 2.\nsampling = 0\nkeyframes = []\nuse_keyframes = False", + "detail": "configs.mmcv_tracker_config", + "documentation": {} + }, + { + "label": "dtype", + "kind": 5, + "importPath": "configs.mmcv_tracker_config", + "description": "configs.mmcv_tracker_config", + "peekOfCode": "dtype = \"float32\"\noutput_img_ext = 'png'\nexp_weight = 0.02\nbatch_size = 1\nw_pho = 10.\nw_lmks = 2.\nsampling = 0\nkeyframes = []\nuse_keyframes = False\nwarmup_steps = 1", + "detail": "configs.mmcv_tracker_config", + "documentation": {} + }, + { + "label": "output_img_ext", + "kind": 5, + "importPath": "configs.mmcv_tracker_config", + "description": "configs.mmcv_tracker_config", + "peekOfCode": "output_img_ext = 'png'\nexp_weight = 0.02\nbatch_size = 1\nw_pho = 10.\nw_lmks = 2.\nsampling = 0\nkeyframes = []\nuse_keyframes = False\nwarmup_steps = 1\nbbox_scale = 2.5", + "detail": "configs.mmcv_tracker_config", + "documentation": {} + }, + { + "label": "exp_weight", + "kind": 5, + "importPath": "configs.mmcv_tracker_config", + "description": "configs.mmcv_tracker_config", + "peekOfCode": "exp_weight = 0.02\nbatch_size = 1\nw_pho = 10.\nw_lmks = 2.\nsampling = 0\nkeyframes = []\nuse_keyframes = False\nwarmup_steps = 1\nbbox_scale = 2.5\nmake_image_square = True", + "detail": "configs.mmcv_tracker_config", + "documentation": {} + }, + { + "label": "batch_size", + "kind": 5, + "importPath": "configs.mmcv_tracker_config", + "description": "configs.mmcv_tracker_config", + "peekOfCode": "batch_size = 1\nw_pho = 10.\nw_lmks = 2.\nsampling = 0\nkeyframes = []\nuse_keyframes = False\nwarmup_steps = 1\nbbox_scale = 2.5\nmake_image_square = True\nsquare_size = 512", + "detail": "configs.mmcv_tracker_config", + "documentation": {} + }, + { + "label": "w_pho", + "kind": 5, + "importPath": "configs.mmcv_tracker_config", + "description": "configs.mmcv_tracker_config", + "peekOfCode": "w_pho = 10.\nw_lmks = 2.\nsampling = 0\nkeyframes = []\nuse_keyframes = False\nwarmup_steps = 1\nbbox_scale = 2.5\nmake_image_square = True\nsquare_size = 512\nuse_kinect = False", + "detail": "configs.mmcv_tracker_config", + "documentation": {} + }, + { + "label": "w_lmks", + "kind": 5, + "importPath": "configs.mmcv_tracker_config", + "description": "configs.mmcv_tracker_config", + "peekOfCode": "w_lmks = 2.\nsampling = 0\nkeyframes = []\nuse_keyframes = False\nwarmup_steps = 1\nbbox_scale = 2.5\nmake_image_square = True\nsquare_size = 512\nuse_kinect = False\nuse_mediapipe = True", + "detail": "configs.mmcv_tracker_config", + "documentation": {} + }, + { + "label": "sampling", + "kind": 5, + "importPath": "configs.mmcv_tracker_config", + "description": "configs.mmcv_tracker_config", + 
"peekOfCode": "sampling = 0\nkeyframes = []\nuse_keyframes = False\nwarmup_steps = 1\nbbox_scale = 2.5\nmake_image_square = True\nsquare_size = 512\nuse_kinect = False\nuse_mediapipe = True\noptimize_shape = False", + "detail": "configs.mmcv_tracker_config", + "documentation": {} + }, + { + "label": "keyframes", + "kind": 5, + "importPath": "configs.mmcv_tracker_config", + "description": "configs.mmcv_tracker_config", + "peekOfCode": "keyframes = []\nuse_keyframes = False\nwarmup_steps = 1\nbbox_scale = 2.5\nmake_image_square = True\nsquare_size = 512\nuse_kinect = False\nuse_mediapipe = True\noptimize_shape = False\nimage_size = [512, 512]", + "detail": "configs.mmcv_tracker_config", + "documentation": {} + }, + { + "label": "use_keyframes", + "kind": 5, + "importPath": "configs.mmcv_tracker_config", + "description": "configs.mmcv_tracker_config", + "peekOfCode": "use_keyframes = False\nwarmup_steps = 1\nbbox_scale = 2.5\nmake_image_square = True\nsquare_size = 512\nuse_kinect = False\nuse_mediapipe = True\noptimize_shape = False\nimage_size = [512, 512]\nconfig_name = ''", + "detail": "configs.mmcv_tracker_config", + "documentation": {} + }, + { + "label": "warmup_steps", + "kind": 5, + "importPath": "configs.mmcv_tracker_config", + "description": "configs.mmcv_tracker_config", + "peekOfCode": "warmup_steps = 1\nbbox_scale = 2.5\nmake_image_square = True\nsquare_size = 512\nuse_kinect = False\nuse_mediapipe = True\noptimize_shape = False\nimage_size = [512, 512]\nconfig_name = ''\nsave_final_vis=True", + "detail": "configs.mmcv_tracker_config", + "documentation": {} + }, + { + "label": "bbox_scale", + "kind": 5, + "importPath": "configs.mmcv_tracker_config", + "description": "configs.mmcv_tracker_config", + "peekOfCode": "bbox_scale = 2.5\nmake_image_square = True\nsquare_size = 512\nuse_kinect = False\nuse_mediapipe = True\noptimize_shape = False\nimage_size = [512, 512]\nconfig_name = ''\nsave_final_vis=True\nsave_final_flame=True", + "detail": "configs.mmcv_tracker_config", + "documentation": {} + }, + { + "label": "make_image_square", + "kind": 5, + "importPath": "configs.mmcv_tracker_config", + "description": "configs.mmcv_tracker_config", + "peekOfCode": "make_image_square = True\nsquare_size = 512\nuse_kinect = False\nuse_mediapipe = True\noptimize_shape = False\nimage_size = [512, 512]\nconfig_name = ''\nsave_final_vis=True\nsave_final_flame=True\niters=149", + "detail": "configs.mmcv_tracker_config", + "documentation": {} + }, + { + "label": "square_size", + "kind": 5, + "importPath": "configs.mmcv_tracker_config", + "description": "configs.mmcv_tracker_config", + "peekOfCode": "square_size = 512\nuse_kinect = False\nuse_mediapipe = True\noptimize_shape = False\nimage_size = [512, 512]\nconfig_name = ''\nsave_final_vis=True\nsave_final_flame=True\niters=149\nbs_at_a_time=3", + "detail": "configs.mmcv_tracker_config", + "documentation": {} + }, + { + "label": "use_kinect", + "kind": 5, + "importPath": "configs.mmcv_tracker_config", + "description": "configs.mmcv_tracker_config", + "peekOfCode": "use_kinect = False\nuse_mediapipe = True\noptimize_shape = False\nimage_size = [512, 512]\nconfig_name = ''\nsave_final_vis=True\nsave_final_flame=True\niters=149\nbs_at_a_time=3\nsampling=[1/2,1,2]", + "detail": "configs.mmcv_tracker_config", + "documentation": {} + }, + { + "label": "use_mediapipe", + "kind": 5, + "importPath": "configs.mmcv_tracker_config", + "description": "configs.mmcv_tracker_config", + "peekOfCode": "use_mediapipe = True\noptimize_shape = False\nimage_size = [512, 
512]\nconfig_name = ''\nsave_final_vis=True\nsave_final_flame=True\niters=149\nbs_at_a_time=3\nsampling=[1/2,1,2]\nuse_face_upsample=False", + "detail": "configs.mmcv_tracker_config", + "documentation": {} + }, + { + "label": "optimize_shape", + "kind": 5, + "importPath": "configs.mmcv_tracker_config", + "description": "configs.mmcv_tracker_config", + "peekOfCode": "optimize_shape = False\nimage_size = [512, 512]\nconfig_name = ''\nsave_final_vis=True\nsave_final_flame=True\niters=149\nbs_at_a_time=3\nsampling=[1/2,1,2]\nuse_face_upsample=False", + "detail": "configs.mmcv_tracker_config", + "documentation": {} + }, + { + "label": "image_size", + "kind": 5, + "importPath": "configs.mmcv_tracker_config", + "description": "configs.mmcv_tracker_config", + "peekOfCode": "image_size = [512, 512]\nconfig_name = ''\nsave_final_vis=True\nsave_final_flame=True\niters=149\nbs_at_a_time=3\nsampling=[1/2,1,2]\nuse_face_upsample=False", + "detail": "configs.mmcv_tracker_config", + "documentation": {} + }, + { + "label": "config_name", + "kind": 5, + "importPath": "configs.mmcv_tracker_config", + "description": "configs.mmcv_tracker_config", + "peekOfCode": "config_name = ''\nsave_final_vis=True\nsave_final_flame=True\niters=149\nbs_at_a_time=3\nsampling=[1/2,1,2]\nuse_face_upsample=False", + "detail": "configs.mmcv_tracker_config", + "documentation": {} + }, + { + "label": "check_or_make_var", + "kind": 2, + "importPath": "configs.utils", + "description": "configs.utils", + "peekOfCode": "def check_or_make_var(meta_data,key_name,get_default_val_func=lambda :[]):\n if meta_data.get(key_name,None) is None:\n meta_data[key_name]=get_default_val_func()", + "detail": "configs.utils", + "documentation": {} + }, + { + "label": "add_path", + "kind": 2, + "importPath": "configs._init_paths", + "description": "configs._init_paths", + "peekOfCode": "def add_path(path):\n if path not in sys.path:\n sys.path.insert(0, path)\nlocal_bin_paths=[\n r'C:\\Users\\lithiumice',\n r'C:\\Users\\lithiumice\\code',\n os.path.join(this_dir, '..'),\n # sys.path.append(Path(__file__).parent.parent.__str__())\n]\nfor path in local_bin_paths:", + "detail": "configs._init_paths", + "documentation": {} + }, + { + "label": "this_dir", + "kind": 5, + "importPath": "configs._init_paths", + "description": "configs._init_paths", + "peekOfCode": "this_dir = os.path.dirname(__file__)\ndef add_path(path):\n if path not in sys.path:\n sys.path.insert(0, path)\nlocal_bin_paths=[\n r'C:\\Users\\lithiumice',\n r'C:\\Users\\lithiumice\\code',\n os.path.join(this_dir, '..'),\n # sys.path.append(Path(__file__).parent.parent.__str__())\n]", + "detail": "configs._init_paths", + "documentation": {} + }, + { + "label": "IBasicBlock", + "kind": 6, + "importPath": "modules.arcface_torch.backbones.iresnet", + "description": "modules.arcface_torch.backbones.iresnet", + "peekOfCode": "class IBasicBlock(nn.Module):\n expansion = 1\n def __init__(self, inplanes, planes, stride=1, downsample=None,\n groups=1, base_width=64, dilation=1):\n super(IBasicBlock, self).__init__()\n if groups != 1 or base_width != 64:\n raise ValueError('BasicBlock only supports groups=1 and base_width=64')\n if dilation > 1:\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05,)", + "detail": "modules.arcface_torch.backbones.iresnet", + "documentation": {} + }, + { + "label": "IResNet", + "kind": 6, + "importPath": "modules.arcface_torch.backbones.iresnet", + "description": "modules.arcface_torch.backbones.iresnet", 
+ "peekOfCode": "class IResNet(nn.Module):\n fc_scale = 7 * 7\n def __init__(self,\n block, layers, dropout=0, num_features=512, zero_init_residual=False,\n groups=1, width_per_group=64, replace_stride_with_dilation=None, fp16=False):\n super(IResNet, self).__init__()\n self.extra_gflops = 0.0\n self.fp16 = fp16\n self.inplanes = 64\n self.dilation = 1", + "detail": "modules.arcface_torch.backbones.iresnet", + "documentation": {} + }, + { + "label": "conv3x3", + "kind": 2, + "importPath": "modules.arcface_torch.backbones.iresnet", + "description": "modules.arcface_torch.backbones.iresnet", + "peekOfCode": "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=dilation,\n groups=groups,\n bias=False,\n dilation=dilation)", + "detail": "modules.arcface_torch.backbones.iresnet", + "documentation": {} + }, + { + "label": "conv1x1", + "kind": 2, + "importPath": "modules.arcface_torch.backbones.iresnet", + "description": "modules.arcface_torch.backbones.iresnet", + "peekOfCode": "def conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes,\n out_planes,\n kernel_size=1,\n stride=stride,\n bias=False)\nclass IBasicBlock(nn.Module):\n expansion = 1\n def __init__(self, inplanes, planes, stride=1, downsample=None,", + "detail": "modules.arcface_torch.backbones.iresnet", + "documentation": {} + }, + { + "label": "iresnet18", + "kind": 2, + "importPath": "modules.arcface_torch.backbones.iresnet", + "description": "modules.arcface_torch.backbones.iresnet", + "peekOfCode": "def iresnet18(pretrained=False, progress=True, **kwargs):\n return _iresnet('iresnet18', IBasicBlock, [2, 2, 2, 2], pretrained,\n progress, **kwargs)\ndef iresnet34(pretrained=False, progress=True, **kwargs):\n return _iresnet('iresnet34', IBasicBlock, [3, 4, 6, 3], pretrained,\n progress, **kwargs)\ndef iresnet50(pretrained=False, progress=True, **kwargs):\n return _iresnet('iresnet50', IBasicBlock, [3, 4, 14, 3], pretrained,\n progress, **kwargs)\ndef iresnet100(pretrained=False, progress=True, **kwargs):", + "detail": "modules.arcface_torch.backbones.iresnet", + "documentation": {} + }, + { + "label": "iresnet34", + "kind": 2, + "importPath": "modules.arcface_torch.backbones.iresnet", + "description": "modules.arcface_torch.backbones.iresnet", + "peekOfCode": "def iresnet34(pretrained=False, progress=True, **kwargs):\n return _iresnet('iresnet34', IBasicBlock, [3, 4, 6, 3], pretrained,\n progress, **kwargs)\ndef iresnet50(pretrained=False, progress=True, **kwargs):\n return _iresnet('iresnet50', IBasicBlock, [3, 4, 14, 3], pretrained,\n progress, **kwargs)\ndef iresnet100(pretrained=False, progress=True, **kwargs):\n return _iresnet('iresnet100', IBasicBlock, [3, 13, 30, 3], pretrained,\n progress, **kwargs)\ndef iresnet200(pretrained=False, progress=True, **kwargs):", + "detail": "modules.arcface_torch.backbones.iresnet", + "documentation": {} + }, + { + "label": "iresnet50", + "kind": 2, + "importPath": "modules.arcface_torch.backbones.iresnet", + "description": "modules.arcface_torch.backbones.iresnet", + "peekOfCode": "def iresnet50(pretrained=False, progress=True, **kwargs):\n return _iresnet('iresnet50', IBasicBlock, [3, 4, 14, 3], pretrained,\n progress, **kwargs)\ndef iresnet100(pretrained=False, progress=True, **kwargs):\n return _iresnet('iresnet100', IBasicBlock, [3, 13, 30, 3], pretrained,\n progress, **kwargs)\ndef 
iresnet200(pretrained=False, progress=True, **kwargs):\n return _iresnet('iresnet200', IBasicBlock, [6, 26, 60, 6], pretrained,\n progress, **kwargs)", + "detail": "modules.arcface_torch.backbones.iresnet", + "documentation": {} + }, + { + "label": "iresnet100", + "kind": 2, + "importPath": "modules.arcface_torch.backbones.iresnet", + "description": "modules.arcface_torch.backbones.iresnet", + "peekOfCode": "def iresnet100(pretrained=False, progress=True, **kwargs):\n return _iresnet('iresnet100', IBasicBlock, [3, 13, 30, 3], pretrained,\n progress, **kwargs)\ndef iresnet200(pretrained=False, progress=True, **kwargs):\n return _iresnet('iresnet200', IBasicBlock, [6, 26, 60, 6], pretrained,\n progress, **kwargs)", + "detail": "modules.arcface_torch.backbones.iresnet", + "documentation": {} + }, + { + "label": "iresnet200", + "kind": 2, + "importPath": "modules.arcface_torch.backbones.iresnet", + "description": "modules.arcface_torch.backbones.iresnet", + "peekOfCode": "def iresnet200(pretrained=False, progress=True, **kwargs):\n return _iresnet('iresnet200', IBasicBlock, [6, 26, 60, 6], pretrained,\n progress, **kwargs)", + "detail": "modules.arcface_torch.backbones.iresnet", + "documentation": {} + }, + { + "label": "__all__", + "kind": 5, + "importPath": "modules.arcface_torch.backbones.iresnet", + "description": "modules.arcface_torch.backbones.iresnet", + "peekOfCode": "__all__ = ['iresnet18', 'iresnet34', 'iresnet50', 'iresnet100', 'iresnet200']\nusing_ckpt = False\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=dilation,\n groups=groups,", + "detail": "modules.arcface_torch.backbones.iresnet", + "documentation": {} + }, + { + "label": "using_ckpt", + "kind": 5, + "importPath": "modules.arcface_torch.backbones.iresnet", + "description": "modules.arcface_torch.backbones.iresnet", + "peekOfCode": "using_ckpt = False\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=dilation,\n groups=groups,\n bias=False,", + "detail": "modules.arcface_torch.backbones.iresnet", + "documentation": {} + }, + { + "label": "IBasicBlock", + "kind": 6, + "importPath": "modules.arcface_torch.backbones.iresnet2060", + "description": "modules.arcface_torch.backbones.iresnet2060", + "peekOfCode": "class IBasicBlock(nn.Module):\n expansion = 1\n def __init__(self, inplanes, planes, stride=1, downsample=None,\n groups=1, base_width=64, dilation=1):\n super(IBasicBlock, self).__init__()\n if groups != 1 or base_width != 64:\n raise ValueError('BasicBlock only supports groups=1 and base_width=64')\n if dilation > 1:\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05, )", + "detail": "modules.arcface_torch.backbones.iresnet2060", + "documentation": {} + }, + { + "label": "IResNet", + "kind": 6, + "importPath": "modules.arcface_torch.backbones.iresnet2060", + "description": "modules.arcface_torch.backbones.iresnet2060", + "peekOfCode": "class IResNet(nn.Module):\n fc_scale = 7 * 7\n def __init__(self,\n block, layers, dropout=0, num_features=512, zero_init_residual=False,\n groups=1, width_per_group=64, replace_stride_with_dilation=None, fp16=False):\n super(IResNet, self).__init__()\n self.fp16 = fp16\n self.inplanes = 64\n self.dilation = 1\n if 
replace_stride_with_dilation is None:", + "detail": "modules.arcface_torch.backbones.iresnet2060", + "documentation": {} + }, + { + "label": "conv3x3", + "kind": 2, + "importPath": "modules.arcface_torch.backbones.iresnet2060", + "description": "modules.arcface_torch.backbones.iresnet2060", + "peekOfCode": "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=dilation,\n groups=groups,\n bias=False,\n dilation=dilation)", + "detail": "modules.arcface_torch.backbones.iresnet2060", + "documentation": {} + }, + { + "label": "conv1x1", + "kind": 2, + "importPath": "modules.arcface_torch.backbones.iresnet2060", + "description": "modules.arcface_torch.backbones.iresnet2060", + "peekOfCode": "def conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes,\n out_planes,\n kernel_size=1,\n stride=stride,\n bias=False)\nclass IBasicBlock(nn.Module):\n expansion = 1\n def __init__(self, inplanes, planes, stride=1, downsample=None,", + "detail": "modules.arcface_torch.backbones.iresnet2060", + "documentation": {} + }, + { + "label": "iresnet2060", + "kind": 2, + "importPath": "modules.arcface_torch.backbones.iresnet2060", + "description": "modules.arcface_torch.backbones.iresnet2060", + "peekOfCode": "def iresnet2060(pretrained=False, progress=True, **kwargs):\n return _iresnet('iresnet2060', IBasicBlock, [3, 128, 1024 - 128, 3], pretrained, progress, **kwargs)", + "detail": "modules.arcface_torch.backbones.iresnet2060", + "documentation": {} + }, + { + "label": "__all__", + "kind": 5, + "importPath": "modules.arcface_torch.backbones.iresnet2060", + "description": "modules.arcface_torch.backbones.iresnet2060", + "peekOfCode": "__all__ = ['iresnet2060']\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=dilation,\n groups=groups,\n bias=False,", + "detail": "modules.arcface_torch.backbones.iresnet2060", + "documentation": {} + }, + { + "label": "Flatten", + "kind": 6, + "importPath": "modules.arcface_torch.backbones.mobilefacenet", + "description": "modules.arcface_torch.backbones.mobilefacenet", + "peekOfCode": "class Flatten(Module):\n def forward(self, x):\n return x.view(x.size(0), -1)\nclass ConvBlock(Module):\n def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1):\n super(ConvBlock, self).__init__()\n self.layers = nn.Sequential(\n Conv2d(in_c, out_c, kernel, groups=groups, stride=stride, padding=padding, bias=False),\n BatchNorm2d(num_features=out_c),\n PReLU(num_parameters=out_c)", + "detail": "modules.arcface_torch.backbones.mobilefacenet", + "documentation": {} + }, + { + "label": "ConvBlock", + "kind": 6, + "importPath": "modules.arcface_torch.backbones.mobilefacenet", + "description": "modules.arcface_torch.backbones.mobilefacenet", + "peekOfCode": "class ConvBlock(Module):\n def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1):\n super(ConvBlock, self).__init__()\n self.layers = nn.Sequential(\n Conv2d(in_c, out_c, kernel, groups=groups, stride=stride, padding=padding, bias=False),\n BatchNorm2d(num_features=out_c),\n PReLU(num_parameters=out_c)\n )\n def forward(self, x):\n return self.layers(x)", + "detail": "modules.arcface_torch.backbones.mobilefacenet", + "documentation": {} + }, + { + 
"label": "LinearBlock", + "kind": 6, + "importPath": "modules.arcface_torch.backbones.mobilefacenet", + "description": "modules.arcface_torch.backbones.mobilefacenet", + "peekOfCode": "class LinearBlock(Module):\n def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1):\n super(LinearBlock, self).__init__()\n self.layers = nn.Sequential(\n Conv2d(in_c, out_c, kernel, stride, padding, groups=groups, bias=False),\n BatchNorm2d(num_features=out_c)\n )\n def forward(self, x):\n return self.layers(x)\nclass DepthWise(Module):", + "detail": "modules.arcface_torch.backbones.mobilefacenet", + "documentation": {} + }, + { + "label": "DepthWise", + "kind": 6, + "importPath": "modules.arcface_torch.backbones.mobilefacenet", + "description": "modules.arcface_torch.backbones.mobilefacenet", + "peekOfCode": "class DepthWise(Module):\n def __init__(self, in_c, out_c, residual=False, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=1):\n super(DepthWise, self).__init__()\n self.residual = residual\n self.layers = nn.Sequential(\n ConvBlock(in_c, out_c=groups, kernel=(1, 1), padding=(0, 0), stride=(1, 1)),\n ConvBlock(groups, groups, groups=groups, kernel=kernel, padding=padding, stride=stride),\n LinearBlock(groups, out_c, kernel=(1, 1), padding=(0, 0), stride=(1, 1))\n )\n def forward(self, x):", + "detail": "modules.arcface_torch.backbones.mobilefacenet", + "documentation": {} + }, + { + "label": "Residual", + "kind": 6, + "importPath": "modules.arcface_torch.backbones.mobilefacenet", + "description": "modules.arcface_torch.backbones.mobilefacenet", + "peekOfCode": "class Residual(Module):\n def __init__(self, c, num_block, groups, kernel=(3, 3), stride=(1, 1), padding=(1, 1)):\n super(Residual, self).__init__()\n modules = []\n for _ in range(num_block):\n modules.append(DepthWise(c, c, True, kernel, stride, padding, groups))\n self.layers = Sequential(*modules)\n def forward(self, x):\n return self.layers(x)\nclass GDC(Module):", + "detail": "modules.arcface_torch.backbones.mobilefacenet", + "documentation": {} + }, + { + "label": "GDC", + "kind": 6, + "importPath": "modules.arcface_torch.backbones.mobilefacenet", + "description": "modules.arcface_torch.backbones.mobilefacenet", + "peekOfCode": "class GDC(Module):\n def __init__(self, embedding_size):\n super(GDC, self).__init__()\n self.layers = nn.Sequential(\n LinearBlock(512, 512, groups=512, kernel=(7, 7), stride=(1, 1), padding=(0, 0)),\n Flatten(),\n Linear(512, embedding_size, bias=False),\n BatchNorm1d(embedding_size))\n def forward(self, x):\n return self.layers(x)", + "detail": "modules.arcface_torch.backbones.mobilefacenet", + "documentation": {} + }, + { + "label": "MobileFaceNet", + "kind": 6, + "importPath": "modules.arcface_torch.backbones.mobilefacenet", + "description": "modules.arcface_torch.backbones.mobilefacenet", + "peekOfCode": "class MobileFaceNet(Module):\n def __init__(self, fp16=False, num_features=512, blocks=(1, 4, 6, 2), scale=2):\n super(MobileFaceNet, self).__init__()\n self.scale = scale\n self.fp16 = fp16\n self.layers = nn.ModuleList()\n self.layers.append(\n ConvBlock(3, 64 * self.scale, kernel=(3, 3), stride=(2, 2), padding=(1, 1))\n )\n if blocks[0] == 1:", + "detail": "modules.arcface_torch.backbones.mobilefacenet", + "documentation": {} + }, + { + "label": "get_mbf", + "kind": 2, + "importPath": "modules.arcface_torch.backbones.mobilefacenet", + "description": "modules.arcface_torch.backbones.mobilefacenet", + "peekOfCode": "def get_mbf(fp16, num_features, blocks=(1, 4, 6, 
2), scale=2):\n return MobileFaceNet(fp16, num_features, blocks, scale=scale)\ndef get_mbf_large(fp16, num_features, blocks=(2, 8, 12, 4), scale=4):\n return MobileFaceNet(fp16, num_features, blocks, scale=scale)", + "detail": "modules.arcface_torch.backbones.mobilefacenet", + "documentation": {} + }, + { + "label": "get_mbf_large", + "kind": 2, + "importPath": "modules.arcface_torch.backbones.mobilefacenet", + "description": "modules.arcface_torch.backbones.mobilefacenet", + "peekOfCode": "def get_mbf_large(fp16, num_features, blocks=(2, 8, 12, 4), scale=4):\n return MobileFaceNet(fp16, num_features, blocks, scale=scale)", + "detail": "modules.arcface_torch.backbones.mobilefacenet", + "documentation": {} + }, + { + "label": "Mlp", + "kind": 6, + "importPath": "modules.arcface_torch.backbones.vit", + "description": "modules.arcface_torch.backbones.vit", + "peekOfCode": "class Mlp(nn.Module):\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU6, drop=0.):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.act = act_layer()\n self.fc2 = nn.Linear(hidden_features, out_features)\n self.drop = nn.Dropout(drop)\n def forward(self, x):", + "detail": "modules.arcface_torch.backbones.vit", + "documentation": {} + }, + { + "label": "VITBatchNorm", + "kind": 6, + "importPath": "modules.arcface_torch.backbones.vit", + "description": "modules.arcface_torch.backbones.vit", + "peekOfCode": "class VITBatchNorm(nn.Module):\n def __init__(self, num_features):\n super().__init__()\n self.num_features = num_features\n self.bn = nn.BatchNorm1d(num_features=num_features)\n def forward(self, x):\n return self.bn(x)\nclass Attention(nn.Module):\n def __init__(self,\n dim: int,", + "detail": "modules.arcface_torch.backbones.vit", + "documentation": {} + }, + { + "label": "Attention", + "kind": 6, + "importPath": "modules.arcface_torch.backbones.vit", + "description": "modules.arcface_torch.backbones.vit", + "peekOfCode": "class Attention(nn.Module):\n def __init__(self,\n dim: int,\n num_heads: int = 8,\n qkv_bias: bool = False,\n qk_scale: Optional[None] = None,\n attn_drop: float = 0.,\n proj_drop: float = 0.):\n super().__init__()\n self.num_heads = num_heads", + "detail": "modules.arcface_torch.backbones.vit", + "documentation": {} + }, + { + "label": "Block", + "kind": 6, + "importPath": "modules.arcface_torch.backbones.vit", + "description": "modules.arcface_torch.backbones.vit", + "peekOfCode": "class Block(nn.Module):\n def __init__(self,\n dim: int,\n num_heads: int,\n num_patches: int,\n mlp_ratio: float = 4.,\n qkv_bias: bool = False,\n qk_scale: Optional[None] = None,\n drop: float = 0.,\n attn_drop: float = 0.,", + "detail": "modules.arcface_torch.backbones.vit", + "documentation": {} + }, + { + "label": "PatchEmbed", + "kind": 6, + "importPath": "modules.arcface_torch.backbones.vit", + "description": "modules.arcface_torch.backbones.vit", + "peekOfCode": "class PatchEmbed(nn.Module):\n def __init__(self, img_size=108, patch_size=9, in_channels=3, embed_dim=768):\n super().__init__()\n img_size = to_2tuple(img_size)\n patch_size = to_2tuple(patch_size)\n num_patches = (img_size[1] // patch_size[1]) * \\\n (img_size[0] // patch_size[0])\n self.img_size = img_size\n self.patch_size = patch_size\n self.num_patches = num_patches", + "detail": "modules.arcface_torch.backbones.vit", + "documentation": {} + }, + { + "label": 
"VisionTransformer", + "kind": 6, + "importPath": "modules.arcface_torch.backbones.vit", + "description": "modules.arcface_torch.backbones.vit", + "peekOfCode": "class VisionTransformer(nn.Module):\n \"\"\" Vision Transformer with support for patch or hybrid CNN input stage\n \"\"\"\n def __init__(self,\n img_size: int = 112,\n patch_size: int = 16,\n in_channels: int = 3,\n num_classes: int = 1000,\n embed_dim: int = 768,\n depth: int = 12,", + "detail": "modules.arcface_torch.backbones.vit", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.3millions", + "description": "modules.arcface_torch.configs.3millions", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"mbf\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.1\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4", + "detail": "modules.arcface_torch.configs.3millions", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.3millions", + "description": "modules.arcface_torch.configs.3millions", + "peekOfCode": "config.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"mbf\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.1\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 512 # total_batch_size = batch_size * num_gpus", + "detail": "modules.arcface_torch.configs.3millions", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.3millions", + "description": "modules.arcface_torch.configs.3millions", + "peekOfCode": "config.network = \"mbf\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.1\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 512 # total_batch_size = batch_size * num_gpus\nconfig.lr = 0.1 # batch size is 512", + "detail": "modules.arcface_torch.configs.3millions", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.3millions", + "description": "modules.arcface_torch.configs.3millions", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.1\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 512 # total_batch_size = batch_size * num_gpus\nconfig.lr = 0.1 # batch size is 512\nconfig.rec = \"synthetic\"", + "detail": "modules.arcface_torch.configs.3millions", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.3millions", + "description": "modules.arcface_torch.configs.3millions", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.1\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 512 # total_batch_size = batch_size * num_gpus\nconfig.lr = 0.1 # batch size is 512\nconfig.rec = \"synthetic\"\nconfig.num_classes = 30 * 10000", + "detail": "modules.arcface_torch.configs.3millions", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.3millions", + "description": "modules.arcface_torch.configs.3millions", + "peekOfCode": "config.embedding_size = 
512\nconfig.sample_rate = 0.1\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 512 # total_batch_size = batch_size * num_gpus\nconfig.lr = 0.1 # batch size is 512\nconfig.rec = \"synthetic\"\nconfig.num_classes = 30 * 10000\nconfig.num_image = 100000", + "detail": "modules.arcface_torch.configs.3millions", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.3millions", + "description": "modules.arcface_torch.configs.3millions", + "peekOfCode": "config.sample_rate = 0.1\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 512 # total_batch_size = batch_size * num_gpus\nconfig.lr = 0.1 # batch size is 512\nconfig.rec = \"synthetic\"\nconfig.num_classes = 30 * 10000\nconfig.num_image = 100000\nconfig.num_epoch = 30", + "detail": "modules.arcface_torch.configs.3millions", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.3millions", + "description": "modules.arcface_torch.configs.3millions", + "peekOfCode": "config.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 512 # total_batch_size = batch_size * num_gpus\nconfig.lr = 0.1 # batch size is 512\nconfig.rec = \"synthetic\"\nconfig.num_classes = 30 * 10000\nconfig.num_image = 100000\nconfig.num_epoch = 30\nconfig.warmup_epoch = -1", + "detail": "modules.arcface_torch.configs.3millions", + "documentation": {} + }, + { + "label": "config.momentum", + "kind": 5, + "importPath": "modules.arcface_torch.configs.3millions", + "description": "modules.arcface_torch.configs.3millions", + "peekOfCode": "config.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 512 # total_batch_size = batch_size * num_gpus\nconfig.lr = 0.1 # batch size is 512\nconfig.rec = \"synthetic\"\nconfig.num_classes = 30 * 10000\nconfig.num_image = 100000\nconfig.num_epoch = 30\nconfig.warmup_epoch = -1\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.3millions", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.3millions", + "description": "modules.arcface_torch.configs.3millions", + "peekOfCode": "config.weight_decay = 5e-4\nconfig.batch_size = 512 # total_batch_size = batch_size * num_gpus\nconfig.lr = 0.1 # batch size is 512\nconfig.rec = \"synthetic\"\nconfig.num_classes = 30 * 10000\nconfig.num_image = 100000\nconfig.num_epoch = 30\nconfig.warmup_epoch = -1\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.3millions", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.3millions", + "description": "modules.arcface_torch.configs.3millions", + "peekOfCode": "config.batch_size = 512 # total_batch_size = batch_size * num_gpus\nconfig.lr = 0.1 # batch size is 512\nconfig.rec = \"synthetic\"\nconfig.num_classes = 30 * 10000\nconfig.num_image = 100000\nconfig.num_epoch = 30\nconfig.warmup_epoch = -1\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.3millions", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.3millions", + "description": "modules.arcface_torch.configs.3millions", + "peekOfCode": "config.lr = 0.1 # batch size is 512\nconfig.rec = \"synthetic\"\nconfig.num_classes = 30 * 10000\nconfig.num_image = 100000\nconfig.num_epoch = 
30\nconfig.warmup_epoch = -1\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.3millions", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.3millions", + "description": "modules.arcface_torch.configs.3millions", + "peekOfCode": "config.rec = \"synthetic\"\nconfig.num_classes = 30 * 10000\nconfig.num_image = 100000\nconfig.num_epoch = 30\nconfig.warmup_epoch = -1\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.3millions", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.3millions", + "description": "modules.arcface_torch.configs.3millions", + "peekOfCode": "config.num_classes = 30 * 10000\nconfig.num_image = 100000\nconfig.num_epoch = 30\nconfig.warmup_epoch = -1\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.3millions", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.3millions", + "description": "modules.arcface_torch.configs.3millions", + "peekOfCode": "config.num_image = 100000\nconfig.num_epoch = 30\nconfig.warmup_epoch = -1\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.3millions", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.3millions", + "description": "modules.arcface_torch.configs.3millions", + "peekOfCode": "config.num_epoch = 30\nconfig.warmup_epoch = -1\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.3millions", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.3millions", + "description": "modules.arcface_torch.configs.3millions", + "peekOfCode": "config.warmup_epoch = -1\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.3millions", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.3millions", + "description": "modules.arcface_torch.configs.3millions", + "peekOfCode": "config.val_targets = []", + "detail": "modules.arcface_torch.configs.3millions", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.base", + "description": "modules.arcface_torch.configs.base", + "peekOfCode": "config = edict()\n# Margin Base Softmax\nconfig.margin_list = (1.0, 0.5, 0.0)\nconfig.network = \"r50\"\nconfig.resume = False\nconfig.save_all_states = False\nconfig.output = \"ms1mv3_arcface_r50\"\nconfig.embedding_size = 512\n# Partial FC\nconfig.sample_rate = 1", + "detail": "modules.arcface_torch.configs.base", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.base", + "description": "modules.arcface_torch.configs.base", + "peekOfCode": "config.margin_list = (1.0, 0.5, 0.0)\nconfig.network = \"r50\"\nconfig.resume = False\nconfig.save_all_states = False\nconfig.output = \"ms1mv3_arcface_r50\"\nconfig.embedding_size = 512\n# Partial FC\nconfig.sample_rate = 1\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = False", + "detail": "modules.arcface_torch.configs.base", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.base", + "description": "modules.arcface_torch.configs.base", + "peekOfCode": "config.network = \"r50\"\nconfig.resume = 
False\nconfig.save_all_states = False\nconfig.output = \"ms1mv3_arcface_r50\"\nconfig.embedding_size = 512\n# Partial FC\nconfig.sample_rate = 1\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = False\nconfig.batch_size = 128", + "detail": "modules.arcface_torch.configs.base", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.base", + "description": "modules.arcface_torch.configs.base", + "peekOfCode": "config.resume = False\nconfig.save_all_states = False\nconfig.output = \"ms1mv3_arcface_r50\"\nconfig.embedding_size = 512\n# Partial FC\nconfig.sample_rate = 1\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = False\nconfig.batch_size = 128\n# For SGD ", + "detail": "modules.arcface_torch.configs.base", + "documentation": {} + }, + { + "label": "config.save_all_states", + "kind": 5, + "importPath": "modules.arcface_torch.configs.base", + "description": "modules.arcface_torch.configs.base", + "peekOfCode": "config.save_all_states = False\nconfig.output = \"ms1mv3_arcface_r50\"\nconfig.embedding_size = 512\n# Partial FC\nconfig.sample_rate = 1\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = False\nconfig.batch_size = 128\n# For SGD \nconfig.optimizer = \"sgd\"", + "detail": "modules.arcface_torch.configs.base", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.base", + "description": "modules.arcface_torch.configs.base", + "peekOfCode": "config.output = \"ms1mv3_arcface_r50\"\nconfig.embedding_size = 512\n# Partial FC\nconfig.sample_rate = 1\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = False\nconfig.batch_size = 128\n# For SGD \nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1", + "detail": "modules.arcface_torch.configs.base", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.base", + "description": "modules.arcface_torch.configs.base", + "peekOfCode": "config.embedding_size = 512\n# Partial FC\nconfig.sample_rate = 1\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = False\nconfig.batch_size = 128\n# For SGD \nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.momentum = 0.9", + "detail": "modules.arcface_torch.configs.base", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.base", + "description": "modules.arcface_torch.configs.base", + "peekOfCode": "config.sample_rate = 1\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = False\nconfig.batch_size = 128\n# For SGD \nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\n# For AdamW", + "detail": "modules.arcface_torch.configs.base", + "documentation": {} + }, + { + "label": "config.interclass_filtering_threshold", + "kind": 5, + "importPath": "modules.arcface_torch.configs.base", + "description": "modules.arcface_torch.configs.base", + "peekOfCode": "config.interclass_filtering_threshold = 0\nconfig.fp16 = False\nconfig.batch_size = 128\n# For SGD \nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\n# For AdamW\n# config.optimizer = \"adamw\"", + "detail": "modules.arcface_torch.configs.base", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.base", + "description": "modules.arcface_torch.configs.base", + "peekOfCode": "config.fp16 = 
False\nconfig.batch_size = 128\n# For SGD \nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\n# For AdamW\n# config.optimizer = \"adamw\"\n# config.lr = 0.001", + "detail": "modules.arcface_torch.configs.base", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.base", + "description": "modules.arcface_torch.configs.base", + "peekOfCode": "config.batch_size = 128\n# For SGD \nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\n# For AdamW\n# config.optimizer = \"adamw\"\n# config.lr = 0.001\n# config.weight_decay = 0.1", + "detail": "modules.arcface_torch.configs.base", + "documentation": {} + }, + { + "label": "config.optimizer", + "kind": 5, + "importPath": "modules.arcface_torch.configs.base", + "description": "modules.arcface_torch.configs.base", + "peekOfCode": "config.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\n# For AdamW\n# config.optimizer = \"adamw\"\n# config.lr = 0.001\n# config.weight_decay = 0.1\nconfig.verbose = 2000\nconfig.frequent = 10", + "detail": "modules.arcface_torch.configs.base", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.base", + "description": "modules.arcface_torch.configs.base", + "peekOfCode": "config.lr = 0.1\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\n# For AdamW\n# config.optimizer = \"adamw\"\n# config.lr = 0.001\n# config.weight_decay = 0.1\nconfig.verbose = 2000\nconfig.frequent = 10\n# For Large Scale Dataset, such as WebFace42M", + "detail": "modules.arcface_torch.configs.base", + "documentation": {} + }, + { + "label": "config.momentum", + "kind": 5, + "importPath": "modules.arcface_torch.configs.base", + "description": "modules.arcface_torch.configs.base", + "peekOfCode": "config.momentum = 0.9\nconfig.weight_decay = 5e-4\n# For AdamW\n# config.optimizer = \"adamw\"\n# config.lr = 0.001\n# config.weight_decay = 0.1\nconfig.verbose = 2000\nconfig.frequent = 10\n# For Large Scale Dataset, such as WebFace42M\nconfig.dali = False ", + "detail": "modules.arcface_torch.configs.base", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.base", + "description": "modules.arcface_torch.configs.base", + "peekOfCode": "config.weight_decay = 5e-4\n# For AdamW\n# config.optimizer = \"adamw\"\n# config.lr = 0.001\n# config.weight_decay = 0.1\nconfig.verbose = 2000\nconfig.frequent = 10\n# For Large Scale Dataset, such as WebFace42M\nconfig.dali = False \n# Gradient ACC", + "detail": "modules.arcface_torch.configs.base", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.base", + "description": "modules.arcface_torch.configs.base", + "peekOfCode": "config.verbose = 2000\nconfig.frequent = 10\n# For Large Scale Dataset, such as WebFace42M\nconfig.dali = False \n# Gradient ACC\nconfig.gradient_acc = 1\n# setup seed\nconfig.seed = 2048\n# dataload numworkers\nconfig.num_workers = 2", + "detail": "modules.arcface_torch.configs.base", + "documentation": {} + }, + { + "label": "config.frequent", + "kind": 5, + "importPath": "modules.arcface_torch.configs.base", + "description": "modules.arcface_torch.configs.base", + "peekOfCode": "config.frequent = 10\n# For Large Scale Dataset, such as WebFace42M\nconfig.dali = False \n# Gradient 
ACC\nconfig.gradient_acc = 1\n# setup seed\nconfig.seed = 2048\n# dataload numworkers\nconfig.num_workers = 2", + "detail": "modules.arcface_torch.configs.base", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.base", + "description": "modules.arcface_torch.configs.base", + "peekOfCode": "config.dali = False \n# Gradient ACC\nconfig.gradient_acc = 1\n# setup seed\nconfig.seed = 2048\n# dataload numworkers\nconfig.num_workers = 2", + "detail": "modules.arcface_torch.configs.base", + "documentation": {} + }, + { + "label": "config.gradient_acc", + "kind": 5, + "importPath": "modules.arcface_torch.configs.base", + "description": "modules.arcface_torch.configs.base", + "peekOfCode": "config.gradient_acc = 1\n# setup seed\nconfig.seed = 2048\n# dataload numworkers\nconfig.num_workers = 2", + "detail": "modules.arcface_torch.configs.base", + "documentation": {} + }, + { + "label": "config.seed", + "kind": 5, + "importPath": "modules.arcface_torch.configs.base", + "description": "modules.arcface_torch.configs.base", + "peekOfCode": "config.seed = 2048\n# dataload numworkers\nconfig.num_workers = 2", + "detail": "modules.arcface_torch.configs.base", + "documentation": {} + }, + { + "label": "config.num_workers", + "kind": 5, + "importPath": "modules.arcface_torch.configs.base", + "description": "modules.arcface_torch.configs.base", + "peekOfCode": "config.num_workers = 2", + "detail": "modules.arcface_torch.configs.base", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_mbf", + "description": "modules.arcface_torch.configs.glint360k_mbf", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"mbf\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4", + "detail": "modules.arcface_torch.configs.glint360k_mbf", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_mbf", + "description": "modules.arcface_torch.configs.glint360k_mbf", + "peekOfCode": "config.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"mbf\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128", + "detail": "modules.arcface_torch.configs.glint360k_mbf", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_mbf", + "description": "modules.arcface_torch.configs.glint360k_mbf", + "peekOfCode": "config.network = \"mbf\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1", + "detail": "modules.arcface_torch.configs.glint360k_mbf", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_mbf", + "description": "modules.arcface_torch.configs.glint360k_mbf", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000", + "detail": 
"modules.arcface_torch.configs.glint360k_mbf", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_mbf", + "description": "modules.arcface_torch.configs.glint360k_mbf", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.glint360k_mbf", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_mbf", + "description": "modules.arcface_torch.configs.glint360k_mbf", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/glint360k\"", + "detail": "modules.arcface_torch.configs.glint360k_mbf", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_mbf", + "description": "modules.arcface_torch.configs.glint360k_mbf", + "peekOfCode": "config.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/glint360k\"\nconfig.num_classes = 360232", + "detail": "modules.arcface_torch.configs.glint360k_mbf", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_mbf", + "description": "modules.arcface_torch.configs.glint360k_mbf", + "peekOfCode": "config.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/glint360k\"\nconfig.num_classes = 360232\nconfig.num_image = 17091657", + "detail": "modules.arcface_torch.configs.glint360k_mbf", + "documentation": {} + }, + { + "label": "config.momentum", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_mbf", + "description": "modules.arcface_torch.configs.glint360k_mbf", + "peekOfCode": "config.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/glint360k\"\nconfig.num_classes = 360232\nconfig.num_image = 17091657\nconfig.num_epoch = 20", + "detail": "modules.arcface_torch.configs.glint360k_mbf", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_mbf", + "description": "modules.arcface_torch.configs.glint360k_mbf", + "peekOfCode": "config.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/glint360k\"\nconfig.num_classes = 360232\nconfig.num_image = 17091657\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0", + "detail": "modules.arcface_torch.configs.glint360k_mbf", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_mbf", + "description": "modules.arcface_torch.configs.glint360k_mbf", + "peekOfCode": "config.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = 
\"/train_tmp/glint360k\"\nconfig.num_classes = 360232\nconfig.num_image = 17091657\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_mbf", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_mbf", + "description": "modules.arcface_torch.configs.glint360k_mbf", + "peekOfCode": "config.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/glint360k\"\nconfig.num_classes = 360232\nconfig.num_image = 17091657\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_mbf", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_mbf", + "description": "modules.arcface_torch.configs.glint360k_mbf", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/glint360k\"\nconfig.num_classes = 360232\nconfig.num_image = 17091657\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_mbf", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_mbf", + "description": "modules.arcface_torch.configs.glint360k_mbf", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/glint360k\"\nconfig.num_classes = 360232\nconfig.num_image = 17091657\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_mbf", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_mbf", + "description": "modules.arcface_torch.configs.glint360k_mbf", + "peekOfCode": "config.rec = \"/train_tmp/glint360k\"\nconfig.num_classes = 360232\nconfig.num_image = 17091657\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_mbf", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_mbf", + "description": "modules.arcface_torch.configs.glint360k_mbf", + "peekOfCode": "config.num_classes = 360232\nconfig.num_image = 17091657\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_mbf", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_mbf", + "description": "modules.arcface_torch.configs.glint360k_mbf", + "peekOfCode": "config.num_image = 17091657\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_mbf", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_mbf", + "description": "modules.arcface_torch.configs.glint360k_mbf", + "peekOfCode": "config.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_mbf", + "documentation": {} + }, + { + "label": 
"config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_mbf", + "description": "modules.arcface_torch.configs.glint360k_mbf", + "peekOfCode": "config.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_mbf", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_mbf", + "description": "modules.arcface_torch.configs.glint360k_mbf", + "peekOfCode": "config.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_mbf", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r100", + "description": "modules.arcface_torch.configs.glint360k_r100", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r100\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4", + "detail": "modules.arcface_torch.configs.glint360k_r100", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r100", + "description": "modules.arcface_torch.configs.glint360k_r100", + "peekOfCode": "config.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r100\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128", + "detail": "modules.arcface_torch.configs.glint360k_r100", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r100", + "description": "modules.arcface_torch.configs.glint360k_r100", + "peekOfCode": "config.network = \"r100\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1", + "detail": "modules.arcface_torch.configs.glint360k_r100", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r100", + "description": "modules.arcface_torch.configs.glint360k_r100", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.glint360k_r100", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r100", + "description": "modules.arcface_torch.configs.glint360k_r100", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.glint360k_r100", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r100", + "description": "modules.arcface_torch.configs.glint360k_r100", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 
1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/glint360k\"", + "detail": "modules.arcface_torch.configs.glint360k_r100", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r100", + "description": "modules.arcface_torch.configs.glint360k_r100", + "peekOfCode": "config.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/glint360k\"\nconfig.num_classes = 360232", + "detail": "modules.arcface_torch.configs.glint360k_r100", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r100", + "description": "modules.arcface_torch.configs.glint360k_r100", + "peekOfCode": "config.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/glint360k\"\nconfig.num_classes = 360232\nconfig.num_image = 17091657", + "detail": "modules.arcface_torch.configs.glint360k_r100", + "documentation": {} + }, + { + "label": "config.momentum", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r100", + "description": "modules.arcface_torch.configs.glint360k_r100", + "peekOfCode": "config.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/glint360k\"\nconfig.num_classes = 360232\nconfig.num_image = 17091657\nconfig.num_epoch = 20", + "detail": "modules.arcface_torch.configs.glint360k_r100", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r100", + "description": "modules.arcface_torch.configs.glint360k_r100", + "peekOfCode": "config.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/glint360k\"\nconfig.num_classes = 360232\nconfig.num_image = 17091657\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0", + "detail": "modules.arcface_torch.configs.glint360k_r100", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r100", + "description": "modules.arcface_torch.configs.glint360k_r100", + "peekOfCode": "config.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/glint360k\"\nconfig.num_classes = 360232\nconfig.num_image = 17091657\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_r100", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r100", + "description": "modules.arcface_torch.configs.glint360k_r100", + "peekOfCode": "config.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/glint360k\"\nconfig.num_classes = 360232\nconfig.num_image = 17091657\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_r100", + "documentation": {} + }, + { + "label": 
"config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r100", + "description": "modules.arcface_torch.configs.glint360k_r100", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/glint360k\"\nconfig.num_classes = 360232\nconfig.num_image = 17091657\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_r100", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r100", + "description": "modules.arcface_torch.configs.glint360k_r100", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/glint360k\"\nconfig.num_classes = 360232\nconfig.num_image = 17091657\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_r100", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r100", + "description": "modules.arcface_torch.configs.glint360k_r100", + "peekOfCode": "config.rec = \"/train_tmp/glint360k\"\nconfig.num_classes = 360232\nconfig.num_image = 17091657\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_r100", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r100", + "description": "modules.arcface_torch.configs.glint360k_r100", + "peekOfCode": "config.num_classes = 360232\nconfig.num_image = 17091657\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_r100", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r100", + "description": "modules.arcface_torch.configs.glint360k_r100", + "peekOfCode": "config.num_image = 17091657\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_r100", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r100", + "description": "modules.arcface_torch.configs.glint360k_r100", + "peekOfCode": "config.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_r100", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r100", + "description": "modules.arcface_torch.configs.glint360k_r100", + "peekOfCode": "config.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_r100", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r100", + "description": "modules.arcface_torch.configs.glint360k_r100", + "peekOfCode": "config.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_r100", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r50", + "description": 
"modules.arcface_torch.configs.glint360k_r50", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4", + "detail": "modules.arcface_torch.configs.glint360k_r50", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r50", + "description": "modules.arcface_torch.configs.glint360k_r50", + "peekOfCode": "config.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128", + "detail": "modules.arcface_torch.configs.glint360k_r50", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r50", + "description": "modules.arcface_torch.configs.glint360k_r50", + "peekOfCode": "config.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1", + "detail": "modules.arcface_torch.configs.glint360k_r50", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r50", + "description": "modules.arcface_torch.configs.glint360k_r50", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.glint360k_r50", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r50", + "description": "modules.arcface_torch.configs.glint360k_r50", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.glint360k_r50", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r50", + "description": "modules.arcface_torch.configs.glint360k_r50", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/glint360k\"", + "detail": "modules.arcface_torch.configs.glint360k_r50", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r50", + "description": "modules.arcface_torch.configs.glint360k_r50", + "peekOfCode": "config.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/glint360k\"\nconfig.num_classes = 360232", + "detail": "modules.arcface_torch.configs.glint360k_r50", + "documentation": {} + }, + { 
+ "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r50", + "description": "modules.arcface_torch.configs.glint360k_r50", + "peekOfCode": "config.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/glint360k\"\nconfig.num_classes = 360232\nconfig.num_image = 17091657", + "detail": "modules.arcface_torch.configs.glint360k_r50", + "documentation": {} + }, + { + "label": "config.momentum", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r50", + "description": "modules.arcface_torch.configs.glint360k_r50", + "peekOfCode": "config.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/glint360k\"\nconfig.num_classes = 360232\nconfig.num_image = 17091657\nconfig.num_epoch = 20", + "detail": "modules.arcface_torch.configs.glint360k_r50", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r50", + "description": "modules.arcface_torch.configs.glint360k_r50", + "peekOfCode": "config.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/glint360k\"\nconfig.num_classes = 360232\nconfig.num_image = 17091657\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0", + "detail": "modules.arcface_torch.configs.glint360k_r50", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r50", + "description": "modules.arcface_torch.configs.glint360k_r50", + "peekOfCode": "config.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/glint360k\"\nconfig.num_classes = 360232\nconfig.num_image = 17091657\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_r50", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r50", + "description": "modules.arcface_torch.configs.glint360k_r50", + "peekOfCode": "config.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/glint360k\"\nconfig.num_classes = 360232\nconfig.num_image = 17091657\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_r50", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r50", + "description": "modules.arcface_torch.configs.glint360k_r50", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/glint360k\"\nconfig.num_classes = 360232\nconfig.num_image = 17091657\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_r50", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r50", + "description": "modules.arcface_torch.configs.glint360k_r50", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/glint360k\"\nconfig.num_classes = 360232\nconfig.num_image = 17091657\nconfig.num_epoch = 
20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_r50", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r50", + "description": "modules.arcface_torch.configs.glint360k_r50", + "peekOfCode": "config.rec = \"/train_tmp/glint360k\"\nconfig.num_classes = 360232\nconfig.num_image = 17091657\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_r50", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r50", + "description": "modules.arcface_torch.configs.glint360k_r50", + "peekOfCode": "config.num_classes = 360232\nconfig.num_image = 17091657\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_r50", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r50", + "description": "modules.arcface_torch.configs.glint360k_r50", + "peekOfCode": "config.num_image = 17091657\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_r50", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r50", + "description": "modules.arcface_torch.configs.glint360k_r50", + "peekOfCode": "config.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_r50", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r50", + "description": "modules.arcface_torch.configs.glint360k_r50", + "peekOfCode": "config.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_r50", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.glint360k_r50", + "description": "modules.arcface_torch.configs.glint360k_r50", + "peekOfCode": "config.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.glint360k_r50", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_mbf", + "description": "modules.arcface_torch.configs.ms1mv2_mbf", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.5, 0.0)\nconfig.network = \"mbf\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4", + "detail": "modules.arcface_torch.configs.ms1mv2_mbf", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_mbf", + "description": "modules.arcface_torch.configs.ms1mv2_mbf", + "peekOfCode": "config.margin_list = (1.0, 0.5, 0.0)\nconfig.network = \"mbf\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128", + "detail": 
"modules.arcface_torch.configs.ms1mv2_mbf", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_mbf", + "description": "modules.arcface_torch.configs.ms1mv2_mbf", + "peekOfCode": "config.network = \"mbf\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1", + "detail": "modules.arcface_torch.configs.ms1mv2_mbf", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_mbf", + "description": "modules.arcface_torch.configs.ms1mv2_mbf", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.ms1mv2_mbf", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_mbf", + "description": "modules.arcface_torch.configs.ms1mv2_mbf", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.ms1mv2_mbf", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_mbf", + "description": "modules.arcface_torch.configs.ms1mv2_mbf", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/faces_emore\"", + "detail": "modules.arcface_torch.configs.ms1mv2_mbf", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_mbf", + "description": "modules.arcface_torch.configs.ms1mv2_mbf", + "peekOfCode": "config.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/faces_emore\"\nconfig.num_classes = 85742", + "detail": "modules.arcface_torch.configs.ms1mv2_mbf", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_mbf", + "description": "modules.arcface_torch.configs.ms1mv2_mbf", + "peekOfCode": "config.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/faces_emore\"\nconfig.num_classes = 85742\nconfig.num_image = 5822653", + "detail": "modules.arcface_torch.configs.ms1mv2_mbf", + "documentation": {} + }, + { + "label": "config.momentum", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_mbf", + "description": "modules.arcface_torch.configs.ms1mv2_mbf", + "peekOfCode": "config.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/faces_emore\"\nconfig.num_classes = 
85742\nconfig.num_image = 5822653\nconfig.num_epoch = 40", + "detail": "modules.arcface_torch.configs.ms1mv2_mbf", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_mbf", + "description": "modules.arcface_torch.configs.ms1mv2_mbf", + "peekOfCode": "config.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/faces_emore\"\nconfig.num_classes = 85742\nconfig.num_image = 5822653\nconfig.num_epoch = 40\nconfig.warmup_epoch = 0", + "detail": "modules.arcface_torch.configs.ms1mv2_mbf", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_mbf", + "description": "modules.arcface_torch.configs.ms1mv2_mbf", + "peekOfCode": "config.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/faces_emore\"\nconfig.num_classes = 85742\nconfig.num_image = 5822653\nconfig.num_epoch = 40\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_mbf", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_mbf", + "description": "modules.arcface_torch.configs.ms1mv2_mbf", + "peekOfCode": "config.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/faces_emore\"\nconfig.num_classes = 85742\nconfig.num_image = 5822653\nconfig.num_epoch = 40\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_mbf", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_mbf", + "description": "modules.arcface_torch.configs.ms1mv2_mbf", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/faces_emore\"\nconfig.num_classes = 85742\nconfig.num_image = 5822653\nconfig.num_epoch = 40\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_mbf", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_mbf", + "description": "modules.arcface_torch.configs.ms1mv2_mbf", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/faces_emore\"\nconfig.num_classes = 85742\nconfig.num_image = 5822653\nconfig.num_epoch = 40\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_mbf", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_mbf", + "description": "modules.arcface_torch.configs.ms1mv2_mbf", + "peekOfCode": "config.rec = \"/train_tmp/faces_emore\"\nconfig.num_classes = 85742\nconfig.num_image = 5822653\nconfig.num_epoch = 40\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_mbf", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_mbf", + "description": "modules.arcface_torch.configs.ms1mv2_mbf", + "peekOfCode": "config.num_classes = 85742\nconfig.num_image = 5822653\nconfig.num_epoch = 40\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', 
\"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_mbf", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_mbf", + "description": "modules.arcface_torch.configs.ms1mv2_mbf", + "peekOfCode": "config.num_image = 5822653\nconfig.num_epoch = 40\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_mbf", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_mbf", + "description": "modules.arcface_torch.configs.ms1mv2_mbf", + "peekOfCode": "config.num_epoch = 40\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_mbf", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_mbf", + "description": "modules.arcface_torch.configs.ms1mv2_mbf", + "peekOfCode": "config.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_mbf", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_mbf", + "description": "modules.arcface_torch.configs.ms1mv2_mbf", + "peekOfCode": "config.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_mbf", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r100", + "description": "modules.arcface_torch.configs.ms1mv2_r100", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.5, 0.0)\nconfig.network = \"r100\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4", + "detail": "modules.arcface_torch.configs.ms1mv2_r100", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r100", + "description": "modules.arcface_torch.configs.ms1mv2_r100", + "peekOfCode": "config.margin_list = (1.0, 0.5, 0.0)\nconfig.network = \"r100\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128", + "detail": "modules.arcface_torch.configs.ms1mv2_r100", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r100", + "description": "modules.arcface_torch.configs.ms1mv2_r100", + "peekOfCode": "config.network = \"r100\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1", + "detail": "modules.arcface_torch.configs.ms1mv2_r100", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r100", + "description": "modules.arcface_torch.configs.ms1mv2_r100", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000", + "detail": 
"modules.arcface_torch.configs.ms1mv2_r100", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r100", + "description": "modules.arcface_torch.configs.ms1mv2_r100", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.ms1mv2_r100", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r100", + "description": "modules.arcface_torch.configs.ms1mv2_r100", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/faces_emore\"", + "detail": "modules.arcface_torch.configs.ms1mv2_r100", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r100", + "description": "modules.arcface_torch.configs.ms1mv2_r100", + "peekOfCode": "config.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/faces_emore\"\nconfig.num_classes = 85742", + "detail": "modules.arcface_torch.configs.ms1mv2_r100", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r100", + "description": "modules.arcface_torch.configs.ms1mv2_r100", + "peekOfCode": "config.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/faces_emore\"\nconfig.num_classes = 85742\nconfig.num_image = 5822653", + "detail": "modules.arcface_torch.configs.ms1mv2_r100", + "documentation": {} + }, + { + "label": "config.momentum", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r100", + "description": "modules.arcface_torch.configs.ms1mv2_r100", + "peekOfCode": "config.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/faces_emore\"\nconfig.num_classes = 85742\nconfig.num_image = 5822653\nconfig.num_epoch = 20", + "detail": "modules.arcface_torch.configs.ms1mv2_r100", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r100", + "description": "modules.arcface_torch.configs.ms1mv2_r100", + "peekOfCode": "config.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/faces_emore\"\nconfig.num_classes = 85742\nconfig.num_image = 5822653\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0", + "detail": "modules.arcface_torch.configs.ms1mv2_r100", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r100", + "description": "modules.arcface_torch.configs.ms1mv2_r100", + "peekOfCode": "config.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/faces_emore\"\nconfig.num_classes 
= 85742\nconfig.num_image = 5822653\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_r100", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r100", + "description": "modules.arcface_torch.configs.ms1mv2_r100", + "peekOfCode": "config.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/faces_emore\"\nconfig.num_classes = 85742\nconfig.num_image = 5822653\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_r100", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r100", + "description": "modules.arcface_torch.configs.ms1mv2_r100", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/faces_emore\"\nconfig.num_classes = 85742\nconfig.num_image = 5822653\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_r100", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r100", + "description": "modules.arcface_torch.configs.ms1mv2_r100", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/faces_emore\"\nconfig.num_classes = 85742\nconfig.num_image = 5822653\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_r100", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r100", + "description": "modules.arcface_torch.configs.ms1mv2_r100", + "peekOfCode": "config.rec = \"/train_tmp/faces_emore\"\nconfig.num_classes = 85742\nconfig.num_image = 5822653\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_r100", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r100", + "description": "modules.arcface_torch.configs.ms1mv2_r100", + "peekOfCode": "config.num_classes = 85742\nconfig.num_image = 5822653\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_r100", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r100", + "description": "modules.arcface_torch.configs.ms1mv2_r100", + "peekOfCode": "config.num_image = 5822653\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_r100", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r100", + "description": "modules.arcface_torch.configs.ms1mv2_r100", + "peekOfCode": "config.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_r100", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r100", + 
"description": "modules.arcface_torch.configs.ms1mv2_r100", + "peekOfCode": "config.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_r100", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r100", + "description": "modules.arcface_torch.configs.ms1mv2_r100", + "peekOfCode": "config.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_r100", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r50", + "description": "modules.arcface_torch.configs.ms1mv2_r50", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.5, 0.0)\nconfig.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4", + "detail": "modules.arcface_torch.configs.ms1mv2_r50", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r50", + "description": "modules.arcface_torch.configs.ms1mv2_r50", + "peekOfCode": "config.margin_list = (1.0, 0.5, 0.0)\nconfig.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128", + "detail": "modules.arcface_torch.configs.ms1mv2_r50", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r50", + "description": "modules.arcface_torch.configs.ms1mv2_r50", + "peekOfCode": "config.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1", + "detail": "modules.arcface_torch.configs.ms1mv2_r50", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r50", + "description": "modules.arcface_torch.configs.ms1mv2_r50", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.ms1mv2_r50", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r50", + "description": "modules.arcface_torch.configs.ms1mv2_r50", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.ms1mv2_r50", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r50", + "description": "modules.arcface_torch.configs.ms1mv2_r50", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = 
\"/train_tmp/faces_emore\"", + "detail": "modules.arcface_torch.configs.ms1mv2_r50", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r50", + "description": "modules.arcface_torch.configs.ms1mv2_r50", + "peekOfCode": "config.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/faces_emore\"\nconfig.num_classes = 85742", + "detail": "modules.arcface_torch.configs.ms1mv2_r50", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r50", + "description": "modules.arcface_torch.configs.ms1mv2_r50", + "peekOfCode": "config.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/faces_emore\"\nconfig.num_classes = 85742\nconfig.num_image = 5822653", + "detail": "modules.arcface_torch.configs.ms1mv2_r50", + "documentation": {} + }, + { + "label": "config.momentum", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r50", + "description": "modules.arcface_torch.configs.ms1mv2_r50", + "peekOfCode": "config.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/faces_emore\"\nconfig.num_classes = 85742\nconfig.num_image = 5822653\nconfig.num_epoch = 20", + "detail": "modules.arcface_torch.configs.ms1mv2_r50", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r50", + "description": "modules.arcface_torch.configs.ms1mv2_r50", + "peekOfCode": "config.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/faces_emore\"\nconfig.num_classes = 85742\nconfig.num_image = 5822653\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0", + "detail": "modules.arcface_torch.configs.ms1mv2_r50", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r50", + "description": "modules.arcface_torch.configs.ms1mv2_r50", + "peekOfCode": "config.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/faces_emore\"\nconfig.num_classes = 85742\nconfig.num_image = 5822653\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_r50", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r50", + "description": "modules.arcface_torch.configs.ms1mv2_r50", + "peekOfCode": "config.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/faces_emore\"\nconfig.num_classes = 85742\nconfig.num_image = 5822653\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_r50", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r50", + "description": "modules.arcface_torch.configs.ms1mv2_r50", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = 
\"/train_tmp/faces_emore\"\nconfig.num_classes = 85742\nconfig.num_image = 5822653\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_r50", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r50", + "description": "modules.arcface_torch.configs.ms1mv2_r50", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/faces_emore\"\nconfig.num_classes = 85742\nconfig.num_image = 5822653\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_r50", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r50", + "description": "modules.arcface_torch.configs.ms1mv2_r50", + "peekOfCode": "config.rec = \"/train_tmp/faces_emore\"\nconfig.num_classes = 85742\nconfig.num_image = 5822653\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_r50", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r50", + "description": "modules.arcface_torch.configs.ms1mv2_r50", + "peekOfCode": "config.num_classes = 85742\nconfig.num_image = 5822653\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_r50", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r50", + "description": "modules.arcface_torch.configs.ms1mv2_r50", + "peekOfCode": "config.num_image = 5822653\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_r50", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r50", + "description": "modules.arcface_torch.configs.ms1mv2_r50", + "peekOfCode": "config.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_r50", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r50", + "description": "modules.arcface_torch.configs.ms1mv2_r50", + "peekOfCode": "config.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_r50", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv2_r50", + "description": "modules.arcface_torch.configs.ms1mv2_r50", + "peekOfCode": "config.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv2_r50", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_mbf", + "description": "modules.arcface_torch.configs.ms1mv3_mbf", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.5, 0.0)\nconfig.network = \"mbf\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4", + "detail": 
"modules.arcface_torch.configs.ms1mv3_mbf", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_mbf", + "description": "modules.arcface_torch.configs.ms1mv3_mbf", + "peekOfCode": "config.margin_list = (1.0, 0.5, 0.0)\nconfig.network = \"mbf\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128", + "detail": "modules.arcface_torch.configs.ms1mv3_mbf", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_mbf", + "description": "modules.arcface_torch.configs.ms1mv3_mbf", + "peekOfCode": "config.network = \"mbf\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1", + "detail": "modules.arcface_torch.configs.ms1mv3_mbf", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_mbf", + "description": "modules.arcface_torch.configs.ms1mv3_mbf", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.ms1mv3_mbf", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_mbf", + "description": "modules.arcface_torch.configs.ms1mv3_mbf", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.ms1mv3_mbf", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_mbf", + "description": "modules.arcface_torch.configs.ms1mv3_mbf", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/ms1m-retinaface-t1\"", + "detail": "modules.arcface_torch.configs.ms1mv3_mbf", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_mbf", + "description": "modules.arcface_torch.configs.ms1mv3_mbf", + "peekOfCode": "config.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/ms1m-retinaface-t1\"\nconfig.num_classes = 93431", + "detail": "modules.arcface_torch.configs.ms1mv3_mbf", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_mbf", + "description": "modules.arcface_torch.configs.ms1mv3_mbf", + "peekOfCode": "config.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = 
\"/train_tmp/ms1m-retinaface-t1\"\nconfig.num_classes = 93431\nconfig.num_image = 5179510", + "detail": "modules.arcface_torch.configs.ms1mv3_mbf", + "documentation": {} + }, + { + "label": "config.momentum", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_mbf", + "description": "modules.arcface_torch.configs.ms1mv3_mbf", + "peekOfCode": "config.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/ms1m-retinaface-t1\"\nconfig.num_classes = 93431\nconfig.num_image = 5179510\nconfig.num_epoch = 40", + "detail": "modules.arcface_torch.configs.ms1mv3_mbf", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_mbf", + "description": "modules.arcface_torch.configs.ms1mv3_mbf", + "peekOfCode": "config.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/ms1m-retinaface-t1\"\nconfig.num_classes = 93431\nconfig.num_image = 5179510\nconfig.num_epoch = 40\nconfig.warmup_epoch = 0", + "detail": "modules.arcface_torch.configs.ms1mv3_mbf", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_mbf", + "description": "modules.arcface_torch.configs.ms1mv3_mbf", + "peekOfCode": "config.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/ms1m-retinaface-t1\"\nconfig.num_classes = 93431\nconfig.num_image = 5179510\nconfig.num_epoch = 40\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_mbf", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_mbf", + "description": "modules.arcface_torch.configs.ms1mv3_mbf", + "peekOfCode": "config.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/ms1m-retinaface-t1\"\nconfig.num_classes = 93431\nconfig.num_image = 5179510\nconfig.num_epoch = 40\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_mbf", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_mbf", + "description": "modules.arcface_torch.configs.ms1mv3_mbf", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/ms1m-retinaface-t1\"\nconfig.num_classes = 93431\nconfig.num_image = 5179510\nconfig.num_epoch = 40\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_mbf", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_mbf", + "description": "modules.arcface_torch.configs.ms1mv3_mbf", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/ms1m-retinaface-t1\"\nconfig.num_classes = 93431\nconfig.num_image = 5179510\nconfig.num_epoch = 40\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_mbf", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_mbf", + "description": "modules.arcface_torch.configs.ms1mv3_mbf", + "peekOfCode": "config.rec = 
\"/train_tmp/ms1m-retinaface-t1\"\nconfig.num_classes = 93431\nconfig.num_image = 5179510\nconfig.num_epoch = 40\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_mbf", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_mbf", + "description": "modules.arcface_torch.configs.ms1mv3_mbf", + "peekOfCode": "config.num_classes = 93431\nconfig.num_image = 5179510\nconfig.num_epoch = 40\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_mbf", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_mbf", + "description": "modules.arcface_torch.configs.ms1mv3_mbf", + "peekOfCode": "config.num_image = 5179510\nconfig.num_epoch = 40\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_mbf", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_mbf", + "description": "modules.arcface_torch.configs.ms1mv3_mbf", + "peekOfCode": "config.num_epoch = 40\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_mbf", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_mbf", + "description": "modules.arcface_torch.configs.ms1mv3_mbf", + "peekOfCode": "config.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_mbf", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_mbf", + "description": "modules.arcface_torch.configs.ms1mv3_mbf", + "peekOfCode": "config.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_mbf", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r100", + "description": "modules.arcface_torch.configs.ms1mv3_r100", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.5, 0.0)\nconfig.network = \"r100\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4", + "detail": "modules.arcface_torch.configs.ms1mv3_r100", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r100", + "description": "modules.arcface_torch.configs.ms1mv3_r100", + "peekOfCode": "config.margin_list = (1.0, 0.5, 0.0)\nconfig.network = \"r100\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128", + "detail": "modules.arcface_torch.configs.ms1mv3_r100", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r100", + "description": "modules.arcface_torch.configs.ms1mv3_r100", + "peekOfCode": "config.network = \"r100\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = 
True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1", + "detail": "modules.arcface_torch.configs.ms1mv3_r100", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r100", + "description": "modules.arcface_torch.configs.ms1mv3_r100", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.ms1mv3_r100", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r100", + "description": "modules.arcface_torch.configs.ms1mv3_r100", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.ms1mv3_r100", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r100", + "description": "modules.arcface_torch.configs.ms1mv3_r100", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/ms1m-retinaface-t1\"", + "detail": "modules.arcface_torch.configs.ms1mv3_r100", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r100", + "description": "modules.arcface_torch.configs.ms1mv3_r100", + "peekOfCode": "config.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/ms1m-retinaface-t1\"\nconfig.num_classes = 93431", + "detail": "modules.arcface_torch.configs.ms1mv3_r100", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r100", + "description": "modules.arcface_torch.configs.ms1mv3_r100", + "peekOfCode": "config.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/ms1m-retinaface-t1\"\nconfig.num_classes = 93431\nconfig.num_image = 5179510", + "detail": "modules.arcface_torch.configs.ms1mv3_r100", + "documentation": {} + }, + { + "label": "config.momentum", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r100", + "description": "modules.arcface_torch.configs.ms1mv3_r100", + "peekOfCode": "config.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/ms1m-retinaface-t1\"\nconfig.num_classes = 93431\nconfig.num_image = 5179510\nconfig.num_epoch = 20", + "detail": "modules.arcface_torch.configs.ms1mv3_r100", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r100", + "description": "modules.arcface_torch.configs.ms1mv3_r100", + "peekOfCode": "config.weight_decay = 
5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/ms1m-retinaface-t1\"\nconfig.num_classes = 93431\nconfig.num_image = 5179510\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0", + "detail": "modules.arcface_torch.configs.ms1mv3_r100", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r100", + "description": "modules.arcface_torch.configs.ms1mv3_r100", + "peekOfCode": "config.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/ms1m-retinaface-t1\"\nconfig.num_classes = 93431\nconfig.num_image = 5179510\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_r100", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r100", + "description": "modules.arcface_torch.configs.ms1mv3_r100", + "peekOfCode": "config.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/ms1m-retinaface-t1\"\nconfig.num_classes = 93431\nconfig.num_image = 5179510\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_r100", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r100", + "description": "modules.arcface_torch.configs.ms1mv3_r100", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/ms1m-retinaface-t1\"\nconfig.num_classes = 93431\nconfig.num_image = 5179510\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_r100", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r100", + "description": "modules.arcface_torch.configs.ms1mv3_r100", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/ms1m-retinaface-t1\"\nconfig.num_classes = 93431\nconfig.num_image = 5179510\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_r100", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r100", + "description": "modules.arcface_torch.configs.ms1mv3_r100", + "peekOfCode": "config.rec = \"/train_tmp/ms1m-retinaface-t1\"\nconfig.num_classes = 93431\nconfig.num_image = 5179510\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_r100", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r100", + "description": "modules.arcface_torch.configs.ms1mv3_r100", + "peekOfCode": "config.num_classes = 93431\nconfig.num_image = 5179510\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_r100", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r100", + "description": "modules.arcface_torch.configs.ms1mv3_r100", + "peekOfCode": "config.num_image 
= 5179510\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_r100", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r100", + "description": "modules.arcface_torch.configs.ms1mv3_r100", + "peekOfCode": "config.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_r100", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r100", + "description": "modules.arcface_torch.configs.ms1mv3_r100", + "peekOfCode": "config.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_r100", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r100", + "description": "modules.arcface_torch.configs.ms1mv3_r100", + "peekOfCode": "config.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_r100", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r50", + "description": "modules.arcface_torch.configs.ms1mv3_r50", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.5, 0.0)\nconfig.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4", + "detail": "modules.arcface_torch.configs.ms1mv3_r50", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r50", + "description": "modules.arcface_torch.configs.ms1mv3_r50", + "peekOfCode": "config.margin_list = (1.0, 0.5, 0.0)\nconfig.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128", + "detail": "modules.arcface_torch.configs.ms1mv3_r50", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r50", + "description": "modules.arcface_torch.configs.ms1mv3_r50", + "peekOfCode": "config.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1", + "detail": "modules.arcface_torch.configs.ms1mv3_r50", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r50", + "description": "modules.arcface_torch.configs.ms1mv3_r50", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.ms1mv3_r50", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r50", + "description": "modules.arcface_torch.configs.ms1mv3_r50", + "peekOfCode": "config.output = None\nconfig.embedding_size = 
512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.ms1mv3_r50", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r50", + "description": "modules.arcface_torch.configs.ms1mv3_r50", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/ms1m-retinaface-t1\"", + "detail": "modules.arcface_torch.configs.ms1mv3_r50", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r50", + "description": "modules.arcface_torch.configs.ms1mv3_r50", + "peekOfCode": "config.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/ms1m-retinaface-t1\"\nconfig.num_classes = 93431", + "detail": "modules.arcface_torch.configs.ms1mv3_r50", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r50", + "description": "modules.arcface_torch.configs.ms1mv3_r50", + "peekOfCode": "config.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/ms1m-retinaface-t1\"\nconfig.num_classes = 93431\nconfig.num_image = 5179510", + "detail": "modules.arcface_torch.configs.ms1mv3_r50", + "documentation": {} + }, + { + "label": "config.momentum", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r50", + "description": "modules.arcface_torch.configs.ms1mv3_r50", + "peekOfCode": "config.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/ms1m-retinaface-t1\"\nconfig.num_classes = 93431\nconfig.num_image = 5179510\nconfig.num_epoch = 20", + "detail": "modules.arcface_torch.configs.ms1mv3_r50", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r50", + "description": "modules.arcface_torch.configs.ms1mv3_r50", + "peekOfCode": "config.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/ms1m-retinaface-t1\"\nconfig.num_classes = 93431\nconfig.num_image = 5179510\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0", + "detail": "modules.arcface_torch.configs.ms1mv3_r50", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r50", + "description": "modules.arcface_torch.configs.ms1mv3_r50", + "peekOfCode": "config.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/ms1m-retinaface-t1\"\nconfig.num_classes = 93431\nconfig.num_image = 5179510\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_r50", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + 
"importPath": "modules.arcface_torch.configs.ms1mv3_r50", + "description": "modules.arcface_torch.configs.ms1mv3_r50", + "peekOfCode": "config.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/ms1m-retinaface-t1\"\nconfig.num_classes = 93431\nconfig.num_image = 5179510\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_r50", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r50", + "description": "modules.arcface_torch.configs.ms1mv3_r50", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/ms1m-retinaface-t1\"\nconfig.num_classes = 93431\nconfig.num_image = 5179510\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_r50", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r50", + "description": "modules.arcface_torch.configs.ms1mv3_r50", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/ms1m-retinaface-t1\"\nconfig.num_classes = 93431\nconfig.num_image = 5179510\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_r50", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r50", + "description": "modules.arcface_torch.configs.ms1mv3_r50", + "peekOfCode": "config.rec = \"/train_tmp/ms1m-retinaface-t1\"\nconfig.num_classes = 93431\nconfig.num_image = 5179510\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_r50", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r50", + "description": "modules.arcface_torch.configs.ms1mv3_r50", + "peekOfCode": "config.num_classes = 93431\nconfig.num_image = 5179510\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_r50", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r50", + "description": "modules.arcface_torch.configs.ms1mv3_r50", + "peekOfCode": "config.num_image = 5179510\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_r50", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r50", + "description": "modules.arcface_torch.configs.ms1mv3_r50", + "peekOfCode": "config.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_r50", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r50", + "description": "modules.arcface_torch.configs.ms1mv3_r50", + "peekOfCode": "config.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_r50", + "documentation": {} + }, + { + "label": 
"config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.ms1mv3_r50", + "description": "modules.arcface_torch.configs.ms1mv3_r50", + "peekOfCode": "config.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.ms1mv3_r50", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50", + "peekOfCode": "config.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50", + "peekOfCode": "config.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 
2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50", + "peekOfCode": "config.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_Conflict\"", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50", + "documentation": {} + }, + { + "label": "config.interclass_filtering_threshold", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50", + "peekOfCode": "config.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_Conflict\"\nconfig.num_classes = 1017970", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50", + "peekOfCode": "config.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_Conflict\"\nconfig.num_classes = 1017970\nconfig.num_image = 12720066", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50", + "peekOfCode": "config.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_Conflict\"\nconfig.num_classes = 1017970\nconfig.num_image = 12720066\nconfig.num_epoch = 20", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50", + "peekOfCode": "config.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_Conflict\"\nconfig.num_classes = 1017970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50", + "documentation": {} + }, + { + "label": "config.optimizer", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50", + "peekOfCode": "config.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_Conflict\"\nconfig.num_classes = 1017970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": 
"modules.arcface_torch.configs.wf12m_conflict_r50", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50", + "peekOfCode": "config.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_Conflict\"\nconfig.num_classes = 1017970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_Conflict\"\nconfig.num_classes = 1017970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_Conflict\"\nconfig.num_classes = 1017970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50", + "peekOfCode": "config.rec = \"/train_tmp/WebFace12M_Conflict\"\nconfig.num_classes = 1017970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50", + "peekOfCode": "config.num_classes = 1017970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50", + "peekOfCode": "config.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50", + "peekOfCode": "config.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50", + 
"description": "modules.arcface_torch.configs.wf12m_conflict_r50", + "peekOfCode": "config.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50", + "peekOfCode": "config.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.interclass_filtering_threshold = 0.4\nconfig.fp16 = True\nconfig.weight_decay = 5e-4", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "peekOfCode": "config.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.interclass_filtering_threshold = 0.4\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "peekOfCode": "config.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.interclass_filtering_threshold = 0.4\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.interclass_filtering_threshold = 0.4\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.interclass_filtering_threshold = 0.4\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000", + "detail": 
"modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.interclass_filtering_threshold = 0.4\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "peekOfCode": "config.sample_rate = 0.3\nconfig.interclass_filtering_threshold = 0.4\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_Conflict\"", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "documentation": {} + }, + { + "label": "config.interclass_filtering_threshold", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "peekOfCode": "config.interclass_filtering_threshold = 0.4\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_Conflict\"\nconfig.num_classes = 1017970", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "peekOfCode": "config.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_Conflict\"\nconfig.num_classes = 1017970\nconfig.num_image = 12720066", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "peekOfCode": "config.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_Conflict\"\nconfig.num_classes = 1017970\nconfig.num_image = 12720066\nconfig.num_epoch = 20", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "peekOfCode": "config.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = 
\"/train_tmp/WebFace12M_Conflict\"\nconfig.num_classes = 1017970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "documentation": {} + }, + { + "label": "config.optimizer", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "peekOfCode": "config.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_Conflict\"\nconfig.num_classes = 1017970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "peekOfCode": "config.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_Conflict\"\nconfig.num_classes = 1017970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_Conflict\"\nconfig.num_classes = 1017970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_Conflict\"\nconfig.num_classes = 1017970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "peekOfCode": "config.rec = \"/train_tmp/WebFace12M_Conflict\"\nconfig.num_classes = 1017970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "peekOfCode": "config.num_classes = 1017970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = 
[]", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "peekOfCode": "config.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "peekOfCode": "config.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "peekOfCode": "config.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "description": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "peekOfCode": "config.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_conflict_r50_pfc03_filter04", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.1\nconfig.interclass_filtering_threshold = 0.4\nconfig.fp16 = True\nconfig.weight_decay = 5e-4", + "detail": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "peekOfCode": "config.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.1\nconfig.interclass_filtering_threshold = 0.4\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128", + "detail": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "peekOfCode": "config.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.1\nconfig.interclass_filtering_threshold = 0.4\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"", + "detail": 
"modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.1\nconfig.interclass_filtering_threshold = 0.4\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1", + "detail": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.1\nconfig.interclass_filtering_threshold = 0.4\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 0.1\nconfig.interclass_filtering_threshold = 0.4\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "peekOfCode": "config.sample_rate = 0.1\nconfig.interclass_filtering_threshold = 0.4\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_FLIP40\"", + "detail": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "documentation": {} + }, + { + "label": "config.interclass_filtering_threshold", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "peekOfCode": "config.interclass_filtering_threshold = 0.4\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_FLIP40\"\nconfig.num_classes = 617970", + "detail": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "peekOfCode": "config.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_FLIP40\"\nconfig.num_classes = 
617970\nconfig.num_image = 12720066", + "detail": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "peekOfCode": "config.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_FLIP40\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20", + "detail": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "peekOfCode": "config.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_FLIP40\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10", + "detail": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "documentation": {} + }, + { + "label": "config.optimizer", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "peekOfCode": "config.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_FLIP40\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "peekOfCode": "config.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_FLIP40\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_FLIP40\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_FLIP40\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = 
[]", + "detail": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "peekOfCode": "config.rec = \"/train_tmp/WebFace12M_FLIP40\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "peekOfCode": "config.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "peekOfCode": "config.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "peekOfCode": "config.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "peekOfCode": "config.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "peekOfCode": "config.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_flip_pfc01_filter04_r50", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_r50", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4", + "detail": "modules.arcface_torch.configs.wf12m_flip_r50", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_r50", + "peekOfCode": "config.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r50\"\nconfig.resume = 
False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128", + "detail": "modules.arcface_torch.configs.wf12m_flip_r50", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_r50", + "peekOfCode": "config.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"", + "detail": "modules.arcface_torch.configs.wf12m_flip_r50", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_r50", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1", + "detail": "modules.arcface_torch.configs.wf12m_flip_r50", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_r50", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.wf12m_flip_r50", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_r50", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.wf12m_flip_r50", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_r50", + "peekOfCode": "config.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_FLIP40\"", + "detail": "modules.arcface_torch.configs.wf12m_flip_r50", + "documentation": {} + }, + { + "label": "config.interclass_filtering_threshold", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_r50", + "peekOfCode": "config.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_FLIP40\"\nconfig.num_classes = 617970", + "detail": 
"modules.arcface_torch.configs.wf12m_flip_r50", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_r50", + "peekOfCode": "config.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_FLIP40\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066", + "detail": "modules.arcface_torch.configs.wf12m_flip_r50", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_r50", + "peekOfCode": "config.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_FLIP40\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20", + "detail": "modules.arcface_torch.configs.wf12m_flip_r50", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_r50", + "peekOfCode": "config.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_FLIP40\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10", + "detail": "modules.arcface_torch.configs.wf12m_flip_r50", + "documentation": {} + }, + { + "label": "config.optimizer", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_r50", + "peekOfCode": "config.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_FLIP40\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_flip_r50", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_r50", + "peekOfCode": "config.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_FLIP40\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_flip_r50", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_r50", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_FLIP40\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_flip_r50", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_r50", + 
"peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/WebFace12M_FLIP40\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_flip_r50", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_r50", + "peekOfCode": "config.rec = \"/train_tmp/WebFace12M_FLIP40\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_flip_r50", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_r50", + "peekOfCode": "config.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_flip_r50", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_r50", + "peekOfCode": "config.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_flip_r50", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_r50", + "peekOfCode": "config.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_flip_r50", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_r50", + "peekOfCode": "config.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_flip_r50", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_flip_r50", + "description": "modules.arcface_torch.configs.wf12m_flip_r50", + "peekOfCode": "config.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_flip_r50", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_mbf", + "description": "modules.arcface_torch.configs.wf12m_mbf", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"mbf\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 1e-4", + "detail": "modules.arcface_torch.configs.wf12m_mbf", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_mbf", + "description": "modules.arcface_torch.configs.wf12m_mbf", + "peekOfCode": "config.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"mbf\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 
1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128", + "detail": "modules.arcface_torch.configs.wf12m_mbf", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_mbf", + "description": "modules.arcface_torch.configs.wf12m_mbf", + "peekOfCode": "config.network = \"mbf\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"", + "detail": "modules.arcface_torch.configs.wf12m_mbf", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_mbf", + "description": "modules.arcface_torch.configs.wf12m_mbf", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1", + "detail": "modules.arcface_torch.configs.wf12m_mbf", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_mbf", + "description": "modules.arcface_torch.configs.wf12m_mbf", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.wf12m_mbf", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_mbf", + "description": "modules.arcface_torch.configs.wf12m_mbf", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.wf12m_mbf", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_mbf", + "description": "modules.arcface_torch.configs.wf12m_mbf", + "peekOfCode": "config.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"", + "detail": "modules.arcface_torch.configs.wf12m_mbf", + "documentation": {} + }, + { + "label": "config.interclass_filtering_threshold", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_mbf", + "description": "modules.arcface_torch.configs.wf12m_mbf", + "peekOfCode": "config.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970", + "detail": "modules.arcface_torch.configs.wf12m_mbf", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_mbf", + "description": 
"modules.arcface_torch.configs.wf12m_mbf", + "peekOfCode": "config.fp16 = True\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066", + "detail": "modules.arcface_torch.configs.wf12m_mbf", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_mbf", + "description": "modules.arcface_torch.configs.wf12m_mbf", + "peekOfCode": "config.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20", + "detail": "modules.arcface_torch.configs.wf12m_mbf", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_mbf", + "description": "modules.arcface_torch.configs.wf12m_mbf", + "peekOfCode": "config.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0", + "detail": "modules.arcface_torch.configs.wf12m_mbf", + "documentation": {} + }, + { + "label": "config.optimizer", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_mbf", + "description": "modules.arcface_torch.configs.wf12m_mbf", + "peekOfCode": "config.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_mbf", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_mbf", + "description": "modules.arcface_torch.configs.wf12m_mbf", + "peekOfCode": "config.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_mbf", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_mbf", + "description": "modules.arcface_torch.configs.wf12m_mbf", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_mbf", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_mbf", + "description": "modules.arcface_torch.configs.wf12m_mbf", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_mbf", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_mbf", + "description": 
"modules.arcface_torch.configs.wf12m_mbf", + "peekOfCode": "config.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_mbf", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_mbf", + "description": "modules.arcface_torch.configs.wf12m_mbf", + "peekOfCode": "config.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_mbf", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_mbf", + "description": "modules.arcface_torch.configs.wf12m_mbf", + "peekOfCode": "config.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_mbf", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_mbf", + "description": "modules.arcface_torch.configs.wf12m_mbf", + "peekOfCode": "config.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_mbf", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_mbf", + "description": "modules.arcface_torch.configs.wf12m_mbf", + "peekOfCode": "config.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_mbf", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_mbf", + "description": "modules.arcface_torch.configs.wf12m_mbf", + "peekOfCode": "config.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_mbf", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "description": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r100\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.2\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4", + "detail": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "description": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "peekOfCode": "config.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r100\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.2\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128", + "detail": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "description": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "peekOfCode": "config.network = \"r100\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.2\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = 
True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"", + "detail": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "description": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.2\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1", + "detail": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "description": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.2\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "description": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 0.2\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "description": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "peekOfCode": "config.sample_rate = 0.2\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"", + "detail": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "documentation": {} + }, + { + "label": "config.interclass_filtering_threshold", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "description": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "peekOfCode": "config.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970", + "detail": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "description": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "peekOfCode": "config.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066", + "detail": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "documentation": {} + }, + { + "label": "config.weight_decay", + 
"kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "description": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "peekOfCode": "config.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20", + "detail": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "description": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "peekOfCode": "config.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0", + "detail": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "documentation": {} + }, + { + "label": "config.optimizer", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "description": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "peekOfCode": "config.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "description": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "peekOfCode": "config.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "description": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "description": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "description": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "peekOfCode": "config.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "documentation": {} + }, + 
{ + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "description": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "peekOfCode": "config.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "description": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "peekOfCode": "config.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "description": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "peekOfCode": "config.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "description": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "peekOfCode": "config.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "description": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "peekOfCode": "config.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_pfc02_r100", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r100", + "description": "modules.arcface_torch.configs.wf12m_r100", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r100\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4", + "detail": "modules.arcface_torch.configs.wf12m_r100", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r100", + "description": "modules.arcface_torch.configs.wf12m_r100", + "peekOfCode": "config.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r100\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128", + "detail": "modules.arcface_torch.configs.wf12m_r100", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r100", + "description": "modules.arcface_torch.configs.wf12m_r100", + "peekOfCode": "config.network = \"r100\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"", + "detail": "modules.arcface_torch.configs.wf12m_r100", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": 
"modules.arcface_torch.configs.wf12m_r100", + "description": "modules.arcface_torch.configs.wf12m_r100", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1", + "detail": "modules.arcface_torch.configs.wf12m_r100", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r100", + "description": "modules.arcface_torch.configs.wf12m_r100", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.wf12m_r100", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r100", + "description": "modules.arcface_torch.configs.wf12m_r100", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.wf12m_r100", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r100", + "description": "modules.arcface_torch.configs.wf12m_r100", + "peekOfCode": "config.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"", + "detail": "modules.arcface_torch.configs.wf12m_r100", + "documentation": {} + }, + { + "label": "config.interclass_filtering_threshold", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r100", + "description": "modules.arcface_torch.configs.wf12m_r100", + "peekOfCode": "config.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970", + "detail": "modules.arcface_torch.configs.wf12m_r100", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r100", + "description": "modules.arcface_torch.configs.wf12m_r100", + "peekOfCode": "config.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066", + "detail": "modules.arcface_torch.configs.wf12m_r100", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r100", + "description": "modules.arcface_torch.configs.wf12m_r100", + "peekOfCode": "config.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 
617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20", + "detail": "modules.arcface_torch.configs.wf12m_r100", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r100", + "description": "modules.arcface_torch.configs.wf12m_r100", + "peekOfCode": "config.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0", + "detail": "modules.arcface_torch.configs.wf12m_r100", + "documentation": {} + }, + { + "label": "config.optimizer", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r100", + "description": "modules.arcface_torch.configs.wf12m_r100", + "peekOfCode": "config.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_r100", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r100", + "description": "modules.arcface_torch.configs.wf12m_r100", + "peekOfCode": "config.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_r100", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r100", + "description": "modules.arcface_torch.configs.wf12m_r100", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_r100", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r100", + "description": "modules.arcface_torch.configs.wf12m_r100", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_r100", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r100", + "description": "modules.arcface_torch.configs.wf12m_r100", + "peekOfCode": "config.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_r100", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r100", + "description": "modules.arcface_torch.configs.wf12m_r100", + "peekOfCode": "config.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_r100", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": 
"modules.arcface_torch.configs.wf12m_r100", + "description": "modules.arcface_torch.configs.wf12m_r100", + "peekOfCode": "config.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_r100", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r100", + "description": "modules.arcface_torch.configs.wf12m_r100", + "peekOfCode": "config.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_r100", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r100", + "description": "modules.arcface_torch.configs.wf12m_r100", + "peekOfCode": "config.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_r100", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r100", + "description": "modules.arcface_torch.configs.wf12m_r100", + "peekOfCode": "config.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_r100", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r50", + "description": "modules.arcface_torch.configs.wf12m_r50", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4", + "detail": "modules.arcface_torch.configs.wf12m_r50", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r50", + "description": "modules.arcface_torch.configs.wf12m_r50", + "peekOfCode": "config.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128", + "detail": "modules.arcface_torch.configs.wf12m_r50", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r50", + "description": "modules.arcface_torch.configs.wf12m_r50", + "peekOfCode": "config.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"", + "detail": "modules.arcface_torch.configs.wf12m_r50", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r50", + "description": "modules.arcface_torch.configs.wf12m_r50", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1", + "detail": "modules.arcface_torch.configs.wf12m_r50", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r50", + "description": 
"modules.arcface_torch.configs.wf12m_r50", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.wf12m_r50", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r50", + "description": "modules.arcface_torch.configs.wf12m_r50", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.wf12m_r50", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r50", + "description": "modules.arcface_torch.configs.wf12m_r50", + "peekOfCode": "config.sample_rate = 1.0\nconfig.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"", + "detail": "modules.arcface_torch.configs.wf12m_r50", + "documentation": {} + }, + { + "label": "config.interclass_filtering_threshold", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r50", + "description": "modules.arcface_torch.configs.wf12m_r50", + "peekOfCode": "config.interclass_filtering_threshold = 0\nconfig.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970", + "detail": "modules.arcface_torch.configs.wf12m_r50", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r50", + "description": "modules.arcface_torch.configs.wf12m_r50", + "peekOfCode": "config.fp16 = True\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066", + "detail": "modules.arcface_torch.configs.wf12m_r50", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r50", + "description": "modules.arcface_torch.configs.wf12m_r50", + "peekOfCode": "config.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20", + "detail": "modules.arcface_torch.configs.wf12m_r50", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r50", + "description": "modules.arcface_torch.configs.wf12m_r50", + "peekOfCode": "config.batch_size = 128\nconfig.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0", + "detail": 
"modules.arcface_torch.configs.wf12m_r50", + "documentation": {} + }, + { + "label": "config.optimizer", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r50", + "description": "modules.arcface_torch.configs.wf12m_r50", + "peekOfCode": "config.optimizer = \"sgd\"\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_r50", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r50", + "description": "modules.arcface_torch.configs.wf12m_r50", + "peekOfCode": "config.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_r50", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r50", + "description": "modules.arcface_torch.configs.wf12m_r50", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_r50", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r50", + "description": "modules.arcface_torch.configs.wf12m_r50", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_r50", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r50", + "description": "modules.arcface_torch.configs.wf12m_r50", + "peekOfCode": "config.rec = \"/train_tmp/WebFace12M\"\nconfig.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_r50", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r50", + "description": "modules.arcface_torch.configs.wf12m_r50", + "peekOfCode": "config.num_classes = 617970\nconfig.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_r50", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r50", + "description": "modules.arcface_torch.configs.wf12m_r50", + "peekOfCode": "config.num_image = 12720066\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_r50", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r50", + "description": "modules.arcface_torch.configs.wf12m_r50", + "peekOfCode": "config.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_r50", + 
"documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r50", + "description": "modules.arcface_torch.configs.wf12m_r50", + "peekOfCode": "config.warmup_epoch = 0\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_r50", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf12m_r50", + "description": "modules.arcface_torch.configs.wf12m_r50", + "peekOfCode": "config.val_targets = []", + "detail": "modules.arcface_torch.configs.wf12m_r50", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r100\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4", + "detail": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "peekOfCode": "config.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r100\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 512", + "detail": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "peekOfCode": "config.network = \"r100\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 512\nconfig.lr = 0.4", + "detail": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 512\nconfig.lr = 0.4\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 512\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "description": 
"modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 512\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"", + "detail": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "peekOfCode": "config.sample_rate = 0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 512\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906", + "detail": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "peekOfCode": "config.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 512\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557", + "detail": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "documentation": {} + }, + { + "label": "config.momentum", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "peekOfCode": "config.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 512\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20", + "detail": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "peekOfCode": "config.weight_decay = 5e-4\nconfig.batch_size = 512\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10", + "detail": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "peekOfCode": "config.batch_size = 512\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "peekOfCode": 
"config.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "peekOfCode": "config.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "peekOfCode": "config.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "peekOfCode": "config.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "peekOfCode": "config.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": 
"modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "peekOfCode": "config.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "peekOfCode": "config.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc0008_32gpu_r100", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "description": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"mbf\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.2\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4", + "detail": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "description": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "peekOfCode": "config.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"mbf\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.2\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 512", + "detail": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "description": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "peekOfCode": "config.network = \"mbf\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.2\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 512\nconfig.lr = 0.4", + "detail": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "description": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.2\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 512\nconfig.lr = 0.4\nconfig.verbose = 10000", + "detail": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "description": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.2\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 512\nconfig.lr = 0.4\nconfig.verbose = 10000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "documentation": {} + }, + { + "label": 
"config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "description": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 0.2\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 512\nconfig.lr = 0.4\nconfig.verbose = 10000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"", + "detail": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "description": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "peekOfCode": "config.sample_rate = 0.2\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 512\nconfig.lr = 0.4\nconfig.verbose = 10000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906", + "detail": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "description": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "peekOfCode": "config.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 512\nconfig.lr = 0.4\nconfig.verbose = 10000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557", + "detail": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "documentation": {} + }, + { + "label": "config.momentum", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "description": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "peekOfCode": "config.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 512\nconfig.lr = 0.4\nconfig.verbose = 10000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20", + "detail": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "description": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "peekOfCode": "config.weight_decay = 1e-4\nconfig.batch_size = 512\nconfig.lr = 0.4\nconfig.verbose = 10000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = 2", + "detail": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "description": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "peekOfCode": "config.batch_size = 512\nconfig.lr = 0.4\nconfig.verbose = 10000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = 2\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": 
"modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "description": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "peekOfCode": "config.lr = 0.4\nconfig.verbose = 10000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = 2\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "description": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "peekOfCode": "config.verbose = 10000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = 2\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "description": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = 2\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "description": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "peekOfCode": "config.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = 2\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "description": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "peekOfCode": "config.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = 2\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "description": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "peekOfCode": "config.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = 2\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "description": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "peekOfCode": "config.num_epoch = 20\nconfig.warmup_epoch = 2\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "description": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "peekOfCode": "config.warmup_epoch = 
2\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "description": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "peekOfCode": "config.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_mbf_bs8k", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_r100", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r100\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.2\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4", + "detail": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_r100", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_r100", + "peekOfCode": "config.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r100\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.2\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 256", + "detail": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_r100", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_r100", + "peekOfCode": "config.network = \"r100\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.2\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 256\nconfig.lr = 0.3", + "detail": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_r100", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_r100", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.2\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 256\nconfig.lr = 0.3\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_r100", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_r100", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.2\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 256\nconfig.lr = 0.3\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_r100", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc02_16gpus_r100", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 0.2\nconfig.fp16 = True\nconfig.momentum = 
= edict()\nconfig.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r100\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "peekOfCode": "config.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r100\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "peekOfCode": "config.network = \"r100\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "peekOfCode": "config.sample_rate = 0.3\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 
2059906", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "peekOfCode": "config.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "documentation": {} + }, + { + "label": "config.momentum", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "peekOfCode": "config.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "peekOfCode": "config.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "peekOfCode": "config.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "peekOfCode": "config.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": 
"modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "peekOfCode": "config.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "peekOfCode": "config.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "peekOfCode": "config.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "peekOfCode": "config.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "peekOfCode": "config.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "peekOfCode": "config.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r100", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "peekOfCode": "config = 
edict()\nconfig.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r18\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "peekOfCode": "config.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r18\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "peekOfCode": "config.network = \"r18\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "peekOfCode": "config.sample_rate = 0.3\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906", + "detail": 
"modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "peekOfCode": "config.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "documentation": {} + }, + { + "label": "config.momentum", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "peekOfCode": "config.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "peekOfCode": "config.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "peekOfCode": "config.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "peekOfCode": "config.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "documentation": {} + }, 
+ { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "peekOfCode": "config.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "peekOfCode": "config.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "peekOfCode": "config.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "peekOfCode": "config.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "peekOfCode": "config.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "peekOfCode": "config.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r18", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r200\"\nconfig.resume = False\nconfig.output = 
None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "peekOfCode": "config.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r200\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "peekOfCode": "config.network = \"r200\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "peekOfCode": "config.sample_rate = 0.3\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "documentation": {} + }, + { + 
"label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "peekOfCode": "config.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "documentation": {} + }, + { + "label": "config.momentum", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "peekOfCode": "config.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "peekOfCode": "config.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "peekOfCode": "config.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "peekOfCode": "config.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": 
"modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "peekOfCode": "config.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "peekOfCode": "config.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "peekOfCode": "config.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "peekOfCode": "config.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "peekOfCode": "config.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "peekOfCode": "config.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r200", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 
512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "peekOfCode": "config.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "peekOfCode": "config.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "peekOfCode": "config.sample_rate = 0.3\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": 
"modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "peekOfCode": "config.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "documentation": {} + }, + { + "label": "config.momentum", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "peekOfCode": "config.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "peekOfCode": "config.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "peekOfCode": "config.batch_size = 128\nconfig.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "peekOfCode": "config.lr = 0.4\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "description": 
"modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "peekOfCode": "config.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "peekOfCode": "config.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "peekOfCode": "config.num_image = 42474557\nconfig.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "peekOfCode": "config.num_epoch = 20\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "peekOfCode": "config.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "description": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "peekOfCode": "config.val_targets = [\"lfw\", \"cfp_fp\", \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_32gpu_r50", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"vit_b_dp005_mask_005\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 
0.1\nconfig.batch_size = 384", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "peekOfCode": "config.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"vit_b_dp005_mask_005\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "peekOfCode": "config.network = \"vit_b_dp005_mask_005\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "peekOfCode": "config.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = 
\"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "peekOfCode": "config.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "peekOfCode": "config.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "peekOfCode": "config.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "documentation": {} + }, + { + "label": "config.optimizer", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "peekOfCode": "config.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "peekOfCode": "config.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 
42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "peekOfCode": "config.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "peekOfCode": "config.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "peekOfCode": "config.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "peekOfCode": "config.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "peekOfCode": "config.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "peekOfCode": "config.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_b", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "description": 
"modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"vit_l_dp005_mask_005\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "peekOfCode": "config.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"vit_l_dp005_mask_005\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "peekOfCode": "config.network = \"vit_l_dp005_mask_005\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + 
"description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "peekOfCode": "config.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "peekOfCode": "config.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "peekOfCode": "config.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "peekOfCode": "config.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "documentation": {} + }, + { + "label": "config.optimizer", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "peekOfCode": "config.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "peekOfCode": "config.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": 
"modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "peekOfCode": "config.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "peekOfCode": "config.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "peekOfCode": "config.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "peekOfCode": "config.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "peekOfCode": "config.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "description": 
"modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "peekOfCode": "config.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_l", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"vit_s_dp005_mask_0\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "peekOfCode": "config.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"vit_s_dp005_mask_0\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "peekOfCode": "config.network = \"vit_s_dp005_mask_0\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = 
\"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "peekOfCode": "config.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "peekOfCode": "config.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "peekOfCode": "config.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "peekOfCode": "config.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "documentation": {} + }, + { + "label": "config.optimizer", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "peekOfCode": "config.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "peekOfCode": "config.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = 
\"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "peekOfCode": "config.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "peekOfCode": "config.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "peekOfCode": "config.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "peekOfCode": "config.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "peekOfCode": "config.warmup_epoch = 
config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "peekOfCode": "config.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_s", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"vit_t_dp005_mask0\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "peekOfCode": "config.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"vit_t_dp005_mask0\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "peekOfCode": "config.network = \"vit_t_dp005_mask0\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": 
"modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "peekOfCode": "config.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "peekOfCode": "config.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "peekOfCode": "config.weight_decay = 0.1\nconfig.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "peekOfCode": "config.batch_size = 384\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "documentation": {} + }, + { + "label": "config.optimizer", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "peekOfCode": "config.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "documentation": {} 
+ }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "peekOfCode": "config.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "peekOfCode": "config.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "peekOfCode": "config.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "peekOfCode": "config.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "peekOfCode": "config.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": 
"modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "peekOfCode": "config.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "peekOfCode": "config.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_64gpu_vit_t", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"vit_b_dp005_mask_005\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 256", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "peekOfCode": "config.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"vit_b_dp005_mask_005\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 256\nconfig.gradient_acc = 12 # total batchsize is 256 * 12", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "peekOfCode": "config.network = \"vit_b_dp005_mask_005\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 256\nconfig.gradient_acc = 12 # total batchsize is 256 * 12\nconfig.optimizer = \"adamw\"", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 256\nconfig.gradient_acc = 12 # total batchsize is 256 * 12\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "peekOfCode": 
"config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 256\nconfig.gradient_acc = 12 # total batchsize is 256 * 12\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 256\nconfig.gradient_acc = 12 # total batchsize is 256 * 12\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "peekOfCode": "config.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 256\nconfig.gradient_acc = 12 # total batchsize is 256 * 12\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "peekOfCode": "config.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 256\nconfig.gradient_acc = 12 # total batchsize is 256 * 12\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "peekOfCode": "config.weight_decay = 0.1\nconfig.batch_size = 256\nconfig.gradient_acc = 12 # total batchsize is 256 * 12\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "peekOfCode": "config.batch_size = 256\nconfig.gradient_acc = 12 # total batchsize is 256 * 12\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "documentation": {} + }, + { + "label": "config.gradient_acc", + "kind": 5, + "importPath": 
"modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "peekOfCode": "config.gradient_acc = 12 # total batchsize is 256 * 12\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "documentation": {} + }, + { + "label": "config.optimizer", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "peekOfCode": "config.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "peekOfCode": "config.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "peekOfCode": "config.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "description": 
"modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "peekOfCode": "config.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "peekOfCode": "config.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "peekOfCode": "config.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "peekOfCode": "config.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "peekOfCode": "config.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_b", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"vit_t_dp005_mask0\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 512", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "peekOfCode": "config.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"vit_t_dp005_mask0\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 512\nconfig.optimizer = \"adamw\"", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "peekOfCode": "config.network = \"vit_t_dp005_mask0\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = 
True\nconfig.weight_decay = 0.1\nconfig.batch_size = 512\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 512\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 512\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 512\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "peekOfCode": "config.sample_rate = 0.3\nconfig.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 512\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "peekOfCode": "config.fp16 = True\nconfig.weight_decay = 0.1\nconfig.batch_size = 512\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "peekOfCode": "config.weight_decay = 0.1\nconfig.batch_size = 512\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = 
\"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "peekOfCode": "config.batch_size = 512\nconfig.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "documentation": {} + }, + { + "label": "config.optimizer", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "peekOfCode": "config.optimizer = \"adamw\"\nconfig.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "peekOfCode": "config.lr = 0.001\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "peekOfCode": "config.rec = \"/train_tmp/WebFace42M\"\nconfig.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": 
"modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "peekOfCode": "config.num_classes = 2059906\nconfig.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "peekOfCode": "config.num_image = 42474557\nconfig.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "peekOfCode": "config.num_epoch = 40\nconfig.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "peekOfCode": "config.warmup_epoch = config.num_epoch // 10\nconfig.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "description": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "peekOfCode": "config.val_targets = []", + "detail": "modules.arcface_torch.configs.wf42m_pfc03_40epoch_8gpu_vit_t", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_mbf", + "description": "modules.arcface_torch.configs.wf4m_mbf", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"mbf\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4", + "detail": "modules.arcface_torch.configs.wf4m_mbf", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_mbf", + "description": "modules.arcface_torch.configs.wf4m_mbf", + "peekOfCode": "config.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"mbf\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128", + "detail": "modules.arcface_torch.configs.wf4m_mbf", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_mbf", + "description": "modules.arcface_torch.configs.wf4m_mbf", + "peekOfCode": "config.network = \"mbf\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 
1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1", + "detail": "modules.arcface_torch.configs.wf4m_mbf", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_mbf", + "description": "modules.arcface_torch.configs.wf4m_mbf", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.wf4m_mbf", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_mbf", + "description": "modules.arcface_torch.configs.wf4m_mbf", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.wf4m_mbf", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_mbf", + "description": "modules.arcface_torch.configs.wf4m_mbf", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace4M\"", + "detail": "modules.arcface_torch.configs.wf4m_mbf", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_mbf", + "description": "modules.arcface_torch.configs.wf4m_mbf", + "peekOfCode": "config.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace4M\"\nconfig.num_classes = 205990", + "detail": "modules.arcface_torch.configs.wf4m_mbf", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_mbf", + "description": "modules.arcface_torch.configs.wf4m_mbf", + "peekOfCode": "config.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace4M\"\nconfig.num_classes = 205990\nconfig.num_image = 4235242", + "detail": "modules.arcface_torch.configs.wf4m_mbf", + "documentation": {} + }, + { + "label": "config.momentum", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_mbf", + "description": "modules.arcface_torch.configs.wf4m_mbf", + "peekOfCode": "config.momentum = 0.9\nconfig.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace4M\"\nconfig.num_classes = 205990\nconfig.num_image = 4235242\nconfig.num_epoch = 20", + "detail": "modules.arcface_torch.configs.wf4m_mbf", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_mbf", + "description": "modules.arcface_torch.configs.wf4m_mbf", + "peekOfCode": "config.weight_decay = 1e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = 
False\nconfig.rec = \"/train_tmp/WebFace4M\"\nconfig.num_classes = 205990\nconfig.num_image = 4235242\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0", + "detail": "modules.arcface_torch.configs.wf4m_mbf", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_mbf", + "description": "modules.arcface_torch.configs.wf4m_mbf", + "peekOfCode": "config.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace4M\"\nconfig.num_classes = 205990\nconfig.num_image = 4235242\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_mbf", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_mbf", + "description": "modules.arcface_torch.configs.wf4m_mbf", + "peekOfCode": "config.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace4M\"\nconfig.num_classes = 205990\nconfig.num_image = 4235242\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_mbf", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_mbf", + "description": "modules.arcface_torch.configs.wf4m_mbf", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace4M\"\nconfig.num_classes = 205990\nconfig.num_image = 4235242\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_mbf", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_mbf", + "description": "modules.arcface_torch.configs.wf4m_mbf", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/WebFace4M\"\nconfig.num_classes = 205990\nconfig.num_image = 4235242\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_mbf", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_mbf", + "description": "modules.arcface_torch.configs.wf4m_mbf", + "peekOfCode": "config.rec = \"/train_tmp/WebFace4M\"\nconfig.num_classes = 205990\nconfig.num_image = 4235242\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_mbf", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_mbf", + "description": "modules.arcface_torch.configs.wf4m_mbf", + "peekOfCode": "config.num_classes = 205990\nconfig.num_image = 4235242\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_mbf", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_mbf", + "description": "modules.arcface_torch.configs.wf4m_mbf", + "peekOfCode": "config.num_image = 4235242\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_mbf", + "documentation": {} + }, 
+ { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_mbf", + "description": "modules.arcface_torch.configs.wf4m_mbf", + "peekOfCode": "config.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_mbf", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_mbf", + "description": "modules.arcface_torch.configs.wf4m_mbf", + "peekOfCode": "config.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_mbf", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_mbf", + "description": "modules.arcface_torch.configs.wf4m_mbf", + "peekOfCode": "config.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_mbf", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r100", + "description": "modules.arcface_torch.configs.wf4m_r100", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r100\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4", + "detail": "modules.arcface_torch.configs.wf4m_r100", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r100", + "description": "modules.arcface_torch.configs.wf4m_r100", + "peekOfCode": "config.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r100\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128", + "detail": "modules.arcface_torch.configs.wf4m_r100", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r100", + "description": "modules.arcface_torch.configs.wf4m_r100", + "peekOfCode": "config.network = \"r100\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1", + "detail": "modules.arcface_torch.configs.wf4m_r100", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r100", + "description": "modules.arcface_torch.configs.wf4m_r100", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.wf4m_r100", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r100", + "description": "modules.arcface_torch.configs.wf4m_r100", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.wf4m_r100", + 
"documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r100", + "description": "modules.arcface_torch.configs.wf4m_r100", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace4M\"", + "detail": "modules.arcface_torch.configs.wf4m_r100", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r100", + "description": "modules.arcface_torch.configs.wf4m_r100", + "peekOfCode": "config.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace4M\"\nconfig.num_classes = 205990", + "detail": "modules.arcface_torch.configs.wf4m_r100", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r100", + "description": "modules.arcface_torch.configs.wf4m_r100", + "peekOfCode": "config.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace4M\"\nconfig.num_classes = 205990\nconfig.num_image = 4235242", + "detail": "modules.arcface_torch.configs.wf4m_r100", + "documentation": {} + }, + { + "label": "config.momentum", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r100", + "description": "modules.arcface_torch.configs.wf4m_r100", + "peekOfCode": "config.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace4M\"\nconfig.num_classes = 205990\nconfig.num_image = 4235242\nconfig.num_epoch = 20", + "detail": "modules.arcface_torch.configs.wf4m_r100", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r100", + "description": "modules.arcface_torch.configs.wf4m_r100", + "peekOfCode": "config.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace4M\"\nconfig.num_classes = 205990\nconfig.num_image = 4235242\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0", + "detail": "modules.arcface_torch.configs.wf4m_r100", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r100", + "description": "modules.arcface_torch.configs.wf4m_r100", + "peekOfCode": "config.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace4M\"\nconfig.num_classes = 205990\nconfig.num_image = 4235242\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_r100", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r100", + "description": "modules.arcface_torch.configs.wf4m_r100", + "peekOfCode": "config.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace4M\"\nconfig.num_classes = 205990\nconfig.num_image = 4235242\nconfig.num_epoch = 
20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_r100", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r100", + "description": "modules.arcface_torch.configs.wf4m_r100", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace4M\"\nconfig.num_classes = 205990\nconfig.num_image = 4235242\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_r100", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r100", + "description": "modules.arcface_torch.configs.wf4m_r100", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/WebFace4M\"\nconfig.num_classes = 205990\nconfig.num_image = 4235242\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_r100", + "documentation": {} + }, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r100", + "description": "modules.arcface_torch.configs.wf4m_r100", + "peekOfCode": "config.rec = \"/train_tmp/WebFace4M\"\nconfig.num_classes = 205990\nconfig.num_image = 4235242\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_r100", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r100", + "description": "modules.arcface_torch.configs.wf4m_r100", + "peekOfCode": "config.num_classes = 205990\nconfig.num_image = 4235242\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_r100", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r100", + "description": "modules.arcface_torch.configs.wf4m_r100", + "peekOfCode": "config.num_image = 4235242\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_r100", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r100", + "description": "modules.arcface_torch.configs.wf4m_r100", + "peekOfCode": "config.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_r100", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r100", + "description": "modules.arcface_torch.configs.wf4m_r100", + "peekOfCode": "config.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_r100", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r100", + "description": "modules.arcface_torch.configs.wf4m_r100", + "peekOfCode": "config.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_r100", + "documentation": {} + }, + { + "label": "config", + "kind": 5, + "importPath": 
"modules.arcface_torch.configs.wf4m_r50", + "description": "modules.arcface_torch.configs.wf4m_r50", + "peekOfCode": "config = edict()\nconfig.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4", + "detail": "modules.arcface_torch.configs.wf4m_r50", + "documentation": {} + }, + { + "label": "config.margin_list", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r50", + "description": "modules.arcface_torch.configs.wf4m_r50", + "peekOfCode": "config.margin_list = (1.0, 0.0, 0.4)\nconfig.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128", + "detail": "modules.arcface_torch.configs.wf4m_r50", + "documentation": {} + }, + { + "label": "config.network", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r50", + "description": "modules.arcface_torch.configs.wf4m_r50", + "peekOfCode": "config.network = \"r50\"\nconfig.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1", + "detail": "modules.arcface_torch.configs.wf4m_r50", + "documentation": {} + }, + { + "label": "config.resume", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r50", + "description": "modules.arcface_torch.configs.wf4m_r50", + "peekOfCode": "config.resume = False\nconfig.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000", + "detail": "modules.arcface_torch.configs.wf4m_r50", + "documentation": {} + }, + { + "label": "config.output", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r50", + "description": "modules.arcface_torch.configs.wf4m_r50", + "peekOfCode": "config.output = None\nconfig.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False", + "detail": "modules.arcface_torch.configs.wf4m_r50", + "documentation": {} + }, + { + "label": "config.embedding_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r50", + "description": "modules.arcface_torch.configs.wf4m_r50", + "peekOfCode": "config.embedding_size = 512\nconfig.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace4M\"", + "detail": "modules.arcface_torch.configs.wf4m_r50", + "documentation": {} + }, + { + "label": "config.sample_rate", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r50", + "description": "modules.arcface_torch.configs.wf4m_r50", + "peekOfCode": "config.sample_rate = 1.0\nconfig.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace4M\"\nconfig.num_classes = 205990", + "detail": "modules.arcface_torch.configs.wf4m_r50", + "documentation": {} + }, + { + "label": "config.fp16", + "kind": 5, + 
"importPath": "modules.arcface_torch.configs.wf4m_r50", + "description": "modules.arcface_torch.configs.wf4m_r50", + "peekOfCode": "config.fp16 = True\nconfig.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace4M\"\nconfig.num_classes = 205990\nconfig.num_image = 4235242", + "detail": "modules.arcface_torch.configs.wf4m_r50", + "documentation": {} + }, + { + "label": "config.momentum", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r50", + "description": "modules.arcface_torch.configs.wf4m_r50", + "peekOfCode": "config.momentum = 0.9\nconfig.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace4M\"\nconfig.num_classes = 205990\nconfig.num_image = 4235242\nconfig.num_epoch = 20", + "detail": "modules.arcface_torch.configs.wf4m_r50", + "documentation": {} + }, + { + "label": "config.weight_decay", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r50", + "description": "modules.arcface_torch.configs.wf4m_r50", + "peekOfCode": "config.weight_decay = 5e-4\nconfig.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace4M\"\nconfig.num_classes = 205990\nconfig.num_image = 4235242\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0", + "detail": "modules.arcface_torch.configs.wf4m_r50", + "documentation": {} + }, + { + "label": "config.batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r50", + "description": "modules.arcface_torch.configs.wf4m_r50", + "peekOfCode": "config.batch_size = 128\nconfig.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace4M\"\nconfig.num_classes = 205990\nconfig.num_image = 4235242\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_r50", + "documentation": {} + }, + { + "label": "config.lr", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r50", + "description": "modules.arcface_torch.configs.wf4m_r50", + "peekOfCode": "config.lr = 0.1\nconfig.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace4M\"\nconfig.num_classes = 205990\nconfig.num_image = 4235242\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_r50", + "documentation": {} + }, + { + "label": "config.verbose", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r50", + "description": "modules.arcface_torch.configs.wf4m_r50", + "peekOfCode": "config.verbose = 2000\nconfig.dali = False\nconfig.rec = \"/train_tmp/WebFace4M\"\nconfig.num_classes = 205990\nconfig.num_image = 4235242\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_r50", + "documentation": {} + }, + { + "label": "config.dali", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r50", + "description": "modules.arcface_torch.configs.wf4m_r50", + "peekOfCode": "config.dali = False\nconfig.rec = \"/train_tmp/WebFace4M\"\nconfig.num_classes = 205990\nconfig.num_image = 4235242\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_r50", + "documentation": {} + 
}, + { + "label": "config.rec", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r50", + "description": "modules.arcface_torch.configs.wf4m_r50", + "peekOfCode": "config.rec = \"/train_tmp/WebFace4M\"\nconfig.num_classes = 205990\nconfig.num_image = 4235242\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_r50", + "documentation": {} + }, + { + "label": "config.num_classes", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r50", + "description": "modules.arcface_torch.configs.wf4m_r50", + "peekOfCode": "config.num_classes = 205990\nconfig.num_image = 4235242\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_r50", + "documentation": {} + }, + { + "label": "config.num_image", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r50", + "description": "modules.arcface_torch.configs.wf4m_r50", + "peekOfCode": "config.num_image = 4235242\nconfig.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_r50", + "documentation": {} + }, + { + "label": "config.num_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r50", + "description": "modules.arcface_torch.configs.wf4m_r50", + "peekOfCode": "config.num_epoch = 20\nconfig.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_r50", + "documentation": {} + }, + { + "label": "config.warmup_epoch", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r50", + "description": "modules.arcface_torch.configs.wf4m_r50", + "peekOfCode": "config.warmup_epoch = 0\nconfig.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_r50", + "documentation": {} + }, + { + "label": "config.val_targets", + "kind": 5, + "importPath": "modules.arcface_torch.configs.wf4m_r50", + "description": "modules.arcface_torch.configs.wf4m_r50", + "peekOfCode": "config.val_targets = ['lfw', 'cfp_fp', \"agedb_30\"]", + "detail": "modules.arcface_torch.configs.wf4m_r50", + "documentation": {} + }, + { + "label": "LFold", + "kind": 6, + "importPath": "modules.arcface_torch.eval.verification", + "description": "modules.arcface_torch.eval.verification", + "peekOfCode": "class LFold:\n def __init__(self, n_splits=2, shuffle=False):\n self.n_splits = n_splits\n if self.n_splits > 1:\n self.k_fold = KFold(n_splits=n_splits, shuffle=shuffle)\n def split(self, indices):\n if self.n_splits > 1:\n return self.k_fold.split(indices)\n else:\n return [(indices, indices)]", + "detail": "modules.arcface_torch.eval.verification", + "documentation": {} + }, + { + "label": "calculate_roc", + "kind": 2, + "importPath": "modules.arcface_torch.eval.verification", + "description": "modules.arcface_torch.eval.verification", + "peekOfCode": "def calculate_roc(thresholds,\n embeddings1,\n embeddings2,\n actual_issame,\n nrof_folds=10,\n pca=0):\n assert (embeddings1.shape[0] == embeddings2.shape[0])\n assert (embeddings1.shape[1] == embeddings2.shape[1])\n nrof_pairs = min(len(actual_issame), embeddings1.shape[0])\n nrof_thresholds = len(thresholds)", + "detail": "modules.arcface_torch.eval.verification", + "documentation": {} + }, + { + "label": "calculate_accuracy", + "kind": 2, + "importPath": "modules.arcface_torch.eval.verification", + 
"description": "modules.arcface_torch.eval.verification", + "peekOfCode": "def calculate_accuracy(threshold, dist, actual_issame):\n predict_issame = np.less(dist, threshold)\n tp = np.sum(np.logical_and(predict_issame, actual_issame))\n fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))\n tn = np.sum(\n np.logical_and(np.logical_not(predict_issame),\n np.logical_not(actual_issame)))\n fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))\n tpr = 0 if (tp + fn == 0) else float(tp) / float(tp + fn)\n fpr = 0 if (fp + tn == 0) else float(fp) / float(fp + tn)", + "detail": "modules.arcface_torch.eval.verification", + "documentation": {} + }, + { + "label": "calculate_val", + "kind": 2, + "importPath": "modules.arcface_torch.eval.verification", + "description": "modules.arcface_torch.eval.verification", + "peekOfCode": "def calculate_val(thresholds,\n embeddings1,\n embeddings2,\n actual_issame,\n far_target,\n nrof_folds=10):\n assert (embeddings1.shape[0] == embeddings2.shape[0])\n assert (embeddings1.shape[1] == embeddings2.shape[1])\n nrof_pairs = min(len(actual_issame), embeddings1.shape[0])\n nrof_thresholds = len(thresholds)", + "detail": "modules.arcface_torch.eval.verification", + "documentation": {} + }, + { + "label": "calculate_val_far", + "kind": 2, + "importPath": "modules.arcface_torch.eval.verification", + "description": "modules.arcface_torch.eval.verification", + "peekOfCode": "def calculate_val_far(threshold, dist, actual_issame):\n predict_issame = np.less(dist, threshold)\n true_accept = np.sum(np.logical_and(predict_issame, actual_issame))\n false_accept = np.sum(\n np.logical_and(predict_issame, np.logical_not(actual_issame)))\n n_same = np.sum(actual_issame)\n n_diff = np.sum(np.logical_not(actual_issame))\n # print(true_accept, false_accept)\n # print(n_same, n_diff)\n val = float(true_accept) / float(n_same)", + "detail": "modules.arcface_torch.eval.verification", + "documentation": {} + }, + { + "label": "evaluate", + "kind": 2, + "importPath": "modules.arcface_torch.eval.verification", + "description": "modules.arcface_torch.eval.verification", + "peekOfCode": "def evaluate(embeddings, actual_issame, nrof_folds=10, pca=0):\n # Calculate evaluation metrics\n thresholds = np.arange(0, 4, 0.01)\n embeddings1 = embeddings[0::2]\n embeddings2 = embeddings[1::2]\n tpr, fpr, accuracy = calculate_roc(thresholds,\n embeddings1,\n embeddings2,\n np.asarray(actual_issame),\n nrof_folds=nrof_folds,", + "detail": "modules.arcface_torch.eval.verification", + "documentation": {} + }, + { + "label": "load_bin", + "kind": 2, + "importPath": "modules.arcface_torch.eval.verification", + "description": "modules.arcface_torch.eval.verification", + "peekOfCode": "def load_bin(path, image_size):\n try:\n with open(path, 'rb') as f:\n bins, issame_list = pickle.load(f) # py2\n except UnicodeDecodeError as e:\n with open(path, 'rb') as f:\n bins, issame_list = pickle.load(f, encoding='bytes') # py3\n data_list = []\n for flip in [0, 1]:\n data = torch.empty((len(issame_list) * 2, 3, image_size[0], image_size[1]))", + "detail": "modules.arcface_torch.eval.verification", + "documentation": {} + }, + { + "label": "test", + "kind": 2, + "importPath": "modules.arcface_torch.eval.verification", + "description": "modules.arcface_torch.eval.verification", + "peekOfCode": "def test(data_set, backbone, batch_size, nfolds=10):\n print('testing verification..')\n data_list = data_set[0]\n issame_list = data_set[1]\n embeddings_list = []\n time_consumed = 
0.0\n for i in range(len(data_list)):\n data = data_list[i]\n embeddings = None\n ba = 0", + "detail": "modules.arcface_torch.eval.verification", + "documentation": {} + }, + { + "label": "dumpR", + "kind": 2, + "importPath": "modules.arcface_torch.eval.verification", + "description": "modules.arcface_torch.eval.verification", + "peekOfCode": "def dumpR(data_set,\n backbone,\n batch_size,\n name='',\n data_extra=None,\n label_shape=None):\n print('dump verification embedding..')\n data_list = data_set[0]\n issame_list = data_set[1]\n embeddings_list = []", + "detail": "modules.arcface_torch.eval.verification", + "documentation": {} + }, + { + "label": "read_template_pair_list", + "kind": 2, + "importPath": "modules.arcface_torch.utils.plot", + "description": "modules.arcface_torch.utils.plot", + "peekOfCode": "def read_template_pair_list(path):\n pairs = pd.read_csv(path, sep=' ', header=None).values\n t1 = pairs[:, 0].astype(int)\n t2 = pairs[:, 1].astype(int)\n label = pairs[:, 2].astype(int)\n return t1, t2, label\np1, p2, label = read_template_pair_list(\n os.path.join('%s/meta' % image_path,\n '%s_template_pair_label.txt' % 'ijbc'))\nmethods = []", + "detail": "modules.arcface_torch.utils.plot", + "documentation": {} + }, + { + "label": "files", + "kind": 5, + "importPath": "modules.arcface_torch.utils.plot", + "description": "modules.arcface_torch.utils.plot", + "peekOfCode": "files = [x.strip() for x in files]\nimage_path = \"/train_tmp/IJB_release/IJBC\"\ndef read_template_pair_list(path):\n pairs = pd.read_csv(path, sep=' ', header=None).values\n t1 = pairs[:, 0].astype(int)\n t2 = pairs[:, 1].astype(int)\n label = pairs[:, 2].astype(int)\n return t1, t2, label\np1, p2, label = read_template_pair_list(\n os.path.join('%s/meta' % image_path,", + "detail": "modules.arcface_torch.utils.plot", + "documentation": {} + }, + { + "label": "image_path", + "kind": 5, + "importPath": "modules.arcface_torch.utils.plot", + "description": "modules.arcface_torch.utils.plot", + "peekOfCode": "image_path = \"/train_tmp/IJB_release/IJBC\"\ndef read_template_pair_list(path):\n pairs = pd.read_csv(path, sep=' ', header=None).values\n t1 = pairs[:, 0].astype(int)\n t2 = pairs[:, 1].astype(int)\n label = pairs[:, 2].astype(int)\n return t1, t2, label\np1, p2, label = read_template_pair_list(\n os.path.join('%s/meta' % image_path,\n '%s_template_pair_label.txt' % 'ijbc'))", + "detail": "modules.arcface_torch.utils.plot", + "documentation": {} + }, + { + "label": "methods", + "kind": 5, + "importPath": "modules.arcface_torch.utils.plot", + "description": "modules.arcface_torch.utils.plot", + "peekOfCode": "methods = []\nscores = []\nfor file in files:\n methods.append(file)\n scores.append(np.load(file))\nmethods = np.array(methods)\nscores = dict(zip(methods, scores))\ncolours = dict(\n zip(methods, sample_colours_from_colourmap(methods.shape[0], 'Set2')))\nx_labels = [10 ** -6, 10 ** -5, 10 ** -4, 10 ** -3, 10 ** -2, 10 ** -1]", + "detail": "modules.arcface_torch.utils.plot", + "documentation": {} + }, + { + "label": "scores", + "kind": 5, + "importPath": "modules.arcface_torch.utils.plot", + "description": "modules.arcface_torch.utils.plot", + "peekOfCode": "scores = []\nfor file in files:\n methods.append(file)\n scores.append(np.load(file))\nmethods = np.array(methods)\nscores = dict(zip(methods, scores))\ncolours = dict(\n zip(methods, sample_colours_from_colourmap(methods.shape[0], 'Set2')))\nx_labels = [10 ** -6, 10 ** -5, 10 ** -4, 10 ** -3, 10 ** -2, 10 ** 
-1]\ntpr_fpr_table = PrettyTable(['Methods'] + [str(x) for x in x_labels])", + "detail": "modules.arcface_torch.utils.plot", + "documentation": {} + }, + { + "label": "methods", + "kind": 5, + "importPath": "modules.arcface_torch.utils.plot", + "description": "modules.arcface_torch.utils.plot", + "peekOfCode": "methods = np.array(methods)\nscores = dict(zip(methods, scores))\ncolours = dict(\n zip(methods, sample_colours_from_colourmap(methods.shape[0], 'Set2')))\nx_labels = [10 ** -6, 10 ** -5, 10 ** -4, 10 ** -3, 10 ** -2, 10 ** -1]\ntpr_fpr_table = PrettyTable(['Methods'] + [str(x) for x in x_labels])\nfig = plt.figure()\nfor method in methods:\n fpr, tpr, _ = roc_curve(label, scores[method])\n roc_auc = auc(fpr, tpr)", + "detail": "modules.arcface_torch.utils.plot", + "documentation": {} + }, + { + "label": "scores", + "kind": 5, + "importPath": "modules.arcface_torch.utils.plot", + "description": "modules.arcface_torch.utils.plot", + "peekOfCode": "scores = dict(zip(methods, scores))\ncolours = dict(\n zip(methods, sample_colours_from_colourmap(methods.shape[0], 'Set2')))\nx_labels = [10 ** -6, 10 ** -5, 10 ** -4, 10 ** -3, 10 ** -2, 10 ** -1]\ntpr_fpr_table = PrettyTable(['Methods'] + [str(x) for x in x_labels])\nfig = plt.figure()\nfor method in methods:\n fpr, tpr, _ = roc_curve(label, scores[method])\n roc_auc = auc(fpr, tpr)\n fpr = np.flipud(fpr)", + "detail": "modules.arcface_torch.utils.plot", + "documentation": {} + }, + { + "label": "colours", + "kind": 5, + "importPath": "modules.arcface_torch.utils.plot", + "description": "modules.arcface_torch.utils.plot", + "peekOfCode": "colours = dict(\n zip(methods, sample_colours_from_colourmap(methods.shape[0], 'Set2')))\nx_labels = [10 ** -6, 10 ** -5, 10 ** -4, 10 ** -3, 10 ** -2, 10 ** -1]\ntpr_fpr_table = PrettyTable(['Methods'] + [str(x) for x in x_labels])\nfig = plt.figure()\nfor method in methods:\n fpr, tpr, _ = roc_curve(label, scores[method])\n roc_auc = auc(fpr, tpr)\n fpr = np.flipud(fpr)\n tpr = np.flipud(tpr) # select largest tpr at same fpr", + "detail": "modules.arcface_torch.utils.plot", + "documentation": {} + }, + { + "label": "x_labels", + "kind": 5, + "importPath": "modules.arcface_torch.utils.plot", + "description": "modules.arcface_torch.utils.plot", + "peekOfCode": "x_labels = [10 ** -6, 10 ** -5, 10 ** -4, 10 ** -3, 10 ** -2, 10 ** -1]\ntpr_fpr_table = PrettyTable(['Methods'] + [str(x) for x in x_labels])\nfig = plt.figure()\nfor method in methods:\n fpr, tpr, _ = roc_curve(label, scores[method])\n roc_auc = auc(fpr, tpr)\n fpr = np.flipud(fpr)\n tpr = np.flipud(tpr) # select largest tpr at same fpr\n plt.plot(fpr,\n tpr,", + "detail": "modules.arcface_torch.utils.plot", + "documentation": {} + }, + { + "label": "tpr_fpr_table", + "kind": 5, + "importPath": "modules.arcface_torch.utils.plot", + "description": "modules.arcface_torch.utils.plot", + "peekOfCode": "tpr_fpr_table = PrettyTable(['Methods'] + [str(x) for x in x_labels])\nfig = plt.figure()\nfor method in methods:\n fpr, tpr, _ = roc_curve(label, scores[method])\n roc_auc = auc(fpr, tpr)\n fpr = np.flipud(fpr)\n tpr = np.flipud(tpr) # select largest tpr at same fpr\n plt.plot(fpr,\n tpr,\n color=colours[method],", + "detail": "modules.arcface_torch.utils.plot", + "documentation": {} + }, + { + "label": "fig", + "kind": 5, + "importPath": "modules.arcface_torch.utils.plot", + "description": "modules.arcface_torch.utils.plot", + "peekOfCode": "fig = plt.figure()\nfor method in methods:\n fpr, tpr, _ = roc_curve(label, scores[method])\n roc_auc = 
auc(fpr, tpr)\n fpr = np.flipud(fpr)\n tpr = np.flipud(tpr) # select largest tpr at same fpr\n plt.plot(fpr,\n tpr,\n color=colours[method],\n lw=1,", + "detail": "modules.arcface_torch.utils.plot", + "documentation": {} + }, + { + "label": "CallBackVerification", + "kind": 6, + "importPath": "modules.arcface_torch.utils.utils_callbacks", + "description": "modules.arcface_torch.utils.utils_callbacks", + "peekOfCode": "class CallBackVerification(object):\n def __init__(self, val_targets, rec_prefix, summary_writer=None, image_size=(112, 112)):\n self.rank: int = distributed.get_rank()\n self.highest_acc: float = 0.0\n self.highest_acc_list: List[float] = [0.0] * len(val_targets)\n self.ver_list: List[object] = []\n self.ver_name_list: List[str] = []\n if self.rank == 0:\n self.init_dataset(val_targets=val_targets, data_dir=rec_prefix, image_size=image_size)\n self.summary_writer = summary_writer", + "detail": "modules.arcface_torch.utils.utils_callbacks", + "documentation": {} + }, + { + "label": "CallBackLogging", + "kind": 6, + "importPath": "modules.arcface_torch.utils.utils_callbacks", + "description": "modules.arcface_torch.utils.utils_callbacks", + "peekOfCode": "class CallBackLogging(object):\n def __init__(self, frequent, total_step, batch_size, start_step=0,writer=None):\n self.frequent: int = frequent\n self.rank: int = distributed.get_rank()\n self.world_size: int = distributed.get_world_size()\n self.time_start = time.time()\n self.total_step: int = total_step\n self.start_step: int = start_step\n self.batch_size: int = batch_size\n self.writer = writer", + "detail": "modules.arcface_torch.utils.utils_callbacks", + "documentation": {} + }, + { + "label": "get_config", + "kind": 2, + "importPath": "modules.arcface_torch.utils.utils_config", + "description": "modules.arcface_torch.utils.utils_config", + "peekOfCode": "def get_config(config_file):\n assert config_file.startswith('configs/'), 'config file setting must start with configs/'\n temp_config_name = osp.basename(config_file)\n temp_module_name = osp.splitext(temp_config_name)[0]\n config = importlib.import_module(\"configs.base\")\n cfg = config.config\n config = importlib.import_module(\"configs.%s\" % temp_module_name)\n job_cfg = config.config\n cfg.update(job_cfg)\n if cfg.output is None:", + "detail": "modules.arcface_torch.utils.utils_config", + "documentation": {} + }, + { + "label": "DistributedSampler", + "kind": 6, + "importPath": "modules.arcface_torch.utils.utils_distributed_sampler", + "description": "modules.arcface_torch.utils.utils_distributed_sampler", + "peekOfCode": "class DistributedSampler(_DistributedSampler):\n def __init__(\n self,\n dataset,\n num_replicas=None, # world_size\n rank=None, # local_rank\n shuffle=True,\n seed=0,\n ):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)", + "detail": "modules.arcface_torch.utils.utils_distributed_sampler", + "documentation": {} + }, + { + "label": "setup_seed", + "kind": 2, + "importPath": "modules.arcface_torch.utils.utils_distributed_sampler", + "description": "modules.arcface_torch.utils.utils_distributed_sampler", + "peekOfCode": "def setup_seed(seed, cuda_deterministic=True):\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n if cuda_deterministic: # slower, more reproducible\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n else: # faster, less reproducible", + "detail": 
"modules.arcface_torch.utils.utils_distributed_sampler", + "documentation": {} + }, + { + "label": "worker_init_fn", + "kind": 2, + "importPath": "modules.arcface_torch.utils.utils_distributed_sampler", + "description": "modules.arcface_torch.utils.utils_distributed_sampler", + "peekOfCode": "def worker_init_fn(worker_id, num_workers, rank, seed):\n # The seed of each worker equals to\n # num_worker * rank + worker_id + user_seed\n worker_seed = num_workers * rank + worker_id + seed\n np.random.seed(worker_seed)\n random.seed(worker_seed)\n torch.manual_seed(worker_seed)\ndef get_dist_info():\n if dist.is_available() and dist.is_initialized():\n rank = dist.get_rank()", + "detail": "modules.arcface_torch.utils.utils_distributed_sampler", + "documentation": {} + }, + { + "label": "get_dist_info", + "kind": 2, + "importPath": "modules.arcface_torch.utils.utils_distributed_sampler", + "description": "modules.arcface_torch.utils.utils_distributed_sampler", + "peekOfCode": "def get_dist_info():\n if dist.is_available() and dist.is_initialized():\n rank = dist.get_rank()\n world_size = dist.get_world_size()\n else:\n rank = 0\n world_size = 1\n return rank, world_size\ndef sync_random_seed(seed=None, device=\"cuda\"):\n \"\"\"Make sure different ranks share the same seed.", + "detail": "modules.arcface_torch.utils.utils_distributed_sampler", + "documentation": {} + }, + { + "label": "sync_random_seed", + "kind": 2, + "importPath": "modules.arcface_torch.utils.utils_distributed_sampler", + "description": "modules.arcface_torch.utils.utils_distributed_sampler", + "peekOfCode": "def sync_random_seed(seed=None, device=\"cuda\"):\n \"\"\"Make sure different ranks share the same seed.\n All workers must call this function, otherwise it will deadlock.\n This method is generally used in `DistributedSampler`,\n because the seed should be identical across all processes\n in the distributed group.\n In distributed sampling, different ranks should sample non-overlapped\n data in the dataset. Therefore, this function is used to make sure that\n each rank shuffles the data indices in the same order based\n on the same seed. 
Then different ranks could use different indices", + "detail": "modules.arcface_torch.utils.utils_distributed_sampler", + "documentation": {} + }, + { + "label": "AverageMeter", + "kind": 6, + "importPath": "modules.arcface_torch.utils.utils_logging", + "description": "modules.arcface_torch.utils.utils_logging", + "peekOfCode": "class AverageMeter(object):\n \"\"\"Computes and stores the average and current value\n \"\"\"\n def __init__(self):\n self.val = None\n self.avg = None\n self.sum = None\n self.count = None\n self.reset()\n def reset(self):", + "detail": "modules.arcface_torch.utils.utils_logging", + "documentation": {} + }, + { + "label": "init_logging", + "kind": 2, + "importPath": "modules.arcface_torch.utils.utils_logging", + "description": "modules.arcface_torch.utils.utils_logging", + "peekOfCode": "def init_logging(rank, models_root):\n if rank == 0:\n log_root = logging.getLogger()\n log_root.setLevel(logging.INFO)\n formatter = logging.Formatter(\"Training: %(asctime)s-%(message)s\")\n handler_file = logging.FileHandler(os.path.join(models_root, \"training.log\"))\n handler_stream = logging.StreamHandler(sys.stdout)\n handler_file.setFormatter(formatter)\n handler_stream.setFormatter(formatter)\n log_root.addHandler(handler_file)", + "detail": "modules.arcface_torch.utils.utils_logging", + "documentation": {} + }, + { + "label": "BackgroundGenerator", + "kind": 6, + "importPath": "modules.arcface_torch.dataset", + "description": "modules.arcface_torch.dataset", + "peekOfCode": "class BackgroundGenerator(threading.Thread):\n def __init__(self, generator, local_rank, max_prefetch=6):\n super(BackgroundGenerator, self).__init__()\n self.queue = Queue.Queue(max_prefetch)\n self.generator = generator\n self.local_rank = local_rank\n self.daemon = True\n self.start()\n def run(self):\n torch.cuda.set_device(self.local_rank)", + "detail": "modules.arcface_torch.dataset", + "documentation": {} + }, + { + "label": "DataLoaderX", + "kind": 6, + "importPath": "modules.arcface_torch.dataset", + "description": "modules.arcface_torch.dataset", + "peekOfCode": "class DataLoaderX(DataLoader):\n def __init__(self, local_rank, **kwargs):\n super(DataLoaderX, self).__init__(**kwargs)\n self.stream = torch.cuda.Stream(local_rank)\n self.local_rank = local_rank\n def __iter__(self):\n self.iter = super(DataLoaderX, self).__iter__()\n self.iter = BackgroundGenerator(self.iter, self.local_rank)\n self.preload()\n return self", + "detail": "modules.arcface_torch.dataset", + "documentation": {} + }, + { + "label": "MXFaceDataset", + "kind": 6, + "importPath": "modules.arcface_torch.dataset", + "description": "modules.arcface_torch.dataset", + "peekOfCode": "class MXFaceDataset(Dataset):\n def __init__(self, root_dir, local_rank):\n super(MXFaceDataset, self).__init__()\n self.transform = transforms.Compose(\n [transforms.ToPILImage(),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),\n ])\n self.root_dir = root_dir", + "detail": "modules.arcface_torch.dataset", + "documentation": {} + }, + { + "label": "SyntheticDataset", + "kind": 6, + "importPath": "modules.arcface_torch.dataset", + "description": "modules.arcface_torch.dataset", + "peekOfCode": "class SyntheticDataset(Dataset):\n def __init__(self):\n super(SyntheticDataset, self).__init__()\n img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.int32)\n img = np.transpose(img, (2, 0, 1))\n img = torch.from_numpy(img).squeeze(0).float()\n img = ((img / 255) 
- 0.5) / 0.5\n self.img = img\n self.label = 1\n def __getitem__(self, index):", + "detail": "modules.arcface_torch.dataset", + "documentation": {} + }, + { + "label": "DALIWarper", + "kind": 6, + "importPath": "modules.arcface_torch.dataset", + "description": "modules.arcface_torch.dataset", + "peekOfCode": "class DALIWarper(object):\n def __init__(self, dali_iter):\n self.iter = dali_iter\n def __next__(self):\n data_dict = self.iter.__next__()[0]\n tensor_data = data_dict['data'].cuda()\n tensor_label: torch.Tensor = data_dict['label'].cuda().long()\n tensor_label.squeeze_()\n return tensor_data, tensor_label\n def __iter__(self):", + "detail": "modules.arcface_torch.dataset", + "documentation": {} + }, + { + "label": "get_dataloader", + "kind": 2, + "importPath": "modules.arcface_torch.dataset", + "description": "modules.arcface_torch.dataset", + "peekOfCode": "def get_dataloader(\n root_dir,\n local_rank,\n batch_size,\n dali = False,\n seed = 2048,\n num_workers = 2,\n ) -> Iterable:\n rec = os.path.join(root_dir, 'train.rec')\n idx = os.path.join(root_dir, 'train.idx')", + "detail": "modules.arcface_torch.dataset", + "documentation": {} + }, + { + "label": "dali_data_iter", + "kind": 2, + "importPath": "modules.arcface_torch.dataset", + "description": "modules.arcface_torch.dataset", + "peekOfCode": "def dali_data_iter(\n batch_size: int, rec_file: str, idx_file: str, num_threads: int,\n initial_fill=32768, random_shuffle=True,\n prefetch_queue_depth=1, local_rank=0, name=\"reader\",\n mean=(127.5, 127.5, 127.5), \n std=(127.5, 127.5, 127.5)):\n \"\"\"\n Parameters:\n ----------\n initial_fill: int", + "detail": "modules.arcface_torch.dataset", + "documentation": {} + }, + { + "label": "Embedding", + "kind": 6, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "class Embedding(object):\n def __init__(self, prefix, data_shape, batch_size=1):\n image_size = (112, 112)\n self.image_size = image_size\n weight = torch.load(prefix)\n resnet = get_model(args.network, dropout=0, fp16=False).cuda()\n resnet.load_state_dict(weight)\n model = torch.nn.DataParallel(resnet)\n self.model = model\n self.model.eval()", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "divideIntoNstrand", + "kind": 2, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "def divideIntoNstrand(listTemp, n):\n twoList = [[] for i in range(n)]\n for i, e in enumerate(listTemp):\n twoList[i % n].append(e)\n return twoList\ndef read_template_media_list(path):\n # ijb_meta = np.loadtxt(path, dtype=str)\n ijb_meta = pd.read_csv(path, sep=' ', header=None).values\n templates = ijb_meta[:, 1].astype(int)\n medias = ijb_meta[:, 2].astype(int)", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "read_template_media_list", + "kind": 2, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "def read_template_media_list(path):\n # ijb_meta = np.loadtxt(path, dtype=str)\n ijb_meta = pd.read_csv(path, sep=' ', header=None).values\n templates = ijb_meta[:, 1].astype(int)\n medias = ijb_meta[:, 2].astype(int)\n return templates, medias\n# In[ ]:\ndef read_template_pair_list(path):\n # pairs = np.loadtxt(path, dtype=str)\n pairs = pd.read_csv(path, sep=' ', header=None).values", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + 
}, + { + "label": "read_template_pair_list", + "kind": 2, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "def read_template_pair_list(path):\n # pairs = np.loadtxt(path, dtype=str)\n pairs = pd.read_csv(path, sep=' ', header=None).values\n # print(pairs.shape)\n # print(pairs[:, 0].astype(int))\n t1 = pairs[:, 0].astype(int)\n t2 = pairs[:, 1].astype(int)\n label = pairs[:, 2].astype(int)\n return t1, t2, label\n# In[ ]:", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "read_image_feature", + "kind": 2, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "def read_image_feature(path):\n with open(path, 'rb') as fid:\n img_feats = pickle.load(fid)\n return img_feats\n# In[ ]:\ndef get_image_feature(img_path, files_list, model_path, epoch, gpu_id):\n batch_size = args.batch_size\n data_shape = (3, 112, 112)\n files = files_list\n print('files:', len(files))", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "get_image_feature", + "kind": 2, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "def get_image_feature(img_path, files_list, model_path, epoch, gpu_id):\n batch_size = args.batch_size\n data_shape = (3, 112, 112)\n files = files_list\n print('files:', len(files))\n rare_size = len(files) % batch_size\n faceness_scores = []\n batch = 0\n img_feats = np.empty((len(files), 1024), dtype=np.float32)\n batch_data = np.empty((2 * batch_size, 3, 112, 112))", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "image2template_feature", + "kind": 2, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "def image2template_feature(img_feats=None, templates=None, medias=None):\n # ==========================================================\n # 1. face image feature l2 normalization. img_feats:[number_image x feats_dim]\n # 2. compute media feature.\n # 3. 
compute template feature.\n # ==========================================================\n unique_templates = np.unique(templates)\n template_feats = np.zeros((len(unique_templates), img_feats.shape[1]))\n for count_template, uqt in enumerate(unique_templates):\n (ind_t,) = np.where(templates == uqt)", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "verification", + "kind": 2, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "def verification(template_norm_feats=None,\n unique_templates=None,\n p1=None,\n p2=None):\n # ==========================================================\n # Compute set-to-set Similarity Score.\n # ==========================================================\n template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int)\n for count_template, uqt in enumerate(unique_templates):\n template2id[uqt] = count_template", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "verification2", + "kind": 2, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "def verification2(template_norm_feats=None,\n unique_templates=None,\n p1=None,\n p2=None):\n template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int)\n for count_template, uqt in enumerate(unique_templates):\n template2id[uqt] = count_template\n score = np.zeros((len(p1),)) # save cosine distance between pairs\n total_pairs = np.array(range(len(p1)))\n batchsize = 100000 # small batch size instead of all pairs in one batch due to the memory limitation", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "read_score", + "kind": 2, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "def read_score(path):\n with open(path, 'rb') as fid:\n img_feats = pickle.load(fid)\n return img_feats\n# # Step1: Load Meta Data\n# In[ ]:\nassert target == 'IJBC' or target == 'IJBB'\n# =============================================================\n# load image and template relationships for template feature embedding\n# tid --> template id, mid --> media id", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "parser", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "parser = argparse.ArgumentParser(description='do ijb test')\n# general\nparser.add_argument('--model-prefix', default='', help='path to load model.')\nparser.add_argument('--image-path', default='', type=str, help='')\nparser.add_argument('--result-dir', default='.', type=str, help='')\nparser.add_argument('--batch-size', default=128, type=int, help='')\nparser.add_argument('--network', default='iresnet50', type=str, help='')\nparser.add_argument('--job', default='insightface', type=str, help='job name')\nparser.add_argument('--target', default='IJBC', type=str, help='target, set to IJBC or IJBB')\nargs = parser.parse_args()", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "args", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "args = parser.parse_args()\ntarget = args.target\nmodel_path = args.model_prefix\nimage_path = args.image_path\nresult_dir = args.result_dir\ngpu_id = None\nuse_norm_score = True # if 
True, TestMode(N1)\nuse_detector_score = True # if True, TestMode(D1)\nuse_flip_test = True # if True, TestMode(F1)\njob = args.job", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "target", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "target = args.target\nmodel_path = args.model_prefix\nimage_path = args.image_path\nresult_dir = args.result_dir\ngpu_id = None\nuse_norm_score = True # if True, TestMode(N1)\nuse_detector_score = True # if True, TestMode(D1)\nuse_flip_test = True # if True, TestMode(F1)\njob = args.job\nbatch_size = args.batch_size", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "model_path", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "model_path = args.model_prefix\nimage_path = args.image_path\nresult_dir = args.result_dir\ngpu_id = None\nuse_norm_score = True # if True, TestMode(N1)\nuse_detector_score = True # if True, TestMode(D1)\nuse_flip_test = True # if True, TestMode(F1)\njob = args.job\nbatch_size = args.batch_size\nclass Embedding(object):", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "image_path", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "image_path = args.image_path\nresult_dir = args.result_dir\ngpu_id = None\nuse_norm_score = True # if True, TestMode(N1)\nuse_detector_score = True # if True, TestMode(D1)\nuse_flip_test = True # if True, TestMode(F1)\njob = args.job\nbatch_size = args.batch_size\nclass Embedding(object):\n def __init__(self, prefix, data_shape, batch_size=1):", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "result_dir", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "result_dir = args.result_dir\ngpu_id = None\nuse_norm_score = True # if True, TestMode(N1)\nuse_detector_score = True # if True, TestMode(D1)\nuse_flip_test = True # if True, TestMode(F1)\njob = args.job\nbatch_size = args.batch_size\nclass Embedding(object):\n def __init__(self, prefix, data_shape, batch_size=1):\n image_size = (112, 112)", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "gpu_id", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "gpu_id = None\nuse_norm_score = True # if True, TestMode(N1)\nuse_detector_score = True # if True, TestMode(D1)\nuse_flip_test = True # if True, TestMode(F1)\njob = args.job\nbatch_size = args.batch_size\nclass Embedding(object):\n def __init__(self, prefix, data_shape, batch_size=1):\n image_size = (112, 112)\n self.image_size = image_size", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "use_norm_score", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "use_norm_score = True # if True, TestMode(N1)\nuse_detector_score = True # if True, TestMode(D1)\nuse_flip_test = True # if True, TestMode(F1)\njob = args.job\nbatch_size = args.batch_size\nclass Embedding(object):\n def __init__(self, prefix, data_shape, batch_size=1):\n image_size = (112, 112)\n self.image_size = 
image_size\n weight = torch.load(prefix)", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "use_detector_score", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "use_detector_score = True # if True, TestMode(D1)\nuse_flip_test = True # if True, TestMode(F1)\njob = args.job\nbatch_size = args.batch_size\nclass Embedding(object):\n def __init__(self, prefix, data_shape, batch_size=1):\n image_size = (112, 112)\n self.image_size = image_size\n weight = torch.load(prefix)\n resnet = get_model(args.network, dropout=0, fp16=False).cuda()", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "use_flip_test", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "use_flip_test = True # if True, TestMode(F1)\njob = args.job\nbatch_size = args.batch_size\nclass Embedding(object):\n def __init__(self, prefix, data_shape, batch_size=1):\n image_size = (112, 112)\n self.image_size = image_size\n weight = torch.load(prefix)\n resnet = get_model(args.network, dropout=0, fp16=False).cuda()\n resnet.load_state_dict(weight)", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "job", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "job = args.job\nbatch_size = args.batch_size\nclass Embedding(object):\n def __init__(self, prefix, data_shape, batch_size=1):\n image_size = (112, 112)\n self.image_size = image_size\n weight = torch.load(prefix)\n resnet = get_model(args.network, dropout=0, fp16=False).cuda()\n resnet.load_state_dict(weight)\n model = torch.nn.DataParallel(resnet)", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "batch_size", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "batch_size = args.batch_size\nclass Embedding(object):\n def __init__(self, prefix, data_shape, batch_size=1):\n image_size = (112, 112)\n self.image_size = image_size\n weight = torch.load(prefix)\n resnet = get_model(args.network, dropout=0, fp16=False).cuda()\n resnet.load_state_dict(weight)\n model = torch.nn.DataParallel(resnet)\n self.model = model", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "start", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "start = timeit.default_timer()\ntemplates, medias = read_template_media_list(\n os.path.join('%s/meta' % image_path,\n '%s_face_tid_mid.txt' % target.lower()))\nstop = timeit.default_timer()\nprint('Time: %.2f s. ' % (stop - start))\n# In[ ]:\n# =============================================================\n# load template pairs for template-to-template verification\n# tid : template id, label : 1/0", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "stop", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "stop = timeit.default_timer()\nprint('Time: %.2f s. 
' % (stop - start))\n# In[ ]:\n# =============================================================\n# load template pairs for template-to-template verification\n# tid : template id, label : 1/0\n# format:\n# tid_1 tid_2 label\n# =============================================================\nstart = timeit.default_timer()", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "start", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "start = timeit.default_timer()\np1, p2, label = read_template_pair_list(\n os.path.join('%s/meta' % image_path,\n '%s_template_pair_label.txt' % target.lower()))\nstop = timeit.default_timer()\nprint('Time: %.2f s. ' % (stop - start))\n# # Step 2: Get Image Features\n# In[ ]:\n# =============================================================\n# load image features", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "stop", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "stop = timeit.default_timer()\nprint('Time: %.2f s. ' % (stop - start))\n# # Step 2: Get Image Features\n# In[ ]:\n# =============================================================\n# load image features\n# format:\n# img_feats: [image_num x feats_dim] (227630, 512)\n# =============================================================\nstart = timeit.default_timer()", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "start", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "start = timeit.default_timer()\nimg_path = '%s/loose_crop' % image_path\nimg_list_path = '%s/meta/%s_name_5pts_score.txt' % (image_path, target.lower())\nimg_list = open(img_list_path)\nfiles = img_list.readlines()\n# files_list = divideIntoNstrand(files, rank_size)\nfiles_list = files\n# img_feats\n# for i in range(rank_size):\nimg_feats, faceness_scores = get_image_feature(img_path, files_list,", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "img_path", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "img_path = '%s/loose_crop' % image_path\nimg_list_path = '%s/meta/%s_name_5pts_score.txt' % (image_path, target.lower())\nimg_list = open(img_list_path)\nfiles = img_list.readlines()\n# files_list = divideIntoNstrand(files, rank_size)\nfiles_list = files\n# img_feats\n# for i in range(rank_size):\nimg_feats, faceness_scores = get_image_feature(img_path, files_list,\n model_path, 0, gpu_id)", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "img_list_path", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "img_list_path = '%s/meta/%s_name_5pts_score.txt' % (image_path, target.lower())\nimg_list = open(img_list_path)\nfiles = img_list.readlines()\n# files_list = divideIntoNstrand(files, rank_size)\nfiles_list = files\n# img_feats\n# for i in range(rank_size):\nimg_feats, faceness_scores = get_image_feature(img_path, files_list,\n model_path, 0, gpu_id)\nstop = timeit.default_timer()", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "img_list", + "kind": 5, + "importPath": 
"modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "img_list = open(img_list_path)\nfiles = img_list.readlines()\n# files_list = divideIntoNstrand(files, rank_size)\nfiles_list = files\n# img_feats\n# for i in range(rank_size):\nimg_feats, faceness_scores = get_image_feature(img_path, files_list,\n model_path, 0, gpu_id)\nstop = timeit.default_timer()\nprint('Time: %.2f s. ' % (stop - start))", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "files", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "files = img_list.readlines()\n# files_list = divideIntoNstrand(files, rank_size)\nfiles_list = files\n# img_feats\n# for i in range(rank_size):\nimg_feats, faceness_scores = get_image_feature(img_path, files_list,\n model_path, 0, gpu_id)\nstop = timeit.default_timer()\nprint('Time: %.2f s. ' % (stop - start))\nprint('Feature Shape: ({} , {}) .'.format(img_feats.shape[0],", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "files_list", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "files_list = files\n# img_feats\n# for i in range(rank_size):\nimg_feats, faceness_scores = get_image_feature(img_path, files_list,\n model_path, 0, gpu_id)\nstop = timeit.default_timer()\nprint('Time: %.2f s. ' % (stop - start))\nprint('Feature Shape: ({} , {}) .'.format(img_feats.shape[0],\n img_feats.shape[1]))\n# # Step3: Get Template Features", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "stop", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "stop = timeit.default_timer()\nprint('Time: %.2f s. ' % (stop - start))\nprint('Feature Shape: ({} , {}) .'.format(img_feats.shape[0],\n img_feats.shape[1]))\n# # Step3: Get Template Features\n# In[ ]:\n# =============================================================\n# compute template features from image features.\n# =============================================================\nstart = timeit.default_timer()", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "start", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "start = timeit.default_timer()\n# ==========================================================\n# Norm feature before aggregation into template feature?\n# Feature norm from embedding network and faceness score are able to decrease weights for noise samples (not face).\n# ==========================================================\n# 1. FaceScore (Feature Norm)\n# 2. FaceScore (Detector)\nif use_flip_test:\n # concat --- F1\n # img_input_feats = img_feats", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "stop", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "stop = timeit.default_timer()\nprint('Time: %.2f s. 
' % (stop - start))\n# # Step 4: Get Template Similarity Scores\n# In[ ]:\n# =============================================================\n# compute verification scores between template pairs.\n# =============================================================\nstart = timeit.default_timer()\nscore = verification(template_norm_feats, unique_templates, p1, p2)\nstop = timeit.default_timer()", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "start", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "start = timeit.default_timer()\nscore = verification(template_norm_feats, unique_templates, p1, p2)\nstop = timeit.default_timer()\nprint('Time: %.2f s. ' % (stop - start))\n# In[ ]:\nsave_path = os.path.join(result_dir, args.job)\n# save_path = result_dir + '/%s_result' % target\nif not os.path.exists(save_path):\n os.makedirs(save_path)\nscore_save_file = os.path.join(save_path, \"%s.npy\" % target.lower())", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "score", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "score = verification(template_norm_feats, unique_templates, p1, p2)\nstop = timeit.default_timer()\nprint('Time: %.2f s. ' % (stop - start))\n# In[ ]:\nsave_path = os.path.join(result_dir, args.job)\n# save_path = result_dir + '/%s_result' % target\nif not os.path.exists(save_path):\n os.makedirs(save_path)\nscore_save_file = os.path.join(save_path, \"%s.npy\" % target.lower())\nnp.save(score_save_file, score)", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "stop", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "stop = timeit.default_timer()\nprint('Time: %.2f s. 
' % (stop - start))\n# In[ ]:\nsave_path = os.path.join(result_dir, args.job)\n# save_path = result_dir + '/%s_result' % target\nif not os.path.exists(save_path):\n os.makedirs(save_path)\nscore_save_file = os.path.join(save_path, \"%s.npy\" % target.lower())\nnp.save(score_save_file, score)\n# # Step 5: Get ROC Curves and TPR@FPR Table", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "save_path", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "save_path = os.path.join(result_dir, args.job)\n# save_path = result_dir + '/%s_result' % target\nif not os.path.exists(save_path):\n os.makedirs(save_path)\nscore_save_file = os.path.join(save_path, \"%s.npy\" % target.lower())\nnp.save(score_save_file, score)\n# # Step 5: Get ROC Curves and TPR@FPR Table\n# In[ ]:\nfiles = [score_save_file]\nmethods = []", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "score_save_file", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "score_save_file = os.path.join(save_path, \"%s.npy\" % target.lower())\nnp.save(score_save_file, score)\n# # Step 5: Get ROC Curves and TPR@FPR Table\n# In[ ]:\nfiles = [score_save_file]\nmethods = []\nscores = []\nfor file in files:\n methods.append(Path(file).stem)\n scores.append(np.load(file))", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "files", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "files = [score_save_file]\nmethods = []\nscores = []\nfor file in files:\n methods.append(Path(file).stem)\n scores.append(np.load(file))\nmethods = np.array(methods)\nscores = dict(zip(methods, scores))\ncolours = dict(\n zip(methods, sample_colours_from_colourmap(methods.shape[0], 'Set2')))", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "methods", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "methods = []\nscores = []\nfor file in files:\n methods.append(Path(file).stem)\n scores.append(np.load(file))\nmethods = np.array(methods)\nscores = dict(zip(methods, scores))\ncolours = dict(\n zip(methods, sample_colours_from_colourmap(methods.shape[0], 'Set2')))\nx_labels = [10 ** -6, 10 ** -5, 10 ** -4, 10 ** -3, 10 ** -2, 10 ** -1]", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "scores", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "scores = []\nfor file in files:\n methods.append(Path(file).stem)\n scores.append(np.load(file))\nmethods = np.array(methods)\nscores = dict(zip(methods, scores))\ncolours = dict(\n zip(methods, sample_colours_from_colourmap(methods.shape[0], 'Set2')))\nx_labels = [10 ** -6, 10 ** -5, 10 ** -4, 10 ** -3, 10 ** -2, 10 ** -1]\ntpr_fpr_table = PrettyTable(['Methods'] + [str(x) for x in x_labels])", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "methods", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "methods = np.array(methods)\nscores = dict(zip(methods, scores))\ncolours = dict(\n zip(methods, 
sample_colours_from_colourmap(methods.shape[0], 'Set2')))\nx_labels = [10 ** -6, 10 ** -5, 10 ** -4, 10 ** -3, 10 ** -2, 10 ** -1]\ntpr_fpr_table = PrettyTable(['Methods'] + [str(x) for x in x_labels])\nfig = plt.figure()\nfor method in methods:\n fpr, tpr, _ = roc_curve(label, scores[method])\n roc_auc = auc(fpr, tpr)", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "scores", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "scores = dict(zip(methods, scores))\ncolours = dict(\n zip(methods, sample_colours_from_colourmap(methods.shape[0], 'Set2')))\nx_labels = [10 ** -6, 10 ** -5, 10 ** -4, 10 ** -3, 10 ** -2, 10 ** -1]\ntpr_fpr_table = PrettyTable(['Methods'] + [str(x) for x in x_labels])\nfig = plt.figure()\nfor method in methods:\n fpr, tpr, _ = roc_curve(label, scores[method])\n roc_auc = auc(fpr, tpr)\n fpr = np.flipud(fpr)", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "colours", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "colours = dict(\n zip(methods, sample_colours_from_colourmap(methods.shape[0], 'Set2')))\nx_labels = [10 ** -6, 10 ** -5, 10 ** -4, 10 ** -3, 10 ** -2, 10 ** -1]\ntpr_fpr_table = PrettyTable(['Methods'] + [str(x) for x in x_labels])\nfig = plt.figure()\nfor method in methods:\n fpr, tpr, _ = roc_curve(label, scores[method])\n roc_auc = auc(fpr, tpr)\n fpr = np.flipud(fpr)\n tpr = np.flipud(tpr) # select largest tpr at same fpr", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "x_labels", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "x_labels = [10 ** -6, 10 ** -5, 10 ** -4, 10 ** -3, 10 ** -2, 10 ** -1]\ntpr_fpr_table = PrettyTable(['Methods'] + [str(x) for x in x_labels])\nfig = plt.figure()\nfor method in methods:\n fpr, tpr, _ = roc_curve(label, scores[method])\n roc_auc = auc(fpr, tpr)\n fpr = np.flipud(fpr)\n tpr = np.flipud(tpr) # select largest tpr at same fpr\n plt.plot(fpr,\n tpr,", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "tpr_fpr_table", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "tpr_fpr_table = PrettyTable(['Methods'] + [str(x) for x in x_labels])\nfig = plt.figure()\nfor method in methods:\n fpr, tpr, _ = roc_curve(label, scores[method])\n roc_auc = auc(fpr, tpr)\n fpr = np.flipud(fpr)\n tpr = np.flipud(tpr) # select largest tpr at same fpr\n plt.plot(fpr,\n tpr,\n color=colours[method],", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "fig", + "kind": 5, + "importPath": "modules.arcface_torch.eval_ijbc", + "description": "modules.arcface_torch.eval_ijbc", + "peekOfCode": "fig = plt.figure()\nfor method in methods:\n fpr, tpr, _ = roc_curve(label, scores[method])\n roc_auc = auc(fpr, tpr)\n fpr = np.flipud(fpr)\n tpr = np.flipud(tpr) # select largest tpr at same fpr\n plt.plot(fpr,\n tpr,\n color=colours[method],\n lw=1,", + "detail": "modules.arcface_torch.eval_ijbc", + "documentation": {} + }, + { + "label": "inference", + "kind": 2, + "importPath": "modules.arcface_torch.inference", + "description": "modules.arcface_torch.inference", + "peekOfCode": "def inference(weight, name, img):\n if img 
is None:\n img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.uint8)\n else:\n img = cv2.imread(img)\n img = cv2.resize(img, (112, 112))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = np.transpose(img, (2, 0, 1))\n img = torch.from_numpy(img).unsqueeze(0).float()\n img.div_(255).sub_(0.5).div_(0.5)", + "detail": "modules.arcface_torch.inference", + "documentation": {} + }, + { + "label": "CombinedMarginLoss", + "kind": 6, + "importPath": "modules.arcface_torch.losses", + "description": "modules.arcface_torch.losses", + "peekOfCode": "class CombinedMarginLoss(torch.nn.Module):\n def __init__(self, \n s, \n m1,\n m2,\n m3,\n interclass_filtering_threshold=0):\n super().__init__()\n self.s = s\n self.m1 = m1", + "detail": "modules.arcface_torch.losses", + "documentation": {} + }, + { + "label": "ArcFace", + "kind": 6, + "importPath": "modules.arcface_torch.losses", + "description": "modules.arcface_torch.losses", + "peekOfCode": "class ArcFace(torch.nn.Module):\n \"\"\" ArcFace (https://arxiv.org/pdf/1801.07698v1.pdf):\n \"\"\"\n def __init__(self, s=64.0, margin=0.5):\n super(ArcFace, self).__init__()\n self.scale = s\n self.cos_m = math.cos(margin)\n self.sin_m = math.sin(margin)\n self.theta = math.cos(math.pi - margin)\n self.sinmm = math.sin(math.pi - margin) * margin", + "detail": "modules.arcface_torch.losses", + "documentation": {} + }, + { + "label": "CosFace", + "kind": 6, + "importPath": "modules.arcface_torch.losses", + "description": "modules.arcface_torch.losses", + "peekOfCode": "class CosFace(torch.nn.Module):\n def __init__(self, s=64.0, m=0.40):\n super(CosFace, self).__init__()\n self.s = s\n self.m = m\n def forward(self, logits: torch.Tensor, labels: torch.Tensor):\n index = torch.where(labels != -1)[0]\n target_logit = logits[index, labels[index].view(-1)]\n final_target_logit = target_logit - self.m\n logits[index, labels[index].view(-1)] = final_target_logit", + "detail": "modules.arcface_torch.losses", + "documentation": {} + }, + { + "label": "PolyScheduler", + "kind": 6, + "importPath": "modules.arcface_torch.lr_scheduler", + "description": "modules.arcface_torch.lr_scheduler", + "peekOfCode": "class PolyScheduler(_LRScheduler):\n def __init__(self, optimizer, base_lr, max_steps, warmup_steps, last_epoch=-1):\n self.base_lr = base_lr\n self.warmup_lr_init = 0.0001\n self.max_steps: int = max_steps\n self.warmup_steps: int = warmup_steps\n self.power = 2\n super(PolyScheduler, self).__init__(optimizer, -1, False)\n self.last_epoch = last_epoch\n def get_warmup_lr(self):", + "detail": "modules.arcface_torch.lr_scheduler", + "documentation": {} + }, + { + "label": "ArcFaceORT", + "kind": 6, + "importPath": "modules.arcface_torch.onnx_helper", + "description": "modules.arcface_torch.onnx_helper", + "peekOfCode": "class ArcFaceORT:\n def __init__(self, model_path, cpu=False):\n self.model_path = model_path\n # providers = None will use available provider, for onnxruntime-gpu it will be \"CUDAExecutionProvider\"\n self.providers = ['CPUExecutionProvider'] if cpu else None\n #input_size is (w,h), return error message, return None if success\n def check(self, track='cfat', test_img = None):\n #default is cfat\n max_model_size_mb=1024\n max_feat_dim=512", + "detail": "modules.arcface_torch.onnx_helper", + "documentation": {} + }, + { + "label": "AlignedDataSet", + "kind": 6, + "importPath": "modules.arcface_torch.onnx_ijbc", + "description": "modules.arcface_torch.onnx_ijbc", + "peekOfCode": "class AlignedDataSet(mx.gluon.data.Dataset):\n def __init__(self, 
root, lines, align=True):\n self.lines = lines\n self.root = root\n self.align = align\n def __len__(self):\n return len(self.lines)\n def __getitem__(self, idx):\n each_line = self.lines[idx]\n name_lmk_score = each_line.strip().split(' ')", + "detail": "modules.arcface_torch.onnx_ijbc", + "documentation": {} + }, + { + "label": "extract", + "kind": 2, + "importPath": "modules.arcface_torch.onnx_ijbc", + "description": "modules.arcface_torch.onnx_ijbc", + "peekOfCode": "def extract(model_root, dataset):\n model = ArcFaceORT(model_path=model_root)\n model.check()\n feat_mat = np.zeros(shape=(len(dataset), 2 * model.feat_dim))\n def collate_fn(data):\n return torch.cat(data, dim=0)\n data_loader = DataLoader(\n dataset, batch_size=128, drop_last=False, num_workers=4, collate_fn=collate_fn, )\n num_iter = 0\n for batch in data_loader:", + "detail": "modules.arcface_torch.onnx_ijbc", + "documentation": {} + }, + { + "label": "read_template_media_list", + "kind": 2, + "importPath": "modules.arcface_torch.onnx_ijbc", + "description": "modules.arcface_torch.onnx_ijbc", + "peekOfCode": "def read_template_media_list(path):\n ijb_meta = pd.read_csv(path, sep=' ', header=None).values\n templates = ijb_meta[:, 1].astype(np.int)\n medias = ijb_meta[:, 2].astype(np.int)\n return templates, medias\ndef read_template_pair_list(path):\n pairs = pd.read_csv(path, sep=' ', header=None).values\n t1 = pairs[:, 0].astype(np.int)\n t2 = pairs[:, 1].astype(np.int)\n label = pairs[:, 2].astype(np.int)", + "detail": "modules.arcface_torch.onnx_ijbc", + "documentation": {} + }, + { + "label": "read_template_pair_list", + "kind": 2, + "importPath": "modules.arcface_torch.onnx_ijbc", + "description": "modules.arcface_torch.onnx_ijbc", + "peekOfCode": "def read_template_pair_list(path):\n pairs = pd.read_csv(path, sep=' ', header=None).values\n t1 = pairs[:, 0].astype(np.int)\n t2 = pairs[:, 1].astype(np.int)\n label = pairs[:, 2].astype(np.int)\n return t1, t2, label\ndef read_image_feature(path):\n with open(path, 'rb') as fid:\n img_feats = pickle.load(fid)\n return img_feats", + "detail": "modules.arcface_torch.onnx_ijbc", + "documentation": {} + }, + { + "label": "read_image_feature", + "kind": 2, + "importPath": "modules.arcface_torch.onnx_ijbc", + "description": "modules.arcface_torch.onnx_ijbc", + "peekOfCode": "def read_image_feature(path):\n with open(path, 'rb') as fid:\n img_feats = pickle.load(fid)\n return img_feats\ndef image2template_feature(img_feats=None,\n templates=None,\n medias=None):\n unique_templates = np.unique(templates)\n template_feats = np.zeros((len(unique_templates), img_feats.shape[1]))\n for count_template, uqt in enumerate(unique_templates):", + "detail": "modules.arcface_torch.onnx_ijbc", + "documentation": {} + }, + { + "label": "image2template_feature", + "kind": 2, + "importPath": "modules.arcface_torch.onnx_ijbc", + "description": "modules.arcface_torch.onnx_ijbc", + "peekOfCode": "def image2template_feature(img_feats=None,\n templates=None,\n medias=None):\n unique_templates = np.unique(templates)\n template_feats = np.zeros((len(unique_templates), img_feats.shape[1]))\n for count_template, uqt in enumerate(unique_templates):\n (ind_t,) = np.where(templates == uqt)\n face_norm_feats = img_feats[ind_t]\n face_medias = medias[ind_t]\n unique_medias, unique_media_counts = np.unique(face_medias, return_counts=True)", + "detail": "modules.arcface_torch.onnx_ijbc", + "documentation": {} + }, + { + "label": "verification", + "kind": 2, + "importPath": 
"modules.arcface_torch.onnx_ijbc", + "description": "modules.arcface_torch.onnx_ijbc", + "peekOfCode": "def verification(template_norm_feats=None,\n unique_templates=None,\n p1=None,\n p2=None):\n template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int)\n for count_template, uqt in enumerate(unique_templates):\n template2id[uqt] = count_template\n score = np.zeros((len(p1),))\n total_pairs = np.array(range(len(p1)))\n batchsize = 100000", + "detail": "modules.arcface_torch.onnx_ijbc", + "documentation": {} + }, + { + "label": "verification2", + "kind": 2, + "importPath": "modules.arcface_torch.onnx_ijbc", + "description": "modules.arcface_torch.onnx_ijbc", + "peekOfCode": "def verification2(template_norm_feats=None,\n unique_templates=None,\n p1=None,\n p2=None):\n template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int)\n for count_template, uqt in enumerate(unique_templates):\n template2id[uqt] = count_template\n score = np.zeros((len(p1),)) # save cosine distance between pairs\n total_pairs = np.array(range(len(p1)))\n batchsize = 100000 # small batchsize instead of all pairs in one batch due to the memory limiation", + "detail": "modules.arcface_torch.onnx_ijbc", + "documentation": {} + }, + { + "label": "main", + "kind": 2, + "importPath": "modules.arcface_torch.onnx_ijbc", + "description": "modules.arcface_torch.onnx_ijbc", + "peekOfCode": "def main(args):\n use_norm_score = True # if Ture, TestMode(N1)\n use_detector_score = True # if Ture, TestMode(D1)\n use_flip_test = True # if Ture, TestMode(F1)\n assert args.target == 'IJBC' or args.target == 'IJBB'\n start = timeit.default_timer()\n templates, medias = read_template_media_list(\n os.path.join('%s/meta' % args.image_path, '%s_face_tid_mid.txt' % args.target.lower()))\n stop = timeit.default_timer()\n print('Time: %.2f s. ' % (stop - start))", + "detail": "modules.arcface_torch.onnx_ijbc", + "documentation": {} + }, + { + "label": "SRC", + "kind": 5, + "importPath": "modules.arcface_torch.onnx_ijbc", + "description": "modules.arcface_torch.onnx_ijbc", + "peekOfCode": "SRC = np.array(\n [\n [30.2946, 51.6963],\n [65.5318, 51.5014],\n [48.0252, 71.7366],\n [33.5493, 92.3655],\n [62.7299, 92.2041]]\n , dtype=np.float32)\nSRC[:, 0] += 8.0\n@torch.no_grad()", + "detail": "modules.arcface_torch.onnx_ijbc", + "documentation": {} + }, + { + "label": "PartialFC", + "kind": 6, + "importPath": "modules.arcface_torch.partial_fc", + "description": "modules.arcface_torch.partial_fc", + "peekOfCode": "class PartialFC(torch.nn.Module):\n \"\"\"\n https://arxiv.org/abs/2203.15565\n A distributed sparsely updating variant of the FC layer, named Partial FC (PFC).\n When sample rate less than 1, in each iteration, positive class centers and a random subset of\n negative class centers are selected to compute the margin-based softmax loss, all class\n centers are still maintained throughout the whole training process, but only a subset is\n selected and updated in each iteration.\n .. 
note::\n When the sample rate is equal to 1, Partial FC is equivalent to model parallelism (the default sample rate is 1).", + "detail": "modules.arcface_torch.partial_fc", + "documentation": {} + }, + { + "label": "PartialFCAdamW", + "kind": 6, + "importPath": "modules.arcface_torch.partial_fc", + "description": "modules.arcface_torch.partial_fc", + "peekOfCode": "class PartialFCAdamW(torch.nn.Module):\n def __init__(self,\n margin_loss: Callable,\n embedding_size: int,\n num_classes: int,\n sample_rate: float = 1.0,\n fp16: bool = False,):\n \"\"\"\n Parameters:\n -----------", + "detail": "modules.arcface_torch.partial_fc", + "documentation": {} + }, + { + "label": "DistCrossEntropyFunc", + "kind": 6, + "importPath": "modules.arcface_torch.partial_fc", + "description": "modules.arcface_torch.partial_fc", + "peekOfCode": "class DistCrossEntropyFunc(torch.autograd.Function):\n \"\"\"\n CrossEntropy loss is calculated in parallel; the denominator is all-reduced onto a single GPU before the softmax is computed.\n Implementation of ArcFace (https://arxiv.org/pdf/1801.07698v1.pdf):\n \"\"\"\n @staticmethod\n def forward(ctx, logits: torch.Tensor, label: torch.Tensor):\n \"\"\" \"\"\"\n batch_size = logits.size(0)\n # for numerical stability", + "detail": "modules.arcface_torch.partial_fc", + "documentation": {} + }, + { + "label": "DistCrossEntropy", + "kind": 6, + "importPath": "modules.arcface_torch.partial_fc", + "description": "modules.arcface_torch.partial_fc", + "peekOfCode": "class DistCrossEntropy(torch.nn.Module):\n def __init__(self):\n super(DistCrossEntropy, self).__init__()\n def forward(self, logit_part, label_part):\n return DistCrossEntropyFunc.apply(logit_part, label_part)\nclass AllGatherFunc(torch.autograd.Function):\n \"\"\"AllGather op with gradient backward\"\"\"\n @staticmethod\n def forward(ctx, tensor, *gather_list):\n gather_list = list(gather_list)", + "detail": "modules.arcface_torch.partial_fc", + "documentation": {} + }, + { + "label": "AllGatherFunc", + "kind": 6, + "importPath": "modules.arcface_torch.partial_fc", + "description": "modules.arcface_torch.partial_fc", + "peekOfCode": "class AllGatherFunc(torch.autograd.Function):\n \"\"\"AllGather op with gradient backward\"\"\"\n @staticmethod\n def forward(ctx, tensor, *gather_list):\n gather_list = list(gather_list)\n distributed.all_gather(gather_list, tensor)\n return tuple(gather_list)\n @staticmethod\n def backward(ctx, *grads):\n grad_list = list(grads)", + "detail": "modules.arcface_torch.partial_fc", + "documentation": {} + }, + { + "label": "AllGather", + "kind": 5, + "importPath": "modules.arcface_torch.partial_fc", + "description": "modules.arcface_torch.partial_fc", + "peekOfCode": "AllGather = AllGatherFunc.apply", + "detail": "modules.arcface_torch.partial_fc", + "documentation": {} + }, + { + "label": "PartialFC_V2", + "kind": 6, + "importPath": "modules.arcface_torch.partial_fc_v2", + "description": "modules.arcface_torch.partial_fc_v2", + "peekOfCode": "class PartialFC_V2(torch.nn.Module):\n \"\"\"\n https://arxiv.org/abs/2203.15565\n A distributed sparsely updating variant of the FC layer, named Partial FC (PFC).\n When the sample rate is less than 1, in each iteration, positive class centers and a random subset of\n negative class centers are selected to compute the margin-based softmax loss; all class\n centers are still maintained throughout the whole training process, but only a subset is\n selected and updated in each iteration.\n .. 
note::\n When the sample rate is equal to 1, Partial FC is equivalent to model parallelism (the default sample rate is 1).", + "detail": "modules.arcface_torch.partial_fc_v2", + "documentation": {} + }, + { + "label": "DistCrossEntropyFunc", + "kind": 6, + "importPath": "modules.arcface_torch.partial_fc_v2", + "description": "modules.arcface_torch.partial_fc_v2", + "peekOfCode": "class DistCrossEntropyFunc(torch.autograd.Function):\n \"\"\"\n CrossEntropy loss is calculated in parallel; the denominator is all-reduced onto a single GPU before the softmax is computed.\n Implementation of ArcFace (https://arxiv.org/pdf/1801.07698v1.pdf):\n \"\"\"\n @staticmethod\n def forward(ctx, logits: torch.Tensor, label: torch.Tensor):\n \"\"\" \"\"\"\n batch_size = logits.size(0)\n # for numerical stability", + "detail": "modules.arcface_torch.partial_fc_v2", + "documentation": {} + }, + { + "label": "DistCrossEntropy", + "kind": 6, + "importPath": "modules.arcface_torch.partial_fc_v2", + "description": "modules.arcface_torch.partial_fc_v2", + "peekOfCode": "class DistCrossEntropy(torch.nn.Module):\n def __init__(self):\n super(DistCrossEntropy, self).__init__()\n def forward(self, logit_part, label_part):\n return DistCrossEntropyFunc.apply(logit_part, label_part)\nclass AllGatherFunc(torch.autograd.Function):\n \"\"\"AllGather op with gradient backward\"\"\"\n @staticmethod\n def forward(ctx, tensor, *gather_list):\n gather_list = list(gather_list)", + "detail": "modules.arcface_torch.partial_fc_v2", + "documentation": {} + }, + { + "label": "AllGatherFunc", + "kind": 6, + "importPath": "modules.arcface_torch.partial_fc_v2", + "description": "modules.arcface_torch.partial_fc_v2", + "peekOfCode": "class AllGatherFunc(torch.autograd.Function):\n \"\"\"AllGather op with gradient backward\"\"\"\n @staticmethod\n def forward(ctx, tensor, *gather_list):\n gather_list = list(gather_list)\n distributed.all_gather(gather_list, tensor)\n return tuple(gather_list)\n @staticmethod\n def backward(ctx, *grads):\n grad_list = list(grads)", + "detail": "modules.arcface_torch.partial_fc_v2", + "documentation": {} + }, + { + "label": "AllGather", + "kind": 5, + "importPath": "modules.arcface_torch.partial_fc_v2", + "description": "modules.arcface_torch.partial_fc_v2", + "peekOfCode": "AllGather = AllGatherFunc.apply", + "detail": "modules.arcface_torch.partial_fc_v2", + "documentation": {} + }, + { + "label": "convert_onnx", + "kind": 2, + "importPath": "modules.arcface_torch.torch2onnx", + "description": "modules.arcface_torch.torch2onnx", + "peekOfCode": "def convert_onnx(net, path_module, output, opset=11, simplify=False):\n assert isinstance(net, torch.nn.Module)\n img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.int32)\n img = img.astype(np.float)\n img = (img / 255. 
- 0.5) / 0.5 # torch style norm\n img = img.transpose((2, 0, 1))\n img = torch.from_numpy(img).unsqueeze(0).float()\n weight = torch.load(path_module)\n net.load_state_dict(weight, strict=True)\n net.eval()", + "detail": "modules.arcface_torch.torch2onnx", + "documentation": {} + }, + { + "label": "main", + "kind": 2, + "importPath": "modules.arcface_torch.train", + "description": "modules.arcface_torch.train", + "peekOfCode": "def main(args):\n # get config\n cfg = get_config(args.config)\n # global control random seed\n setup_seed(seed=cfg.seed, cuda_deterministic=False)\n torch.cuda.set_device(args.local_rank)\n os.makedirs(cfg.output, exist_ok=True)\n init_logging(rank, cfg.output)\n summary_writer = (\n SummaryWriter(log_dir=os.path.join(cfg.output, \"tensorboard\"))", + "detail": "modules.arcface_torch.train", + "documentation": {} + }, + { + "label": "main", + "kind": 2, + "importPath": "modules.arcface_torch.train_v2", + "description": "modules.arcface_torch.train_v2", + "peekOfCode": "def main(args):\n # get config\n cfg = get_config(args.config)\n # global control random seed\n setup_seed(seed=cfg.seed, cuda_deterministic=False)\n torch.cuda.set_device(args.local_rank)\n os.makedirs(cfg.output, exist_ok=True)\n init_logging(rank, cfg.output)\n summary_writer = (\n SummaryWriter(log_dir=os.path.join(cfg.output, \"tensorboard\"))", + "detail": "modules.arcface_torch.train_v2", + "documentation": {} + }, + { + "label": "AFLW2000", + "kind": 6, + "importPath": "modules.DECA.decalib.datasets.aflw2000", + "description": "modules.DECA.decalib.datasets.aflw2000", + "peekOfCode": "class AFLW2000(Dataset):\n def __init__(self, testpath='/ps/scratch/yfeng/Data/AFLW2000/GT', crop_size=224):\n '''\n data class for loading AFLW2000 dataset\n make sure each image has a corresponding mat file, which provides cropping information\n '''\n if os.path.isdir(testpath): \n self.imagepath_list = glob(testpath + '/*.jpg') + glob(testpath + '/*.png')\n elif isinstance(testpath, list):\n self.imagepath_list = testpath", + "detail": "modules.DECA.decalib.datasets.aflw2000", + "documentation": {} + }, + { + "label": "build_train", + "kind": 2, + "importPath": "modules.DECA.decalib.datasets.build_datasets", + "description": "modules.DECA.decalib.datasets.build_datasets", + "peekOfCode": "def build_train(config, is_train=True):\n data_list = []\n if 'vox2' in config.training_data:\n data_list.append(VoxelDataset(dataname='vox2', K=config.K, image_size=config.image_size, scale=[config.scale_min, config.scale_max], trans_scale=config.trans_scale, isSingle=config.isSingle))\n if 'vggface2' in config.training_data:\n data_list.append(VGGFace2Dataset(K=config.K, image_size=config.image_size, scale=[config.scale_min, config.scale_max], trans_scale=config.trans_scale, isSingle=config.isSingle))\n if 'vggface2hq' in config.training_data:\n data_list.append(VGGFace2HQDataset(K=config.K, image_size=config.image_size, scale=[config.scale_min, config.scale_max], trans_scale=config.trans_scale, isSingle=config.isSingle))\n if 'ethnicity' in config.training_data:\n data_list.append(EthnicityDataset(K=config.K, image_size=config.image_size, scale=[config.scale_min, config.scale_max], trans_scale=config.trans_scale, isSingle=config.isSingle))", + "detail": "modules.DECA.decalib.datasets.build_datasets", + "documentation": {} + }, + { + "label": "build_val", + "kind": 2, + "importPath": "modules.DECA.decalib.datasets.build_datasets", + "description": "modules.DECA.decalib.datasets.build_datasets", + "peekOfCode": "def 
build_val(config, is_train=True):\n data_list = []\n if 'vggface2' in config.eval_data:\n data_list.append(VGGFace2Dataset(isEval=True, K=config.K, image_size=config.image_size, scale=[config.scale_min, config.scale_max], trans_scale=config.trans_scale, isSingle=config.isSingle))\n if 'now' in config.eval_data:\n data_list.append(NoWDataset())\n if 'aflw2000' in config.eval_data:\n data_list.append(AFLW2000())\n dataset = ConcatDataset(data_list)\n return dataset", + "detail": "modules.DECA.decalib.datasets.build_datasets", + "documentation": {} + }, + { + "label": "TestData", + "kind": 6, + "importPath": "modules.DECA.decalib.datasets.datasets", + "description": "modules.DECA.decalib.datasets.datasets", + "peekOfCode": "class TestData(Dataset):\n def __init__(self, testpath=None, iscrop=True, crop_size=224, scale=1.25, face_detector='fan'):\n '''\n testpath: folder, imagepath_list, image path, video path\n '''\n def glob_exts_in_path(path, img_ext=['png', 'jpg']):\n from functools import reduce\n return reduce(\n lambda before, ext: before+glob(\n os.path.join(path, f'*.{ext}')", + "detail": "modules.DECA.decalib.datasets.datasets", + "documentation": {} + }, + { + "label": "video2sequence", + "kind": 2, + "importPath": "modules.DECA.decalib.datasets.datasets", + "description": "modules.DECA.decalib.datasets.datasets", + "peekOfCode": "def video2sequence(video_path):\n videofolder = os.path.splitext(video_path)[0]\n os.makedirs(videofolder, exist_ok=True)\n video_name = os.path.splitext(os.path.split(video_path)[-1])[0]\n vidcap = cv2.VideoCapture(video_path)\n success,image = vidcap.read()\n count = 0\n imagepath_list = []\n while success:\n imagepath = os.path.join(videofolder, f'{video_name}_frame{count:04d}.jpg')", + "detail": "modules.DECA.decalib.datasets.datasets", + "documentation": {} + }, + { + "label": "FAN", + "kind": 6, + "importPath": "modules.DECA.decalib.datasets.detectors", + "description": "modules.DECA.decalib.datasets.detectors", + "peekOfCode": "class FAN(object):\n def __init__(self):\n import face_alignment\n self.model = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=False)\n def run(self, image,**kwargs):\n '''\n image: 0-255, uint8, rgb, [h, w, 3]\n return: detected box list\n '''\n h,w,_=image.shape", + "detail": "modules.DECA.decalib.datasets.detectors", + "documentation": {} + }, + { + "label": "MTCNN", + "kind": 6, + "importPath": "modules.DECA.decalib.datasets.detectors", + "description": "modules.DECA.decalib.datasets.detectors", + "peekOfCode": "class MTCNN(object):\n def __init__(self, device = 'cpu'):\n '''\n https://github.com/timesler/facenet-pytorch/blob/master/examples/infer.ipynb\n '''\n from facenet_pytorch import MTCNN as mtcnn\n self.device = device\n self.model = mtcnn(keep_all=True)\n def run(self, input):\n '''", + "detail": "modules.DECA.decalib.datasets.detectors", + "documentation": {} + }, + { + "label": "EthnicityDataset", + "kind": 6, + "importPath": "modules.DECA.decalib.datasets.ethnicity", + "description": "modules.DECA.decalib.datasets.ethnicity", + "peekOfCode": "class EthnicityDataset(Dataset):\n def __init__(self, K, image_size, scale, trans_scale = 0, isTemporal=False, isEval=False, isSingle=False):\n '''\n K must be less than 6\n '''\n self.K = K\n self.image_size = image_size\n self.imagefolder = '/ps/scratch/face2d3d/train'\n self.kptfolder = '/ps/scratch/face2d3d/train_annotated_torch7/'\n self.segfolder = '/ps/scratch/face2d3d/texture_in_the_wild_code/VGGFace2_seg/test_crop_size_400_batch/'", + 
"detail": "modules.DECA.decalib.datasets.ethnicity", + "documentation": {} + }, + { + "label": "NoWDataset", + "kind": 6, + "importPath": "modules.DECA.decalib.datasets.now", + "description": "modules.DECA.decalib.datasets.now", + "peekOfCode": "class NoWDataset(Dataset):\n def __init__(self, ring_elements=6, crop_size=224, scale=1.6):\n folder = '/ps/scratch/yfeng/other-github/now_evaluation/data/NoW_Dataset'\n self.data_path = os.path.join(folder, 'imagepathsvalidation.txt')\n with open(self.data_path) as f:\n self.data_lines = f.readlines()\n self.imagefolder = os.path.join(folder, 'final_release_version', 'iphone_pictures')\n self.bbxfolder = os.path.join(folder, 'final_release_version', 'detected_face')\n # self.data_path = '/ps/scratch/face2d3d/ringnetpp/eccv/test_data/evaluation/NoW_Dataset/final_release_version/test_image_paths_ring_6_elements.npy'\n # self.imagepath = '/ps/scratch/face2d3d/ringnetpp/eccv/test_data/evaluation/NoW_Dataset/final_release_version/iphone_pictures/'", + "detail": "modules.DECA.decalib.datasets.now", + "documentation": {} + }, + { + "label": "VoxelDataset", + "kind": 6, + "importPath": "modules.DECA.decalib.datasets.train_datasets", + "description": "modules.DECA.decalib.datasets.train_datasets", + "peekOfCode": "class VoxelDataset(Dataset):\n def __init__(self, K, image_size, scale, trans_scale = 0, dataname='vox2', n_train=100000, isTemporal=False, isEval=False, isSingle=False):\n self.K = K\n self.image_size = image_size\n if dataname == 'vox1':\n self.kpt_suffix = '.txt'\n self.imagefolder = '/ps/project/face2d3d/VoxCeleb/vox1/dev/images_cropped'\n self.kptfolder = '/ps/scratch/yfeng/Data/VoxCeleb/vox1/landmark_2d'\n self.face_dict = {}\n for person_id in sorted(os.listdir(self.kptfolder)):", + "detail": "modules.DECA.decalib.datasets.train_datasets", + "documentation": {} + }, + { + "label": "COCODataset", + "kind": 6, + "importPath": "modules.DECA.decalib.datasets.train_datasets", + "description": "modules.DECA.decalib.datasets.train_datasets", + "peekOfCode": "class COCODataset(Dataset):\n def __init__(self, image_size, scale, trans_scale = 0, isEval=False):\n '''\n # 53877 faces\n K must be less than 6\n '''\n self.image_size = image_size\n self.imagefolder = '/ps/scratch/yfeng/Data/COCO/raw/train2017'\n self.kptfolder = '/ps/scratch/yfeng/Data/COCO/face/train2017_kpt'\n self.kptpath_list = os.listdir(self.kptfolder)", + "detail": "modules.DECA.decalib.datasets.train_datasets", + "documentation": {} + }, + { + "label": "CelebAHQDataset", + "kind": 6, + "importPath": "modules.DECA.decalib.datasets.train_datasets", + "description": "modules.DECA.decalib.datasets.train_datasets", + "peekOfCode": "class CelebAHQDataset(Dataset):\n def __init__(self, image_size, scale, trans_scale = 0, isEval=False):\n '''\n # 53877 faces\n K must be less than 6\n '''\n self.image_size = image_size\n self.imagefolder = '/ps/project/face2d3d/faceHQ_100K/celebA-HQ/celebahq_resized_256'\n self.kptfolder = '/ps/project/face2d3d/faceHQ_100K/celebA-HQ/celebahq_resized_256_torch'\n self.kptpath_list = os.listdir(self.kptfolder)", + "detail": "modules.DECA.decalib.datasets.train_datasets", + "documentation": {} + }, + { + "label": "TestData", + "kind": 6, + "importPath": "modules.DECA.decalib.datasets.train_datasets", + "description": "modules.DECA.decalib.datasets.train_datasets", + "peekOfCode": "class TestData(Dataset):\n def __init__(self, testpath, iscrop=True, crop_size=224, scale=1.25, face_detector='fan', face_detector_model=None):\n '''\n testpath: folder, 
imagepath_list, image path, video path\n '''\n if isinstance(testpath, list):\n self.imagepath_list = testpath\n elif os.path.isdir(testpath): \n self.imagepath_list = glob(testpath + '/*.jpg') + glob(testpath + '/*.png') + glob(testpath + '/*.bmp')\n elif os.path.isfile(testpath) and (testpath[-3:] in ['jpg', 'png', 'bmp']):", + "detail": "modules.DECA.decalib.datasets.train_datasets", + "documentation": {} + }, + { + "label": "EvalData", + "kind": 6, + "importPath": "modules.DECA.decalib.datasets.train_datasets", + "description": "modules.DECA.decalib.datasets.train_datasets", + "peekOfCode": "class EvalData(Dataset):\n def __init__(self, testpath, kptfolder, iscrop=True, crop_size=224, scale=1.25, face_detector='fan', face_detector_model=None):\n '''\n testpath: folder, imagepath_list, image path, video path\n '''\n if isinstance(testpath, list):\n self.imagepath_list = testpath\n elif os.path.isdir(testpath): \n self.imagepath_list = glob(testpath + '/*.jpg') + glob(testpath + '/*.png')\n elif os.path.isfile(testpath) and (testpath[-3:] in ['jpg', 'png']):", + "detail": "modules.DECA.decalib.datasets.train_datasets", + "documentation": {} + }, + { + "label": "build_dataloader", + "kind": 2, + "importPath": "modules.DECA.decalib.datasets.train_datasets", + "description": "modules.DECA.decalib.datasets.train_datasets", + "peekOfCode": "def build_dataloader(config, is_train=True):\n data_list = []\n if 'vox1' in config.training_data:\n data_list.append(VoxelDataset(K=config.K, image_size=config.image_size, scale=[config.scale_min, config.scale_max], n_train=config.n_train, isSingle=config.isSingle))\n if 'vox2' in config.training_data:\n data_list.append(VoxelDataset(dataname='vox2', K=config.K, image_size=config.image_size, scale=[config.scale_min, config.scale_max], n_train=config.n_train, isSingle=config.isSingle))\n if 'vggface2' in config.training_data:\n data_list.append(VGGFace2Dataset(K=config.K, image_size=config.image_size, scale=[config.scale_min, config.scale_max], trans_scale=config.trans_scale, isSingle=config.isSingle))\n if 'vggface2hq' in config.training_data:\n data_list.append(VGGFace2HQDataset(K=config.K, image_size=config.image_size, scale=[config.scale_min, config.scale_max], trans_scale=config.trans_scale, isSingle=config.isSingle))", + "detail": "modules.DECA.decalib.datasets.train_datasets", + "documentation": {} + }, + { + "label": "video2sequence", + "kind": 2, + "importPath": "modules.DECA.decalib.datasets.train_datasets", + "description": "modules.DECA.decalib.datasets.train_datasets", + "peekOfCode": "def video2sequence(video_path):\n videofolder = video_path.split('.')[0] \n os.makedirs(videofolder, exist_ok=True)\n video_name = video_path.split('/')[-1].split('.')[0]\n # import ipdb; ipdb.set_trace()\n vidcap = cv2.VideoCapture(video_path)\n success,image = vidcap.read()\n count = 0\n imagepath_list = []\n while success:", + "detail": "modules.DECA.decalib.datasets.train_datasets", + "documentation": {} + }, + { + "label": "VGGFace2Dataset", + "kind": 6, + "importPath": "modules.DECA.decalib.datasets.vggface", + "description": "modules.DECA.decalib.datasets.vggface", + "peekOfCode": "class VGGFace2Dataset(Dataset):\n def __init__(self, K, image_size, scale, trans_scale = 0, isTemporal=False, isEval=False, isSingle=False):\n '''\n K must be less than 6\n '''\n self.K = K\n self.image_size = image_size\n self.imagefolder = '/ps/scratch/face2d3d/train'\n self.kptfolder = '/ps/scratch/face2d3d/train_annotated_torch7'\n self.segfolder = 
'/ps/scratch/face2d3d/texture_in_the_wild_code/VGGFace2_seg/test_crop_size_400_batch'", + "detail": "modules.DECA.decalib.datasets.vggface", + "documentation": {} + }, + { + "label": "VGGFace2HQDataset", + "kind": 6, + "importPath": "modules.DECA.decalib.datasets.vggface", + "description": "modules.DECA.decalib.datasets.vggface", + "peekOfCode": "class VGGFace2HQDataset(Dataset):\n def __init__(self, K, image_size, scale, trans_scale = 0, isTemporal=False, isEval=False, isSingle=False):\n '''\n K must be less than 6\n '''\n self.K = K\n self.image_size = image_size\n self.imagefolder = '/ps/scratch/face2d3d/train'\n self.kptfolder = '/ps/scratch/face2d3d/train_annotated_torch7'\n self.segfolder = '/ps/scratch/face2d3d/texture_in_the_wild_code/VGGFace2_seg/test_crop_size_400_batch'", + "detail": "modules.DECA.decalib.datasets.vggface", + "documentation": {} + }, + { + "label": "VoxelDataset", + "kind": 6, + "importPath": "modules.DECA.decalib.datasets.vox", + "description": "modules.DECA.decalib.datasets.vox", + "peekOfCode": "class VoxelDataset(Dataset):\n def __init__(self, K, image_size, scale, trans_scale = 0, dataname='vox2', n_train=100000, isTemporal=False, isEval=False, isSingle=False):\n self.K = K\n self.image_size = image_size\n if dataname == 'vox1':\n self.kpt_suffix = '.txt'\n self.imagefolder = '/ps/project/face2d3d/VoxCeleb/vox1/dev/images_cropped'\n self.kptfolder = '/ps/scratch/yfeng/Data/VoxCeleb/vox1/landmark_2d'\n self.face_dict = {}\n for person_id in sorted(os.listdir(self.kptfolder)):", + "detail": "modules.DECA.decalib.datasets.vox", + "documentation": {} + }, + { + "label": "Generator", + "kind": 6, + "importPath": "modules.DECA.decalib.models.decoders", + "description": "modules.DECA.decalib.models.decoders", + "peekOfCode": "class Generator(nn.Module):\n def __init__(self, latent_dim=100, out_channels=1, out_scale=0.01, sample_mode = 'bilinear'):\n super(Generator, self).__init__()\n self.out_scale = out_scale\n self.init_size = 32 // 4 # Initial size before upsampling\n self.l1 = nn.Sequential(nn.Linear(latent_dim, 128 * self.init_size ** 2))\n self.conv_blocks = nn.Sequential(\n nn.BatchNorm2d(128),\n nn.Upsample(scale_factor=2, mode=sample_mode), #16\n nn.Conv2d(128, 128, 3, stride=1, padding=1),", + "detail": "modules.DECA.decalib.models.decoders", + "documentation": {} + }, + { + "label": "ResnetEncoder", + "kind": 6, + "importPath": "modules.DECA.decalib.models.encoders", + "description": "modules.DECA.decalib.models.encoders", + "peekOfCode": "class ResnetEncoder(nn.Module):\n def __init__(self, outsize, last_op=None):\n super(ResnetEncoder, self).__init__()\n feature_size = 2048\n self.encoder = resnet.load_ResNet50Model() #out: 2048\n ### regressor\n self.layers = nn.Sequential(\n nn.Linear(feature_size, 1024),\n nn.ReLU(),\n nn.Linear(1024, outsize)", + "detail": "modules.DECA.decalib.models.encoders", + "documentation": {} + }, + { + "label": "Struct", + "kind": 6, + "importPath": "modules.DECA.decalib.models.FLAME", + "description": "modules.DECA.decalib.models.FLAME", + "peekOfCode": "class Struct(object):\n def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)\nclass FLAME(nn.Module):\n \"\"\"\n borrowed from https://github.com/soubhiksanyal/FLAME_PyTorch/blob/master/FLAME.py\n Given flame parameters this class generates a differentiable FLAME function\n which outputs a mesh and 2D/3D facial landmarks\n \"\"\"", + "detail": "modules.DECA.decalib.models.FLAME", + "documentation": {} + }, + { + "label": "FLAME", 
+ "kind": 6, + "importPath": "modules.DECA.decalib.models.FLAME", + "description": "modules.DECA.decalib.models.FLAME", + "peekOfCode": "class FLAME(nn.Module):\n \"\"\"\n borrowed from https://github.com/soubhiksanyal/FLAME_PyTorch/blob/master/FLAME.py\n Given flame parameters this class generates a differentiable FLAME function\n which outputs a mesh and 2D/3D facial landmarks\n \"\"\"\n def __init__(self, config):\n super(FLAME, self).__init__()\n print(\"creating the FLAME Decoder\")\n with open(config.flame_model_path, 'rb') as f:", + "detail": "modules.DECA.decalib.models.FLAME", + "documentation": {} + }, + { + "label": "FLAMETex", + "kind": 6, + "importPath": "modules.DECA.decalib.models.FLAME", + "description": "modules.DECA.decalib.models.FLAME", + "peekOfCode": "class FLAMETex(nn.Module):\n \"\"\"\n FLAME texture:\n https://github.com/TimoBolkart/TF_FLAME/blob/ade0ab152300ec5f0e8555d6765411555c5ed43d/sample_texture.py#L64\n FLAME texture converted from BFM:\n https://github.com/TimoBolkart/BFM_to_FLAME\n \"\"\"\n def __init__(self, config):\n super(FLAMETex, self).__init__()\n if config.tex_type == 'BFM':", + "detail": "modules.DECA.decalib.models.FLAME", + "documentation": {} + }, + { + "label": "to_tensor", + "kind": 2, + "importPath": "modules.DECA.decalib.models.FLAME", + "description": "modules.DECA.decalib.models.FLAME", + "peekOfCode": "def to_tensor(array, dtype=torch.float32):\n if 'torch.tensor' not in str(type(array)):\n return torch.tensor(array, dtype=dtype)\ndef to_np(array, dtype=np.float32):\n if 'scipy.sparse' in str(type(array)):\n array = array.todense()\n return np.array(array, dtype=dtype)\nclass Struct(object):\n def __init__(self, **kwargs):\n for key, val in kwargs.items():", + "detail": "modules.DECA.decalib.models.FLAME", + "documentation": {} + }, + { + "label": "to_np", + "kind": 2, + "importPath": "modules.DECA.decalib.models.FLAME", + "description": "modules.DECA.decalib.models.FLAME", + "peekOfCode": "def to_np(array, dtype=np.float32):\n if 'scipy.sparse' in str(type(array)):\n array = array.todense()\n return np.array(array, dtype=dtype)\nclass Struct(object):\n def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)\nclass FLAME(nn.Module):\n \"\"\"", + "detail": "modules.DECA.decalib.models.FLAME", + "documentation": {} + }, + { + "label": "BasicBlock", + "kind": 6, + "importPath": "modules.DECA.decalib.models.frnet", + "description": "modules.DECA.decalib.models.frnet", + "peekOfCode": "class BasicBlock(nn.Module):\n expansion = 1\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample", + "detail": "modules.DECA.decalib.models.frnet", + "documentation": {} + }, + { + "label": "Bottleneck", + "kind": 6, + "importPath": "modules.DECA.decalib.models.frnet", + "description": "modules.DECA.decalib.models.frnet", + "peekOfCode": "class Bottleneck(nn.Module):\n expansion = 4\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = 
nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)", + "detail": "modules.DECA.decalib.models.frnet", + "documentation": {} + }, + { + "label": "ResNet", + "kind": 6, + "importPath": "modules.DECA.decalib.models.frnet", + "description": "modules.DECA.decalib.models.frnet", + "peekOfCode": "class ResNet(nn.Module):\n def __init__(self, block, layers, num_classes=1000, include_top=True):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.include_top = include_top\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True)\n self.layer1 = self._make_layer(block, 64, layers[0])", + "detail": "modules.DECA.decalib.models.frnet", + "documentation": {} + }, + { + "label": "conv3x3", + "kind": 2, + "importPath": "modules.DECA.decalib.models.frnet", + "description": "modules.DECA.decalib.models.frnet", + "peekOfCode": "def conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\nclass BasicBlock(nn.Module):\n expansion = 1\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)", + "detail": "modules.DECA.decalib.models.frnet", + "documentation": {} + }, + { + "label": "resnet50", + "kind": 2, + "importPath": "modules.DECA.decalib.models.frnet", + "description": "modules.DECA.decalib.models.frnet", + "peekOfCode": "def resnet50(**kwargs):\n \"\"\"Constructs a ResNet-50 model.\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model\nimport pickle\ndef load_state_dict(model, fname):\n \"\"\"\n Set parameters converted from the Caffe models that the authors of VGGFace2 provide.\n See https://www.robots.ox.ac.uk/~vgg/data/vgg_face2/.", + "detail": "modules.DECA.decalib.models.frnet", + "documentation": {} + }, + { + "label": "load_state_dict", + "kind": 2, + "importPath": "modules.DECA.decalib.models.frnet", + "description": "modules.DECA.decalib.models.frnet", + "peekOfCode": "def load_state_dict(model, fname):\n \"\"\"\n Set parameters converted from the Caffe models that the authors of VGGFace2 provide.\n See https://www.robots.ox.ac.uk/~vgg/data/vgg_face2/.\n Arguments:\n model: model\n fname: file name of parameters converted from a Caffe model, assuming the file format is Pickle.\n \"\"\"\n with open(fname, 'rb') as f:\n weights = pickle.load(f, encoding='latin1')", + "detail": "modules.DECA.decalib.models.frnet", + "documentation": {} + }, + { + "label": "rot_mat_to_euler", + "kind": 2, + "importPath": "modules.DECA.decalib.models.lbs", + "description": "modules.DECA.decalib.models.lbs", + "peekOfCode": "def rot_mat_to_euler(rot_mats):\n # Calculates rotation matrix to euler angles\n # Careful with extreme cases of euler angles like [0.0, pi, 0.0]\n sy = torch.sqrt(rot_mats[:, 0, 0] * rot_mats[:, 0, 0] +\n rot_mats[:, 1, 0] * rot_mats[:, 1, 0])\n return torch.atan2(-rot_mats[:, 2, 0], sy)\ndef find_dynamic_lmk_idx_and_bcoords(vertices, pose, dynamic_lmk_faces_idx,\n dynamic_lmk_b_coords,\n neck_kin_chain, dtype=torch.float32):\n ''' Compute the faces, barycentric coordinates for the dynamic landmarks", + "detail": "modules.DECA.decalib.models.lbs", + "documentation": {} + }, + { + "label": "find_dynamic_lmk_idx_and_bcoords", 
+ "kind": 2, + "importPath": "modules.DECA.decalib.models.lbs", + "description": "modules.DECA.decalib.models.lbs", + "peekOfCode": "def find_dynamic_lmk_idx_and_bcoords(vertices, pose, dynamic_lmk_faces_idx,\n dynamic_lmk_b_coords,\n neck_kin_chain, dtype=torch.float32):\n ''' Compute the faces, barycentric coordinates for the dynamic landmarks\n To do so, we first compute the rotation of the neck around the y-axis\n and then use a pre-computed look-up table to find the faces and the\n barycentric coordinates that will be used.\n Special thanks to Soubhik Sanyal (soubhik.sanyal@tuebingen.mpg.de)\n for providing the original TensorFlow implementation and for the LUT.\n Parameters", + "detail": "modules.DECA.decalib.models.lbs", + "documentation": {} + }, + { + "label": "vertices2landmarks", + "kind": 2, + "importPath": "modules.DECA.decalib.models.lbs", + "description": "modules.DECA.decalib.models.lbs", + "peekOfCode": "def vertices2landmarks(vertices, faces, lmk_faces_idx, lmk_bary_coords):\n ''' Calculates landmarks by barycentric interpolation\n Parameters\n ----------\n vertices: torch.tensor BxVx3, dtype = torch.float32\n The tensor of input vertices\n faces: torch.tensor Fx3, dtype = torch.long\n The faces of the mesh\n lmk_faces_idx: torch.tensor L, dtype = torch.long\n The tensor with the indices of the faces used to calculate the", + "detail": "modules.DECA.decalib.models.lbs", + "documentation": {} + }, + { + "label": "lbs", + "kind": 2, + "importPath": "modules.DECA.decalib.models.lbs", + "description": "modules.DECA.decalib.models.lbs", + "peekOfCode": "def lbs(betas, pose, v_template, shapedirs, posedirs, J_regressor, parents,\n lbs_weights, pose2rot=True, dtype=torch.float32):\n ''' Performs Linear Blend Skinning with the given shape and pose parameters\n Parameters\n ----------\n betas : torch.tensor BxNB\n The tensor of shape parameters\n pose : torch.tensor Bx(J + 1) * 3\n The pose parameters in axis-angle format\n v_template torch.tensor BxVx3", + "detail": "modules.DECA.decalib.models.lbs", + "documentation": {} + }, + { + "label": "vertices2joints", + "kind": 2, + "importPath": "modules.DECA.decalib.models.lbs", + "description": "modules.DECA.decalib.models.lbs", + "peekOfCode": "def vertices2joints(J_regressor, vertices):\n ''' Calculates the 3D joint locations from the vertices\n Parameters\n ----------\n J_regressor : torch.tensor JxV\n The regressor array that is used to calculate the joints from the\n position of the vertices\n vertices : torch.tensor BxVx3\n The tensor of mesh vertices\n Returns", + "detail": "modules.DECA.decalib.models.lbs", + "documentation": {} + }, + { + "label": "blend_shapes", + "kind": 2, + "importPath": "modules.DECA.decalib.models.lbs", + "description": "modules.DECA.decalib.models.lbs", + "peekOfCode": "def blend_shapes(betas, shape_disps):\n ''' Calculates the per vertex displacement due to the blend shapes\n Parameters\n ----------\n betas : torch.tensor Bx(num_betas)\n Blend shape coefficients\n shape_disps: torch.tensor Vx3x(num_betas)\n Blend shapes\n Returns\n -------", + "detail": "modules.DECA.decalib.models.lbs", + "documentation": {} + }, + { + "label": "batch_rodrigues", + "kind": 2, + "importPath": "modules.DECA.decalib.models.lbs", + "description": "modules.DECA.decalib.models.lbs", + "peekOfCode": "def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):\n ''' Calculates the rotation matrices for a batch of rotation vectors\n Parameters\n ----------\n rot_vecs: torch.tensor Nx3\n array of N axis-angle vectors\n 
Returns\n -------\n R: torch.tensor Nx3x3\n The rotation matrices for the given axis-angle parameters", + "detail": "modules.DECA.decalib.models.lbs", + "documentation": {} + }, + { + "label": "transform_mat", + "kind": 2, + "importPath": "modules.DECA.decalib.models.lbs", + "description": "modules.DECA.decalib.models.lbs", + "peekOfCode": "def transform_mat(R, t):\n ''' Creates a batch of transformation matrices\n Args:\n - R: Bx3x3 array of a batch of rotation matrices\n - t: Bx3x1 array of a batch of translation vectors\n Returns:\n - T: Bx4x4 Transformation matrix\n '''\n # No padding left or right, only add an extra row\n return torch.cat([F.pad(R, [0, 0, 0, 1]),", + "detail": "modules.DECA.decalib.models.lbs", + "documentation": {} + }, + { + "label": "batch_rigid_transform", + "kind": 2, + "importPath": "modules.DECA.decalib.models.lbs", + "description": "modules.DECA.decalib.models.lbs", + "peekOfCode": "def batch_rigid_transform(rot_mats, joints, parents, dtype=torch.float32):\n \"\"\"\n Applies a batch of rigid transformations to the joints\n Parameters\n ----------\n rot_mats : torch.tensor BxNx3x3\n Tensor of rotation matrices\n joints : torch.tensor BxNx3\n Locations of joints\n parents : torch.tensor BxN", + "detail": "modules.DECA.decalib.models.lbs", + "documentation": {} + }, + { + "label": "ResNet", + "kind": 6, + "importPath": "modules.DECA.decalib.models.resnet", + "description": "modules.DECA.decalib.models.resnet", + "peekOfCode": "class ResNet(nn.Module):\n def __init__(self, block, layers, num_classes=1000):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])", + "detail": "modules.DECA.decalib.models.resnet", + "documentation": {} + }, + { + "label": "Bottleneck", + "kind": 6, + "importPath": "modules.DECA.decalib.models.resnet", + "description": "modules.DECA.decalib.models.resnet", + "peekOfCode": "class Bottleneck(nn.Module):\n expansion = 4\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)", + "detail": "modules.DECA.decalib.models.resnet", + "documentation": {} + }, + { + "label": "BasicBlock", + "kind": 6, + "importPath": "modules.DECA.decalib.models.resnet", + "description": "modules.DECA.decalib.models.resnet", + "peekOfCode": "class BasicBlock(nn.Module):\n expansion = 1\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample", + "detail": "modules.DECA.decalib.models.resnet", + "documentation": {} + }, + { + "label": "DoubleConv", + "kind": 6, + "importPath": "modules.DECA.decalib.models.resnet", + "description": "modules.DECA.decalib.models.resnet", + "peekOfCode": "class DoubleConv(nn.Module):\n \"\"\"(convolution => [BN] => ReLU) * 2\"\"\"\n def 
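The `batch_rodrigues` entry specifies axis-angle vectors (Nx3) mapped to rotation matrices (Nx3x3). A self-contained sketch of Rodrigues' formula consistent with that documented contract; the function name here is illustrative:

```python
import torch

def rodrigues(rot_vecs, epsilon=1e-8):
    # theta = |v|; K = skew(v/theta); R = I + sin(theta) K + (1 - cos(theta)) K^2
    angle = torch.norm(rot_vecs + epsilon, dim=1, keepdim=True)   # Nx1
    rx, ry, rz = (rot_vecs / angle).unbind(dim=1)                 # each N
    zeros = torch.zeros_like(rx)
    K = torch.stack([zeros, -rz, ry,
                     rz, zeros, -rx,
                     -ry, rx, zeros], dim=1).view(-1, 3, 3)
    sin = torch.sin(angle).view(-1, 1, 1)
    cos = torch.cos(angle).view(-1, 1, 1)
    eye = torch.eye(3, dtype=rot_vecs.dtype, device=rot_vecs.device)
    return eye + sin * K + (1 - cos) * torch.bmm(K, K)

R = rodrigues(torch.tensor([[0.0, 0.0, 1.5708]]))  # ~90 degrees about z
print(torch.det(R))  # ~1, a proper rotation
```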
__init__(self, in_channels, out_channels):\n super().__init__()\n self.double_conv = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True),\n nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_channels),", + "detail": "modules.DECA.decalib.models.resnet", + "documentation": {} + }, + { + "label": "Down", + "kind": 6, + "importPath": "modules.DECA.decalib.models.resnet", + "description": "modules.DECA.decalib.models.resnet", + "peekOfCode": "class Down(nn.Module):\n \"\"\"Downscaling with maxpool then double conv\"\"\"\n def __init__(self, in_channels, out_channels):\n super().__init__()\n self.maxpool_conv = nn.Sequential(\n nn.MaxPool2d(2),\n DoubleConv(in_channels, out_channels)\n )\n def forward(self, x):\n return self.maxpool_conv(x)", + "detail": "modules.DECA.decalib.models.resnet", + "documentation": {} + }, + { + "label": "Up", + "kind": 6, + "importPath": "modules.DECA.decalib.models.resnet", + "description": "modules.DECA.decalib.models.resnet", + "peekOfCode": "class Up(nn.Module):\n \"\"\"Upscaling then double conv\"\"\"\n def __init__(self, in_channels, out_channels, bilinear=True):\n super().__init__()\n # if bilinear, use the normal convolutions to reduce the number of channels\n if bilinear:\n self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)\n else:\n self.up = nn.ConvTranspose2d(in_channels // 2, in_channels // 2, kernel_size=2, stride=2)\n self.conv = DoubleConv(in_channels, out_channels)", + "detail": "modules.DECA.decalib.models.resnet", + "documentation": {} + }, + { + "label": "OutConv", + "kind": 6, + "importPath": "modules.DECA.decalib.models.resnet", + "description": "modules.DECA.decalib.models.resnet", + "peekOfCode": "class OutConv(nn.Module):\n def __init__(self, in_channels, out_channels):\n super(OutConv, self).__init__()\n self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)\n def forward(self, x):\n return self.conv(x)", + "detail": "modules.DECA.decalib.models.resnet", + "documentation": {} + }, + { + "label": "conv3x3", + "kind": 2, + "importPath": "modules.DECA.decalib.models.resnet", + "description": "modules.DECA.decalib.models.resnet", + "peekOfCode": "def conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\nclass BasicBlock(nn.Module):\n expansion = 1\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)", + "detail": "modules.DECA.decalib.models.resnet", + "documentation": {} + }, + { + "label": "copy_parameter_from_resnet", + "kind": 2, + "importPath": "modules.DECA.decalib.models.resnet", + "description": "modules.DECA.decalib.models.resnet", + "peekOfCode": "def copy_parameter_from_resnet(model, resnet_dict):\n cur_state_dict = model.state_dict()\n # import ipdb; ipdb.set_trace()\n for name, param in list(resnet_dict.items())[0:None]:\n if name not in cur_state_dict:\n # print(name, ' not available in reconstructed resnet')\n continue\n if isinstance(param, Parameter):\n param = param.data\n try:", + "detail": "modules.DECA.decalib.models.resnet", + "documentation": {} + }, + { + "label": "load_ResNet50Model", + "kind": 2, + "importPath": "modules.DECA.decalib.models.resnet", + "description": "modules.DECA.decalib.models.resnet", + 
"peekOfCode": "def load_ResNet50Model():\n model = ResNet(Bottleneck, [3, 4, 6, 3])\n copy_parameter_from_resnet(model, torchvision.models.resnet50(pretrained = False).state_dict())\n return model\ndef load_ResNet101Model():\n model = ResNet(Bottleneck, [3, 4, 23, 3])\n copy_parameter_from_resnet(model, torchvision.models.resnet101(pretrained = True).state_dict())\n return model\ndef load_ResNet152Model():\n model = ResNet(Bottleneck, [3, 8, 36, 3])", + "detail": "modules.DECA.decalib.models.resnet", + "documentation": {} + }, + { + "label": "load_ResNet101Model", + "kind": 2, + "importPath": "modules.DECA.decalib.models.resnet", + "description": "modules.DECA.decalib.models.resnet", + "peekOfCode": "def load_ResNet101Model():\n model = ResNet(Bottleneck, [3, 4, 23, 3])\n copy_parameter_from_resnet(model, torchvision.models.resnet101(pretrained = True).state_dict())\n return model\ndef load_ResNet152Model():\n model = ResNet(Bottleneck, [3, 8, 36, 3])\n copy_parameter_from_resnet(model, torchvision.models.resnet152(pretrained = True).state_dict())\n return model\n# model.load_state_dict(checkpoint['model_state_dict'])\n######## Unet", + "detail": "modules.DECA.decalib.models.resnet", + "documentation": {} + }, + { + "label": "load_ResNet152Model", + "kind": 2, + "importPath": "modules.DECA.decalib.models.resnet", + "description": "modules.DECA.decalib.models.resnet", + "peekOfCode": "def load_ResNet152Model():\n model = ResNet(Bottleneck, [3, 8, 36, 3])\n copy_parameter_from_resnet(model, torchvision.models.resnet152(pretrained = True).state_dict())\n return model\n# model.load_state_dict(checkpoint['model_state_dict'])\n######## Unet\nclass DoubleConv(nn.Module):\n \"\"\"(convolution => [BN] => ReLU) * 2\"\"\"\n def __init__(self, in_channels, out_channels):\n super().__init__()", + "detail": "modules.DECA.decalib.models.resnet", + "documentation": {} + }, + { + "label": "os.environ[\"CC\"]", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.rasterizer.setup", + "description": "modules.DECA.decalib.utils.rasterizer.setup", + "peekOfCode": "os.environ[\"CC\"] = \"gcc-7\"\nos.environ[\"CXX\"] = \"gcc-7\"\nUSE_NINJA = os.getenv('USE_NINJA') == '1'\nsetup(\n name='standard_rasterize_cuda',\n ext_modules=[\n\tCUDAExtension('standard_rasterize_cuda', [\n 'standard_rasterize_cuda.cpp',\n 'standard_rasterize_cuda_kernel.cu',\n ])", + "detail": "modules.DECA.decalib.utils.rasterizer.setup", + "documentation": {} + }, + { + "label": "os.environ[\"CXX\"]", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.rasterizer.setup", + "description": "modules.DECA.decalib.utils.rasterizer.setup", + "peekOfCode": "os.environ[\"CXX\"] = \"gcc-7\"\nUSE_NINJA = os.getenv('USE_NINJA') == '1'\nsetup(\n name='standard_rasterize_cuda',\n ext_modules=[\n\tCUDAExtension('standard_rasterize_cuda', [\n 'standard_rasterize_cuda.cpp',\n 'standard_rasterize_cuda_kernel.cu',\n ])\n\t],", + "detail": "modules.DECA.decalib.utils.rasterizer.setup", + "documentation": {} + }, + { + "label": "USE_NINJA", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.rasterizer.setup", + "description": "modules.DECA.decalib.utils.rasterizer.setup", + "peekOfCode": "USE_NINJA = os.getenv('USE_NINJA') == '1'\nsetup(\n name='standard_rasterize_cuda',\n ext_modules=[\n\tCUDAExtension('standard_rasterize_cuda', [\n 'standard_rasterize_cuda.cpp',\n 'standard_rasterize_cuda_kernel.cu',\n ])\n\t],\n cmdclass={'build_ext': BuildExtension.with_options(use_ninja=USE_NINJA)}", + "detail": "modules.DECA.decalib.utils.rasterizer.setup", 
+ "documentation": {} + }, + { + "label": "get_cfg_defaults", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "def get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return cfg.clone()\ndef update_cfg(cfg, cfg_file):\n cfg.merge_from_file(cfg_file)\n return cfg.clone()\ndef parse_args():\n parser = argparse.ArgumentParser()", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "update_cfg", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "def update_cfg(cfg, cfg_file):\n cfg.merge_from_file(cfg_file)\n return cfg.clone()\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--cfg', type=str, help='cfg file path')\n parser.add_argument('--mode', type=str, default = 'train', help='deca mode')\n args = parser.parse_args()\n print(args, end='\\n\\n')\n cfg = get_cfg_defaults()", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "parse_args", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--cfg', type=str, help='cfg file path')\n parser.add_argument('--mode', type=str, default = 'train', help='deca mode')\n args = parser.parse_args()\n print(args, end='\\n\\n')\n cfg = get_cfg_defaults()\n cfg.cfg_file = None\n cfg.mode = args.mode\n # import ipdb; ipdb.set_trace()", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg = CN()\n#TODO model path here\n# deca_dir-->\n# data-->\n# deca_model.tar\n# ......\nabs_deca_dir = os.path.abspath(os.path.join(\nos.path.dirname(__file__), '../../../../../models/models_deca'))\n# print(f'abs_deca_dir:{abs_deca_dir}')\ncfg.deca_dir = abs_deca_dir", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "abs_deca_dir", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "abs_deca_dir = os.path.abspath(os.path.join(\nos.path.dirname(__file__), '../../../../../models/models_deca'))\n# print(f'abs_deca_dir:{abs_deca_dir}')\ncfg.deca_dir = abs_deca_dir\ncfg.device = 'cuda'\ncfg.device_id = '0'\ncfg.pretrained_modelpath = os.path.join(cfg.deca_dir, 'data', 'deca_model.tar')\ncfg.output_dir = ''\ncfg.rasterizer_type = 'pytorch3d'\n# ---------------------------------------------------------------------------- #", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.deca_dir", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.deca_dir = abs_deca_dir\ncfg.device = 'cuda'\ncfg.device_id = '0'\ncfg.pretrained_modelpath = os.path.join(cfg.deca_dir, 'data', 'deca_model.tar')\ncfg.output_dir = ''\ncfg.rasterizer_type = 'pytorch3d'\n# ---------------------------------------------------------------------------- #\n# Options for Face model\n# 
---------------------------------------------------------------------------- #\ncfg.model = CN()", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.device", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.device = 'cuda'\ncfg.device_id = '0'\ncfg.pretrained_modelpath = os.path.join(cfg.deca_dir, 'data', 'deca_model.tar')\ncfg.output_dir = ''\ncfg.rasterizer_type = 'pytorch3d'\n# ---------------------------------------------------------------------------- #\n# Options for Face model\n# ---------------------------------------------------------------------------- #\ncfg.model = CN()\ncfg.model.topology_path = os.path.join(cfg.deca_dir, 'data', 'head_template.obj')", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.device_id", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.device_id = '0'\ncfg.pretrained_modelpath = os.path.join(cfg.deca_dir, 'data', 'deca_model.tar')\ncfg.output_dir = ''\ncfg.rasterizer_type = 'pytorch3d'\n# ---------------------------------------------------------------------------- #\n# Options for Face model\n# ---------------------------------------------------------------------------- #\ncfg.model = CN()\ncfg.model.topology_path = os.path.join(cfg.deca_dir, 'data', 'head_template.obj')\n# texture data original from http://files.is.tue.mpg.de/tbolkart/FLAME/FLAME_texture_data.zip", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.pretrained_modelpath", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.pretrained_modelpath = os.path.join(cfg.deca_dir, 'data', 'deca_model.tar')\ncfg.output_dir = ''\ncfg.rasterizer_type = 'pytorch3d'\n# ---------------------------------------------------------------------------- #\n# Options for Face model\n# ---------------------------------------------------------------------------- #\ncfg.model = CN()\ncfg.model.topology_path = os.path.join(cfg.deca_dir, 'data', 'head_template.obj')\n# texture data original from http://files.is.tue.mpg.de/tbolkart/FLAME/FLAME_texture_data.zip\ncfg.model.dense_template_path = os.path.join(cfg.deca_dir, 'data', 'texture_data_256.npy')", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.output_dir", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.output_dir = ''\ncfg.rasterizer_type = 'pytorch3d'\n# ---------------------------------------------------------------------------- #\n# Options for Face model\n# ---------------------------------------------------------------------------- #\ncfg.model = CN()\ncfg.model.topology_path = os.path.join(cfg.deca_dir, 'data', 'head_template.obj')\n# texture data original from http://files.is.tue.mpg.de/tbolkart/FLAME/FLAME_texture_data.zip\ncfg.model.dense_template_path = os.path.join(cfg.deca_dir, 'data', 'texture_data_256.npy')\ncfg.model.fixed_displacement_path = os.path.join(cfg.deca_dir, 'data', 'fixed_displacement_256.npy')", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.rasterizer_type", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": 
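The config entries above follow the standard yacs pattern: a module-level `CN()` tree of defaults, `get_cfg_defaults()` returning a clone, and `update_cfg` merging a YAML file on top. A distilled, runnable sketch of that pattern; the two fields shown are taken from the indexed defaults, the rest of the tree is elided:

```python
from yacs.config import CfgNode as CN

cfg = CN()
cfg.device = 'cuda'
cfg.model = CN()
cfg.model.uv_size = 256
cfg.model.tex_type = 'BFM'   # BFM, FLAME, albedoMM

def get_cfg_defaults():
    # Clone so callers never mutate the module-level defaults
    return cfg.clone()

def update_cfg(cfg, cfg_file):
    # YAML keys must already exist in the defaults; yacs type-checks them
    cfg.merge_from_file(cfg_file)
    return cfg.clone()
```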
"modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.rasterizer_type = 'pytorch3d'\n# ---------------------------------------------------------------------------- #\n# Options for Face model\n# ---------------------------------------------------------------------------- #\ncfg.model = CN()\ncfg.model.topology_path = os.path.join(cfg.deca_dir, 'data', 'head_template.obj')\n# texture data original from http://files.is.tue.mpg.de/tbolkart/FLAME/FLAME_texture_data.zip\ncfg.model.dense_template_path = os.path.join(cfg.deca_dir, 'data', 'texture_data_256.npy')\ncfg.model.fixed_displacement_path = os.path.join(cfg.deca_dir, 'data', 'fixed_displacement_256.npy')\ncfg.model.flame_model_path = os.path.join(cfg.deca_dir, 'data', 'generic_model.pkl') ", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.model = CN()\ncfg.model.topology_path = os.path.join(cfg.deca_dir, 'data', 'head_template.obj')\n# texture data original from http://files.is.tue.mpg.de/tbolkart/FLAME/FLAME_texture_data.zip\ncfg.model.dense_template_path = os.path.join(cfg.deca_dir, 'data', 'texture_data_256.npy')\ncfg.model.fixed_displacement_path = os.path.join(cfg.deca_dir, 'data', 'fixed_displacement_256.npy')\ncfg.model.flame_model_path = os.path.join(cfg.deca_dir, 'data', 'generic_model.pkl') \ncfg.model.flame_lmk_embedding_path = os.path.join(cfg.deca_dir, 'data', 'landmark_embedding.npy') \ncfg.model.face_mask_path = os.path.join(cfg.deca_dir, 'data', 'uv_face_mask.png') \ncfg.model.face_eye_mask_path = os.path.join(cfg.deca_dir, 'data', 'uv_face_eye_mask.png') \ncfg.model.mean_tex_path = os.path.join(cfg.deca_dir, 'data', 'mean_texture.jpg') ", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.topology_path", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.model.topology_path = os.path.join(cfg.deca_dir, 'data', 'head_template.obj')\n# texture data original from http://files.is.tue.mpg.de/tbolkart/FLAME/FLAME_texture_data.zip\ncfg.model.dense_template_path = os.path.join(cfg.deca_dir, 'data', 'texture_data_256.npy')\ncfg.model.fixed_displacement_path = os.path.join(cfg.deca_dir, 'data', 'fixed_displacement_256.npy')\ncfg.model.flame_model_path = os.path.join(cfg.deca_dir, 'data', 'generic_model.pkl') \ncfg.model.flame_lmk_embedding_path = os.path.join(cfg.deca_dir, 'data', 'landmark_embedding.npy') \ncfg.model.face_mask_path = os.path.join(cfg.deca_dir, 'data', 'uv_face_mask.png') \ncfg.model.face_eye_mask_path = os.path.join(cfg.deca_dir, 'data', 'uv_face_eye_mask.png') \ncfg.model.mean_tex_path = os.path.join(cfg.deca_dir, 'data', 'mean_texture.jpg') \ncfg.model.tex_path = os.path.join(cfg.deca_dir, 'data', 'FLAME_albedo_from_BFM.npz') ", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.dense_template_path", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.model.dense_template_path = os.path.join(cfg.deca_dir, 'data', 'texture_data_256.npy')\ncfg.model.fixed_displacement_path = os.path.join(cfg.deca_dir, 'data', 'fixed_displacement_256.npy')\ncfg.model.flame_model_path = os.path.join(cfg.deca_dir, 'data', 'generic_model.pkl') 
\ncfg.model.flame_lmk_embedding_path = os.path.join(cfg.deca_dir, 'data', 'landmark_embedding.npy') \ncfg.model.face_mask_path = os.path.join(cfg.deca_dir, 'data', 'uv_face_mask.png') \ncfg.model.face_eye_mask_path = os.path.join(cfg.deca_dir, 'data', 'uv_face_eye_mask.png') \ncfg.model.mean_tex_path = os.path.join(cfg.deca_dir, 'data', 'mean_texture.jpg') \ncfg.model.tex_path = os.path.join(cfg.deca_dir, 'data', 'FLAME_albedo_from_BFM.npz') \ncfg.model.tex_type = 'BFM' # BFM, FLAME, albedoMM\ncfg.model.uv_size = 256", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.fixed_displacement_path", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.model.fixed_displacement_path = os.path.join(cfg.deca_dir, 'data', 'fixed_displacement_256.npy')\ncfg.model.flame_model_path = os.path.join(cfg.deca_dir, 'data', 'generic_model.pkl') \ncfg.model.flame_lmk_embedding_path = os.path.join(cfg.deca_dir, 'data', 'landmark_embedding.npy') \ncfg.model.face_mask_path = os.path.join(cfg.deca_dir, 'data', 'uv_face_mask.png') \ncfg.model.face_eye_mask_path = os.path.join(cfg.deca_dir, 'data', 'uv_face_eye_mask.png') \ncfg.model.mean_tex_path = os.path.join(cfg.deca_dir, 'data', 'mean_texture.jpg') \ncfg.model.tex_path = os.path.join(cfg.deca_dir, 'data', 'FLAME_albedo_from_BFM.npz') \ncfg.model.tex_type = 'BFM' # BFM, FLAME, albedoMM\ncfg.model.uv_size = 256\ncfg.model.param_list = ['shape', 'tex', 'exp', 'pose', 'cam', 'light']", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.flame_model_path", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.model.flame_model_path = os.path.join(cfg.deca_dir, 'data', 'generic_model.pkl') \ncfg.model.flame_lmk_embedding_path = os.path.join(cfg.deca_dir, 'data', 'landmark_embedding.npy') \ncfg.model.face_mask_path = os.path.join(cfg.deca_dir, 'data', 'uv_face_mask.png') \ncfg.model.face_eye_mask_path = os.path.join(cfg.deca_dir, 'data', 'uv_face_eye_mask.png') \ncfg.model.mean_tex_path = os.path.join(cfg.deca_dir, 'data', 'mean_texture.jpg') \ncfg.model.tex_path = os.path.join(cfg.deca_dir, 'data', 'FLAME_albedo_from_BFM.npz') \ncfg.model.tex_type = 'BFM' # BFM, FLAME, albedoMM\ncfg.model.uv_size = 256\ncfg.model.param_list = ['shape', 'tex', 'exp', 'pose', 'cam', 'light']\ncfg.model.n_shape = 100", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.flame_lmk_embedding_path", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.model.flame_lmk_embedding_path = os.path.join(cfg.deca_dir, 'data', 'landmark_embedding.npy') \ncfg.model.face_mask_path = os.path.join(cfg.deca_dir, 'data', 'uv_face_mask.png') \ncfg.model.face_eye_mask_path = os.path.join(cfg.deca_dir, 'data', 'uv_face_eye_mask.png') \ncfg.model.mean_tex_path = os.path.join(cfg.deca_dir, 'data', 'mean_texture.jpg') \ncfg.model.tex_path = os.path.join(cfg.deca_dir, 'data', 'FLAME_albedo_from_BFM.npz') \ncfg.model.tex_type = 'BFM' # BFM, FLAME, albedoMM\ncfg.model.uv_size = 256\ncfg.model.param_list = ['shape', 'tex', 'exp', 'pose', 'cam', 'light']\ncfg.model.n_shape = 100\ncfg.model.n_tex = 50", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": 
"cfg.model.face_mask_path", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.model.face_mask_path = os.path.join(cfg.deca_dir, 'data', 'uv_face_mask.png') \ncfg.model.face_eye_mask_path = os.path.join(cfg.deca_dir, 'data', 'uv_face_eye_mask.png') \ncfg.model.mean_tex_path = os.path.join(cfg.deca_dir, 'data', 'mean_texture.jpg') \ncfg.model.tex_path = os.path.join(cfg.deca_dir, 'data', 'FLAME_albedo_from_BFM.npz') \ncfg.model.tex_type = 'BFM' # BFM, FLAME, albedoMM\ncfg.model.uv_size = 256\ncfg.model.param_list = ['shape', 'tex', 'exp', 'pose', 'cam', 'light']\ncfg.model.n_shape = 100\ncfg.model.n_tex = 50\ncfg.model.n_exp = 50", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.face_eye_mask_path", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.model.face_eye_mask_path = os.path.join(cfg.deca_dir, 'data', 'uv_face_eye_mask.png') \ncfg.model.mean_tex_path = os.path.join(cfg.deca_dir, 'data', 'mean_texture.jpg') \ncfg.model.tex_path = os.path.join(cfg.deca_dir, 'data', 'FLAME_albedo_from_BFM.npz') \ncfg.model.tex_type = 'BFM' # BFM, FLAME, albedoMM\ncfg.model.uv_size = 256\ncfg.model.param_list = ['shape', 'tex', 'exp', 'pose', 'cam', 'light']\ncfg.model.n_shape = 100\ncfg.model.n_tex = 50\ncfg.model.n_exp = 50\ncfg.model.n_cam = 3", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.mean_tex_path", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.model.mean_tex_path = os.path.join(cfg.deca_dir, 'data', 'mean_texture.jpg') \ncfg.model.tex_path = os.path.join(cfg.deca_dir, 'data', 'FLAME_albedo_from_BFM.npz') \ncfg.model.tex_type = 'BFM' # BFM, FLAME, albedoMM\ncfg.model.uv_size = 256\ncfg.model.param_list = ['shape', 'tex', 'exp', 'pose', 'cam', 'light']\ncfg.model.n_shape = 100\ncfg.model.n_tex = 50\ncfg.model.n_exp = 50\ncfg.model.n_cam = 3\ncfg.model.n_pose = 6", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.tex_path", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.model.tex_path = os.path.join(cfg.deca_dir, 'data', 'FLAME_albedo_from_BFM.npz') \ncfg.model.tex_type = 'BFM' # BFM, FLAME, albedoMM\ncfg.model.uv_size = 256\ncfg.model.param_list = ['shape', 'tex', 'exp', 'pose', 'cam', 'light']\ncfg.model.n_shape = 100\ncfg.model.n_tex = 50\ncfg.model.n_exp = 50\ncfg.model.n_cam = 3\ncfg.model.n_pose = 6\ncfg.model.n_light = 27", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.tex_type", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.model.tex_type = 'BFM' # BFM, FLAME, albedoMM\ncfg.model.uv_size = 256\ncfg.model.param_list = ['shape', 'tex', 'exp', 'pose', 'cam', 'light']\ncfg.model.n_shape = 100\ncfg.model.n_tex = 50\ncfg.model.n_exp = 50\ncfg.model.n_cam = 3\ncfg.model.n_pose = 6\ncfg.model.n_light = 27\ncfg.model.use_tex = True", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.uv_size", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": 
"modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.model.uv_size = 256\ncfg.model.param_list = ['shape', 'tex', 'exp', 'pose', 'cam', 'light']\ncfg.model.n_shape = 100\ncfg.model.n_tex = 50\ncfg.model.n_exp = 50\ncfg.model.n_cam = 3\ncfg.model.n_pose = 6\ncfg.model.n_light = 27\ncfg.model.use_tex = True\ncfg.model.jaw_type = 'aa' # default use axis angle, another option: euler. Note that: aa is not stable in the beginning", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.param_list", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.model.param_list = ['shape', 'tex', 'exp', 'pose', 'cam', 'light']\ncfg.model.n_shape = 100\ncfg.model.n_tex = 50\ncfg.model.n_exp = 50\ncfg.model.n_cam = 3\ncfg.model.n_pose = 6\ncfg.model.n_light = 27\ncfg.model.use_tex = True\ncfg.model.jaw_type = 'aa' # default use axis angle, another option: euler. Note that: aa is not stable in the beginning\n# face recognition model", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.n_shape", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.model.n_shape = 100\ncfg.model.n_tex = 50\ncfg.model.n_exp = 50\ncfg.model.n_cam = 3\ncfg.model.n_pose = 6\ncfg.model.n_light = 27\ncfg.model.use_tex = True\ncfg.model.jaw_type = 'aa' # default use axis angle, another option: euler. Note that: aa is not stable in the beginning\n# face recognition model\ncfg.model.fr_model_path = os.path.join(cfg.deca_dir, 'data', 'resnet50_ft_weight.pkl')", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.n_tex", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.model.n_tex = 50\ncfg.model.n_exp = 50\ncfg.model.n_cam = 3\ncfg.model.n_pose = 6\ncfg.model.n_light = 27\ncfg.model.use_tex = True\ncfg.model.jaw_type = 'aa' # default use axis angle, another option: euler. Note that: aa is not stable in the beginning\n# face recognition model\ncfg.model.fr_model_path = os.path.join(cfg.deca_dir, 'data', 'resnet50_ft_weight.pkl')\n## details", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.n_exp", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.model.n_exp = 50\ncfg.model.n_cam = 3\ncfg.model.n_pose = 6\ncfg.model.n_light = 27\ncfg.model.use_tex = True\ncfg.model.jaw_type = 'aa' # default use axis angle, another option: euler. Note that: aa is not stable in the beginning\n# face recognition model\ncfg.model.fr_model_path = os.path.join(cfg.deca_dir, 'data', 'resnet50_ft_weight.pkl')\n## details\ncfg.model.n_detail = 128", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.n_cam", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.model.n_cam = 3\ncfg.model.n_pose = 6\ncfg.model.n_light = 27\ncfg.model.use_tex = True\ncfg.model.jaw_type = 'aa' # default use axis angle, another option: euler. 
Note that: aa is not stable in the beginning\n# face recognition model\ncfg.model.fr_model_path = os.path.join(cfg.deca_dir, 'data', 'resnet50_ft_weight.pkl')\n## details\ncfg.model.n_detail = 128\ncfg.model.max_z = 0.01", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.n_pose", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.model.n_pose = 6\ncfg.model.n_light = 27\ncfg.model.use_tex = True\ncfg.model.jaw_type = 'aa' # default use axis angle, another option: euler. Note that: aa is not stable in the beginning\n# face recognition model\ncfg.model.fr_model_path = os.path.join(cfg.deca_dir, 'data', 'resnet50_ft_weight.pkl')\n## details\ncfg.model.n_detail = 128\ncfg.model.max_z = 0.01\n# ---------------------------------------------------------------------------- #", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.n_light", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.model.n_light = 27\ncfg.model.use_tex = True\ncfg.model.jaw_type = 'aa' # default use axis angle, another option: euler. Note that: aa is not stable in the beginning\n# face recognition model\ncfg.model.fr_model_path = os.path.join(cfg.deca_dir, 'data', 'resnet50_ft_weight.pkl')\n## details\ncfg.model.n_detail = 128\ncfg.model.max_z = 0.01\n# ---------------------------------------------------------------------------- #\n# Options for Dataset", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.use_tex", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.model.use_tex = True\ncfg.model.jaw_type = 'aa' # default use axis angle, another option: euler. Note that: aa is not stable in the beginning\n# face recognition model\ncfg.model.fr_model_path = os.path.join(cfg.deca_dir, 'data', 'resnet50_ft_weight.pkl')\n## details\ncfg.model.n_detail = 128\ncfg.model.max_z = 0.01\n# ---------------------------------------------------------------------------- #\n# Options for Dataset\n# ---------------------------------------------------------------------------- #", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.jaw_type", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.model.jaw_type = 'aa' # default use axis angle, another option: euler. 
Note that: aa is not stable in the beginning\n# face recognition model\ncfg.model.fr_model_path = os.path.join(cfg.deca_dir, 'data', 'resnet50_ft_weight.pkl')\n## details\ncfg.model.n_detail = 128\ncfg.model.max_z = 0.01\n# ---------------------------------------------------------------------------- #\n# Options for Dataset\n# ---------------------------------------------------------------------------- #\ncfg.dataset = CN()", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.fr_model_path", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.model.fr_model_path = os.path.join(cfg.deca_dir, 'data', 'resnet50_ft_weight.pkl')\n## details\ncfg.model.n_detail = 128\ncfg.model.max_z = 0.01\n# ---------------------------------------------------------------------------- #\n# Options for Dataset\n# ---------------------------------------------------------------------------- #\ncfg.dataset = CN()\ncfg.dataset.training_data = ['vggface2', 'ethnicity']\n# cfg.dataset.training_data = ['ethnicity']", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.n_detail", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.model.n_detail = 128\ncfg.model.max_z = 0.01\n# ---------------------------------------------------------------------------- #\n# Options for Dataset\n# ---------------------------------------------------------------------------- #\ncfg.dataset = CN()\ncfg.dataset.training_data = ['vggface2', 'ethnicity']\n# cfg.dataset.training_data = ['ethnicity']\ncfg.dataset.eval_data = ['aflw2000']\ncfg.dataset.test_data = ['']", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.max_z", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.model.max_z = 0.01\n# ---------------------------------------------------------------------------- #\n# Options for Dataset\n# ---------------------------------------------------------------------------- #\ncfg.dataset = CN()\ncfg.dataset.training_data = ['vggface2', 'ethnicity']\n# cfg.dataset.training_data = ['ethnicity']\ncfg.dataset.eval_data = ['aflw2000']\ncfg.dataset.test_data = ['']\ncfg.dataset.batch_size = 2", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.dataset = CN()\ncfg.dataset.training_data = ['vggface2', 'ethnicity']\n# cfg.dataset.training_data = ['ethnicity']\ncfg.dataset.eval_data = ['aflw2000']\ncfg.dataset.test_data = ['']\ncfg.dataset.batch_size = 2\ncfg.dataset.K = 4\ncfg.dataset.isSingle = False\ncfg.dataset.num_workers = 2\ncfg.dataset.image_size = 224", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.training_data", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.dataset.training_data = ['vggface2', 'ethnicity']\n# cfg.dataset.training_data = ['ethnicity']\ncfg.dataset.eval_data = ['aflw2000']\ncfg.dataset.test_data = ['']\ncfg.dataset.batch_size = 2\ncfg.dataset.K = 4\ncfg.dataset.isSingle 
= False\ncfg.dataset.num_workers = 2\ncfg.dataset.image_size = 224\ncfg.dataset.scale_min = 1.4", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.eval_data", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.dataset.eval_data = ['aflw2000']\ncfg.dataset.test_data = ['']\ncfg.dataset.batch_size = 2\ncfg.dataset.K = 4\ncfg.dataset.isSingle = False\ncfg.dataset.num_workers = 2\ncfg.dataset.image_size = 224\ncfg.dataset.scale_min = 1.4\ncfg.dataset.scale_max = 1.8\ncfg.dataset.trans_scale = 0.", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.test_data", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.dataset.test_data = ['']\ncfg.dataset.batch_size = 2\ncfg.dataset.K = 4\ncfg.dataset.isSingle = False\ncfg.dataset.num_workers = 2\ncfg.dataset.image_size = 224\ncfg.dataset.scale_min = 1.4\ncfg.dataset.scale_max = 1.8\ncfg.dataset.trans_scale = 0.\n# ---------------------------------------------------------------------------- #", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.batch_size", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.dataset.batch_size = 2\ncfg.dataset.K = 4\ncfg.dataset.isSingle = False\ncfg.dataset.num_workers = 2\ncfg.dataset.image_size = 224\ncfg.dataset.scale_min = 1.4\ncfg.dataset.scale_max = 1.8\ncfg.dataset.trans_scale = 0.\n# ---------------------------------------------------------------------------- #\n# Options for training", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.K", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.dataset.K = 4\ncfg.dataset.isSingle = False\ncfg.dataset.num_workers = 2\ncfg.dataset.image_size = 224\ncfg.dataset.scale_min = 1.4\ncfg.dataset.scale_max = 1.8\ncfg.dataset.trans_scale = 0.\n# ---------------------------------------------------------------------------- #\n# Options for training\n# ---------------------------------------------------------------------------- #", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.isSingle", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.dataset.isSingle = False\ncfg.dataset.num_workers = 2\ncfg.dataset.image_size = 224\ncfg.dataset.scale_min = 1.4\ncfg.dataset.scale_max = 1.8\ncfg.dataset.trans_scale = 0.\n# ---------------------------------------------------------------------------- #\n# Options for training\n# ---------------------------------------------------------------------------- #\ncfg.train = CN()", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.num_workers", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.dataset.num_workers = 2\ncfg.dataset.image_size = 224\ncfg.dataset.scale_min = 1.4\ncfg.dataset.scale_max = 1.8\ncfg.dataset.trans_scale = 0.\n# 
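The `cfg.dataset` block above fixes `batch_size=2`, `K=4`, `num_workers=2`, and 224-pixel crops scaled between 1.4 and 1.8. Purely illustrative wiring of those values into a PyTorch DataLoader; the random tensors stand in for face crops:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

images = torch.randn(16, 3, 224, 224)          # stand-in for face crops
# Guard with `if __name__ == '__main__':` on spawn-based platforms
loader = DataLoader(TensorDataset(images),
                    batch_size=2, num_workers=2, shuffle=True)
for (batch,) in loader:
    assert batch.shape == (2, 3, 224, 224)
    break
```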
---------------------------------------------------------------------------- #\n# Options for training\n# ---------------------------------------------------------------------------- #\ncfg.train = CN()\ncfg.train.train_detail = False", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.image_size", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.dataset.image_size = 224\ncfg.dataset.scale_min = 1.4\ncfg.dataset.scale_max = 1.8\ncfg.dataset.trans_scale = 0.\n# ---------------------------------------------------------------------------- #\n# Options for training\n# ---------------------------------------------------------------------------- #\ncfg.train = CN()\ncfg.train.train_detail = False\ncfg.train.max_epochs = 500", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.scale_min", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.dataset.scale_min = 1.4\ncfg.dataset.scale_max = 1.8\ncfg.dataset.trans_scale = 0.\n# ---------------------------------------------------------------------------- #\n# Options for training\n# ---------------------------------------------------------------------------- #\ncfg.train = CN()\ncfg.train.train_detail = False\ncfg.train.max_epochs = 500\ncfg.train.max_steps = 1000000", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.scale_max", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.dataset.scale_max = 1.8\ncfg.dataset.trans_scale = 0.\n# ---------------------------------------------------------------------------- #\n# Options for training\n# ---------------------------------------------------------------------------- #\ncfg.train = CN()\ncfg.train.train_detail = False\ncfg.train.max_epochs = 500\ncfg.train.max_steps = 1000000\ncfg.train.lr = 1e-4", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.trans_scale", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.dataset.trans_scale = 0.\n# ---------------------------------------------------------------------------- #\n# Options for training\n# ---------------------------------------------------------------------------- #\ncfg.train = CN()\ncfg.train.train_detail = False\ncfg.train.max_epochs = 500\ncfg.train.max_steps = 1000000\ncfg.train.lr = 1e-4\ncfg.train.log_dir = 'logs'", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.train", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.train = CN()\ncfg.train.train_detail = False\ncfg.train.max_epochs = 500\ncfg.train.max_steps = 1000000\ncfg.train.lr = 1e-4\ncfg.train.log_dir = 'logs'\ncfg.train.log_steps = 10\ncfg.train.vis_dir = 'train_images'\ncfg.train.vis_steps = 200\ncfg.train.write_summary = True", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.train.train_detail", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": 
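The `cfg.train` cadences indexed above (log every 10 steps, visualize every 200, checkpoint and validate every 500, evaluate every 5000, stop at 1e6 steps or 500 epochs) are typically consumed by a step counter like the following; the loop body is a stub, not this repository's trainer:

```python
def fit(cfg_train, steps_per_epoch=100):
    step = 0
    for epoch in range(cfg_train.max_epochs):
        for _ in range(steps_per_epoch):
            if step % cfg_train.log_steps == 0:
                pass  # write scalar summaries when cfg.train.write_summary
            if step % cfg_train.vis_steps == 0:
                pass  # dump images under cfg.train.vis_dir
            if step % cfg_train.checkpoint_steps == 0:
                pass  # persist weights; cfg.train.resume restarts from here
            step += 1
            if step >= cfg_train.max_steps:
                return step
    return step
```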
"cfg.train.train_detail = False\ncfg.train.max_epochs = 500\ncfg.train.max_steps = 1000000\ncfg.train.lr = 1e-4\ncfg.train.log_dir = 'logs'\ncfg.train.log_steps = 10\ncfg.train.vis_dir = 'train_images'\ncfg.train.vis_steps = 200\ncfg.train.write_summary = True\ncfg.train.checkpoint_steps = 500", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.train.max_epochs", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.train.max_epochs = 500\ncfg.train.max_steps = 1000000\ncfg.train.lr = 1e-4\ncfg.train.log_dir = 'logs'\ncfg.train.log_steps = 10\ncfg.train.vis_dir = 'train_images'\ncfg.train.vis_steps = 200\ncfg.train.write_summary = True\ncfg.train.checkpoint_steps = 500\ncfg.train.val_steps = 500", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.train.max_steps", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.train.max_steps = 1000000\ncfg.train.lr = 1e-4\ncfg.train.log_dir = 'logs'\ncfg.train.log_steps = 10\ncfg.train.vis_dir = 'train_images'\ncfg.train.vis_steps = 200\ncfg.train.write_summary = True\ncfg.train.checkpoint_steps = 500\ncfg.train.val_steps = 500\ncfg.train.val_vis_dir = 'val_images'", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.train.lr", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.train.lr = 1e-4\ncfg.train.log_dir = 'logs'\ncfg.train.log_steps = 10\ncfg.train.vis_dir = 'train_images'\ncfg.train.vis_steps = 200\ncfg.train.write_summary = True\ncfg.train.checkpoint_steps = 500\ncfg.train.val_steps = 500\ncfg.train.val_vis_dir = 'val_images'\ncfg.train.eval_steps = 5000", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.train.log_dir", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.train.log_dir = 'logs'\ncfg.train.log_steps = 10\ncfg.train.vis_dir = 'train_images'\ncfg.train.vis_steps = 200\ncfg.train.write_summary = True\ncfg.train.checkpoint_steps = 500\ncfg.train.val_steps = 500\ncfg.train.val_vis_dir = 'val_images'\ncfg.train.eval_steps = 5000\ncfg.train.resume = True", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.train.log_steps", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.train.log_steps = 10\ncfg.train.vis_dir = 'train_images'\ncfg.train.vis_steps = 200\ncfg.train.write_summary = True\ncfg.train.checkpoint_steps = 500\ncfg.train.val_steps = 500\ncfg.train.val_vis_dir = 'val_images'\ncfg.train.eval_steps = 5000\ncfg.train.resume = True\n# ---------------------------------------------------------------------------- #", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.train.vis_dir", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.train.vis_dir = 'train_images'\ncfg.train.vis_steps = 200\ncfg.train.write_summary = True\ncfg.train.checkpoint_steps = 500\ncfg.train.val_steps = 500\ncfg.train.val_vis_dir = 
'val_images'\ncfg.train.eval_steps = 5000\ncfg.train.resume = True\n# ---------------------------------------------------------------------------- #\n# Options for Losses", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.train.vis_steps", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.train.vis_steps = 200\ncfg.train.write_summary = True\ncfg.train.checkpoint_steps = 500\ncfg.train.val_steps = 500\ncfg.train.val_vis_dir = 'val_images'\ncfg.train.eval_steps = 5000\ncfg.train.resume = True\n# ---------------------------------------------------------------------------- #\n# Options for Losses\n# ---------------------------------------------------------------------------- #", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.train.write_summary", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.train.write_summary = True\ncfg.train.checkpoint_steps = 500\ncfg.train.val_steps = 500\ncfg.train.val_vis_dir = 'val_images'\ncfg.train.eval_steps = 5000\ncfg.train.resume = True\n# ---------------------------------------------------------------------------- #\n# Options for Losses\n# ---------------------------------------------------------------------------- #\ncfg.loss = CN()", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.train.checkpoint_steps", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.train.checkpoint_steps = 500\ncfg.train.val_steps = 500\ncfg.train.val_vis_dir = 'val_images'\ncfg.train.eval_steps = 5000\ncfg.train.resume = True\n# ---------------------------------------------------------------------------- #\n# Options for Losses\n# ---------------------------------------------------------------------------- #\ncfg.loss = CN()\ncfg.loss.lmk = 1.0", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.train.val_steps", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.train.val_steps = 500\ncfg.train.val_vis_dir = 'val_images'\ncfg.train.eval_steps = 5000\ncfg.train.resume = True\n# ---------------------------------------------------------------------------- #\n# Options for Losses\n# ---------------------------------------------------------------------------- #\ncfg.loss = CN()\ncfg.loss.lmk = 1.0\ncfg.loss.useWlmk = True", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.train.val_vis_dir", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.train.val_vis_dir = 'val_images'\ncfg.train.eval_steps = 5000\ncfg.train.resume = True\n# ---------------------------------------------------------------------------- #\n# Options for Losses\n# ---------------------------------------------------------------------------- #\ncfg.loss = CN()\ncfg.loss.lmk = 1.0\ncfg.loss.useWlmk = True\ncfg.loss.eyed = 1.0", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.train.eval_steps", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": 
"modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.train.eval_steps = 5000\ncfg.train.resume = True\n# ---------------------------------------------------------------------------- #\n# Options for Losses\n# ---------------------------------------------------------------------------- #\ncfg.loss = CN()\ncfg.loss.lmk = 1.0\ncfg.loss.useWlmk = True\ncfg.loss.eyed = 1.0\ncfg.loss.lipd = 0.5", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.train.resume", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.train.resume = True\n# ---------------------------------------------------------------------------- #\n# Options for Losses\n# ---------------------------------------------------------------------------- #\ncfg.loss = CN()\ncfg.loss.lmk = 1.0\ncfg.loss.useWlmk = True\ncfg.loss.eyed = 1.0\ncfg.loss.lipd = 0.5\ncfg.loss.photo = 2.0", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.loss", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.loss = CN()\ncfg.loss.lmk = 1.0\ncfg.loss.useWlmk = True\ncfg.loss.eyed = 1.0\ncfg.loss.lipd = 0.5\ncfg.loss.photo = 2.0\ncfg.loss.useSeg = True\ncfg.loss.id = 0.2\ncfg.loss.id_shape_only = True\ncfg.loss.reg_shape = 1e-04", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.loss.lmk", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.loss.lmk = 1.0\ncfg.loss.useWlmk = True\ncfg.loss.eyed = 1.0\ncfg.loss.lipd = 0.5\ncfg.loss.photo = 2.0\ncfg.loss.useSeg = True\ncfg.loss.id = 0.2\ncfg.loss.id_shape_only = True\ncfg.loss.reg_shape = 1e-04\ncfg.loss.reg_exp = 1e-04", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.loss.useWlmk", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.loss.useWlmk = True\ncfg.loss.eyed = 1.0\ncfg.loss.lipd = 0.5\ncfg.loss.photo = 2.0\ncfg.loss.useSeg = True\ncfg.loss.id = 0.2\ncfg.loss.id_shape_only = True\ncfg.loss.reg_shape = 1e-04\ncfg.loss.reg_exp = 1e-04\ncfg.loss.reg_tex = 1e-04", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.loss.eyed", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.loss.eyed = 1.0\ncfg.loss.lipd = 0.5\ncfg.loss.photo = 2.0\ncfg.loss.useSeg = True\ncfg.loss.id = 0.2\ncfg.loss.id_shape_only = True\ncfg.loss.reg_shape = 1e-04\ncfg.loss.reg_exp = 1e-04\ncfg.loss.reg_tex = 1e-04\ncfg.loss.reg_light = 1.", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.loss.lipd", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.loss.lipd = 0.5\ncfg.loss.photo = 2.0\ncfg.loss.useSeg = True\ncfg.loss.id = 0.2\ncfg.loss.id_shape_only = True\ncfg.loss.reg_shape = 1e-04\ncfg.loss.reg_exp = 1e-04\ncfg.loss.reg_tex = 1e-04\ncfg.loss.reg_light = 1.\ncfg.loss.reg_jaw_pose = 0. 
#1.", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.loss.photo", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.loss.photo = 2.0\ncfg.loss.useSeg = True\ncfg.loss.id = 0.2\ncfg.loss.id_shape_only = True\ncfg.loss.reg_shape = 1e-04\ncfg.loss.reg_exp = 1e-04\ncfg.loss.reg_tex = 1e-04\ncfg.loss.reg_light = 1.\ncfg.loss.reg_jaw_pose = 0. #1.\ncfg.loss.use_gender_prior = False", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.loss.useSeg", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.loss.useSeg = True\ncfg.loss.id = 0.2\ncfg.loss.id_shape_only = True\ncfg.loss.reg_shape = 1e-04\ncfg.loss.reg_exp = 1e-04\ncfg.loss.reg_tex = 1e-04\ncfg.loss.reg_light = 1.\ncfg.loss.reg_jaw_pose = 0. #1.\ncfg.loss.use_gender_prior = False\ncfg.loss.shape_consistency = True", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.loss.id", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.loss.id = 0.2\ncfg.loss.id_shape_only = True\ncfg.loss.reg_shape = 1e-04\ncfg.loss.reg_exp = 1e-04\ncfg.loss.reg_tex = 1e-04\ncfg.loss.reg_light = 1.\ncfg.loss.reg_jaw_pose = 0. #1.\ncfg.loss.use_gender_prior = False\ncfg.loss.shape_consistency = True\n# loss for detail", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.loss.id_shape_only", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.loss.id_shape_only = True\ncfg.loss.reg_shape = 1e-04\ncfg.loss.reg_exp = 1e-04\ncfg.loss.reg_tex = 1e-04\ncfg.loss.reg_light = 1.\ncfg.loss.reg_jaw_pose = 0. #1.\ncfg.loss.use_gender_prior = False\ncfg.loss.shape_consistency = True\n# loss for detail\ncfg.loss.detail_consistency = True", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.loss.reg_shape", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.loss.reg_shape = 1e-04\ncfg.loss.reg_exp = 1e-04\ncfg.loss.reg_tex = 1e-04\ncfg.loss.reg_light = 1.\ncfg.loss.reg_jaw_pose = 0. #1.\ncfg.loss.use_gender_prior = False\ncfg.loss.shape_consistency = True\n# loss for detail\ncfg.loss.detail_consistency = True\ncfg.loss.useConstraint = True", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.loss.reg_exp", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.loss.reg_exp = 1e-04\ncfg.loss.reg_tex = 1e-04\ncfg.loss.reg_light = 1.\ncfg.loss.reg_jaw_pose = 0. #1.\ncfg.loss.use_gender_prior = False\ncfg.loss.shape_consistency = True\n# loss for detail\ncfg.loss.detail_consistency = True\ncfg.loss.useConstraint = True\ncfg.loss.mrf = 5e-2", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.loss.reg_tex", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.loss.reg_tex = 1e-04\ncfg.loss.reg_light = 1.\ncfg.loss.reg_jaw_pose = 0. 
#1.\ncfg.loss.use_gender_prior = False\ncfg.loss.shape_consistency = True\n# loss for detail\ncfg.loss.detail_consistency = True\ncfg.loss.useConstraint = True\ncfg.loss.mrf = 5e-2\ncfg.loss.photo_D = 2.", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.loss.reg_light", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.loss.reg_light = 1.\ncfg.loss.reg_jaw_pose = 0. #1.\ncfg.loss.use_gender_prior = False\ncfg.loss.shape_consistency = True\n# loss for detail\ncfg.loss.detail_consistency = True\ncfg.loss.useConstraint = True\ncfg.loss.mrf = 5e-2\ncfg.loss.photo_D = 2.\ncfg.loss.reg_sym = 0.005", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.loss.reg_jaw_pose", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.loss.reg_jaw_pose = 0. #1.\ncfg.loss.use_gender_prior = False\ncfg.loss.shape_consistency = True\n# loss for detail\ncfg.loss.detail_consistency = True\ncfg.loss.useConstraint = True\ncfg.loss.mrf = 5e-2\ncfg.loss.photo_D = 2.\ncfg.loss.reg_sym = 0.005\ncfg.loss.reg_z = 0.005", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.loss.use_gender_prior", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.loss.use_gender_prior = False\ncfg.loss.shape_consistency = True\n# loss for detail\ncfg.loss.detail_consistency = True\ncfg.loss.useConstraint = True\ncfg.loss.mrf = 5e-2\ncfg.loss.photo_D = 2.\ncfg.loss.reg_sym = 0.005\ncfg.loss.reg_z = 0.005\ncfg.loss.reg_diff = 0.005", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.loss.shape_consistency", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.loss.shape_consistency = True\n# loss for detail\ncfg.loss.detail_consistency = True\ncfg.loss.useConstraint = True\ncfg.loss.mrf = 5e-2\ncfg.loss.photo_D = 2.\ncfg.loss.reg_sym = 0.005\ncfg.loss.reg_z = 0.005\ncfg.loss.reg_diff = 0.005\ndef get_cfg_defaults():", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.loss.detail_consistency", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.loss.detail_consistency = True\ncfg.loss.useConstraint = True\ncfg.loss.mrf = 5e-2\ncfg.loss.photo_D = 2.\ncfg.loss.reg_sym = 0.005\ncfg.loss.reg_z = 0.005\ncfg.loss.reg_diff = 0.005\ndef get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"\n # Return a clone so that the defaults will not be altered", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.loss.useConstraint", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.loss.useConstraint = True\ncfg.loss.mrf = 5e-2\ncfg.loss.photo_D = 2.\ncfg.loss.reg_sym = 0.005\ncfg.loss.reg_z = 0.005\ncfg.loss.reg_diff = 0.005\ndef get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use 
pattern", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.loss.mrf", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.loss.mrf = 5e-2\ncfg.loss.photo_D = 2.\ncfg.loss.reg_sym = 0.005\ncfg.loss.reg_z = 0.005\ncfg.loss.reg_diff = 0.005\ndef get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return cfg.clone()", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.loss.photo_D", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.loss.photo_D = 2.\ncfg.loss.reg_sym = 0.005\ncfg.loss.reg_z = 0.005\ncfg.loss.reg_diff = 0.005\ndef get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return cfg.clone()\ndef update_cfg(cfg, cfg_file):", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.loss.reg_sym", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.loss.reg_sym = 0.005\ncfg.loss.reg_z = 0.005\ncfg.loss.reg_diff = 0.005\ndef get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return cfg.clone()\ndef update_cfg(cfg, cfg_file):\n cfg.merge_from_file(cfg_file)", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.loss.reg_z", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.loss.reg_z = 0.005\ncfg.loss.reg_diff = 0.005\ndef get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return cfg.clone()\ndef update_cfg(cfg, cfg_file):\n cfg.merge_from_file(cfg_file)\n return cfg.clone()", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "cfg.loss.reg_diff", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.config", + "description": "modules.DECA.decalib.utils.config", + "peekOfCode": "cfg.loss.reg_diff = 0.005\ndef get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return cfg.clone()\ndef update_cfg(cfg, cfg_file):\n cfg.merge_from_file(cfg_file)\n return cfg.clone()\ndef parse_args():", + "detail": "modules.DECA.decalib.utils.config", + "documentation": {} + }, + { + "label": "VGG19FeatLayer", + "kind": 6, + "importPath": "modules.DECA.decalib.utils.lossfunc", + "description": "modules.DECA.decalib.utils.lossfunc", + "peekOfCode": "class VGG19FeatLayer(nn.Module):\n def __init__(self):\n super(VGG19FeatLayer, self).__init__()\n self.vgg19 = models.vgg19(pretrained=True).features.eval().cuda()\n self.mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 
1).cuda()\n self.std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).cuda()\n def forward(self, x):\n out = {}\n x = x - self.mean\n x = x/self.std", + "detail": "modules.DECA.decalib.utils.lossfunc", + "documentation": {} + }, + { + "label": "IDMRFLoss", + "kind": 6, + "importPath": "modules.DECA.decalib.utils.lossfunc", + "description": "modules.DECA.decalib.utils.lossfunc", + "peekOfCode": "class IDMRFLoss(nn.Module):\n def __init__(self, featlayer=VGG19FeatLayer):\n super(IDMRFLoss, self).__init__()\n self.featlayer = featlayer()\n self.feat_style_layers = {'relu3_2': 1.0, 'relu4_2': 1.0}\n self.feat_content_layers = {'relu4_2': 1.0}\n self.bias = 1.0\n self.nn_stretch_sigma = 0.5\n self.lambda_style = 1.0\n self.lambda_content = 1.0", + "detail": "modules.DECA.decalib.utils.lossfunc", + "documentation": {} + }, + { + "label": "VGG_16", + "kind": 6, + "importPath": "modules.DECA.decalib.utils.lossfunc", + "description": "modules.DECA.decalib.utils.lossfunc", + "peekOfCode": "class VGG_16(nn.Module):\n \"\"\"\n Main Class\n \"\"\"\n def __init__(self):\n \"\"\"\n Constructor\n \"\"\"\n super().__init__()\n self.block_size = [2, 2, 3, 3, 3]", + "detail": "modules.DECA.decalib.utils.lossfunc", + "documentation": {} + }, + { + "label": "VGGLoss", + "kind": 6, + "importPath": "modules.DECA.decalib.utils.lossfunc", + "description": "modules.DECA.decalib.utils.lossfunc", + "peekOfCode": "class VGGLoss(nn.Module):\n def __init__(self):\n super(VGGLoss, self).__init__()\n self.featlayer = VGG_16().float()\n self.featlayer.load_weights(path=\"data/face_recognition_model/vgg_face_torch/VGG_FACE.t7\")\n self.featlayer = self.featlayer.cuda().eval()\n self.feat_style_layers = {'relu3_2': 1.0, 'relu4_2': 1.0}\n self.feat_content_layers = {'relu4_2': 1.0}\n self.bias = 1.0\n self.nn_stretch_sigma = 0.5", + "detail": "modules.DECA.decalib.utils.lossfunc", + "documentation": {} + }, + { + "label": "VGGFace2Loss", + "kind": 6, + "importPath": "modules.DECA.decalib.utils.lossfunc", + "description": "modules.DECA.decalib.utils.lossfunc", + "peekOfCode": "class VGGFace2Loss(nn.Module):\n def __init__(self, pretrained_model, pretrained_data='vggface2'):\n super(VGGFace2Loss, self).__init__()\n self.reg_model = resnet50(num_classes=8631, include_top=False).eval().cuda()\n load_state_dict(self.reg_model, pretrained_model)\n self.mean_bgr = torch.tensor([91.4953, 103.8827, 131.0912]).cuda()\n def reg_features(self, x):\n # out = []\n margin=10\n x = x[:,:,margin:224-margin,margin:224-margin]", + "detail": "modules.DECA.decalib.utils.lossfunc", + "documentation": {} + }, + { + "label": "l2_distance", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.lossfunc", + "description": "modules.DECA.decalib.utils.lossfunc", + "peekOfCode": "def l2_distance(verts1, verts2):\n return torch.sqrt(((verts1 - verts2)**2).sum(2)).mean(1).mean()\n### VAE\ndef kl_loss(texcode):\n \"\"\"\n recon_x: generating images\n x: origin images\n mu: latent mean\n logvar: latent log variance\n \"\"\"", + "detail": "modules.DECA.decalib.utils.lossfunc", + "documentation": {} + }, + { + "label": "kl_loss", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.lossfunc", + "description": "modules.DECA.decalib.utils.lossfunc", + "peekOfCode": "def kl_loss(texcode):\n \"\"\"\n recon_x: generating images\n x: origin images\n mu: latent mean\n logvar: latent log variance\n \"\"\"\n mu, logvar = texcode[:,:128], texcode[:,128:]\n KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)\n KLD = 
torch.sum(KLD_element).mul_(-0.5)", + "detail": "modules.DECA.decalib.utils.lossfunc", + "documentation": {} + }, + { + "label": "shading_white_loss", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.lossfunc", + "description": "modules.DECA.decalib.utils.lossfunc", + "peekOfCode": "def shading_white_loss(shading):\n '''\n regularize lighting: assume lights close to white \n '''\n # rgb_diff = (shading[:,0] - shading[:,1])**2 + (shading[:,0] - shading[:,2])**2 + (shading[:,1] - shading[:,2])**2\n # rgb_diff = (shading[:,0].mean([1,2]) - shading[:,1].mean([1,2]))**2 + (shading[:,0].mean([1,2]) - shading[:,2].mean([1,2]))**2 + (shading[:,1].mean([1,2]) - shading[:,2].mean([1,2]))**2\n # rgb_diff = (shading.mean([2, 3]) - torch.ones((shading.shape[0], 3)).float().cuda())**2\n rgb_diff = (shading.mean([0, 2, 3]) - 0.99)**2\n return rgb_diff.mean()\ndef shading_smooth_loss(shading):", + "detail": "modules.DECA.decalib.utils.lossfunc", + "documentation": {} + }, + { + "label": "shading_smooth_loss", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.lossfunc", + "description": "modules.DECA.decalib.utils.lossfunc", + "peekOfCode": "def shading_smooth_loss(shading):\n '''\n assume: shading should be smooth\n ref: Lifting AutoEncoders: Unsupervised Learning of a Fully-Disentangled 3D Morphable Model using Deep Non-Rigid Structure from Motion\n '''\n dx = shading[:,:,1:-1,1:] - shading[:,:,1:-1,:-1]\n dy = shading[:,:,1:,1:-1] - shading[:,:,:-1,1:-1]\n gradient_image = (dx**2).mean() + (dy**2).mean()\n return gradient_image.mean()\n### ------------------------------------- Losses/Regularizations for albedo", + "detail": "modules.DECA.decalib.utils.lossfunc", + "documentation": {} + }, + { + "label": "albedo_constancy_loss", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.lossfunc", + "description": "modules.DECA.decalib.utils.lossfunc", + "peekOfCode": "def albedo_constancy_loss(albedo, alpha = 15, weight = 1.):\n '''\n for similarity of neighbors\n ref: Self-supervised Multi-level Face Model Learning for Monocular Reconstruction at over 250 Hz\n Towards High-fidelity Nonlinear 3D Face Morphable Model\n '''\n albedo_chromaticity = albedo/(torch.sum(albedo, dim=1, keepdim=True) + 1e-6)\n weight_x = torch.exp(-alpha*(albedo_chromaticity[:,:,1:,:] - albedo_chromaticity[:,:,:-1,:])**2).detach()\n weight_y = torch.exp(-alpha*(albedo_chromaticity[:,:,:,1:] - albedo_chromaticity[:,:,:,:-1])**2).detach()\n albedo_const_loss_x = ((albedo[:,:,1:,:] - albedo[:,:,:-1,:])**2)*weight_x", + "detail": "modules.DECA.decalib.utils.lossfunc", + "documentation": {} + }, + { + "label": "albedo_ring_loss", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.lossfunc", + "description": "modules.DECA.decalib.utils.lossfunc", + "peekOfCode": "def albedo_ring_loss(texcode, ring_elements, margin, weight=1.):\n \"\"\"\n computes ring loss for ring_outputs before FLAME decoder\n Inputs:\n ring_outputs = a list containing N streams of the ring; len(ring_outputs) = N\n Each ring_outputs[i] is a tensor of (batch_size X shape_dim_num)\n Aim is to force each row (same subject) of each stream to produce same shape\n Each row of first N-1 strams are of the same subject and\n the Nth stream is the different subject\n \"\"\"", + "detail": "modules.DECA.decalib.utils.lossfunc", + "documentation": {} + }, + { + "label": "albedo_same_loss", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.lossfunc", + "description": "modules.DECA.decalib.utils.lossfunc", + "peekOfCode": "def albedo_same_loss(albedo, 
ring_elements, weight=1.):\n \"\"\"\n computes ring loss for ring_outputs before FLAME decoder\n Inputs:\n ring_outputs = a list containing N streams of the ring; len(ring_outputs) = N\n Each ring_outputs[i] is a tensor of (batch_size X shape_dim_num)\n Aim is to force each row (same subject) of each stream to produce same shape\n Each row of first N-1 strams are of the same subject and\n the Nth stream is the different subject\n \"\"\"", + "detail": "modules.DECA.decalib.utils.lossfunc", + "documentation": {} + }, + { + "label": "batch_kp_2d_l1_loss", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.lossfunc", + "description": "modules.DECA.decalib.utils.lossfunc", + "peekOfCode": "def batch_kp_2d_l1_loss(real_2d_kp, predicted_2d_kp, weights=None):\n \"\"\"\n Computes the l1 loss between the ground truth keypoints and the predicted keypoints\n Inputs:\n kp_gt : N x K x 3\n kp_pred: N x K x 2\n \"\"\"\n if weights is not None:\n real_2d_kp[:,:,2] = weights[None,:]*real_2d_kp[:,:,2]\n kp_gt = real_2d_kp.view(-1, 3)", + "detail": "modules.DECA.decalib.utils.lossfunc", + "documentation": {} + }, + { + "label": "landmark_loss", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.lossfunc", + "description": "modules.DECA.decalib.utils.lossfunc", + "peekOfCode": "def landmark_loss(predicted_landmarks, landmarks_gt, weight=1.):\n # (predicted_theta, predicted_verts, predicted_landmarks) = ringnet_outputs[-1]\n if torch.is_tensor(landmarks_gt) is not True:\n real_2d = torch.cat(landmarks_gt).cuda()\n else:\n real_2d = torch.cat([landmarks_gt, torch.ones((landmarks_gt.shape[0], 68, 1)).cuda()], dim=-1)\n # real_2d = torch.cat(landmarks_gt).cuda()\n loss_lmk_2d = batch_kp_2d_l1_loss(real_2d, predicted_landmarks)\n return loss_lmk_2d * weight\ndef eye_dis(landmarks):", + "detail": "modules.DECA.decalib.utils.lossfunc", + "documentation": {} + }, + { + "label": "eye_dis", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.lossfunc", + "description": "modules.DECA.decalib.utils.lossfunc", + "peekOfCode": "def eye_dis(landmarks):\n # left eye: [38,42], [39,41] - 1\n # right eye: [44,48], [45,47] -1\n eye_up = landmarks[:,[37, 38, 43, 44], :]\n eye_bottom = landmarks[:,[41, 40, 47, 46], :]\n dis = torch.sqrt(((eye_up - eye_bottom)**2).sum(2)) #[bz, 4]\n return dis\ndef eyed_loss(predicted_landmarks, landmarks_gt, weight=1.):\n if torch.is_tensor(landmarks_gt) is not True:\n real_2d = torch.cat(landmarks_gt).cuda()", + "detail": "modules.DECA.decalib.utils.lossfunc", + "documentation": {} + }, + { + "label": "eyed_loss", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.lossfunc", + "description": "modules.DECA.decalib.utils.lossfunc", + "peekOfCode": "def eyed_loss(predicted_landmarks, landmarks_gt, weight=1.):\n if torch.is_tensor(landmarks_gt) is not True:\n real_2d = torch.cat(landmarks_gt).cuda()\n else:\n real_2d = torch.cat([landmarks_gt, torch.ones((landmarks_gt.shape[0], 68, 1)).cuda()], dim=-1)\n pred_eyed = eye_dis(predicted_landmarks[:,:,:2])\n gt_eyed = eye_dis(real_2d[:,:,:2])\n loss = (pred_eyed - gt_eyed).abs().mean()\n return loss\ndef lip_dis(landmarks):", + "detail": "modules.DECA.decalib.utils.lossfunc", + "documentation": {} + }, + { + "label": "lip_dis", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.lossfunc", + "description": "modules.DECA.decalib.utils.lossfunc", + "peekOfCode": "def lip_dis(landmarks):\n # up inner lip: [62, 63, 64] - 1\n # down innder lip: [68, 67, 66] -1\n lip_up = landmarks[:,[61, 62, 63], :]\n lip_down = landmarks[:,[67, 
66, 65], :]\n dis = torch.sqrt(((lip_up - lip_down)**2).sum(2)) #[bz, 4]\n return dis\ndef lipd_loss(predicted_landmarks, landmarks_gt, weight=1.):\n if torch.is_tensor(landmarks_gt) is not True:\n real_2d = torch.cat(landmarks_gt).cuda()", + "detail": "modules.DECA.decalib.utils.lossfunc", + "documentation": {} + }, + { + "label": "lipd_loss", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.lossfunc", + "description": "modules.DECA.decalib.utils.lossfunc", + "peekOfCode": "def lipd_loss(predicted_landmarks, landmarks_gt, weight=1.):\n if torch.is_tensor(landmarks_gt) is not True:\n real_2d = torch.cat(landmarks_gt).cuda()\n else:\n real_2d = torch.cat([landmarks_gt, torch.ones((landmarks_gt.shape[0], 68, 1)).cuda()], dim=-1)\n pred_lipd = lip_dis(predicted_landmarks[:,:,:2])\n gt_lipd = lip_dis(real_2d[:,:,:2])\n loss = (pred_lipd - gt_lipd).abs().mean()\n return loss\ndef weighted_landmark_loss(predicted_landmarks, landmarks_gt, weight=1.):", + "detail": "modules.DECA.decalib.utils.lossfunc", + "documentation": {} + }, + { + "label": "weighted_landmark_loss", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.lossfunc", + "description": "modules.DECA.decalib.utils.lossfunc", + "peekOfCode": "def weighted_landmark_loss(predicted_landmarks, landmarks_gt, weight=1.):\n #smaller inner landmark weights\n # (predicted_theta, predicted_verts, predicted_landmarks) = ringnet_outputs[-1]\n # import ipdb; ipdb.set_trace()\n real_2d = landmarks_gt\n weights = torch.ones((68,)).cuda()\n weights[5:7] = 2\n weights[10:12] = 2\n # nose points\n weights[27:36] = 1.5", + "detail": "modules.DECA.decalib.utils.lossfunc", + "documentation": {} + }, + { + "label": "landmark_loss_tensor", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.lossfunc", + "description": "modules.DECA.decalib.utils.lossfunc", + "peekOfCode": "def landmark_loss_tensor(predicted_landmarks, landmarks_gt, weight=1.):\n # (predicted_theta, predicted_verts, predicted_landmarks) = ringnet_outputs[-1]\n loss_lmk_2d = batch_kp_2d_l1_loss(landmarks_gt, predicted_landmarks)\n return loss_lmk_2d * weight\ndef ring_loss(ring_outputs, ring_type, margin, weight=1.):\n \"\"\"\n computes ring loss for ring_outputs before FLAME decoder\n Inputs:\n ring_outputs = a list containing N streams of the ring; len(ring_outputs) = N\n Each ring_outputs[i] is a tensor of (batch_size X shape_dim_num)", + "detail": "modules.DECA.decalib.utils.lossfunc", + "documentation": {} + }, + { + "label": "ring_loss", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.lossfunc", + "description": "modules.DECA.decalib.utils.lossfunc", + "peekOfCode": "def ring_loss(ring_outputs, ring_type, margin, weight=1.):\n \"\"\"\n computes ring loss for ring_outputs before FLAME decoder\n Inputs:\n ring_outputs = a list containing N streams of the ring; len(ring_outputs) = N\n Each ring_outputs[i] is a tensor of (batch_size X shape_dim_num)\n Aim is to force each row (same subject) of each stream to produce same shape\n Each row of first N-1 strams are of the same subject and\n the Nth stream is the different subject\n \"\"\"", + "detail": "modules.DECA.decalib.utils.lossfunc", + "documentation": {} + }, + { + "label": "gradient_dif_loss", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.lossfunc", + "description": "modules.DECA.decalib.utils.lossfunc", + "peekOfCode": "def gradient_dif_loss(prediction, gt):\n prediction_diff_x = prediction[:,:,1:-1,1:] - prediction[:,:,1:-1,:-1]\n prediction_diff_y = prediction[:,:,1:,1:-1] - 
prediction[:,:,:-1,1:-1]\n gt_x = gt[:,:,1:-1,1:] - gt[:,:,1:-1,:-1]\n gt_y = gt[:,:,1:,1:-1] - gt[:,:,:-1,1:-1]\n diff = torch.mean((prediction_diff_x-gt_x)**2) + torch.mean((prediction_diff_y-gt_y)**2)\n return diff.mean()\ndef get_laplacian_kernel2d(kernel_size: int):\n r\"\"\"Function that returns Gaussian filter matrix coefficients.\n Args:", + "detail": "modules.DECA.decalib.utils.lossfunc", + "documentation": {} + }, + { + "label": "get_laplacian_kernel2d", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.lossfunc", + "description": "modules.DECA.decalib.utils.lossfunc", + "peekOfCode": "def get_laplacian_kernel2d(kernel_size: int):\n r\"\"\"Function that returns Gaussian filter matrix coefficients.\n Args:\n kernel_size (int): filter size should be odd.\n Returns:\n Tensor: 2D tensor with laplacian filter matrix coefficients.\n Shape:\n - Output: :math:`(\\text{kernel_size}_x, \\text{kernel_size}_y)`\n Examples::\n >>> kornia.image.get_laplacian_kernel2d(3)", + "detail": "modules.DECA.decalib.utils.lossfunc", + "documentation": {} + }, + { + "label": "laplacian_hq_loss", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.lossfunc", + "description": "modules.DECA.decalib.utils.lossfunc", + "peekOfCode": "def laplacian_hq_loss(prediction, gt):\n # https://torchgeometry.readthedocs.io/en/latest/_modules/kornia/filters/laplacian.html\n b, c, h, w = prediction.shape\n kernel_size = 3\n kernel = get_laplacian_kernel2d(kernel_size).to(prediction.device).to(prediction.dtype)\n kernel = kernel.repeat(c, 1, 1, 1)\n padding = (kernel_size - 1) // 2\n lap_pre = F.conv2d(prediction, kernel, padding=padding, stride=1, groups=c)\n lap_gt = F.conv2d(gt, kernel, padding=padding, stride=1, groups=c)\n return ((lap_pre - lap_gt)**2).mean()", + "detail": "modules.DECA.decalib.utils.lossfunc", + "documentation": {} + }, + { + "label": "StandardRasterizer", + "kind": 6, + "importPath": "modules.DECA.decalib.utils.renderer", + "description": "modules.DECA.decalib.utils.renderer", + "peekOfCode": "class StandardRasterizer(nn.Module):\n \"\"\" Alg: https://www.scratchapixel.com/lessons/3d-basic-rendering/rasterization-practical-implementation\n Notice:\n x,y,z are in image space, normalized to [-1, 1]\n can render non-squared image\n not differentiable\n \"\"\"\n def __init__(self, height, width=None):\n \"\"\"\n use fixed raster_settings for rendering faces", + "detail": "modules.DECA.decalib.utils.renderer", + "documentation": {} + }, + { + "label": "Pytorch3dRasterizer", + "kind": 6, + "importPath": "modules.DECA.decalib.utils.renderer", + "description": "modules.DECA.decalib.utils.renderer", + "peekOfCode": "class Pytorch3dRasterizer(nn.Module):\n ## TODO: add support for rendering non-squared images, since pytorch3d supports this now\n \"\"\" Borrowed from https://github.com/facebookresearch/pytorch3d\n Notice:\n x,y,z are in image space, normalized\n can only render squared image now\n \"\"\"\n def __init__(self, image_size=224):\n \"\"\"\n use fixed raster_settings for rendering faces", + "detail": "modules.DECA.decalib.utils.renderer", + "documentation": {} + }, + { + "label": "SRenderY", + "kind": 6, + "importPath": "modules.DECA.decalib.utils.renderer", + "description": "modules.DECA.decalib.utils.renderer", + "peekOfCode": "class SRenderY(nn.Module):\n def __init__(self, image_size, obj_filename, uv_size=256, rasterizer_type='pytorch3d'):\n super(SRenderY, self).__init__()\n self.image_size = image_size\n self.uv_size = uv_size\n if rasterizer_type == 'pytorch3d':\n self.rasterizer = 
Pytorch3dRasterizer(image_size)\n self.uv_rasterizer = Pytorch3dRasterizer(uv_size)\n verts, faces, aux = load_obj(obj_filename)\n uvcoords = aux.verts_uvs[None, ...] # (N, V, 2)", + "detail": "modules.DECA.decalib.utils.renderer", + "documentation": {} + }, + { + "label": "set_rasterizer", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.renderer", + "description": "modules.DECA.decalib.utils.renderer", + "peekOfCode": "def set_rasterizer(type = 'pytorch3d'):\n if type == 'pytorch3d':\n global Meshes, load_obj, rasterize_meshes\n from pytorch3d.structures import Meshes\n from pytorch3d.io import load_obj\n from pytorch3d.renderer.mesh import rasterize_meshes\n elif type == 'standard':\n global standard_rasterize, load_obj\n import os\n from .util import load_obj", + "detail": "modules.DECA.decalib.utils.renderer", + "documentation": {} + }, + { + "label": "rad2deg", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.rotation_converter", + "description": "modules.DECA.decalib.utils.rotation_converter", + "peekOfCode": "def rad2deg(tensor):\n \"\"\"Function that converts angles from radians to degrees.\n See :class:`~torchgeometry.RadToDeg` for details.\n Args:\n tensor (Tensor): Tensor of arbitrary shape.\n Returns:\n Tensor: Tensor with same shape as input.\n Example:\n >>> input = tgm.pi * torch.rand(1, 3, 3)\n >>> output = tgm.rad2deg(input)", + "detail": "modules.DECA.decalib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "deg2rad", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.rotation_converter", + "description": "modules.DECA.decalib.utils.rotation_converter", + "peekOfCode": "def deg2rad(tensor):\n \"\"\"Function that converts angles from degrees to radians.\n See :class:`~torchgeometry.DegToRad` for details.\n Args:\n tensor (Tensor): Tensor of arbitrary shape.\n Returns:\n Tensor: Tensor with same shape as input.\n Examples::\n >>> input = 360. 
* torch.rand(1, 3, 3)\n >>> output = tgm.deg2rad(input)", + "detail": "modules.DECA.decalib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "euler_to_quaternion", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.rotation_converter", + "description": "modules.DECA.decalib.utils.rotation_converter", + "peekOfCode": "def euler_to_quaternion(r):\n x = r[..., 0]\n y = r[..., 1]\n z = r[..., 2]\n z = z/2.0\n y = y/2.0\n x = x/2.0\n cz = torch.cos(z)\n sz = torch.sin(z)\n cy = torch.cos(y)", + "detail": "modules.DECA.decalib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "rotation_matrix_to_quaternion", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.rotation_converter", + "description": "modules.DECA.decalib.utils.rotation_converter", + "peekOfCode": "def rotation_matrix_to_quaternion(rotation_matrix, eps=1e-6):\n \"\"\"Convert 3x4 rotation matrix to 4d quaternion vector\n This algorithm is based on algorithm described in\n https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py#L201\n Args:\n rotation_matrix (Tensor): the rotation matrix to convert.\n Return:\n Tensor: the rotation in quaternion\n Shape:\n - Input: :math:`(N, 3, 4)`", + "detail": "modules.DECA.decalib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "angle_axis_to_quaternion", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.rotation_converter", + "description": "modules.DECA.decalib.utils.rotation_converter", + "peekOfCode": "def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor:\n \"\"\"Convert an angle axis to a quaternion.\n Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h\n Args:\n angle_axis (torch.Tensor): tensor with angle axis.\n Return:\n torch.Tensor: tensor with quaternion.\n Shape:\n - Input: :math:`(*, 3)` where `*` means, any number of dimensions\n - Output: :math:`(*, 4)`", + "detail": "modules.DECA.decalib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "quaternion_to_rotation_matrix", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.rotation_converter", + "description": "modules.DECA.decalib.utils.rotation_converter", + "peekOfCode": "def quaternion_to_rotation_matrix(quat):\n \"\"\"Convert quaternion coefficients to rotation matrix.\n Args:\n quat: size = [B, 4] 4 <===>(w, x, y, z)\n Returns:\n Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]\n \"\"\"\n norm_quat = quat\n norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True)\n w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, 2], norm_quat[:, 3]", + "detail": "modules.DECA.decalib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "quaternion_to_angle_axis", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.rotation_converter", + "description": "modules.DECA.decalib.utils.rotation_converter", + "peekOfCode": "def quaternion_to_angle_axis(quaternion: torch.Tensor):\n \"\"\"Convert quaternion vector to angle axis of rotation. 
TODO: CORRECT\n Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h\n Args:\n quaternion (torch.Tensor): tensor with quaternions.\n Return:\n torch.Tensor: tensor with angle axis of rotation.\n Shape:\n - Input: :math:`(*, 4)` where `*` means, any number of dimensions\n - Output: :math:`(*, 3)`", + "detail": "modules.DECA.decalib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "batch_euler2axis", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.rotation_converter", + "description": "modules.DECA.decalib.utils.rotation_converter", + "peekOfCode": "def batch_euler2axis(r):\n return quaternion_to_angle_axis(euler_to_quaternion(r))\ndef batch_euler2matrix(r):\n return quaternion_to_rotation_matrix(euler_to_quaternion(r))\ndef batch_matrix2euler(rot_mats):\n # Calculates rotation matrix to euler angles\n # Careful for extreme cases of eular angles like [0.0, pi, 0.0]\n ### only y?\n # TODO:\n sy = torch.sqrt(rot_mats[:, 0, 0] * rot_mats[:, 0, 0] +", + "detail": "modules.DECA.decalib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "batch_euler2matrix", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.rotation_converter", + "description": "modules.DECA.decalib.utils.rotation_converter", + "peekOfCode": "def batch_euler2matrix(r):\n return quaternion_to_rotation_matrix(euler_to_quaternion(r))\ndef batch_matrix2euler(rot_mats):\n # Calculates rotation matrix to euler angles\n # Careful for extreme cases of eular angles like [0.0, pi, 0.0]\n ### only y?\n # TODO:\n sy = torch.sqrt(rot_mats[:, 0, 0] * rot_mats[:, 0, 0] +\n rot_mats[:, 1, 0] * rot_mats[:, 1, 0])\n return torch.atan2(-rot_mats[:, 2, 0], sy)", + "detail": "modules.DECA.decalib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "batch_matrix2euler", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.rotation_converter", + "description": "modules.DECA.decalib.utils.rotation_converter", + "peekOfCode": "def batch_matrix2euler(rot_mats):\n # Calculates rotation matrix to euler angles\n # Careful for extreme cases of eular angles like [0.0, pi, 0.0]\n ### only y?\n # TODO:\n sy = torch.sqrt(rot_mats[:, 0, 0] * rot_mats[:, 0, 0] +\n rot_mats[:, 1, 0] * rot_mats[:, 1, 0])\n return torch.atan2(-rot_mats[:, 2, 0], sy)\ndef batch_matrix2axis(rot_mats):\n return quaternion_to_angle_axis(rotation_matrix_to_quaternion(rot_mats))", + "detail": "modules.DECA.decalib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "batch_matrix2axis", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.rotation_converter", + "description": "modules.DECA.decalib.utils.rotation_converter", + "peekOfCode": "def batch_matrix2axis(rot_mats):\n return quaternion_to_angle_axis(rotation_matrix_to_quaternion(rot_mats))\ndef batch_axis2matrix(theta):\n # angle axis to rotation matrix\n # theta N x 3\n # return quat2mat(quat)\n # batch_rodrigues\n return quaternion_to_rotation_matrix(angle_axis_to_quaternion(theta))\ndef batch_axis2euler(theta):\n return batch_matrix2euler(batch_axis2matrix(theta))", + "detail": "modules.DECA.decalib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "batch_axis2matrix", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.rotation_converter", + "description": "modules.DECA.decalib.utils.rotation_converter", + "peekOfCode": "def batch_axis2matrix(theta):\n # angle axis to rotation matrix\n # theta N x 3\n # return quat2mat(quat)\n # batch_rodrigues\n return 
quaternion_to_rotation_matrix(angle_axis_to_quaternion(theta))\ndef batch_axis2euler(theta):\n return batch_matrix2euler(batch_axis2matrix(theta))\ndef batch_axis2euler(r):\n return rot_mat_to_euler(batch_rodrigues(r))", + "detail": "modules.DECA.decalib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "batch_axis2euler", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.rotation_converter", + "description": "modules.DECA.decalib.utils.rotation_converter", + "peekOfCode": "def batch_axis2euler(theta):\n return batch_matrix2euler(batch_axis2matrix(theta))\ndef batch_axis2euler(r):\n return rot_mat_to_euler(batch_rodrigues(r))\ndef batch_orth_proj(X, camera):\n '''\n X is N x num_points x 3\n '''\n camera = camera.clone().view(-1, 1, 3)\n X_trans = X[:, :, :2] + camera[:, :, 1:]", + "detail": "modules.DECA.decalib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "batch_axis2euler", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.rotation_converter", + "description": "modules.DECA.decalib.utils.rotation_converter", + "peekOfCode": "def batch_axis2euler(r):\n return rot_mat_to_euler(batch_rodrigues(r))\ndef batch_orth_proj(X, camera):\n '''\n X is N x num_points x 3\n '''\n camera = camera.clone().view(-1, 1, 3)\n X_trans = X[:, :, :2] + camera[:, :, 1:]\n X_trans = torch.cat([X_trans, X[:,:,2:]], 2)\n Xn = (camera[:, :, 0:1] * X_trans)", + "detail": "modules.DECA.decalib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "batch_orth_proj", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.rotation_converter", + "description": "modules.DECA.decalib.utils.rotation_converter", + "peekOfCode": "def batch_orth_proj(X, camera):\n '''\n X is N x num_points x 3\n '''\n camera = camera.clone().view(-1, 1, 3)\n X_trans = X[:, :, :2] + camera[:, :, 1:]\n X_trans = torch.cat([X_trans, X[:,:,2:]], 2)\n Xn = (camera[:, :, 0:1] * X_trans)\n return Xn\ndef batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):", + "detail": "modules.DECA.decalib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "batch_rodrigues", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.rotation_converter", + "description": "modules.DECA.decalib.utils.rotation_converter", + "peekOfCode": "def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):\n ''' same as batch_matrix2axis\n Calculates the rotation matrices for a batch of rotation vectors\n Parameters\n ----------\n rot_vecs: torch.tensor Nx3\n array of N axis-angle vectors\n Returns\n -------\n R: torch.tensor Nx3x3", + "detail": "modules.DECA.decalib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "pi", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.rotation_converter", + "description": "modules.DECA.decalib.utils.rotation_converter", + "peekOfCode": "pi = torch.Tensor([3.14159265358979323846])\ndef rad2deg(tensor):\n \"\"\"Function that converts angles from radians to degrees.\n See :class:`~torchgeometry.RadToDeg` for details.\n Args:\n tensor (Tensor): Tensor of arbitrary shape.\n Returns:\n Tensor: Tensor with same shape as input.\n Example:\n >>> input = tgm.pi * torch.rand(1, 3, 3)", + "detail": "modules.DECA.decalib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "Cropper", + "kind": 6, + "importPath": "modules.DECA.decalib.utils.tensor_cropper", + "description": "modules.DECA.decalib.utils.tensor_cropper", + "peekOfCode": 
"class Cropper(object):\n def __init__(self, crop_size, scale=[1,1], trans_scale = 0.):\n self.crop_size = crop_size\n self.scale = scale\n self.trans_scale = trans_scale\n def crop(self, image, points, points_scale=None):\n # points to bbox\n center, bbox_size = points2bbox(points.clone(), points_scale)\n # argument bbox. TODO: add rotation?\n center, bbox_size = augment_bbox(center, bbox_size, scale=self.scale, trans_scale=self.trans_scale)", + "detail": "modules.DECA.decalib.utils.tensor_cropper", + "documentation": {} + }, + { + "label": "points2bbox", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.tensor_cropper", + "description": "modules.DECA.decalib.utils.tensor_cropper", + "peekOfCode": "def points2bbox(points, points_scale=None):\n if points_scale:\n assert points_scale[0]==points_scale[1]\n points = points.clone()\n points[:,:,:2] = (points[:,:,:2]*0.5 + 0.5)*points_scale[0]\n min_coords, _ = torch.min(points, dim=1)\n xmin, ymin = min_coords[:, 0], min_coords[:, 1]\n max_coords, _ = torch.max(points, dim=1)\n xmax, ymax = max_coords[:, 0], max_coords[:, 1]\n center = torch.stack([xmax + xmin, ymax + ymin], dim=-1) * 0.5", + "detail": "modules.DECA.decalib.utils.tensor_cropper", + "documentation": {} + }, + { + "label": "augment_bbox", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.tensor_cropper", + "description": "modules.DECA.decalib.utils.tensor_cropper", + "peekOfCode": "def augment_bbox(center, bbox_size, scale=[1.0, 1.0], trans_scale=0.):\n batch_size = center.shape[0]\n trans_scale = (torch.rand([batch_size, 2], device=center.device)*2. -1.) * trans_scale\n center = center + trans_scale*bbox_size # 0.5\n scale = torch.rand([batch_size,1], device=center.device) * (scale[1] - scale[0]) + scale[0]\n size = bbox_size*scale\n return center, size\ndef crop_tensor(image, center, bbox_size, crop_size, interpolation = 'bilinear', align_corners=False):\n ''' for batch image\n Args:", + "detail": "modules.DECA.decalib.utils.tensor_cropper", + "documentation": {} + }, + { + "label": "crop_tensor", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.tensor_cropper", + "description": "modules.DECA.decalib.utils.tensor_cropper", + "peekOfCode": "def crop_tensor(image, center, bbox_size, crop_size, interpolation = 'bilinear', align_corners=False):\n ''' for batch image\n Args:\n image (torch.Tensor): the reference tensor of shape BXHxWXC.\n center: [bz, 2]\n bboxsize: [bz, 1]\n crop_size;\n interpolation (str): Interpolation flag. Default: 'bilinear'.\n align_corners (bool): mode for grid_generation. Default: False. 
See\n https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.interpolate for details", + "detail": "modules.DECA.decalib.utils.tensor_cropper", + "documentation": {} + }, + { + "label": "transform_points", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.tensor_cropper", + "description": "modules.DECA.decalib.utils.tensor_cropper", + "peekOfCode": "def transform_points(points, tform, points_scale=None, out_scale=None):\n points_2d = points[:,:,:2]\n #'input points must use original range'\n if points_scale:\n assert points_scale[0]==points_scale[1]\n points_2d = (points_2d*0.5 + 0.5)*points_scale[0]\n # import ipdb; ipdb.set_trace()\n batch_size, n_points, _ = points.shape\n trans_points_2d = torch.bmm(\n torch.cat([points_2d, torch.ones([batch_size, n_points, 1], device=points.device, dtype=points.dtype)], dim=-1), ", + "detail": "modules.DECA.decalib.utils.tensor_cropper", + "documentation": {} + }, + { + "label": "Trainer", + "kind": 6, + "importPath": "modules.DECA.decalib.utils.trainer", + "description": "modules.DECA.decalib.utils.trainer", + "peekOfCode": "class Trainer(object):\n def __init__(self, model, config=None, device='cuda:0'):\n if config is None:\n self.cfg = cfg\n else:\n self.cfg = config\n self.device = device\n self.batch_size = self.cfg.dataset.batch_size\n self.image_size = self.cfg.dataset.image_size\n self.uv_size = self.cfg.model.uv_size", + "detail": "modules.DECA.decalib.utils.trainer", + "documentation": {} + }, + { + "label": "torch.backends.cudnn.benchmark", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.trainer", + "description": "modules.DECA.decalib.utils.trainer", + "peekOfCode": "torch.backends.cudnn.benchmark = True\nfrom .utils import lossfunc\nfrom .datasets import build_datasets\nclass Trainer(object):\n def __init__(self, model, config=None, device='cuda:0'):\n if config is None:\n self.cfg = cfg\n else:\n self.cfg = config\n self.device = device", + "detail": "modules.DECA.decalib.utils.trainer", + "documentation": {} + }, + { + "label": "Struct", + "kind": 6, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "class Struct(object):\n def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)\n# original saved file with DataParallel\ndef remove_module(state_dict):\n# create new OrderedDict that does not contain `module.`\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n name = k[7:] # remove `module.`", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "upsample_mesh", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def upsample_mesh(vertices, normals, faces, displacement_map, texture_map, dense_template):\n ''' Credit to Timo\n upsampling coarse mesh (with displacment map)\n vertices: vertices of coarse mesh, [nv, 3]\n normals: vertex normals, [nv, 3]\n faces: faces of coarse mesh, [nf, 3]\n texture_map: texture map, [256, 256, 3]\n displacement_map: displacment map, [256, 256]\n dense_template: \n Returns: ", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "write_obj", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def write_obj(obj_name,\n vertices,\n faces,\n colors=None,\n texture=None,\n uvcoords=None,\n uvfaces=None,\n 
inverse_face_order=False,\n normal_map=None,\n ):", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "load_obj", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def load_obj(obj_filename):\n \"\"\" Ref: https://github.com/facebookresearch/pytorch3d/blob/25c065e9dafa90163e7cec873dbb324a637c68b7/pytorch3d/io/obj_io.py\n Load a mesh from a file-like object.\n \"\"\"\n with open(obj_filename, 'r') as f:\n lines = [line.strip() for line in f]\n verts, uvcoords = [], []\n faces, uv_faces = [], []\n # startswith expects each line to be a string. If the file is read in as\n # bytes then first decode to strings.", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "generate_triangles", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def generate_triangles(h, w, margin_x=2, margin_y=5, mask = None):\n # quad layout:\n # 0 1 ... w-1\n # w w+1\n #.\n # w*h\n triangles = []\n for x in range(margin_x, w-1-margin_x):\n for y in range(margin_y, h-1-margin_y):\n triangle0 = [y*w + x, y*w + x + 1, (y+1)*w + x]", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "face_vertices", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def face_vertices(vertices, faces):\n \"\"\" \n :param vertices: [batch size, number of vertices, 3]\n :param faces: [batch size, number of faces, 3]\n :return: [batch size, number of faces, 3, 3]\n \"\"\"\n assert (vertices.ndimension() == 3)\n assert (faces.ndimension() == 3)\n assert (vertices.shape[0] == faces.shape[0])\n assert (vertices.shape[2] == 3)", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "vertex_normals", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def vertex_normals(vertices, faces):\n \"\"\"\n :param vertices: [batch size, number of vertices, 3]\n :param faces: [batch size, number of faces, 3]\n :return: [batch size, number of vertices, 3]\n \"\"\"\n assert (vertices.ndimension() == 3)\n assert (faces.ndimension() == 3)\n assert (vertices.shape[0] == faces.shape[0])\n assert (vertices.shape[2] == 3)", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "batch_orth_proj", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def batch_orth_proj(X, camera):\n ''' orthgraphic projection\n X: 3d vertices, [bz, n_point, 3]\n camera: scale and translation, [bz, 3], [scale, tx, ty]\n '''\n camera = camera.clone().view(-1, 1, 3)\n X_trans = X[:, :, :2] + camera[:, :, 1:]\n X_trans = torch.cat([X_trans, X[:,:,2:]], 2)\n shape = X_trans.shape\n Xn = (camera[:, :, 0:1] * X_trans)", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "gaussian", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def gaussian(window_size, sigma):\n def gauss_fcn(x):\n return -(x - window_size // 2)**2 / float(2 * sigma**2)\n gauss = torch.stack(\n [torch.exp(torch.tensor(gauss_fcn(x))) for x in range(window_size)])\n return gauss / gauss.sum()\ndef 
get_gaussian_kernel(kernel_size: int, sigma: float):\n r\"\"\"Function that returns Gaussian filter coefficients.\n Args:\n kernel_size (int): filter size. It should be odd and positive.", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "get_gaussian_kernel", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def get_gaussian_kernel(kernel_size: int, sigma: float):\n r\"\"\"Function that returns Gaussian filter coefficients.\n Args:\n kernel_size (int): filter size. It should be odd and positive.\n sigma (float): gaussian standard deviation.\n Returns:\n Tensor: 1D tensor with gaussian filter coefficients.\n Shape:\n - Output: :math:`(\\text{kernel_size})`\n Examples::", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "get_gaussian_kernel2d", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def get_gaussian_kernel2d(kernel_size, sigma):\n r\"\"\"Function that returns Gaussian filter matrix coefficients.\n Args:\n kernel_size (Tuple[int, int]): filter sizes in the x and y direction.\n Sizes should be odd and positive.\n sigma (Tuple[int, int]): gaussian standard deviation in the x and y\n direction.\n Returns:\n Tensor: 2D tensor with gaussian filter matrix coefficients.\n Shape:", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "gaussian_blur", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def gaussian_blur(x, kernel_size=(3,3), sigma=(0.8,0.8)):\n b, c, h, w = x.shape\n kernel = get_gaussian_kernel2d(kernel_size, sigma).to(x.device).to(x.dtype)\n kernel = kernel.repeat(c, 1, 1, 1)\n padding = [(k - 1) // 2 for k in kernel_size]\n return F.conv2d(x, kernel, padding=padding, stride=1, groups=c)\ndef _compute_binary_kernel(window_size):\n r\"\"\"Creates a binary kernel to extract the patches. 
If the window size\n is HxW will create a (H*W)xHxW kernel.\n \"\"\"", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "median_blur", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def median_blur(x, kernel_size=(3,3)):\n b, c, h, w = x.shape\n kernel = _compute_binary_kernel(kernel_size).to(x.device).to(x.dtype)\n kernel = kernel.repeat(c, 1, 1, 1)\n padding = [(k - 1) // 2 for k in kernel_size]\n features = F.conv2d(x, kernel, padding=padding, stride=1, groups=c)\n features = features.view(b,c,-1,h,w)\n median = torch.median(features, dim=2)[0]\n return median\ndef get_laplacian_kernel2d(kernel_size: int):", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "get_laplacian_kernel2d", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def get_laplacian_kernel2d(kernel_size: int):\n r\"\"\"Function that returns Gaussian filter matrix coefficients.\n Args:\n kernel_size (int): filter size should be odd.\n Returns:\n Tensor: 2D tensor with laplacian filter matrix coefficients.\n Shape:\n - Output: :math:`(\\text{kernel_size}_x, \\text{kernel_size}_y)`\n Examples::\n >>> kornia.image.get_laplacian_kernel2d(3)", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "laplacian", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def laplacian(x):\n # https://torchgeometry.readthedocs.io/en/latest/_modules/kornia/filters/laplacian.html\n b, c, h, w = x.shape\n kernel_size = 3\n kernel = get_laplacian_kernel2d(kernel_size).to(x.device).to(x.dtype)\n kernel = kernel.repeat(c, 1, 1, 1)\n padding = (kernel_size - 1) // 2\n return F.conv2d(x, kernel, padding=padding, stride=1, groups=c)\ndef angle2matrix(angles):\n ''' get rotation matrix from three rotation angles(degree). right-handed.", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "angle2matrix", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def angle2matrix(angles):\n ''' get rotation matrix from three rotation angles(degree). right-handed.\n Args:\n angles: [batch_size, 3] tensor containing X, Y, and Z angles.\n x: pitch. positive for looking down.\n y: yaw. positive for looking left. \n z: roll. positive for tilting head right. \n Returns:\n R: [batch_size, 3, 3]. rotation matrices.\n '''", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "binary_erosion", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def binary_erosion(tensor, kernel_size=5):\n # tensor: [bz, 1, h, w]. 
\n device = tensor.device\n mask = tensor.cpu().numpy()\n structure=np.ones((kernel_size,kernel_size))\n new_mask = mask.copy()\n for i in range(mask.shape[0]):\n new_mask[i,0] = morphology.binary_erosion(mask[i,0], structure)\n return torch.from_numpy(new_mask.astype(np.float32)).to(device)\ndef flip_image(src_image, kps):", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "flip_image", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def flip_image(src_image, kps):\n '''\n purpose:\n flip a image given by src_image and the 2d keypoints\n flip_mode: \n 0: horizontal flip\n >0: vertical flip\n <0: horizontal & vertical flip\n '''\n h, w = src_image.shape[0], src_image.shape[1]", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "copy_state_dict", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def copy_state_dict(cur_state_dict, pre_state_dict, prefix='', load_name=None):\n def _get_params(key):\n key = prefix + key\n if key in pre_state_dict:\n return pre_state_dict[key]\n return None\n for k in cur_state_dict.keys():\n if load_name is not None:\n if load_name not in k:\n continue", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "check_mkdir", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def check_mkdir(path):\n if not os.path.exists(path):\n print('creating %s' % path)\n os.makedirs(path)\ndef check_mkdirlist(pathlist):\n for path in pathlist:\n if not os.path.exists(path):\n print('creating %s' % path)\n os.makedirs(path)\ndef tensor2image(tensor):", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "check_mkdirlist", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def check_mkdirlist(pathlist):\n for path in pathlist:\n if not os.path.exists(path):\n print('creating %s' % path)\n os.makedirs(path)\ndef tensor2image(tensor):\n image = tensor.detach().cpu().numpy()\n image = image*255.\n image = np.maximum(np.minimum(image, 255), 0)\n image = image.transpose(1,2,0)[:,:,[2,1,0]]", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "tensor2image", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def tensor2image(tensor):\n image = tensor.detach().cpu().numpy()\n image = image*255.\n image = np.maximum(np.minimum(image, 255), 0)\n image = image.transpose(1,2,0)[:,:,[2,1,0]]\n return image.astype(np.uint8).copy()\ndef dict2obj(d):\n # if isinstance(d, list):\n # d = [dict2obj(x) for x in d]\n if not isinstance(d, dict):", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "dict2obj", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def dict2obj(d):\n # if isinstance(d, list):\n # d = [dict2obj(x) for x in d]\n if not isinstance(d, dict):\n return d\n class C(object):\n pass\n o = C()\n for k in d:\n o.__dict__[k] = dict2obj(d[k])", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "remove_module", + "kind": 2, + 
"importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def remove_module(state_dict):\n# create new OrderedDict that does not contain `module.`\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n name = k[7:] # remove `module.`\n new_state_dict[name] = v\n return new_state_dict\n# def dict_tensor2npy(tensor_dict):\n# npy_dict = {}\n# for key in tensor_dict:", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "dict_tensor2npy", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def dict_tensor2npy(tensor_dict):\n npy_dict = {}\n for key, value in tensor_dict.items():\n # print(type(value))\n # if type(value)==dict:\n # pass\n # # print('dict')\n # # npy_dict[key] = dict_tensor2npy(value)\n # el\n if type(value)==torch.Tensor:", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "plot_kpts", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def plot_kpts(image, kpts, color = 'r'):\n ''' Draw 68 key points\n Args: \n image: the input image\n kpt: (68, 3).\n '''\n if color == 'r':\n c = (255, 0, 0)\n elif color == 'g':\n c = (0, 255, 0)", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "plot_verts", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def plot_verts(image, kpts, color = 'r'):\n ''' Draw 68 key points\n Args: \n image: the input image\n kpt: (68, 3).\n '''\n if color == 'r':\n c = (255, 0, 0)\n elif color == 'g':\n c = (0, 255, 0)", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "tensor_vis_landmarks", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def tensor_vis_landmarks(images, landmarks, gt_landmarks=None, color = 'g', isScale=True):\n # visualize landmarks\n vis_landmarks = []\n images = images.cpu().numpy()\n predicted_landmarks = landmarks.detach().cpu().numpy()\n if gt_landmarks is not None:\n gt_landmarks_np = gt_landmarks.detach().cpu().numpy()\n for i in range(images.shape[0]):\n image = images[i]\n image = image.transpose(1,2,0)[:,:,[2,1,0]].copy(); image = (image*255)", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "load_local_mask", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def load_local_mask(image_size=256, mode='bbx'):\n if mode == 'bbx':\n # UV space face attributes bbx in size 2048 (l r t b)\n # face = np.array([512, 1536, 512, 1536]) #\n face = np.array([400, 1648, 400, 1648])\n # if image_size == 512:\n # face = np.array([400, 400+512*2, 400, 400+512*2])\n # face = np.array([512, 512+512*2, 512, 512+512*2])\n forehead = np.array([550, 1498, 430, 700+50])\n eye_nose = np.array([490, 1558, 700, 1050+50])", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "visualize_grid", + "kind": 2, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "def visualize_grid(visdict, savepath=None, size=224, dim=1, return_gird=True):\n '''\n image range should be [0,1]\n dim: 2 for 
horizontal. 1 for vertical\n '''\n assert dim == 1 or dim==2\n grids = {}\n for key in visdict:\n _,_,h,w = visdict[key].shape\n if dim == 2:", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "end_list", + "kind": 5, + "importPath": "modules.DECA.decalib.utils.util", + "description": "modules.DECA.decalib.utils.util", + "peekOfCode": "end_list = np.array([17, 22, 27, 42, 48, 31, 36, 68], dtype = np.int32) - 1\ndef plot_kpts(image, kpts, color = 'r'):\n ''' Draw 68 key points\n Args: \n image: the input image\n kpt: (68, 3).\n '''\n if color == 'r':\n c = (255, 0, 0)\n elif color == 'g':", + "detail": "modules.DECA.decalib.utils.util", + "documentation": {} + }, + { + "label": "DECA", + "kind": 6, + "importPath": "modules.DECA.decalib.deca", + "description": "modules.DECA.decalib.deca", + "peekOfCode": "class DECA(nn.Module):\n def __init__(self, config=None, device='cuda'):\n super(DECA, self).__init__()\n if config is None:\n self.cfg = cfg\n else:\n self.cfg = config\n self.device = device\n self.image_size = self.cfg.dataset.image_size\n self.uv_size = self.cfg.model.uv_size", + "detail": "modules.DECA.decalib.deca", + "documentation": {} + }, + { + "label": "torch.backends.cudnn.benchmark", + "kind": 5, + "importPath": "modules.DECA.decalib.deca", + "description": "modules.DECA.decalib.deca", + "peekOfCode": "torch.backends.cudnn.benchmark = True\nclass DECA(nn.Module):\n def __init__(self, config=None, device='cuda'):\n super(DECA, self).__init__()\n if config is None:\n self.cfg = cfg\n else:\n self.cfg = config\n self.device = device\n self.image_size = self.cfg.dataset.image_size", + "detail": "modules.DECA.decalib.deca", + "documentation": {} + }, + { + "label": "Trainer", + "kind": 6, + "importPath": "modules.DECA.decalib.trainer", + "description": "modules.DECA.decalib.trainer", + "peekOfCode": "class Trainer(object):\n def __init__(self, model, config=None, device='cuda:0'):\n if config is None:\n self.cfg = cfg\n else:\n self.cfg = config\n self.device = device\n self.batch_size = self.cfg.dataset.batch_size\n self.image_size = self.cfg.dataset.image_size\n self.uv_size = self.cfg.model.uv_size", + "detail": "modules.DECA.decalib.trainer", + "documentation": {} + }, + { + "label": "torch.backends.cudnn.benchmark", + "kind": 5, + "importPath": "modules.DECA.decalib.trainer", + "description": "modules.DECA.decalib.trainer", + "peekOfCode": "torch.backends.cudnn.benchmark = True\nfrom .utils import lossfunc\nfrom .datasets import build_datasets\nclass Trainer(object):\n def __init__(self, model, config=None, device='cuda:0'):\n if config is None:\n self.cfg = cfg\n else:\n self.cfg = config\n self.device = device", + "detail": "modules.DECA.decalib.trainer", + "documentation": {} + }, + { + "label": "api_multi_deca", + "kind": 2, + "importPath": "modules.DECA.demos.api_multi_deca", + "description": "modules.DECA.demos.api_multi_deca", + "peekOfCode": "def api_multi_deca(\n savefolder,\n visfolder,\n inputpath,\n device='cuda',\n saveVis=True,\n saveMat=True,\n rasterizer_type='pytorch3d',\n face_detector='mtcnn',\n ):", + "detail": "modules.DECA.demos.api_multi_deca", + "documentation": {} + }, + { + "label": "main", + "kind": 2, + "importPath": "modules.DECA.demos.demo_multi_persons", + "description": "modules.DECA.demos.demo_multi_persons", + "peekOfCode": "def main(args):\n savefolder = args.savefolder\n device = args.device\n os.makedirs(savefolder, exist_ok=True)\n # load test images \n testdata = datasets.TestData(args.inputpath, 
iscrop=args.iscrop, face_detector=args.detector)\n # run DECA\n deca_cfg.model.use_tex = args.useTex\n deca_cfg.rasterizer_type = args.rasterizer_type\n deca = DECA(config = deca_cfg, device=device)", + "detail": "modules.DECA.demos.demo_multi_persons", + "documentation": {} + }, + { + "label": "main", + "kind": 2, + "importPath": "modules.DECA.demos.demo_reconstruct", + "description": "modules.DECA.demos.demo_reconstruct", + "peekOfCode": "def main(args):\n # if args.rasterizer_type != 'standard':\n # args.render_orig = False\n savefolder = args.savefolder\n device = args.device\n os.makedirs(savefolder, exist_ok=True)\n # os.makedirs(os.path.join(savefolder, 'vis'), exist_ok=True)\n # os.makedirs(os.path.join(savefolder, 'org'), exist_ok=True)\n # load test images \n testdata = datasets.TestData(args.inputpath, iscrop=args.iscrop, face_detector=args.detector)", + "detail": "modules.DECA.demos.demo_reconstruct", + "documentation": {} + }, + { + "label": "main", + "kind": 2, + "importPath": "modules.DECA.demos.demo_reconstruct_modify", + "description": "modules.DECA.demos.demo_reconstruct_modify", + "peekOfCode": "def main(args):\n # if args.rasterizer_type != 'standard':\n # args.render_orig = False\n savefolder = args.savefolder\n device = args.device\n os.makedirs(savefolder, exist_ok=True)\n # os.makedirs(os.path.join(savefolder, 'vis'), exist_ok=True)\n # os.makedirs(os.path.join(savefolder, 'org'), exist_ok=True)\n # load test images \n testdata = datasets.TestData(args.inputpath, iscrop=args.iscrop, face_detector=args.detector)", + "detail": "modules.DECA.demos.demo_reconstruct_modify", + "documentation": {} + }, + { + "label": "main", + "kind": 2, + "importPath": "modules.DECA.demos.demo_teaser", + "description": "modules.DECA.demos.demo_teaser", + "peekOfCode": "def main(args):\n savefolder = args.savefolder\n device = args.device\n os.makedirs(savefolder, exist_ok=True)\n # load test images \n testdata = datasets.TestData(args.inputpath, iscrop=args.iscrop, face_detector=args.detector)\n expdata = datasets.TestData(args.exp_path, iscrop=args.iscrop, face_detector=args.detector)\n # DECA\n deca_cfg.rasterizer_type = args.rasterizer_type\n deca = DECA(config=deca_cfg, device=device)", + "detail": "modules.DECA.demos.demo_teaser", + "documentation": {} + }, + { + "label": "main", + "kind": 2, + "importPath": "modules.DECA.demos.demo_transfer", + "description": "modules.DECA.demos.demo_transfer", + "peekOfCode": "def main(args):\n savefolder = args.savefolder\n device = args.device\n os.makedirs(savefolder, exist_ok=True)\n # load test images \n testdata = datasets.TestData(args.image_path, iscrop=args.iscrop, face_detector=args.detector)\n expdata = datasets.TestData(args.exp_path, iscrop=args.iscrop, face_detector=args.detector)\n # run DECA\n deca_cfg.model.use_tex = args.useTex\n deca_cfg.rasterizer_type = args.rasterizer_type", + "detail": "modules.DECA.demos.demo_transfer", + "documentation": {} + }, + { + "label": "main", + "kind": 2, + "importPath": "modules.DECA.main_train", + "description": "modules.DECA.main_train", + "peekOfCode": "def main(cfg):\n # creat folders \n os.makedirs(os.path.join(cfg.output_dir, cfg.train.log_dir), exist_ok=True)\n os.makedirs(os.path.join(cfg.output_dir, cfg.train.vis_dir), exist_ok=True)\n os.makedirs(os.path.join(cfg.output_dir, cfg.train.val_vis_dir), exist_ok=True)\n with open(os.path.join(cfg.output_dir, cfg.train.log_dir, 'full_config.yaml'), 'w') as f:\n yaml.dump(cfg, f, default_flow_style=False)\n shutil.copy(cfg.cfg_file, 
os.path.join(cfg.output_dir, 'config.yaml'))\n # cudnn related setting\n cudnn.benchmark = True", + "detail": "modules.DECA.main_train", + "documentation": {} + }, + { + "label": "BU3DFE", + "kind": 6, + "importPath": "modules.MICA.datasets.creation.instances.bu3dfe", + "description": "modules.MICA.datasets.creation.instances.bu3dfe", + "peekOfCode": "class BU3DFE(Instance, ABC):\n def __init__(self):\n super(BU3DFE, self).__init__()\n self.dst = '/scratch/NFC/OnFlame/BU3DFE/'\n self.src = '/scratch/NFC/BU-3DFE/'\n def get_images(self):\n images = {}\n for actor in sorted(glob(self.get_src().replace('BU-3DFE', 'BU-3DFE_clean') + 'images/*')):\n images[Path(actor).name] = glob(f'{actor}/*.jpg')\n return images", + "detail": "modules.MICA.datasets.creation.instances.bu3dfe", + "documentation": {} + }, + { + "label": "D3DFACS", + "kind": 6, + "importPath": "modules.MICA.datasets.creation.instances.d3dfacs", + "description": "modules.MICA.datasets.creation.instances.d3dfacs", + "peekOfCode": "class D3DFACS(Instance, ABC):\n def __init__(self):\n super(D3DFACS, self).__init__()\n self.dst = '/scratch/NFC/OnFlame/D3DFACS/'\n self.src = '/home/wzielonka/datasets/D3DFACS/'\n def get_images(self):\n images = {}\n for file in sorted(glob(self.get_src() + 'processed/images/*')):\n actor = Path(file).stem\n images[actor] = glob(f'{file}/*.jpg')", + "detail": "modules.MICA.datasets.creation.instances.d3dfacs", + "documentation": {} + }, + { + "label": "FaceWarehouse", + "kind": 6, + "importPath": "modules.MICA.datasets.creation.instances.facewarehouse", + "description": "modules.MICA.datasets.creation.instances.facewarehouse", + "peekOfCode": "class FaceWarehouse(Instance, ABC):\n def __init__(self):\n super(FaceWarehouse, self).__init__()\n self.dst = '/scratch/NFC/OnFlame/FACEWAREHOUSE/'\n self.src = '/scratch/NFC/FaceWarehouse/'\n def get_images(self):\n images = {}\n for actor in sorted(glob(self.get_src() + 'Images/*')):\n images[Path(actor).stem] = glob(f'{actor}/*.png')\n return images", + "detail": "modules.MICA.datasets.creation.instances.facewarehouse", + "documentation": {} + }, + { + "label": "Florence", + "kind": 6, + "importPath": "modules.MICA.datasets.creation.instances.florence", + "description": "modules.MICA.datasets.creation.instances.florence", + "peekOfCode": "class Florence(Instance, ABC):\n def __init__(self):\n super(Florence, self).__init__()\n self.dst = '/scratch/NFC/OnFlame/FLORENCE/'\n self.src = '/scratch/NFC/MICC_Florence/'\n def get_min_det_score(self):\n return 0.85\n def get_images(self):\n images = {}\n for actor in sorted(glob(self.get_src() + 'images/*')):", + "detail": "modules.MICA.datasets.creation.instances.florence", + "documentation": {} + }, + { + "label": "FRGC", + "kind": 6, + "importPath": "modules.MICA.datasets.creation.instances.frgc", + "description": "modules.MICA.datasets.creation.instances.frgc", + "peekOfCode": "class FRGC(Instance, ABC):\n def __init__(self):\n super(FRGC, self).__init__()\n self.dst = '/scratch/NFC/OnFlame/FRGC/'\n self.src = '/scratch/NFC/FRGC_v2/'\n def get_images(self):\n images = {}\n for actor in sorted(glob(self.get_src() + 'images/*')):\n imgs = list(filter(lambda f: 'Spring2003range' not in f, glob(f'/{actor}/*/*.jpg')))\n images[Path(actor).name] = imgs", + "detail": "modules.MICA.datasets.creation.instances.frgc", + "documentation": {} + }, + { + "label": "Instance", + "kind": 6, + "importPath": "modules.MICA.datasets.creation.instances.instance", + "description": "modules.MICA.datasets.creation.instances.instance", + 
"peekOfCode": "class Instance:\n def __init__(self):\n self.mount = '/home/wzielonka/Cluster/lustre'\n self.dst = 'empty'\n self.src = 'empty'\n self.device = 'cuda:0'\n self.actors = []\n self.use_mount = os.path.exists(self.mount)\n def get_dst(self):\n return self.dst if not self.use_mount else self.mount + self.dst", + "detail": "modules.MICA.datasets.creation.instances.instance", + "documentation": {} + }, + { + "label": "LYHM", + "kind": 6, + "importPath": "modules.MICA.datasets.creation.instances.lyhm", + "description": "modules.MICA.datasets.creation.instances.lyhm", + "peekOfCode": "class LYHM(Instance, ABC):\n def __init__(self):\n super(LYHM, self).__init__()\n self.dst = '/scratch/NFC/MICA/LYHM/'\n self.src = '/scratch/NFC/LYHM/'\n def get_images(self):\n images = {}\n for actor in sorted(glob(self.get_src() + '/*')):\n images[Path(actor).name] = glob(f'/{actor}/*.png')\n return images", + "detail": "modules.MICA.datasets.creation.instances.lyhm", + "documentation": {} + }, + { + "label": "ImageFile.LOAD_TRUNCATED_IMAGES", + "kind": 5, + "importPath": "modules.MICA.datasets.creation.instances.lyhm", + "description": "modules.MICA.datasets.creation.instances.lyhm", + "peekOfCode": "ImageFile.LOAD_TRUNCATED_IMAGES = True\nfrom pytorch3d.io import load_objs_as_meshes\nfrom pytorch3d.transforms import RotateAxisAngle\nfrom datasets.creation.instances.instance import Instance\nclass LYHM(Instance, ABC):\n def __init__(self):\n super(LYHM, self).__init__()\n self.dst = '/scratch/NFC/MICA/LYHM/'\n self.src = '/scratch/NFC/LYHM/'\n def get_images(self):", + "detail": "modules.MICA.datasets.creation.instances.lyhm", + "documentation": {} + }, + { + "label": "PB4D", + "kind": 6, + "importPath": "modules.MICA.datasets.creation.instances.pb4d", + "description": "modules.MICA.datasets.creation.instances.pb4d", + "peekOfCode": "class PB4D(Instance, ABC):\n def __init__(self):\n super(PB4D, self).__init__()\n self.dst = '/scratch/NFC/OnFlame/BP4D/'\n self.src = '/scratch/NFC/BP4D/'\n def get_images(self):\n images = {}\n for actor in sorted(glob(self.get_src() + 'images/*')):\n imgs = sorted(glob(f'/{actor}/*.jpg'))\n indecies = np.random.choice(len(imgs), 100, replace=False)", + "detail": "modules.MICA.datasets.creation.instances.pb4d", + "documentation": {} + }, + { + "label": "Stirling", + "kind": 6, + "importPath": "modules.MICA.datasets.creation.instances.stirling", + "description": "modules.MICA.datasets.creation.instances.stirling", + "peekOfCode": "class Stirling(Instance, ABC):\n def __init__(self):\n super(Stirling, self).__init__()\n self.dst = '/scratch/NFC/OnFlame/STIRLING/'\n self.src = '/scratch/NFC/Stirling/'\n def get_min_det_score(self):\n return 0.75\n def get_images(self):\n images = {}\n for file in sorted(glob(self.get_src() + 'images/Real_images__Subset_2D_FG2018/HQ/*')):", + "detail": "modules.MICA.datasets.creation.instances.stirling", + "documentation": {} + }, + { + "label": "Generator", + "kind": 6, + "importPath": "modules.MICA.datasets.creation.generator", + "description": "modules.MICA.datasets.creation.generator", + "peekOfCode": "class Generator:\n def __init__(self, instances):\n self.instances: List[Instance] = instances\n self.ARCFACE = 'arcface_input'\n def copy(self):\n logger.info('Start copying...')\n for instance in tqdm(self.instances):\n payloads = [(instance, instance.get_images, 'images', instance.transform_path)]\n with Pool(processes=len(payloads)) as pool:\n for _ in tqdm(pool.imap_unordered(_copy, payloads), total=len(payloads)):", + "detail": 
"modules.MICA.datasets.creation.generator", + "documentation": {} + }, + { + "label": "create_folders", + "kind": 2, + "importPath": "modules.MICA.datasets.creation.util", + "description": "modules.MICA.datasets.creation.util", + "peekOfCode": "def create_folders(folders):\n if not type(folders) is list:\n folders = folders.split('/')\n parents = '/'\n for folder in folders:\n parents = os.path.join(parents, folder)\n if os.path.exists(parents):\n continue\n Path(parents).mkdir(exist_ok=True)\ndef get_arcface_input(face, img):", + "detail": "modules.MICA.datasets.creation.util", + "documentation": {} + }, + { + "label": "get_arcface_input", + "kind": 2, + "importPath": "modules.MICA.datasets.creation.util", + "description": "modules.MICA.datasets.creation.util", + "peekOfCode": "def get_arcface_input(face, img):\n aimg = face_align.norm_crop(img, landmark=face.kps)\n blob = cv2.dnn.blobFromImages([aimg], 1.0 / input_std, (112, 112), (input_mean, input_mean, input_mean), swapRB=True)\n return blob[0], aimg\ndef get_image(name, to_rgb=False):\n images_dir = osp.join(Path(__file__).parent.absolute(), '../images')\n ext_names = ['.jpg', '.png', '.jpeg']\n image_file = None\n for ext_name in ext_names:\n _image_file = osp.join(images_dir, \"%s%s\" % (name, ext_name))", + "detail": "modules.MICA.datasets.creation.util", + "documentation": {} + }, + { + "label": "get_image", + "kind": 2, + "importPath": "modules.MICA.datasets.creation.util", + "description": "modules.MICA.datasets.creation.util", + "peekOfCode": "def get_image(name, to_rgb=False):\n images_dir = osp.join(Path(__file__).parent.absolute(), '../images')\n ext_names = ['.jpg', '.png', '.jpeg']\n image_file = None\n for ext_name in ext_names:\n _image_file = osp.join(images_dir, \"%s%s\" % (name, ext_name))\n if osp.exists(_image_file):\n image_file = _image_file\n break\n assert image_file is not None, '%s not found' % name", + "detail": "modules.MICA.datasets.creation.util", + "documentation": {} + }, + { + "label": "dist", + "kind": 2, + "importPath": "modules.MICA.datasets.creation.util", + "description": "modules.MICA.datasets.creation.util", + "peekOfCode": "def dist(p1, p2):\n return math.sqrt(((p1[0] - p2[0]) ** 2) + ((p1[1] - p2[1]) ** 2))\ndef get_center(bboxes, img):\n img_center = img.shape[0] // 2, img.shape[1] // 2\n size = bboxes.shape[0]\n distance = np.Inf\n j = 0\n for i in range(size):\n x1, y1, x2, y2 = bboxes[i, 0:4]\n dx = abs(x2 - x1) / 2.0", + "detail": "modules.MICA.datasets.creation.util", + "documentation": {} + }, + { + "label": "get_center", + "kind": 2, + "importPath": "modules.MICA.datasets.creation.util", + "description": "modules.MICA.datasets.creation.util", + "peekOfCode": "def get_center(bboxes, img):\n img_center = img.shape[0] // 2, img.shape[1] // 2\n size = bboxes.shape[0]\n distance = np.Inf\n j = 0\n for i in range(size):\n x1, y1, x2, y2 = bboxes[i, 0:4]\n dx = abs(x2 - x1) / 2.0\n dy = abs(y2 - y1) / 2.0\n current = dist((x1 + dx, y1 + dy), img_center)", + "detail": "modules.MICA.datasets.creation.util", + "documentation": {} + }, + { + "label": "bbox2point", + "kind": 2, + "importPath": "modules.MICA.datasets.creation.util", + "description": "modules.MICA.datasets.creation.util", + "peekOfCode": "def bbox2point(left, right, top, bottom, type='bbox'):\n if type == 'kpt68':\n old_size = (right - left + bottom - top) / 2 * 1.1\n center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0])\n elif type == 'bbox':\n old_size = (right - left + bottom - top) / 2\n center = 
np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0 + old_size * 0.12])\n else:\n raise NotImplementedError\n return old_size, center", + "detail": "modules.MICA.datasets.creation.util", + "documentation": {} + }, + { + "label": "input_mean", + "kind": 5, + "importPath": "modules.MICA.datasets.creation.util", + "description": "modules.MICA.datasets.creation.util", + "peekOfCode": "input_mean = 127.5\ninput_std = 127.5\ndef create_folders(folders):\n if not type(folders) is list:\n folders = folders.split('/')\n parents = '/'\n for folder in folders:\n parents = os.path.join(parents, folder)\n if os.path.exists(parents):\n continue", + "detail": "modules.MICA.datasets.creation.util", + "documentation": {} + }, + { + "label": "input_std", + "kind": 5, + "importPath": "modules.MICA.datasets.creation.util", + "description": "modules.MICA.datasets.creation.util", + "peekOfCode": "input_std = 127.5\ndef create_folders(folders):\n if not type(folders) is list:\n folders = folders.split('/')\n parents = '/'\n for folder in folders:\n parents = os.path.join(parents, folder)\n if os.path.exists(parents):\n continue\n Path(parents).mkdir(exist_ok=True)", + "detail": "modules.MICA.datasets.creation.util", + "documentation": {} + }, + { + "label": "BaseDataset", + "kind": 6, + "importPath": "modules.MICA.datasets.base", + "description": "modules.MICA.datasets.base", + "peekOfCode": "class BaseDataset(Dataset, ABC):\n def __init__(self, name, config, device, isEval):\n self.K = config.K\n self.isEval = isEval\n self.n_train = np.Inf\n self.imagepaths = []\n self.face_dict = {}\n self.name = name\n self.device = device\n self.min_max_K = 0", + "detail": "modules.MICA.datasets.base", + "documentation": {} + }, + { + "label": "IBasicBlock", + "kind": 6, + "importPath": "modules.MICA.micalib.arcface", + "description": "modules.MICA.micalib.arcface", + "peekOfCode": "class IBasicBlock(nn.Module):\n expansion = 1\n def __init__(self, inplanes, planes, stride=1, downsample=None,\n groups=1, base_width=64, dilation=1):\n super(IBasicBlock, self).__init__()\n if groups != 1 or base_width != 64:\n raise ValueError('BasicBlock only supports groups=1 and base_width=64')\n if dilation > 1:\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05, )", + "detail": "modules.MICA.micalib.arcface", + "documentation": {} + }, + { + "label": "IResNet", + "kind": 6, + "importPath": "modules.MICA.micalib.arcface", + "description": "modules.MICA.micalib.arcface", + "peekOfCode": "class IResNet(nn.Module):\n fc_scale = 7 * 7\n def __init__(self,\n block, layers, dropout=0, num_features=512, zero_init_residual=False,\n groups=1, width_per_group=64, replace_stride_with_dilation=None, fp16=False):\n super(IResNet, self).__init__()\n self.fp16 = fp16\n self.inplanes = 64\n self.dilation = 1\n self.block = block", + "detail": "modules.MICA.micalib.arcface", + "documentation": {} + }, + { + "label": "Arcface", + "kind": 6, + "importPath": "modules.MICA.micalib.arcface", + "description": "modules.MICA.micalib.arcface", + "peekOfCode": "class Arcface(IResNet):\n def __init__(self, pretrained_path=None, **kwargs):\n super(Arcface, self).__init__(IBasicBlock, [3, 13, 30, 3], **kwargs)\n if pretrained_path is not None and os.path.exists(pretrained_path):\n logger.info(f'[Arcface] Initializing from insightface model from {pretrained_path}.')\n self.load_state_dict(torch.load(pretrained_path))\n self.freezer([self.layer1, self.layer2, self.layer3, 
self.conv1, self.bn1, self.prelu])\n def freezer(self, layers):\n for layer in layers:\n for block in layer.parameters():", + "detail": "modules.MICA.micalib.arcface", + "documentation": {} + }, + { + "label": "conv3x3", + "kind": 2, + "importPath": "modules.MICA.micalib.arcface", + "description": "modules.MICA.micalib.arcface", + "peekOfCode": "def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=dilation,\n groups=groups,\n bias=False,\n dilation=dilation)", + "detail": "modules.MICA.micalib.arcface", + "documentation": {} + }, + { + "label": "conv1x1", + "kind": 2, + "importPath": "modules.MICA.micalib.arcface", + "description": "modules.MICA.micalib.arcface", + "peekOfCode": "def conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes,\n out_planes,\n kernel_size=1,\n stride=stride,\n bias=False)\nclass IBasicBlock(nn.Module):\n expansion = 1\n def __init__(self, inplanes, planes, stride=1, downsample=None,", + "detail": "modules.MICA.micalib.arcface", + "documentation": {} + }, + { + "label": "__all__", + "kind": 5, + "importPath": "modules.MICA.micalib.arcface", + "description": "modules.MICA.micalib.arcface", + "peekOfCode": "__all__ = ['iresnet18', 'iresnet34', 'iresnet50', 'iresnet100', 'iresnet200']\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=dilation,\n groups=groups,\n bias=False,", + "detail": "modules.MICA.micalib.arcface", + "documentation": {} + }, + { + "label": "BaseModel", + "kind": 6, + "importPath": "modules.MICA.micalib.base_model", + "description": "modules.MICA.micalib.base_model", + "peekOfCode": "class BaseModel(nn.Module):\n def __init__(self, config=None, device=None, tag=''):\n super(BaseModel, self).__init__()\n if config is None:\n self.cfg = cfg\n else:\n self.cfg = config\n self.tag = tag\n self.use_mask = self.cfg.train.use_mask\n self.device = device", + "detail": "modules.MICA.micalib.base_model", + "documentation": {} + }, + { + "label": "BestModel", + "kind": 6, + "importPath": "modules.MICA.micalib.best_model", + "description": "modules.MICA.micalib.best_model", + "peekOfCode": "class BestModel:\n def __init__(self, trainer):\n self.average = np.Inf\n self.weighted_average = np.Inf\n self.smoothed_average = np.Inf\n self.smoothed_weighted_average = np.Inf\n self.running_average = np.Inf\n self.running_weighted_average = np.Inf\n self.now_mean = None\n self.trainer = trainer", + "detail": "modules.MICA.micalib.best_model", + "documentation": {} + }, + { + "label": "get_cfg_defaults", + "kind": 2, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "def get_cfg_defaults():\n return cfg.clone()\ndef update_cfg(cfg, cfg_file):\n cfg.merge_from_file(cfg_file)\n return cfg.clone()\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--cfg', type=str, help='cfg file path', required=True)\n parser.add_argument('--test_dataset', type=str, help='Test dataset type', default='')\n parser.add_argument('--checkpoint', type=str, help='Checkpoint to load', default='')", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "update_cfg", + "kind": 2, + "importPath": "modules.MICA.micalib.config", + "description": 
"modules.MICA.micalib.config", + "peekOfCode": "def update_cfg(cfg, cfg_file):\n cfg.merge_from_file(cfg_file)\n return cfg.clone()\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--cfg', type=str, help='cfg file path', required=True)\n parser.add_argument('--test_dataset', type=str, help='Test dataset type', default='')\n parser.add_argument('--checkpoint', type=str, help='Checkpoint to load', default='')\n args = parser.parse_args()\n print(args, end='\\n\\n')", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "parse_args", + "kind": 2, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--cfg', type=str, help='cfg file path', required=True)\n parser.add_argument('--test_dataset', type=str, help='Test dataset type', default='')\n parser.add_argument('--checkpoint', type=str, help='Checkpoint to load', default='')\n args = parser.parse_args()\n print(args, end='\\n\\n')\n cfg = get_cfg_defaults()\n if args.cfg is not None:\n cfg_file = args.cfg", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg = CN()\nabs_mica_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../..','models/models_MICA'))\n# abs_mica_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\ncfg.mica_dir = abs_mica_dir\ncfg.device = 'cuda'\ncfg.device_id = '0'\ncfg.pretrained_model_path = os.path.join(cfg.mica_dir, 'pretrained', 'mica.tar')\ncfg.output_dir = ''\ncfg.FLAME_masks=os.path.join(cfg.mica_dir, 'FLAME2020/FLAME_masks/FLAME_masks.pkl')\ncfg.generic_model=os.path.join(cfg.mica_dir, 'FLAME2020/generic_model.pkl')", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "abs_mica_dir", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "abs_mica_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../..','models/models_MICA'))\n# abs_mica_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\ncfg.mica_dir = abs_mica_dir\ncfg.device = 'cuda'\ncfg.device_id = '0'\ncfg.pretrained_model_path = os.path.join(cfg.mica_dir, 'pretrained', 'mica.tar')\ncfg.output_dir = ''\ncfg.FLAME_masks=os.path.join(cfg.mica_dir, 'FLAME2020/FLAME_masks/FLAME_masks.pkl')\ncfg.generic_model=os.path.join(cfg.mica_dir, 'FLAME2020/generic_model.pkl')\n# ---------------------------------------------------------------------------- #", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.mica_dir", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.mica_dir = abs_mica_dir\ncfg.device = 'cuda'\ncfg.device_id = '0'\ncfg.pretrained_model_path = os.path.join(cfg.mica_dir, 'pretrained', 'mica.tar')\ncfg.output_dir = ''\ncfg.FLAME_masks=os.path.join(cfg.mica_dir, 'FLAME2020/FLAME_masks/FLAME_masks.pkl')\ncfg.generic_model=os.path.join(cfg.mica_dir, 'FLAME2020/generic_model.pkl')\n# ---------------------------------------------------------------------------- #\n# Options for Face model\n# ---------------------------------------------------------------------------- #", + "detail": "modules.MICA.micalib.config", 
+ "documentation": {} + }, + { + "label": "cfg.device", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.device = 'cuda'\ncfg.device_id = '0'\ncfg.pretrained_model_path = os.path.join(cfg.mica_dir, 'pretrained', 'mica.tar')\ncfg.output_dir = ''\ncfg.FLAME_masks=os.path.join(cfg.mica_dir, 'FLAME2020/FLAME_masks/FLAME_masks.pkl')\ncfg.generic_model=os.path.join(cfg.mica_dir, 'FLAME2020/generic_model.pkl')\n# ---------------------------------------------------------------------------- #\n# Options for Face model\n# ---------------------------------------------------------------------------- #\ncfg.model = CN()", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.device_id", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.device_id = '0'\ncfg.pretrained_model_path = os.path.join(cfg.mica_dir, 'pretrained', 'mica.tar')\ncfg.output_dir = ''\ncfg.FLAME_masks=os.path.join(cfg.mica_dir, 'FLAME2020/FLAME_masks/FLAME_masks.pkl')\ncfg.generic_model=os.path.join(cfg.mica_dir, 'FLAME2020/generic_model.pkl')\n# ---------------------------------------------------------------------------- #\n# Options for Face model\n# ---------------------------------------------------------------------------- #\ncfg.model = CN()\ncfg.model.testing = False", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.pretrained_model_path", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.pretrained_model_path = os.path.join(cfg.mica_dir, 'pretrained', 'mica.tar')\ncfg.output_dir = ''\ncfg.FLAME_masks=os.path.join(cfg.mica_dir, 'FLAME2020/FLAME_masks/FLAME_masks.pkl')\ncfg.generic_model=os.path.join(cfg.mica_dir, 'FLAME2020/generic_model.pkl')\n# ---------------------------------------------------------------------------- #\n# Options for Face model\n# ---------------------------------------------------------------------------- #\ncfg.model = CN()\ncfg.model.testing = False\ncfg.model.name = 'mica'", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.output_dir", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.output_dir = ''\ncfg.FLAME_masks=os.path.join(cfg.mica_dir, 'FLAME2020/FLAME_masks/FLAME_masks.pkl')\ncfg.generic_model=os.path.join(cfg.mica_dir, 'FLAME2020/generic_model.pkl')\n# ---------------------------------------------------------------------------- #\n# Options for Face model\n# ---------------------------------------------------------------------------- #\ncfg.model = CN()\ncfg.model.testing = False\ncfg.model.name = 'mica'\ncfg.model.topology_path = os.path.join(cfg.mica_dir, 'FLAME2020', 'head_template.obj')", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.model", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.model = CN()\ncfg.model.testing = False\ncfg.model.name = 'mica'\ncfg.model.topology_path = os.path.join(cfg.mica_dir, 'FLAME2020', 'head_template.obj')\ncfg.model.flame_model_path = os.path.join(cfg.mica_dir, 'FLAME2020', 'generic_model.pkl')\ncfg.model.flame_lmk_embedding_path = os.path.join(cfg.mica_dir, 'FLAME2020', 'landmark_embedding.npy')\n# 
cfg.model.topology_path = os.path.join(cfg.mica_dir, '../../../data', 'head_template.obj')\n# cfg.model.flame_model_path = os.path.join(cfg.mica_dir, '../../../data', 'generic_model.pkl')\n# cfg.model.flame_lmk_embedding_path = os.path.join(cfg.mica_dir, '../../../data', 'landmark_embedding.npy')\ncfg.model.n_shape = 300", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.model.testing", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.model.testing = False\ncfg.model.name = 'mica'\ncfg.model.topology_path = os.path.join(cfg.mica_dir, 'FLAME2020', 'head_template.obj')\ncfg.model.flame_model_path = os.path.join(cfg.mica_dir, 'FLAME2020', 'generic_model.pkl')\ncfg.model.flame_lmk_embedding_path = os.path.join(cfg.mica_dir, 'FLAME2020', 'landmark_embedding.npy')\n# cfg.model.topology_path = os.path.join(cfg.mica_dir, '../../../data', 'head_template.obj')\n# cfg.model.flame_model_path = os.path.join(cfg.mica_dir, '../../../data', 'generic_model.pkl')\n# cfg.model.flame_lmk_embedding_path = os.path.join(cfg.mica_dir, '../../../data', 'landmark_embedding.npy')\ncfg.model.n_shape = 300\ncfg.model.layers = 8", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.model.name", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.model.name = 'mica'\ncfg.model.topology_path = os.path.join(cfg.mica_dir, 'FLAME2020', 'head_template.obj')\ncfg.model.flame_model_path = os.path.join(cfg.mica_dir, 'FLAME2020', 'generic_model.pkl')\ncfg.model.flame_lmk_embedding_path = os.path.join(cfg.mica_dir, 'FLAME2020', 'landmark_embedding.npy')\n# cfg.model.topology_path = os.path.join(cfg.mica_dir, '../../../data', 'head_template.obj')\n# cfg.model.flame_model_path = os.path.join(cfg.mica_dir, '../../../data', 'generic_model.pkl')\n# cfg.model.flame_lmk_embedding_path = os.path.join(cfg.mica_dir, '../../../data', 'landmark_embedding.npy')\ncfg.model.n_shape = 300\ncfg.model.layers = 8\ncfg.model.hidden_layers_size = 256", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.model.topology_path", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.model.topology_path = os.path.join(cfg.mica_dir, 'FLAME2020', 'head_template.obj')\ncfg.model.flame_model_path = os.path.join(cfg.mica_dir, 'FLAME2020', 'generic_model.pkl')\ncfg.model.flame_lmk_embedding_path = os.path.join(cfg.mica_dir, 'FLAME2020', 'landmark_embedding.npy')\n# cfg.model.topology_path = os.path.join(cfg.mica_dir, '../../../data', 'head_template.obj')\n# cfg.model.flame_model_path = os.path.join(cfg.mica_dir, '../../../data', 'generic_model.pkl')\n# cfg.model.flame_lmk_embedding_path = os.path.join(cfg.mica_dir, '../../../data', 'landmark_embedding.npy')\ncfg.model.n_shape = 300\ncfg.model.layers = 8\ncfg.model.hidden_layers_size = 256\ncfg.model.mapping_layers = 3", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.model.flame_model_path", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.model.flame_model_path = os.path.join(cfg.mica_dir, 'FLAME2020', 'generic_model.pkl')\ncfg.model.flame_lmk_embedding_path = os.path.join(cfg.mica_dir, 'FLAME2020', 'landmark_embedding.npy')\n# 
cfg.model.topology_path = os.path.join(cfg.mica_dir, '../../../data', 'head_template.obj')\n# cfg.model.flame_model_path = os.path.join(cfg.mica_dir, '../../../data', 'generic_model.pkl')\n# cfg.model.flame_lmk_embedding_path = os.path.join(cfg.mica_dir, '../../../data', 'landmark_embedding.npy')\ncfg.model.n_shape = 300\ncfg.model.layers = 8\ncfg.model.hidden_layers_size = 256\ncfg.model.mapping_layers = 3\ncfg.model.use_pretrained = True", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.model.flame_lmk_embedding_path", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.model.flame_lmk_embedding_path = os.path.join(cfg.mica_dir, 'FLAME2020', 'landmark_embedding.npy')\n# cfg.model.topology_path = os.path.join(cfg.mica_dir, '../../../data', 'head_template.obj')\n# cfg.model.flame_model_path = os.path.join(cfg.mica_dir, '../../../data', 'generic_model.pkl')\n# cfg.model.flame_lmk_embedding_path = os.path.join(cfg.mica_dir, '../../../data', 'landmark_embedding.npy')\ncfg.model.n_shape = 300\ncfg.model.layers = 8\ncfg.model.hidden_layers_size = 256\ncfg.model.mapping_layers = 3\ncfg.model.use_pretrained = True\ncfg.model.arcface_pretrained_model = '/scratch/is-rg-ncs/models_weights/arcface-torch/backbone100.pth'", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.model.n_shape", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.model.n_shape = 300\ncfg.model.layers = 8\ncfg.model.hidden_layers_size = 256\ncfg.model.mapping_layers = 3\ncfg.model.use_pretrained = True\ncfg.model.arcface_pretrained_model = '/scratch/is-rg-ncs/models_weights/arcface-torch/backbone100.pth'\n# ---------------------------------------------------------------------------- #\n# Options for Dataset\n# ---------------------------------------------------------------------------- #\ncfg.dataset = CN()", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.model.layers", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.model.layers = 8\ncfg.model.hidden_layers_size = 256\ncfg.model.mapping_layers = 3\ncfg.model.use_pretrained = True\ncfg.model.arcface_pretrained_model = '/scratch/is-rg-ncs/models_weights/arcface-torch/backbone100.pth'\n# ---------------------------------------------------------------------------- #\n# Options for Dataset\n# ---------------------------------------------------------------------------- #\ncfg.dataset = CN()\ncfg.dataset.training_data = ['LYHM']", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.model.hidden_layers_size", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.model.hidden_layers_size = 256\ncfg.model.mapping_layers = 3\ncfg.model.use_pretrained = True\ncfg.model.arcface_pretrained_model = '/scratch/is-rg-ncs/models_weights/arcface-torch/backbone100.pth'\n# ---------------------------------------------------------------------------- #\n# Options for Dataset\n# ---------------------------------------------------------------------------- #\ncfg.dataset = CN()\ncfg.dataset.training_data = ['LYHM']\ncfg.dataset.eval_data = ['FLORENCE']", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": 
"cfg.model.mapping_layers", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.model.mapping_layers = 3\ncfg.model.use_pretrained = True\ncfg.model.arcface_pretrained_model = '/scratch/is-rg-ncs/models_weights/arcface-torch/backbone100.pth'\n# ---------------------------------------------------------------------------- #\n# Options for Dataset\n# ---------------------------------------------------------------------------- #\ncfg.dataset = CN()\ncfg.dataset.training_data = ['LYHM']\ncfg.dataset.eval_data = ['FLORENCE']\ncfg.dataset.batch_size = 2", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.model.use_pretrained", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.model.use_pretrained = True\ncfg.model.arcface_pretrained_model = '/scratch/is-rg-ncs/models_weights/arcface-torch/backbone100.pth'\n# ---------------------------------------------------------------------------- #\n# Options for Dataset\n# ---------------------------------------------------------------------------- #\ncfg.dataset = CN()\ncfg.dataset.training_data = ['LYHM']\ncfg.dataset.eval_data = ['FLORENCE']\ncfg.dataset.batch_size = 2\ncfg.dataset.K = 4", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.model.arcface_pretrained_model", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.model.arcface_pretrained_model = '/scratch/is-rg-ncs/models_weights/arcface-torch/backbone100.pth'\n# ---------------------------------------------------------------------------- #\n# Options for Dataset\n# ---------------------------------------------------------------------------- #\ncfg.dataset = CN()\ncfg.dataset.training_data = ['LYHM']\ncfg.dataset.eval_data = ['FLORENCE']\ncfg.dataset.batch_size = 2\ncfg.dataset.K = 4\ncfg.dataset.n_train = 100000", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.dataset", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.dataset = CN()\ncfg.dataset.training_data = ['LYHM']\ncfg.dataset.eval_data = ['FLORENCE']\ncfg.dataset.batch_size = 2\ncfg.dataset.K = 4\ncfg.dataset.n_train = 100000\ncfg.dataset.num_workers = 4\ncfg.dataset.root = '/datasets/MICA/'\n# ---------------------------------------------------------------------------- #\n# Mask weights", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.dataset.training_data", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.dataset.training_data = ['LYHM']\ncfg.dataset.eval_data = ['FLORENCE']\ncfg.dataset.batch_size = 2\ncfg.dataset.K = 4\ncfg.dataset.n_train = 100000\ncfg.dataset.num_workers = 4\ncfg.dataset.root = '/datasets/MICA/'\n# ---------------------------------------------------------------------------- #\n# Mask weights\n# ---------------------------------------------------------------------------- #", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.dataset.eval_data", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.dataset.eval_data = ['FLORENCE']\ncfg.dataset.batch_size = 
2\ncfg.dataset.K = 4\ncfg.dataset.n_train = 100000\ncfg.dataset.num_workers = 4\ncfg.dataset.root = '/datasets/MICA/'\n# ---------------------------------------------------------------------------- #\n# Mask weights\n# ---------------------------------------------------------------------------- #\ncfg.mask_weights = CN()", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.dataset.batch_size", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.dataset.batch_size = 2\ncfg.dataset.K = 4\ncfg.dataset.n_train = 100000\ncfg.dataset.num_workers = 4\ncfg.dataset.root = '/datasets/MICA/'\n# ---------------------------------------------------------------------------- #\n# Mask weights\n# ---------------------------------------------------------------------------- #\ncfg.mask_weights = CN()\ncfg.mask_weights.face = 150.0", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.dataset.K", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.dataset.K = 4\ncfg.dataset.n_train = 100000\ncfg.dataset.num_workers = 4\ncfg.dataset.root = '/datasets/MICA/'\n# ---------------------------------------------------------------------------- #\n# Mask weights\n# ---------------------------------------------------------------------------- #\ncfg.mask_weights = CN()\ncfg.mask_weights.face = 150.0\ncfg.mask_weights.nose = 50.0", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.dataset.n_train", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.dataset.n_train = 100000\ncfg.dataset.num_workers = 4\ncfg.dataset.root = '/datasets/MICA/'\n# ---------------------------------------------------------------------------- #\n# Mask weights\n# ---------------------------------------------------------------------------- #\ncfg.mask_weights = CN()\ncfg.mask_weights.face = 150.0\ncfg.mask_weights.nose = 50.0\ncfg.mask_weights.lips = 50.0", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.dataset.num_workers", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.dataset.num_workers = 4\ncfg.dataset.root = '/datasets/MICA/'\n# ---------------------------------------------------------------------------- #\n# Mask weights\n# ---------------------------------------------------------------------------- #\ncfg.mask_weights = CN()\ncfg.mask_weights.face = 150.0\ncfg.mask_weights.nose = 50.0\ncfg.mask_weights.lips = 50.0\ncfg.mask_weights.forehead = 50.0", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.dataset.root", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.dataset.root = '/datasets/MICA/'\n# ---------------------------------------------------------------------------- #\n# Mask weights\n# ---------------------------------------------------------------------------- #\ncfg.mask_weights = CN()\ncfg.mask_weights.face = 150.0\ncfg.mask_weights.nose = 50.0\ncfg.mask_weights.lips = 50.0\ncfg.mask_weights.forehead = 50.0\ncfg.mask_weights.lr_eye_region = 50.0", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": 
"cfg.mask_weights", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.mask_weights = CN()\ncfg.mask_weights.face = 150.0\ncfg.mask_weights.nose = 50.0\ncfg.mask_weights.lips = 50.0\ncfg.mask_weights.forehead = 50.0\ncfg.mask_weights.lr_eye_region = 50.0\ncfg.mask_weights.eye_region = 50.0\ncfg.mask_weights.whole = 1.0\ncfg.mask_weights.ears = 0.01\ncfg.mask_weights.eyes = 0.01", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.mask_weights.face", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.mask_weights.face = 150.0\ncfg.mask_weights.nose = 50.0\ncfg.mask_weights.lips = 50.0\ncfg.mask_weights.forehead = 50.0\ncfg.mask_weights.lr_eye_region = 50.0\ncfg.mask_weights.eye_region = 50.0\ncfg.mask_weights.whole = 1.0\ncfg.mask_weights.ears = 0.01\ncfg.mask_weights.eyes = 0.01\ncfg.running_average = 7", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.mask_weights.nose", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.mask_weights.nose = 50.0\ncfg.mask_weights.lips = 50.0\ncfg.mask_weights.forehead = 50.0\ncfg.mask_weights.lr_eye_region = 50.0\ncfg.mask_weights.eye_region = 50.0\ncfg.mask_weights.whole = 1.0\ncfg.mask_weights.ears = 0.01\ncfg.mask_weights.eyes = 0.01\ncfg.running_average = 7\n# ---------------------------------------------------------------------------- #", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.mask_weights.lips", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.mask_weights.lips = 50.0\ncfg.mask_weights.forehead = 50.0\ncfg.mask_weights.lr_eye_region = 50.0\ncfg.mask_weights.eye_region = 50.0\ncfg.mask_weights.whole = 1.0\ncfg.mask_weights.ears = 0.01\ncfg.mask_weights.eyes = 0.01\ncfg.running_average = 7\n# ---------------------------------------------------------------------------- #\n# Options for training", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.mask_weights.forehead", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.mask_weights.forehead = 50.0\ncfg.mask_weights.lr_eye_region = 50.0\ncfg.mask_weights.eye_region = 50.0\ncfg.mask_weights.whole = 1.0\ncfg.mask_weights.ears = 0.01\ncfg.mask_weights.eyes = 0.01\ncfg.running_average = 7\n# ---------------------------------------------------------------------------- #\n# Options for training\n# ---------------------------------------------------------------------------- #", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.mask_weights.lr_eye_region", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.mask_weights.lr_eye_region = 50.0\ncfg.mask_weights.eye_region = 50.0\ncfg.mask_weights.whole = 1.0\ncfg.mask_weights.ears = 0.01\ncfg.mask_weights.eyes = 0.01\ncfg.running_average = 7\n# ---------------------------------------------------------------------------- #\n# Options for training\n# ---------------------------------------------------------------------------- #\ncfg.train = CN()", + "detail": "modules.MICA.micalib.config", + 
"documentation": {} + }, + { + "label": "cfg.mask_weights.eye_region", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.mask_weights.eye_region = 50.0\ncfg.mask_weights.whole = 1.0\ncfg.mask_weights.ears = 0.01\ncfg.mask_weights.eyes = 0.01\ncfg.running_average = 7\n# ---------------------------------------------------------------------------- #\n# Options for training\n# ---------------------------------------------------------------------------- #\ncfg.train = CN()\ncfg.train.use_mask = False", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.mask_weights.whole", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.mask_weights.whole = 1.0\ncfg.mask_weights.ears = 0.01\ncfg.mask_weights.eyes = 0.01\ncfg.running_average = 7\n# ---------------------------------------------------------------------------- #\n# Options for training\n# ---------------------------------------------------------------------------- #\ncfg.train = CN()\ncfg.train.use_mask = False\ncfg.train.max_epochs = 50", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.mask_weights.ears", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.mask_weights.ears = 0.01\ncfg.mask_weights.eyes = 0.01\ncfg.running_average = 7\n# ---------------------------------------------------------------------------- #\n# Options for training\n# ---------------------------------------------------------------------------- #\ncfg.train = CN()\ncfg.train.use_mask = False\ncfg.train.max_epochs = 50\ncfg.train.max_steps = 100000", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.mask_weights.eyes", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.mask_weights.eyes = 0.01\ncfg.running_average = 7\n# ---------------------------------------------------------------------------- #\n# Options for training\n# ---------------------------------------------------------------------------- #\ncfg.train = CN()\ncfg.train.use_mask = False\ncfg.train.max_epochs = 50\ncfg.train.max_steps = 100000\ncfg.train.lr = 1e-4", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.running_average", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.running_average = 7\n# ---------------------------------------------------------------------------- #\n# Options for training\n# ---------------------------------------------------------------------------- #\ncfg.train = CN()\ncfg.train.use_mask = False\ncfg.train.max_epochs = 50\ncfg.train.max_steps = 100000\ncfg.train.lr = 1e-4\ncfg.train.arcface_lr = 1e-3", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.train", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.train = CN()\ncfg.train.use_mask = False\ncfg.train.max_epochs = 50\ncfg.train.max_steps = 100000\ncfg.train.lr = 1e-4\ncfg.train.arcface_lr = 1e-3\ncfg.train.weight_decay = 0.0\ncfg.train.lr_update_step = 100000000\ncfg.train.log_dir = 'logs'\ncfg.train.log_steps = 10", + "detail": "modules.MICA.micalib.config", + 
"documentation": {} + }, + { + "label": "cfg.train.use_mask", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.train.use_mask = False\ncfg.train.max_epochs = 50\ncfg.train.max_steps = 100000\ncfg.train.lr = 1e-4\ncfg.train.arcface_lr = 1e-3\ncfg.train.weight_decay = 0.0\ncfg.train.lr_update_step = 100000000\ncfg.train.log_dir = 'logs'\ncfg.train.log_steps = 10\ncfg.train.vis_dir = 'train_images'", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.train.max_epochs", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.train.max_epochs = 50\ncfg.train.max_steps = 100000\ncfg.train.lr = 1e-4\ncfg.train.arcface_lr = 1e-3\ncfg.train.weight_decay = 0.0\ncfg.train.lr_update_step = 100000000\ncfg.train.log_dir = 'logs'\ncfg.train.log_steps = 10\ncfg.train.vis_dir = 'train_images'\ncfg.train.vis_steps = 200", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.train.max_steps", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.train.max_steps = 100000\ncfg.train.lr = 1e-4\ncfg.train.arcface_lr = 1e-3\ncfg.train.weight_decay = 0.0\ncfg.train.lr_update_step = 100000000\ncfg.train.log_dir = 'logs'\ncfg.train.log_steps = 10\ncfg.train.vis_dir = 'train_images'\ncfg.train.vis_steps = 200\ncfg.train.write_summary = True", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.train.lr", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.train.lr = 1e-4\ncfg.train.arcface_lr = 1e-3\ncfg.train.weight_decay = 0.0\ncfg.train.lr_update_step = 100000000\ncfg.train.log_dir = 'logs'\ncfg.train.log_steps = 10\ncfg.train.vis_dir = 'train_images'\ncfg.train.vis_steps = 200\ncfg.train.write_summary = True\ncfg.train.checkpoint_steps = 1000", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.train.arcface_lr", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.train.arcface_lr = 1e-3\ncfg.train.weight_decay = 0.0\ncfg.train.lr_update_step = 100000000\ncfg.train.log_dir = 'logs'\ncfg.train.log_steps = 10\ncfg.train.vis_dir = 'train_images'\ncfg.train.vis_steps = 200\ncfg.train.write_summary = True\ncfg.train.checkpoint_steps = 1000\ncfg.train.checkpoint_epochs_steps = 2", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.train.weight_decay", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.train.weight_decay = 0.0\ncfg.train.lr_update_step = 100000000\ncfg.train.log_dir = 'logs'\ncfg.train.log_steps = 10\ncfg.train.vis_dir = 'train_images'\ncfg.train.vis_steps = 200\ncfg.train.write_summary = True\ncfg.train.checkpoint_steps = 1000\ncfg.train.checkpoint_epochs_steps = 2\ncfg.train.val_steps = 1000", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.train.lr_update_step", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.train.lr_update_step = 100000000\ncfg.train.log_dir = 'logs'\ncfg.train.log_steps = 10\ncfg.train.vis_dir = 
'train_images'\ncfg.train.vis_steps = 200\ncfg.train.write_summary = True\ncfg.train.checkpoint_steps = 1000\ncfg.train.checkpoint_epochs_steps = 2\ncfg.train.val_steps = 1000\ncfg.train.val_vis_dir = 'val_images'", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.train.log_dir", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.train.log_dir = 'logs'\ncfg.train.log_steps = 10\ncfg.train.vis_dir = 'train_images'\ncfg.train.vis_steps = 200\ncfg.train.write_summary = True\ncfg.train.checkpoint_steps = 1000\ncfg.train.checkpoint_epochs_steps = 2\ncfg.train.val_steps = 1000\ncfg.train.val_vis_dir = 'val_images'\ncfg.train.eval_steps = 5000", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.train.log_steps", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.train.log_steps = 10\ncfg.train.vis_dir = 'train_images'\ncfg.train.vis_steps = 200\ncfg.train.write_summary = True\ncfg.train.checkpoint_steps = 1000\ncfg.train.checkpoint_epochs_steps = 2\ncfg.train.val_steps = 1000\ncfg.train.val_vis_dir = 'val_images'\ncfg.train.eval_steps = 5000\ncfg.train.reset_optimizer = False", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.train.vis_dir", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.train.vis_dir = 'train_images'\ncfg.train.vis_steps = 200\ncfg.train.write_summary = True\ncfg.train.checkpoint_steps = 1000\ncfg.train.checkpoint_epochs_steps = 2\ncfg.train.val_steps = 1000\ncfg.train.val_vis_dir = 'val_images'\ncfg.train.eval_steps = 5000\ncfg.train.reset_optimizer = False\ncfg.train.val_save_img = 5000", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.train.vis_steps", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.train.vis_steps = 200\ncfg.train.write_summary = True\ncfg.train.checkpoint_steps = 1000\ncfg.train.checkpoint_epochs_steps = 2\ncfg.train.val_steps = 1000\ncfg.train.val_vis_dir = 'val_images'\ncfg.train.eval_steps = 5000\ncfg.train.reset_optimizer = False\ncfg.train.val_save_img = 5000\ncfg.test_dataset = 'now'", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.train.write_summary", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.train.write_summary = True\ncfg.train.checkpoint_steps = 1000\ncfg.train.checkpoint_epochs_steps = 2\ncfg.train.val_steps = 1000\ncfg.train.val_vis_dir = 'val_images'\ncfg.train.eval_steps = 5000\ncfg.train.reset_optimizer = False\ncfg.train.val_save_img = 5000\ncfg.test_dataset = 'now'\ndef get_cfg_defaults():", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.train.checkpoint_steps", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.train.checkpoint_steps = 1000\ncfg.train.checkpoint_epochs_steps = 2\ncfg.train.val_steps = 1000\ncfg.train.val_vis_dir = 'val_images'\ncfg.train.eval_steps = 5000\ncfg.train.reset_optimizer = False\ncfg.train.val_save_img = 5000\ncfg.test_dataset = 'now'\ndef get_cfg_defaults():\n return cfg.clone()", + 
"detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.train.checkpoint_epochs_steps", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.train.checkpoint_epochs_steps = 2\ncfg.train.val_steps = 1000\ncfg.train.val_vis_dir = 'val_images'\ncfg.train.eval_steps = 5000\ncfg.train.reset_optimizer = False\ncfg.train.val_save_img = 5000\ncfg.test_dataset = 'now'\ndef get_cfg_defaults():\n return cfg.clone()\ndef update_cfg(cfg, cfg_file):", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.train.val_steps", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.train.val_steps = 1000\ncfg.train.val_vis_dir = 'val_images'\ncfg.train.eval_steps = 5000\ncfg.train.reset_optimizer = False\ncfg.train.val_save_img = 5000\ncfg.test_dataset = 'now'\ndef get_cfg_defaults():\n return cfg.clone()\ndef update_cfg(cfg, cfg_file):\n cfg.merge_from_file(cfg_file)", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.train.val_vis_dir", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.train.val_vis_dir = 'val_images'\ncfg.train.eval_steps = 5000\ncfg.train.reset_optimizer = False\ncfg.train.val_save_img = 5000\ncfg.test_dataset = 'now'\ndef get_cfg_defaults():\n return cfg.clone()\ndef update_cfg(cfg, cfg_file):\n cfg.merge_from_file(cfg_file)\n return cfg.clone()", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.train.eval_steps", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.train.eval_steps = 5000\ncfg.train.reset_optimizer = False\ncfg.train.val_save_img = 5000\ncfg.test_dataset = 'now'\ndef get_cfg_defaults():\n return cfg.clone()\ndef update_cfg(cfg, cfg_file):\n cfg.merge_from_file(cfg_file)\n return cfg.clone()\ndef parse_args():", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.train.reset_optimizer", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.train.reset_optimizer = False\ncfg.train.val_save_img = 5000\ncfg.test_dataset = 'now'\ndef get_cfg_defaults():\n return cfg.clone()\ndef update_cfg(cfg, cfg_file):\n cfg.merge_from_file(cfg_file)\n return cfg.clone()\ndef parse_args():\n parser = argparse.ArgumentParser()", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.train.val_save_img", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.train.val_save_img = 5000\ncfg.test_dataset = 'now'\ndef get_cfg_defaults():\n return cfg.clone()\ndef update_cfg(cfg, cfg_file):\n cfg.merge_from_file(cfg_file)\n return cfg.clone()\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--cfg', type=str, help='cfg file path', required=True)", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "cfg.test_dataset", + "kind": 5, + "importPath": "modules.MICA.micalib.config", + "description": "modules.MICA.micalib.config", + "peekOfCode": "cfg.test_dataset = 'now'\ndef get_cfg_defaults():\n return cfg.clone()\ndef update_cfg(cfg, cfg_file):\n 
cfg.merge_from_file(cfg_file)\n return cfg.clone()\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--cfg', type=str, help='cfg file path', required=True)\n parser.add_argument('--test_dataset', type=str, help='Test dataset type', default='')", + "detail": "modules.MICA.micalib.config", + "documentation": {} + }, + { + "label": "Struct", + "kind": 6, + "importPath": "modules.MICA.micalib.flame", + "description": "modules.MICA.micalib.flame", + "peekOfCode": "class Struct(object):\n def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)\nclass FLAME(nn.Module):\n \"\"\"\n borrowed from https://github.com/soubhiksanyal/FLAME_PyTorch/blob/master/FLAME.py\n Given flame parameters this class generates a differentiable FLAME function\n which outputs the a mesh and 2D/3D facial landmarks\n \"\"\"", + "detail": "modules.MICA.micalib.flame", + "documentation": {} + }, + { + "label": "FLAME", + "kind": 6, + "importPath": "modules.MICA.micalib.flame", + "description": "modules.MICA.micalib.flame", + "peekOfCode": "class FLAME(nn.Module):\n \"\"\"\n borrowed from https://github.com/soubhiksanyal/FLAME_PyTorch/blob/master/FLAME.py\n Given flame parameters this class generates a differentiable FLAME function\n which outputs the a mesh and 2D/3D facial landmarks\n \"\"\"\n def __init__(self, config, optimize_basis=False):\n super(FLAME, self).__init__()\n loguru.logger.info(\"[FLAME] creating the FLAME Decoder\")\n with open(config.flame_model_path, 'rb') as f:", + "detail": "modules.MICA.micalib.flame", + "documentation": {} + }, + { + "label": "to_tensor", + "kind": 2, + "importPath": "modules.MICA.micalib.flame", + "description": "modules.MICA.micalib.flame", + "peekOfCode": "def to_tensor(array, dtype=torch.float32):\n if 'torch.tensor' not in str(type(array)):\n return torch.tensor(array, dtype=dtype)\ndef to_np(array, dtype=np.float32):\n if 'scipy.sparse' in str(type(array)):\n array = array.todense()\n return np.array(array, dtype=dtype)\nclass Struct(object):\n def __init__(self, **kwargs):\n for key, val in kwargs.items():", + "detail": "modules.MICA.micalib.flame", + "documentation": {} + }, + { + "label": "to_np", + "kind": 2, + "importPath": "modules.MICA.micalib.flame", + "description": "modules.MICA.micalib.flame", + "peekOfCode": "def to_np(array, dtype=np.float32):\n if 'scipy.sparse' in str(type(array)):\n array = array.todense()\n return np.array(array, dtype=dtype)\nclass Struct(object):\n def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)\nclass FLAME(nn.Module):\n \"\"\"", + "detail": "modules.MICA.micalib.flame", + "documentation": {} + }, + { + "label": "MappingNetwork", + "kind": 6, + "importPath": "modules.MICA.micalib.generator", + "description": "modules.MICA.micalib.generator", + "peekOfCode": "class MappingNetwork(nn.Module):\n def __init__(self, z_dim, map_hidden_dim, map_output_dim, hidden=2):\n super().__init__()\n if hidden > 5:\n self.skips = [int(hidden / 2)]\n else:\n self.skips = []\n self.network = nn.ModuleList(\n [nn.Linear(z_dim, map_hidden_dim)] +\n [nn.Linear(map_hidden_dim, map_hidden_dim) if i not in self.skips else", + "detail": "modules.MICA.micalib.generator", + "documentation": {} + }, + { + "label": "Generator", + "kind": 6, + "importPath": "modules.MICA.micalib.generator", + "description": "modules.MICA.micalib.generator", + "peekOfCode": "class Generator(nn.Module):\n def __init__(self, z_dim, map_hidden_dim, map_output_dim, hidden, model_cfg, 
device, regress=True):\n super().__init__()\n self.device = device\n self.cfg = model_cfg\n self.regress = regress\n if self.regress:\n self.regressor = MappingNetwork(z_dim, map_hidden_dim, map_output_dim, hidden).to(self.device)\n self.generator = FLAME(model_cfg).to(self.device)\n def forward(self, arcface):", + "detail": "modules.MICA.micalib.generator", + "documentation": {} + }, + { + "label": "kaiming_leaky_init", + "kind": 2, + "importPath": "modules.MICA.micalib.generator", + "description": "modules.MICA.micalib.generator", + "peekOfCode": "def kaiming_leaky_init(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n torch.nn.init.kaiming_normal_(m.weight, a=0.2, mode='fan_in', nonlinearity='leaky_relu')\nclass MappingNetwork(nn.Module):\n def __init__(self, z_dim, map_hidden_dim, map_output_dim, hidden=2):\n super().__init__()\n if hidden > 5:\n self.skips = [int(hidden / 2)]\n else:", + "detail": "modules.MICA.micalib.generator", + "documentation": {} + }, + { + "label": "rot_mat_to_euler", + "kind": 2, + "importPath": "modules.MICA.micalib.lbs", + "description": "modules.MICA.micalib.lbs", + "peekOfCode": "def rot_mat_to_euler(rot_mats):\n # Calculates rotation matrix to euler angles\n # Careful for extreme cases of eular angles like [0.0, pi, 0.0]\n sy = torch.sqrt(rot_mats[:, 0, 0] * rot_mats[:, 0, 0] +\n rot_mats[:, 1, 0] * rot_mats[:, 1, 0])\n return torch.atan2(-rot_mats[:, 2, 0], sy)\ndef find_dynamic_lmk_idx_and_bcoords(vertices, pose, dynamic_lmk_faces_idx,\n dynamic_lmk_b_coords,\n neck_kin_chain, dtype=torch.float32):\n ''' Compute the faces, barycentric coordinates for the dynamic landmarks", + "detail": "modules.MICA.micalib.lbs", + "documentation": {} + }, + { + "label": "find_dynamic_lmk_idx_and_bcoords", + "kind": 2, + "importPath": "modules.MICA.micalib.lbs", + "description": "modules.MICA.micalib.lbs", + "peekOfCode": "def find_dynamic_lmk_idx_and_bcoords(vertices, pose, dynamic_lmk_faces_idx,\n dynamic_lmk_b_coords,\n neck_kin_chain, dtype=torch.float32):\n ''' Compute the faces, barycentric coordinates for the dynamic landmarks\n To do so, we first compute the rotation of the neck around the y-axis\n and then use a pre-computed look-up table to find the faces and the\n barycentric coordinates that will be used.\n Special thanks to Soubhik Sanyal (soubhik.sanyal@tuebingen.mpg.de)\n for providing the original TensorFlow implementation and for the LUT.\n Parameters", + "detail": "modules.MICA.micalib.lbs", + "documentation": {} + }, + { + "label": "vertices2landmarks", + "kind": 2, + "importPath": "modules.MICA.micalib.lbs", + "description": "modules.MICA.micalib.lbs", + "peekOfCode": "def vertices2landmarks(vertices, faces, lmk_faces_idx, lmk_bary_coords):\n ''' Calculates landmarks by barycentric interpolation\n Parameters\n ----------\n vertices: torch.tensor BxVx3, dtype = torch.float32\n The tensor of input vertices\n faces: torch.tensor Fx3, dtype = torch.long\n The faces of the mesh\n lmk_faces_idx: torch.tensor L, dtype = torch.long\n The tensor with the indices of the faces used to calculate the", + "detail": "modules.MICA.micalib.lbs", + "documentation": {} + }, + { + "label": "lbs", + "kind": 2, + "importPath": "modules.MICA.micalib.lbs", + "description": "modules.MICA.micalib.lbs", + "peekOfCode": "def lbs(betas, pose, v_template, shapedirs, posedirs, J_regressor, parents,\n lbs_weights, pose2rot=True, dtype=torch.float32):\n ''' Performs Linear Blend Skinning with the given shape and pose parameters\n Parameters\n ----------\n 
betas : torch.tensor BxNB\n The tensor of shape parameters\n pose : torch.tensor Bx(J + 1) * 3\n The pose parameters in axis-angle format\n v_template torch.tensor BxVx3", + "detail": "modules.MICA.micalib.lbs", + "documentation": {} + }, + { + "label": "vertices2joints", + "kind": 2, + "importPath": "modules.MICA.micalib.lbs", + "description": "modules.MICA.micalib.lbs", + "peekOfCode": "def vertices2joints(J_regressor, vertices):\n ''' Calculates the 3D joint locations from the vertices\n Parameters\n ----------\n J_regressor : torch.tensor JxV\n The regressor array that is used to calculate the joints from the\n position of the vertices\n vertices : torch.tensor BxVx3\n The tensor of mesh vertices\n Returns", + "detail": "modules.MICA.micalib.lbs", + "documentation": {} + }, + { + "label": "blend_shapes", + "kind": 2, + "importPath": "modules.MICA.micalib.lbs", + "description": "modules.MICA.micalib.lbs", + "peekOfCode": "def blend_shapes(betas, shape_disps):\n ''' Calculates the per vertex displacement due to the blend shapes\n Parameters\n ----------\n betas : torch.tensor Bx(num_betas)\n Blend shape coefficients\n shape_disps: torch.tensor Vx3x(num_betas)\n Blend shapes\n Returns\n -------", + "detail": "modules.MICA.micalib.lbs", + "documentation": {} + }, + { + "label": "batch_rodrigues", + "kind": 2, + "importPath": "modules.MICA.micalib.lbs", + "description": "modules.MICA.micalib.lbs", + "peekOfCode": "def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):\n ''' Calculates the rotation matrices for a batch of rotation vectors\n Parameters\n ----------\n rot_vecs: torch.tensor Nx3\n array of N axis-angle vectors\n Returns\n -------\n R: torch.tensor Nx3x3\n The rotation matrices for the given axis-angle parameters", + "detail": "modules.MICA.micalib.lbs", + "documentation": {} + }, + { + "label": "transform_mat", + "kind": 2, + "importPath": "modules.MICA.micalib.lbs", + "description": "modules.MICA.micalib.lbs", + "peekOfCode": "def transform_mat(R, t):\n ''' Creates a batch of transformation matrices\n Args:\n - R: Bx3x3 array of a batch of rotation matrices\n - t: Bx3x1 array of a batch of translation vectors\n Returns:\n - T: Bx4x4 Transformation matrix\n '''\n # No padding left or right, only add an extra row\n return torch.cat([F.pad(R, [0, 0, 0, 1]),", + "detail": "modules.MICA.micalib.lbs", + "documentation": {} + }, + { + "label": "batch_rigid_transform", + "kind": 2, + "importPath": "modules.MICA.micalib.lbs", + "description": "modules.MICA.micalib.lbs", + "peekOfCode": "def batch_rigid_transform(rot_mats, joints, parents, dtype=torch.float32):\n \"\"\"\n Applies a batch of rigid transformations to the joints\n Parameters\n ----------\n rot_mats : torch.tensor BxNx3x3\n Tensor of rotation matrices\n joints : torch.tensor BxNx3\n Locations of joints\n parents : torch.tensor BxN", + "detail": "modules.MICA.micalib.lbs", + "documentation": {} + }, + { + "label": "Struct", + "kind": 6, + "importPath": "modules.MICA.micalib.masking", + "description": "modules.MICA.micalib.masking", + "peekOfCode": "class Struct(object):\n def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)\nimport os\n# FLAME_masks=os.path.join(os.path.dirname(__file__),'..','./data/FLAME2020/FLAME_masks/FLAME_masks.pkl')\n# generic_model=os.path.join(os.path.dirname(__file__),'..','./data/FLAME2020/generic_model.pkl')\n# FLAME_masks=os.path.join(os.path.dirname(__file__),'../../../..','models/models_MICA/FLAME2020/FLAME_masks.pkl')\n# 
generic_model=os.path.join(os.path.dirname(__file__),'../../../..','models/models_MICA/FLAME2020/generic_model.pkl')\nclass Masking(nn.Module):", + "detail": "modules.MICA.micalib.masking", + "documentation": {} + }, + { + "label": "Masking", + "kind": 6, + "importPath": "modules.MICA.micalib.masking", + "description": "modules.MICA.micalib.masking", + "peekOfCode": "class Masking(nn.Module):\n def __init__(self, config):\n super(Masking, self).__init__()\n with open(config.FLAME_masks, 'rb') as f:\n ss = pickle.load(f, encoding='latin1')\n self.masks = Struct(**ss)\n with open(config.generic_model, 'rb') as f:\n ss = pickle.load(f, encoding='latin1')\n flame_model = Struct(**ss)\n self.masked_faces = None", + "detail": "modules.MICA.micalib.masking", + "documentation": {} + }, + { + "label": "to_tensor", + "kind": 2, + "importPath": "modules.MICA.micalib.masking", + "description": "modules.MICA.micalib.masking", + "peekOfCode": "def to_tensor(array, dtype=torch.float32):\n if 'torch.tensor' not in str(type(array)):\n return torch.tensor(array, dtype=dtype)\ndef to_np(array, dtype=np.float32):\n if 'scipy.sparse' in str(type(array)):\n array = array.todense()\n return np.array(array, dtype=dtype)\nclass Struct(object):\n def __init__(self, **kwargs):\n for key, val in kwargs.items():", + "detail": "modules.MICA.micalib.masking", + "documentation": {} + }, + { + "label": "to_np", + "kind": 2, + "importPath": "modules.MICA.micalib.masking", + "description": "modules.MICA.micalib.masking", + "peekOfCode": "def to_np(array, dtype=np.float32):\n if 'scipy.sparse' in str(type(array)):\n array = array.todense()\n return np.array(array, dtype=dtype)\nclass Struct(object):\n def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)\nimport os\n# FLAME_masks=os.path.join(os.path.dirname(__file__),'..','./data/FLAME2020/FLAME_masks/FLAME_masks.pkl')", + "detail": "modules.MICA.micalib.masking", + "documentation": {} + }, + { + "label": "MICA", + "kind": 6, + "importPath": "modules.MICA.micalib.mica", + "description": "modules.MICA.micalib.mica", + "peekOfCode": "class MICA(BaseModel):\n def __init__(self, config=None, device=None, tag='MICA'):\n super(MICA, self).__init__(config, device, tag)\n self.initialize()\n def create_model(self, model_cfg):\n mapping_layers = model_cfg.mapping_layers\n pretrained_path = None\n if not model_cfg.use_pretrained:\n pretrained_path = model_cfg.arcface_pretrained_model\n self.arcface = Arcface(pretrained_path=pretrained_path).to(self.device)", + "detail": "modules.MICA.micalib.mica", + "documentation": {} + }, + { + "label": "MeshShapeRenderer", + "kind": 6, + "importPath": "modules.MICA.micalib.renderer", + "description": "modules.MICA.micalib.renderer", + "peekOfCode": "class MeshShapeRenderer(nn.Module):\n def __init__(self, obj_filename):\n super().__init__()\n verts, faces, aux = load_obj(obj_filename)\n faces = faces.verts_idx[None, ...].cuda()\n self.register_buffer('faces', faces)\n R, T = look_at_view_transform(2.7, 10.0, 10.0)\n self.cameras = FoVPerspectiveCameras(device='cuda:0', R=R, T=T, fov=6)\n raster_settings = RasterizationSettings(\n image_size=512,", + "detail": "modules.MICA.micalib.renderer", + "documentation": {} + }, + { + "label": "Tester", + "kind": 6, + "importPath": "modules.MICA.micalib.tester", + "description": "modules.MICA.micalib.tester", + "peekOfCode": "class Tester(object):\n def __init__(self, nfc_model, config=None, device=None):\n if config is None:\n self.cfg = cfg\n else:\n self.cfg = config\n 
self.device = device\n self.batch_size = self.cfg.dataset.batch_size\n self.K = self.cfg.dataset.K\n self.render_mesh = True", + "detail": "modules.MICA.micalib.tester", + "documentation": {} + }, + { + "label": "input_mean", + "kind": 5, + "importPath": "modules.MICA.micalib.tester", + "description": "modules.MICA.micalib.tester", + "peekOfCode": "input_mean = 127.5\ninput_std = 127.5\nNOW_SCANS = '/home/wzielonka/datasets/NoWDataset/final_release_version/scans/'\nNOW_PICTURES = '/home/wzielonka/datasets/NoWDataset/final_release_version/iphone_pictures/'\nNOW_BBOX = '/home/wzielonka/datasets/NoWDataset/final_release_version/detected_face/'\nSTIRLING_PICTURES = '/home/wzielonka/datasets/Stirling/images/'\nclass Tester(object):\n def __init__(self, nfc_model, config=None, device=None):\n if config is None:\n self.cfg = cfg", + "detail": "modules.MICA.micalib.tester", + "documentation": {} + }, + { + "label": "input_std", + "kind": 5, + "importPath": "modules.MICA.micalib.tester", + "description": "modules.MICA.micalib.tester", + "peekOfCode": "input_std = 127.5\nNOW_SCANS = '/home/wzielonka/datasets/NoWDataset/final_release_version/scans/'\nNOW_PICTURES = '/home/wzielonka/datasets/NoWDataset/final_release_version/iphone_pictures/'\nNOW_BBOX = '/home/wzielonka/datasets/NoWDataset/final_release_version/detected_face/'\nSTIRLING_PICTURES = '/home/wzielonka/datasets/Stirling/images/'\nclass Tester(object):\n def __init__(self, nfc_model, config=None, device=None):\n if config is None:\n self.cfg = cfg\n else:", + "detail": "modules.MICA.micalib.tester", + "documentation": {} + }, + { + "label": "NOW_SCANS", + "kind": 5, + "importPath": "modules.MICA.micalib.tester", + "description": "modules.MICA.micalib.tester", + "peekOfCode": "NOW_SCANS = '/home/wzielonka/datasets/NoWDataset/final_release_version/scans/'\nNOW_PICTURES = '/home/wzielonka/datasets/NoWDataset/final_release_version/iphone_pictures/'\nNOW_BBOX = '/home/wzielonka/datasets/NoWDataset/final_release_version/detected_face/'\nSTIRLING_PICTURES = '/home/wzielonka/datasets/Stirling/images/'\nclass Tester(object):\n def __init__(self, nfc_model, config=None, device=None):\n if config is None:\n self.cfg = cfg\n else:\n self.cfg = config", + "detail": "modules.MICA.micalib.tester", + "documentation": {} + }, + { + "label": "NOW_PICTURES", + "kind": 5, + "importPath": "modules.MICA.micalib.tester", + "description": "modules.MICA.micalib.tester", + "peekOfCode": "NOW_PICTURES = '/home/wzielonka/datasets/NoWDataset/final_release_version/iphone_pictures/'\nNOW_BBOX = '/home/wzielonka/datasets/NoWDataset/final_release_version/detected_face/'\nSTIRLING_PICTURES = '/home/wzielonka/datasets/Stirling/images/'\nclass Tester(object):\n def __init__(self, nfc_model, config=None, device=None):\n if config is None:\n self.cfg = cfg\n else:\n self.cfg = config\n self.device = device", + "detail": "modules.MICA.micalib.tester", + "documentation": {} + }, + { + "label": "NOW_BBOX", + "kind": 5, + "importPath": "modules.MICA.micalib.tester", + "description": "modules.MICA.micalib.tester", + "peekOfCode": "NOW_BBOX = '/home/wzielonka/datasets/NoWDataset/final_release_version/detected_face/'\nSTIRLING_PICTURES = '/home/wzielonka/datasets/Stirling/images/'\nclass Tester(object):\n def __init__(self, nfc_model, config=None, device=None):\n if config is None:\n self.cfg = cfg\n else:\n self.cfg = config\n self.device = device\n self.batch_size = self.cfg.dataset.batch_size", + "detail": "modules.MICA.micalib.tester", + "documentation": {} + }, + { + "label": 
"STIRLING_PICTURES", + "kind": 5, + "importPath": "modules.MICA.micalib.tester", + "description": "modules.MICA.micalib.tester", + "peekOfCode": "STIRLING_PICTURES = '/home/wzielonka/datasets/Stirling/images/'\nclass Tester(object):\n def __init__(self, nfc_model, config=None, device=None):\n if config is None:\n self.cfg = cfg\n else:\n self.cfg = config\n self.device = device\n self.batch_size = self.cfg.dataset.batch_size\n self.K = self.cfg.dataset.K", + "detail": "modules.MICA.micalib.tester", + "documentation": {} + }, + { + "label": "Trainer", + "kind": 6, + "importPath": "modules.MICA.micalib.trainer", + "description": "modules.MICA.micalib.trainer", + "peekOfCode": "class Trainer(object):\n def __init__(self, nfc_model, config=None, device=None):\n if config is None:\n self.cfg = cfg\n else:\n self.cfg = config\n logger.add(os.path.join(self.cfg.output_dir, self.cfg.train.log_dir, 'train.log'))\n self.device = device\n self.batch_size = self.cfg.dataset.batch_size\n self.K = self.cfg.dataset.K", + "detail": "modules.MICA.micalib.trainer", + "documentation": {} + }, + { + "label": "print_info", + "kind": 2, + "importPath": "modules.MICA.micalib.trainer", + "description": "modules.MICA.micalib.trainer", + "peekOfCode": "def print_info(rank):\n props = torch.cuda.get_device_properties(rank)\n logger.info(f'[INFO] {torch.cuda.get_device_name(rank)}')\n logger.info(f'[INFO] Rank: {str(rank)}')\n logger.info(f'[INFO] Memory: {round(props.total_memory / 1024 ** 3, 1)} GB')\n logger.info(f'[INFO] Allocated: {round(torch.cuda.memory_allocated(rank) / 1024 ** 3, 1)} GB')\n logger.info(f'[INFO] Cached: {round(torch.cuda.memory_reserved(rank) / 1024 ** 3, 1)} GB')\ndef seed_worker(worker_id):\n worker_seed = torch.initial_seed() % 2 ** 32\n np.random.seed(worker_seed)", + "detail": "modules.MICA.micalib.trainer", + "documentation": {} + }, + { + "label": "seed_worker", + "kind": 2, + "importPath": "modules.MICA.micalib.trainer", + "description": "modules.MICA.micalib.trainer", + "peekOfCode": "def seed_worker(worker_id):\n worker_seed = torch.initial_seed() % 2 ** 32\n np.random.seed(worker_seed)\n random.seed(worker_seed)\nclass Trainer(object):\n def __init__(self, nfc_model, config=None, device=None):\n if config is None:\n self.cfg = cfg\n else:\n self.cfg = config", + "detail": "modules.MICA.micalib.trainer", + "documentation": {} + }, + { + "label": "find_model_using_name", + "kind": 2, + "importPath": "modules.MICA.micalib.util", + "description": "modules.MICA.micalib.util", + "peekOfCode": "def find_model_using_name(model_dir, model_name):\n # adapted from pix2pix framework: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/__init__.py#L25\n # import \"model_dir/modelname.py\"\n model_filename = model_dir + \".\" + model_name\n modellib = importlib.import_module(model_filename, package=model_dir)\n # In the file, the class called ModelName() will\n # be instantiated. It has to be a subclass of BaseModel,\n # and it is case-insensitive.\n model = None\n target_model_name = model_name.replace('_', '')", + "detail": "modules.MICA.micalib.util", + "documentation": {} + }, + { + "label": "visualize_grid", + "kind": 2, + "importPath": "modules.MICA.micalib.util", + "description": "modules.MICA.micalib.util", + "peekOfCode": "def visualize_grid(visdict, savepath=None, size=224, dim=1, return_gird=True):\n '''\n image range should be [0,1]\n dim: 2 for horizontal. 
1 for vertical\n '''\n assert dim == 1 or dim == 2\n grids = {}\n for key in visdict:\n b, c, h, w = visdict[key].shape\n if dim == 2:", + "detail": "modules.MICA.micalib.util", + "documentation": {} + }, + { + "label": "Validator", + "kind": 6, + "importPath": "modules.MICA.micalib.validator", + "description": "modules.MICA.micalib.validator", + "peekOfCode": "class Validator(object):\n def __init__(self, trainer):\n self.trainer = trainer\n self.device = self.trainer.device\n self.nfc = self.trainer.nfc\n self.cfg = deepcopy(self.trainer.cfg)\n self.device = trainer.device\n # Create a separate instance only for predictions\n # nfc = util.find_model_using_name(model_dir='nfclib.models', model_name=self.cfg.model.name)(self.cfg, self.device)\n # self.tester = Tester(nfc, self.cfg, self.device)", + "detail": "modules.MICA.micalib.validator", + "documentation": {} + }, + { + "label": "test", + "kind": 2, + "importPath": "modules.MICA.testing.now.now", + "description": "modules.MICA.testing.now.now", + "peekOfCode": "def test():\n global experiments\n if len(experiments) == 0:\n experiments = list(filter(lambda f: 'condor' not in f, os.listdir('../../output/')))\n os.system('rm -rf logs')\n os.system('rm -rf jobs')\n os.makedirs('logs', exist_ok=True)\n os.makedirs('jobs', exist_ok=True)\n for experiment in sorted(experiments):\n print(f'Testing {experiment}')", + "detail": "modules.MICA.testing.now.now", + "documentation": {} + }, + { + "label": "logs", + "kind": 5, + "importPath": "modules.MICA.testing.now.now", + "description": "modules.MICA.testing.now.now", + "peekOfCode": "logs = '/home/wzielonka/projects/MICA/testing/now/logs/'\njobs = '/home/wzielonka/projects/MICA/testing/now/jobs/'\nroot = '/home/wzielonka/projects/MICA/output/'\nexperiments = []\ndef test():\n global experiments\n if len(experiments) == 0:\n experiments = list(filter(lambda f: 'condor' not in f, os.listdir('../../output/')))\n os.system('rm -rf logs')\n os.system('rm -rf jobs')", + "detail": "modules.MICA.testing.now.now", + "documentation": {} + }, + { + "label": "jobs", + "kind": 5, + "importPath": "modules.MICA.testing.now.now", + "description": "modules.MICA.testing.now.now", + "peekOfCode": "jobs = '/home/wzielonka/projects/MICA/testing/now/jobs/'\nroot = '/home/wzielonka/projects/MICA/output/'\nexperiments = []\ndef test():\n global experiments\n if len(experiments) == 0:\n experiments = list(filter(lambda f: 'condor' not in f, os.listdir('../../output/')))\n os.system('rm -rf logs')\n os.system('rm -rf jobs')\n os.makedirs('logs', exist_ok=True)", + "detail": "modules.MICA.testing.now.now", + "documentation": {} + }, + { + "label": "root", + "kind": 5, + "importPath": "modules.MICA.testing.now.now", + "description": "modules.MICA.testing.now.now", + "peekOfCode": "root = '/home/wzielonka/projects/MICA/output/'\nexperiments = []\ndef test():\n global experiments\n if len(experiments) == 0:\n experiments = list(filter(lambda f: 'condor' not in f, os.listdir('../../output/')))\n os.system('rm -rf logs')\n os.system('rm -rf jobs')\n os.makedirs('logs', exist_ok=True)\n os.makedirs('jobs', exist_ok=True)", + "detail": "modules.MICA.testing.now.now", + "documentation": {} + }, + { + "label": "experiments", + "kind": 5, + "importPath": "modules.MICA.testing.now.now", + "description": "modules.MICA.testing.now.now", + "peekOfCode": "experiments = []\ndef test():\n global experiments\n if len(experiments) == 0:\n experiments = list(filter(lambda f: 'condor' not in f, os.listdir('../../output/')))\n os.system('rm -rf 
logs')\n os.system('rm -rf jobs')\n os.makedirs('logs', exist_ok=True)\n os.makedirs('jobs', exist_ok=True)\n for experiment in sorted(experiments):", + "detail": "modules.MICA.testing.now.now", + "documentation": {} + }, + { + "label": "test", + "kind": 2, + "importPath": "modules.MICA.testing.stirling.stirling", + "description": "modules.MICA.testing.stirling.stirling", + "peekOfCode": "def test():\n global experiments\n if len(experiments) == 0:\n experiments = list(filter(lambda f: 'condor' not in f and 'resnet' in f, os.listdir('../../output/')))\n # experiments = list(filter(lambda f: 'experiment_' in f, os.listdir('../../output/')))\n os.system('rm -rf logs')\n os.system('rm -rf jobs')\n os.makedirs('logs', exist_ok=True)\n os.makedirs('jobs', exist_ok=True)\n for experiment in sorted(experiments):", + "detail": "modules.MICA.testing.stirling.stirling", + "documentation": {} + }, + { + "label": "logs", + "kind": 5, + "importPath": "modules.MICA.testing.stirling.stirling", + "description": "modules.MICA.testing.stirling.stirling", + "peekOfCode": "logs = '/home/wzielonka/projects/MICA/testing/stirling/logs/'\njobs = '/home/wzielonka/projects/MICA/testing/stirling/jobs/'\nroot = '/home/wzielonka/projects/MICA/output/'\nexperiments = []\ndef test():\n global experiments\n if len(experiments) == 0:\n experiments = list(filter(lambda f: 'condor' not in f and 'resnet' in f, os.listdir('../../output/')))\n # experiments = list(filter(lambda f: 'experiment_' in f, os.listdir('../../output/')))\n os.system('rm -rf logs')", + "detail": "modules.MICA.testing.stirling.stirling", + "documentation": {} + }, + { + "label": "jobs", + "kind": 5, + "importPath": "modules.MICA.testing.stirling.stirling", + "description": "modules.MICA.testing.stirling.stirling", + "peekOfCode": "jobs = '/home/wzielonka/projects/MICA/testing/stirling/jobs/'\nroot = '/home/wzielonka/projects/MICA/output/'\nexperiments = []\ndef test():\n global experiments\n if len(experiments) == 0:\n experiments = list(filter(lambda f: 'condor' not in f and 'resnet' in f, os.listdir('../../output/')))\n # experiments = list(filter(lambda f: 'experiment_' in f, os.listdir('../../output/')))\n os.system('rm -rf logs')\n os.system('rm -rf jobs')", + "detail": "modules.MICA.testing.stirling.stirling", + "documentation": {} + }, + { + "label": "root", + "kind": 5, + "importPath": "modules.MICA.testing.stirling.stirling", + "description": "modules.MICA.testing.stirling.stirling", + "peekOfCode": "root = '/home/wzielonka/projects/MICA/output/'\nexperiments = []\ndef test():\n global experiments\n if len(experiments) == 0:\n experiments = list(filter(lambda f: 'condor' not in f and 'resnet' in f, os.listdir('../../output/')))\n # experiments = list(filter(lambda f: 'experiment_' in f, os.listdir('../../output/')))\n os.system('rm -rf logs')\n os.system('rm -rf jobs')\n os.makedirs('logs', exist_ok=True)", + "detail": "modules.MICA.testing.stirling.stirling", + "documentation": {} + }, + { + "label": "experiments", + "kind": 5, + "importPath": "modules.MICA.testing.stirling.stirling", + "description": "modules.MICA.testing.stirling.stirling", + "peekOfCode": "experiments = []\ndef test():\n global experiments\n if len(experiments) == 0:\n experiments = list(filter(lambda f: 'condor' not in f and 'resnet' in f, os.listdir('../../output/')))\n # experiments = list(filter(lambda f: 'experiment_' in f, os.listdir('../../output/')))\n os.system('rm -rf logs')\n os.system('rm -rf jobs')\n os.makedirs('logs', exist_ok=True)\n os.makedirs('jobs', 
exist_ok=True)", + "detail": "modules.MICA.testing.stirling.stirling", + "documentation": {} + }, + { + "label": "api_MICA", + "kind": 6, + "importPath": "modules.MICA.api_MICA", + "description": "modules.MICA.api_MICA", + "peekOfCode": "class api_MICA(object):\n def __init__(self):\n cfg = get_cfg_defaults()\n device = 'cuda:0'\n cfg.model.testing = True\n self.mica = util.find_model_using_name(model_dir='micalib', model_name=cfg.model.name)(cfg, device)\n self.load_checkpoint(self.mica,model_path=cfg.pretrained_model_path)\n self.mica.eval()\n self.app = FaceAnalysis(name='antelopev2', \n providers=['CPUExecutionProvider'],", + "detail": "modules.MICA.api_MICA", + "documentation": {} + }, + { + "label": "deterministic", + "kind": 2, + "importPath": "modules.MICA.demo", + "description": "modules.MICA.demo", + "peekOfCode": "def deterministic(rank):\n torch.manual_seed(rank)\n torch.cuda.manual_seed(rank)\n np.random.seed(rank)\n random.seed(rank)\n cudnn.deterministic = True\n cudnn.benchmark = False\ndef process(args, app, image_size=224):\n dst = Path(args.a)\n dst.mkdir(parents=True, exist_ok=True)", + "detail": "modules.MICA.demo", + "documentation": {} + }, + { + "label": "process", + "kind": 2, + "importPath": "modules.MICA.demo", + "description": "modules.MICA.demo", + "peekOfCode": "def process(args, app, image_size=224):\n dst = Path(args.a)\n dst.mkdir(parents=True, exist_ok=True)\n processes = []\n image_paths = sorted(glob(args.i + '/*.*'))\n for image_path in tqdm(image_paths):\n name = Path(image_path).stem\n img = cv2.imread(image_path)\n bboxes, kpss = app.det_model.detect(img, max_num=0, metric='default')\n if bboxes.shape[0] == 0:", + "detail": "modules.MICA.demo", + "documentation": {} + }, + { + "label": "to_batch", + "kind": 2, + "importPath": "modules.MICA.demo", + "description": "modules.MICA.demo", + "peekOfCode": "def to_batch(path):\n src = path.replace('npy', 'jpg')\n if not os.path.exists(src):\n src = path.replace('npy', 'png')\n image = imread(src)[:, :, :3]\n image = image / 255.\n image = cv2.resize(image, (224, 224)).transpose(2, 0, 1)\n image = torch.tensor(image).cuda()[None]\n arcface = np.load(path)\n arcface = torch.tensor(arcface).cuda()[None]", + "detail": "modules.MICA.demo", + "documentation": {} + }, + { + "label": "load_checkpoint", + "kind": 2, + "importPath": "modules.MICA.demo", + "description": "modules.MICA.demo", + "peekOfCode": "def load_checkpoint(args, mica):\n checkpoint = torch.load(args.m)\n if 'arcface' in checkpoint:\n mica.arcface.load_state_dict(checkpoint['arcface'])\n if 'flameModel' in checkpoint:\n mica.flameModel.load_state_dict(checkpoint['flameModel'])\ndef main(cfg, args):\n device = 'cuda:0'\n cfg.model.testing = True\n mica = util.find_model_using_name(model_dir='micalib.models', model_name=cfg.model.name)(cfg, device)", + "detail": "modules.MICA.demo", + "documentation": {} + }, + { + "label": "main", + "kind": 2, + "importPath": "modules.MICA.demo", + "description": "modules.MICA.demo", + "peekOfCode": "def main(cfg, args):\n device = 'cuda:0'\n cfg.model.testing = True\n mica = util.find_model_using_name(model_dir='micalib.models', model_name=cfg.model.name)(cfg, device)\n load_checkpoint(args, mica)\n mica.eval()\n faces = mica.render.faces[0].cpu()\n Path(args.o).mkdir(exist_ok=True, parents=True)\n app = FaceAnalysis(name='antelopev2', providers=['CPUExecutionProvider'])\n # app = FaceAnalysis(name='antelopev2', providers=['CUDAExecutionProvider'])", + "detail": "modules.MICA.demo", + "documentation": {} + }, + { 
+ "label": "setup", + "kind": 2, + "importPath": "modules.MICA.jobs", + "description": "modules.MICA.jobs", + "peekOfCode": "def setup(rank, world_size, port):\n os.environ['MASTER_ADDR'] = 'localhost'\n os.environ['MASTER_PORT'] = str(port)\n dist.init_process_group(\"nccl\", rank=rank, world_size=world_size, init_method=\"env://\")\ndef deterministic(rank):\n torch.manual_seed(rank)\n torch.cuda.manual_seed(rank)\n np.random.seed(rank)\n random.seed(rank)\n cudnn.deterministic = True", + "detail": "modules.MICA.jobs", + "documentation": {} + }, + { + "label": "deterministic", + "kind": 2, + "importPath": "modules.MICA.jobs", + "description": "modules.MICA.jobs", + "peekOfCode": "def deterministic(rank):\n torch.manual_seed(rank)\n torch.cuda.manual_seed(rank)\n np.random.seed(rank)\n random.seed(rank)\n cudnn.deterministic = True\n cudnn.benchmark = False\ndef test(rank, world_size, cfg, args):\n port = np.random.randint(low=0, high=2000)\n setup(rank, world_size, 12310 + port)", + "detail": "modules.MICA.jobs", + "documentation": {} + }, + { + "label": "test", + "kind": 2, + "importPath": "modules.MICA.jobs", + "description": "modules.MICA.jobs", + "peekOfCode": "def test(rank, world_size, cfg, args):\n port = np.random.randint(low=0, high=2000)\n setup(rank, world_size, 12310 + port)\n deterministic(rank)\n cfg.model.testing = True\n mica = util.find_model_using_name(model_dir='micalib.models', model_name=cfg.model.name)(cfg, rank)\n tester = Tester(nfc_model=mica, config=cfg, device=rank)\n tester.render_mesh = True\n if args.test_dataset.upper() == 'STIRLING':\n tester.test_stirling(args.checkpoint)", + "detail": "modules.MICA.jobs", + "documentation": {} + }, + { + "label": "train", + "kind": 2, + "importPath": "modules.MICA.jobs", + "description": "modules.MICA.jobs", + "peekOfCode": "def train(rank, world_size, cfg):\n port = np.random.randint(low=0, high=2000)\n setup(rank, world_size, 12310 + port)\n logger.info(f'[MAIN] output_dir: {cfg.output_dir}')\n os.makedirs(os.path.join(cfg.output_dir, cfg.train.log_dir), exist_ok=True)\n os.makedirs(os.path.join(cfg.output_dir, cfg.train.vis_dir), exist_ok=True)\n os.makedirs(os.path.join(cfg.output_dir, cfg.train.val_vis_dir), exist_ok=True)\n with open(os.path.join(cfg.output_dir, cfg.train.log_dir, 'full_config.yaml'), 'w') as f:\n yaml.dump(cfg, f, default_flow_style=False)\n # shutil.copy(cfg.cfg_file, os.path.join(cfg.output_dir, 'config.yaml'))", + "detail": "modules.MICA.jobs", + "documentation": {} + }, + { + "label": "main", + "kind": 2, + "importPath": "modules.MICA.render_dataset", + "description": "modules.MICA.render_dataset", + "peekOfCode": "def main():\n cfg = get_cfg_defaults()\n render = MeshShapeRenderer(obj_filename=cfg.model.topology_path)\n flame = FLAME(cfg.model).to('cuda:0')\n datasets = sorted(glob('/home/wzielonka/datasets/MICA/*'))\n for dataset in tqdm(datasets):\n meshes = sorted(glob(f'{dataset}/FLAME_parameters/*/*.npz'))\n sample_list = np.array(np.random.choice(range(len(meshes)), size=30 * 5))\n dst = Path('./output', Path(dataset).name)\n dst.mkdir(parents=True, exist_ok=True)", + "detail": "modules.MICA.render_dataset", + "documentation": {} + }, + { + "label": "api_multi_body", + "kind": 2, + "importPath": "modules.PIXIE.demos.api_multi_pixie", + "description": "modules.PIXIE.demos.api_multi_pixie", + "peekOfCode": "def api_multi_body(\n imgfolder='',\n savefolder='',\n visfolder='',\n focal=5000,\n device='cuda',\n iscrop=True,\n saveVis=True,\n saveMat=True,\n rasterizer_type='pytorch3d'", + 
"detail": "modules.PIXIE.demos.api_multi_pixie", + "documentation": {} + }, + { + "label": "main", + "kind": 2, + "importPath": "modules.PIXIE.demos.demo_animate_body", + "description": "modules.PIXIE.demos.demo_animate_body", + "peekOfCode": "def main(args):\n savefolder = args.savefolder\n device = args.device\n os.makedirs(savefolder, exist_ok=True)\n # check env\n if not torch.cuda.is_available():\n print('CUDA is not available! use CPU instead')\n else:\n cudnn.benchmark = True\n torch.backends.cudnn.deterministic = False", + "detail": "modules.PIXIE.demos.demo_animate_body", + "documentation": {} + }, + { + "label": "main", + "kind": 2, + "importPath": "modules.PIXIE.demos.demo_fit_body", + "description": "modules.PIXIE.demos.demo_fit_body", + "peekOfCode": "def main(args):\n savefolder = args.savefolder\n device = args.device\n os.makedirs(savefolder, exist_ok=True)\n # check env\n if not torch.cuda.is_available():\n print('CUDA is not available! use CPU instead')\n else:\n cudnn.benchmark = True\n torch.backends.cudnn.deterministic = False", + "detail": "modules.PIXIE.demos.demo_fit_body", + "documentation": {} + }, + { + "label": "main", + "kind": 2, + "importPath": "modules.PIXIE.demos.demo_fit_face", + "description": "modules.PIXIE.demos.demo_fit_face", + "peekOfCode": "def main(args):\n savefolder = args.savefolder\n device = args.device\n os.makedirs(savefolder, exist_ok=True)\n # check env\n if not torch.cuda.is_available():\n print('CUDA is not available! use CPU instead')\n else:\n cudnn.benchmark = True\n torch.backends.cudnn.deterministic = False", + "detail": "modules.PIXIE.demos.demo_fit_face", + "documentation": {} + }, + { + "label": "main", + "kind": 2, + "importPath": "modules.PIXIE.demos.demo_fit_hand", + "description": "modules.PIXIE.demos.demo_fit_hand", + "peekOfCode": "def main(args):\n savefolder = args.savefolder\n device = args.device\n os.makedirs(savefolder, exist_ok=True)\n # check env\n if not torch.cuda.is_available():\n print('CUDA is not available! use CPU instead')\n else:\n cudnn.benchmark = True\n torch.backends.cudnn.deterministic = False", + "detail": "modules.PIXIE.demos.demo_fit_hand", + "documentation": {} + }, + { + "label": "dict_tensor2npy", + "kind": 2, + "importPath": "modules.PIXIE.demos.demo_multi_bodys", + "description": "modules.PIXIE.demos.demo_multi_bodys", + "peekOfCode": "def dict_tensor2npy(tensor_dict):\n npy_dict = {}\n for key, value in tensor_dict.items():\n # print(type(value))\n # if type(value)==dict:\n # pass\n # # print('dict')\n # # npy_dict[key] = dict_tensor2npy(value)\n # el\n if type(value)==torch.Tensor:", + "detail": "modules.PIXIE.demos.demo_multi_bodys", + "documentation": {} + }, + { + "label": "main", + "kind": 2, + "importPath": "modules.PIXIE.demos.demo_multi_bodys", + "description": "modules.PIXIE.demos.demo_multi_bodys", + "peekOfCode": "def main(args):\n savefolder = args.savefolder\n device = args.device\n os.makedirs(savefolder, exist_ok=True)\n # check env\n if not torch.cuda.is_available():\n print('CUDA is not available! 
use CPU instead')\n else:\n cudnn.benchmark = True\n torch.backends.cudnn.deterministic = False", + "detail": "modules.PIXIE.demos.demo_multi_bodys", + "documentation": {} + }, + { + "label": "main", + "kind": 2, + "importPath": "modules.PIXIE.demos.demo_whole_body", + "description": "modules.PIXIE.demos.demo_whole_body", + "peekOfCode": "def main(args):\n savefolder = args.savefolder\n device = args.device\n os.makedirs(savefolder, exist_ok=True)\n # check env\n if not torch.cuda.is_available():\n print('CUDA is not available! use CPU instead')\n else:\n cudnn.benchmark = True\n torch.backends.cudnn.deterministic = False", + "detail": "modules.PIXIE.demos.demo_whole_body", + "documentation": {} + }, + { + "label": "TestData", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.datasets.body_datasets", + "description": "modules.PIXIE.pixielib.datasets.body_datasets", + "peekOfCode": "class TestData(Dataset):\n def __init__(self, testpath, iscrop=False, \n crop_size=224, hd_size = 1024, \n scale=1.1, body_detector='rcnn', \n device='cpu',\n en_multi_person=True\n ):\n '''\n testpath: folder, imagepath_list, image path, video path\n '''", + "detail": "modules.PIXIE.pixielib.datasets.body_datasets", + "documentation": {} + }, + { + "label": "build_dataloader", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.datasets.body_datasets", + "description": "modules.PIXIE.pixielib.datasets.body_datasets", + "peekOfCode": "def build_dataloader(testpath, batch_size=1):\n data_list = []\n dataset = TestData(testpath = testpath)\n dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False,\n num_workers=1,\n pin_memory=True,\n drop_last =False)\n return dataset, dataloader\ndef video2sequence(video_path):\n print('extract frames from video: {}...'.format(video_path))", + "detail": "modules.PIXIE.pixielib.datasets.body_datasets", + "documentation": {} + }, + { + "label": "video2sequence", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.datasets.body_datasets", + "description": "modules.PIXIE.pixielib.datasets.body_datasets", + "peekOfCode": "def video2sequence(video_path):\n print('extract frames from video: {}...'.format(video_path))\n videofolder = video_path.split('.')[0]\n os.makedirs(videofolder, exist_ok=True)\n video_name = video_path.split('/')[-1].split('.')[0]\n vidcap = cv2.VideoCapture(video_path)\n success, image = vidcap.read()\n count = 0\n imagepath_list = []\n while success:", + "detail": "modules.PIXIE.pixielib.datasets.body_datasets", + "documentation": {} + }, + { + "label": "FasterRCNN", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.datasets.detectors", + "description": "modules.PIXIE.pixielib.datasets.detectors", + "peekOfCode": "class FasterRCNN(object):\n ''' detect body\n '''\n def __init__(self, device='cuda:0'): \n '''\n https://pytorch.org/docs/stable/torchvision/models.html#faster-r-cnn\n '''\n import torchvision\n # self.model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)\n self.model = torchvision.models.detection.fasterrcnn_mobilenet_v3_large_320_fpn(pretrained=True)", + "detail": "modules.PIXIE.pixielib.datasets.detectors", + "documentation": {} + }, + { + "label": "Yolov4", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.datasets.detectors", + "description": "modules.PIXIE.pixielib.datasets.detectors", + "peekOfCode": "class Yolov4(object):\n def __init__(self, device='cuda:0'):\n pass\n @torch.no_grad()\n def run(self, image):\n '''\n image: 0-255, uint8, rgb, [h, w, 3]\n return: detected box list\n '''\n pass", 
+ "detail": "modules.PIXIE.pixielib.datasets.detectors", + "documentation": {} + }, + { + "label": "KeypointRCNN", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.datasets.detectors", + "description": "modules.PIXIE.pixielib.datasets.detectors", + "peekOfCode": "class KeypointRCNN(object):\n ''' Constructs a Keypoint R-CNN model with a ResNet-50-FPN backbone.\n Ref: https://pytorch.org/docs/stable/torchvision/models.html#keypoint-r-cnn\n 'nose',\n 'left_eye',\n 'right_eye',\n 'left_ear',\n 'right_ear',\n 'left_shoulder',\n 'right_shoulder',", + "detail": "modules.PIXIE.pixielib.datasets.detectors", + "documentation": {} + }, + { + "label": "FAN", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.datasets.detectors", + "description": "modules.PIXIE.pixielib.datasets.detectors", + "peekOfCode": "class FAN(object):\n def __init__(self):\n import face_alignment\n self.model = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=False,device='cpu')\n def run(self, image):\n '''\n image: 0-255, uint8, rgb, [h, w, 3]\n return: detected box list\n '''\n out = self.model.get_landmarks(image)", + "detail": "modules.PIXIE.pixielib.datasets.detectors", + "documentation": {} + }, + { + "label": "COCO_INSTANCE_CATEGORY_NAMES", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.detectors", + "description": "modules.PIXIE.pixielib.datasets.detectors", + "peekOfCode": "COCO_INSTANCE_CATEGORY_NAMES = [\n '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',\n 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',\n 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',\n 'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',\n 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',\n 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',\n 'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',\n 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',\n 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',", + "detail": "modules.PIXIE.pixielib.datasets.detectors", + "documentation": {} + }, + { + "label": "TestData", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.datasets.face_datasets", + "description": "modules.PIXIE.pixielib.datasets.face_datasets", + "peekOfCode": "class TestData(Dataset):\n def __init__(self, testpath, iscrop=True, crop_size=224, scale=2.0, face_detector='fan'):\n '''\n testpath: folder, imagepath_list, image path, video path\n '''\n if isinstance(testpath, list):\n self.imagepath_list = testpath\n elif os.path.isdir(testpath): \n self.imagepath_list = glob(testpath + '/*.jpg') + glob(testpath + '/*.png') + glob(testpath + '/*.bmp') + glob(testpath + '/*.jpeg')\n elif os.path.isfile(testpath) and (testpath[-3:] in ['jpg', 'png', 'bmp']):", + "detail": "modules.PIXIE.pixielib.datasets.face_datasets", + "documentation": {} + }, + { + "label": "NoWTest", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.datasets.face_datasets", + "description": "modules.PIXIE.pixielib.datasets.face_datasets", + "peekOfCode": "class NoWTest(Dataset):\n def __init__(self, iscrop=True, crop_size=224, scale=2.0, ):\n self.iscrop = iscrop\n self.scale = scale\n self.crop_size = crop_size\n # self.data_path = '/ps/scratch/face2d3d/texture_in_the_wild_code/NoW_validation/image_paths_ring_6_elements.npy'\n self.data_path = 
'/ps/scratch/face2d3d/ringnetpp/eccv/test_data/evaluation/NoW_Dataset/final_release_version/test_image_paths_ring_6_elements.npy'\n self.data_lines = np.load(self.data_path).astype('str').flatten()\n # import ipdb; ipdb.set_trace()\n self.imagepath = '/ps/scratch/face2d3d/ringnetpp/eccv/test_data/evaluation/NoW_Dataset/final_release_version/iphone_pictures/'", + "detail": "modules.PIXIE.pixielib.datasets.face_datasets", + "documentation": {} + }, + { + "label": "NoWTest_body", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.datasets.face_datasets", + "description": "modules.PIXIE.pixielib.datasets.face_datasets", + "peekOfCode": "class NoWTest_body(Dataset):\n def __init__(self, iscrop=False, crop_size=224, hd_size = 1024, scale=1.1, body_detector='rcnn', device='cuda:0'):\n self.iscrop = iscrop\n self.scale = scale\n self.crop_size = crop_size\n # self.data_path = '/ps/scratch/face2d3d/texture_in_the_wild_code/NoW_validation/image_paths_ring_6_elements.npy'\n # self.data_path = '/ps/scratch/face2d3d/ringnetpp/eccv/test_data/evaluation/NoW_Dataset/final_release_version/test_image_paths_ring_6_elements.npy'\n self.data_path = '/ps/scratch/face2d3d/texture_in_the_wild_code/NoW_validation/image_paths._ring_6_elements.npy'\n self.data_lines = np.load(self.data_path).astype('str').flatten()\n # import ipdb; ipdb.set_trace()", + "detail": "modules.PIXIE.pixielib.datasets.face_datasets", + "documentation": {} + }, + { + "label": "VGGFace2", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.datasets.face_datasets", + "description": "modules.PIXIE.pixielib.datasets.face_datasets", + "peekOfCode": "class VGGFace2(Dataset):\n def __init__(self, crop_size, scale=[1, 1], trans_scale = 0., blur_step=1, split='train'):\n '''\n K must be less than 6\n '''\n self.image_size = crop_size\n self.imagefolder = '/ps/scratch/face2d3d/train'\n self.kptfolder = '/ps/scratch/face2d3d/train_annotated_torch7'\n self.segfolder = '/ps/scratch/face2d3d/texture_in_the_wild_code/VGGFace2_seg/test_crop_size_400_batch'\n self.attfolder = '/ps/scratch/yfeng/Data/vggface2/gender-DEX'", + "detail": "modules.PIXIE.pixielib.datasets.face_datasets", + "documentation": {} + }, + { + "label": "video2sequence", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.datasets.face_datasets", + "description": "modules.PIXIE.pixielib.datasets.face_datasets", + "peekOfCode": "def video2sequence(video_path):\n videofolder = video_path.split('.')[0]\n util.check_mkdir(videofolder)\n video_name = video_path.split('/')[-1].split('.')[0]\n vidcap = cv2.VideoCapture(video_path)\n success,image = vidcap.read()\n count = 0\n imagepath_list = []\n while success:\n imagepath = '{}/{}_frame{:04d}.jpg'.format(videofolder, video_name, count)", + "detail": "modules.PIXIE.pixielib.datasets.face_datasets", + "documentation": {} + }, + { + "label": "TestData", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.datasets.hand_datasets", + "description": "modules.PIXIE.pixielib.datasets.hand_datasets", + "peekOfCode": "class TestData(Dataset):\n def __init__(self, testpath, iscrop=True, crop_size=224, scale=2.4, detector='hand'):\n '''\n testpath: folder, imagepath_list, image path, video path\n '''\n if isinstance(testpath, list):\n self.imagepath_list = testpath\n elif os.path.isdir(testpath): \n self.imagepath_list = glob(testpath + '/*.jpg') + glob(testpath + '/*.png') + glob(testpath + '/*.bmp')\n elif os.path.isfile(testpath) and (testpath[-3:] in ['jpg', 'png', 'bmp']):", + "detail": "modules.PIXIE.pixielib.datasets.hand_datasets", + 
"documentation": {} + }, + { + "label": "video2sequence", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.datasets.hand_datasets", + "description": "modules.PIXIE.pixielib.datasets.hand_datasets", + "peekOfCode": "def video2sequence(video_path):\n videofolder = video_path.split('.')[0]\n util.check_mkdir(videofolder)\n video_name = video_path.split('/')[-1].split('.')[0]\n vidcap = cv2.VideoCapture(video_path)\n success,image = vidcap.read()\n count = 0\n imagepath_list = []\n while success:\n imagepath = '{}/{}_frame{:04d}.jpg'.format(videofolder, video_name, count)", + "detail": "modules.PIXIE.pixielib.datasets.hand_datasets", + "documentation": {} + }, + { + "label": "map_keypoints", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "def map_keypoints(keypoints, conf, source_names, target_names = 'smplx',\n dim=2):\n ''' Maps the keypoints from the source to a target dataset format\n '''\n source_dataset = source_names\n target_dataset = target_names\n names_dict = KEYPOINT_NAMES_DICT\n mapping = {}\n source_names = names_dict.get(source_dataset)\n target_names = names_dict.get(target_dataset)", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "__all__", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "__all__ = [\n 'KEYPOINT_PARTS',\n 'KEYPOINT_CONNECTIONS',\n 'KEYPOINT_NAMES_DICT',\n 'KEYPOINT_CONNECTIONS_DICT',\n 'KEYPOINT_PARTS_DICT',\n 'PART_NAMES',\n 'KEYPOINT_PART_CONNECTION_DICTS',\n]\nKEYPOINT_PARTS = {'pelvis': 'body,torso',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "KEYPOINT_PARTS", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "KEYPOINT_PARTS = {'pelvis': 'body,torso',\n 'left_hip': 'body,torso',\n 'right_hip': 'body,torso',\n 'spine1': 'body,torso',\n 'left_knee': 'body',\n 'right_knee': 'body',\n 'spine2': 'body,torso,upper',\n 'left_ankle': 'body',\n 'right_ankle': 'body',\n 'spine3': 'body,torso,upper',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "PART_NAMES", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "PART_NAMES = {\n 'body',\n 'left_hand',\n 'right_hand',\n 'face',\n 'head',\n 'upper',\n 'torso',\n}\nKEYPOINT_CONNECTIONS = [", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "KEYPOINT_CONNECTIONS", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "KEYPOINT_CONNECTIONS = [\n ['pelvis', 'spine1'],\n ['spine1', 'spine2'],\n ['spine2', 'spine3'],\n ['spine3', 'left_collar'],\n ['spine3', 'right_collar'],\n ['left_collar', 'left_shoulder'],\n ['right_collar', 'right_shoulder'],\n ['spine3', 'neck'],\n ['neck', 'head'],", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "FACIAL_LANDMARKS", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": 
"modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "FACIAL_LANDMARKS = [\n 'right_eye_brow1',\n 'right_eye_brow2',\n 'right_eye_brow3',\n 'right_eye_brow4',\n 'right_eye_brow5',\n 'left_eye_brow5',\n 'left_eye_brow4',\n 'left_eye_brow3',\n 'left_eye_brow2',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "SMPL_KEYPOINT_NAMES", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "SMPL_KEYPOINT_NAMES = [\n 'pelvis',\n 'left_hip',\n 'right_hip',\n 'spine1',\n 'left_knee',\n 'right_knee',\n 'spine2',\n 'left_ankle',\n 'right_ankle',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "SMPLH_KEYPOINT_NAMES", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "SMPLH_KEYPOINT_NAMES = SMPL_KEYPOINT_NAMES[:-2] + [\n 'left_index1',\n 'left_index2',\n 'left_index3',\n 'left_middle1',\n 'left_middle2',\n 'left_middle3',\n 'left_pinky1',\n 'left_pinky2',\n 'left_pinky3',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "SMPLX_KEYPOINT_NAMES", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "SMPLX_KEYPOINT_NAMES = (SMPL_KEYPOINT_NAMES[:-2] +\n ['jaw', 'left_eye_smplx', 'right_eye_smplx'] +\n SMPLH_KEYPOINT_NAMES[22:] +\n FACIAL_LANDMARKS)\nMANO_NAMES = [\n 'wrist',\n 'index1',\n 'index2',\n 'index3',\n 'middle1',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "MANO_NAMES", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "MANO_NAMES = [\n 'wrist',\n 'index1',\n 'index2',\n 'index3',\n 'middle1',\n 'middle2',\n 'middle3',\n 'pinky1',\n 'pinky2',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "HO3D_NAMES", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "HO3D_NAMES = MANO_NAMES + ['thumb', 'index', 'middle', 'ring', 'pinky']\nAGORA_NAMES = [\n 'pelvis',\n 'left_hip',\n 'right_hip',\n 'spine1',\n 'left_knee',\n 'right_knee',\n 'spine2',\n 'left_ankle',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "AGORA_NAMES", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "AGORA_NAMES = [\n 'pelvis',\n 'left_hip',\n 'right_hip',\n 'spine1',\n 'left_knee',\n 'right_knee',\n 'spine2',\n 'left_ankle',\n 'right_ankle',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "EHF_KEYPOINTS", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "EHF_KEYPOINTS = [\n 'pelvis',\n 'left_hip',\n 'right_hip',\n 'spine1',\n 'left_knee',\n 'right_knee',\n 'spine2',\n 'left_ankle',\n 'right_ankle',", + "detail": 
"modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "OPENPOSE19_KEYPOINT_NAMES_v1", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "OPENPOSE19_KEYPOINT_NAMES_v1 = [\n 'nose', 'neck',\n 'right_shoulder', 'right_elbow', 'right_wrist',\n 'left_shoulder', 'left_elbow', 'left_wrist',\n 'pelvis',\n 'right_hip', 'right_knee', 'right_ankle',\n 'left_hip', 'left_knee', 'left_ankle',\n 'right_eye', 'left_eye', 'right_ear', 'left_ear',\n 'left_wrist',\n 'left_thumb1', 'left_thumb2', 'left_thumb3', 'left_thumb',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "FEET_KEYPS_NAMES", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "FEET_KEYPS_NAMES = ['left_big_toe', 'left_small_toe', 'left_heel',\n 'right_big_toe', 'right_small_toe', 'right_heel']\nOPENPOSE25_KEYPOINT_NAMES_V1 = deepcopy(OPENPOSE19_KEYPOINT_NAMES_v1)\nstart = 19\nfor feet_name in FEET_KEYPS_NAMES:\n OPENPOSE25_KEYPOINT_NAMES_V1.insert(start, feet_name)\n start += 1\nMPII_KEYPOINT_NAMES = [\n 'right_ankle',\n 'right_knee',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "OPENPOSE25_KEYPOINT_NAMES_V1", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "OPENPOSE25_KEYPOINT_NAMES_V1 = deepcopy(OPENPOSE19_KEYPOINT_NAMES_v1)\nstart = 19\nfor feet_name in FEET_KEYPS_NAMES:\n OPENPOSE25_KEYPOINT_NAMES_V1.insert(start, feet_name)\n start += 1\nMPII_KEYPOINT_NAMES = [\n 'right_ankle',\n 'right_knee',\n 'right_hip',\n 'left_hip',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "start", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "start = 19\nfor feet_name in FEET_KEYPS_NAMES:\n OPENPOSE25_KEYPOINT_NAMES_V1.insert(start, feet_name)\n start += 1\nMPII_KEYPOINT_NAMES = [\n 'right_ankle',\n 'right_knee',\n 'right_hip',\n 'left_hip',\n 'left_knee',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "MPII_KEYPOINT_NAMES", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "MPII_KEYPOINT_NAMES = [\n 'right_ankle',\n 'right_knee',\n 'right_hip',\n 'left_hip',\n 'left_knee',\n 'left_ankle',\n 'pelvis',\n 'thorax',\n 'upper_neck',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "FLAME_KEYPOINT_NAMES", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "FLAME_KEYPOINT_NAMES = [\n 'global', 'neck', 'jaw', 'left_eye', 'right_eye'] + FACIAL_LANDMARKS\nFFHQ_KEYPOINTS = FLAME_KEYPOINT_NAMES\nCOCO_KEYPOINTS = ['nose',\n 'left_eye',\n 'right_eye',\n 'left_ear',\n 'right_ear',\n 'left_shoulder',\n 'right_shoulder',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "FFHQ_KEYPOINTS", + "kind": 5, + "importPath": 
"modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "FFHQ_KEYPOINTS = FLAME_KEYPOINT_NAMES\nCOCO_KEYPOINTS = ['nose',\n 'left_eye',\n 'right_eye',\n 'left_ear',\n 'right_ear',\n 'left_shoulder',\n 'right_shoulder',\n 'left_elbow',\n 'right_elbow',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "COCO_KEYPOINTS", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "COCO_KEYPOINTS = ['nose',\n 'left_eye',\n 'right_eye',\n 'left_ear',\n 'right_ear',\n 'left_shoulder',\n 'right_shoulder',\n 'left_elbow',\n 'right_elbow',\n 'left_wrist',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "COCO_WHOLE_BODY_KEYPOINTS", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "COCO_WHOLE_BODY_KEYPOINTS = COCO_KEYPOINTS + FEET_KEYPS_NAMES + [\n 'left_wrist',\n 'left_thumb1', 'left_thumb2', 'left_thumb3', 'left_thumb',\n 'left_index1', 'left_index2', 'left_index3', 'left_index',\n 'left_middle1', 'left_middle2', 'left_middle3', 'left_middle',\n 'left_ring1', 'left_ring2', 'left_ring3', 'left_ring',\n 'left_pinky1', 'left_pinky2', 'left_pinky3', 'left_pinky',\n 'right_wrist',\n 'right_thumb1', 'right_thumb2', 'right_thumb3', 'right_thumb',\n 'right_index1', 'right_index2', 'right_index3', 'right_index',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "THREEDPW_KEYPOINT_NAMES", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "THREEDPW_KEYPOINT_NAMES = [\n 'nose',\n 'neck',\n 'right_shoulder',\n 'right_elbow',\n 'right_wrist',\n 'left_shoulder',\n 'left_elbow',\n 'left_wrist',\n 'right_hip',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "POSETRACK_KEYPOINT_NAMES", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "POSETRACK_KEYPOINT_NAMES = [\n 'nose',\n 'neck',\n 'head_top',\n 'left_ear',\n 'right_ear',\n 'left_shoulder',\n 'right_shoulder',\n 'left_elbow',\n 'right_elbow',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "AICH_KEYPOINT_NAMES", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "AICH_KEYPOINT_NAMES = [\n 'right_shoulder',\n 'right_elbow',\n 'right_wrist',\n 'left_shoulder',\n 'left_elbow',\n 'left_wrist',\n 'right_hip',\n 'right_knee',\n 'right_ankle',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "SPIN_KEYPOINT_NAMES", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "SPIN_KEYPOINT_NAMES = [\n 'right_ankle',\n 'right_knee',\n 'right_hip',\n 'left_hip',\n 'left_knee',\n 'left_ankle',\n 'right_wrist',\n 'right_elbow',\n 'right_shoulder',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + 
"documentation": {} + }, + { + "label": "SPINX_KEYPOINT_NAMES", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "SPINX_KEYPOINT_NAMES = [\n 'right_ankle',\n 'right_knee',\n 'right_hip',\n 'left_hip',\n 'left_knee',\n 'left_ankle',\n 'right_wrist',\n 'right_elbow',\n 'right_shoulder',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "PANOPTIC_KEYPOINT_NAMES", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "PANOPTIC_KEYPOINT_NAMES = [\n 'neck',\n 'nose',\n 'pelvis',\n 'left_shoulder',\n 'left_elbow',\n 'left_wrist',\n 'left_hip',\n 'left_knee',\n 'left_ankle',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "FREIHAND_NAMES", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "FREIHAND_NAMES = [\n 'right_wrist',\n 'right_thumb1',\n 'right_thumb2',\n 'right_thumb3',\n 'right_thumb',\n 'right_index1',\n 'right_index2',\n 'right_index3',\n 'right_index',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "LSP_NAMES", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "LSP_NAMES = [\n 'right_ankle',\n 'right_knee',\n 'right_hip',\n 'left_hip',\n 'left_knee',\n 'left_ankle',\n 'right_wrist',\n 'right_elbow',\n 'right_shoulder',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "RAW_H36M_NAMES", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "RAW_H36M_NAMES = [\n 'pelvis',\n 'left_hip',\n 'left_knee',\n 'left_ankle',\n 'right_hip',\n 'right_knee',\n 'right_ankle',\n 'spine',\n 'neck', # 'thorax',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "H36M_NAMES", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "H36M_NAMES = [\n 'right_ankle',\n 'right_knee',\n 'right_hip',\n 'left_hip',\n 'left_knee',\n 'left_ankle',\n 'right_wrist',\n 'right_elbow',\n 'right_shoulder',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "PANOPTIC_HAND_KEYPOINT_NAMES", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "PANOPTIC_HAND_KEYPOINT_NAMES = [\n 'right_wrist',\n 'right_thumb1', 'right_thumb2', 'right_thumb3', 'right_thumb',\n 'right_index1', 'right_index2', 'right_index3', 'right_index',\n 'right_middle1', 'right_middle2', 'right_middle3', 'right_middle',\n 'right_ring1', 'right_ring2', 'right_ring3', 'right_ring',\n 'right_pinky1', 'right_pinky2', 'right_pinky3', 'right_pinky'\n]\nAGORA_NAMES = [\n 'pelvis',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "AGORA_NAMES", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", 
+ "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "AGORA_NAMES = [\n 'pelvis',\n 'left_hip',\n 'right_hip',\n 'spine1',\n 'left_knee',\n 'right_knee',\n 'spine2',\n 'left_ankle',\n 'right_ankle',", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "KEYPOINT_NAMES_DICT", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "KEYPOINT_NAMES_DICT = {\n 'smpl': SMPL_KEYPOINT_NAMES,\n 'smplh': SMPLH_KEYPOINT_NAMES,\n 'smplx': SMPLX_KEYPOINT_NAMES,\n 'mano': MANO_NAMES,\n 'mano-from-smplx': SMPLX_KEYPOINT_NAMES,\n 'flame-from-smplx': SMPLX_KEYPOINT_NAMES,\n 'flame': FLAME_KEYPOINT_NAMES,\n 'openpose19_v1': OPENPOSE19_KEYPOINT_NAMES_v1,\n 'openpose25_v1': OPENPOSE25_KEYPOINT_NAMES_V1,", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "extra_names", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.datasets.keypoint_names", + "description": "modules.PIXIE.pixielib.datasets.keypoint_names", + "peekOfCode": "extra_names = ['head_top', 'left_big_toe', 'left_ear', 'left_eye', 'left_heel', 'left_index', 'left_middle', 'left_pinky', 'left_ring', 'left_small_toe', 'left_thumb', 'nose', 'right_big_toe', 'right_ear', 'right_eye', 'right_heel', 'right_index', 'right_middle', 'right_pinky', 'right_ring', 'right_small_toe', 'right_thumb']\nKEYPOINT_NAMES_DICT['smplx'] += extra_names\nimport numpy as np\nimport torch\ndef map_keypoints(keypoints, conf, source_names, target_names = 'smplx',\n dim=2):\n ''' Maps the keypoints from the source to a target dataset format\n '''\n source_dataset = source_names\n target_dataset = target_names", + "detail": "modules.PIXIE.pixielib.datasets.keypoint_names", + "documentation": {} + }, + { + "label": "ResnetEncoder", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.models.encoders", + "description": "modules.PIXIE.pixielib.models.encoders", + "peekOfCode": "class ResnetEncoder(nn.Module):\n def __init__(self, append_layers = None):\n super(ResnetEncoder, self).__init__()\n from . import resnet\n # feature_size = 2048\n self.feature_dim = 2048\n self.encoder = resnet.load_ResNet50Model() #out: 2048\n # regressor\n self.append_layers = append_layers\n # for normalize input images", + "detail": "modules.PIXIE.pixielib.models.encoders", + "documentation": {} + }, + { + "label": "MLP", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.models.encoders", + "description": "modules.PIXIE.pixielib.models.encoders", + "peekOfCode": "class MLP(nn.Module):\n def __init__(self, channels = [2048, 1024, 1], last_op = None):\n super(MLP, self).__init__()\n layers = []\n for l in range(0, len(channels) - 1):\n layers.append(nn.Linear(channels[l], channels[l+1]))\n if l < len(channels) - 2:\n layers.append(nn.ReLU())\n if last_op:\n layers.append(last_op)", + "detail": "modules.PIXIE.pixielib.models.encoders", + "documentation": {} + }, + { + "label": "HRNEncoder", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.models.encoders", + "description": "modules.PIXIE.pixielib.models.encoders", + "peekOfCode": "class HRNEncoder(nn.Module):\n def __init__(self, append_layers = None):\n super(HRNEncoder, self).__init__()\n from . 
import hrnet\n self.feature_dim = 2048\n self.encoder = hrnet.load_HRNet(pretrained=True) #out: 2048\n # regressor\n self.append_layers = append_layers\n # for normalizing input images\n MEAN = [0.485, 0.456, 0.406]", + "detail": "modules.PIXIE.pixielib.models.encoders", + "documentation": {} + }, + { + "label": "FLAMETex", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.models.FLAME", + "description": "modules.PIXIE.pixielib.models.FLAME", + "peekOfCode": "class FLAMETex(nn.Module):\n \"\"\"\n FLAME texture:\n https://github.com/TimoBolkart/TF_FLAME/blob/ade0ab152300ec5f0e8555d6765411555c5ed43d/sample_texture.py#L64\n FLAME texture converted from BFM:\n https://github.com/TimoBolkart/BFM_to_FLAME\n \"\"\"\n def __init__(self, config):\n super(FLAMETex, self).__init__()\n if config.tex_type == 'BFM':", + "detail": "modules.PIXIE.pixielib.models.FLAME", + "documentation": {} + }, + { + "label": "texture_flame2smplx", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.models.FLAME", + "description": "modules.PIXIE.pixielib.models.FLAME", + "peekOfCode": "def texture_flame2smplx(cached_data, flame_texture, smplx_texture):\n ''' Convert flame texture map (face-only) into smplx texture map (includes body texture)\n TODO: pytorch version ==> grid sample\n '''\n if smplx_texture.shape[0] != smplx_texture.shape[1]:\n print('SMPL-X texture not squared (%d != %d)' % (smplx_texture[0], smplx_texture[1]))\n return\n if smplx_texture.shape[0] != cached_data['target_resolution']:\n print('SMPL-X texture size does not match cached image resolution (%d != %d)' % (smplx_texture.shape[0], cached_data['target_resolution']))\n return", + "detail": "modules.PIXIE.pixielib.models.FLAME", + "documentation": {} + }, + { + "label": "HighResolutionModule", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.models.hrnet", + "description": "modules.PIXIE.pixielib.models.hrnet", + "peekOfCode": "class HighResolutionModule(nn.Module):\n def __init__(self, num_branches, blocks, num_blocks, num_inchannels,\n num_channels, fuse_method, multi_scale_output=True):\n super(HighResolutionModule, self).__init__()\n self._check_branches(\n num_branches, blocks, num_blocks, num_inchannels, num_channels)\n self.num_inchannels = num_inchannels\n self.fuse_method = fuse_method\n self.num_branches = num_branches\n self.multi_scale_output = multi_scale_output", + "detail": "modules.PIXIE.pixielib.models.hrnet", + "documentation": {} + }, + { + "label": "HighResolutionNet", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.models.hrnet", + "description": "modules.PIXIE.pixielib.models.hrnet", + "peekOfCode": "class HighResolutionNet(nn.Module):\n def __init__(self, cfg, **kwargs):\n self.inplanes = 64\n super(HighResolutionNet, self).__init__()\n use_old_impl = cfg.get('use_old_impl')\n self.use_old_impl = use_old_impl\n # stem net\n self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)", + "detail": "modules.PIXIE.pixielib.models.hrnet", + "documentation": {} + }, + { + "label": "load_HRNet", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.models.hrnet", + "description": "modules.PIXIE.pixielib.models.hrnet", + "peekOfCode": "def load_HRNet(pretrained=False):\n hr_net_cfg_dict = {\n 'use_old_impl': False,\n 'pretrained_layers': ['*'], \n 'stage1': \n {'num_modules': 1, 'num_branches': 1, 'num_blocks': [4], 'num_channels': [64], 'block': 'BOTTLENECK', 'fuse_method': 'SUM'}, \n 'stage2': \n {'num_modules': 1, 'num_branches': 2, 'num_blocks': 
[4, 4], 'num_channels': [48, 96], 'block': 'BASIC', 'fuse_method': 'SUM'}, \n 'stage3': \n {'num_modules': 4, 'num_branches': 3, 'num_blocks': [4, 4, 4], 'num_channels': [48, 96, 192], 'block': 'BASIC', 'fuse_method': 'SUM'}, ", + "detail": "modules.PIXIE.pixielib.models.hrnet", + "documentation": {} + }, + { + "label": "BN_MOMENTUM", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.models.hrnet", + "description": "modules.PIXIE.pixielib.models.hrnet", + "peekOfCode": "BN_MOMENTUM = 0.1\ndef load_HRNet(pretrained=False):\n hr_net_cfg_dict = {\n 'use_old_impl': False,\n 'pretrained_layers': ['*'], \n 'stage1': \n {'num_modules': 1, 'num_branches': 1, 'num_blocks': [4], 'num_channels': [64], 'block': 'BOTTLENECK', 'fuse_method': 'SUM'}, \n 'stage2': \n {'num_modules': 1, 'num_branches': 2, 'num_blocks': [4, 4], 'num_channels': [48, 96], 'block': 'BASIC', 'fuse_method': 'SUM'}, \n 'stage3': ", + "detail": "modules.PIXIE.pixielib.models.hrnet", + "documentation": {} + }, + { + "label": "blocks_dict", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.models.hrnet", + "description": "modules.PIXIE.pixielib.models.hrnet", + "peekOfCode": "blocks_dict = {\n 'BASIC': BasicBlock,\n 'BOTTLENECK': Bottleneck\n}\nclass HighResolutionNet(nn.Module):\n def __init__(self, cfg, **kwargs):\n self.inplanes = 64\n super(HighResolutionNet, self).__init__()\n use_old_impl = cfg.get('use_old_impl')\n self.use_old_impl = use_old_impl", + "detail": "modules.PIXIE.pixielib.models.hrnet", + "documentation": {} + }, + { + "label": "JointsFromVerticesSelector", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.models.lbs", + "description": "modules.PIXIE.pixielib.models.lbs", + "peekOfCode": "class JointsFromVerticesSelector(nn.Module):\n def __init__(self, fname):\n ''' Selects extra joints from vertices\n '''\n super(JointsFromVerticesSelector, self).__init__()\n err_msg = (\n 'Either pass a filename or triangle face ids, names and'\n ' barycentrics')\n assert fname is not None or (\n face_ids is not None and bcs is not None and names is not None", + "detail": "modules.PIXIE.pixielib.models.lbs", + "documentation": {} + }, + { + "label": "Struct", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.models.lbs", + "description": "modules.PIXIE.pixielib.models.lbs", + "peekOfCode": "class Struct(object):\n def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)", + "detail": "modules.PIXIE.pixielib.models.lbs", + "documentation": {} + }, + { + "label": "rot_mat_to_euler", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.models.lbs", + "description": "modules.PIXIE.pixielib.models.lbs", + "peekOfCode": "def rot_mat_to_euler(rot_mats):\n # Calculates rotation matrix to euler angles\n # Careful for extreme cases of euler angles like [0.0, pi, 0.0]\n sy = torch.sqrt(rot_mats[:, 0, 0] * rot_mats[:, 0, 0] +\n rot_mats[:, 1, 0] * rot_mats[:, 1, 0])\n return torch.atan2(-rot_mats[:, 2, 0], sy)\ndef find_dynamic_lmk_idx_and_bcoords(vertices, pose, dynamic_lmk_faces_idx,\n dynamic_lmk_b_coords,\n head_kin_chain, dtype=torch.float32):\n ''' Compute the faces, barycentric coordinates for the dynamic landmarks", + "detail": "modules.PIXIE.pixielib.models.lbs", + "documentation": {} + }, + { + "label": "find_dynamic_lmk_idx_and_bcoords", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.models.lbs", + "description": "modules.PIXIE.pixielib.models.lbs", + "peekOfCode": "def find_dynamic_lmk_idx_and_bcoords(vertices, pose, dynamic_lmk_faces_idx,\n dynamic_lmk_b_coords,\n 
head_kin_chain, dtype=torch.float32):\n ''' Compute the faces, barycentric coordinates for the dynamic landmarks\n To do so, we first compute the rotation of the neck around the y-axis\n and then use a pre-computed look-up table to find the faces and the\n barycentric coordinates that will be used.\n Special thanks to Soubhik Sanyal (soubhik.sanyal@tuebingen.mpg.de)\n for providing the original TensorFlow implementation and for the LUT.\n Parameters", + "detail": "modules.PIXIE.pixielib.models.lbs", + "documentation": {} + }, + { + "label": "vertices2landmarks", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.models.lbs", + "description": "modules.PIXIE.pixielib.models.lbs", + "peekOfCode": "def vertices2landmarks(vertices, faces, lmk_faces_idx, lmk_bary_coords):\n ''' Calculates landmarks by barycentric interpolation\n Parameters\n ----------\n vertices: torch.tensor BxVx3, dtype = torch.float32\n The tensor of input vertices\n faces: torch.tensor Fx3, dtype = torch.long\n The faces of the mesh\n lmk_faces_idx: torch.tensor L, dtype = torch.long\n The tensor with the indices of the faces used to calculate the", + "detail": "modules.PIXIE.pixielib.models.lbs", + "documentation": {} + }, + { + "label": "lbs", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.models.lbs", + "description": "modules.PIXIE.pixielib.models.lbs", + "peekOfCode": "def lbs(betas, pose, v_template, shapedirs, posedirs, J_regressor, parents,\n lbs_weights, pose2rot=True, dtype=torch.float32):\n ''' Performs Linear Blend Skinning with the given shape and pose parameters\n Parameters\n ----------\n betas : torch.tensor BxNB\n The tensor of shape parameters\n pose : torch.tensor Bx(J + 1) * 3\n The pose parameters in axis-angle format\n v_template torch.tensor BxVx3", + "detail": "modules.PIXIE.pixielib.models.lbs", + "documentation": {} + }, + { + "label": "vertices2joints", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.models.lbs", + "description": "modules.PIXIE.pixielib.models.lbs", + "peekOfCode": "def vertices2joints(J_regressor, vertices):\n ''' Calculates the 3D joint locations from the vertices\n Parameters\n ----------\n J_regressor : torch.tensor JxV\n The regressor array that is used to calculate the joints from the\n position of the vertices\n vertices : torch.tensor BxVx3\n The tensor of mesh vertices\n Returns", + "detail": "modules.PIXIE.pixielib.models.lbs", + "documentation": {} + }, + { + "label": "blend_shapes", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.models.lbs", + "description": "modules.PIXIE.pixielib.models.lbs", + "peekOfCode": "def blend_shapes(betas, shape_disps):\n ''' Calculates the per vertex displacement due to the blend shapes\n Parameters\n ----------\n betas : torch.tensor Bx(num_betas)\n Blend shape coefficients\n shape_disps: torch.tensor Vx3x(num_betas)\n Blend shapes\n Returns\n -------", + "detail": "modules.PIXIE.pixielib.models.lbs", + "documentation": {} + }, + { + "label": "batch_rodrigues", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.models.lbs", + "description": "modules.PIXIE.pixielib.models.lbs", + "peekOfCode": "def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):\n ''' Calculates the rotation matrices for a batch of rotation vectors\n Parameters\n ----------\n rot_vecs: torch.tensor Nx3\n array of N axis-angle vectors\n Returns\n -------\n R: torch.tensor Nx3x3\n The rotation matrices for the given axis-angle parameters", + "detail": "modules.PIXIE.pixielib.models.lbs", + "documentation": {} + }, + { + "label": 
"transform_mat", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.models.lbs", + "description": "modules.PIXIE.pixielib.models.lbs", + "peekOfCode": "def transform_mat(R, t):\n ''' Creates a batch of transformation matrices\n Args:\n - R: Bx3x3 array of a batch of rotation matrices\n - t: Bx3x1 array of a batch of translation vectors\n Returns:\n - T: Bx4x4 Transformation matrix\n '''\n # No padding left or right, only add an extra row\n return torch.cat([F.pad(R, [0, 0, 0, 1]),", + "detail": "modules.PIXIE.pixielib.models.lbs", + "documentation": {} + }, + { + "label": "batch_rigid_transform", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.models.lbs", + "description": "modules.PIXIE.pixielib.models.lbs", + "peekOfCode": "def batch_rigid_transform(rot_mats, joints, parents, dtype=torch.float32):\n \"\"\"\n Applies a batch of rigid transformations to the joints\n Parameters\n ----------\n rot_mats : torch.tensor BxNx3x3\n Tensor of rotation matrices\n joints : torch.tensor BxNx3\n Locations of joints\n parents : torch.tensor BxN", + "detail": "modules.PIXIE.pixielib.models.lbs", + "documentation": {} + }, + { + "label": "to_tensor", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.models.lbs", + "description": "modules.PIXIE.pixielib.models.lbs", + "peekOfCode": "def to_tensor(array, dtype=torch.float32):\n if 'torch.tensor' not in str(type(array)):\n return torch.tensor(array, dtype=dtype)\ndef to_np(array, dtype=np.float32):\n if 'scipy.sparse' in str(type(array)):\n array = array.todense()\n return np.array(array, dtype=dtype)\nclass Struct(object):\n def __init__(self, **kwargs):\n for key, val in kwargs.items():", + "detail": "modules.PIXIE.pixielib.models.lbs", + "documentation": {} + }, + { + "label": "to_np", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.models.lbs", + "description": "modules.PIXIE.pixielib.models.lbs", + "peekOfCode": "def to_np(array, dtype=np.float32):\n if 'scipy.sparse' in str(type(array)):\n array = array.todense()\n return np.array(array, dtype=dtype)\nclass Struct(object):\n def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)", + "detail": "modules.PIXIE.pixielib.models.lbs", + "documentation": {} + }, + { + "label": "TempSoftmaxFusion", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.models.moderators", + "description": "modules.PIXIE.pixielib.models.moderators", + "peekOfCode": "class TempSoftmaxFusion(nn.Module):\n def __init__(self, channels = [2048*2, 1024, 1], detach_inputs=False, detach_feature=False):\n super(TempSoftmaxFusion, self).__init__()\n self.detach_inputs = detach_inputs\n self.detach_feature = detach_feature\n # weight\n layers = []\n for l in range(0, len(channels) - 1):\n layers.append(nn.Linear(channels[l], channels[l+1]))\n if l < len(channels) - 2:", + "detail": "modules.PIXIE.pixielib.models.moderators", + "documentation": {} + }, + { + "label": "GumbelSoftmaxFusion", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.models.moderators", + "description": "modules.PIXIE.pixielib.models.moderators", + "peekOfCode": "class GumbelSoftmaxFusion(nn.Module):\n def __init__(self, channels = [2048*2, 1024, 1], detach_inputs=False, detach_feature=False):\n super(GumbelSoftmaxFusion, self).__init__()\n self.detach_inputs = detach_inputs\n self.detach_feature = detach_feature\n # weight\n layers = []\n for l in range(0, len(channels) - 1):\n layers.append(nn.Linear(channels[l], channels[l+1]))\n if l < len(channels) - 2:", + "detail": "modules.PIXIE.pixielib.models.moderators", + 
"documentation": {} + }, + { + "label": "ResNet", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.models.resnet", + "description": "modules.PIXIE.pixielib.models.resnet", + "peekOfCode": "class ResNet(nn.Module):\n def __init__(self, block, layers, num_classes=1000):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])", + "detail": "modules.PIXIE.pixielib.models.resnet", + "documentation": {} + }, + { + "label": "Bottleneck", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.models.resnet", + "description": "modules.PIXIE.pixielib.models.resnet", + "peekOfCode": "class Bottleneck(nn.Module):\n expansion = 4\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)", + "detail": "modules.PIXIE.pixielib.models.resnet", + "documentation": {} + }, + { + "label": "BasicBlock", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.models.resnet", + "description": "modules.PIXIE.pixielib.models.resnet", + "peekOfCode": "class BasicBlock(nn.Module):\n expansion = 1\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample", + "detail": "modules.PIXIE.pixielib.models.resnet", + "documentation": {} + }, + { + "label": "DoubleConv", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.models.resnet", + "description": "modules.PIXIE.pixielib.models.resnet", + "peekOfCode": "class DoubleConv(nn.Module):\n \"\"\"(convolution => [BN] => ReLU) * 2\"\"\"\n def __init__(self, in_channels, out_channels):\n super().__init__()\n self.double_conv = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True),\n nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),\n nn.BatchNorm2d(out_channels),", + "detail": "modules.PIXIE.pixielib.models.resnet", + "documentation": {} + }, + { + "label": "Down", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.models.resnet", + "description": "modules.PIXIE.pixielib.models.resnet", + "peekOfCode": "class Down(nn.Module):\n \"\"\"Downscaling with maxpool then double conv\"\"\"\n def __init__(self, in_channels, out_channels):\n super().__init__()\n self.maxpool_conv = nn.Sequential(\n nn.MaxPool2d(2),\n DoubleConv(in_channels, out_channels)\n )\n def forward(self, x):\n return self.maxpool_conv(x)", + "detail": "modules.PIXIE.pixielib.models.resnet", + "documentation": {} + }, + { + "label": "Up", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.models.resnet", + "description": "modules.PIXIE.pixielib.models.resnet", + "peekOfCode": "class Up(nn.Module):\n \"\"\"Upscaling then double conv\"\"\"\n def __init__(self, in_channels, out_channels, bilinear=True):\n super().__init__()\n # if 
bilinear, use the normal convolutions to reduce the number of channels\n if bilinear:\n self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)\n else:\n self.up = nn.ConvTranspose2d(in_channels // 2, in_channels // 2, kernel_size=2, stride=2)\n self.conv = DoubleConv(in_channels, out_channels)", + "detail": "modules.PIXIE.pixielib.models.resnet", + "documentation": {} + }, + { + "label": "OutConv", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.models.resnet", + "description": "modules.PIXIE.pixielib.models.resnet", + "peekOfCode": "class OutConv(nn.Module):\n def __init__(self, in_channels, out_channels):\n super(OutConv, self).__init__()\n self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)\n def forward(self, x):\n return self.conv(x)\nclass UNet(nn.Module):\n def __init__(self, n_channels, n_classes, bilinear=True):\n super(UNet, self).__init__()\n self.n_channels = n_channels", + "detail": "modules.PIXIE.pixielib.models.resnet", + "documentation": {} + }, + { + "label": "UNet", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.models.resnet", + "description": "modules.PIXIE.pixielib.models.resnet", + "peekOfCode": "class UNet(nn.Module):\n def __init__(self, n_channels, n_classes, bilinear=True):\n super(UNet, self).__init__()\n self.n_channels = n_channels\n self.n_classes = n_classes\n self.bilinear = bilinear\n self.inc = DoubleConv(n_channels, 64)\n self.down1 = Down(64, 128)\n self.down2 = Down(128, 256)\n self.down3 = Down(256, 512)", + "detail": "modules.PIXIE.pixielib.models.resnet", + "documentation": {} + }, + { + "label": "conv3x3", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.models.resnet", + "description": "modules.PIXIE.pixielib.models.resnet", + "peekOfCode": "def conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\nclass BasicBlock(nn.Module):\n expansion = 1\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)", + "detail": "modules.PIXIE.pixielib.models.resnet", + "documentation": {} + }, + { + "label": "copy_parameter_from_resnet", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.models.resnet", + "description": "modules.PIXIE.pixielib.models.resnet", + "peekOfCode": "def copy_parameter_from_resnet(model, resnet_dict):\n cur_state_dict = model.state_dict()\n # import ipdb; ipdb.set_trace()\n for name, param in list(resnet_dict.items())[0:None]:\n if name not in cur_state_dict:\n # print(name, ' not available in reconstructed resnet')\n continue\n if isinstance(param, Parameter):\n param = param.data\n try:", + "detail": "modules.PIXIE.pixielib.models.resnet", + "documentation": {} + }, + { + "label": "load_ResNet50Model", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.models.resnet", + "description": "modules.PIXIE.pixielib.models.resnet", + "peekOfCode": "def load_ResNet50Model():\n model = ResNet(Bottleneck, [3, 4, 6, 3])\n copy_parameter_from_resnet(model, torchvision.models.resnet50(pretrained = True).state_dict())\n return model\ndef load_ResNet101Model():\n model = ResNet(Bottleneck, [3, 4, 23, 3])\n copy_parameter_from_resnet(model, torchvision.models.resnet101(pretrained = True).state_dict())\n return model\ndef load_ResNet152Model():\n model = ResNet(Bottleneck, [3, 8, 36, 3])", + "detail": "modules.PIXIE.pixielib.models.resnet", + 
"documentation": {} + }, + { + "label": "load_ResNet101Model", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.models.resnet", + "description": "modules.PIXIE.pixielib.models.resnet", + "peekOfCode": "def load_ResNet101Model():\n model = ResNet(Bottleneck, [3, 4, 23, 3])\n copy_parameter_from_resnet(model, torchvision.models.resnet101(pretrained = True).state_dict())\n return model\ndef load_ResNet152Model():\n model = ResNet(Bottleneck, [3, 8, 36, 3])\n copy_parameter_from_resnet(model, torchvision.models.resnet152(pretrained = True).state_dict())\n return model\n# model.load_state_dict(checkpoint['model_state_dict'])\n######## Unet", + "detail": "modules.PIXIE.pixielib.models.resnet", + "documentation": {} + }, + { + "label": "load_ResNet152Model", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.models.resnet", + "description": "modules.PIXIE.pixielib.models.resnet", + "peekOfCode": "def load_ResNet152Model():\n model = ResNet(Bottleneck, [3, 8, 36, 3])\n copy_parameter_from_resnet(model, torchvision.models.resnet152(pretrained = True).state_dict())\n return model\n# model.load_state_dict(checkpoint['model_state_dict'])\n######## Unet\nclass DoubleConv(nn.Module):\n \"\"\"(convolution => [BN] => ReLU) * 2\"\"\"\n def __init__(self, in_channels, out_channels):\n super().__init__()", + "detail": "modules.PIXIE.pixielib.models.resnet", + "documentation": {} + }, + { + "label": "SMPLX", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.models.SMPLX", + "description": "modules.PIXIE.pixielib.models.SMPLX", + "peekOfCode": "class SMPLX(nn.Module):\n \"\"\"\n Given smplx parameters, this class generates a differentiable SMPLX function\n which outputs a mesh and 3D joints\n \"\"\"\n def __init__(self, config):\n super(SMPLX, self).__init__()\n print(\"creating the SMPLX Decoder\")\n ss = np.load(config.smplx_model_path, allow_pickle=True)\n smplx_model = Struct(**ss)", + "detail": "modules.PIXIE.pixielib.models.SMPLX", + "documentation": {} + }, + { + "label": "J14_NAMES", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.models.SMPLX", + "description": "modules.PIXIE.pixielib.models.SMPLX", + "peekOfCode": "J14_NAMES = [\n 'right_ankle',\n 'right_knee',\n 'right_hip',\n 'left_hip',\n 'left_knee',\n 'left_ankle',\n 'right_wrist',\n 'right_elbow',\n 'right_shoulder',", + "detail": "modules.PIXIE.pixielib.models.SMPLX", + "documentation": {} + }, + { + "label": "SMPLX_names", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.models.SMPLX", + "description": "modules.PIXIE.pixielib.models.SMPLX", + "peekOfCode": "SMPLX_names = ['pelvis', 'left_hip', 'right_hip', 'spine1', 'left_knee', 'right_knee', 'spine2', 'left_ankle', 'right_ankle', 'spine3', 'left_foot', 'right_foot', 'neck', 'left_collar', 'right_collar', 'head', 'left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow', 'left_wrist', 'right_wrist', 'jaw', 'left_eye_smplx', 'right_eye_smplx', 'left_index1', 'left_index2', 'left_index3', 'left_middle1', 'left_middle2', 'left_middle3', 'left_pinky1', 'left_pinky2', 'left_pinky3', 'left_ring1', 'left_ring2', 'left_ring3', 'left_thumb1', 'left_thumb2', 'left_thumb3', 'right_index1', 'right_index2', 'right_index3', 'right_middle1', 'right_middle2', 'right_middle3', 'right_pinky1', 'right_pinky2', 'right_pinky3', 'right_ring1', 'right_ring2', 'right_ring3', 'right_thumb1', 'right_thumb2', 'right_thumb3', 'right_eye_brow1', 'right_eye_brow2', 'right_eye_brow3', 'right_eye_brow4', 'right_eye_brow5', 'left_eye_brow5', 'left_eye_brow4', 'left_eye_brow3', 'left_eye_brow2', 
'left_eye_brow1', 'nose1', 'nose2', 'nose3', 'nose4', 'right_nose_2', 'right_nose_1', 'nose_middle', 'left_nose_1', 'left_nose_2', 'right_eye1', 'right_eye2', 'right_eye3', 'right_eye4', 'right_eye5', 'right_eye6', 'left_eye4', 'left_eye3', 'left_eye2', 'left_eye1', 'left_eye6', 'left_eye5', 'right_mouth_1', 'right_mouth_2', 'right_mouth_3', 'mouth_top', 'left_mouth_3', 'left_mouth_2', 'left_mouth_1', 'left_mouth_5', 'left_mouth_4', 'mouth_bottom', 'right_mouth_4', 'right_mouth_5', 'right_lip_1', 'right_lip_2', 'lip_top', 'left_lip_2', 'left_lip_1', 'left_lip_3', 'lip_bottom', 'right_lip_3', 'right_contour_1', 'right_contour_2', 'right_contour_3', 'right_contour_4', 'right_contour_5', 'right_contour_6', 'right_contour_7', 'right_contour_8', 'contour_middle', 'left_contour_8', 'left_contour_7', 'left_contour_6', 'left_contour_5', 'left_contour_4', 'left_contour_3', 'left_contour_2', 'left_contour_1', 'head_top', 'left_big_toe', 'left_ear', 'left_eye', 'left_heel', 'left_index', 'left_middle', 'left_pinky', 'left_ring', 'left_small_toe', 'left_thumb', 'nose', 'right_big_toe', 'right_ear', 'right_eye', 'right_heel', 'right_index', 'right_middle', 'right_pinky', 'right_ring', 'right_small_toe', 'right_thumb']\nextra_names = ['head_top', 'left_big_toe', 'left_ear', 'left_eye', 'left_heel', 'left_index', 'left_middle', 'left_pinky', 'left_ring', 'left_small_toe', 'left_thumb', 'nose', 'right_big_toe', 'right_ear', 'right_eye', 'right_heel', 'right_index', 'right_middle', 'right_pinky', 'right_ring', 'right_small_toe', 'right_thumb']\nSMPLX_names += extra_names\npart_indices = {}\npart_indices['body'] = np.array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,\n 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 123,\n 124, 125, 126, 127, 132, 134, 135, 136, 137, 138, 143])\npart_indices['torso'] = np.array([ 0, 1, 2, 3, 6, 9, 12, 13, 14, 15, 16, 17, 18,\n 19, 22, 23, 24, 55, 56, 57, 58, 59, 76, 77, 78, 79,\n 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92,", + "detail": "modules.PIXIE.pixielib.models.SMPLX", + "documentation": {} + }, + { + "label": "extra_names", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.models.SMPLX", + "description": "modules.PIXIE.pixielib.models.SMPLX", + "peekOfCode": "extra_names = ['head_top', 'left_big_toe', 'left_ear', 'left_eye', 'left_heel', 'left_index', 'left_middle', 'left_pinky', 'left_ring', 'left_small_toe', 'left_thumb', 'nose', 'right_big_toe', 'right_ear', 'right_eye', 'right_heel', 'right_index', 'right_middle', 'right_pinky', 'right_ring', 'right_small_toe', 'right_thumb']\nSMPLX_names += extra_names\npart_indices = {}\npart_indices['body'] = np.array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,\n 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 123,\n 124, 125, 126, 127, 132, 134, 135, 136, 137, 138, 143])\npart_indices['torso'] = np.array([ 0, 1, 2, 3, 6, 9, 12, 13, 14, 15, 16, 17, 18,\n 19, 22, 23, 24, 55, 56, 57, 58, 59, 76, 77, 78, 79,\n 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92,\n 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105,", + "detail": "modules.PIXIE.pixielib.models.SMPLX", + "documentation": {} + }, + { + "label": "part_indices", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.models.SMPLX", + "description": "modules.PIXIE.pixielib.models.SMPLX", + "peekOfCode": "part_indices = {}\npart_indices['body'] = np.array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,\n 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 123,\n 124, 125, 126, 127, 132, 134, 135, 136, 137, 138, 143])\npart_indices['torso'] = np.array([ 0, 1, 2, 3, 
6, 9, 12, 13, 14, 15, 16, 17, 18,\n 19, 22, 23, 24, 55, 56, 57, 58, 59, 76, 77, 78, 79,\n 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92,\n 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105,\n 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118,\n 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131,", + "detail": "modules.PIXIE.pixielib.models.SMPLX", + "documentation": {} + }, + { + "label": "part_indices['body']", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.models.SMPLX", + "description": "modules.PIXIE.pixielib.models.SMPLX", + "peekOfCode": "part_indices['body'] = np.array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,\n 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 123,\n 124, 125, 126, 127, 132, 134, 135, 136, 137, 138, 143])\npart_indices['torso'] = np.array([ 0, 1, 2, 3, 6, 9, 12, 13, 14, 15, 16, 17, 18,\n 19, 22, 23, 24, 55, 56, 57, 58, 59, 76, 77, 78, 79,\n 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92,\n 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105,\n 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118,\n 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131,\n 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144])", + "detail": "modules.PIXIE.pixielib.models.SMPLX", + "documentation": {} + }, + { + "label": "part_indices['torso']", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.models.SMPLX", + "description": "modules.PIXIE.pixielib.models.SMPLX", + "peekOfCode": "part_indices['torso'] = np.array([ 0, 1, 2, 3, 6, 9, 12, 13, 14, 15, 16, 17, 18,\n 19, 22, 23, 24, 55, 56, 57, 58, 59, 76, 77, 78, 79,\n 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92,\n 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105,\n 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118,\n 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131,\n 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144])\npart_indices['head'] = np.array([ 12, 15, 22, 23, 24, 55, 56, 57, 58, 59, 60, 61, 62,\n 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75,\n 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88,", + "detail": "modules.PIXIE.pixielib.models.SMPLX", + "documentation": {} + }, + { + "label": "part_indices['head']", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.models.SMPLX", + "description": "modules.PIXIE.pixielib.models.SMPLX", + "peekOfCode": "part_indices['head'] = np.array([ 12, 15, 22, 23, 24, 55, 56, 57, 58, 59, 60, 61, 62,\n 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75,\n 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88,\n 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101,\n 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114,\n 115, 116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 134, 136,\n 137])\npart_indices['face'] = np.array([ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66,\n 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,\n 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92,", + "detail": "modules.PIXIE.pixielib.models.SMPLX", + "documentation": {} + }, + { + "label": "part_indices['face']", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.models.SMPLX", + "description": "modules.PIXIE.pixielib.models.SMPLX", + "peekOfCode": "part_indices['face'] = np.array([ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66,\n 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,\n 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92,\n 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105,\n 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 
117, 118,\n 119, 120, 121, 122])\npart_indices['upper'] = np.array([ 12, 13, 14, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66,\n 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,\n 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92,\n 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105,", + "detail": "modules.PIXIE.pixielib.models.SMPLX", + "documentation": {} + }, + { + "label": "part_indices['upper']", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.models.SMPLX", + "description": "modules.PIXIE.pixielib.models.SMPLX", + "peekOfCode": "part_indices['upper'] = np.array([ 12, 13, 14, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66,\n 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,\n 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92,\n 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105,\n 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118,\n 119, 120, 121, 122])\npart_indices['hand'] = np.array([ 20, 21, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,\n 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,\n 49, 50, 51, 52, 53, 54, 128, 129, 130, 131, 133, 139, 140,\n 141, 142, 144])", + "detail": "modules.PIXIE.pixielib.models.SMPLX", + "documentation": {} + }, + { + "label": "part_indices['hand']", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.models.SMPLX", + "description": "modules.PIXIE.pixielib.models.SMPLX", + "peekOfCode": "part_indices['hand'] = np.array([ 20, 21, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,\n 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,\n 49, 50, 51, 52, 53, 54, 128, 129, 130, 131, 133, 139, 140,\n 141, 142, 144])\npart_indices['left_hand'] = np.array([ 20, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,\n 37, 38, 39, 128, 129, 130, 131, 133])\npart_indices['right_hand'] = np.array([ 21, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,\n 52, 53, 54, 139, 140, 141, 142, 144])\n# kinematic tree \nhead_kin_chain = [15,12,9,6,3,0]", + "detail": "modules.PIXIE.pixielib.models.SMPLX", + "documentation": {} + }, + { + "label": "part_indices['left_hand']", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.models.SMPLX", + "description": "modules.PIXIE.pixielib.models.SMPLX", + "peekOfCode": "part_indices['left_hand'] = np.array([ 20, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,\n 37, 38, 39, 128, 129, 130, 131, 133])\npart_indices['right_hand'] = np.array([ 21, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,\n 52, 53, 54, 139, 140, 141, 142, 144])\n# kinematic tree \nhead_kin_chain = [15,12,9,6,3,0]\n#--smplx joints\n# 00 - Global\n# 01 - L_Thigh\n# 02 - R_Thigh", + "detail": "modules.PIXIE.pixielib.models.SMPLX", + "documentation": {} + }, + { + "label": "part_indices['right_hand']", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.models.SMPLX", + "description": "modules.PIXIE.pixielib.models.SMPLX", + "peekOfCode": "part_indices['right_hand'] = np.array([ 21, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,\n 52, 53, 54, 139, 140, 141, 142, 144])\n# kinematic tree \nhead_kin_chain = [15,12,9,6,3,0]\n#--smplx joints\n# 00 - Global\n# 01 - L_Thigh\n# 02 - R_Thigh\n# 03 - Spine\n# 04 - L_Calf", + "detail": "modules.PIXIE.pixielib.models.SMPLX", + "documentation": {} + }, + { + "label": "head_kin_chain", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.models.SMPLX", + "description": "modules.PIXIE.pixielib.models.SMPLX", + "peekOfCode": "head_kin_chain = [15,12,9,6,3,0]\n#--smplx joints\n# 00 - Global\n# 01 - L_Thigh\n# 02 - R_Thigh\n# 03 - Spine\n# 04 - L_Calf\n# 05 - R_Calf\n# 06 - Spine1\n# 07 - L_Foot", + 
"detail": "modules.PIXIE.pixielib.models.SMPLX", + "documentation": {} + }, + { + "label": "os.environ[\"CC\"]", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.rasterizer.setup", + "description": "modules.PIXIE.pixielib.utils.rasterizer.setup", + "peekOfCode": "os.environ[\"CC\"] = \"gcc-7\"\nos.environ[\"CXX\"] = \"gcc-7\"\nUSE_NINJA = os.getenv('USE_NINJA') == '1'\nsetup(\n name='standard_rasterize_cuda',\n ext_modules=[\n\tCUDAExtension('standard_rasterize_cuda', [\n 'standard_rasterize_cuda.cpp',\n 'standard_rasterize_cuda_kernel.cu',\n ])", + "detail": "modules.PIXIE.pixielib.utils.rasterizer.setup", + "documentation": {} + }, + { + "label": "os.environ[\"CXX\"]", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.rasterizer.setup", + "description": "modules.PIXIE.pixielib.utils.rasterizer.setup", + "peekOfCode": "os.environ[\"CXX\"] = \"gcc-7\"\nUSE_NINJA = os.getenv('USE_NINJA') == '1'\nsetup(\n name='standard_rasterize_cuda',\n ext_modules=[\n\tCUDAExtension('standard_rasterize_cuda', [\n 'standard_rasterize_cuda.cpp',\n 'standard_rasterize_cuda_kernel.cu',\n ])\n\t],", + "detail": "modules.PIXIE.pixielib.utils.rasterizer.setup", + "documentation": {} + }, + { + "label": "USE_NINJA", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.rasterizer.setup", + "description": "modules.PIXIE.pixielib.utils.rasterizer.setup", + "peekOfCode": "USE_NINJA = os.getenv('USE_NINJA') == '1'\nsetup(\n name='standard_rasterize_cuda',\n ext_modules=[\n\tCUDAExtension('standard_rasterize_cuda', [\n 'standard_rasterize_cuda.cpp',\n 'standard_rasterize_cuda_kernel.cu',\n ])\n\t],\n cmdclass={'build_ext': BuildExtension.with_options(use_ninja=USE_NINJA)}", + "detail": "modules.PIXIE.pixielib.utils.rasterizer.setup", + "documentation": {} + }, + { + "label": "Cropper", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.utils.array_cropper", + "description": "modules.PIXIE.pixielib.utils.array_cropper", + "peekOfCode": "class Cropper(object):\n def __init__(self, crop_size, scale=[1,1], trans_scale = 0.):\n self.crop_size = crop_size\n self.scale = scale\n self.trans_scale = trans_scale\n def crop(self, image, points, points_scale=None):\n # points to bbox\n center, bbox_size = points2bbox(points, points_scale)\n # argument bbox. 
\n center, bbox_size = augment_bbox(center, bbox_size, scale=self.scale, trans_scale=self.trans_scale)", + "detail": "modules.PIXIE.pixielib.utils.array_cropper", + "documentation": {} + }, + { + "label": "points2bbox", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.array_cropper", + "description": "modules.PIXIE.pixielib.utils.array_cropper", + "peekOfCode": "def points2bbox(points, points_scale=None):\n # recover range\n if points_scale:\n points[:,0] = points[:,0]*points_scale[1]/2 + points_scale[1]/2\n points[:,1] = points[:,1]*points_scale[0]/2 + points_scale[0]/2\n left = np.min(points[:,0]); right = np.max(points[:,0]); \n top = np.min(points[:,1]); bottom = np.max(points[:,1])\n size = max(right - left, bottom - top)\n center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0 ])#+ old_size*0.1])\n return center, size", + "detail": "modules.PIXIE.pixielib.utils.array_cropper", + "documentation": {} + }, + { + "label": "augment_bbox", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.array_cropper", + "description": "modules.PIXIE.pixielib.utils.array_cropper", + "peekOfCode": "def augment_bbox(center, bbox_size, scale=[1.0, 1.0], trans_scale=0.):\n trans_scale = (np.random.rand(2)*2 -1) * trans_scale\n center = center + trans_scale*bbox_size # 0.5\n scale = np.random.rand() * (scale[1] - scale[0]) + scale[0]\n size = int(bbox_size*scale)\n return center, size\ndef crop_array(image, center, bboxsize, crop_size):\n ''' for single image only\n Args:\n image (numpy.Array): the reference array of shape HxWXC.", + "detail": "modules.PIXIE.pixielib.utils.array_cropper", + "documentation": {} + }, + { + "label": "crop_array", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.array_cropper", + "description": "modules.PIXIE.pixielib.utils.array_cropper", + "peekOfCode": "def crop_array(image, center, bboxsize, crop_size):\n ''' for single image only\n Args:\n image (numpy.Array): the reference array of shape HxWXC.\n size (Tuple[int, int]): a tuple with the height and width that will be\n used to resize the extracted patches.\n Returns:\n cropped_image\n tform: 3x3 affine matrix\n '''", + "detail": "modules.PIXIE.pixielib.utils.array_cropper", + "documentation": {} + }, + { + "label": "get_cfg_defaults", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "def get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return cfg.clone()\ndef update_cfg(cfg, cfg_file):\n # cfg.merge_from_file(cfg_file, allow_unsafe=True)\n cfg.merge_from_file(cfg_file)\n return cfg.clone()\ndef parse_args():", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "update_cfg", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "def update_cfg(cfg, cfg_file):\n # cfg.merge_from_file(cfg_file, allow_unsafe=True)\n cfg.merge_from_file(cfg_file)\n return cfg.clone()\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--cfg', type=str, help='cfg file path')\n args = parser.parse_args()\n cfg = get_cfg_defaults()\n if args.cfg is not None:", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "parse_args", + "kind": 2, + "importPath": 
"modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--cfg', type=str, help='cfg file path')\n args = parser.parse_args()\n cfg = get_cfg_defaults()\n if args.cfg is not None:\n cfg_file = args.cfg\n cfg = update_cfg(cfg, args.cfg)\n cfg.cfg_file = cfg_file\n return cfg", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg = CN()\n# abs_pixie_dir-->\n# data-->\n# deca_model.tar\n# ......\nabs_pixie_dir = os.path.abspath(os.path.join(\n os.path.dirname(__file__), '../../../../../models/models_pixie'))\ncfg.pixie_dir = abs_pixie_dir\ncfg.device = 'cuda'\ncfg.device_id = '0'", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "abs_pixie_dir", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "abs_pixie_dir = os.path.abspath(os.path.join(\n os.path.dirname(__file__), '../../../../../models/models_pixie'))\ncfg.pixie_dir = abs_pixie_dir\ncfg.device = 'cuda'\ncfg.device_id = '0'\ncfg.pretrained_modelpath = os.path.join(cfg.pixie_dir, 'data', 'pixie_model.tar')\n# cfg.pretrained_modelpath = '/ps/scratch/yfeng/Data/Projects-data/BodyHead/cluster_training/1113_final_newcoco_all_2_together/all[1.0]_betas[0.0]_pose[5.0]_pose[0.1]_3d[0.5]_body[5.0]_head[1.0]_hand[1.0]_shape[0.001]_pose[0.1]_prior[True]_reg[12.0]/model.tar'\n# cfg.pretrained_modelpath = '/ps/scratch/yfeng/Data/Projects-data/BodyHead/cluster_training/1204_newtex_head_only/size[8]_min[1.8]_max[2.2]_scale[0.3]_step[2]_eyed[1.0]_lipd[0.5]_exp[0.0001]_shape[0.0001]_tex[5e-05]_prior[True]_useWlmk[False]_/model.tar'\n## smplx parameter settings\ncfg.params = CN()", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.pixie_dir", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.pixie_dir = abs_pixie_dir\ncfg.device = 'cuda'\ncfg.device_id = '0'\ncfg.pretrained_modelpath = os.path.join(cfg.pixie_dir, 'data', 'pixie_model.tar')\n# cfg.pretrained_modelpath = '/ps/scratch/yfeng/Data/Projects-data/BodyHead/cluster_training/1113_final_newcoco_all_2_together/all[1.0]_betas[0.0]_pose[5.0]_pose[0.1]_3d[0.5]_body[5.0]_head[1.0]_hand[1.0]_shape[0.001]_pose[0.1]_prior[True]_reg[12.0]/model.tar'\n# cfg.pretrained_modelpath = '/ps/scratch/yfeng/Data/Projects-data/BodyHead/cluster_training/1204_newtex_head_only/size[8]_min[1.8]_max[2.2]_scale[0.3]_step[2]_eyed[1.0]_lipd[0.5]_exp[0.0001]_shape[0.0001]_tex[5e-05]_prior[True]_useWlmk[False]_/model.tar'\n## smplx parameter settings\ncfg.params = CN()\ncfg.params.body_list = ['body_cam', 'global_pose', 'partbody_pose', 'neck_pose']\ncfg.params.head_list = ['head_cam', 'tex', 'light']", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.device", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.device = 'cuda'\ncfg.device_id = '0'\ncfg.pretrained_modelpath = os.path.join(cfg.pixie_dir, 'data', 'pixie_model.tar')\n# cfg.pretrained_modelpath = 
'/ps/scratch/yfeng/Data/Projects-data/BodyHead/cluster_training/1113_final_newcoco_all_2_together/all[1.0]_betas[0.0]_pose[5.0]_pose[0.1]_3d[0.5]_body[5.0]_head[1.0]_hand[1.0]_shape[0.001]_pose[0.1]_prior[True]_reg[12.0]/model.tar'\n# cfg.pretrained_modelpath = '/ps/scratch/yfeng/Data/Projects-data/BodyHead/cluster_training/1204_newtex_head_only/size[8]_min[1.8]_max[2.2]_scale[0.3]_step[2]_eyed[1.0]_lipd[0.5]_exp[0.0001]_shape[0.0001]_tex[5e-05]_prior[True]_useWlmk[False]_/model.tar'\n## smplx parameter settings\ncfg.params = CN()\ncfg.params.body_list = ['body_cam', 'global_pose', 'partbody_pose', 'neck_pose']\ncfg.params.head_list = ['head_cam', 'tex', 'light']\ncfg.params.head_share_list = ['shape', 'exp', 'head_pose', 'jaw_pose']", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.device_id", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.device_id = '0'\ncfg.pretrained_modelpath = os.path.join(cfg.pixie_dir, 'data', 'pixie_model.tar')\n# cfg.pretrained_modelpath = '/ps/scratch/yfeng/Data/Projects-data/BodyHead/cluster_training/1113_final_newcoco_all_2_together/all[1.0]_betas[0.0]_pose[5.0]_pose[0.1]_3d[0.5]_body[5.0]_head[1.0]_hand[1.0]_shape[0.001]_pose[0.1]_prior[True]_reg[12.0]/model.tar'\n# cfg.pretrained_modelpath = '/ps/scratch/yfeng/Data/Projects-data/BodyHead/cluster_training/1204_newtex_head_only/size[8]_min[1.8]_max[2.2]_scale[0.3]_step[2]_eyed[1.0]_lipd[0.5]_exp[0.0001]_shape[0.0001]_tex[5e-05]_prior[True]_useWlmk[False]_/model.tar'\n## smplx parameter settings\ncfg.params = CN()\ncfg.params.body_list = ['body_cam', 'global_pose', 'partbody_pose', 'neck_pose']\ncfg.params.head_list = ['head_cam', 'tex', 'light']\ncfg.params.head_share_list = ['shape', 'exp', 'head_pose', 'jaw_pose']\ncfg.params.hand_list = ['hand_cam']", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.pretrained_modelpath", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.pretrained_modelpath = os.path.join(cfg.pixie_dir, 'data', 'pixie_model.tar')\n# cfg.pretrained_modelpath = '/ps/scratch/yfeng/Data/Projects-data/BodyHead/cluster_training/1113_final_newcoco_all_2_together/all[1.0]_betas[0.0]_pose[5.0]_pose[0.1]_3d[0.5]_body[5.0]_head[1.0]_hand[1.0]_shape[0.001]_pose[0.1]_prior[True]_reg[12.0]/model.tar'\n# cfg.pretrained_modelpath = '/ps/scratch/yfeng/Data/Projects-data/BodyHead/cluster_training/1204_newtex_head_only/size[8]_min[1.8]_max[2.2]_scale[0.3]_step[2]_eyed[1.0]_lipd[0.5]_exp[0.0001]_shape[0.0001]_tex[5e-05]_prior[True]_useWlmk[False]_/model.tar'\n## smplx parameter settings\ncfg.params = CN()\ncfg.params.body_list = ['body_cam', 'global_pose', 'partbody_pose', 'neck_pose']\ncfg.params.head_list = ['head_cam', 'tex', 'light']\ncfg.params.head_share_list = ['shape', 'exp', 'head_pose', 'jaw_pose']\ncfg.params.hand_list = ['hand_cam']\ncfg.params.hand_share_list = ['right_wrist_pose', 'right_hand_pose'] #only for right hand", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.params", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.params = CN()\ncfg.params.body_list = ['body_cam', 'global_pose', 'partbody_pose', 'neck_pose']\ncfg.params.head_list = ['head_cam', 'tex', 
'light']\ncfg.params.head_share_list = ['shape', 'exp', 'head_pose', 'jaw_pose']\ncfg.params.hand_list = ['hand_cam']\ncfg.params.hand_share_list = ['right_wrist_pose', 'right_hand_pose'] #only for right hand\n# ---------------------------------------------------------------------------- #\n# Options for Body model\n# ---------------------------------------------------------------------------- #\ncfg.model = CN()", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.params.body_list", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.params.body_list = ['body_cam', 'global_pose', 'partbody_pose', 'neck_pose']\ncfg.params.head_list = ['head_cam', 'tex', 'light']\ncfg.params.head_share_list = ['shape', 'exp', 'head_pose', 'jaw_pose']\ncfg.params.hand_list = ['hand_cam']\ncfg.params.hand_share_list = ['right_wrist_pose', 'right_hand_pose'] #only for right hand\n# ---------------------------------------------------------------------------- #\n# Options for Body model\n# ---------------------------------------------------------------------------- #\ncfg.model = CN()\ncfg.model.tex_path = os.path.join(cfg.pixie_dir, 'data', 'FLAME_albedo_from_BFM.npz')", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.params.head_list", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.params.head_list = ['head_cam', 'tex', 'light']\ncfg.params.head_share_list = ['shape', 'exp', 'head_pose', 'jaw_pose']\ncfg.params.hand_list = ['hand_cam']\ncfg.params.hand_share_list = ['right_wrist_pose', 'right_hand_pose'] #only for right hand\n# ---------------------------------------------------------------------------- #\n# Options for Body model\n# ---------------------------------------------------------------------------- #\ncfg.model = CN()\ncfg.model.tex_path = os.path.join(cfg.pixie_dir, 'data', 'FLAME_albedo_from_BFM.npz')\ncfg.model.smplx_model_path = os.path.join(cfg.pixie_dir, 'data', 'SMPLX_NEUTRAL_2020.npz')", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.params.head_share_list", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.params.head_share_list = ['shape', 'exp', 'head_pose', 'jaw_pose']\ncfg.params.hand_list = ['hand_cam']\ncfg.params.hand_share_list = ['right_wrist_pose', 'right_hand_pose'] #only for right hand\n# ---------------------------------------------------------------------------- #\n# Options for Body model\n# ---------------------------------------------------------------------------- #\ncfg.model = CN()\ncfg.model.tex_path = os.path.join(cfg.pixie_dir, 'data', 'FLAME_albedo_from_BFM.npz')\ncfg.model.smplx_model_path = os.path.join(cfg.pixie_dir, 'data', 'SMPLX_NEUTRAL_2020.npz')\ncfg.model.topology_path = os.path.join(cfg.pixie_dir, 'data', 'SMPL_X_template_FLAME_uv.obj') ", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.params.hand_list", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.params.hand_list = ['hand_cam']\ncfg.params.hand_share_list = ['right_wrist_pose', 'right_hand_pose'] #only for right hand\n# 
---------------------------------------------------------------------------- #\n# Options for Body model\n# ---------------------------------------------------------------------------- #\ncfg.model = CN()\ncfg.model.tex_path = os.path.join(cfg.pixie_dir, 'data', 'FLAME_albedo_from_BFM.npz')\ncfg.model.smplx_model_path = os.path.join(cfg.pixie_dir, 'data', 'SMPLX_NEUTRAL_2020.npz')\ncfg.model.topology_path = os.path.join(cfg.pixie_dir, 'data', 'SMPL_X_template_FLAME_uv.obj') \ncfg.model.topology_smplxtex_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_tex.obj')", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.params.hand_share_list", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.params.hand_share_list = ['right_wrist_pose', 'right_hand_pose'] #only for right hand\n# ---------------------------------------------------------------------------- #\n# Options for Body model\n# ---------------------------------------------------------------------------- #\ncfg.model = CN()\ncfg.model.tex_path = os.path.join(cfg.pixie_dir, 'data', 'FLAME_albedo_from_BFM.npz')\ncfg.model.smplx_model_path = os.path.join(cfg.pixie_dir, 'data', 'SMPLX_NEUTRAL_2020.npz')\ncfg.model.topology_path = os.path.join(cfg.pixie_dir, 'data', 'SMPL_X_template_FLAME_uv.obj') \ncfg.model.topology_smplxtex_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_tex.obj')\ncfg.model.topology_smplx_hand_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_hand.obj')", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model = CN()\ncfg.model.tex_path = os.path.join(cfg.pixie_dir, 'data', 'FLAME_albedo_from_BFM.npz')\ncfg.model.smplx_model_path = os.path.join(cfg.pixie_dir, 'data', 'SMPLX_NEUTRAL_2020.npz')\ncfg.model.topology_path = os.path.join(cfg.pixie_dir, 'data', 'SMPL_X_template_FLAME_uv.obj') \ncfg.model.topology_smplxtex_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_tex.obj')\ncfg.model.topology_smplx_hand_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_hand.obj')\ncfg.model.face_mask_path = os.path.join(cfg.pixie_dir, 'data', 'uv_face_mask.png')\ncfg.model.face_eye_mask_path = os.path.join(cfg.pixie_dir, 'data', 'uv_face_eye_mask.png')\ncfg.model.extra_joint_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_extra_joints.yaml')\ncfg.model.j14_regressor_path = os.path.join(cfg.pixie_dir, 'data', 'SMPLX_to_J14.pkl')", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.tex_path", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.tex_path = os.path.join(cfg.pixie_dir, 'data', 'FLAME_albedo_from_BFM.npz')\ncfg.model.smplx_model_path = os.path.join(cfg.pixie_dir, 'data', 'SMPLX_NEUTRAL_2020.npz')\ncfg.model.topology_path = os.path.join(cfg.pixie_dir, 'data', 'SMPL_X_template_FLAME_uv.obj') \ncfg.model.topology_smplxtex_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_tex.obj')\ncfg.model.topology_smplx_hand_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_hand.obj')\ncfg.model.face_mask_path = os.path.join(cfg.pixie_dir, 'data', 'uv_face_mask.png')\ncfg.model.face_eye_mask_path = os.path.join(cfg.pixie_dir, 'data', 
'uv_face_eye_mask.png')\ncfg.model.extra_joint_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_extra_joints.yaml')\ncfg.model.j14_regressor_path = os.path.join(cfg.pixie_dir, 'data', 'SMPLX_to_J14.pkl')\ncfg.model.flame2smplx_cached_path = os.path.join(cfg.pixie_dir, 'data', 'flame2smplx_tex_1024.npy')", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.smplx_model_path", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.smplx_model_path = os.path.join(cfg.pixie_dir, 'data', 'SMPLX_NEUTRAL_2020.npz')\ncfg.model.topology_path = os.path.join(cfg.pixie_dir, 'data', 'SMPL_X_template_FLAME_uv.obj') \ncfg.model.topology_smplxtex_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_tex.obj')\ncfg.model.topology_smplx_hand_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_hand.obj')\ncfg.model.face_mask_path = os.path.join(cfg.pixie_dir, 'data', 'uv_face_mask.png')\ncfg.model.face_eye_mask_path = os.path.join(cfg.pixie_dir, 'data', 'uv_face_eye_mask.png')\ncfg.model.extra_joint_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_extra_joints.yaml')\ncfg.model.j14_regressor_path = os.path.join(cfg.pixie_dir, 'data', 'SMPLX_to_J14.pkl')\ncfg.model.flame2smplx_cached_path = os.path.join(cfg.pixie_dir, 'data', 'flame2smplx_tex_1024.npy')\ncfg.model.smplx_tex_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_tex.png')", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.topology_path", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.topology_path = os.path.join(cfg.pixie_dir, 'data', 'SMPL_X_template_FLAME_uv.obj') \ncfg.model.topology_smplxtex_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_tex.obj')\ncfg.model.topology_smplx_hand_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_hand.obj')\ncfg.model.face_mask_path = os.path.join(cfg.pixie_dir, 'data', 'uv_face_mask.png')\ncfg.model.face_eye_mask_path = os.path.join(cfg.pixie_dir, 'data', 'uv_face_eye_mask.png')\ncfg.model.extra_joint_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_extra_joints.yaml')\ncfg.model.j14_regressor_path = os.path.join(cfg.pixie_dir, 'data', 'SMPLX_to_J14.pkl')\ncfg.model.flame2smplx_cached_path = os.path.join(cfg.pixie_dir, 'data', 'flame2smplx_tex_1024.npy')\ncfg.model.smplx_tex_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_tex.png')\ncfg.model.mano_ids_path = os.path.join(cfg.pixie_dir, 'data', 'MANO_SMPLX_vertex_ids.pkl')", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.topology_smplxtex_path", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.topology_smplxtex_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_tex.obj')\ncfg.model.topology_smplx_hand_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_hand.obj')\ncfg.model.face_mask_path = os.path.join(cfg.pixie_dir, 'data', 'uv_face_mask.png')\ncfg.model.face_eye_mask_path = os.path.join(cfg.pixie_dir, 'data', 'uv_face_eye_mask.png')\ncfg.model.extra_joint_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_extra_joints.yaml')\ncfg.model.j14_regressor_path = os.path.join(cfg.pixie_dir, 'data', 'SMPLX_to_J14.pkl')\ncfg.model.flame2smplx_cached_path = os.path.join(cfg.pixie_dir, 'data', 
'flame2smplx_tex_1024.npy')\ncfg.model.smplx_tex_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_tex.png')\ncfg.model.mano_ids_path = os.path.join(cfg.pixie_dir, 'data', 'MANO_SMPLX_vertex_ids.pkl')\ncfg.model.flame_ids_path = os.path.join(cfg.pixie_dir, 'data', 'SMPL-X__FLAME_vertex_ids.npy')", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.topology_smplx_hand_path", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.topology_smplx_hand_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_hand.obj')\ncfg.model.face_mask_path = os.path.join(cfg.pixie_dir, 'data', 'uv_face_mask.png')\ncfg.model.face_eye_mask_path = os.path.join(cfg.pixie_dir, 'data', 'uv_face_eye_mask.png')\ncfg.model.extra_joint_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_extra_joints.yaml')\ncfg.model.j14_regressor_path = os.path.join(cfg.pixie_dir, 'data', 'SMPLX_to_J14.pkl')\ncfg.model.flame2smplx_cached_path = os.path.join(cfg.pixie_dir, 'data', 'flame2smplx_tex_1024.npy')\ncfg.model.smplx_tex_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_tex.png')\ncfg.model.mano_ids_path = os.path.join(cfg.pixie_dir, 'data', 'MANO_SMPLX_vertex_ids.pkl')\ncfg.model.flame_ids_path = os.path.join(cfg.pixie_dir, 'data', 'SMPL-X__FLAME_vertex_ids.npy')\ncfg.model.uv_size = 256", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.face_mask_path", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.face_mask_path = os.path.join(cfg.pixie_dir, 'data', 'uv_face_mask.png')\ncfg.model.face_eye_mask_path = os.path.join(cfg.pixie_dir, 'data', 'uv_face_eye_mask.png')\ncfg.model.extra_joint_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_extra_joints.yaml')\ncfg.model.j14_regressor_path = os.path.join(cfg.pixie_dir, 'data', 'SMPLX_to_J14.pkl')\ncfg.model.flame2smplx_cached_path = os.path.join(cfg.pixie_dir, 'data', 'flame2smplx_tex_1024.npy')\ncfg.model.smplx_tex_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_tex.png')\ncfg.model.mano_ids_path = os.path.join(cfg.pixie_dir, 'data', 'MANO_SMPLX_vertex_ids.pkl')\ncfg.model.flame_ids_path = os.path.join(cfg.pixie_dir, 'data', 'SMPL-X__FLAME_vertex_ids.npy')\ncfg.model.uv_size = 256\n# cfg.model.n_shape = 10", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.face_eye_mask_path", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.face_eye_mask_path = os.path.join(cfg.pixie_dir, 'data', 'uv_face_eye_mask.png')\ncfg.model.extra_joint_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_extra_joints.yaml')\ncfg.model.j14_regressor_path = os.path.join(cfg.pixie_dir, 'data', 'SMPLX_to_J14.pkl')\ncfg.model.flame2smplx_cached_path = os.path.join(cfg.pixie_dir, 'data', 'flame2smplx_tex_1024.npy')\ncfg.model.smplx_tex_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_tex.png')\ncfg.model.mano_ids_path = os.path.join(cfg.pixie_dir, 'data', 'MANO_SMPLX_vertex_ids.pkl')\ncfg.model.flame_ids_path = os.path.join(cfg.pixie_dir, 'data', 'SMPL-X__FLAME_vertex_ids.npy')\ncfg.model.uv_size = 256\n# cfg.model.n_shape = 10\ncfg.model.n_shape = 200", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": 
"cfg.model.extra_joint_path", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.extra_joint_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_extra_joints.yaml')\ncfg.model.j14_regressor_path = os.path.join(cfg.pixie_dir, 'data', 'SMPLX_to_J14.pkl')\ncfg.model.flame2smplx_cached_path = os.path.join(cfg.pixie_dir, 'data', 'flame2smplx_tex_1024.npy')\ncfg.model.smplx_tex_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_tex.png')\ncfg.model.mano_ids_path = os.path.join(cfg.pixie_dir, 'data', 'MANO_SMPLX_vertex_ids.pkl')\ncfg.model.flame_ids_path = os.path.join(cfg.pixie_dir, 'data', 'SMPL-X__FLAME_vertex_ids.npy')\ncfg.model.uv_size = 256\n# cfg.model.n_shape = 10\ncfg.model.n_shape = 200\ncfg.model.n_tex = 50", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.j14_regressor_path", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.j14_regressor_path = os.path.join(cfg.pixie_dir, 'data', 'SMPLX_to_J14.pkl')\ncfg.model.flame2smplx_cached_path = os.path.join(cfg.pixie_dir, 'data', 'flame2smplx_tex_1024.npy')\ncfg.model.smplx_tex_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_tex.png')\ncfg.model.mano_ids_path = os.path.join(cfg.pixie_dir, 'data', 'MANO_SMPLX_vertex_ids.pkl')\ncfg.model.flame_ids_path = os.path.join(cfg.pixie_dir, 'data', 'SMPL-X__FLAME_vertex_ids.npy')\ncfg.model.uv_size = 256\n# cfg.model.n_shape = 10\ncfg.model.n_shape = 200\ncfg.model.n_tex = 50\n# cfg.model.n_exp = 10", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.flame2smplx_cached_path", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.flame2smplx_cached_path = os.path.join(cfg.pixie_dir, 'data', 'flame2smplx_tex_1024.npy')\ncfg.model.smplx_tex_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_tex.png')\ncfg.model.mano_ids_path = os.path.join(cfg.pixie_dir, 'data', 'MANO_SMPLX_vertex_ids.pkl')\ncfg.model.flame_ids_path = os.path.join(cfg.pixie_dir, 'data', 'SMPL-X__FLAME_vertex_ids.npy')\ncfg.model.uv_size = 256\n# cfg.model.n_shape = 10\ncfg.model.n_shape = 200\ncfg.model.n_tex = 50\n# cfg.model.n_exp = 10\ncfg.model.n_exp = 50", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.smplx_tex_path", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.smplx_tex_path = os.path.join(cfg.pixie_dir, 'data', 'smplx_tex.png')\ncfg.model.mano_ids_path = os.path.join(cfg.pixie_dir, 'data', 'MANO_SMPLX_vertex_ids.pkl')\ncfg.model.flame_ids_path = os.path.join(cfg.pixie_dir, 'data', 'SMPL-X__FLAME_vertex_ids.npy')\ncfg.model.uv_size = 256\n# cfg.model.n_shape = 10\ncfg.model.n_shape = 200\ncfg.model.n_tex = 50\n# cfg.model.n_exp = 10\ncfg.model.n_exp = 50\ncfg.model.n_body_cam = 3", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.mano_ids_path", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.mano_ids_path = os.path.join(cfg.pixie_dir, 'data', 'MANO_SMPLX_vertex_ids.pkl')\ncfg.model.flame_ids_path = 
os.path.join(cfg.pixie_dir, 'data', 'SMPL-X__FLAME_vertex_ids.npy')\ncfg.model.uv_size = 256\n# cfg.model.n_shape = 10\ncfg.model.n_shape = 200\ncfg.model.n_tex = 50\n# cfg.model.n_exp = 10\ncfg.model.n_exp = 50\ncfg.model.n_body_cam = 3\ncfg.model.n_head_cam = 3", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.flame_ids_path", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.flame_ids_path = os.path.join(cfg.pixie_dir, 'data', 'SMPL-X__FLAME_vertex_ids.npy')\ncfg.model.uv_size = 256\n# cfg.model.n_shape = 10\ncfg.model.n_shape = 200\ncfg.model.n_tex = 50\n# cfg.model.n_exp = 10\ncfg.model.n_exp = 50\ncfg.model.n_body_cam = 3\ncfg.model.n_head_cam = 3\ncfg.model.n_hand_cam = 3", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.uv_size", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.uv_size = 256\n# cfg.model.n_shape = 10\ncfg.model.n_shape = 200\ncfg.model.n_tex = 50\n# cfg.model.n_exp = 10\ncfg.model.n_exp = 50\ncfg.model.n_body_cam = 3\ncfg.model.n_head_cam = 3\ncfg.model.n_hand_cam = 3\ncfg.model.tex_type = 'BFM' # BFM, FLAME, albedoMM", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.n_shape", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.n_shape = 200\ncfg.model.n_tex = 50\n# cfg.model.n_exp = 10\ncfg.model.n_exp = 50\ncfg.model.n_body_cam = 3\ncfg.model.n_head_cam = 3\ncfg.model.n_hand_cam = 3\ncfg.model.tex_type = 'BFM' # BFM, FLAME, albedoMM\ncfg.model.uvtex_type = 'SMPLX' # FLAME or SMPLX\ncfg.model.use_tex = False #True # whether to use flame texture model", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.n_tex", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.n_tex = 50\n# cfg.model.n_exp = 10\ncfg.model.n_exp = 50\ncfg.model.n_body_cam = 3\ncfg.model.n_head_cam = 3\ncfg.model.n_hand_cam = 3\ncfg.model.tex_type = 'BFM' # BFM, FLAME, albedoMM\ncfg.model.uvtex_type = 'SMPLX' # FLAME or SMPLX\ncfg.model.use_tex = False #True # whether to use flame texture model\ncfg.model.flame_tex_path = '/ps/scratch/yfeng/Data/FLAME/texture/FLAME_texture.npz'", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.n_exp", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.n_exp = 50\ncfg.model.n_body_cam = 3\ncfg.model.n_head_cam = 3\ncfg.model.n_hand_cam = 3\ncfg.model.tex_type = 'BFM' # BFM, FLAME, albedoMM\ncfg.model.uvtex_type = 'SMPLX' # FLAME or SMPLX\ncfg.model.use_tex = False #True # whether to use flame texture model\ncfg.model.flame_tex_path = '/ps/scratch/yfeng/Data/FLAME/texture/FLAME_texture.npz'\n# pose\ncfg.model.n_global_pose = 3*2", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.n_body_cam", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.n_body_cam 
= 3\ncfg.model.n_head_cam = 3\ncfg.model.n_hand_cam = 3\ncfg.model.tex_type = 'BFM' # BFM, FLAME, albedoMM\ncfg.model.uvtex_type = 'SMPLX' # FLAME or SMPLX\ncfg.model.use_tex = False #True # whether to use flame texture model\ncfg.model.flame_tex_path = '/ps/scratch/yfeng/Data/FLAME/texture/FLAME_texture.npz'\n# pose\ncfg.model.n_global_pose = 3*2\ncfg.model.n_head_pose = 3*2", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.n_head_cam", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.n_head_cam = 3\ncfg.model.n_hand_cam = 3\ncfg.model.tex_type = 'BFM' # BFM, FLAME, albedoMM\ncfg.model.uvtex_type = 'SMPLX' # FLAME or SMPLX\ncfg.model.use_tex = False #True # whether to use flame texture model\ncfg.model.flame_tex_path = '/ps/scratch/yfeng/Data/FLAME/texture/FLAME_texture.npz'\n# pose\ncfg.model.n_global_pose = 3*2\ncfg.model.n_head_pose = 3*2\ncfg.model.n_neck_pose = 3*2", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.n_hand_cam", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.n_hand_cam = 3\ncfg.model.tex_type = 'BFM' # BFM, FLAME, albedoMM\ncfg.model.uvtex_type = 'SMPLX' # FLAME or SMPLX\ncfg.model.use_tex = False #True # whether to use flame texture model\ncfg.model.flame_tex_path = '/ps/scratch/yfeng/Data/FLAME/texture/FLAME_texture.npz'\n# pose\ncfg.model.n_global_pose = 3*2\ncfg.model.n_head_pose = 3*2\ncfg.model.n_neck_pose = 3*2\ncfg.model.n_jaw_pose = 3 #euler angle", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.tex_type", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.tex_type = 'BFM' # BFM, FLAME, albedoMM\ncfg.model.uvtex_type = 'SMPLX' # FLAME or SMPLX\ncfg.model.use_tex = False #True # whether to use flame texture model\ncfg.model.flame_tex_path = '/ps/scratch/yfeng/Data/FLAME/texture/FLAME_texture.npz'\n# pose\ncfg.model.n_global_pose = 3*2\ncfg.model.n_head_pose = 3*2\ncfg.model.n_neck_pose = 3*2\ncfg.model.n_jaw_pose = 3 #euler angle\ncfg.model.n_body_pose = 21*3*2", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.uvtex_type", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.uvtex_type = 'SMPLX' # FLAME or SMPLX\ncfg.model.use_tex = False #True # whether to use flame texture model\ncfg.model.flame_tex_path = '/ps/scratch/yfeng/Data/FLAME/texture/FLAME_texture.npz'\n# pose\ncfg.model.n_global_pose = 3*2\ncfg.model.n_head_pose = 3*2\ncfg.model.n_neck_pose = 3*2\ncfg.model.n_jaw_pose = 3 #euler angle\ncfg.model.n_body_pose = 21*3*2\ncfg.model.n_partbody_pose = (21-4)*3*2", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.use_tex", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.use_tex = False #True # whether to use flame texture model\ncfg.model.flame_tex_path = '/ps/scratch/yfeng/Data/FLAME/texture/FLAME_texture.npz'\n# pose\ncfg.model.n_global_pose = 3*2\ncfg.model.n_head_pose = 
3*2\ncfg.model.n_neck_pose = 3*2\ncfg.model.n_jaw_pose = 3 #euler angle\ncfg.model.n_body_pose = 21*3*2\ncfg.model.n_partbody_pose = (21-4)*3*2\ncfg.model.n_left_hand_pose = 15*3*2", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.flame_tex_path", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.flame_tex_path = '/ps/scratch/yfeng/Data/FLAME/texture/FLAME_texture.npz'\n# pose\ncfg.model.n_global_pose = 3*2\ncfg.model.n_head_pose = 3*2\ncfg.model.n_neck_pose = 3*2\ncfg.model.n_jaw_pose = 3 #euler angle\ncfg.model.n_body_pose = 21*3*2\ncfg.model.n_partbody_pose = (21-4)*3*2\ncfg.model.n_left_hand_pose = 15*3*2\ncfg.model.n_right_hand_pose = 15*3*2", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.n_global_pose", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.n_global_pose = 3*2\ncfg.model.n_head_pose = 3*2\ncfg.model.n_neck_pose = 3*2\ncfg.model.n_jaw_pose = 3 #euler angle\ncfg.model.n_body_pose = 21*3*2\ncfg.model.n_partbody_pose = (21-4)*3*2\ncfg.model.n_left_hand_pose = 15*3*2\ncfg.model.n_right_hand_pose = 15*3*2\ncfg.model.n_left_wrist_pose = 1*3*2\ncfg.model.n_right_wrist_pose = 1*3*2", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.n_head_pose", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.n_head_pose = 3*2\ncfg.model.n_neck_pose = 3*2\ncfg.model.n_jaw_pose = 3 #euler angle\ncfg.model.n_body_pose = 21*3*2\ncfg.model.n_partbody_pose = (21-4)*3*2\ncfg.model.n_left_hand_pose = 15*3*2\ncfg.model.n_right_hand_pose = 15*3*2\ncfg.model.n_left_wrist_pose = 1*3*2\ncfg.model.n_right_wrist_pose = 1*3*2\ncfg.model.n_light = 27", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.n_neck_pose", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.n_neck_pose = 3*2\ncfg.model.n_jaw_pose = 3 #euler angle\ncfg.model.n_body_pose = 21*3*2\ncfg.model.n_partbody_pose = (21-4)*3*2\ncfg.model.n_left_hand_pose = 15*3*2\ncfg.model.n_right_hand_pose = 15*3*2\ncfg.model.n_left_wrist_pose = 1*3*2\ncfg.model.n_right_wrist_pose = 1*3*2\ncfg.model.n_light = 27\ncfg.model.check_pose = True", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.n_jaw_pose", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.n_jaw_pose = 3 #euler angle\ncfg.model.n_body_pose = 21*3*2\ncfg.model.n_partbody_pose = (21-4)*3*2\ncfg.model.n_left_hand_pose = 15*3*2\ncfg.model.n_right_hand_pose = 15*3*2\ncfg.model.n_left_wrist_pose = 1*3*2\ncfg.model.n_right_wrist_pose = 1*3*2\ncfg.model.n_light = 27\ncfg.model.check_pose = True\n# ---------------------------------------------------------------------------- #", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.n_body_pose", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": 
"cfg.model.n_body_pose = 21*3*2\ncfg.model.n_partbody_pose = (21-4)*3*2\ncfg.model.n_left_hand_pose = 15*3*2\ncfg.model.n_right_hand_pose = 15*3*2\ncfg.model.n_left_wrist_pose = 1*3*2\ncfg.model.n_right_wrist_pose = 1*3*2\ncfg.model.n_light = 27\ncfg.model.check_pose = True\n# ---------------------------------------------------------------------------- #\n# Options for Dataset", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.n_partbody_pose", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.n_partbody_pose = (21-4)*3*2\ncfg.model.n_left_hand_pose = 15*3*2\ncfg.model.n_right_hand_pose = 15*3*2\ncfg.model.n_left_wrist_pose = 1*3*2\ncfg.model.n_right_wrist_pose = 1*3*2\ncfg.model.n_light = 27\ncfg.model.check_pose = True\n# ---------------------------------------------------------------------------- #\n# Options for Dataset\n# ---------------------------------------------------------------------------- #", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.n_left_hand_pose", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.n_left_hand_pose = 15*3*2\ncfg.model.n_right_hand_pose = 15*3*2\ncfg.model.n_left_wrist_pose = 1*3*2\ncfg.model.n_right_wrist_pose = 1*3*2\ncfg.model.n_light = 27\ncfg.model.check_pose = True\n# ---------------------------------------------------------------------------- #\n# Options for Dataset\n# ---------------------------------------------------------------------------- #\ncfg.dataset = CN()", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.n_right_hand_pose", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.n_right_hand_pose = 15*3*2\ncfg.model.n_left_wrist_pose = 1*3*2\ncfg.model.n_right_wrist_pose = 1*3*2\ncfg.model.n_light = 27\ncfg.model.check_pose = True\n# ---------------------------------------------------------------------------- #\n# Options for Dataset\n# ---------------------------------------------------------------------------- #\ncfg.dataset = CN()\ncfg.dataset.source = ['body', 'head', 'hand']", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.n_left_wrist_pose", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.n_left_wrist_pose = 1*3*2\ncfg.model.n_right_wrist_pose = 1*3*2\ncfg.model.n_light = 27\ncfg.model.check_pose = True\n# ---------------------------------------------------------------------------- #\n# Options for Dataset\n# ---------------------------------------------------------------------------- #\ncfg.dataset = CN()\ncfg.dataset.source = ['body', 'head', 'hand']\n# head/face dataset", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.n_right_wrist_pose", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.n_right_wrist_pose = 1*3*2\ncfg.model.n_light = 27\ncfg.model.check_pose = True\n# ---------------------------------------------------------------------------- #\n# Options 
for Dataset\n# ---------------------------------------------------------------------------- #\ncfg.dataset = CN()\ncfg.dataset.source = ['body', 'head', 'hand']\n# head/face dataset\ncfg.dataset.head = CN()", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.n_light", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.n_light = 27\ncfg.model.check_pose = True\n# ---------------------------------------------------------------------------- #\n# Options for Dataset\n# ---------------------------------------------------------------------------- #\ncfg.dataset = CN()\ncfg.dataset.source = ['body', 'head', 'hand']\n# head/face dataset\ncfg.dataset.head = CN()\ncfg.dataset.head.batch_size = 24", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.model.check_pose", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.model.check_pose = True\n# ---------------------------------------------------------------------------- #\n# Options for Dataset\n# ---------------------------------------------------------------------------- #\ncfg.dataset = CN()\ncfg.dataset.source = ['body', 'head', 'hand']\n# head/face dataset\ncfg.dataset.head = CN()\ncfg.dataset.head.batch_size = 24\ncfg.dataset.head.num_workers = 2", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.dataset = CN()\ncfg.dataset.source = ['body', 'head', 'hand']\n# head/face dataset\ncfg.dataset.head = CN()\ncfg.dataset.head.batch_size = 24\ncfg.dataset.head.num_workers = 2\ncfg.dataset.head.from_body = True\ncfg.dataset.head.image_size = 224\ncfg.dataset.head.image_hd_size = 224\ncfg.dataset.head.scale_min = 1.8", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.source", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.dataset.source = ['body', 'head', 'hand']\n# head/face dataset\ncfg.dataset.head = CN()\ncfg.dataset.head.batch_size = 24\ncfg.dataset.head.num_workers = 2\ncfg.dataset.head.from_body = True\ncfg.dataset.head.image_size = 224\ncfg.dataset.head.image_hd_size = 224\ncfg.dataset.head.scale_min = 1.8\ncfg.dataset.head.scale_max = 2.2", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.head", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.dataset.head = CN()\ncfg.dataset.head.batch_size = 24\ncfg.dataset.head.num_workers = 2\ncfg.dataset.head.from_body = True\ncfg.dataset.head.image_size = 224\ncfg.dataset.head.image_hd_size = 224\ncfg.dataset.head.scale_min = 1.8\ncfg.dataset.head.scale_max = 2.2\ncfg.dataset.head.trans_scale = 0.3\n# body dataset", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.head.batch_size", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.dataset.head.batch_size = 
24\ncfg.dataset.head.num_workers = 2\ncfg.dataset.head.from_body = True\ncfg.dataset.head.image_size = 224\ncfg.dataset.head.image_hd_size = 224\ncfg.dataset.head.scale_min = 1.8\ncfg.dataset.head.scale_max = 2.2\ncfg.dataset.head.trans_scale = 0.3\n# body dataset\ncfg.dataset.body = CN()", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.head.num_workers", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.dataset.head.num_workers = 2\ncfg.dataset.head.from_body = True\ncfg.dataset.head.image_size = 224\ncfg.dataset.head.image_hd_size = 224\ncfg.dataset.head.scale_min = 1.8\ncfg.dataset.head.scale_max = 2.2\ncfg.dataset.head.trans_scale = 0.3\n# body dataset\ncfg.dataset.body = CN()\ncfg.dataset.body.batch_size = 24", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.head.from_body", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.dataset.head.from_body = True\ncfg.dataset.head.image_size = 224\ncfg.dataset.head.image_hd_size = 224\ncfg.dataset.head.scale_min = 1.8\ncfg.dataset.head.scale_max = 2.2\ncfg.dataset.head.trans_scale = 0.3\n# body dataset\ncfg.dataset.body = CN()\ncfg.dataset.body.batch_size = 24\ncfg.dataset.body.num_workers = 2", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.head.image_size", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.dataset.head.image_size = 224\ncfg.dataset.head.image_hd_size = 224\ncfg.dataset.head.scale_min = 1.8\ncfg.dataset.head.scale_max = 2.2\ncfg.dataset.head.trans_scale = 0.3\n# body dataset\ncfg.dataset.body = CN()\ncfg.dataset.body.batch_size = 24\ncfg.dataset.body.num_workers = 2\ncfg.dataset.body.image_size = 224", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.head.image_hd_size", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.dataset.head.image_hd_size = 224\ncfg.dataset.head.scale_min = 1.8\ncfg.dataset.head.scale_max = 2.2\ncfg.dataset.head.trans_scale = 0.3\n# body dataset\ncfg.dataset.body = CN()\ncfg.dataset.body.batch_size = 24\ncfg.dataset.body.num_workers = 2\ncfg.dataset.body.image_size = 224\ncfg.dataset.body.image_hd_size = 1024", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.head.scale_min", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.dataset.head.scale_min = 1.8\ncfg.dataset.head.scale_max = 2.2\ncfg.dataset.head.trans_scale = 0.3\n# body dataset\ncfg.dataset.body = CN()\ncfg.dataset.body.batch_size = 24\ncfg.dataset.body.num_workers = 2\ncfg.dataset.body.image_size = 224\ncfg.dataset.body.image_hd_size = 1024\ncfg.dataset.body.use_hd = True", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.head.scale_max", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.dataset.head.scale_max = 2.2\ncfg.dataset.head.trans_scale = 
0.3\n# body dataset\ncfg.dataset.body = CN()\ncfg.dataset.body.batch_size = 24\ncfg.dataset.body.num_workers = 2\ncfg.dataset.body.image_size = 224\ncfg.dataset.body.image_hd_size = 1024\ncfg.dataset.body.use_hd = True\n# hand dataset", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.head.trans_scale", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.dataset.head.trans_scale = 0.3\n# body dataset\ncfg.dataset.body = CN()\ncfg.dataset.body.batch_size = 24\ncfg.dataset.body.num_workers = 2\ncfg.dataset.body.image_size = 224\ncfg.dataset.body.image_hd_size = 1024\ncfg.dataset.body.use_hd = True\n# hand dataset\ncfg.dataset.hand = CN()", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.body", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.dataset.body = CN()\ncfg.dataset.body.batch_size = 24\ncfg.dataset.body.num_workers = 2\ncfg.dataset.body.image_size = 224\ncfg.dataset.body.image_hd_size = 1024\ncfg.dataset.body.use_hd = True\n# hand dataset\ncfg.dataset.hand = CN()\ncfg.dataset.hand.batch_size = 24\ncfg.dataset.hand.num_workers = 2", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.body.batch_size", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.dataset.body.batch_size = 24\ncfg.dataset.body.num_workers = 2\ncfg.dataset.body.image_size = 224\ncfg.dataset.body.image_hd_size = 1024\ncfg.dataset.body.use_hd = True\n# hand dataset\ncfg.dataset.hand = CN()\ncfg.dataset.hand.batch_size = 24\ncfg.dataset.hand.num_workers = 2\ncfg.dataset.hand.image_size = 224", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.body.num_workers", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.dataset.body.num_workers = 2\ncfg.dataset.body.image_size = 224\ncfg.dataset.body.image_hd_size = 1024\ncfg.dataset.body.use_hd = True\n# hand dataset\ncfg.dataset.hand = CN()\ncfg.dataset.hand.batch_size = 24\ncfg.dataset.hand.num_workers = 2\ncfg.dataset.hand.image_size = 224\ncfg.dataset.hand.image_hd_size = 512", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.body.image_size", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.dataset.body.image_size = 224\ncfg.dataset.body.image_hd_size = 1024\ncfg.dataset.body.use_hd = True\n# hand dataset\ncfg.dataset.hand = CN()\ncfg.dataset.hand.batch_size = 24\ncfg.dataset.hand.num_workers = 2\ncfg.dataset.hand.image_size = 224\ncfg.dataset.hand.image_hd_size = 512\ncfg.dataset.hand.scale_min = 2.2", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.body.image_hd_size", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.dataset.body.image_hd_size = 1024\ncfg.dataset.body.use_hd = True\n# hand dataset\ncfg.dataset.hand = CN()\ncfg.dataset.hand.batch_size = 24\ncfg.dataset.hand.num_workers 
= 2\ncfg.dataset.hand.image_size = 224\ncfg.dataset.hand.image_hd_size = 512\ncfg.dataset.hand.scale_min = 2.2\ncfg.dataset.hand.scale_max = 2.6", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.body.use_hd", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.dataset.body.use_hd = True\n# hand dataset\ncfg.dataset.hand = CN()\ncfg.dataset.hand.batch_size = 24\ncfg.dataset.hand.num_workers = 2\ncfg.dataset.hand.image_size = 224\ncfg.dataset.hand.image_hd_size = 512\ncfg.dataset.hand.scale_min = 2.2\ncfg.dataset.hand.scale_max = 2.6\ncfg.dataset.hand.trans_scale = 0.4", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.hand", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.dataset.hand = CN()\ncfg.dataset.hand.batch_size = 24\ncfg.dataset.hand.num_workers = 2\ncfg.dataset.hand.image_size = 224\ncfg.dataset.hand.image_hd_size = 512\ncfg.dataset.hand.scale_min = 2.2\ncfg.dataset.hand.scale_max = 2.6\ncfg.dataset.hand.trans_scale = 0.4\n# ---------------------------------------------------------------------------- #\n# Options for Network", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.hand.batch_size", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.dataset.hand.batch_size = 24\ncfg.dataset.hand.num_workers = 2\ncfg.dataset.hand.image_size = 224\ncfg.dataset.hand.image_hd_size = 512\ncfg.dataset.hand.scale_min = 2.2\ncfg.dataset.hand.scale_max = 2.6\ncfg.dataset.hand.trans_scale = 0.4\n# ---------------------------------------------------------------------------- #\n# Options for Network\n# ---------------------------------------------------------------------------- #", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.hand.num_workers", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.dataset.hand.num_workers = 2\ncfg.dataset.hand.image_size = 224\ncfg.dataset.hand.image_hd_size = 512\ncfg.dataset.hand.scale_min = 2.2\ncfg.dataset.hand.scale_max = 2.6\ncfg.dataset.hand.trans_scale = 0.4\n# ---------------------------------------------------------------------------- #\n# Options for Network\n# ---------------------------------------------------------------------------- #\ncfg.network = CN()", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.hand.image_size", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.dataset.hand.image_size = 224\ncfg.dataset.hand.image_hd_size = 512\ncfg.dataset.hand.scale_min = 2.2\ncfg.dataset.hand.scale_max = 2.6\ncfg.dataset.hand.trans_scale = 0.4\n# ---------------------------------------------------------------------------- #\n# Options for Network\n# ---------------------------------------------------------------------------- #\ncfg.network = CN()\ncfg.network.encoder = CN()", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.hand.image_hd_size", + "kind": 
5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.dataset.hand.image_hd_size = 512\ncfg.dataset.hand.scale_min = 2.2\ncfg.dataset.hand.scale_max = 2.6\ncfg.dataset.hand.trans_scale = 0.4\n# ---------------------------------------------------------------------------- #\n# Options for Network\n# ---------------------------------------------------------------------------- #\ncfg.network = CN()\ncfg.network.encoder = CN()\ncfg.network.encoder.body = CN()", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.hand.scale_min", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.dataset.hand.scale_min = 2.2\ncfg.dataset.hand.scale_max = 2.6\ncfg.dataset.hand.trans_scale = 0.4\n# ---------------------------------------------------------------------------- #\n# Options for Network\n# ---------------------------------------------------------------------------- #\ncfg.network = CN()\ncfg.network.encoder = CN()\ncfg.network.encoder.body = CN()\ncfg.network.encoder.body.type = 'hrnet'", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.hand.scale_max", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.dataset.hand.scale_max = 2.6\ncfg.dataset.hand.trans_scale = 0.4\n# ---------------------------------------------------------------------------- #\n# Options for Network\n# ---------------------------------------------------------------------------- #\ncfg.network = CN()\ncfg.network.encoder = CN()\ncfg.network.encoder.body = CN()\ncfg.network.encoder.body.type = 'hrnet'\ncfg.network.encoder.head = CN()", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.dataset.hand.trans_scale", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.dataset.hand.trans_scale = 0.4\n# ---------------------------------------------------------------------------- #\n# Options for Network\n# ---------------------------------------------------------------------------- #\ncfg.network = CN()\ncfg.network.encoder = CN()\ncfg.network.encoder.body = CN()\ncfg.network.encoder.body.type = 'hrnet'\ncfg.network.encoder.head = CN()\ncfg.network.encoder.head.type = 'resnet50'", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network = CN()\ncfg.network.encoder = CN()\ncfg.network.encoder.body = CN()\ncfg.network.encoder.body.type = 'hrnet'\ncfg.network.encoder.head = CN()\ncfg.network.encoder.head.type = 'resnet50'\ncfg.network.encoder.hand = CN()\ncfg.network.encoder.hand.type = 'resnet50'\ncfg.network.regressor = CN()\ncfg.network.regressor.head_share = CN()", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.encoder", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.encoder = CN()\ncfg.network.encoder.body = CN()\ncfg.network.encoder.body.type = 
'hrnet'\ncfg.network.encoder.head = CN()\ncfg.network.encoder.head.type = 'resnet50'\ncfg.network.encoder.hand = CN()\ncfg.network.encoder.hand.type = 'resnet50'\ncfg.network.regressor = CN()\ncfg.network.regressor.head_share = CN()\ncfg.network.regressor.head_share.type = 'mlp'", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.encoder.body", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.encoder.body = CN()\ncfg.network.encoder.body.type = 'hrnet'\ncfg.network.encoder.head = CN()\ncfg.network.encoder.head.type = 'resnet50'\ncfg.network.encoder.hand = CN()\ncfg.network.encoder.hand.type = 'resnet50'\ncfg.network.regressor = CN()\ncfg.network.regressor.head_share = CN()\ncfg.network.regressor.head_share.type = 'mlp'\ncfg.network.regressor.head_share.channels = [1024, 1024]", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.encoder.body.type", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.encoder.body.type = 'hrnet'\ncfg.network.encoder.head = CN()\ncfg.network.encoder.head.type = 'resnet50'\ncfg.network.encoder.hand = CN()\ncfg.network.encoder.hand.type = 'resnet50'\ncfg.network.regressor = CN()\ncfg.network.regressor.head_share = CN()\ncfg.network.regressor.head_share.type = 'mlp'\ncfg.network.regressor.head_share.channels = [1024, 1024]\ncfg.network.regressor.hand_share = CN()", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.encoder.head", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.encoder.head = CN()\ncfg.network.encoder.head.type = 'resnet50'\ncfg.network.encoder.hand = CN()\ncfg.network.encoder.hand.type = 'resnet50'\ncfg.network.regressor = CN()\ncfg.network.regressor.head_share = CN()\ncfg.network.regressor.head_share.type = 'mlp'\ncfg.network.regressor.head_share.channels = [1024, 1024]\ncfg.network.regressor.hand_share = CN()\ncfg.network.regressor.hand_share.type = 'mlp'", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.encoder.head.type", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.encoder.head.type = 'resnet50'\ncfg.network.encoder.hand = CN()\ncfg.network.encoder.hand.type = 'resnet50'\ncfg.network.regressor = CN()\ncfg.network.regressor.head_share = CN()\ncfg.network.regressor.head_share.type = 'mlp'\ncfg.network.regressor.head_share.channels = [1024, 1024]\ncfg.network.regressor.hand_share = CN()\ncfg.network.regressor.hand_share.type = 'mlp'\ncfg.network.regressor.hand_share.channels = [1024, 1024]", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.encoder.hand", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.encoder.hand = CN()\ncfg.network.encoder.hand.type = 'resnet50'\ncfg.network.regressor = CN()\ncfg.network.regressor.head_share = CN()\ncfg.network.regressor.head_share.type = 'mlp'\ncfg.network.regressor.head_share.channels = [1024, 
1024]\ncfg.network.regressor.hand_share = CN()\ncfg.network.regressor.hand_share.type = 'mlp'\ncfg.network.regressor.hand_share.channels = [1024, 1024]\ncfg.network.regressor.body = CN()", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.encoder.hand.type", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.encoder.hand.type = 'resnet50'\ncfg.network.regressor = CN()\ncfg.network.regressor.head_share = CN()\ncfg.network.regressor.head_share.type = 'mlp'\ncfg.network.regressor.head_share.channels = [1024, 1024]\ncfg.network.regressor.hand_share = CN()\ncfg.network.regressor.hand_share.type = 'mlp'\ncfg.network.regressor.hand_share.channels = [1024, 1024]\ncfg.network.regressor.body = CN()\ncfg.network.regressor.body.type = 'mlp'", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.regressor", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.regressor = CN()\ncfg.network.regressor.head_share = CN()\ncfg.network.regressor.head_share.type = 'mlp'\ncfg.network.regressor.head_share.channels = [1024, 1024]\ncfg.network.regressor.hand_share = CN()\ncfg.network.regressor.hand_share.type = 'mlp'\ncfg.network.regressor.hand_share.channels = [1024, 1024]\ncfg.network.regressor.body = CN()\ncfg.network.regressor.body.type = 'mlp'\ncfg.network.regressor.body.channels = [1024]", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.regressor.head_share", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.regressor.head_share = CN()\ncfg.network.regressor.head_share.type = 'mlp'\ncfg.network.regressor.head_share.channels = [1024, 1024]\ncfg.network.regressor.hand_share = CN()\ncfg.network.regressor.hand_share.type = 'mlp'\ncfg.network.regressor.hand_share.channels = [1024, 1024]\ncfg.network.regressor.body = CN()\ncfg.network.regressor.body.type = 'mlp'\ncfg.network.regressor.body.channels = [1024]\ncfg.network.regressor.head = CN()", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.regressor.head_share.type", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.regressor.head_share.type = 'mlp'\ncfg.network.regressor.head_share.channels = [1024, 1024]\ncfg.network.regressor.hand_share = CN()\ncfg.network.regressor.hand_share.type = 'mlp'\ncfg.network.regressor.hand_share.channels = [1024, 1024]\ncfg.network.regressor.body = CN()\ncfg.network.regressor.body.type = 'mlp'\ncfg.network.regressor.body.channels = [1024]\ncfg.network.regressor.head = CN()\ncfg.network.regressor.head.type = 'mlp'", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.regressor.head_share.channels", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.regressor.head_share.channels = [1024, 1024]\ncfg.network.regressor.hand_share = CN()\ncfg.network.regressor.hand_share.type = 'mlp'\ncfg.network.regressor.hand_share.channels = [1024, 
1024]\ncfg.network.regressor.body = CN()\ncfg.network.regressor.body.type = 'mlp'\ncfg.network.regressor.body.channels = [1024]\ncfg.network.regressor.head = CN()\ncfg.network.regressor.head.type = 'mlp'\ncfg.network.regressor.head.channels = [1024]", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.regressor.hand_share", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.regressor.hand_share = CN()\ncfg.network.regressor.hand_share.type = 'mlp'\ncfg.network.regressor.hand_share.channels = [1024, 1024]\ncfg.network.regressor.body = CN()\ncfg.network.regressor.body.type = 'mlp'\ncfg.network.regressor.body.channels = [1024]\ncfg.network.regressor.head = CN()\ncfg.network.regressor.head.type = 'mlp'\ncfg.network.regressor.head.channels = [1024]\ncfg.network.regressor.hand = CN()", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.regressor.hand_share.type", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.regressor.hand_share.type = 'mlp'\ncfg.network.regressor.hand_share.channels = [1024, 1024]\ncfg.network.regressor.body = CN()\ncfg.network.regressor.body.type = 'mlp'\ncfg.network.regressor.body.channels = [1024]\ncfg.network.regressor.head = CN()\ncfg.network.regressor.head.type = 'mlp'\ncfg.network.regressor.head.channels = [1024]\ncfg.network.regressor.hand = CN()\ncfg.network.regressor.hand.type = 'mlp'", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.regressor.hand_share.channels", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.regressor.hand_share.channels = [1024, 1024]\ncfg.network.regressor.body = CN()\ncfg.network.regressor.body.type = 'mlp'\ncfg.network.regressor.body.channels = [1024]\ncfg.network.regressor.head = CN()\ncfg.network.regressor.head.type = 'mlp'\ncfg.network.regressor.head.channels = [1024]\ncfg.network.regressor.hand = CN()\ncfg.network.regressor.hand.type = 'mlp'\ncfg.network.regressor.hand.channels = [1024]", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.regressor.body", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.regressor.body = CN()\ncfg.network.regressor.body.type = 'mlp'\ncfg.network.regressor.body.channels = [1024]\ncfg.network.regressor.head = CN()\ncfg.network.regressor.head.type = 'mlp'\ncfg.network.regressor.head.channels = [1024]\ncfg.network.regressor.hand = CN()\ncfg.network.regressor.hand.type = 'mlp'\ncfg.network.regressor.hand.channels = [1024]\ncfg.network.extractor = CN()", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.regressor.body.type", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.regressor.body.type = 'mlp'\ncfg.network.regressor.body.channels = [1024]\ncfg.network.regressor.head = CN()\ncfg.network.regressor.head.type = 'mlp'\ncfg.network.regressor.head.channels = [1024]\ncfg.network.regressor.hand = 
CN()\ncfg.network.regressor.hand.type = 'mlp'\ncfg.network.regressor.hand.channels = [1024]\ncfg.network.extractor = CN()\ncfg.network.extractor.head_share = CN()", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.regressor.body.channels", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.regressor.body.channels = [1024]\ncfg.network.regressor.head = CN()\ncfg.network.regressor.head.type = 'mlp'\ncfg.network.regressor.head.channels = [1024]\ncfg.network.regressor.hand = CN()\ncfg.network.regressor.hand.type = 'mlp'\ncfg.network.regressor.hand.channels = [1024]\ncfg.network.extractor = CN()\ncfg.network.extractor.head_share = CN()\ncfg.network.extractor.head_share.type = 'mlp'", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.regressor.head", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.regressor.head = CN()\ncfg.network.regressor.head.type = 'mlp'\ncfg.network.regressor.head.channels = [1024]\ncfg.network.regressor.hand = CN()\ncfg.network.regressor.hand.type = 'mlp'\ncfg.network.regressor.hand.channels = [1024]\ncfg.network.extractor = CN()\ncfg.network.extractor.head_share = CN()\ncfg.network.extractor.head_share.type = 'mlp'\ncfg.network.extractor.head_share.channels = []", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.regressor.head.type", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.regressor.head.type = 'mlp'\ncfg.network.regressor.head.channels = [1024]\ncfg.network.regressor.hand = CN()\ncfg.network.regressor.hand.type = 'mlp'\ncfg.network.regressor.hand.channels = [1024]\ncfg.network.extractor = CN()\ncfg.network.extractor.head_share = CN()\ncfg.network.extractor.head_share.type = 'mlp'\ncfg.network.extractor.head_share.channels = []\ncfg.network.extractor.left_hand_share = CN()", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.regressor.head.channels", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.regressor.head.channels = [1024]\ncfg.network.regressor.hand = CN()\ncfg.network.regressor.hand.type = 'mlp'\ncfg.network.regressor.hand.channels = [1024]\ncfg.network.extractor = CN()\ncfg.network.extractor.head_share = CN()\ncfg.network.extractor.head_share.type = 'mlp'\ncfg.network.extractor.head_share.channels = []\ncfg.network.extractor.left_hand_share = CN()\ncfg.network.extractor.left_hand_share.type = 'mlp'", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.regressor.hand", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.regressor.hand = CN()\ncfg.network.regressor.hand.type = 'mlp'\ncfg.network.regressor.hand.channels = [1024]\ncfg.network.extractor = CN()\ncfg.network.extractor.head_share = CN()\ncfg.network.extractor.head_share.type = 'mlp'\ncfg.network.extractor.head_share.channels = []\ncfg.network.extractor.left_hand_share = 
CN()\ncfg.network.extractor.left_hand_share.type = 'mlp'\ncfg.network.extractor.left_hand_share.channels = []", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.regressor.hand.type", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.regressor.hand.type = 'mlp'\ncfg.network.regressor.hand.channels = [1024]\ncfg.network.extractor = CN()\ncfg.network.extractor.head_share = CN()\ncfg.network.extractor.head_share.type = 'mlp'\ncfg.network.extractor.head_share.channels = []\ncfg.network.extractor.left_hand_share = CN()\ncfg.network.extractor.left_hand_share.type = 'mlp'\ncfg.network.extractor.left_hand_share.channels = []\ncfg.network.extractor.right_hand_share = CN()", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.regressor.hand.channels", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.regressor.hand.channels = [1024]\ncfg.network.extractor = CN()\ncfg.network.extractor.head_share = CN()\ncfg.network.extractor.head_share.type = 'mlp'\ncfg.network.extractor.head_share.channels = []\ncfg.network.extractor.left_hand_share = CN()\ncfg.network.extractor.left_hand_share.type = 'mlp'\ncfg.network.extractor.left_hand_share.channels = []\ncfg.network.extractor.right_hand_share = CN()\ncfg.network.extractor.right_hand_share.type = 'mlp'", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.extractor", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.extractor = CN()\ncfg.network.extractor.head_share = CN()\ncfg.network.extractor.head_share.type = 'mlp'\ncfg.network.extractor.head_share.channels = []\ncfg.network.extractor.left_hand_share = CN()\ncfg.network.extractor.left_hand_share.type = 'mlp'\ncfg.network.extractor.left_hand_share.channels = []\ncfg.network.extractor.right_hand_share = CN()\ncfg.network.extractor.right_hand_share.type = 'mlp'\ncfg.network.extractor.right_hand_share.channels = []", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.extractor.head_share", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.extractor.head_share = CN()\ncfg.network.extractor.head_share.type = 'mlp'\ncfg.network.extractor.head_share.channels = []\ncfg.network.extractor.left_hand_share = CN()\ncfg.network.extractor.left_hand_share.type = 'mlp'\ncfg.network.extractor.left_hand_share.channels = []\ncfg.network.extractor.right_hand_share = CN()\ncfg.network.extractor.right_hand_share.type = 'mlp'\ncfg.network.extractor.right_hand_share.channels = []\ncfg.network.moderator = CN()", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.extractor.head_share.type", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.extractor.head_share.type = 'mlp'\ncfg.network.extractor.head_share.channels = []\ncfg.network.extractor.left_hand_share = CN()\ncfg.network.extractor.left_hand_share.type = 
'mlp'\ncfg.network.extractor.left_hand_share.channels = []\ncfg.network.extractor.right_hand_share = CN()\ncfg.network.extractor.right_hand_share.type = 'mlp'\ncfg.network.extractor.right_hand_share.channels = []\ncfg.network.moderator = CN()\ncfg.network.moderator.head_share = CN()", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.extractor.head_share.channels", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.extractor.head_share.channels = []\ncfg.network.extractor.left_hand_share = CN()\ncfg.network.extractor.left_hand_share.type = 'mlp'\ncfg.network.extractor.left_hand_share.channels = []\ncfg.network.extractor.right_hand_share = CN()\ncfg.network.extractor.right_hand_share.type = 'mlp'\ncfg.network.extractor.right_hand_share.channels = []\ncfg.network.moderator = CN()\ncfg.network.moderator.head_share = CN()\ncfg.network.moderator.head_share.detach_inputs = False", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.extractor.left_hand_share", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.extractor.left_hand_share = CN()\ncfg.network.extractor.left_hand_share.type = 'mlp'\ncfg.network.extractor.left_hand_share.channels = []\ncfg.network.extractor.right_hand_share = CN()\ncfg.network.extractor.right_hand_share.type = 'mlp'\ncfg.network.extractor.right_hand_share.channels = []\ncfg.network.moderator = CN()\ncfg.network.moderator.head_share = CN()\ncfg.network.moderator.head_share.detach_inputs = False\ncfg.network.moderator.head_share.detach_feature = False", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.extractor.left_hand_share.type", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.extractor.left_hand_share.type = 'mlp'\ncfg.network.extractor.left_hand_share.channels = []\ncfg.network.extractor.right_hand_share = CN()\ncfg.network.extractor.right_hand_share.type = 'mlp'\ncfg.network.extractor.right_hand_share.channels = []\ncfg.network.moderator = CN()\ncfg.network.moderator.head_share = CN()\ncfg.network.moderator.head_share.detach_inputs = False\ncfg.network.moderator.head_share.detach_feature = False\ncfg.network.moderator.head_share.type = 'temp-softmax'", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.extractor.left_hand_share.channels", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.extractor.left_hand_share.channels = []\ncfg.network.extractor.right_hand_share = CN()\ncfg.network.extractor.right_hand_share.type = 'mlp'\ncfg.network.extractor.right_hand_share.channels = []\ncfg.network.moderator = CN()\ncfg.network.moderator.head_share = CN()\ncfg.network.moderator.head_share.detach_inputs = False\ncfg.network.moderator.head_share.detach_feature = False\ncfg.network.moderator.head_share.type = 'temp-softmax'\ncfg.network.moderator.head_share.channels = [1024, 1024]", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.extractor.right_hand_share", + "kind": 5, + "importPath": 
"modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.extractor.right_hand_share = CN()\ncfg.network.extractor.right_hand_share.type = 'mlp'\ncfg.network.extractor.right_hand_share.channels = []\ncfg.network.moderator = CN()\ncfg.network.moderator.head_share = CN()\ncfg.network.moderator.head_share.detach_inputs = False\ncfg.network.moderator.head_share.detach_feature = False\ncfg.network.moderator.head_share.type = 'temp-softmax'\ncfg.network.moderator.head_share.channels = [1024, 1024]\ncfg.network.moderator.head_share.reduction = 4", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.extractor.right_hand_share.type", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.extractor.right_hand_share.type = 'mlp'\ncfg.network.extractor.right_hand_share.channels = []\ncfg.network.moderator = CN()\ncfg.network.moderator.head_share = CN()\ncfg.network.moderator.head_share.detach_inputs = False\ncfg.network.moderator.head_share.detach_feature = False\ncfg.network.moderator.head_share.type = 'temp-softmax'\ncfg.network.moderator.head_share.channels = [1024, 1024]\ncfg.network.moderator.head_share.reduction = 4\ncfg.network.moderator.head_share.scale_type = 'scalars'", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.extractor.right_hand_share.channels", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.extractor.right_hand_share.channels = []\ncfg.network.moderator = CN()\ncfg.network.moderator.head_share = CN()\ncfg.network.moderator.head_share.detach_inputs = False\ncfg.network.moderator.head_share.detach_feature = False\ncfg.network.moderator.head_share.type = 'temp-softmax'\ncfg.network.moderator.head_share.channels = [1024, 1024]\ncfg.network.moderator.head_share.reduction = 4\ncfg.network.moderator.head_share.scale_type = 'scalars'\ncfg.network.moderator.head_share.scale_init = 1.0", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.moderator", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.moderator = CN()\ncfg.network.moderator.head_share = CN()\ncfg.network.moderator.head_share.detach_inputs = False\ncfg.network.moderator.head_share.detach_feature = False\ncfg.network.moderator.head_share.type = 'temp-softmax'\ncfg.network.moderator.head_share.channels = [1024, 1024]\ncfg.network.moderator.head_share.reduction = 4\ncfg.network.moderator.head_share.scale_type = 'scalars'\ncfg.network.moderator.head_share.scale_init = 1.0\ncfg.network.moderator.hand_share = CN()", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.moderator.head_share", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.moderator.head_share = CN()\ncfg.network.moderator.head_share.detach_inputs = False\ncfg.network.moderator.head_share.detach_feature = False\ncfg.network.moderator.head_share.type = 'temp-softmax'\ncfg.network.moderator.head_share.channels = [1024, 1024]\ncfg.network.moderator.head_share.reduction = 
4\ncfg.network.moderator.head_share.scale_type = 'scalars'\ncfg.network.moderator.head_share.scale_init = 1.0\ncfg.network.moderator.hand_share = CN()\ncfg.network.moderator.hand_share.detach_inputs = False", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.moderator.head_share.detach_inputs", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.moderator.head_share.detach_inputs = False\ncfg.network.moderator.head_share.detach_feature = False\ncfg.network.moderator.head_share.type = 'temp-softmax'\ncfg.network.moderator.head_share.channels = [1024, 1024]\ncfg.network.moderator.head_share.reduction = 4\ncfg.network.moderator.head_share.scale_type = 'scalars'\ncfg.network.moderator.head_share.scale_init = 1.0\ncfg.network.moderator.hand_share = CN()\ncfg.network.moderator.hand_share.detach_inputs = False\ncfg.network.moderator.hand_share.detach_feature = False", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.moderator.head_share.detach_feature", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.moderator.head_share.detach_feature = False\ncfg.network.moderator.head_share.type = 'temp-softmax'\ncfg.network.moderator.head_share.channels = [1024, 1024]\ncfg.network.moderator.head_share.reduction = 4\ncfg.network.moderator.head_share.scale_type = 'scalars'\ncfg.network.moderator.head_share.scale_init = 1.0\ncfg.network.moderator.hand_share = CN()\ncfg.network.moderator.hand_share.detach_inputs = False\ncfg.network.moderator.hand_share.detach_feature = False\ncfg.network.moderator.hand_share.type = 'temp-softmax'", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.moderator.head_share.type", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.moderator.head_share.type = 'temp-softmax'\ncfg.network.moderator.head_share.channels = [1024, 1024]\ncfg.network.moderator.head_share.reduction = 4\ncfg.network.moderator.head_share.scale_type = 'scalars'\ncfg.network.moderator.head_share.scale_init = 1.0\ncfg.network.moderator.hand_share = CN()\ncfg.network.moderator.hand_share.detach_inputs = False\ncfg.network.moderator.hand_share.detach_feature = False\ncfg.network.moderator.hand_share.type = 'temp-softmax'\ncfg.network.moderator.hand_share.channels = [1024, 1024]", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.moderator.head_share.channels", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.moderator.head_share.channels = [1024, 1024]\ncfg.network.moderator.head_share.reduction = 4\ncfg.network.moderator.head_share.scale_type = 'scalars'\ncfg.network.moderator.head_share.scale_init = 1.0\ncfg.network.moderator.hand_share = CN()\ncfg.network.moderator.hand_share.detach_inputs = False\ncfg.network.moderator.hand_share.detach_feature = False\ncfg.network.moderator.hand_share.type = 'temp-softmax'\ncfg.network.moderator.hand_share.channels = [1024, 1024]\ncfg.network.moderator.hand_share.reduction = 4", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + 
}, + { + "label": "cfg.network.moderator.head_share.reduction", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.moderator.head_share.reduction = 4\ncfg.network.moderator.head_share.scale_type = 'scalars'\ncfg.network.moderator.head_share.scale_init = 1.0\ncfg.network.moderator.hand_share = CN()\ncfg.network.moderator.hand_share.detach_inputs = False\ncfg.network.moderator.hand_share.detach_feature = False\ncfg.network.moderator.hand_share.type = 'temp-softmax'\ncfg.network.moderator.hand_share.channels = [1024, 1024]\ncfg.network.moderator.hand_share.reduction = 4\ncfg.network.moderator.hand_share.scale_type = 'scalars'", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.moderator.head_share.scale_type", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.moderator.head_share.scale_type = 'scalars'\ncfg.network.moderator.head_share.scale_init = 1.0\ncfg.network.moderator.hand_share = CN()\ncfg.network.moderator.hand_share.detach_inputs = False\ncfg.network.moderator.hand_share.detach_feature = False\ncfg.network.moderator.hand_share.type = 'temp-softmax'\ncfg.network.moderator.hand_share.channels = [1024, 1024]\ncfg.network.moderator.hand_share.reduction = 4\ncfg.network.moderator.hand_share.scale_type = 'scalars'\ncfg.network.moderator.hand_share.scale_init = 0.0", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.moderator.head_share.scale_init", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.moderator.head_share.scale_init = 1.0\ncfg.network.moderator.hand_share = CN()\ncfg.network.moderator.hand_share.detach_inputs = False\ncfg.network.moderator.hand_share.detach_feature = False\ncfg.network.moderator.hand_share.type = 'temp-softmax'\ncfg.network.moderator.hand_share.channels = [1024, 1024]\ncfg.network.moderator.hand_share.reduction = 4\ncfg.network.moderator.hand_share.scale_type = 'scalars'\ncfg.network.moderator.hand_share.scale_init = 0.0\ndef get_cfg_defaults():", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.moderator.hand_share", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.moderator.hand_share = CN()\ncfg.network.moderator.hand_share.detach_inputs = False\ncfg.network.moderator.hand_share.detach_feature = False\ncfg.network.moderator.hand_share.type = 'temp-softmax'\ncfg.network.moderator.hand_share.channels = [1024, 1024]\ncfg.network.moderator.hand_share.reduction = 4\ncfg.network.moderator.hand_share.scale_type = 'scalars'\ncfg.network.moderator.hand_share.scale_init = 0.0\ndef get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.moderator.hand_share.detach_inputs", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.moderator.hand_share.detach_inputs = False\ncfg.network.moderator.hand_share.detach_feature = 
False\ncfg.network.moderator.hand_share.type = 'temp-softmax'\ncfg.network.moderator.hand_share.channels = [1024, 1024]\ncfg.network.moderator.hand_share.reduction = 4\ncfg.network.moderator.hand_share.scale_type = 'scalars'\ncfg.network.moderator.hand_share.scale_init = 0.0\ndef get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"\n # Return a clone so that the defaults will not be altered", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.moderator.hand_share.detach_feature", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.moderator.hand_share.detach_feature = False\ncfg.network.moderator.hand_share.type = 'temp-softmax'\ncfg.network.moderator.hand_share.channels = [1024, 1024]\ncfg.network.moderator.hand_share.reduction = 4\ncfg.network.moderator.hand_share.scale_type = 'scalars'\ncfg.network.moderator.hand_share.scale_init = 0.0\ndef get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.moderator.hand_share.type", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.moderator.hand_share.type = 'temp-softmax'\ncfg.network.moderator.hand_share.channels = [1024, 1024]\ncfg.network.moderator.hand_share.reduction = 4\ncfg.network.moderator.hand_share.scale_type = 'scalars'\ncfg.network.moderator.hand_share.scale_init = 0.0\ndef get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return cfg.clone()", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.moderator.hand_share.channels", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.moderator.hand_share.channels = [1024, 1024]\ncfg.network.moderator.hand_share.reduction = 4\ncfg.network.moderator.hand_share.scale_type = 'scalars'\ncfg.network.moderator.hand_share.scale_init = 0.0\ndef get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return cfg.clone()\ndef update_cfg(cfg, cfg_file):", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.moderator.hand_share.reduction", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.moderator.hand_share.reduction = 4\ncfg.network.moderator.hand_share.scale_type = 'scalars'\ncfg.network.moderator.hand_share.scale_init = 0.0\ndef get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return cfg.clone()\ndef update_cfg(cfg, cfg_file):\n # cfg.merge_from_file(cfg_file, 
allow_unsafe=True)", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.moderator.hand_share.scale_type", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.moderator.hand_share.scale_type = 'scalars'\ncfg.network.moderator.hand_share.scale_init = 0.0\ndef get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return cfg.clone()\ndef update_cfg(cfg, cfg_file):\n # cfg.merge_from_file(cfg_file, allow_unsafe=True)\n cfg.merge_from_file(cfg_file)", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "cfg.network.moderator.hand_share.scale_init", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.config", + "description": "modules.PIXIE.pixielib.utils.config", + "peekOfCode": "cfg.network.moderator.hand_share.scale_init = 0.0\ndef get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return cfg.clone()\ndef update_cfg(cfg, cfg_file):\n # cfg.merge_from_file(cfg_file, allow_unsafe=True)\n cfg.merge_from_file(cfg_file)\n return cfg.clone()", + "detail": "modules.PIXIE.pixielib.utils.config", + "documentation": {} + }, + { + "label": "StandardRasterizer", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.utils.renderer", + "description": "modules.PIXIE.pixielib.utils.renderer", + "peekOfCode": "class StandardRasterizer(nn.Module):\n \"\"\" Alg: https://www.scratchapixel.com/lessons/3d-basic-rendering/rasterization-practical-implementation\n Notice:\n x,y,z are in image space, normalized to [-1, 1]\n can render non-squared image\n not differentiable\n \"\"\"\n def __init__(self, height, width=None):\n \"\"\"\n use fixed raster_settings for rendering faces", + "detail": "modules.PIXIE.pixielib.utils.renderer", + "documentation": {} + }, + { + "label": "Pytorch3dRasterizer", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.utils.renderer", + "description": "modules.PIXIE.pixielib.utils.renderer", + "peekOfCode": "class Pytorch3dRasterizer(nn.Module):\n \"\"\" Borrowed from https://github.com/facebookresearch/pytorch3d\n This class implements methods for rasterizing a batch of heterogenous Meshes.\n Notice:\n x,y,z are in image space, normalized\n can only render squared image now\n \"\"\"\n def __init__(self, image_size=224):\n \"\"\"\n use fixed raster_settings for rendering faces", + "detail": "modules.PIXIE.pixielib.utils.renderer", + "documentation": {} + }, + { + "label": "SRenderY", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.utils.renderer", + "description": "modules.PIXIE.pixielib.utils.renderer", + "peekOfCode": "class SRenderY(nn.Module):\n def __init__(self, image_size, obj_filename, uv_size=256, rasterizer_type='standard'):\n super(SRenderY, self).__init__()\n self.image_size = image_size\n self.uv_size = uv_size\n self.rasterizer_type=rasterizer_type\n if rasterizer_type == 'pytorch3d':\n self.rasterizer = Pytorch3dRasterizer(image_size)\n self.uv_rasterizer = Pytorch3dRasterizer(uv_size)\n verts, faces, aux = load_obj(obj_filename)", + "detail": "modules.PIXIE.pixielib.utils.renderer", + "documentation": {} + }, + { + "label": "set_rasterizer", + "kind": 2, + 
"importPath": "modules.PIXIE.pixielib.utils.renderer", + "description": "modules.PIXIE.pixielib.utils.renderer", + "peekOfCode": "def set_rasterizer(type='pytorch3d'):\n if type == 'pytorch3d':\n global Meshes, load_obj, rasterize_meshes\n from pytorch3d.structures import Meshes\n from pytorch3d.io import load_obj\n from pytorch3d.renderer.mesh import rasterize_meshes\n elif type == 'standard':\n global standard_rasterize, load_obj\n import os\n from .util import load_obj", + "detail": "modules.PIXIE.pixielib.utils.renderer", + "documentation": {} + }, + { + "label": "rad2deg", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.rotation_converter", + "description": "modules.PIXIE.pixielib.utils.rotation_converter", + "peekOfCode": "def rad2deg(tensor):\n \"\"\"Function that converts angles from radians to degrees.\n See :class:`~torchgeometry.RadToDeg` for details.\n Args:\n tensor (Tensor): Tensor of arbitrary shape.\n Returns:\n Tensor: Tensor with same shape as input.\n Example:\n >>> input = tgm.pi * torch.rand(1, 3, 3)\n >>> output = tgm.rad2deg(input)", + "detail": "modules.PIXIE.pixielib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "deg2rad", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.rotation_converter", + "description": "modules.PIXIE.pixielib.utils.rotation_converter", + "peekOfCode": "def deg2rad(tensor):\n \"\"\"Function that converts angles from degrees to radians.\n See :class:`~torchgeometry.DegToRad` for details.\n Args:\n tensor (Tensor): Tensor of arbitrary shape.\n Returns:\n Tensor: Tensor with same shape as input.\n Examples::\n >>> input = 360. * torch.rand(1, 3, 3)\n >>> output = tgm.deg2rad(input)", + "detail": "modules.PIXIE.pixielib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "euler_to_quaternion", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.rotation_converter", + "description": "modules.PIXIE.pixielib.utils.rotation_converter", + "peekOfCode": "def euler_to_quaternion(r):\n x = r[..., 0]\n y = r[..., 1]\n z = r[..., 2]\n z = z/2.0\n y = y/2.0\n x = x/2.0\n cz = torch.cos(z)\n sz = torch.sin(z)\n cy = torch.cos(y)", + "detail": "modules.PIXIE.pixielib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "rotation_matrix_to_quaternion", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.rotation_converter", + "description": "modules.PIXIE.pixielib.utils.rotation_converter", + "peekOfCode": "def rotation_matrix_to_quaternion(rotation_matrix, eps=1e-6):\n \"\"\"Convert 3x4 rotation matrix to 4d quaternion vector\n This algorithm is based on algorithm described in\n https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py#L201\n Args:\n rotation_matrix (Tensor): the rotation matrix to convert.\n Return:\n Tensor: the rotation in quaternion\n Shape:\n - Input: :math:`(N, 3, 4)`", + "detail": "modules.PIXIE.pixielib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "angle_axis_to_quaternion", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.rotation_converter", + "description": "modules.PIXIE.pixielib.utils.rotation_converter", + "peekOfCode": "def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor:\n \"\"\"Convert an angle axis to a quaternion.\n Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h\n Args:\n angle_axis (torch.Tensor): tensor with angle axis.\n Return:\n torch.Tensor: tensor with quaternion.\n Shape:\n - Input: :math:`(*, 3)` where `*` means, any number of 
dimensions\n - Output: :math:`(*, 4)`", + "detail": "modules.PIXIE.pixielib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "quaternion_to_rotation_matrix", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.rotation_converter", + "description": "modules.PIXIE.pixielib.utils.rotation_converter", + "peekOfCode": "def quaternion_to_rotation_matrix(quat):\n \"\"\"Convert quaternion coefficients to rotation matrix.\n Args:\n quat: size = [B, 4] 4 <===>(w, x, y, z)\n Returns:\n Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]\n \"\"\"\n norm_quat = quat\n norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True)\n w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, 2], norm_quat[:, 3]", + "detail": "modules.PIXIE.pixielib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "quaternion_to_angle_axis", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.rotation_converter", + "description": "modules.PIXIE.pixielib.utils.rotation_converter", + "peekOfCode": "def quaternion_to_angle_axis(quaternion: torch.Tensor):\n \"\"\"Convert quaternion vector to angle axis of rotation. TODO: CORRECT\n Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h\n Args:\n quaternion (torch.Tensor): tensor with quaternions.\n Return:\n torch.Tensor: tensor with angle axis of rotation.\n Shape:\n - Input: :math:`(*, 4)` where `*` means, any number of dimensions\n - Output: :math:`(*, 3)`", + "detail": "modules.PIXIE.pixielib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "batch_euler2axis", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.rotation_converter", + "description": "modules.PIXIE.pixielib.utils.rotation_converter", + "peekOfCode": "def batch_euler2axis(r):\n return quaternion_to_angle_axis(euler_to_quaternion(r))\ndef batch_euler2matrix(r):\n return quaternion_to_rotation_matrix(euler_to_quaternion(r))\ndef batch_matrix2euler(rot_mats):\n # Calculates rotation matrix to euler angles\n # Careful for extreme cases of eular angles like [0.0, pi, 0.0]\n ### only y biw\n # TODO: add x, z\n sy = torch.sqrt(rot_mats[:, 0, 0] * rot_mats[:, 0, 0] +", + "detail": "modules.PIXIE.pixielib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "batch_euler2matrix", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.rotation_converter", + "description": "modules.PIXIE.pixielib.utils.rotation_converter", + "peekOfCode": "def batch_euler2matrix(r):\n return quaternion_to_rotation_matrix(euler_to_quaternion(r))\ndef batch_matrix2euler(rot_mats):\n # Calculates rotation matrix to euler angles\n # Careful for extreme cases of eular angles like [0.0, pi, 0.0]\n ### only y biw\n # TODO: add x, z\n sy = torch.sqrt(rot_mats[:, 0, 0] * rot_mats[:, 0, 0] +\n rot_mats[:, 1, 0] * rot_mats[:, 1, 0])\n return torch.atan2(-rot_mats[:, 2, 0], sy)", + "detail": "modules.PIXIE.pixielib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "batch_matrix2euler", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.rotation_converter", + "description": "modules.PIXIE.pixielib.utils.rotation_converter", + "peekOfCode": "def batch_matrix2euler(rot_mats):\n # Calculates rotation matrix to euler angles\n # Careful for extreme cases of eular angles like [0.0, pi, 0.0]\n ### only y biw\n # TODO: add x, z\n sy = torch.sqrt(rot_mats[:, 0, 0] * rot_mats[:, 0, 0] +\n rot_mats[:, 1, 0] * rot_mats[:, 1, 0])\n return torch.atan2(-rot_mats[:, 2, 0], sy)\ndef 
batch_matrix2axis(rot_mats):\n return quaternion_to_angle_axis(rotation_matrix_to_quaternion(rot_mats))", + "detail": "modules.PIXIE.pixielib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "batch_matrix2axis", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.rotation_converter", + "description": "modules.PIXIE.pixielib.utils.rotation_converter", + "peekOfCode": "def batch_matrix2axis(rot_mats):\n return quaternion_to_angle_axis(rotation_matrix_to_quaternion(rot_mats))\ndef batch_axis2matrix(theta):\n # angle axis to rotation matrix\n # theta N x 3\n # return quat2mat(quat)\n # batch_rodrigues\n return quaternion_to_rotation_matrix(angle_axis_to_quaternion(theta))\ndef batch_axis2euler(theta):\n return batch_matrix2euler(batch_axis2matrix(theta))", + "detail": "modules.PIXIE.pixielib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "batch_axis2matrix", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.rotation_converter", + "description": "modules.PIXIE.pixielib.utils.rotation_converter", + "peekOfCode": "def batch_axis2matrix(theta):\n # angle axis to rotation matrix\n # theta N x 3\n # return quat2mat(quat)\n # batch_rodrigues\n return quaternion_to_rotation_matrix(angle_axis_to_quaternion(theta))\ndef batch_axis2euler(theta):\n return batch_matrix2euler(batch_axis2matrix(theta))\ndef batch_axis2euler(r):\n return rot_mat_to_euler(batch_rodrigues(r))", + "detail": "modules.PIXIE.pixielib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "batch_axis2euler", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.rotation_converter", + "description": "modules.PIXIE.pixielib.utils.rotation_converter", + "peekOfCode": "def batch_axis2euler(theta):\n return batch_matrix2euler(batch_axis2matrix(theta))\ndef batch_axis2euler(r):\n return rot_mat_to_euler(batch_rodrigues(r))\ndef batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):\n ''' same as batch_matrix2axis\n Calculates the rotation matrices for a batch of rotation vectors\n Parameters\n ----------\n rot_vecs: torch.tensor Nx3", + "detail": "modules.PIXIE.pixielib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "batch_axis2euler", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.rotation_converter", + "description": "modules.PIXIE.pixielib.utils.rotation_converter", + "peekOfCode": "def batch_axis2euler(r):\n return rot_mat_to_euler(batch_rodrigues(r))\ndef batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):\n ''' same as batch_matrix2axis\n Calculates the rotation matrices for a batch of rotation vectors\n Parameters\n ----------\n rot_vecs: torch.tensor Nx3\n array of N axis-angle vectors\n Returns", + "detail": "modules.PIXIE.pixielib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "batch_rodrigues", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.rotation_converter", + "description": "modules.PIXIE.pixielib.utils.rotation_converter", + "peekOfCode": "def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):\n ''' same as batch_matrix2axis\n Calculates the rotation matrices for a batch of rotation vectors\n Parameters\n ----------\n rot_vecs: torch.tensor Nx3\n array of N axis-angle vectors\n Returns\n -------\n R: torch.tensor Nx3x3", + "detail": "modules.PIXIE.pixielib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "batch_cont2matrix", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.rotation_converter", + "description": 
"modules.PIXIE.pixielib.utils.rotation_converter", + "peekOfCode": "def batch_cont2matrix(module_input):\n ''' Decoder for transforming a latent representation to rotation matrices\n Implements the decoding method described in:\n \"On the Continuity of Rotation Representations in Neural Networks\"\n Code from https://github.com/vchoutas/expose\n '''\n batch_size = module_input.shape[0]\n reshaped_input = module_input.reshape(-1, 3, 2)\n # Normalize the first vector\n b1 = F.normalize(reshaped_input[:, :, 0].clone(), dim=1)", + "detail": "modules.PIXIE.pixielib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "pi", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.rotation_converter", + "description": "modules.PIXIE.pixielib.utils.rotation_converter", + "peekOfCode": "pi = torch.Tensor([3.14159265358979323846])\ndef rad2deg(tensor):\n \"\"\"Function that converts angles from radians to degrees.\n See :class:`~torchgeometry.RadToDeg` for details.\n Args:\n tensor (Tensor): Tensor of arbitrary shape.\n Returns:\n Tensor: Tensor with same shape as input.\n Example:\n >>> input = tgm.pi * torch.rand(1, 3, 3)", + "detail": "modules.PIXIE.pixielib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "_AXIS_TO_IND", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.rotation_converter", + "description": "modules.PIXIE.pixielib.utils.rotation_converter", + "peekOfCode": "_AXIS_TO_IND = {'x': 0, 'y': 1, 'z': 2}\ndef _elementary_basis_vector(axis):\n b = torch.zeros(3)\n b[_AXIS_TO_IND[axis]] = 1\n return b\ndef _compute_euler_from_matrix(dcm, seq='xyz', extrinsic=False):\n # The algorithm assumes intrinsic frame transformations. For representation\n # the paper uses transformation matrices, which are transpose of the\n # direction cosine matrices used by our Rotation class.\n # Adapt the algorithm for our case by", + "detail": "modules.PIXIE.pixielib.utils.rotation_converter", + "documentation": {} + }, + { + "label": "Cropper", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.utils.tensor_cropper", + "description": "modules.PIXIE.pixielib.utils.tensor_cropper", + "peekOfCode": "class Cropper(object):\n def __init__(self, crop_size, scale=[1,1], trans_scale = 0.):\n self.crop_size = crop_size\n self.scale = scale\n self.trans_scale = trans_scale\n def crop(self, image, points, points_scale=None):\n # points to bbox\n center, bbox_size = points2bbox(points.clone(), points_scale)\n # argument bbox. 
TODO: add rotation?\n center, bbox_size = augment_bbox(center, bbox_size, scale=self.scale, trans_scale=self.trans_scale)", + "detail": "modules.PIXIE.pixielib.utils.tensor_cropper", + "documentation": {} + }, + { + "label": "points2bbox", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.tensor_cropper", + "description": "modules.PIXIE.pixielib.utils.tensor_cropper", + "peekOfCode": "def points2bbox(points, points_scale=None):\n if points_scale:\n assert points_scale[0]==points_scale[1]\n points = points.clone()\n points[:,:,:2] = (points[:,:,:2]*0.5 + 0.5)*points_scale[0]\n min_coords, _ = torch.min(points, dim=1)\n xmin, ymin = min_coords[:, 0], min_coords[:, 1]\n max_coords, _ = torch.max(points, dim=1)\n xmax, ymax = max_coords[:, 0], max_coords[:, 1]\n center = torch.stack([xmax + xmin, ymax + ymin], dim=-1) * 0.5", + "detail": "modules.PIXIE.pixielib.utils.tensor_cropper", + "documentation": {} + }, + { + "label": "augment_bbox", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.tensor_cropper", + "description": "modules.PIXIE.pixielib.utils.tensor_cropper", + "peekOfCode": "def augment_bbox(center, bbox_size, scale=[1.0, 1.0], trans_scale=0.):\n batch_size = center.shape[0]\n trans_scale = (torch.rand([batch_size, 2], device=center.device)*2. -1.) * trans_scale\n center = center + trans_scale*bbox_size # 0.5\n scale = torch.rand([batch_size,1], device=center.device) * (scale[1] - scale[0]) + scale[0]\n size = bbox_size*scale\n return center, size\ndef crop_tensor(image, center, bbox_size, crop_size, interpolation = 'bilinear', align_corners=False):\n ''' for batch image\n Args:", + "detail": "modules.PIXIE.pixielib.utils.tensor_cropper", + "documentation": {} + }, + { + "label": "crop_tensor", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.tensor_cropper", + "description": "modules.PIXIE.pixielib.utils.tensor_cropper", + "peekOfCode": "def crop_tensor(image, center, bbox_size, crop_size, interpolation = 'bilinear', align_corners=False):\n ''' for batch image\n Args:\n image (torch.Tensor): the reference tensor of shape BXHxWXC.\n center: [bz, 2]\n bboxsize: [bz, 1]\n crop_size;\n interpolation (str): Interpolation flag. Default: 'bilinear'.\n align_corners (bool): mode for grid_generation. Default: False. 
See\n https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.interpolate for details", + "detail": "modules.PIXIE.pixielib.utils.tensor_cropper", + "documentation": {} + }, + { + "label": "transform_points", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.tensor_cropper", + "description": "modules.PIXIE.pixielib.utils.tensor_cropper", + "peekOfCode": "def transform_points(points, tform, points_scale=None):\n points_2d = points[:,:,:2]\n # -1 to 1\n #'input points must use original range'\n if points_scale:\n assert points_scale[0]==points_scale[1]\n points_2d = (points_2d*0.5 + 0.5)*points_scale[0]\n # import ipdb; ipdb.set_trace()\n # 0 - points_scale\n batch_size, n_points, _ = points.shape", + "detail": "modules.PIXIE.pixielib.utils.tensor_cropper", + "documentation": {} + }, + { + "label": "dict_tensor2npy", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def dict_tensor2npy(tensor_dict):\n npy_dict = {}\n for key, value in tensor_dict.items():\n if type(value)==torch.Tensor:\n npy_dict[key] = value.detach().cpu().numpy()\n else:\n npy_dict[key] = value\n return npy_dict\n# def dict_tensor2npy(tensor_dict):\n# npy_dict = {}", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "perspective_projection", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def perspective_projection(points, rotation, translation,camera_intrics):\n \"\"\"\n This function computes the perspective projection of a set of points.\n Input:\n points (bs, N, 3): 3D points\n rotation (bs, 3, 3): Camera rotation\n translation (bs, 3): Camera translation\n focal_length (bs,) or scalar: Focal length\n camera_center (bs, 2): Camera center\n \"\"\"", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "estimate_translation_np", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def estimate_translation_np(S, joints_2d, joints_conf, camera_intrics):\n \"\"\"Find camera translation that brings 3D joints S closest to 2D the corresponding joints_2d.\n Input:\n S: (25, 3) 3D joint locations\n joints: (25, 3) 2D joint locations and confidence\n Returns:\n (3,) camera translation vector\n \"\"\"\n num_joints = S.shape[0]\n # focal length", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "show_tensor_img", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def show_tensor_img(im):\n im = im.cpu().numpy()\n im = im*255\n im = im.transpose(1, 2, 0) # chw-->hwc\n im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n cv2.imwrite('test.jpg', im)\ndef show_face(datas):\n ax = plt.gca() \n ax.xaxis.set_ticks_position('top')\n ax.invert_yaxis() # invert y-axis", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "show_face", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def show_face(datas):\n ax = plt.gca() \n ax.xaxis.set_ticks_position('top')\n ax.invert_yaxis() # invert y-axis\n plt.scatter(datas[:,0],datas[:,1])\n# ---------------------------- process/generate vertices, normals, faces\ndef generate_triangles(h, w, mask = None):\n '''\n quad 
layout:\n 0 1 ... w-1", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "generate_triangles", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def generate_triangles(h, w, mask = None):\n '''\n quad layout:\n 0 1 ... w-1\n w w+1\n .\n w*h\n '''\n triangles = []\n margin=0", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "face_vertices", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def face_vertices(vertices, faces):\n \"\"\"\n borrowed from https://github.com/daniilidis-group/neural_renderer/blob/master/neural_renderer/vertices_to_faces.py\n :param vertices: [batch size, number of vertices, 3]\n :param faces: [batch size, number of faces, 3]\n :return: [batch size, number of faces, 3, 3]\n \"\"\"\n assert (vertices.ndimension() == 3)\n assert (faces.ndimension() == 3)\n assert (vertices.shape[0] == faces.shape[0])", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "vertex_normals", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def vertex_normals(vertices, faces):\n \"\"\"\n borrowed from https://github.com/daniilidis-group/neural_renderer/blob/master/neural_renderer/vertices_to_faces.py\n :param vertices: [batch size, number of vertices, 3]\n :param faces: [batch size, number of faces, 3]\n :return: [batch size, number of vertices, 3]\n \"\"\"\n assert (vertices.ndimension() == 3)\n assert (faces.ndimension() == 3)\n assert (vertices.shape[0] == faces.shape[0])", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "batch_orth_proj", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def batch_orth_proj(X, camera):\n '''\n X is N x num_verts x 3\n '''\n camera = camera.clone().view(-1, 1, 3)\n X_trans = X[:, :, :2] + camera[:, :, 1:]\n X_trans = torch.cat([X_trans, X[:,:,2:]], 2)\n Xn = (camera[:, :, 0:1] * X_trans)\n return Xn\n##### borrowed from https://github.com/vchoutas/expose", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "flip_pose", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def flip_pose(pose_vector, pose_format='rot-mat'):\n if pose_format == 'aa':\n if torch.is_tensor(pose_vector):\n dim_flip = DIM_FLIP_TENSOR\n else:\n dim_flip = DIM_FLIP\n return (pose_vector.reshape(-1, 3) * dim_flip).reshape(-1)\n elif pose_format == 'rot-mat':\n rot_mats = pose_vector.reshape(-1, 9).clone()\n rot_mats[:, [1, 2, 3, 6]] *= -1", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "gaussian", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def gaussian(window_size, sigma):\n def gauss_fcn(x):\n return -(x - window_size // 2)**2 / float(2 * sigma**2)\n gauss = torch.stack(\n [torch.exp(torch.tensor(gauss_fcn(x))) for x in range(window_size)])\n return gauss / gauss.sum()\ndef get_gaussian_kernel(kernel_size: int, sigma: float):\n r\"\"\"Function that returns Gaussian filter coefficients.\n Args:\n kernel_size (int): 
filter size. It should be odd and positive.", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "get_gaussian_kernel", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def get_gaussian_kernel(kernel_size: int, sigma: float):\n r\"\"\"Function that returns Gaussian filter coefficients.\n Args:\n kernel_size (int): filter size. It should be odd and positive.\n sigma (float): gaussian standard deviation.\n Returns:\n Tensor: 1D tensor with gaussian filter coefficients.\n Shape:\n - Output: :math:`(\\text{kernel_size})`\n Examples::", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "get_gaussian_kernel2d", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def get_gaussian_kernel2d(kernel_size, sigma):\n r\"\"\"Function that returns Gaussian filter matrix coefficients.\n Args:\n kernel_size (Tuple[int, int]): filter sizes in the x and y direction.\n Sizes should be odd and positive.\n sigma (Tuple[int, int]): gaussian standard deviation in the x and y\n direction.\n Returns:\n Tensor: 2D tensor with gaussian filter matrix coefficients.\n Shape:", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "gaussian_blur", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def gaussian_blur(x, kernel_size=(5,5), sigma=(1.3,1.3)):\n b, c, h, w = x.shape\n kernel = get_gaussian_kernel2d(kernel_size, sigma).to(x.device).to(x.dtype)\n kernel = kernel.repeat(c, 1, 1, 1)\n padding = [(k - 1) // 2 for k in kernel_size]\n return F.conv2d(x, kernel, padding=padding, stride=1, groups=c)\ndef _compute_binary_kernel(window_size):\n r\"\"\"Creates a binary kernel to extract the patches. 
If the window size\n is HxW, it will create a (H*W)xHxW kernel.\n \"\"\"", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "median_blur", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def median_blur(x, kernel_size=(3,3)):\n b, c, h, w = x.shape\n kernel = _compute_binary_kernel(kernel_size).to(x.device).to(x.dtype)\n kernel = kernel.repeat(c, 1, 1, 1)\n padding = [(k - 1) // 2 for k in kernel_size]\n features = F.conv2d(x, kernel, padding=padding, stride=1, groups=c)\n features = features.view(b,c,-1,h,w)\n median = torch.median(features, dim=2)[0]\n return median\ndef get_laplacian_kernel2d(kernel_size: int):", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "get_laplacian_kernel2d", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def get_laplacian_kernel2d(kernel_size: int):\n r\"\"\"Function that returns Laplacian filter matrix coefficients.\n Args:\n kernel_size (int): filter size should be odd.\n Returns:\n Tensor: 2D tensor with laplacian filter matrix coefficients.\n Shape:\n - Output: :math:`(\\text{kernel_size}_x, \\text{kernel_size}_y)`\n Examples::\n >>> kornia.image.get_laplacian_kernel2d(3)", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "laplacian", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def laplacian(x):\n # https://torchgeometry.readthedocs.io/en/latest/_modules/kornia/filters/laplacian.html\n b, c, h, w = x.shape\n kernel_size = 3\n kernel = get_laplacian_kernel2d(kernel_size).to(x.device).to(x.dtype)\n kernel = kernel.repeat(c, 1, 1, 1)\n padding = (kernel_size - 1) // 2\n return F.conv2d(x, kernel, padding=padding, stride=1, groups=c)\n# -------------------------------------- io\ndef copy_state_dict(cur_state_dict, pre_state_dict, prefix='', load_name=None):", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "copy_state_dict", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def copy_state_dict(cur_state_dict, pre_state_dict, prefix='', load_name=None):\n def _get_params(key):\n key = prefix + key\n if key in pre_state_dict:\n return pre_state_dict[key]\n return None\n for k in cur_state_dict.keys():\n if load_name is not None:\n if load_name not in k:\n continue", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "dict2obj", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def dict2obj(d):\n # if isinstance(d, list):\n # d = [dict2obj(x) for x in d]\n if not isinstance(d, dict):\n return d\n class C(object):\n pass\n o = C()\n for k in d:\n o.__dict__[k] = dict2obj(d[k])", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "remove_module", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def remove_module(state_dict):\n# create new OrderedDict that does not contain `module.`\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n name = k[7:] # remove `module.`\n 
new_state_dict[name] = v\n return new_state_dict\ndef tensor2image(tensor):\n image = tensor.detach().cpu().numpy()\n image = image*255.", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "tensor2image", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def tensor2image(tensor):\n image = tensor.detach().cpu().numpy()\n image = image*255.\n image = np.maximum(np.minimum(image, 255), 0)\n image = image.transpose(1,2,0)[:,:,[2,1,0]]\n return image.astype(np.uint8).copy()\ndef load_config(cfg_file):\n import yaml\n with open(cfg_file, 'r') as f:\n cfg = yaml.load(f, Loader=yaml.FullLoader)", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "load_config", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def load_config(cfg_file):\n import yaml\n with open(cfg_file, 'r') as f:\n cfg = yaml.load(f, Loader=yaml.FullLoader)\n return cfg\ndef move_dict_to_device(dict, device, tensor2float=False):\n for k,v in dict.items():\n if isinstance(v, torch.Tensor):\n if tensor2float:\n dict[k] = v.float().to(device)", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "move_dict_to_device", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def move_dict_to_device(dict, device, tensor2float=False):\n for k,v in dict.items():\n if isinstance(v, torch.Tensor):\n if tensor2float:\n dict[k] = v.float().to(device)\n else:\n dict[k] = v.to(device)\ndef write_obj(obj_name,\n vertices,\n faces,", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "write_obj", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def write_obj(obj_name,\n vertices,\n faces,\n colors=None,\n texture=None,\n uvcoords=None,\n uvfaces=None,\n inverse_face_order=False,\n normal_map=None,\n ):", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "save_pkl", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def save_pkl(savepath, params, ind=0):\n out_data = {}\n for k, v in params.items():\n if torch.is_tensor(v):\n out_data[k] = v[ind].detach().cpu().numpy()\n else:\n out_data[k] = v\n # import ipdb; ipdb.set_trace()\n with open(savepath, 'wb') as f:\n pickle.dump(out_data, f, protocol=2)", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "load_obj", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def load_obj(obj_filename):\n \"\"\" Ref: https://github.com/facebookresearch/pytorch3d/blob/25c065e9dafa90163e7cec873dbb324a637c68b7/pytorch3d/io/obj_io.py\n Load a mesh from a file-like object.\n \"\"\"\n with open(obj_filename, 'r') as f:\n lines = [line.strip() for line in f]\n verts, uvcoords = [], []\n faces, uv_faces = [], []\n # startswith expects each line to be a string. 
If the file is read in as\n # bytes then first decode to strings.", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "draw_rectangle", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def draw_rectangle(img,\n bbox,\n bbox_color=(255, 255, 255),\n thickness=3,\n is_opaque=False,\n alpha=0.5):\n \"\"\"Draws the rectangle around the object\n borrowed from: https://bbox-visualizer.readthedocs.io/en/latest/_modules/bbox_visualizer/bbox_visualizer.html\n Parameters\n ----------", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "plot_bbox", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def plot_bbox(image, bbox):\n ''' Draw bbox\n Args:\n image: the input image\n bbox: [left, top, right, bottom]\n '''\n image = cv2.rectangle(image.copy(), (bbox[1], bbox[0]), (bbox[3], bbox[2]),\n [0,255,0], thickness=3)\n # image = draw_rectangle(image, bbox, bbox_color=[0,255,0])\n return image", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "plot_kpts", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def plot_kpts(image, kpts, color = 'r'):\n ''' Draw 68 key points\n Args:\n image: the input image\n kpt: (68, 3).\n '''\n kpts = kpts.copy().astype(np.int32)\n if color == 'r':\n c = (255, 0, 0)\n elif color == 'g':", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "plot_verts", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def plot_verts(image, kpts, color = 'r'):\n ''' Draw 68 key points\n Args:\n image: the input image\n kpt: (68, 3).\n '''\n kpts = kpts.copy().astype(np.int32)\n if color == 'r':\n c = (255, 0, 0)\n elif color == 'g':", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "tensor_vis_landmarks", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "def tensor_vis_landmarks(images, landmarks, gt_landmarks=None, color = 'g', isScale=True):\n # visualize landmarks\n vis_landmarks = []\n images = images.cpu().numpy()\n predicted_landmarks = landmarks.detach().cpu().numpy()\n if gt_landmarks is not None:\n gt_landmarks_np = gt_landmarks.detach().cpu().numpy()\n for i in range(images.shape[0]):\n image = images[i]\n image = image.transpose(1,2,0)[:,:,[2,1,0]].copy(); image = (image*255)", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "DIM_FLIP", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "DIM_FLIP = np.array([1, -1, -1], dtype=np.float32)\nDIM_FLIP_TENSOR = torch.tensor([1, -1, -1], dtype=torch.float32)\ndef flip_pose(pose_vector, pose_format='rot-mat'):\n if pose_format == 'aa':\n if torch.is_tensor(pose_vector):\n dim_flip = DIM_FLIP_TENSOR\n else:\n dim_flip = DIM_FLIP\n return (pose_vector.reshape(-1, 3) * dim_flip).reshape(-1)\n elif pose_format == 'rot-mat':", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "DIM_FLIP_TENSOR", + "kind": 5, + "importPath": 
"modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "DIM_FLIP_TENSOR = torch.tensor([1, -1, -1], dtype=torch.float32)\ndef flip_pose(pose_vector, pose_format='rot-mat'):\n if pose_format == 'aa':\n if torch.is_tensor(pose_vector):\n dim_flip = DIM_FLIP_TENSOR\n else:\n dim_flip = DIM_FLIP\n return (pose_vector.reshape(-1, 3) * dim_flip).reshape(-1)\n elif pose_format == 'rot-mat':\n rot_mats = pose_vector.reshape(-1, 9).clone()", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "end_list", + "kind": 5, + "importPath": "modules.PIXIE.pixielib.utils.util", + "description": "modules.PIXIE.pixielib.utils.util", + "peekOfCode": "end_list = np.array([17, 22, 27, 42, 48, 31, 36, 68], dtype = np.int32) - 1\ndef plot_kpts(image, kpts, color = 'r'):\n ''' Draw 68 key points\n Args:\n image: the input image\n kpt: (68, 3).\n '''\n kpts = kpts.copy().astype(np.int32)\n if color == 'r':\n c = (255, 0, 0)", + "detail": "modules.PIXIE.pixielib.utils.util", + "documentation": {} + }, + { + "label": "PIXIE", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.pixie", + "description": "modules.PIXIE.pixielib.pixie", + "peekOfCode": "class PIXIE(object):\n def __init__(self, config=None, device='cuda:0'):\n if config is None:\n self.cfg = cfg\n else:\n self.cfg = config\n self.device = device\n # parameters setting\n self.param_list_dict = {}\n for lst in self.cfg.params.keys():", + "detail": "modules.PIXIE.pixielib.pixie", + "documentation": {} + }, + { + "label": "Visualizer", + "kind": 6, + "importPath": "modules.PIXIE.pixielib.visualizer", + "description": "modules.PIXIE.pixielib.visualizer", + "peekOfCode": "class Visualizer(object):\n ''' visualizer\n '''\n def __init__(self, render_size=1024, config=None, device='cuda:0', part='body', background=None, rasterizer_type='standard'):\n if config is None:\n self.cfg = cfg\n else:\n self.cfg = config\n self.device = device\n self.render_size = render_size", + "detail": "modules.PIXIE.pixielib.visualizer", + "documentation": {} + }, + { + "label": "color_map_color", + "kind": 2, + "importPath": "modules.PIXIE.pixielib.visualizer", + "description": "modules.PIXIE.pixielib.visualizer", + "peekOfCode": "def color_map_color(value, cmap_name='cool', vmin=0, vmax=1):\n # norm = plt.Normalize(vmin, vmax)\n cmap = cm.get_cmap(cmap_name) # PiYG\n # will return rgba, we take only first 3 so we get rgb\n rgb = cmap(value)[:, :3]\n # color = matplotlib.colors.rgb2hex(rgb)\n # rgb = np.tile(value[:,None], (1,3))\n # rgb = np.exp(rgb)\n return rgb\nclass Visualizer(object):", + "detail": "modules.PIXIE.pixielib.visualizer", + "documentation": {} + }, + { + "label": "CustomFormatter", + "kind": 6, + "importPath": "modules.PyMAF.apps.demo_smplx", + "description": "modules.PyMAF.apps.demo_smplx", + "peekOfCode": "class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,\n argparse.RawDescriptionHelpFormatter):\n pass\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(formatter_class=CustomFormatter)\n print('initializing openpifpaf')\n ppnetwork.Factory.cli(parser)\n ppdecoder.cli(parser)\n Predictor.cli(parser)\n Stream.cli(parser)", + "detail": "modules.PyMAF.apps.demo_smplx", + "documentation": {} + }, + { + "label": "prepare_rendering_results", + "kind": 2, + "importPath": "modules.PyMAF.apps.demo_smplx", + "description": "modules.PyMAF.apps.demo_smplx", + "peekOfCode": "def prepare_rendering_results(person_data, nframes):\n frame_results = [{} for _ in 
range(nframes)]\n for idx, frame_id in enumerate(person_data['frame_ids']):\n person_id = person_data['person_ids'][idx],\n frame_results[frame_id][person_id] = {\n 'verts': person_data['verts'][idx],\n 'smplx_verts': person_data['smplx_verts'][idx] if 'smplx_verts' in person_data else None,\n 'cam': person_data['orig_cam'][idx],\n 'cam_t': person_data['orig_cam_t'][idx] if 'orig_cam_t' in person_data else None,\n # 'cam': person_data['pred_cam'][idx],", + "detail": "modules.PyMAF.apps.demo_smplx", + "documentation": {} + }, + { + "label": "run_demo", + "kind": 2, + "importPath": "modules.PyMAF.apps.demo_smplx", + "description": "modules.PyMAF.apps.demo_smplx", + "peekOfCode": "def run_demo(args):\n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n if args.image_folder is None:\n video_file = args.vid_file\n # ========= [Optional] download the youtube video ========= #\n if video_file.startswith('https://www.youtube.com'):\n print(f'Downloading YouTube video \\\"{video_file}\\\"')\n video_file = download_url(video_file, '/tmp')\n if video_file is None:\n exit('Youtube url is not valid!')", + "detail": "modules.PyMAF.apps.demo_smplx", + "documentation": {} + }, + { + "label": "torch.backends.cudnn.enabled", + "kind": 5, + "importPath": "modules.PyMAF.apps.demo_smplx", + "description": "modules.PyMAF.apps.demo_smplx", + "peekOfCode": "torch.backends.cudnn.enabled = False\nMIN_NUM_FRAMES = 1\ndef prepare_rendering_results(person_data, nframes):\n frame_results = [{} for _ in range(nframes)]\n for idx, frame_id in enumerate(person_data['frame_ids']):\n person_id = person_data['person_ids'][idx],\n frame_results[frame_id][person_id] = {\n 'verts': person_data['verts'][idx],\n 'smplx_verts': person_data['smplx_verts'][idx] if 'smplx_verts' in person_data else None,\n 'cam': person_data['orig_cam'][idx],", + "detail": "modules.PyMAF.apps.demo_smplx", + "documentation": {} + }, + { + "label": "MIN_NUM_FRAMES", + "kind": 5, + "importPath": "modules.PyMAF.apps.demo_smplx", + "description": "modules.PyMAF.apps.demo_smplx", + "peekOfCode": "MIN_NUM_FRAMES = 1\ndef prepare_rendering_results(person_data, nframes):\n frame_results = [{} for _ in range(nframes)]\n for idx, frame_id in enumerate(person_data['frame_ids']):\n person_id = person_data['person_ids'][idx],\n frame_results[frame_id][person_id] = {\n 'verts': person_data['verts'][idx],\n 'smplx_verts': person_data['smplx_verts'][idx] if 'smplx_verts' in person_data else None,\n 'cam': person_data['orig_cam'][idx],\n 'cam_t': person_data['orig_cam_t'][idx] if 'orig_cam_t' in person_data else None,", + "detail": "modules.PyMAF.apps.demo_smplx", + "documentation": {} + }, + { + "label": "get_image", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.img_utils", + "description": "modules.PyMAF.datasets.data_utils.img_utils", + "peekOfCode": "def get_image(filename):\n image = cv2.imread(filename)\n return cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\ndef do_augmentation(scale_factor=0.3, color_factor=0.2):\n scale = random.uniform(1.2, 1.2+scale_factor)\n # scale = np.clip(np.random.randn(), 0.0, 1.0) * scale_factor + 1.2\n rot = 0 # np.clip(np.random.randn(), -2.0, 2.0) * aug_config.rot_factor if random.random() <= aug_config.rot_aug_rate else 0\n do_flip = False # aug_config.do_flip_aug and random.random() <= aug_config.flip_aug_rate\n c_up = 1.0 + color_factor\n c_low = 1.0 - color_factor", + "detail": "modules.PyMAF.datasets.data_utils.img_utils", + "documentation": {} + }, + { + "label": 
"do_augmentation", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.img_utils", + "description": "modules.PyMAF.datasets.data_utils.img_utils", + "peekOfCode": "def do_augmentation(scale_factor=0.3, color_factor=0.2):\n scale = random.uniform(1.2, 1.2+scale_factor)\n # scale = np.clip(np.random.randn(), 0.0, 1.0) * scale_factor + 1.2\n rot = 0 # np.clip(np.random.randn(), -2.0, 2.0) * aug_config.rot_factor if random.random() <= aug_config.rot_aug_rate else 0\n do_flip = False # aug_config.do_flip_aug and random.random() <= aug_config.flip_aug_rate\n c_up = 1.0 + color_factor\n c_low = 1.0 - color_factor\n color_scale = [random.uniform(c_low, c_up), random.uniform(c_low, c_up), random.uniform(c_low, c_up)]\n return scale, rot, do_flip, color_scale\ndef trans_point2d(pt_2d, trans):", + "detail": "modules.PyMAF.datasets.data_utils.img_utils", + "documentation": {} + }, + { + "label": "trans_point2d", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.img_utils", + "description": "modules.PyMAF.datasets.data_utils.img_utils", + "peekOfCode": "def trans_point2d(pt_2d, trans):\n src_pt = np.array([pt_2d[0], pt_2d[1], 1.]).T\n dst_pt = np.dot(trans, src_pt)\n return dst_pt[0:2]\ndef rotate_2d(pt_2d, rot_rad):\n x = pt_2d[0]\n y = pt_2d[1]\n sn, cs = np.sin(rot_rad), np.cos(rot_rad)\n xx = x * cs - y * sn\n yy = x * sn + y * cs", + "detail": "modules.PyMAF.datasets.data_utils.img_utils", + "documentation": {} + }, + { + "label": "rotate_2d", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.img_utils", + "description": "modules.PyMAF.datasets.data_utils.img_utils", + "peekOfCode": "def rotate_2d(pt_2d, rot_rad):\n x = pt_2d[0]\n y = pt_2d[1]\n sn, cs = np.sin(rot_rad), np.cos(rot_rad)\n xx = x * cs - y * sn\n yy = x * sn + y * cs\n return np.array([xx, yy], dtype=np.float32)\ndef gen_trans_from_patch_cv(c_x, c_y, src_width, src_height, dst_width, dst_height, scale, rot, inv=False):\n # augment size with scale\n src_w = src_width * scale", + "detail": "modules.PyMAF.datasets.data_utils.img_utils", + "documentation": {} + }, + { + "label": "gen_trans_from_patch_cv", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.img_utils", + "description": "modules.PyMAF.datasets.data_utils.img_utils", + "peekOfCode": "def gen_trans_from_patch_cv(c_x, c_y, src_width, src_height, dst_width, dst_height, scale, rot, inv=False):\n # augment size with scale\n src_w = src_width * scale\n src_h = src_height * scale\n src_center = np.zeros(2)\n src_center[0] = c_x\n src_center[1] = c_y # np.array([c_x, c_y], dtype=np.float32)\n # augment rotation\n rot_rad = np.pi * rot / 180\n src_downdir = rotate_2d(np.array([0, src_h * 0.5], dtype=np.float32), rot_rad)", + "detail": "modules.PyMAF.datasets.data_utils.img_utils", + "documentation": {} + }, + { + "label": "generate_patch_image_cv", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.img_utils", + "description": "modules.PyMAF.datasets.data_utils.img_utils", + "peekOfCode": "def generate_patch_image_cv(cvimg, c_x, c_y, bb_width, bb_height, patch_width, patch_height, do_flip, scale, rot):\n img = cvimg.copy()\n img_height, img_width, img_channels = img.shape\n if do_flip:\n img = img[:, ::-1, :]\n c_x = img_width - c_x - 1\n trans = gen_trans_from_patch_cv(c_x, c_y, bb_width, bb_height, patch_width, patch_height, scale, rot, inv=False)\n img_patch = cv2.warpAffine(img, trans, (int(patch_width), int(patch_height)),\n flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)\n return img_patch, trans", + 
"detail": "modules.PyMAF.datasets.data_utils.img_utils", + "documentation": {} + }, + { + "label": "crop_image", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.img_utils", + "description": "modules.PyMAF.datasets.data_utils.img_utils", + "peekOfCode": "def crop_image(image, kp_2d, center_x, center_y, width, height, patch_width, patch_height, do_augment):\n # get augmentation params\n if do_augment:\n scale, rot, do_flip, color_scale = do_augmentation()\n else:\n scale, rot, do_flip, color_scale = 1.3, 0, False, [1.0, 1.0, 1.0]\n # generate image patch\n image, trans = generate_patch_image_cv(\n image,\n center_x,", + "detail": "modules.PyMAF.datasets.data_utils.img_utils", + "documentation": {} + }, + { + "label": "transfrom_keypoints", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.img_utils", + "description": "modules.PyMAF.datasets.data_utils.img_utils", + "peekOfCode": "def transfrom_keypoints(kp_2d, center_x, center_y, width, height, patch_width, patch_height, do_augment):\n if do_augment:\n scale, rot, do_flip, color_scale = do_augmentation()\n else:\n scale, rot, do_flip, color_scale = 1.2, 0, False, [1.0, 1.0, 1.0]\n # generate transformation\n trans = gen_trans_from_patch_cv(\n center_x,\n center_y,\n width,", + "detail": "modules.PyMAF.datasets.data_utils.img_utils", + "documentation": {} + }, + { + "label": "get_image_crops", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.img_utils", + "description": "modules.PyMAF.datasets.data_utils.img_utils", + "peekOfCode": "def get_image_crops(image_file, bboxes):\n image = cv2.cvtColor(cv2.imread(image_file), cv2.COLOR_BGR2RGB)\n crop_images = []\n for bb in bboxes:\n c_y, c_x = (bb[0]+bb[2]) // 2, (bb[1]+bb[3]) // 2\n h, w = bb[2]-bb[0], bb[3]-bb[1]\n w = h = np.where(w / h > 1, w, h)\n crop_image, _ = generate_patch_image_cv(\n cvimg=image.copy(),\n c_x=c_x,", + "detail": "modules.PyMAF.datasets.data_utils.img_utils", + "documentation": {} + }, + { + "label": "get_single_image_crop", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.img_utils", + "description": "modules.PyMAF.datasets.data_utils.img_utils", + "peekOfCode": "def get_single_image_crop(image, bbox, scale=1.3):\n if isinstance(image, str):\n if os.path.isfile(image):\n image = cv2.cvtColor(cv2.imread(image), cv2.COLOR_BGR2RGB)\n else:\n print(image)\n raise BaseException(image, 'is not a valid file!')\n elif isinstance(image, torch.Tensor):\n image = image.numpy()\n elif not isinstance(image, np.ndarray):", + "detail": "modules.PyMAF.datasets.data_utils.img_utils", + "documentation": {} + }, + { + "label": "get_single_image_crop_demo", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.img_utils", + "description": "modules.PyMAF.datasets.data_utils.img_utils", + "peekOfCode": "def get_single_image_crop_demo(image, bbox, kp_2d, scale=1.2, crop_size=224):\n if isinstance(image, str):\n if os.path.isfile(image):\n image = cv2.cvtColor(cv2.imread(image), cv2.COLOR_BGR2RGB)\n else:\n print(image)\n raise BaseException(image, 'is not a valid file!')\n elif isinstance(image, torch.Tensor):\n image = image.numpy()\n elif not isinstance(image, np.ndarray):", + "detail": "modules.PyMAF.datasets.data_utils.img_utils", + "documentation": {} + }, + { + "label": "read_image", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.img_utils", + "description": "modules.PyMAF.datasets.data_utils.img_utils", + "peekOfCode": "def read_image(filename):\n image = cv2.cvtColor(cv2.imread(filename), 
cv2.COLOR_BGR2RGB)\n image = cv2.resize(image, (224,224))\n return convert_cvimg_to_tensor(image)\ndef convert_cvimg_to_tensor(image):\n transform = get_default_transform()\n image = transform(image)\n return image\ndef torch_inv_normal(image):\n image = image * torch.tensor([0.229, 0.224, 0.225], device=image.device).reshape(1, 3, 1, 1)", + "detail": "modules.PyMAF.datasets.data_utils.img_utils", + "documentation": {} + }, + { + "label": "convert_cvimg_to_tensor", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.img_utils", + "description": "modules.PyMAF.datasets.data_utils.img_utils", + "peekOfCode": "def convert_cvimg_to_tensor(image):\n transform = get_default_transform()\n image = transform(image)\n return image\ndef torch_inv_normal(image):\n image = image * torch.tensor([0.229, 0.224, 0.225], device=image.device).reshape(1, 3, 1, 1)\n image = image + torch.tensor([0.485, 0.456, 0.406], device=image.device).reshape(1, 3, 1, 1)\n image = image.clamp(0., 1.)\n return image\ndef torch2numpy(image):", + "detail": "modules.PyMAF.datasets.data_utils.img_utils", + "documentation": {} + }, + { + "label": "torch_inv_normal", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.img_utils", + "description": "modules.PyMAF.datasets.data_utils.img_utils", + "peekOfCode": "def torch_inv_normal(image):\n image = image * torch.tensor([0.229, 0.224, 0.225], device=image.device).reshape(1, 3, 1, 1)\n image = image + torch.tensor([0.485, 0.456, 0.406], device=image.device).reshape(1, 3, 1, 1)\n image = image.clamp(0., 1.)\n return image\ndef torch2numpy(image):\n image = image.detach().cpu()\n inv_normalize = transforms.Normalize(\n mean=[-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.255],\n std=[1 / 0.229, 1 / 0.224, 1 / 0.255]", + "detail": "modules.PyMAF.datasets.data_utils.img_utils", + "documentation": {} + }, + { + "label": "torch2numpy", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.img_utils", + "description": "modules.PyMAF.datasets.data_utils.img_utils", + "peekOfCode": "def torch2numpy(image):\n image = image.detach().cpu()\n inv_normalize = transforms.Normalize(\n mean=[-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.255],\n std=[1 / 0.229, 1 / 0.224, 1 / 0.255]\n )\n image = inv_normalize(image)\n image = image.clamp(0., 1.)\n image = image.numpy() * 255.\n image = np.transpose(image, (1, 2, 0))", + "detail": "modules.PyMAF.datasets.data_utils.img_utils", + "documentation": {} + }, + { + "label": "torch_vid2numpy", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.img_utils", + "description": "modules.PyMAF.datasets.data_utils.img_utils", + "peekOfCode": "def torch_vid2numpy(video):\n video = video.detach().cpu().numpy()\n # video = np.transpose(video, (0, 2, 1, 3, 4)) # NCTHW->NTCHW\n # Denormalize\n mean = np.array([-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.255])\n std = np.array([1 / 0.229, 1 / 0.224, 1 / 0.255])\n mean = mean[np.newaxis, np.newaxis, ..., np.newaxis, np.newaxis]\n std = std[np.newaxis, np.newaxis, ..., np.newaxis, np.newaxis]\n video = (video - mean) / std # [:, :, i, :, :].sub_(mean[i]).div_(std[i]).clamp_(0., 1.).mul_(255.)\n video = video.clip(0.,1.) 
* 255", + "detail": "modules.PyMAF.datasets.data_utils.img_utils", + "documentation": {} + }, + { + "label": "get_bbox_from_kp2d", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.img_utils", + "description": "modules.PyMAF.datasets.data_utils.img_utils", + "peekOfCode": "def get_bbox_from_kp2d(kp_2d):\n # get bbox\n if len(kp_2d.shape) > 2:\n ul = np.array([kp_2d[:, :, 0].min(axis=1), kp_2d[:, :, 1].min(axis=1)]) # upper left\n lr = np.array([kp_2d[:, :, 0].max(axis=1), kp_2d[:, :, 1].max(axis=1)]) # lower right\n else:\n ul = np.array([kp_2d[:, 0].min(), kp_2d[:, 1].min()]) # upper left\n lr = np.array([kp_2d[:, 0].max(), kp_2d[:, 1].max()]) # lower right\n # ul[1] -= (lr[1] - ul[1]) * 0.10 # prevent cutting the head\n w = lr[0] - ul[0]", + "detail": "modules.PyMAF.datasets.data_utils.img_utils", + "documentation": {} + }, + { + "label": "normalize_2d_kp", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.img_utils", + "description": "modules.PyMAF.datasets.data_utils.img_utils", + "peekOfCode": "def normalize_2d_kp(kp_2d, crop_size=224, inv=False):\n # Normalize keypoints between -1, 1\n if not inv:\n ratio = 1.0 / crop_size\n kp_2d = 2.0 * kp_2d * ratio - 1.0\n else:\n ratio = 1.0 / crop_size\n kp_2d = (kp_2d + 1.0)/(2*ratio)\n return kp_2d\ndef get_default_transform():", + "detail": "modules.PyMAF.datasets.data_utils.img_utils", + "documentation": {} + }, + { + "label": "get_default_transform", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.img_utils", + "description": "modules.PyMAF.datasets.data_utils.img_utils", + "peekOfCode": "def get_default_transform():\n normalize = transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]\n )\n transform = transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])\n return transform\ndef split_into_chunks(vid_names, seqlen, stride):", + "detail": "modules.PyMAF.datasets.data_utils.img_utils", + "documentation": {} + }, + { + "label": "split_into_chunks", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.img_utils", + "description": "modules.PyMAF.datasets.data_utils.img_utils", + "peekOfCode": "def split_into_chunks(vid_names, seqlen, stride):\n video_start_end_indices = []\n video_names, group = np.unique(vid_names, return_index=True)\n perm = np.argsort(group)\n video_names, group = video_names[perm], group[perm]\n indices = np.split(np.arange(0, vid_names.shape[0]), group[1:])\n for idx in range(len(video_names)):\n indexes = indices[idx]\n if indexes.shape[0] < seqlen:\n continue", + "detail": "modules.PyMAF.datasets.data_utils.img_utils", + "documentation": {} + }, + { + "label": "keypoint_hflip", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.kp_utils", + "description": "modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def keypoint_hflip(kp, img_width):\n # Flip a keypoint horizontally around the y-axis\n # kp N,2\n if len(kp.shape) == 2:\n kp[:,0] = (img_width - 1.) - kp[:,0]\n elif len(kp.shape) == 3:\n kp[:, :, 0] = (img_width - 1.) 
- kp[:, :, 0]\n return kp\ndef convert_kps(joints2d, src, dst):\n src_names = eval(f'get_{src}_joint_names')()", + "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "convert_kps", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.kp_utils", + "description": "modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def convert_kps(joints2d, src, dst):\n src_names = eval(f'get_{src}_joint_names')()\n dst_names = eval(f'get_{dst}_joint_names')()\n out_joints2d = np.zeros((joints2d.shape[0], len(dst_names), 3))\n for idx, jn in enumerate(dst_names):\n if jn in src_names:\n out_joints2d[:, idx] = joints2d[:, src_names.index(jn)]\n return out_joints2d\ndef get_perm_idxs(src, dst):\n src_names = eval(f'get_{src}_joint_names')()", + "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "get_perm_idxs", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.kp_utils", + "description": "modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def get_perm_idxs(src, dst):\n src_names = eval(f'get_{src}_joint_names')()\n dst_names = eval(f'get_{dst}_joint_names')()\n idxs = [src_names.index(h) for h in dst_names if h in src_names]\n return idxs\ndef get_mpii3d_test_joint_names():\n return [\n 'headtop', # 'head_top',\n 'neck',\n 'rshoulder',# 'right_shoulder',", + "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "get_mpii3d_test_joint_names", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.kp_utils", + "description": "modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def get_mpii3d_test_joint_names():\n return [\n 'headtop', # 'head_top',\n 'neck',\n 'rshoulder',# 'right_shoulder',\n 'relbow',# 'right_elbow',\n 'rwrist',# 'right_wrist',\n 'lshoulder',# 'left_shoulder',\n 'lelbow', # 'left_elbow',\n 'lwrist', # 'left_wrist',", + "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "get_mpii3d_joint_names", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.kp_utils", + "description": "modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def get_mpii3d_joint_names():\n return [\n 'spine3', # 0,\n 'spine4', # 1,\n 'spine2', # 2,\n 'Spine (H36M)', #'spine', # 3,\n 'hip', # 'pelvis', # 4,\n 'neck', # 5,\n 'Head (H36M)', # 'head', # 6,\n \"headtop\", # 'head_top', # 7,", + "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "get_insta_joint_names", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.kp_utils", + "description": "modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def get_insta_joint_names():\n return [\n 'OP RHeel',\n 'OP RKnee',\n 'OP RHip',\n 'OP LHip',\n 'OP LKnee',\n 'OP LHeel',\n 'OP RWrist',\n 'OP RElbow',", + "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "get_insta_skeleton", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.kp_utils", + "description": "modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def get_insta_skeleton():\n return np.array(\n [\n [0 , 1],\n [1 , 2],\n [2 , 3],\n [3 , 4],\n [4 , 5],\n [6 , 7],\n [7 , 8],", + "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "get_staf_skeleton", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.kp_utils", + "description": 
"modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def get_staf_skeleton():\n return np.array(\n [\n [0, 1],\n [1, 2],\n [2, 3],\n [3, 4],\n [1, 5],\n [5, 6],\n [6, 7],", + "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "get_staf_joint_names", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.kp_utils", + "description": "modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def get_staf_joint_names():\n return [\n 'OP Nose', # 0,\n 'OP Neck', # 1,\n 'OP RShoulder', # 2,\n 'OP RElbow', # 3,\n 'OP RWrist', # 4,\n 'OP LShoulder', # 5,\n 'OP LElbow', # 6,\n 'OP LWrist', # 7,", + "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "get_spin_joint_names", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.kp_utils", + "description": "modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def get_spin_joint_names():\n return [\n 'OP Nose', # 0\n 'OP Neck', # 1\n 'OP RShoulder', # 2\n 'OP RElbow', # 3\n 'OP RWrist', # 4\n 'OP LShoulder', # 5\n 'OP LElbow', # 6\n 'OP LWrist', # 7", + "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "get_h36m_joint_names", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.kp_utils", + "description": "modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def get_h36m_joint_names():\n return [\n 'hip', # 0\n 'lhip', # 1\n 'lknee', # 2\n 'lankle', # 3\n 'rhip', # 4\n 'rknee', # 5\n 'rankle', # 6\n 'Spine (H36M)', # 7", + "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "get_spin_skeleton", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.kp_utils", + "description": "modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def get_spin_skeleton():\n return np.array(\n [\n [0 , 1],\n [1 , 2],\n [2 , 3],\n [3 , 4],\n [1 , 5],\n [5 , 6],\n [6 , 7],", + "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "get_posetrack_joint_names", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.kp_utils", + "description": "modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def get_posetrack_joint_names():\n return [\n \"nose\",\n \"neck\",\n \"headtop\",\n \"lear\",\n \"rear\",\n \"lshoulder\",\n \"rshoulder\",\n \"lelbow\",", + "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "get_posetrack_original_kp_names", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.kp_utils", + "description": "modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def get_posetrack_original_kp_names():\n return [\n 'nose',\n 'head_bottom',\n 'head_top',\n 'left_ear',\n 'right_ear',\n 'left_shoulder',\n 'right_shoulder',\n 'left_elbow',", + "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "get_pennaction_joint_names", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.kp_utils", + "description": "modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def get_pennaction_joint_names():\n return [\n \"headtop\", # 0\n \"lshoulder\", # 1\n \"rshoulder\", # 2\n \"lelbow\", # 3\n \"relbow\", # 4\n \"lwrist\", # 5\n \"rwrist\", # 6\n \"lhip\" , # 7", + "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "get_common_joint_names", + "kind": 2, + "importPath": 
"modules.PyMAF.datasets.data_utils.kp_utils", + "description": "modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def get_common_joint_names():\n return [\n \"rankle\", # 0 \"lankle\", # 0\n \"rknee\", # 1 \"lknee\", # 1\n \"rhip\", # 2 \"lhip\", # 2\n \"lhip\", # 3 \"rhip\", # 3\n \"lknee\", # 4 \"rknee\", # 4\n \"lankle\", # 5 \"rankle\", # 5\n \"rwrist\", # 6 \"lwrist\", # 6\n \"relbow\", # 7 \"lelbow\", # 7", + "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "get_common_skeleton", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.kp_utils", + "description": "modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def get_common_skeleton():\n return np.array(\n [\n [ 0, 1 ],\n [ 1, 2 ],\n [ 3, 4 ],\n [ 4, 5 ],\n [ 6, 7 ],\n [ 7, 8 ],\n [ 8, 2 ],", + "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "get_coco_joint_names", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.kp_utils", + "description": "modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def get_coco_joint_names():\n return [\n \"nose\", # 0\n \"leye\", # 1\n \"reye\", # 2\n \"lear\", # 3\n \"rear\", # 4\n \"lshoulder\", # 5\n \"rshoulder\", # 6\n \"lelbow\", # 7", + "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "get_coco_skeleton", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.kp_utils", + "description": "modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def get_coco_skeleton():\n # 0 - nose,\n # 1 - leye,\n # 2 - reye,\n # 3 - lear,\n # 4 - rear,\n # 5 - lshoulder,\n # 6 - rshoulder,\n # 7 - lelbow,\n # 8 - relbow,", + "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "get_mpii_joint_names", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.kp_utils", + "description": "modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def get_mpii_joint_names():\n return [\n \"rankle\", # 0\n \"rknee\", # 1\n \"rhip\", # 2\n \"lhip\", # 3\n \"lknee\", # 4\n \"lankle\", # 5\n \"hip\", # 6\n \"thorax\", # 7", + "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "get_mpii_skeleton", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.kp_utils", + "description": "modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def get_mpii_skeleton():\n # 0 - rankle,\n # 1 - rknee,\n # 2 - rhip,\n # 3 - lhip,\n # 4 - lknee,\n # 5 - lankle,\n # 6 - hip,\n # 7 - thorax,\n # 8 - neck,", + "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "get_aich_joint_names", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.kp_utils", + "description": "modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def get_aich_joint_names():\n return [\n \"rshoulder\", # 0\n \"relbow\", # 1\n \"rwrist\", # 2\n \"lshoulder\", # 3\n \"lelbow\", # 4\n \"lwrist\", # 5\n \"rhip\", # 6\n \"rknee\", # 7", + "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "get_aich_skeleton", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.kp_utils", + "description": "modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def get_aich_skeleton():\n # 0 - rshoulder,\n # 1 - relbow,\n # 2 - rwrist,\n # 3 - lshoulder,\n # 4 - lelbow,\n # 5 - lwrist,\n # 6 - rhip,\n # 7 - rknee,\n # 8 - rankle,", 
+ "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "get_3dpw_joint_names", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.kp_utils", + "description": "modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def get_3dpw_joint_names():\n return [\n \"nose\", # 0\n \"thorax\", # 1\n \"rshoulder\", # 2\n \"relbow\", # 3\n \"rwrist\", # 4\n \"lshoulder\", # 5\n \"lelbow\", # 6\n \"lwrist\", # 7", + "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "get_3dpw_skeleton", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.kp_utils", + "description": "modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def get_3dpw_skeleton():\n return np.array(\n [\n [ 0, 1 ],\n [ 1, 2 ],\n [ 2, 3 ],\n [ 3, 4 ],\n [ 1, 5 ],\n [ 5, 6 ],\n [ 6, 7 ],", + "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "get_smplcoco_joint_names", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.kp_utils", + "description": "modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def get_smplcoco_joint_names():\n return [\n \"rankle\", # 0\n \"rknee\", # 1\n \"rhip\", # 2\n \"lhip\", # 3\n \"lknee\", # 4\n \"lankle\", # 5\n \"rwrist\", # 6\n \"relbow\", # 7", + "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "get_smplcoco_skeleton", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.kp_utils", + "description": "modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def get_smplcoco_skeleton():\n return np.array(\n [\n [ 0, 1 ],\n [ 1, 2 ],\n [ 3, 4 ],\n [ 4, 5 ],\n [ 6, 7 ],\n [ 7, 8 ],\n [ 8, 12],", + "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "get_smpl_joint_names", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.kp_utils", + "description": "modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def get_smpl_joint_names():\n return [\n 'hips', # 0\n 'leftUpLeg', # 1\n 'rightUpLeg', # 2\n 'spine', # 3\n 'leftLeg', # 4\n 'rightLeg', # 5\n 'spine1', # 6\n 'leftFoot', # 7", + "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "get_smpl_skeleton", + "kind": 2, + "importPath": "modules.PyMAF.datasets.data_utils.kp_utils", + "description": "modules.PyMAF.datasets.data_utils.kp_utils", + "peekOfCode": "def get_smpl_skeleton():\n return np.array(\n [\n [ 0, 1 ],\n [ 0, 2 ],\n [ 0, 3 ],\n [ 1, 4 ],\n [ 2, 5 ],\n [ 3, 6 ],\n [ 4, 7 ],", + "detail": "modules.PyMAF.datasets.data_utils.kp_utils", + "documentation": {} + }, + { + "label": "Inference", + "kind": 6, + "importPath": "modules.PyMAF.datasets.inference", + "description": "modules.PyMAF.datasets.inference", + "peekOfCode": "class Inference(Dataset):\n def __init__(self, image_folder, frames, bboxes=None, joints2d=None, scale=1.0, crop_size=224, pre_load_imgs=None, full_body=False, person_ids=[], wb_kps={}):\n self.pre_load_imgs = pre_load_imgs\n if pre_load_imgs is None:\n self.image_file_names = [\n osp.join(image_folder, x)\n for x in os.listdir(image_folder)\n if x.endswith('.png') or x.endswith('.jpg')\n ]\n self.image_file_names = sorted(self.image_file_names)", + "detail": "modules.PyMAF.datasets.inference", + "documentation": {} + }, + { + "label": "ImageFolder", + "kind": 6, + "importPath": "modules.PyMAF.datasets.inference", + "description": "modules.PyMAF.datasets.inference", + 
"peekOfCode": "class ImageFolder(Dataset):\n def __init__(self, image_folder):\n self.image_file_names = [\n osp.join(image_folder, x)\n for x in os.listdir(image_folder)\n if x.endswith('.png') or x.endswith('.jpg')\n ]\n self.image_file_names = sorted(self.image_file_names)\n def __len__(self):\n return len(self.image_file_names)", + "detail": "modules.PyMAF.datasets.inference", + "documentation": {} + }, + { + "label": "Graphormer_Body_Network", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.e2e_body_network", + "description": "modules.PyMAF.models.transformers.bert.e2e_body_network", + "peekOfCode": "class Graphormer_Body_Network(torch.nn.Module):\n '''\n End-to-end Graphormer network for human pose and mesh reconstruction from a single image.\n '''\n def __init__(self, args, config, backbone, trans_encoder, mesh_sampler):\n super(Graphormer_Body_Network, self).__init__()\n self.config = config\n self.config.device = args.device\n self.backbone = backbone\n self.trans_encoder = trans_encoder", + "detail": "modules.PyMAF.models.transformers.bert.e2e_body_network", + "documentation": {} + }, + { + "label": "Graphormer_Hand_Network", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.e2e_hand_network", + "description": "modules.PyMAF.models.transformers.bert.e2e_hand_network", + "peekOfCode": "class Graphormer_Hand_Network(torch.nn.Module):\n '''\n End-to-end Graphormer network for hand pose and mesh reconstruction from a single image.\n '''\n def __init__(self, args, config, backbone, trans_encoder):\n super(Graphormer_Hand_Network, self).__init__()\n self.config = config\n self.backbone = backbone\n self.trans_encoder = trans_encoder\n self.upsampling = torch.nn.Linear(195, 778)", + "detail": "modules.PyMAF.models.transformers.bert.e2e_hand_network", + "documentation": {} + }, + { + "label": "url_to_filename", + "kind": 2, + "importPath": "modules.PyMAF.models.transformers.bert.file_utils", + "description": "modules.PyMAF.models.transformers.bert.file_utils", + "peekOfCode": "def url_to_filename(url, etag=None):\n \"\"\"\n Convert `url` into a hashed filename in a repeatable way.\n If `etag` is specified, append its hash to the url's, delimited\n by a period.\n \"\"\"\n url_bytes = url.encode('utf-8')\n url_hash = sha256(url_bytes)\n filename = url_hash.hexdigest()\n if etag:", + "detail": "modules.PyMAF.models.transformers.bert.file_utils", + "documentation": {} + }, + { + "label": "filename_to_url", + "kind": 2, + "importPath": "modules.PyMAF.models.transformers.bert.file_utils", + "description": "modules.PyMAF.models.transformers.bert.file_utils", + "peekOfCode": "def filename_to_url(filename, cache_dir=None):\n \"\"\"\n Return the url and etag (which may be ``None``) stored for `filename`.\n Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.\n \"\"\"\n if cache_dir is None:\n cache_dir = PYTORCH_PRETRAINED_BERT_CACHE\n if sys.version_info[0] == 3 and isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n cache_path = os.path.join(cache_dir, filename)", + "detail": "modules.PyMAF.models.transformers.bert.file_utils", + "documentation": {} + }, + { + "label": "cached_path", + "kind": 2, + "importPath": "modules.PyMAF.models.transformers.bert.file_utils", + "description": "modules.PyMAF.models.transformers.bert.file_utils", + "peekOfCode": "def cached_path(url_or_filename, cache_dir=None):\n \"\"\"\n Given something that might be a URL (or might be a local path),\n determine which. 
If it's a URL, download the file and cache it, and\n return the path to the cached file. If it's already a local path,\n make sure the file exists and then return the path.\n \"\"\"\n if cache_dir is None:\n cache_dir = PYTORCH_PRETRAINED_BERT_CACHE\n if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):", + "detail": "modules.PyMAF.models.transformers.bert.file_utils", + "documentation": {} + }, + { + "label": "split_s3_path", + "kind": 2, + "importPath": "modules.PyMAF.models.transformers.bert.file_utils", + "description": "modules.PyMAF.models.transformers.bert.file_utils", + "peekOfCode": "def split_s3_path(url):\n \"\"\"Split a full s3 path into the bucket name and path.\"\"\"\n parsed = urlparse(url)\n if not parsed.netloc or not parsed.path:\n raise ValueError(\"bad s3 path {}\".format(url))\n bucket_name = parsed.netloc\n s3_path = parsed.path\n # Remove '/' at beginning of path.\n if s3_path.startswith(\"/\"):\n s3_path = s3_path[1:]", + "detail": "modules.PyMAF.models.transformers.bert.file_utils", + "documentation": {} + }, + { + "label": "s3_request", + "kind": 2, + "importPath": "modules.PyMAF.models.transformers.bert.file_utils", + "description": "modules.PyMAF.models.transformers.bert.file_utils", + "peekOfCode": "def s3_request(func):\n \"\"\"\n Wrapper function for s3 requests in order to create more helpful error\n messages.\n \"\"\"\n @wraps(func)\n def wrapper(url, *args, **kwargs):\n try:\n return func(url, *args, **kwargs)\n except ClientError as exc:", + "detail": "modules.PyMAF.models.transformers.bert.file_utils", + "documentation": {} + }, + { + "label": "s3_etag", + "kind": 2, + "importPath": "modules.PyMAF.models.transformers.bert.file_utils", + "description": "modules.PyMAF.models.transformers.bert.file_utils", + "peekOfCode": "def s3_etag(url):\n \"\"\"Check ETag on S3 object.\"\"\"\n s3_resource = boto3.resource(\"s3\")\n bucket_name, s3_path = split_s3_path(url)\n s3_object = s3_resource.Object(bucket_name, s3_path)\n return s3_object.e_tag\n@s3_request\ndef s3_get(url, temp_file):\n \"\"\"Pull a file directly from S3.\"\"\"\n s3_resource = boto3.resource(\"s3\")", + "detail": "modules.PyMAF.models.transformers.bert.file_utils", + "documentation": {} + }, + { + "label": "s3_get", + "kind": 2, + "importPath": "modules.PyMAF.models.transformers.bert.file_utils", + "description": "modules.PyMAF.models.transformers.bert.file_utils", + "peekOfCode": "def s3_get(url, temp_file):\n \"\"\"Pull a file directly from S3.\"\"\"\n s3_resource = boto3.resource(\"s3\")\n bucket_name, s3_path = split_s3_path(url)\n s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)\ndef http_get(url, temp_file):\n req = requests.get(url, stream=True)\n content_length = req.headers.get('Content-Length')\n total = int(content_length) if content_length is not None else None\n progress = tqdm(unit=\"B\", total=total)", + "detail": "modules.PyMAF.models.transformers.bert.file_utils", + "documentation": {} + }, + { + "label": "http_get", + "kind": 2, + "importPath": "modules.PyMAF.models.transformers.bert.file_utils", + "description": "modules.PyMAF.models.transformers.bert.file_utils", + "peekOfCode": "def http_get(url, temp_file):\n req = requests.get(url, stream=True)\n content_length = req.headers.get('Content-Length')\n total = int(content_length) if content_length is not None else None\n progress = tqdm(unit=\"B\", total=total)\n for chunk in req.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n progress.update(len(chunk))\n 
temp_file.write(chunk)\n progress.close()", + "detail": "modules.PyMAF.models.transformers.bert.file_utils", + "documentation": {} + }, + { + "label": "get_from_cache", + "kind": 2, + "importPath": "modules.PyMAF.models.transformers.bert.file_utils", + "description": "modules.PyMAF.models.transformers.bert.file_utils", + "peekOfCode": "def get_from_cache(url, cache_dir=None):\n \"\"\"\n Given a URL, look for the corresponding dataset in the local cache.\n If it's not there, download it. Then return the path to the cached file.\n \"\"\"\n if cache_dir is None:\n cache_dir = PYTORCH_PRETRAINED_BERT_CACHE\n if sys.version_info[0] == 3 and isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n if sys.version_info[0] == 2 and not isinstance(cache_dir, str):", + "detail": "modules.PyMAF.models.transformers.bert.file_utils", + "documentation": {} + }, + { + "label": "default_cache_path", + "kind": 5, + "importPath": "modules.PyMAF.models.transformers.bert.file_utils", + "description": "modules.PyMAF.models.transformers.bert.file_utils", + "peekOfCode": "default_cache_path = os.path.join(torch_cache_home, 'pytorch_transformers')\ntry:\n from urllib.parse import urlparse\nexcept ImportError:\n from urlparse import urlparse\ntry:\n from pathlib import Path\n PYTORCH_PRETRAINED_BERT_CACHE = Path(\n os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path))\nexcept (AttributeError, ImportError):", + "detail": "modules.PyMAF.models.transformers.bert.file_utils", + "documentation": {} + }, + { + "label": "logger", + "kind": 5, + "importPath": "modules.PyMAF.models.transformers.bert.file_utils", + "description": "modules.PyMAF.models.transformers.bert.file_utils", + "peekOfCode": "logger = logging.getLogger(__name__) # pylint: disable=invalid-name\ndef url_to_filename(url, etag=None):\n \"\"\"\n Convert `url` into a hashed filename in a repeatable way.\n If `etag` is specified, append its hash to the url's, delimited\n by a period.\n \"\"\"\n url_bytes = url.encode('utf-8')\n url_hash = sha256(url_bytes)\n filename = url_hash.hexdigest()", + "detail": "modules.PyMAF.models.transformers.bert.file_utils", + "documentation": {} + }, + { + "label": "BertConfig", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "class BertConfig(PretrainedConfig):\n r\"\"\"\n :class:`~pytorch_transformers.BertConfig` is the configuration class to store the configuration of a\n `BertModel`.\n Arguments:\n vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in\n the Transformer encoder.", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "BertEmbeddings", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "class BertEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\n \"\"\"\n def __init__(self, config):\n super(BertEmbeddings, self).__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n 
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "BertSelfAttention", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "class BertSelfAttention(nn.Module):\n def __init__(self, config):\n super(BertSelfAttention, self).__init__()\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads))\n self.output_attentions = config.output_attentions\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "BertSelfOutput", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "class BertSelfOutput(nn.Module):\n def __init__(self, config):\n super(BertSelfOutput, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "BertAttention", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "class BertAttention(nn.Module):\n def __init__(self, config):\n super(BertAttention, self).__init__()\n self.self = BertSelfAttention(config)\n self.output = BertSelfOutput(config)\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)\n for head in heads:", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "BertIntermediate", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "class BertIntermediate(nn.Module):\n def __init__(self, config):\n super(BertIntermediate, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "BertOutput", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": 
"modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "class BertOutput(nn.Module):\n def __init__(self, config):\n super(BertOutput, self).__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "BertLayer", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "class BertLayer(nn.Module):\n def __init__(self, config):\n super(BertLayer, self).__init__()\n self.attention = BertAttention(config)\n self.intermediate = BertIntermediate(config)\n self.output = BertOutput(config)\n def forward(self, hidden_states, attention_mask, head_mask=None):\n attention_outputs = self.attention(hidden_states, attention_mask, head_mask)\n attention_output = attention_outputs[0]\n intermediate_output = self.intermediate(attention_output)", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "BertEncoder", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "class BertEncoder(nn.Module):\n def __init__(self, config):\n super(BertEncoder, self).__init__()\n self.output_attentions = config.output_attentions\n self.output_hidden_states = config.output_hidden_states\n self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])\n def forward(self, hidden_states, attention_mask, head_mask=None):\n all_hidden_states = ()\n all_attentions = ()\n for i, layer_module in enumerate(self.layer):", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "BertPooler", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "class BertPooler(nn.Module):\n def __init__(self, config):\n super(BertPooler, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "BertPredictionHeadTransform", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "class BertPredictionHeadTransform(nn.Module):\n def __init__(self, config):\n super(BertPredictionHeadTransform, self).__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):\n self.transform_act_fn = ACT2FN[config.hidden_act]\n else:\n self.transform_act_fn = 
config.hidden_act\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n def forward(self, hidden_states):", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "BertLMPredictionHead", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "class BertLMPredictionHead(nn.Module):\n def __init__(self, config):\n super(BertLMPredictionHead, self).__init__()\n self.transform = BertPredictionHeadTransform(config)\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size,\n config.vocab_size,\n bias=False)\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "BertOnlyMLMHead", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "class BertOnlyMLMHead(nn.Module):\n def __init__(self, config):\n super(BertOnlyMLMHead, self).__init__()\n self.predictions = BertLMPredictionHead(config)\n def forward(self, sequence_output):\n prediction_scores = self.predictions(sequence_output)\n return prediction_scores\nclass BertOnlyNSPHead(nn.Module):\n def __init__(self, config):\n super(BertOnlyNSPHead, self).__init__()", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "BertOnlyNSPHead", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "class BertOnlyNSPHead(nn.Module):\n def __init__(self, config):\n super(BertOnlyNSPHead, self).__init__()\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n def forward(self, pooled_output):\n seq_relationship_score = self.seq_relationship(pooled_output)\n return seq_relationship_score\nclass BertPreTrainingHeads(nn.Module):\n def __init__(self, config):\n super(BertPreTrainingHeads, self).__init__()", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "BertPreTrainingHeads", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "class BertPreTrainingHeads(nn.Module):\n def __init__(self, config):\n super(BertPreTrainingHeads, self).__init__()\n self.predictions = BertLMPredictionHead(config)\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n def forward(self, sequence_output, pooled_output):\n prediction_scores = self.predictions(sequence_output)\n seq_relationship_score = self.seq_relationship(pooled_output)\n return prediction_scores, seq_relationship_score\nclass BertPreTrainedModel(PreTrainedModel):", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "BertPreTrainedModel", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "class BertPreTrainedModel(PreTrainedModel):\n \"\"\" An abstract class to handle weights initialization and\n a simple interface for downloading and loading 
pretrained models.\n \"\"\"\n config_class = BertConfig\n pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP\n load_tf_weights = load_tf_weights_in_bert\n base_model_prefix = \"bert\"\n def __init__(self, *inputs, **kwargs):\n super(BertPreTrainedModel, self).__init__(*inputs, **kwargs)", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "BertModel", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "class BertModel(BertPreTrainedModel):\n r\"\"\"\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``\n Sequence of hidden-states at the output of the last layer of the model.\n **pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``\n Last layer hidden-state of the first token of the sequence (classification token)\n further processed by a Linear layer and a Tanh activation function. The Linear\n layer weights are trained from the next sentence prediction (classification)\n objective during Bert pretraining. This output is usually *not* a good summary", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "BertForPreTraining", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "class BertForPreTraining(BertPreTrainedModel):\n r\"\"\"\n **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Labels for computing the masked language modeling loss.\n Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)\n Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels\n in ``[0, ..., config.vocab_size]``\n **next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\n Labels for computing the next sequence prediction (classification) loss. 
Input should be a sequence pair (see ``input_ids`` docstring)\n Indices should be in ``[0, 1]``.", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "BertForMaskedLM", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "class BertForMaskedLM(BertPreTrainedModel):\n r\"\"\"\n **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Labels for computing the masked language modeling loss.\n Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)\n Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels\n in ``[0, ..., config.vocab_size]``\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Masked language modeling loss.", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "BertForNextSentencePrediction", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "class BertForNextSentencePrediction(BertPreTrainedModel):\n r\"\"\"\n **next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\n Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)\n Indices should be in ``[0, 1]``.\n ``0`` indicates sequence B is a continuation of sequence A,\n ``1`` indicates sequence B is a random sequence.\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when ``next_sentence_label`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Next sequence prediction (classification) loss.", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "BertForSequenceClassification", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "class BertForSequenceClassification(BertPreTrainedModel):\n r\"\"\"\n **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\n Labels for computing the sequence classification/regression loss.\n Indices should be in ``[0, ..., config.num_labels]``.\n If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),\n If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Classification (or regression if config.num_labels==1) loss.", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "BertForMultipleChoice", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "class BertForMultipleChoice(BertPreTrainedModel):\n r\"\"\"\n Inputs:\n **input_ids**: 
``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:\n Indices of input sequence tokens in the vocabulary.\n The second dimension of the input (`num_choices`) indicates the number of choices to score.\n To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows:\n (a) For sequence pairs:\n ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``\n ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1``", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "BertForTokenClassification", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "class BertForTokenClassification(BertPreTrainedModel):\n r\"\"\"\n **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Labels for computing the token classification loss.\n Indices should be in ``[0, ..., config.num_labels]``.\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Classification loss.\n **scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.num_labels)``\n Classification scores (before SoftMax).", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "BertForQuestionAnswering", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "class BertForQuestionAnswering(BertPreTrainedModel):\n r\"\"\"\n **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Positions outside of the sequence are not taken into account for computing the loss.\n **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Positions outside of the sequence are not taken into account for computing the loss.", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "load_tf_weights_in_bert", + "kind": 2, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "def load_tf_weights_in_bert(model, config, tf_checkpoint_path):\n \"\"\" Load tf checkpoints in a pytorch model.\n \"\"\"\n try:\n import re\n import numpy as np\n import tensorflow as tf\n except ImportError:\n logger.error(\"Loading TensorFlow models in PyTorch requires TensorFlow to be installed. 
Please see \"\n \"https://www.tensorflow.org/install/ for installation instructions.\")", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "gelu", + "kind": 2, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "def gelu(x):\n \"\"\"Implementation of the gelu activation function.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n Also see https://arxiv.org/abs/1606.08415\n \"\"\"\n return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))\ndef swish(x):\n return x * torch.sigmoid(x)\nACT2FN = {\"gelu\": gelu, \"relu\": torch.nn.functional.relu, \"swish\": swish}", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "swish", + "kind": 2, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "def swish(x):\n return x * torch.sigmoid(x)\nACT2FN = {\"gelu\": gelu, \"relu\": torch.nn.functional.relu, \"swish\": swish}\nclass BertConfig(PretrainedConfig):\n r\"\"\"\n :class:`~pytorch_transformers.BertConfig` is the configuration class to store the configuration of a\n `BertModel`.\n Arguments:\n vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "logger", + "kind": 5, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "logger = logging.getLogger(__name__)\nBERT_PRETRAINED_MODEL_ARCHIVE_MAP = {\n 'bert-base-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin\",\n 'bert-large-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin\",\n 'bert-base-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin\",\n 'bert-large-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin\",\n 'bert-base-multilingual-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin\",\n 'bert-base-multilingual-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin\",\n 'bert-base-chinese': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin\",\n 'bert-base-german-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-pytorch_model.bin\",", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "BERT_PRETRAINED_MODEL_ARCHIVE_MAP", + "kind": 5, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "BERT_PRETRAINED_MODEL_ARCHIVE_MAP = {\n 'bert-base-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin\",\n 'bert-large-uncased': 
\"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin\",\n 'bert-base-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin\",\n 'bert-large-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin\",\n 'bert-base-multilingual-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin\",\n 'bert-base-multilingual-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin\",\n 'bert-base-chinese': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin\",\n 'bert-base-german-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-pytorch_model.bin\",\n 'bert-large-uncased-whole-word-masking': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin\",", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", + "kind": 5, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {\n 'bert-base-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json\",\n 'bert-large-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-config.json\",\n 'bert-base-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json\",\n 'bert-large-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-config.json\",\n 'bert-base-multilingual-uncased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-config.json\",\n 'bert-base-multilingual-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-config.json\",\n 'bert-base-chinese': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-config.json\",\n 'bert-base-german-cased': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-config.json\",\n 'bert-large-uncased-whole-word-masking': \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-config.json\",", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "ACT2FN", + "kind": 5, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "ACT2FN = {\"gelu\": gelu, \"relu\": torch.nn.functional.relu, \"swish\": swish}\nclass BertConfig(PretrainedConfig):\n r\"\"\"\n :class:`~pytorch_transformers.BertConfig` is the configuration class to store the configuration of a\n `BertModel`.\n Arguments:\n vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.\n hidden_size: Size of the encoder layers and the pooler layer.\n num_hidden_layers: Number of hidden layers in the Transformer encoder.\n num_attention_heads: Number of attention heads for each attention layer in", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "BERT_START_DOCSTRING", + "kind": 5, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": 
"modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "BERT_START_DOCSTRING = r\"\"\" The BERT model was proposed in\n `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_\n by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It's a bidirectional transformer\n pre-trained using a combination of masked language modeling objective and next sentence prediction\n on a large corpus comprising the Toronto Book Corpus and Wikipedia.\n This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and\n refer to the PyTorch documentation for all matter related to general usage and behavior.\n .. _`BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`:\n https://arxiv.org/abs/1810.04805\n .. _`torch.nn.Module`:", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "BERT_INPUTS_DOCSTRING", + "kind": 5, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_bert", + "description": "modules.PyMAF.models.transformers.bert.modeling_bert", + "peekOfCode": "BERT_INPUTS_DOCSTRING = r\"\"\"\n Inputs:\n **input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Indices of input sequence tokens in the vocabulary.\n To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows:\n (a) For sequence pairs:\n ``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``\n ``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1``\n (b) For single sequences:\n ``tokens: [CLS] the dog is hairy . [SEP]``", + "detail": "modules.PyMAF.models.transformers.bert.modeling_bert", + "documentation": {} + }, + { + "label": "BertSelfAttention", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_graphormer", + "description": "modules.PyMAF.models.transformers.bert.modeling_graphormer", + "peekOfCode": "class BertSelfAttention(nn.Module):\n def __init__(self, config):\n super(BertSelfAttention, self).__init__()\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads))\n self.output_attentions = config.output_attentions\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)", + "detail": "modules.PyMAF.models.transformers.bert.modeling_graphormer", + "documentation": {} + }, + { + "label": "BertAttention", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_graphormer", + "description": "modules.PyMAF.models.transformers.bert.modeling_graphormer", + "peekOfCode": "class BertAttention(nn.Module):\n def __init__(self, config):\n super(BertAttention, self).__init__()\n self.self = BertSelfAttention(config)\n self.output = BertSelfOutput(config)\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)\n for head in heads:", + "detail": "modules.PyMAF.models.transformers.bert.modeling_graphormer", + "documentation": {} + }, + { + "label": "GraphormerLayer", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_graphormer", + "description": "modules.PyMAF.models.transformers.bert.modeling_graphormer", + "peekOfCode": "class GraphormerLayer(nn.Module):\n def __init__(self, config):\n 
super(GraphormerLayer, self).__init__()\n self.attention = BertAttention(config)\n self.has_graph_conv = config.graph_conv\n self.mesh_type = config.mesh_type\n if self.has_graph_conv:\n if self.mesh_type=='hand':\n self.graph_conv = GraphResBlock(config.hidden_size, config.hidden_size, mesh_type=self.mesh_type)\n elif self.mesh_type=='body':", + "detail": "modules.PyMAF.models.transformers.bert.modeling_graphormer", + "documentation": {} + }, + { + "label": "GraphormerEncoder", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_graphormer", + "description": "modules.PyMAF.models.transformers.bert.modeling_graphormer", + "peekOfCode": "class GraphormerEncoder(nn.Module):\n def __init__(self, config):\n super(GraphormerEncoder, self).__init__()\n self.output_attentions = config.output_attentions\n self.output_hidden_states = config.output_hidden_states\n self.layer = nn.ModuleList([GraphormerLayer(config) for _ in range(config.num_hidden_layers)])\n def forward(self, hidden_states, attention_mask, head_mask=None,\n encoder_history_states=None):\n all_hidden_states = ()\n all_attentions = ()", + "detail": "modules.PyMAF.models.transformers.bert.modeling_graphormer", + "documentation": {} + }, + { + "label": "EncoderBlock", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_graphormer", + "description": "modules.PyMAF.models.transformers.bert.modeling_graphormer", + "peekOfCode": "class EncoderBlock(BertPreTrainedModel):\n def __init__(self, config):\n super(EncoderBlock, self).__init__(config)\n self.config = config\n # self.embeddings = BertEmbeddings(config)\n self.encoder = GraphormerEncoder(config)\n # self.pooler = BertPooler(config)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.img_dim = config.img_feature_dim \n try:", + "detail": "modules.PyMAF.models.transformers.bert.modeling_graphormer", + "documentation": {} + }, + { + "label": "Graphormer", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_graphormer", + "description": "modules.PyMAF.models.transformers.bert.modeling_graphormer", + "peekOfCode": "class Graphormer(BertPreTrainedModel):\n '''\n The architecture of a transformer encoder block we used in Graphormer\n '''\n def __init__(self, config):\n super(Graphormer, self).__init__(config)\n self.config = config\n self.bert = EncoderBlock(config)\n self.cls_head = nn.Linear(config.hidden_size, self.config.output_feature_dim)\n self.residual = nn.Linear(config.img_feature_dim, self.config.output_feature_dim)", + "detail": "modules.PyMAF.models.transformers.bert.modeling_graphormer", + "documentation": {} + }, + { + "label": "LayerNormClass", + "kind": 5, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_graphormer", + "description": "modules.PyMAF.models.transformers.bert.modeling_graphormer", + "peekOfCode": "LayerNormClass = torch.nn.LayerNorm\nBertLayerNorm = torch.nn.LayerNorm\nclass BertSelfAttention(nn.Module):\n def __init__(self, config):\n super(BertSelfAttention, self).__init__()\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads))\n self.output_attentions = config.output_attentions", + "detail": "modules.PyMAF.models.transformers.bert.modeling_graphormer", + "documentation": {} + }, + { + "label": "BertLayerNorm", + "kind": 5, + "importPath": 
"modules.PyMAF.models.transformers.bert.modeling_graphormer", + "description": "modules.PyMAF.models.transformers.bert.modeling_graphormer", + "peekOfCode": "BertLayerNorm = torch.nn.LayerNorm\nclass BertSelfAttention(nn.Module):\n def __init__(self, config):\n super(BertSelfAttention, self).__init__()\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads))\n self.output_attentions = config.output_attentions\n self.num_attention_heads = config.num_attention_heads", + "detail": "modules.PyMAF.models.transformers.bert.modeling_graphormer", + "documentation": {} + }, + { + "label": "PretrainedConfig", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_utils", + "description": "modules.PyMAF.models.transformers.bert.modeling_utils", + "peekOfCode": "class PretrainedConfig(object):\n \"\"\" Base class for all configuration classes.\n Handle a few common parameters and methods for loading/downloading/saving configurations.\n \"\"\"\n pretrained_config_archive_map = {}\n def __init__(self, **kwargs):\n self.finetuning_task = kwargs.pop('finetuning_task', None)\n self.num_labels = kwargs.pop('num_labels', 2)\n self.output_attentions = kwargs.pop('output_attentions', False)\n self.output_hidden_states = kwargs.pop('output_hidden_states', False)", + "detail": "modules.PyMAF.models.transformers.bert.modeling_utils", + "documentation": {} + }, + { + "label": "PreTrainedModel", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_utils", + "description": "modules.PyMAF.models.transformers.bert.modeling_utils", + "peekOfCode": "class PreTrainedModel(nn.Module):\n \"\"\" Base class for all models. Handle loading/storing model config and\n a simple interface for dowloading and loading pretrained models.\n \"\"\"\n config_class = PretrainedConfig\n pretrained_model_archive_map = {}\n load_tf_weights = lambda model, config, path: None\n base_model_prefix = \"\"\n input_embeddings = None\n def __init__(self, config, *inputs, **kwargs):", + "detail": "modules.PyMAF.models.transformers.bert.modeling_utils", + "documentation": {} + }, + { + "label": "Conv1D", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_utils", + "description": "modules.PyMAF.models.transformers.bert.modeling_utils", + "peekOfCode": "class Conv1D(nn.Module):\n def __init__(self, nf, nx):\n \"\"\" Conv1D layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2)\n Basically works like a Linear layer but the weights are transposed\n \"\"\"\n super(Conv1D, self).__init__()\n self.nf = nf\n w = torch.empty(nx, nf)\n nn.init.normal_(w, std=0.02)\n self.weight = nn.Parameter(w)", + "detail": "modules.PyMAF.models.transformers.bert.modeling_utils", + "documentation": {} + }, + { + "label": "PoolerStartLogits", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_utils", + "description": "modules.PyMAF.models.transformers.bert.modeling_utils", + "peekOfCode": "class PoolerStartLogits(nn.Module):\n \"\"\" Compute SQuAD start_logits from sequence hidden states. 
\"\"\"\n def __init__(self, config):\n super(PoolerStartLogits, self).__init__()\n self.dense = nn.Linear(config.hidden_size, 1)\n def forward(self, hidden_states, p_mask=None):\n \"\"\" Args:\n **p_mask**: (`optional`) ``torch.FloatTensor`` of shape `(batch_size, seq_len)`\n invalid position mask such as query and special symbols (PAD, SEP, CLS)\n 1.0 means token should be masked.", + "detail": "modules.PyMAF.models.transformers.bert.modeling_utils", + "documentation": {} + }, + { + "label": "PoolerEndLogits", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_utils", + "description": "modules.PyMAF.models.transformers.bert.modeling_utils", + "peekOfCode": "class PoolerEndLogits(nn.Module):\n \"\"\" Compute SQuAD end_logits from sequence hidden states and start token hidden state.\n \"\"\"\n def __init__(self, config):\n super(PoolerEndLogits, self).__init__()\n self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)\n self.activation = nn.Tanh()\n self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dense_1 = nn.Linear(config.hidden_size, 1)\n def forward(self, hidden_states, start_states=None, start_positions=None, p_mask=None):", + "detail": "modules.PyMAF.models.transformers.bert.modeling_utils", + "documentation": {} + }, + { + "label": "PoolerAnswerClass", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_utils", + "description": "modules.PyMAF.models.transformers.bert.modeling_utils", + "peekOfCode": "class PoolerAnswerClass(nn.Module):\n \"\"\" Compute SQuAD 2.0 answer class from classification and start tokens hidden states. \"\"\"\n def __init__(self, config):\n super(PoolerAnswerClass, self).__init__()\n self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)\n self.activation = nn.Tanh()\n self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)\n def forward(self, hidden_states, start_states=None, start_positions=None, cls_index=None):\n \"\"\"\n Args:", + "detail": "modules.PyMAF.models.transformers.bert.modeling_utils", + "documentation": {} + }, + { + "label": "SQuADHead", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_utils", + "description": "modules.PyMAF.models.transformers.bert.modeling_utils", + "peekOfCode": "class SQuADHead(nn.Module):\n r\"\"\" A SQuAD head inspired by XLNet.\n Parameters:\n config (:class:`~pytorch_transformers.XLNetConfig`): Model configuration class with all the parameters of the model.\n Inputs:\n **hidden_states**: ``torch.FloatTensor`` of shape ``(batch_size, seq_len, hidden_size)``\n hidden states of sequence tokens\n **start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``\n position of the first token for the labeled span.\n **end_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``", + "detail": "modules.PyMAF.models.transformers.bert.modeling_utils", + "documentation": {} + }, + { + "label": "SequenceSummary", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_utils", + "description": "modules.PyMAF.models.transformers.bert.modeling_utils", + "peekOfCode": "class SequenceSummary(nn.Module):\n r\"\"\" Compute a single vector summary of a sequence hidden states according to various possibilities:\n Args of the config class:\n summary_type:\n - 'last' => [default] take the last token hidden state (like XLNet)\n - 'first' => take the first token hidden state (like Bert)\n - 'mean' => take the mean of all tokens hidden states\n - 'token_ids' => supply 
a Tensor of classification token indices (GPT/GPT-2)\n - 'attn' => Not implemented now, use multi-head attention\n summary_use_proj: Add a projection after the vector extraction", + "detail": "modules.PyMAF.models.transformers.bert.modeling_utils", + "documentation": {} + }, + { + "label": "prune_linear_layer", + "kind": 2, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_utils", + "description": "modules.PyMAF.models.transformers.bert.modeling_utils", + "peekOfCode": "def prune_linear_layer(layer, index, dim=0):\n \"\"\" Prune a linear layer (model parameters) to keep only entries in index.\n Return the pruned layer as a new layer with requires_grad=True.\n Used to remove heads.\n \"\"\"\n index = index.to(layer.weight.device)\n W = layer.weight.index_select(dim, index).clone().detach()\n if layer.bias is not None:\n if dim == 1:\n b = layer.bias.clone().detach()", + "detail": "modules.PyMAF.models.transformers.bert.modeling_utils", + "documentation": {} + }, + { + "label": "prune_conv1d_layer", + "kind": 2, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_utils", + "description": "modules.PyMAF.models.transformers.bert.modeling_utils", + "peekOfCode": "def prune_conv1d_layer(layer, index, dim=1):\n \"\"\" Prune a Conv1D layer (model parameters) to keep only entries in index.\n A Conv1D works as a Linear layer (see e.g. BERT) but the weights are transposed.\n Return the pruned layer as a new layer with requires_grad=True.\n Used to remove heads.\n \"\"\"\n index = index.to(layer.weight.device)\n W = layer.weight.index_select(dim, index).clone().detach()\n if dim == 0:\n b = layer.bias.clone().detach()", + "detail": "modules.PyMAF.models.transformers.bert.modeling_utils", + "documentation": {} + }, + { + "label": "prune_layer", + "kind": 2, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_utils", + "description": "modules.PyMAF.models.transformers.bert.modeling_utils", + "peekOfCode": "def prune_layer(layer, index, dim=None):\n \"\"\" Prune a Conv1D or nn.Linear layer (model parameters) to keep only entries in index.\n Return the pruned layer as a new layer with requires_grad=True.\n Used to remove heads.\n \"\"\"\n if isinstance(layer, nn.Linear):\n return prune_linear_layer(layer, index, dim=0 if dim is None else dim)\n elif isinstance(layer, Conv1D):\n return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim)\n else:", + "detail": "modules.PyMAF.models.transformers.bert.modeling_utils", + "documentation": {} + }, + { + "label": "logger", + "kind": 5, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_utils", + "description": "modules.PyMAF.models.transformers.bert.modeling_utils", + "peekOfCode": "logger = logging.getLogger(__name__)\nCONFIG_NAME = \"config.json\"\nWEIGHTS_NAME = \"pytorch_model.bin\"\nTF_WEIGHTS_NAME = 'model.ckpt'\ntry:\n from torch.nn import Identity\nexcept ImportError:\n # Older PyTorch compatibility\n class Identity(nn.Module):\n r\"\"\"A placeholder identity operator that is argument-insensitive.", + "detail": "modules.PyMAF.models.transformers.bert.modeling_utils", + "documentation": {} + }, + { + "label": "CONFIG_NAME", + "kind": 5, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_utils", + "description": "modules.PyMAF.models.transformers.bert.modeling_utils", + "peekOfCode": "CONFIG_NAME = \"config.json\"\nWEIGHTS_NAME = \"pytorch_model.bin\"\nTF_WEIGHTS_NAME = 'model.ckpt'\ntry:\n from torch.nn import Identity\nexcept ImportError:\n # Older PyTorch compatibility\n 
class Identity(nn.Module):\n r\"\"\"A placeholder identity operator that is argument-insensitive.\n \"\"\"", + "detail": "modules.PyMAF.models.transformers.bert.modeling_utils", + "documentation": {} + }, + { + "label": "WEIGHTS_NAME", + "kind": 5, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_utils", + "description": "modules.PyMAF.models.transformers.bert.modeling_utils", + "peekOfCode": "WEIGHTS_NAME = \"pytorch_model.bin\"\nTF_WEIGHTS_NAME = 'model.ckpt'\ntry:\n from torch.nn import Identity\nexcept ImportError:\n # Older PyTorch compatibility\n class Identity(nn.Module):\n r\"\"\"A placeholder identity operator that is argument-insensitive.\n \"\"\"\n def __init__(self, *args, **kwargs):", + "detail": "modules.PyMAF.models.transformers.bert.modeling_utils", + "documentation": {} + }, + { + "label": "TF_WEIGHTS_NAME", + "kind": 5, + "importPath": "modules.PyMAF.models.transformers.bert.modeling_utils", + "description": "modules.PyMAF.models.transformers.bert.modeling_utils", + "peekOfCode": "TF_WEIGHTS_NAME = 'model.ckpt'\ntry:\n from torch.nn import Identity\nexcept ImportError:\n # Older PyTorch compatibility\n class Identity(nn.Module):\n r\"\"\"A placeholder identity operator that is argument-insensitive.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(Identity, self).__init__()", + "detail": "modules.PyMAF.models.transformers.bert.modeling_utils", + "documentation": {} + }, + { + "label": "single_conv", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.net_utils", + "description": "modules.PyMAF.models.transformers.net_utils", + "peekOfCode": "class single_conv(nn.Module):\n def __init__(self, in_ch, out_ch):\n super(single_conv, self).__init__()\n self.conv = nn.Sequential(nn.Conv2d(in_ch, out_ch, 3, stride=1, padding=1),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True),)\n def forward(self, x):\n return self.conv(x)\nclass double_conv(nn.Module):\n def __init__(self, in_ch, out_ch):", + "detail": "modules.PyMAF.models.transformers.net_utils", + "documentation": {} + }, + { + "label": "double_conv", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.net_utils", + "description": "modules.PyMAF.models.transformers.net_utils", + "peekOfCode": "class double_conv(nn.Module):\n def __init__(self, in_ch, out_ch):\n super(double_conv, self).__init__()\n self.conv = nn.Sequential(nn.Conv2d(in_ch, out_ch, 3, stride=1, padding=1),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True),\n nn.Conv2d(out_ch, out_ch, 3, stride=1, padding=1),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True))\n def forward(self, x):", + "detail": "modules.PyMAF.models.transformers.net_utils", + "documentation": {} + }, + { + "label": "double_conv_down", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.net_utils", + "description": "modules.PyMAF.models.transformers.net_utils", + "peekOfCode": "class double_conv_down(nn.Module):\n def __init__(self, in_ch, out_ch):\n super(double_conv_down, self).__init__()\n self.conv = nn.Sequential(nn.Conv2d(in_ch, out_ch, 3, stride=2, padding=1),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True),\n nn.Conv2d(out_ch, out_ch, 3, stride=1, padding=1),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True))\n def forward(self, x):", + "detail": "modules.PyMAF.models.transformers.net_utils", + "documentation": {} + }, + { + "label": "double_conv_up", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.net_utils", + "description": "modules.PyMAF.models.transformers.net_utils", + "peekOfCode": "class 
double_conv_up(nn.Module):\n def __init__(self, in_ch, out_ch):\n super(double_conv_up, self).__init__()\n self.conv = nn.Sequential(nn.UpsamplingNearest2d(scale_factor=2),\n nn.Conv2d(in_ch, out_ch, 3, stride=1, padding=1),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True),\n nn.Conv2d(out_ch, out_ch, 3, stride=1, padding=1),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True))", + "detail": "modules.PyMAF.models.transformers.net_utils", + "documentation": {} + }, + { + "label": "PosEnSine", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.net_utils", + "description": "modules.PyMAF.models.transformers.net_utils", + "peekOfCode": "class PosEnSine(nn.Module):\n \"\"\"\n Code borrowed from DETR: models/positional_encoding.py\n output size: b*(2.num_pos_feats)*h*w\n \"\"\"\n def __init__(self, num_pos_feats):\n super(PosEnSine, self).__init__()\n self.num_pos_feats = num_pos_feats\n self.normalize = True\n self.scale = 2 * math.pi", + "detail": "modules.PyMAF.models.transformers.net_utils", + "documentation": {} + }, + { + "label": "softmax_attention", + "kind": 2, + "importPath": "modules.PyMAF.models.transformers.net_utils", + "description": "modules.PyMAF.models.transformers.net_utils", + "peekOfCode": "def softmax_attention(q, k, v):\n # b x n x d x h x w\n h, w = q.shape[-2], q.shape[-1]\n q = q.flatten(-2).transpose(-2, -1) # b x n x hw x d\n k = k.flatten(-2) # b x n x d x hw\n v = v.flatten(-2).transpose(-2, -1)\n d = k.shape[-2] # scale by sqrt(d), the per-head feature dim (not the key count hw)\n attn = torch.matmul(q / d ** 0.5, k)\n attn = F.softmax(attn, dim=-1)", + "detail": "modules.PyMAF.models.transformers.net_utils", + "documentation": {} + }, + { + "label": "dotproduct_attention", + "kind": 2, + "importPath": "modules.PyMAF.models.transformers.net_utils", + "description": "modules.PyMAF.models.transformers.net_utils", + "peekOfCode": "def dotproduct_attention(q, k, v):\n # b x n x d x h x w\n h, w = q.shape[-2], q.shape[-1]\n q = q.flatten(-2).transpose(-2, -1) # b x n x hw x d\n k = k.flatten(-2) # b x n x d x hw\n v = v.flatten(-2).transpose(-2, -1)\n N = k.shape[-1]\n attn = None\n tmp = torch.matmul(k, v) / N\n output = torch.matmul(q, tmp)", + "detail": "modules.PyMAF.models.transformers.net_utils", + "documentation": {} + }, + { + "label": "long_range_attention", + "kind": 2, + "importPath": "modules.PyMAF.models.transformers.net_utils", + "description": "modules.PyMAF.models.transformers.net_utils", + "peekOfCode": "def long_range_attention(q, k, v, P_h, P_w): # fixed patch size\n B, N, C, qH, qW = q.size()\n _, _, _, kH, kW = k.size()\n qQ_h, qQ_w = qH // P_h, qW // P_w\n kQ_h, kQ_w = kH // P_h, kW // P_w\n q = q.reshape(B, N, C, qQ_h, P_h, qQ_w, P_w)\n k = k.reshape(B, N, C, kQ_h, P_h, kQ_w, P_w)\n v = v.reshape(B, N, -1, kQ_h, P_h, kQ_w, P_w)\n q = q.permute(0, 1, 4, 6, 2, 3, 5) # [b, n, Ph, Pw, d, Qh, Qw]\n k = k.permute(0, 1, 4, 6, 2, 3, 5)", + "detail": "modules.PyMAF.models.transformers.net_utils", + "documentation": {} + }, + { + "label": "short_range_attention", + "kind": 2, + "importPath": "modules.PyMAF.models.transformers.net_utils", + "description": "modules.PyMAF.models.transformers.net_utils", + "peekOfCode": "def short_range_attention(q, k, v, Q_h, Q_w): # fixed patch number\n B, N, C, qH, qW = q.size()\n _, _, _, kH, kW = k.size()\n qP_h, qP_w = qH // Q_h, qW // Q_w\n kP_h, kP_w = kH // Q_h, kW // Q_w\n q = q.reshape(B, N, C, Q_h, qP_h, Q_w, qP_w)\n k = k.reshape(B, N, C, Q_h, kP_h, Q_w, kP_w)\n v = v.reshape(B, N, 
-1, Q_h, kP_h, Q_w, kP_w)\n q = q.permute(0, 1, 3, 5, 2, 4, 6) # [b, n, Qh, Qw, d, Ph, Pw]\n k = k.permute(0, 1, 3, 5, 2, 4, 6)", + "detail": "modules.PyMAF.models.transformers.net_utils", + "documentation": {} + }, + { + "label": "space_to_depth", + "kind": 2, + "importPath": "modules.PyMAF.models.transformers.net_utils", + "description": "modules.PyMAF.models.transformers.net_utils", + "peekOfCode": "def space_to_depth(x, block_size):\n x_shape = x.shape\n c, h, w = x_shape[-3:]\n if len(x.shape) >= 5:\n x = x.view(-1, c, h, w)\n unfolded_x = torch.nn.functional.unfold(x, block_size, stride=block_size)\n return unfolded_x.view(*x_shape[0:-3], c * block_size ** 2, h // block_size, w // block_size)\ndef depth_to_space(x, block_size):\n x_shape = x.shape\n c, h, w = x_shape[-3:]", + "detail": "modules.PyMAF.models.transformers.net_utils", + "documentation": {} + }, + { + "label": "depth_to_space", + "kind": 2, + "importPath": "modules.PyMAF.models.transformers.net_utils", + "description": "modules.PyMAF.models.transformers.net_utils", + "peekOfCode": "def depth_to_space(x, block_size):\n x_shape = x.shape\n c, h, w = x_shape[-3:]\n x = x.view(-1, c, h, w)\n y = torch.nn.functional.pixel_shuffle(x, block_size)\n return y.view(*x_shape[0:-3], -1, h*block_size, w*block_size)\ndef patch_attention(q, k, v, P):\n # q: [b, nhead, c, h, w]\n q_patch = space_to_depth(q, P) # [b, nhead, cP^2, h/P, w/P]\n k_patch = space_to_depth(k, P)", + "detail": "modules.PyMAF.models.transformers.net_utils", + "documentation": {} + }, + { + "label": "patch_attention", + "kind": 2, + "importPath": "modules.PyMAF.models.transformers.net_utils", + "description": "modules.PyMAF.models.transformers.net_utils", + "peekOfCode": "def patch_attention(q, k, v, P):\n # q: [b, nhead, c, h, w]\n q_patch = space_to_depth(q, P) # [b, nhead, cP^2, h/P, w/P]\n k_patch = space_to_depth(k, P)\n v_patch = space_to_depth(v, P)\n # output: [b, nhead, cP^2, h/P, w/P]\n # attn: [b, nhead, h/P*w/P, h/P*w/P]\n output, attn = softmax_attention(q_patch, k_patch, v_patch) \n output = depth_to_space(output, P) # output: [b, nhead, c, h, w]\n return output, attn", + "detail": "modules.PyMAF.models.transformers.net_utils", + "documentation": {} + }, + { + "label": "TransformerDecoderUnit", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.texformer", + "description": "modules.PyMAF.models.transformers.texformer", + "peekOfCode": "class TransformerDecoderUnit(nn.Module):\n def __init__(self, feat_dim, n_head=8, pos_en_flag=True, attn_type='softmax', P=None):\n super(TransformerDecoderUnit, self).__init__()\n self.feat_dim = feat_dim\n self.attn_type = attn_type\n self.pos_en_flag = pos_en_flag\n self.P = P\n self.pos_en = PosEnSine(self.feat_dim // 2)\n self.attn = OurMultiheadAttention(feat_dim, n_head) # cross-attention\n self.linear1 = nn.Conv2d(self.feat_dim, self.feat_dim, 1)", + "detail": "modules.PyMAF.models.transformers.texformer", + "documentation": {} + }, + { + "label": "Unet", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.texformer", + "description": "modules.PyMAF.models.transformers.texformer", + "peekOfCode": "class Unet(nn.Module):\n def __init__(self, in_ch, feat_ch, out_ch):\n super().__init__()\n self.conv_in = single_conv(in_ch, feat_ch)\n self.conv1 = double_conv_down(feat_ch, feat_ch)\n self.conv2 = double_conv_down(feat_ch, feat_ch)\n self.conv3 = double_conv(feat_ch, feat_ch)\n self.conv4 = double_conv_up(feat_ch, feat_ch)\n self.conv5 = double_conv_up(feat_ch, feat_ch)\n self.conv6 = 
double_conv(feat_ch, out_ch)", + "detail": "modules.PyMAF.models.transformers.texformer", + "documentation": {} + }, + { + "label": "Texformer", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.texformer", + "description": "modules.PyMAF.models.transformers.texformer", + "peekOfCode": "class Texformer(nn.Module):\n def __init__(self, opts):\n super().__init__()\n self.feat_dim = opts.feat_dim\n src_ch = opts.src_ch\n tgt_ch = opts.tgt_ch\n out_ch = opts.out_ch\n self.mask_fusion = opts.mask_fusion\n if not self.mask_fusion:\n v_ch = out_ch", + "detail": "modules.PyMAF.models.transformers.texformer", + "documentation": {} + }, + { + "label": "SpatialAttention", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.tokenlearner", + "description": "modules.PyMAF.models.transformers.tokenlearner", + "peekOfCode": "class SpatialAttention(nn.Module):\n def __init__(self) -> None:\n super().__init__()\n self.conv = nn.Sequential(\n nn.Conv2d(2, 1, kernel_size=(1,1), stride=1),\n nn.BatchNorm2d(1),\n nn.ReLU()\n )\n self.sgap = nn.AvgPool2d(2)\n def forward(self, x):", + "detail": "modules.PyMAF.models.transformers.tokenlearner", + "documentation": {} + }, + { + "label": "TokenLearner", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.tokenlearner", + "description": "modules.PyMAF.models.transformers.tokenlearner", + "peekOfCode": "class TokenLearner(nn.Module):\n def __init__(self, S) -> None:\n super().__init__()\n self.S = S\n self.tokenizers = nn.ModuleList([SpatialAttention() for _ in range(S)])\n def forward(self, x):\n B, _, _, C = x.shape\n Z = torch.Tensor(B, self.S, C).to(x)\n for i in range(self.S):\n Ai, _ = self.tokenizers[i](x) # [B, C]", + "detail": "modules.PyMAF.models.transformers.tokenlearner", + "documentation": {} + }, + { + "label": "TokenFuser", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.tokenlearner", + "description": "modules.PyMAF.models.transformers.tokenlearner", + "peekOfCode": "class TokenFuser(nn.Module):\n def __init__(self, H, W, C, S) -> None:\n super().__init__()\n self.projection = nn.Linear(S, S, bias=False)\n self.Bi = nn.Linear(C, S)\n self.spatial_attn = SpatialAttention()\n self.S = S\n def forward(self, y, x):\n B, S, C = y.shape\n B, H, W, C = x.shape", + "detail": "modules.PyMAF.models.transformers.tokenlearner", + "documentation": {} + }, + { + "label": "OurMultiheadAttention", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.transformer_basics", + "description": "modules.PyMAF.models.transformers.transformer_basics", + "peekOfCode": "class OurMultiheadAttention(nn.Module):\n def __init__(self, q_feat_dim, k_feat_dim, out_feat_dim, n_head, d_k=None, d_v=None):\n super(OurMultiheadAttention, self).__init__()\n if d_k is None:\n d_k = out_feat_dim // n_head\n if d_v is None:\n d_v = out_feat_dim // n_head\n self.n_head = n_head\n self.d_k = d_k\n self.d_v = d_v", + "detail": "modules.PyMAF.models.transformers.transformer_basics", + "documentation": {} + }, + { + "label": "TransformerEncoderUnit", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.transformer_basics", + "description": "modules.PyMAF.models.transformers.transformer_basics", + "peekOfCode": "class TransformerEncoderUnit(nn.Module):\n def __init__(self, feat_dim, n_head=8, pos_en_flag=True, attn_type='softmax', P=None):\n super(TransformerEncoderUnit, self).__init__()\n self.feat_dim = feat_dim\n self.attn_type = attn_type\n self.pos_en_flag = pos_en_flag\n self.P = P\n self.pos_en = 
PosEnSine(self.feat_dim // 2)\n self.attn = OurMultiheadAttention(feat_dim, n_head)\n self.linear1 = nn.Conv2d(self.feat_dim, self.feat_dim, 1)", + "detail": "modules.PyMAF.models.transformers.transformer_basics", + "documentation": {} + }, + { + "label": "TransformerEncoderUnitSparse", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.transformer_basics", + "description": "modules.PyMAF.models.transformers.transformer_basics", + "peekOfCode": "class TransformerEncoderUnitSparse(nn.Module):\n def __init__(self, feat_dim, n_head=8, pos_en_flag=True, ahw=None):\n super(TransformerEncoderUnitSparse, self).__init__()\n self.feat_dim = feat_dim\n self.pos_en_flag = pos_en_flag\n self.ahw = ahw # [Ph, Pw, Qh, Qw]\n self.pos_en = PosEnSine(self.feat_dim // 2)\n self.attn1 = OurMultiheadAttention(feat_dim, n_head) # long range\n self.attn2 = OurMultiheadAttention(feat_dim, n_head) # short range\n self.linear1 = nn.Conv2d(self.feat_dim, self.feat_dim, 1)", + "detail": "modules.PyMAF.models.transformers.transformer_basics", + "documentation": {} + }, + { + "label": "TransformerDecoderUnit", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.transformer_basics", + "description": "modules.PyMAF.models.transformers.transformer_basics", + "peekOfCode": "class TransformerDecoderUnit(nn.Module):\n def __init__(self, feat_dim, n_head=8, pos_en_flag=True, attn_type='softmax', P=None):\n super(TransformerDecoderUnit, self).__init__()\n self.feat_dim = feat_dim\n self.attn_type = attn_type\n self.pos_en_flag = pos_en_flag\n self.P = P\n self.pos_en = PosEnSine(self.feat_dim // 2)\n self.attn1 = OurMultiheadAttention(feat_dim, n_head) # self-attention\n self.attn2 = OurMultiheadAttention(feat_dim, n_head) # cross-attention", + "detail": "modules.PyMAF.models.transformers.transformer_basics", + "documentation": {} + }, + { + "label": "TransformerDecoderUnitSparse", + "kind": 6, + "importPath": "modules.PyMAF.models.transformers.transformer_basics", + "description": "modules.PyMAF.models.transformers.transformer_basics", + "peekOfCode": "class TransformerDecoderUnitSparse(nn.Module):\n def __init__(self, feat_dim, n_head=8, pos_en_flag=True, ahw=None):\n super(TransformerDecoderUnitSparse, self).__init__()\n self.feat_dim = feat_dim\n self.ahw = ahw # [Ph_tgt, Pw_tgt, Qh_tgt, Qw_tgt, Ph_src, Pw_src, Qh_tgt, Qw_tgt]\n self.pos_en_flag = pos_en_flag\n self.pos_en = PosEnSine(self.feat_dim // 2)\n self.attn1_1 = OurMultiheadAttention(feat_dim, n_head) # self-attention: long\n self.attn1_2 = OurMultiheadAttention(feat_dim, n_head) # self-attention: short\n self.attn2_1 = OurMultiheadAttention(feat_dim, n_head) # cross-attention: self-attention-long + cross-attention-short", + "detail": "modules.PyMAF.models.transformers.transformer_basics", + "documentation": {} + }, + { + "label": "BertSelfAttention", + "kind": 6, + "importPath": "modules.PyMAF.models.attention", + "description": "modules.PyMAF.models.attention", + "peekOfCode": "class BertSelfAttention(nn.Module):\n def __init__(self, config):\n super(BertSelfAttention, self).__init__()\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads))\n self.output_attentions = config.output_attentions\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)", + "detail": "modules.PyMAF.models.attention", 
+ "documentation": {} + }, + { + "label": "BertAttention", + "kind": 6, + "importPath": "modules.PyMAF.models.attention", + "description": "modules.PyMAF.models.attention", + "peekOfCode": "class BertAttention(nn.Module):\n def __init__(self, config):\n super(BertAttention, self).__init__()\n self.self = BertSelfAttention(config)\n self.output = BertSelfOutput(config)\n def prune_heads(self, heads):\n if len(heads) == 0:\n return\n mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)\n for head in heads:", + "detail": "modules.PyMAF.models.attention", + "documentation": {} + }, + { + "label": "AttLayer", + "kind": 6, + "importPath": "modules.PyMAF.models.attention", + "description": "modules.PyMAF.models.attention", + "peekOfCode": "class AttLayer(nn.Module):\n def __init__(self, config):\n super(AttLayer, self).__init__()\n self.attention = BertAttention(config)\n self.intermediate = BertIntermediate(config)\n self.output = BertOutput(config)\n def MHA(self, hidden_states, attention_mask, head_mask=None,\n history_state=None):\n attention_outputs = self.attention(hidden_states, attention_mask,\n head_mask, history_state)", + "detail": "modules.PyMAF.models.attention", + "documentation": {} + }, + { + "label": "AttEncoder", + "kind": 6, + "importPath": "modules.PyMAF.models.attention", + "description": "modules.PyMAF.models.attention", + "peekOfCode": "class AttEncoder(nn.Module):\n def __init__(self, config):\n super(AttEncoder, self).__init__()\n self.output_attentions = config.output_attentions\n self.output_hidden_states = config.output_hidden_states\n self.layer = nn.ModuleList([AttLayer(config) for _ in range(config.num_hidden_layers)])\n def forward(self, hidden_states, attention_mask, head_mask=None,\n encoder_history_states=None):\n all_hidden_states = ()\n all_attentions = ()", + "detail": "modules.PyMAF.models.attention", + "documentation": {} + }, + { + "label": "EncoderBlock", + "kind": 6, + "importPath": "modules.PyMAF.models.attention", + "description": "modules.PyMAF.models.attention", + "peekOfCode": "class EncoderBlock(BertPreTrainedModel):\n def __init__(self, config):\n super(EncoderBlock, self).__init__(config)\n self.config = config\n # self.embeddings = BertEmbeddings(config)\n self.encoder = AttEncoder(config)\n # self.pooler = BertPooler(config)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.img_dim = config.img_feature_dim \n try:", + "detail": "modules.PyMAF.models.attention", + "documentation": {} + }, + { + "label": "Graphormer", + "kind": 6, + "importPath": "modules.PyMAF.models.attention", + "description": "modules.PyMAF.models.attention", + "peekOfCode": "class Graphormer(BertPreTrainedModel):\n '''\n The archtecture of a transformer encoder block we used in Graphormer\n '''\n def __init__(self, config):\n super(Graphormer, self).__init__(config)\n self.config = config\n self.bert = EncoderBlock(config)\n self.cls_head = nn.Linear(config.hidden_size, self.config.output_feature_dim)\n self.residual = nn.Linear(config.img_feature_dim, self.config.output_feature_dim)", + "detail": "modules.PyMAF.models.attention", + "documentation": {} + }, + { + "label": "get_att_block", + "kind": 2, + "importPath": "modules.PyMAF.models.attention", + "description": "modules.PyMAF.models.attention", + "peekOfCode": "def get_att_block(img_feature_dim=2048, output_feat_dim=512, hidden_feat_dim=1024, num_attention_heads=4, num_hidden_layers=1):\n config_class = BertConfig\n # config = 
config_class.from_pretrained('models/transformers/bert/bert-base-uncased/')\n config = config_class.from_pretrained('bert-base-uncased')\n interm_size_scale = 2\n config.output_attentions = False\n # config.hidden_dropout_prob = args.drop_out\n config.img_feature_dim = img_feature_dim\n # config.output_feature_dim = output_feat_dim\n config.hidden_size = hidden_feat_dim", + "detail": "modules.PyMAF.models.attention", + "documentation": {} + }, + { + "label": "LayerNormClass", + "kind": 5, + "importPath": "modules.PyMAF.models.attention", + "description": "modules.PyMAF.models.attention", + "peekOfCode": "LayerNormClass = torch.nn.LayerNorm\nBertLayerNorm = torch.nn.LayerNorm\nfrom .transformers.bert import BertConfig\nclass BertSelfAttention(nn.Module):\n def __init__(self, config):\n super(BertSelfAttention, self).__init__()\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads))", + "detail": "modules.PyMAF.models.attention", + "documentation": {} + }, + { + "label": "BertLayerNorm", + "kind": 5, + "importPath": "modules.PyMAF.models.attention", + "description": "modules.PyMAF.models.attention", + "peekOfCode": "BertLayerNorm = torch.nn.LayerNorm\nfrom .transformers.bert import BertConfig\nclass BertSelfAttention(nn.Module):\n def __init__(self, config):\n super(BertSelfAttention, self).__init__()\n if config.hidden_size % config.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads))\n self.output_attentions = config.output_attentions", + "detail": "modules.PyMAF.models.attention", + "documentation": {} + }, + { + "label": "Bottleneck", + "kind": 6, + "importPath": "modules.PyMAF.models.hmr", + "description": "modules.PyMAF.models.hmr", + "peekOfCode": "class Bottleneck(nn.Module):\n \"\"\" Redefinition of Bottleneck residual block\n Adapted from the official PyTorch implementation\n \"\"\"\n expansion = 4\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super().__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,", + "detail": "modules.PyMAF.models.hmr", + "documentation": {} + }, + { + "label": "ResNet_Backbone", + "kind": 6, + "importPath": "modules.PyMAF.models.hmr", + "description": "modules.PyMAF.models.hmr", + "peekOfCode": "class ResNet_Backbone(nn.Module):\n \"\"\" Feature Extractor with ResNet backbone\n \"\"\"\n def __init__(self, model='res50', pretrained=True):\n if model == 'res50':\n block, layers = Bottleneck, [3, 4, 6, 3]\n else:\n pass # TODO\n self.inplanes = 64\n super().__init__()", + "detail": "modules.PyMAF.models.hmr", + "documentation": {} + }, + { + "label": "HMR", + "kind": 6, + "importPath": "modules.PyMAF.models.hmr", + "description": "modules.PyMAF.models.hmr", + "peekOfCode": "class HMR(nn.Module):\n \"\"\" SMPL Iterative Regressor with ResNet50 backbone\n \"\"\"\n def __init__(self, block, layers, smpl_mean_params):\n self.inplanes = 64\n super().__init__()\n npose = 24 * 6\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64)", + "detail": "modules.PyMAF.models.hmr", + "documentation": {} + }, + { + "label": "hmr", + "kind": 2, + "importPath": 
"modules.PyMAF.models.hmr", + "description": "modules.PyMAF.models.hmr", + "peekOfCode": "def hmr(smpl_mean_params, pretrained=True, **kwargs):\n \"\"\" Constructs an HMR model with ResNet50 backbone.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = HMR(Bottleneck, [3, 4, 6, 3], smpl_mean_params, **kwargs)\n if pretrained:\n resnet_imagenet = resnet.resnet50(pretrained=True)\n model.load_state_dict(resnet_imagenet.state_dict(),strict=False)\n return model", + "detail": "modules.PyMAF.models.hmr", + "documentation": {} + }, + { + "label": "logger", + "kind": 5, + "importPath": "modules.PyMAF.models.hmr", + "description": "modules.PyMAF.models.hmr", + "peekOfCode": "logger = logging.getLogger(__name__)\nBN_MOMENTUM = 0.1\nclass Bottleneck(nn.Module):\n \"\"\" Redefinition of Bottleneck residual block\n Adapted from the official PyTorch implementation\n \"\"\"\n expansion = 4\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super().__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)", + "detail": "modules.PyMAF.models.hmr", + "documentation": {} + }, + { + "label": "BN_MOMENTUM", + "kind": 5, + "importPath": "modules.PyMAF.models.hmr", + "description": "modules.PyMAF.models.hmr", + "peekOfCode": "BN_MOMENTUM = 0.1\nclass Bottleneck(nn.Module):\n \"\"\" Redefinition of Bottleneck residual block\n Adapted from the official PyTorch implementation\n \"\"\"\n expansion = 4\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super().__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)", + "detail": "modules.PyMAF.models.hmr", + "documentation": {} + }, + { + "label": "HighResolutionModule", + "kind": 6, + "importPath": "modules.PyMAF.models.hr_module", + "description": "modules.PyMAF.models.hr_module", + "peekOfCode": "class HighResolutionModule(nn.Module):\n def __init__(self, num_branches, blocks, num_blocks, num_inchannels,\n num_channels, fuse_method, multi_scale_output=True):\n super().__init__()\n self._check_branches(\n num_branches, blocks, num_blocks, num_inchannels, num_channels)\n self.num_inchannels = num_inchannels\n self.fuse_method = fuse_method\n self.num_branches = num_branches\n self.multi_scale_output = multi_scale_output", + "detail": "modules.PyMAF.models.hr_module", + "documentation": {} + }, + { + "label": "PoseHighResolutionNet", + "kind": 6, + "importPath": "modules.PyMAF.models.hr_module", + "description": "modules.PyMAF.models.hr_module", + "peekOfCode": "class PoseHighResolutionNet(nn.Module):\n def __init__(self, cfg, pretrained=True, global_mode=False):\n self.inplanes = 64\n extra = cfg.HR_MODEL.EXTRA\n super().__init__()\n # stem net\n self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)\n self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1,", + "detail": "modules.PyMAF.models.hr_module", + "documentation": {} + }, + { + "label": "get_hrnet_encoder", + "kind": 2, + "importPath": "modules.PyMAF.models.hr_module", + "description": "modules.PyMAF.models.hr_module", + "peekOfCode": "def get_hrnet_encoder(cfg, init_weight=True, global_mode=False, **kwargs):\n model = PoseHighResolutionNet(cfg, global_mode=global_mode)\n if init_weight:\n if cfg.HR_MODEL.PRETR_SET in ['imagenet']:\n model.init_weights(cfg.HR_MODEL.PRETRAINED_IM)\n logger.info('loaded HRNet imagenet pretrained model')\n elif 
cfg.HR_MODEL.PRETR_SET in ['coco']:\n model.init_weights(cfg.HR_MODEL.PRETRAINED_COCO)\n logger.info('loaded HRNet coco pretrained model')\n else:", + "detail": "modules.PyMAF.models.hr_module", + "documentation": {} + }, + { + "label": "logger", + "kind": 5, + "importPath": "modules.PyMAF.models.hr_module", + "description": "modules.PyMAF.models.hr_module", + "peekOfCode": "logger = logging.getLogger(__name__)\nBN_MOMENTUM = 0.1\nclass HighResolutionModule(nn.Module):\n def __init__(self, num_branches, blocks, num_blocks, num_inchannels,\n num_channels, fuse_method, multi_scale_output=True):\n super().__init__()\n self._check_branches(\n num_branches, blocks, num_blocks, num_inchannels, num_channels)\n self.num_inchannels = num_inchannels\n self.fuse_method = fuse_method", + "detail": "modules.PyMAF.models.hr_module", + "documentation": {} + }, + { + "label": "BN_MOMENTUM", + "kind": 5, + "importPath": "modules.PyMAF.models.hr_module", + "description": "modules.PyMAF.models.hr_module", + "peekOfCode": "BN_MOMENTUM = 0.1\nclass HighResolutionModule(nn.Module):\n def __init__(self, num_branches, blocks, num_blocks, num_inchannels,\n num_channels, fuse_method, multi_scale_output=True):\n super().__init__()\n self._check_branches(\n num_branches, blocks, num_blocks, num_inchannels, num_channels)\n self.num_inchannels = num_inchannels\n self.fuse_method = fuse_method\n self.num_branches = num_branches", + "detail": "modules.PyMAF.models.hr_module", + "documentation": {} + }, + { + "label": "blocks_dict", + "kind": 5, + "importPath": "modules.PyMAF.models.hr_module", + "description": "modules.PyMAF.models.hr_module", + "peekOfCode": "blocks_dict = {\n 'BASIC': BasicBlock,\n 'BOTTLENECK': Bottleneck\n}\nclass PoseHighResolutionNet(nn.Module):\n def __init__(self, cfg, pretrained=True, global_mode=False):\n self.inplanes = 64\n extra = cfg.HR_MODEL.EXTRA\n super().__init__()\n # stem net", + "detail": "modules.PyMAF.models.hr_module", + "documentation": {} + }, + { + "label": "TransformerDecoderUnit", + "kind": 6, + "importPath": "modules.PyMAF.models.maf_extractor", + "description": "modules.PyMAF.models.maf_extractor", + "peekOfCode": "class TransformerDecoderUnit(nn.Module):\n def __init__(self, feat_dim, attri_dim=0, n_head=8, pos_en_flag=True, attn_type='softmax', P=None):\n super(TransformerDecoderUnit, self).__init__()\n self.feat_dim = feat_dim\n self.attn_type = attn_type\n self.pos_en_flag = pos_en_flag\n self.P = P\n assert attri_dim == 0\n if self.pos_en_flag:\n pe_dim = 10", + "detail": "modules.PyMAF.models.maf_extractor", + "documentation": {} + }, + { + "label": "Mesh_Sampler", + "kind": 6, + "importPath": "modules.PyMAF.models.maf_extractor", + "description": "modules.PyMAF.models.maf_extractor", + "peekOfCode": "class Mesh_Sampler(nn.Module):\n ''' Mesh Up/Down-sampling\n '''\n def __init__(self, type='smpl', level=2, device=torch.device('cuda'), option=None):\n super().__init__()\n # downsample SMPL mesh and assign part labels\n if type == 'smpl':\n # from https://github.com/nkolot/GraphCMR/blob/master/data/mesh_downsampling.npz\n smpl_mesh_graph = np.load('data/smpl_downsampling.npz', allow_pickle=True, encoding='latin1')\n A = smpl_mesh_graph['A']", + "detail": "modules.PyMAF.models.maf_extractor", + "documentation": {} + }, + { + "label": "MAF_Extractor", + "kind": 6, + "importPath": "modules.PyMAF.models.maf_extractor", + "description": "modules.PyMAF.models.maf_extractor", + "peekOfCode": "class MAF_Extractor(nn.Module):\n ''' Mesh-aligned Feature Extractor\n As discussed 
in the paper, we extract mesh-aligned features based on 2D projection of the mesh vertices.\n The features extracted from spatial feature maps will go through an MLP for dimension reduction.\n '''\n def __init__(self, filter_channels, device=torch.device('cuda'), iwp_cam_mode=True, option=None):\n super().__init__()\n self.device = device\n self.filters = []\n self.num_views = 1", + "detail": "modules.PyMAF.models.maf_extractor", + "documentation": {} + }, + { + "label": "logger", + "kind": 5, + "importPath": "modules.PyMAF.models.maf_extractor", + "description": "modules.PyMAF.models.maf_extractor", + "peekOfCode": "logger = logging.getLogger(__name__)\nfrom .transformers.net_utils import PosEnSine\nfrom .transformers.transformer_basics import OurMultiheadAttention\nfrom utils.iuvmap import iuv_img2map, iuv_map2img, seg_img2map\nfrom .smpl import get_smpl_tpose\nfrom utils.imutils import j2d_processing\nclass TransformerDecoderUnit(nn.Module):\n def __init__(self, feat_dim, attri_dim=0, n_head=8, pos_en_flag=True, attn_type='softmax', P=None):\n super(TransformerDecoderUnit, self).__init__()\n self.feat_dim = feat_dim", + "detail": "modules.PyMAF.models.maf_extractor", + "documentation": {} + }, + { + "label": "BasicBlock", + "kind": 6, + "importPath": "modules.PyMAF.models.pose_resnet", + "description": "modules.PyMAF.models.pose_resnet", + "peekOfCode": "class BasicBlock(nn.Module):\n expansion = 1\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.downsample = downsample", + "detail": "modules.PyMAF.models.pose_resnet", + "documentation": {} + }, + { + "label": "Bottleneck", + "kind": 6, + "importPath": "modules.PyMAF.models.pose_resnet", + "description": "modules.PyMAF.models.pose_resnet", + "peekOfCode": "class Bottleneck(nn.Module):\n expansion = 4\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,", + "detail": "modules.PyMAF.models.pose_resnet", + "documentation": {} + }, + { + "label": "PoseResNet", + "kind": 6, + "importPath": "modules.PyMAF.models.pose_resnet", + "description": "modules.PyMAF.models.pose_resnet", + "peekOfCode": "class PoseResNet(nn.Module):\n def __init__(self, block, layers, cfg, global_mode, **kwargs):\n self.inplanes = 64\n extra = cfg.POSE_RES_MODEL.EXTRA\n self.extra = extra\n self.deconv_with_bias = extra.DECONV_WITH_BIAS\n super(PoseResNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)", + "detail": "modules.PyMAF.models.pose_resnet", + "documentation": {} + }, + { + "label": "conv3x3", + "kind": 2, + "importPath": "modules.PyMAF.models.pose_resnet", + "description": "modules.PyMAF.models.pose_resnet", + "peekOfCode": "def conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(\n in_planes, out_planes, 
kernel_size=3, stride=stride,\n padding=1, bias=False\n )\nclass BasicBlock(nn.Module):\n expansion = 1\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()", + "detail": "modules.PyMAF.models.pose_resnet", + "documentation": {} + }, + { + "label": "get_resnet_encoder", + "kind": 2, + "importPath": "modules.PyMAF.models.pose_resnet", + "description": "modules.PyMAF.models.pose_resnet", + "peekOfCode": "def get_resnet_encoder(cfg, init_weight=True, global_mode=False, **kwargs):\n num_layers = cfg.POSE_RES_MODEL.EXTRA.NUM_LAYERS\n block_class, layers = resnet_spec[num_layers]\n model = PoseResNet(block_class, layers, cfg, global_mode, **kwargs)\n if init_weight:\n if num_layers == 50:\n if cfg.POSE_RES_MODEL.PRETR_SET in ['imagenet']:\n model.init_weights(cfg.POSE_RES_MODEL.PRETRAINED_IM)\n logger.info('loaded ResNet imagenet pretrained model')\n elif cfg.POSE_RES_MODEL.PRETR_SET in ['coco']:", + "detail": "modules.PyMAF.models.pose_resnet", + "documentation": {} + }, + { + "label": "BN_MOMENTUM", + "kind": 5, + "importPath": "modules.PyMAF.models.pose_resnet", + "description": "modules.PyMAF.models.pose_resnet", + "peekOfCode": "BN_MOMENTUM = 0.1\nlogger = logging.getLogger(__name__)\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(\n in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False\n )\nclass BasicBlock(nn.Module):\n expansion = 1", + "detail": "modules.PyMAF.models.pose_resnet", + "documentation": {} + }, + { + "label": "logger", + "kind": 5, + "importPath": "modules.PyMAF.models.pose_resnet", + "description": "modules.PyMAF.models.pose_resnet", + "peekOfCode": "logger = logging.getLogger(__name__)\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(\n in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False\n )\nclass BasicBlock(nn.Module):\n expansion = 1\n def __init__(self, inplanes, planes, stride=1, downsample=None):", + "detail": "modules.PyMAF.models.pose_resnet", + "documentation": {} + }, + { + "label": "resnet_spec", + "kind": 5, + "importPath": "modules.PyMAF.models.pose_resnet", + "description": "modules.PyMAF.models.pose_resnet", + "peekOfCode": "resnet_spec = {\n 18: (BasicBlock, [2, 2, 2, 2]),\n 34: (BasicBlock, [3, 4, 6, 3]),\n 50: (Bottleneck, [3, 4, 6, 3]),\n 101: (Bottleneck, [3, 4, 23, 3]),\n 152: (Bottleneck, [3, 8, 36, 3])\n}\ndef get_resnet_encoder(cfg, init_weight=True, global_mode=False, **kwargs):\n num_layers = cfg.POSE_RES_MODEL.EXTRA.NUM_LAYERS\n block_class, layers = resnet_spec[num_layers]", + "detail": "modules.PyMAF.models.pose_resnet", + "documentation": {} + }, + { + "label": "Regressor", + "kind": 6, + "importPath": "modules.PyMAF.models.pymaf_net", + "description": "modules.PyMAF.models.pymaf_net", + "peekOfCode": "class Regressor(nn.Module):\n def __init__(self, feat_dim, smpl_mean_params, use_cam_feats=False, feat_dim_hand=0, feat_dim_face=0, bhf_names=['body'], smpl_models={}):\n super().__init__()\n npose = 24 * 6\n shape_dim = 10\n cam_dim = 3\n hand_dim = 15 * 6\n face_dim = 3 * 6 + 10\n self.body_feat_dim = feat_dim\n self.smpl_mode = (cfg.MODEL.MESH_MODEL == 'smpl')", + "detail": "modules.PyMAF.models.pymaf_net", + "documentation": {} + }, + { + "label": "PyMAF", + "kind": 6, + "importPath": "modules.PyMAF.models.pymaf_net", + "description": "modules.PyMAF.models.pymaf_net", + "peekOfCode": "class PyMAF(nn.Module):\n \"\"\" PyMAF 
based Regression Network for Human Mesh Recovery / Full-body Mesh Recovery\n PyMAF: 3D Human Pose and Shape Regression with Pyramidal Mesh Alignment Feedback Loop, in ICCV, 2021\n PyMAF-X: Towards Well-aligned Full-body Model Regression from Monocular Images, arXiv:2207.06400, 2022\n \"\"\"\n def __init__(self, smpl_mean_params=SMPL_MEAN_PARAMS, pretrained=True, device=torch.device('cuda')):\n super().__init__()\n self.device = device\n self.smpl_mode = (cfg.MODEL.MESH_MODEL == 'smpl')\n self.smplx_mode = (cfg.MODEL.MESH_MODEL == 'smplx')", + "detail": "modules.PyMAF.models.pymaf_net", + "documentation": {} + }, + { + "label": "get_attention_modules", + "kind": 2, + "importPath": "modules.PyMAF.models.pymaf_net", + "description": "modules.PyMAF.models.pymaf_net", + "peekOfCode": "def get_attention_modules(module_keys, img_feature_dim_list, hidden_feat_dim, n_iter, num_attention_heads=1):\n align_attention = nn.ModuleDict()\n for k in module_keys:\n align_attention[k] = nn.ModuleList()\n for i in range(n_iter):\n align_attention[k].append(get_att_block(img_feature_dim=img_feature_dim_list[k][i], hidden_feat_dim=hidden_feat_dim, num_attention_heads=num_attention_heads))\n return align_attention\ndef get_fusion_modules(module_keys, ma_feat_dim, grid_feat_dim, n_iter, out_feat_len):\n feat_fusion = nn.ModuleDict()\n for k in module_keys:", + "detail": "modules.PyMAF.models.pymaf_net", + "documentation": {} + }, + { + "label": "get_fusion_modules", + "kind": 2, + "importPath": "modules.PyMAF.models.pymaf_net", + "description": "modules.PyMAF.models.pymaf_net", + "peekOfCode": "def get_fusion_modules(module_keys, ma_feat_dim, grid_feat_dim, n_iter, out_feat_len):\n feat_fusion = nn.ModuleDict()\n for k in module_keys:\n feat_fusion[k] = nn.ModuleList()\n for i in range(n_iter):\n feat_fusion[k].append(nn.Linear(grid_feat_dim + ma_feat_dim[k], out_feat_len[k]))\n return feat_fusion\nclass PyMAF(nn.Module):\n \"\"\" PyMAF based Regression Network for Human Mesh Recovery / Full-body Mesh Recovery\n PyMAF: 3D Human Pose and Shape Regression with Pyramidal Mesh Alignment Feedback Loop, in ICCV, 2021", + "detail": "modules.PyMAF.models.pymaf_net", + "documentation": {} + }, + { + "label": "pymaf_net", + "kind": 2, + "importPath": "modules.PyMAF.models.pymaf_net", + "description": "modules.PyMAF.models.pymaf_net", + "peekOfCode": "def pymaf_net(smpl_mean_params, pretrained=True, device=torch.device('cuda')):\n \"\"\" Constructs a PyMAF model with ResNet50 backbone.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = PyMAF(smpl_mean_params, pretrained, device)\n return model", + "detail": "modules.PyMAF.models.pymaf_net", + "documentation": {} + }, + { + "label": "logger", + "kind": 5, + "importPath": "modules.PyMAF.models.pymaf_net", + "description": "modules.PyMAF.models.pymaf_net", + "peekOfCode": "logger = logging.getLogger(__name__)\nBN_MOMENTUM = 0.1\nclass Regressor(nn.Module):\n def __init__(self, feat_dim, smpl_mean_params, use_cam_feats=False, feat_dim_hand=0, feat_dim_face=0, bhf_names=['body'], smpl_models={}):\n super().__init__()\n npose = 24 * 6\n shape_dim = 10\n cam_dim = 3\n hand_dim = 15 * 6\n face_dim = 3 * 6 + 10", + "detail": "modules.PyMAF.models.pymaf_net", + "documentation": {} + }, + { + "label": "BN_MOMENTUM", + "kind": 5, + "importPath": "modules.PyMAF.models.pymaf_net", + "description": "modules.PyMAF.models.pymaf_net", + "peekOfCode": "BN_MOMENTUM = 0.1\nclass Regressor(nn.Module):\n def __init__(self, feat_dim, 
smpl_mean_params, use_cam_feats=False, feat_dim_hand=0, feat_dim_face=0, bhf_names=['body'], smpl_models={}):\n super().__init__()\n npose = 24 * 6\n shape_dim = 10\n cam_dim = 3\n hand_dim = 15 * 6\n face_dim = 3 * 6 + 10\n self.body_feat_dim = feat_dim", + "detail": "modules.PyMAF.models.pymaf_net", + "documentation": {} + }, + { + "label": "BasicBlock", + "kind": 6, + "importPath": "modules.PyMAF.models.res_module", + "description": "modules.PyMAF.models.res_module", + "peekOfCode": "class BasicBlock(nn.Module):\n expansion = 1\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1):\n super().__init__()\n self.conv1 = conv3x3(inplanes, planes, stride, groups=groups)\n self.bn1 = nn.BatchNorm2d(planes * groups, momentum=BN_MOMENTUM)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes, groups=groups)\n self.bn2 = nn.BatchNorm2d(planes * groups, momentum=BN_MOMENTUM)\n self.downsample = downsample", + "detail": "modules.PyMAF.models.res_module", + "documentation": {} + }, + { + "label": "Bottleneck", + "kind": 6, + "importPath": "modules.PyMAF.models.res_module", + "description": "modules.PyMAF.models.res_module", + "peekOfCode": "class Bottleneck(nn.Module):\n expansion = 4\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1):\n super().__init__()\n self.conv1 = nn.Conv2d(inplanes * groups, planes * groups, kernel_size=1, bias=False, groups=groups)\n self.bn1 = nn.BatchNorm2d(planes * groups, momentum=BN_MOMENTUM)\n self.conv2 = nn.Conv2d(planes * groups, planes * groups, kernel_size=3, stride=stride,\n padding=1, bias=False, groups=groups)\n self.bn2 = nn.BatchNorm2d(planes * groups, momentum=BN_MOMENTUM)\n self.conv3 = nn.Conv2d(planes * groups, planes * self.expansion * groups, kernel_size=1,", + "detail": "modules.PyMAF.models.res_module", + "documentation": {} + }, + { + "label": "IUV_predict_layer", + "kind": 6, + "importPath": "modules.PyMAF.models.res_module", + "description": "modules.PyMAF.models.res_module", + "peekOfCode": "class IUV_predict_layer(nn.Module):\n def __init__(self, feat_dim=256, final_cov_k=3, out_channels=25, with_uv=True, mode='iuv'):\n super().__init__()\n assert mode in ['iuv', 'seg', 'pncc']\n self.mode = mode\n if mode == 'seg':\n self.predict_ann_index = nn.Conv2d(\n in_channels=feat_dim,\n out_channels=15,\n kernel_size=final_cov_k,", + "detail": "modules.PyMAF.models.res_module", + "documentation": {} + }, + { + "label": "Seg_predict_layer", + "kind": 6, + "importPath": "modules.PyMAF.models.res_module", + "description": "modules.PyMAF.models.res_module", + "peekOfCode": "class Seg_predict_layer(nn.Module):\n def __init__(self, feat_dim=256, final_cov_k=3, out_channels=25):\n super().__init__()\n self.predict_seg_index = nn.Conv2d(\n in_channels=feat_dim,\n out_channels=out_channels,\n kernel_size=final_cov_k,\n stride=1,\n padding=1 if final_cov_k == 3 else 0\n )", + "detail": "modules.PyMAF.models.res_module", + "documentation": {} + }, + { + "label": "Kps_predict_layer", + "kind": 6, + "importPath": "modules.PyMAF.models.res_module", + "description": "modules.PyMAF.models.res_module", + "peekOfCode": "class Kps_predict_layer(nn.Module):\n def __init__(self, feat_dim=256, final_cov_k=3, out_channels=3, add_module=None):\n super().__init__()\n if add_module is not None:\n conv = nn.Conv2d(\n in_channels=feat_dim,\n out_channels=out_channels,\n kernel_size=final_cov_k,\n stride=1,\n padding=1 if final_cov_k == 3 else 0", + "detail": "modules.PyMAF.models.res_module", + "documentation": {} 
+ }, + { + "label": "SmplResNet", + "kind": 6, + "importPath": "modules.PyMAF.models.res_module", + "description": "modules.PyMAF.models.res_module", + "peekOfCode": "class SmplResNet(nn.Module):\n def __init__(self, resnet_nums, in_channels=3, num_classes=229, last_stride=2, n_extra_feat=0, truncate=0, **kwargs):\n super().__init__()\n self.inplanes = 64\n self.truncate = truncate\n # extra = cfg.MODEL.EXTRA\n # self.deconv_with_bias = extra.DECONV_WITH_BIAS\n block, layers = resnet_spec[resnet_nums]\n self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3,\n bias=False)", + "detail": "modules.PyMAF.models.res_module", + "documentation": {} + }, + { + "label": "LimbResLayers", + "kind": 6, + "importPath": "modules.PyMAF.models.res_module", + "description": "modules.PyMAF.models.res_module", + "peekOfCode": "class LimbResLayers(nn.Module):\n def __init__(self, resnet_nums, inplanes, outplanes=None, groups=1, **kwargs):\n super().__init__()\n self.inplanes = inplanes\n block, layers = resnet_spec[resnet_nums]\n self.outplanes = 256 if outplanes == None else outplanes\n self.layer3 = self._make_layer(block, self.outplanes, layers[2], stride=2, groups=groups)\n # self.outplanes = 512 if outplanes == None else outplanes\n # self.layer4 = self._make_layer(block, self.outplanes, layers[3], stride=2, groups=groups)\n self.avg_pooling = nn.AdaptiveAvgPool2d(1)", + "detail": "modules.PyMAF.models.res_module", + "documentation": {} + }, + { + "label": "conv3x3", + "kind": 2, + "importPath": "modules.PyMAF.models.res_module", + "description": "modules.PyMAF.models.res_module", + "peekOfCode": "def conv3x3(in_planes, out_planes, stride=1, bias=False, groups=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes * groups, out_planes * groups, kernel_size=3, stride=stride,\n padding=1, bias=bias, groups=groups)\nclass BasicBlock(nn.Module):\n expansion = 1\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1):\n super().__init__()\n self.conv1 = conv3x3(inplanes, planes, stride, groups=groups)\n self.bn1 = nn.BatchNorm2d(planes * groups, momentum=BN_MOMENTUM)", + "detail": "modules.PyMAF.models.res_module", + "documentation": {} + }, + { + "label": "logger", + "kind": 5, + "importPath": "modules.PyMAF.models.res_module", + "description": "modules.PyMAF.models.res_module", + "peekOfCode": "logger = logging.getLogger(__name__)\nBN_MOMENTUM = 0.1\ndef conv3x3(in_planes, out_planes, stride=1, bias=False, groups=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes * groups, out_planes * groups, kernel_size=3, stride=stride,\n padding=1, bias=bias, groups=groups)\nclass BasicBlock(nn.Module):\n expansion = 1\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1):\n super().__init__()", + "detail": "modules.PyMAF.models.res_module", + "documentation": {} + }, + { + "label": "BN_MOMENTUM", + "kind": 5, + "importPath": "modules.PyMAF.models.res_module", + "description": "modules.PyMAF.models.res_module", + "peekOfCode": "BN_MOMENTUM = 0.1\ndef conv3x3(in_planes, out_planes, stride=1, bias=False, groups=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes * groups, out_planes * groups, kernel_size=3, stride=stride,\n padding=1, bias=bias, groups=groups)\nclass BasicBlock(nn.Module):\n expansion = 1\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1):\n super().__init__()\n self.conv1 = conv3x3(inplanes, planes, stride, groups=groups)", + "detail": 
"modules.PyMAF.models.res_module", + "documentation": {} + }, + { + "label": "resnet_spec", + "kind": 5, + "importPath": "modules.PyMAF.models.res_module", + "description": "modules.PyMAF.models.res_module", + "peekOfCode": "resnet_spec = {18: (BasicBlock, [2, 2, 2, 2]),\n 34: (BasicBlock, [3, 4, 6, 3]),\n 50: (Bottleneck, [3, 4, 6, 3]),\n 101: (Bottleneck, [3, 4, 23, 3]),\n 152: (Bottleneck, [3, 8, 36, 3])}\nclass IUV_predict_layer(nn.Module):\n def __init__(self, feat_dim=256, final_cov_k=3, out_channels=25, with_uv=True, mode='iuv'):\n super().__init__()\n assert mode in ['iuv', 'seg', 'pncc']\n self.mode = mode", + "detail": "modules.PyMAF.models.res_module", + "documentation": {} + }, + { + "label": "ModelOutput", + "kind": 6, + "importPath": "modules.PyMAF.models.smpl", + "description": "modules.PyMAF.models.smpl", + "peekOfCode": "class ModelOutput(SMPLXOutput):\n smpl_joints: Optional[torch.Tensor] = None\n joints_J19: Optional[torch.Tensor] = None\n smplx_vertices: Optional[torch.Tensor] = None\n flame_vertices: Optional[torch.Tensor] = None\n lhand_vertices: Optional[torch.Tensor] = None\n rhand_vertices: Optional[torch.Tensor] = None\n lhand_joints: Optional[torch.Tensor] = None\n rhand_joints: Optional[torch.Tensor] = None\n face_joints: Optional[torch.Tensor] = None", + "detail": "modules.PyMAF.models.smpl", + "documentation": {} + }, + { + "label": "SMPL", + "kind": 6, + "importPath": "modules.PyMAF.models.smpl", + "description": "modules.PyMAF.models.smpl", + "peekOfCode": "class SMPL(_SMPL):\n \"\"\" Extension of the official SMPL implementation to support more joints \"\"\"\n def __init__(self, create_betas=False, create_global_orient=False, create_body_pose=False, create_transl=False, *args, **kwargs):\n super().__init__(create_betas=create_betas, \n create_global_orient=create_global_orient, \n create_body_pose=create_body_pose, \n create_transl=create_transl, *args, **kwargs)\n joints = [constants.JOINT_MAP[i] for i in constants.JOINT_NAMES]\n J_regressor_extra = np.load(path_config.JOINT_REGRESSOR_TRAIN_EXTRA)\n self.register_buffer('J_regressor_extra', torch.tensor(J_regressor_extra, dtype=torch.float32))", + "detail": "modules.PyMAF.models.smpl", + "documentation": {} + }, + { + "label": "SMPLX", + "kind": 6, + "importPath": "modules.PyMAF.models.smpl", + "description": "modules.PyMAF.models.smpl", + "peekOfCode": "class SMPLX(SMPLXLayer):\n \"\"\" Extension of the official SMPLX implementation to support more functions \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n def get_global_rotation(\n self,\n global_orient: Optional[torch.Tensor] = None,\n body_pose: Optional[torch.Tensor] = None,\n left_hand_pose: Optional[torch.Tensor] = None,\n right_hand_pose: Optional[torch.Tensor] = None,", + "detail": "modules.PyMAF.models.smpl", + "documentation": {} + }, + { + "label": "SMPLX_ALL", + "kind": 6, + "importPath": "modules.PyMAF.models.smpl", + "description": "modules.PyMAF.models.smpl", + "peekOfCode": "class SMPLX_ALL(nn.Module):\n \"\"\" Extension of the official SMPLX implementation to support more joints \"\"\"\n def __init__(self, batch_size=1, use_face_contour=True, all_gender=False, **kwargs):\n super().__init__()\n numBetas = 10\n self.use_face_contour = use_face_contour\n if all_gender:\n self.genders = ['male', 'female', 'neutral']\n else:\n self.genders = ['neutral']", + "detail": "modules.PyMAF.models.smpl", + "documentation": {} + }, + { + "label": "MANO", + "kind": 6, + "importPath": "modules.PyMAF.models.smpl", + 
"description": "modules.PyMAF.models.smpl", + "peekOfCode": "class MANO(MANOLayer):\n \"\"\" Extension of the official MANO implementation to support more joints \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n def forward(self, *args, **kwargs):\n if 'pose2rot' not in kwargs:\n kwargs['pose2rot'] = True\n pose_keys = ['global_orient', 'right_hand_pose']\n batch_size = kwargs['global_orient'].shape[0]\n if kwargs['pose2rot']:", + "detail": "modules.PyMAF.models.smpl", + "documentation": {} + }, + { + "label": "FLAME", + "kind": 6, + "importPath": "modules.PyMAF.models.smpl", + "description": "modules.PyMAF.models.smpl", + "peekOfCode": "class FLAME(FLAMELayer):\n \"\"\" Extension of the official FLAME implementation to support more joints \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n def forward(self, *args, **kwargs):\n if 'pose2rot' not in kwargs:\n kwargs['pose2rot'] = True\n pose_keys = ['global_orient', 'jaw_pose', 'leye_pose', 'reye_pose']\n batch_size = kwargs['global_orient'].shape[0]\n if kwargs['pose2rot']:", + "detail": "modules.PyMAF.models.smpl", + "documentation": {} + }, + { + "label": "SMPL_Family", + "kind": 6, + "importPath": "modules.PyMAF.models.smpl", + "description": "modules.PyMAF.models.smpl", + "peekOfCode": "class SMPL_Family():\n def __init__(self, model_type='smpl', *args, **kwargs):\n if model_type == 'smpl':\n self.model = SMPL(\n model_path=SMPL_MODEL_DIR,\n *args, **kwargs\n )\n elif model_type == 'smplx':\n self.model = SMPLX_ALL(*args, **kwargs)\n elif model_type == 'mano':", + "detail": "modules.PyMAF.models.smpl", + "documentation": {} + }, + { + "label": "get_smpl_faces", + "kind": 2, + "importPath": "modules.PyMAF.models.smpl", + "description": "modules.PyMAF.models.smpl", + "peekOfCode": "def get_smpl_faces():\n smpl = SMPL(model_path=SMPL_MODEL_DIR, batch_size=1)\n return smpl.faces\ndef get_smplx_faces():\n smplx = SMPLX(SMPL_MODEL_DIR, batch_size=1)\n return smplx.faces\ndef get_mano_faces(hand_type='right'):\n assert hand_type in ['right', 'left']\n is_rhand = True if hand_type == 'right' else False\n mano = MANO(SMPL_MODEL_DIR, batch_size=1, is_rhand=is_rhand)", + "detail": "modules.PyMAF.models.smpl", + "documentation": {} + }, + { + "label": "get_smplx_faces", + "kind": 2, + "importPath": "modules.PyMAF.models.smpl", + "description": "modules.PyMAF.models.smpl", + "peekOfCode": "def get_smplx_faces():\n smplx = SMPLX(SMPL_MODEL_DIR, batch_size=1)\n return smplx.faces\ndef get_mano_faces(hand_type='right'):\n assert hand_type in ['right', 'left']\n is_rhand = True if hand_type == 'right' else False\n mano = MANO(SMPL_MODEL_DIR, batch_size=1, is_rhand=is_rhand)\n return mano.faces\ndef get_flame_faces():\n flame = FLAME(SMPL_MODEL_DIR, batch_size=1)", + "detail": "modules.PyMAF.models.smpl", + "documentation": {} + }, + { + "label": "get_mano_faces", + "kind": 2, + "importPath": "modules.PyMAF.models.smpl", + "description": "modules.PyMAF.models.smpl", + "peekOfCode": "def get_mano_faces(hand_type='right'):\n assert hand_type in ['right', 'left']\n is_rhand = True if hand_type == 'right' else False\n mano = MANO(SMPL_MODEL_DIR, batch_size=1, is_rhand=is_rhand)\n return mano.faces\ndef get_flame_faces():\n flame = FLAME(SMPL_MODEL_DIR, batch_size=1)\n return flame.faces\ndef get_model_faces(type='smpl'):\n if type == 'smpl':", + "detail": "modules.PyMAF.models.smpl", + "documentation": {} + }, + { + "label": "get_flame_faces", + "kind": 2, + "importPath": 
"modules.PyMAF.models.smpl", + "description": "modules.PyMAF.models.smpl", + "peekOfCode": "def get_flame_faces():\n flame = FLAME(SMPL_MODEL_DIR, batch_size=1)\n return flame.faces\ndef get_model_faces(type='smpl'):\n if type == 'smpl':\n return get_smpl_faces()\n elif type == 'smplx':\n return get_smplx_faces()\n elif type == 'mano':\n return get_mano_faces()", + "detail": "modules.PyMAF.models.smpl", + "documentation": {} + }, + { + "label": "get_model_faces", + "kind": 2, + "importPath": "modules.PyMAF.models.smpl", + "description": "modules.PyMAF.models.smpl", + "peekOfCode": "def get_model_faces(type='smpl'):\n if type == 'smpl':\n return get_smpl_faces()\n elif type == 'smplx':\n return get_smplx_faces()\n elif type == 'mano':\n return get_mano_faces()\n elif type == 'flame':\n return get_flame_faces()\ndef get_model_tpose(type='smpl'):", + "detail": "modules.PyMAF.models.smpl", + "documentation": {} + }, + { + "label": "get_model_tpose", + "kind": 2, + "importPath": "modules.PyMAF.models.smpl", + "description": "modules.PyMAF.models.smpl", + "peekOfCode": "def get_model_tpose(type='smpl'):\n if type == 'smpl':\n return get_smpl_tpose()\n elif type == 'smplx':\n return get_smplx_tpose()\n elif type == 'mano':\n return get_mano_tpose()\n elif type == 'flame':\n return get_flame_tpose()\ndef get_smpl_tpose():", + "detail": "modules.PyMAF.models.smpl", + "documentation": {} + }, + { + "label": "get_smpl_tpose", + "kind": 2, + "importPath": "modules.PyMAF.models.smpl", + "description": "modules.PyMAF.models.smpl", + "peekOfCode": "def get_smpl_tpose():\n smpl = SMPL(create_betas=True, create_global_orient=True, create_body_pose=True, model_path=SMPL_MODEL_DIR, batch_size=1)\n vertices = smpl().vertices[0]\n return vertices.detach()\ndef get_smpl_tpose_joint():\n smpl = SMPL(create_betas=True, create_global_orient=True, create_body_pose=True, model_path=SMPL_MODEL_DIR, batch_size=1)\n tpose_joint = smpl().smpl_joints[0]\n return tpose_joint.detach()\ndef get_smplx_tpose():\n smplx = SMPLXLayer(SMPL_MODEL_DIR, batch_size=1)", + "detail": "modules.PyMAF.models.smpl", + "documentation": {} + }, + { + "label": "get_smpl_tpose_joint", + "kind": 2, + "importPath": "modules.PyMAF.models.smpl", + "description": "modules.PyMAF.models.smpl", + "peekOfCode": "def get_smpl_tpose_joint():\n smpl = SMPL(create_betas=True, create_global_orient=True, create_body_pose=True, model_path=SMPL_MODEL_DIR, batch_size=1)\n tpose_joint = smpl().smpl_joints[0]\n return tpose_joint.detach()\ndef get_smplx_tpose():\n smplx = SMPLXLayer(SMPL_MODEL_DIR, batch_size=1)\n vertices = smplx().vertices[0]\n return vertices\ndef get_smplx_tpose_joint():\n smplx = SMPLXLayer(SMPL_MODEL_DIR, batch_size=1)", + "detail": "modules.PyMAF.models.smpl", + "documentation": {} + }, + { + "label": "get_smplx_tpose", + "kind": 2, + "importPath": "modules.PyMAF.models.smpl", + "description": "modules.PyMAF.models.smpl", + "peekOfCode": "def get_smplx_tpose():\n smplx = SMPLXLayer(SMPL_MODEL_DIR, batch_size=1)\n vertices = smplx().vertices[0]\n return vertices\ndef get_smplx_tpose_joint():\n smplx = SMPLXLayer(SMPL_MODEL_DIR, batch_size=1)\n tpose_joint = smplx().joints[0]\n return tpose_joint\ndef get_mano_tpose():\n mano = MANO(SMPL_MODEL_DIR, batch_size=1, is_rhand=True)", + "detail": "modules.PyMAF.models.smpl", + "documentation": {} + }, + { + "label": "get_smplx_tpose_joint", + "kind": 2, + "importPath": "modules.PyMAF.models.smpl", + "description": "modules.PyMAF.models.smpl", + "peekOfCode": "def get_smplx_tpose_joint():\n smplx 
= SMPLXLayer(SMPL_MODEL_DIR, batch_size=1)\n tpose_joint = smplx().joints[0]\n return tpose_joint\ndef get_mano_tpose():\n mano = MANO(SMPL_MODEL_DIR, batch_size=1, is_rhand=True)\n vertices = mano(global_orient=torch.zeros(1, 3), \n right_hand_pose=torch.zeros(1, 15*3)).rhand_vertices[0]\n return vertices\ndef get_flame_tpose():", + "detail": "modules.PyMAF.models.smpl", + "documentation": {} + }, + { + "label": "get_mano_tpose", + "kind": 2, + "importPath": "modules.PyMAF.models.smpl", + "description": "modules.PyMAF.models.smpl", + "peekOfCode": "def get_mano_tpose():\n mano = MANO(SMPL_MODEL_DIR, batch_size=1, is_rhand=True)\n vertices = mano(global_orient=torch.zeros(1, 3), \n right_hand_pose=torch.zeros(1, 15*3)).rhand_vertices[0]\n return vertices\ndef get_flame_tpose():\n flame = FLAME(SMPL_MODEL_DIR, batch_size=1)\n vertices = flame(global_orient=torch.zeros(1, 3)).flame_vertices[0]\n return vertices\ndef get_part_joints(smpl_joints):", + "detail": "modules.PyMAF.models.smpl", + "documentation": {} + }, + { + "label": "get_flame_tpose", + "kind": 2, + "importPath": "modules.PyMAF.models.smpl", + "description": "modules.PyMAF.models.smpl", + "peekOfCode": "def get_flame_tpose():\n flame = FLAME(SMPL_MODEL_DIR, batch_size=1)\n vertices = flame(global_orient=torch.zeros(1, 3)).flame_vertices[0]\n return vertices\ndef get_part_joints(smpl_joints):\n batch_size = smpl_joints.shape[0]\n # part_joints = torch.zeros().to(smpl_joints.device)\n one_seg_pairs = [(0, 1), (0, 2), (0, 3), (3, 6), (9, 12), (9, 13), (9, 14), (12, 15), (13, 16), (14, 17)]\n two_seg_pairs = [(1, 4), (2, 5), (4, 7), (5, 8), (16, 18), (17, 19), (18, 20), (19, 21)]\n one_seg_pairs.extend(two_seg_pairs)", + "detail": "modules.PyMAF.models.smpl", + "documentation": {} + }, + { + "label": "get_part_joints", + "kind": 2, + "importPath": "modules.PyMAF.models.smpl", + "description": "modules.PyMAF.models.smpl", + "peekOfCode": "def get_part_joints(smpl_joints):\n batch_size = smpl_joints.shape[0]\n # part_joints = torch.zeros().to(smpl_joints.device)\n one_seg_pairs = [(0, 1), (0, 2), (0, 3), (3, 6), (9, 12), (9, 13), (9, 14), (12, 15), (13, 16), (14, 17)]\n two_seg_pairs = [(1, 4), (2, 5), (4, 7), (5, 8), (16, 18), (17, 19), (18, 20), (19, 21)]\n one_seg_pairs.extend(two_seg_pairs)\n single_joints = [(10), (11), (15), (22), (23)]\n part_joints = []\n for j_p in one_seg_pairs:\n new_joint = torch.mean(smpl_joints[:, j_p], dim=1, keepdim=True)", + "detail": "modules.PyMAF.models.smpl", + "documentation": {} + }, + { + "label": "get_partial_smpl", + "kind": 2, + "importPath": "modules.PyMAF.models.smpl", + "description": "modules.PyMAF.models.smpl", + "peekOfCode": "def get_partial_smpl(body_model='smpl', device=torch.device('cuda')):\n body_model_faces = get_model_faces(body_model)\n body_model_num_verts = len(get_model_tpose(body_model))\n part_vert_faces = {}\n for part in ['lhand', 'rhand', 'face', 'arm', 'forearm', 'larm', 'rarm', 'lwrist', 'rwrist']:\n part_vid_fname = 'data/partial_mesh/{}_{}_vids.npz'.format(body_model, part)\n if os.path.exists(part_vid_fname):\n part_vids = np.load(part_vid_fname)\n part_vert_faces[part] = {'vids': part_vids['vids'], 'faces': part_vids['faces']}\n else:", + "detail": "modules.PyMAF.models.smpl", + "documentation": {} + }, + { + "label": "SMPL_MEAN_PARAMS", + "kind": 5, + "importPath": "modules.PyMAF.models.smpl", + "description": "modules.PyMAF.models.smpl", + "peekOfCode": "SMPL_MEAN_PARAMS = path_config.SMPL_MEAN_PARAMS\nSMPL_MODEL_DIR = 
path_config.SMPL_MODEL_DIR\n@dataclass\nclass ModelOutput(SMPLXOutput):\n smpl_joints: Optional[torch.Tensor] = None\n joints_J19: Optional[torch.Tensor] = None\n smplx_vertices: Optional[torch.Tensor] = None\n flame_vertices: Optional[torch.Tensor] = None\n lhand_vertices: Optional[torch.Tensor] = None\n rhand_vertices: Optional[torch.Tensor] = None", + "detail": "modules.PyMAF.models.smpl", + "documentation": {} + }, + { + "label": "SMPL_MODEL_DIR", + "kind": 5, + "importPath": "modules.PyMAF.models.smpl", + "description": "modules.PyMAF.models.smpl", + "peekOfCode": "SMPL_MODEL_DIR = path_config.SMPL_MODEL_DIR\n@dataclass\nclass ModelOutput(SMPLXOutput):\n smpl_joints: Optional[torch.Tensor] = None\n joints_J19: Optional[torch.Tensor] = None\n smplx_vertices: Optional[torch.Tensor] = None\n flame_vertices: Optional[torch.Tensor] = None\n lhand_vertices: Optional[torch.Tensor] = None\n rhand_vertices: Optional[torch.Tensor] = None\n lhand_joints: Optional[torch.Tensor] = None", + "detail": "modules.PyMAF.models.smpl", + "documentation": {} + }, + { + "label": "get_cfg_defaults", + "kind": 2, + "importPath": "modules.PyMAF.pymaf_core.cfgs", + "description": "modules.PyMAF.pymaf_core.cfgs", + "peekOfCode": "def get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n # return cfg.clone()\n return cfg\ndef update_cfg(cfg_file):\n # cfg = get_cfg_defaults()\n cfg.merge_from_file(cfg_file)\n # return cfg.clone()", + "detail": "modules.PyMAF.pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "update_cfg", + "kind": 2, + "importPath": "modules.PyMAF.pymaf_core.cfgs", + "description": "modules.PyMAF.pymaf_core.cfgs", + "peekOfCode": "def update_cfg(cfg_file):\n # cfg = get_cfg_defaults()\n cfg.merge_from_file(cfg_file)\n # return cfg.clone()\n return cfg\ndef parse_args(args):\n cfg_file = args.cfg_file\n if args.cfg_file is not None:\n cfg = update_cfg(args.cfg_file)\n else:", + "detail": "modules.PyMAF.pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "parse_args", + "kind": 2, + "importPath": "modules.PyMAF.pymaf_core.cfgs", + "description": "modules.PyMAF.pymaf_core.cfgs", + "peekOfCode": "def parse_args(args):\n cfg_file = args.cfg_file\n if args.cfg_file is not None:\n cfg = update_cfg(args.cfg_file)\n else:\n cfg = get_cfg_defaults()\n if args.misc is not None:\n cfg.merge_from_list(args.misc)\n # a=['TRAIN.BHF_MODE', 'full_body', 'MODEL.EVAL_MODE', 'True', 'MODEL.PyMAF.HAND_VIS_TH', '0.1']\n # cfg.merge_from_list(a)", + "detail": "modules.PyMAF.pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "parse_args_extend", + "kind": 2, + "importPath": "modules.PyMAF.pymaf_core.cfgs", + "description": "modules.PyMAF.pymaf_core.cfgs", + "peekOfCode": "def parse_args_extend(args):\n if args.resume:\n if not os.path.exists(args.log_dir):\n raise ValueError('Experiment is set to resume mode, but log directory does not exist.')\n if args.cfg_file is not None:\n cfg = update_cfg(args.cfg_file)\n else:\n cfg = get_cfg_defaults()\n # load log's cfg\n cfg_file = os.path.join(args.log_dir, 'cfg.yaml')", + "detail": "modules.PyMAF.pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "cfg", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.cfgs", + "description": "modules.PyMAF.pymaf_core.cfgs", + "peekOfCode": "cfg = CN(new_allowed=True)\ncfg.OUTPUT_DIR = 'results'\ncfg.DEVICE = 'cuda'\ncfg.DEBUG = False\ncfg.LOGDIR = 
''\ncfg.VAL_VIS_BATCH_FREQ = 200\ncfg.TRAIN_VIS_ITER_FERQ = 1000\ncfg.SEED_VALUE = -1\ncfg.TRAIN = CN(new_allowed=True)\ncfg.LOSS = CN(new_allowed=True)", + "detail": "modules.PyMAF.pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "cfg.OUTPUT_DIR", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.cfgs", + "description": "modules.PyMAF.pymaf_core.cfgs", + "peekOfCode": "cfg.OUTPUT_DIR = 'results'\ncfg.DEVICE = 'cuda'\ncfg.DEBUG = False\ncfg.LOGDIR = ''\ncfg.VAL_VIS_BATCH_FREQ = 200\ncfg.TRAIN_VIS_ITER_FERQ = 1000\ncfg.SEED_VALUE = -1\ncfg.TRAIN = CN(new_allowed=True)\ncfg.LOSS = CN(new_allowed=True)\ncfg.LOSS.KP_2D_W = 300.0", + "detail": "modules.PyMAF.pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "cfg.DEVICE", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.cfgs", + "description": "modules.PyMAF.pymaf_core.cfgs", + "peekOfCode": "cfg.DEVICE = 'cuda'\ncfg.DEBUG = False\ncfg.LOGDIR = ''\ncfg.VAL_VIS_BATCH_FREQ = 200\ncfg.TRAIN_VIS_ITER_FERQ = 1000\ncfg.SEED_VALUE = -1\ncfg.TRAIN = CN(new_allowed=True)\ncfg.LOSS = CN(new_allowed=True)\ncfg.LOSS.KP_2D_W = 300.0\ncfg.LOSS.KP_3D_W = 300.0", + "detail": "modules.PyMAF.pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "cfg.DEBUG", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.cfgs", + "description": "modules.PyMAF.pymaf_core.cfgs", + "peekOfCode": "cfg.DEBUG = False\ncfg.LOGDIR = ''\ncfg.VAL_VIS_BATCH_FREQ = 200\ncfg.TRAIN_VIS_ITER_FERQ = 1000\ncfg.SEED_VALUE = -1\ncfg.TRAIN = CN(new_allowed=True)\ncfg.LOSS = CN(new_allowed=True)\ncfg.LOSS.KP_2D_W = 300.0\ncfg.LOSS.KP_3D_W = 300.0\ncfg.LOSS.SHAPE_W = 0.06", + "detail": "modules.PyMAF.pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "cfg.LOGDIR", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.cfgs", + "description": "modules.PyMAF.pymaf_core.cfgs", + "peekOfCode": "cfg.LOGDIR = ''\ncfg.VAL_VIS_BATCH_FREQ = 200\ncfg.TRAIN_VIS_ITER_FERQ = 1000\ncfg.SEED_VALUE = -1\ncfg.TRAIN = CN(new_allowed=True)\ncfg.LOSS = CN(new_allowed=True)\ncfg.LOSS.KP_2D_W = 300.0\ncfg.LOSS.KP_3D_W = 300.0\ncfg.LOSS.SHAPE_W = 0.06\ncfg.LOSS.POSE_W = 60.0", + "detail": "modules.PyMAF.pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "cfg.VAL_VIS_BATCH_FREQ", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.cfgs", + "description": "modules.PyMAF.pymaf_core.cfgs", + "peekOfCode": "cfg.VAL_VIS_BATCH_FREQ = 200\ncfg.TRAIN_VIS_ITER_FERQ = 1000\ncfg.SEED_VALUE = -1\ncfg.TRAIN = CN(new_allowed=True)\ncfg.LOSS = CN(new_allowed=True)\ncfg.LOSS.KP_2D_W = 300.0\ncfg.LOSS.KP_3D_W = 300.0\ncfg.LOSS.SHAPE_W = 0.06\ncfg.LOSS.POSE_W = 60.0\ncfg.LOSS.VERT_W = 0.0", + "detail": "modules.PyMAF.pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "cfg.TRAIN_VIS_ITER_FERQ", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.cfgs", + "description": "modules.PyMAF.pymaf_core.cfgs", + "peekOfCode": "cfg.TRAIN_VIS_ITER_FERQ = 1000\ncfg.SEED_VALUE = -1\ncfg.TRAIN = CN(new_allowed=True)\ncfg.LOSS = CN(new_allowed=True)\ncfg.LOSS.KP_2D_W = 300.0\ncfg.LOSS.KP_3D_W = 300.0\ncfg.LOSS.SHAPE_W = 0.06\ncfg.LOSS.POSE_W = 60.0\ncfg.LOSS.VERT_W = 0.0\n# Loss weights for dense correspondences", + "detail": "modules.PyMAF.pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "cfg.SEED_VALUE", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.cfgs", + "description": "modules.PyMAF.pymaf_core.cfgs", + "peekOfCode": "cfg.SEED_VALUE = -1\ncfg.TRAIN = CN(new_allowed=True)\ncfg.LOSS = CN(new_allowed=True)\ncfg.LOSS.KP_2D_W = 300.0\ncfg.LOSS.KP_3D_W = 
300.0\ncfg.LOSS.SHAPE_W = 0.06\ncfg.LOSS.POSE_W = 60.0\ncfg.LOSS.VERT_W = 0.0\n# Loss weights for dense correspondences\ncfg.LOSS.INDEX_WEIGHTS = 2.0", + "detail": "modules.PyMAF.pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "cfg.TRAIN", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.cfgs", + "description": "modules.PyMAF.pymaf_core.cfgs", + "peekOfCode": "cfg.TRAIN = CN(new_allowed=True)\ncfg.LOSS = CN(new_allowed=True)\ncfg.LOSS.KP_2D_W = 300.0\ncfg.LOSS.KP_3D_W = 300.0\ncfg.LOSS.SHAPE_W = 0.06\ncfg.LOSS.POSE_W = 60.0\ncfg.LOSS.VERT_W = 0.0\n# Loss weights for dense correspondences\ncfg.LOSS.INDEX_WEIGHTS = 2.0\n# Loss weights for surface parts. (24 Parts)", + "detail": "modules.PyMAF.pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "cfg.LOSS", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.cfgs", + "description": "modules.PyMAF.pymaf_core.cfgs", + "peekOfCode": "cfg.LOSS = CN(new_allowed=True)\ncfg.LOSS.KP_2D_W = 300.0\ncfg.LOSS.KP_3D_W = 300.0\ncfg.LOSS.SHAPE_W = 0.06\ncfg.LOSS.POSE_W = 60.0\ncfg.LOSS.VERT_W = 0.0\n# Loss weights for dense correspondences\ncfg.LOSS.INDEX_WEIGHTS = 2.0\n# Loss weights for surface parts. (24 Parts)\ncfg.LOSS.PART_WEIGHTS = 0.3", + "detail": "modules.PyMAF.pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "cfg.LOSS.KP_2D_W", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.cfgs", + "description": "modules.PyMAF.pymaf_core.cfgs", + "peekOfCode": "cfg.LOSS.KP_2D_W = 300.0\ncfg.LOSS.KP_3D_W = 300.0\ncfg.LOSS.SHAPE_W = 0.06\ncfg.LOSS.POSE_W = 60.0\ncfg.LOSS.VERT_W = 0.0\n# Loss weights for dense correspondences\ncfg.LOSS.INDEX_WEIGHTS = 2.0\n# Loss weights for surface parts. (24 Parts)\ncfg.LOSS.PART_WEIGHTS = 0.3\n# Loss weights for UV regression.", + "detail": "modules.PyMAF.pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "cfg.LOSS.KP_3D_W", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.cfgs", + "description": "modules.PyMAF.pymaf_core.cfgs", + "peekOfCode": "cfg.LOSS.KP_3D_W = 300.0\ncfg.LOSS.SHAPE_W = 0.06\ncfg.LOSS.POSE_W = 60.0\ncfg.LOSS.VERT_W = 0.0\n# Loss weights for dense correspondences\ncfg.LOSS.INDEX_WEIGHTS = 2.0\n# Loss weights for surface parts. (24 Parts)\ncfg.LOSS.PART_WEIGHTS = 0.3\n# Loss weights for UV regression.\ncfg.LOSS.POINT_REGRESSION_WEIGHTS = 0.5", + "detail": "modules.PyMAF.pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "cfg.LOSS.SHAPE_W", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.cfgs", + "description": "modules.PyMAF.pymaf_core.cfgs", + "peekOfCode": "cfg.LOSS.SHAPE_W = 0.06\ncfg.LOSS.POSE_W = 60.0\ncfg.LOSS.VERT_W = 0.0\n# Loss weights for dense correspondences\ncfg.LOSS.INDEX_WEIGHTS = 2.0\n# Loss weights for surface parts. (24 Parts)\ncfg.LOSS.PART_WEIGHTS = 0.3\n# Loss weights for UV regression.\ncfg.LOSS.POINT_REGRESSION_WEIGHTS = 0.5\ncfg.MODEL = CN(new_allowed=True)", + "detail": "modules.PyMAF.pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "cfg.LOSS.POSE_W", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.cfgs", + "description": "modules.PyMAF.pymaf_core.cfgs", + "peekOfCode": "cfg.LOSS.POSE_W = 60.0\ncfg.LOSS.VERT_W = 0.0\n# Loss weights for dense correspondences\ncfg.LOSS.INDEX_WEIGHTS = 2.0\n# Loss weights for surface parts. 
(24 Parts)\ncfg.LOSS.PART_WEIGHTS = 0.3\n# Loss weights for UV regression.\ncfg.LOSS.POINT_REGRESSION_WEIGHTS = 0.5\ncfg.MODEL = CN(new_allowed=True)\ncfg.MODEL.PyMAF = CN(new_allowed=True)", + "detail": "modules.PyMAF.pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "cfg.LOSS.VERT_W", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.cfgs", + "description": "modules.PyMAF.pymaf_core.cfgs", + "peekOfCode": "cfg.LOSS.VERT_W = 0.0\n# Loss weights for dense correspondences\ncfg.LOSS.INDEX_WEIGHTS = 2.0\n# Loss weights for surface parts. (24 Parts)\ncfg.LOSS.PART_WEIGHTS = 0.3\n# Loss weights for UV regression.\ncfg.LOSS.POINT_REGRESSION_WEIGHTS = 0.5\ncfg.MODEL = CN(new_allowed=True)\ncfg.MODEL.PyMAF = CN(new_allowed=True)\n## switch", + "detail": "modules.PyMAF.pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "cfg.LOSS.INDEX_WEIGHTS", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.cfgs", + "description": "modules.PyMAF.pymaf_core.cfgs", + "peekOfCode": "cfg.LOSS.INDEX_WEIGHTS = 2.0\n# Loss weights for surface parts. (24 Parts)\ncfg.LOSS.PART_WEIGHTS = 0.3\n# Loss weights for UV regression.\ncfg.LOSS.POINT_REGRESSION_WEIGHTS = 0.5\ncfg.MODEL = CN(new_allowed=True)\ncfg.MODEL.PyMAF = CN(new_allowed=True)\n## switch\ncfg.TRAIN.BATCH_SIZE = 64\ncfg.TRAIN.VAL_LOOP = True", + "detail": "modules.PyMAF.pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "cfg.LOSS.PART_WEIGHTS", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.cfgs", + "description": "modules.PyMAF.pymaf_core.cfgs", + "peekOfCode": "cfg.LOSS.PART_WEIGHTS = 0.3\n# Loss weights for UV regression.\ncfg.LOSS.POINT_REGRESSION_WEIGHTS = 0.5\ncfg.MODEL = CN(new_allowed=True)\ncfg.MODEL.PyMAF = CN(new_allowed=True)\n## switch\ncfg.TRAIN.BATCH_SIZE = 64\ncfg.TRAIN.VAL_LOOP = True\ncfg.TEST = CN(new_allowed=True)\ndef get_cfg_defaults():", + "detail": "modules.PyMAF.pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "cfg.LOSS.POINT_REGRESSION_WEIGHTS", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.cfgs", + "description": "modules.PyMAF.pymaf_core.cfgs", + "peekOfCode": "cfg.LOSS.POINT_REGRESSION_WEIGHTS = 0.5\ncfg.MODEL = CN(new_allowed=True)\ncfg.MODEL.PyMAF = CN(new_allowed=True)\n## switch\ncfg.TRAIN.BATCH_SIZE = 64\ncfg.TRAIN.VAL_LOOP = True\ncfg.TEST = CN(new_allowed=True)\ndef get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"\n # Return a clone so that the defaults will not be altered", + "detail": "modules.PyMAF.pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "cfg.MODEL", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.cfgs", + "description": "modules.PyMAF.pymaf_core.cfgs", + "peekOfCode": "cfg.MODEL = CN(new_allowed=True)\ncfg.MODEL.PyMAF = CN(new_allowed=True)\n## switch\ncfg.TRAIN.BATCH_SIZE = 64\ncfg.TRAIN.VAL_LOOP = True\ncfg.TEST = CN(new_allowed=True)\ndef get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern", + "detail": "modules.PyMAF.pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "cfg.MODEL.PyMAF", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.cfgs", + "description": "modules.PyMAF.pymaf_core.cfgs", + "peekOfCode": "cfg.MODEL.PyMAF = CN(new_allowed=True)\n## switch\ncfg.TRAIN.BATCH_SIZE = 64\ncfg.TRAIN.VAL_LOOP = True\ncfg.TEST = CN(new_allowed=True)\ndef get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default 
values for my_project.\"\"\"\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n # return cfg.clone()", + "detail": "modules.PyMAF.pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "cfg.TRAIN.BATCH_SIZE", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.cfgs", + "description": "modules.PyMAF.pymaf_core.cfgs", + "peekOfCode": "cfg.TRAIN.BATCH_SIZE = 64\ncfg.TRAIN.VAL_LOOP = True\ncfg.TEST = CN(new_allowed=True)\ndef get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n # return cfg.clone()\n return cfg\ndef update_cfg(cfg_file):", + "detail": "modules.PyMAF.pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "cfg.TRAIN.VAL_LOOP", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.cfgs", + "description": "modules.PyMAF.pymaf_core.cfgs", + "peekOfCode": "cfg.TRAIN.VAL_LOOP = True\ncfg.TEST = CN(new_allowed=True)\ndef get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n # return cfg.clone()\n return cfg\ndef update_cfg(cfg_file):\n # cfg = get_cfg_defaults()", + "detail": "modules.PyMAF.pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "cfg.TEST", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.cfgs", + "description": "modules.PyMAF.pymaf_core.cfgs", + "peekOfCode": "cfg.TEST = CN(new_allowed=True)\ndef get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n # return cfg.clone()\n return cfg\ndef update_cfg(cfg_file):\n # cfg = get_cfg_defaults()\n cfg.merge_from_file(cfg_file)", + "detail": "modules.PyMAF.pymaf_core.cfgs", + "documentation": {} + }, + { + "label": "FOCAL_LENGTH", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "FOCAL_LENGTH = 5000.\nIMG_RES = 224\n# IMG_RES = 256\n# Mean and standard deviation for normalizing input image\nIMG_NORM_MEAN = [0.485, 0.456, 0.406]\nIMG_NORM_STD = [0.229, 0.224, 0.225]\n\"\"\"\nWe create a superset of joints containing the OpenPose joints together with the ones that each dataset provides.\nWe keep a superset of 24 joints such that we include all joints from every dataset.\nIf a dataset doesn't provide annotations for a specific joint, we simply ignore it.", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "IMG_RES", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "IMG_RES = 224\n# IMG_RES = 256\n# Mean and standard deviation for normalizing input image\nIMG_NORM_MEAN = [0.485, 0.456, 0.406]\nIMG_NORM_STD = [0.229, 0.224, 0.225]\n\"\"\"\nWe create a superset of joints containing the OpenPose joints together with the ones that each dataset provides.\nWe keep a superset of 24 joints such that we include all joints from every dataset.\nIf a dataset doesn't provide annotations for a specific joint, we simply ignore it.\nThe joints used here are the following:", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "IMG_NORM_MEAN", + 
"kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "IMG_NORM_MEAN = [0.485, 0.456, 0.406]\nIMG_NORM_STD = [0.229, 0.224, 0.225]\n\"\"\"\nWe create a superset of joints containing the OpenPose joints together with the ones that each dataset provides.\nWe keep a superset of 24 joints such that we include all joints from every dataset.\nIf a dataset doesn't provide annotations for a specific joint, we simply ignore it.\nThe joints used here are the following:\n\"\"\"\nOP_JOINT_NAMES = [\n# 25 OpenPose joints (in the order provided by OpenPose)", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "IMG_NORM_STD", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "IMG_NORM_STD = [0.229, 0.224, 0.225]\n\"\"\"\nWe create a superset of joints containing the OpenPose joints together with the ones that each dataset provides.\nWe keep a superset of 24 joints such that we include all joints from every dataset.\nIf a dataset doesn't provide annotations for a specific joint, we simply ignore it.\nThe joints used here are the following:\n\"\"\"\nOP_JOINT_NAMES = [\n# 25 OpenPose joints (in the order provided by OpenPose)\n'OP Nose',", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "OP_JOINT_NAMES", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "OP_JOINT_NAMES = [\n# 25 OpenPose joints (in the order provided by OpenPose)\n'OP Nose',\n'OP Neck',\n'OP RShoulder',\n'OP RElbow',\n'OP RWrist',\n'OP LShoulder',\n'OP LElbow',\n'OP LWrist',", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "SPIN_JOINT_NAMES", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "SPIN_JOINT_NAMES = [\n# 24 Ground Truth joints (superset of joints from different datasets)\n'Right Ankle',\n'Right Knee',\n'Right Hip', # 2\n'Left Hip',\n'Left Knee', # 4\n'Left Ankle',\n'Right Wrist', # 6\n'Right Elbow',", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "JOINT_NAMES", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "JOINT_NAMES = OP_JOINT_NAMES + SPIN_JOINT_NAMES\nCOCO_KEYPOINTS = ['nose',\n 'left_eye',\n 'right_eye',\n 'left_ear',\n 'right_ear',\n 'left_shoulder',\n 'right_shoulder',\n 'left_elbow',\n 'right_elbow',", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "COCO_KEYPOINTS", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "COCO_KEYPOINTS = ['nose',\n 'left_eye',\n 'right_eye',\n 'left_ear',\n 'right_ear',\n 'left_shoulder',\n 'right_shoulder',\n 'left_elbow',\n 'right_elbow',\n 'left_wrist',", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "JOINT_IDS", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "JOINT_IDS = {JOINT_NAMES[i]: i for i in range(len(JOINT_NAMES))}\n# Map joints to SMPL joints\nJOINT_MAP = {\n'OP Nose': 24, 'OP Neck': 12, 'OP RShoulder': 17,\n'OP 
RElbow': 19, 'OP RWrist': 21, 'OP LShoulder': 16,\n'OP LElbow': 18, 'OP LWrist': 20, 'OP MidHip': 0,\n'OP RHip': 2, 'OP RKnee': 5, 'OP RAnkle': 8,\n'OP LHip': 1, 'OP LKnee': 4, 'OP LAnkle': 7,\n'OP REye': 25, 'OP LEye': 26, 'OP REar': 27,\n'OP LEar': 28, 'OP LBigToe': 29, 'OP LSmallToe': 30,", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "JOINT_MAP", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "JOINT_MAP = {\n'OP Nose': 24, 'OP Neck': 12, 'OP RShoulder': 17,\n'OP RElbow': 19, 'OP RWrist': 21, 'OP LShoulder': 16,\n'OP LElbow': 18, 'OP LWrist': 20, 'OP MidHip': 0,\n'OP RHip': 2, 'OP RKnee': 5, 'OP RAnkle': 8,\n'OP LHip': 1, 'OP LKnee': 4, 'OP LAnkle': 7,\n'OP REye': 25, 'OP LEye': 26, 'OP REar': 27,\n'OP LEar': 28, 'OP LBigToe': 29, 'OP LSmallToe': 30,\n'OP LHeel': 31, 'OP RBigToe': 32, 'OP RSmallToe': 33, 'OP RHeel': 34,\n'Right Ankle': 8, 'Right Knee': 5, 'Right Hip': 45,", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "H36M_TO_J17", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "H36M_TO_J17 = [6, 5, 4, 1, 2, 3, 16, 15, 14, 11, 12, 13, 8, 10, 0, 7, 9]\nH36M_TO_J14 = H36M_TO_J17[:14]\n# Indices to get the 14 LSP joints from the ground truth joints\nJ24_TO_J17 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 18, 14, 16, 17]\nJ24_TO_J14 = J24_TO_J17[:14]\nJ24_TO_J19 = J24_TO_J17[:14] + [19, 20, 21, 22, 23]\n# COCO with also 17 joints\nJ24_TO_JCOCO = [19, 20, 21, 22, 23, 9, 8, 10, 7, 11, 6, 3, 2, 4, 1, 5, 0]\n# Permutation of SMPL pose parameters when flipping the shape\nSMPL_JOINTS_FLIP_PERM = [0, 2, 1, 3, 5, 4, 6, 8, 7, 9, 11, 10, 12, 14, 13, 15, 17, 16, 19, 18, 21, 20, 23, 22]", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "H36M_TO_J14", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "H36M_TO_J14 = H36M_TO_J17[:14]\n# Indices to get the 14 LSP joints from the ground truth joints\nJ24_TO_J17 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 18, 14, 16, 17]\nJ24_TO_J14 = J24_TO_J17[:14]\nJ24_TO_J19 = J24_TO_J17[:14] + [19, 20, 21, 22, 23]\n# COCO with also 17 joints\nJ24_TO_JCOCO = [19, 20, 21, 22, 23, 9, 8, 10, 7, 11, 6, 3, 2, 4, 1, 5, 0]\n# Permutation of SMPL pose parameters when flipping the shape\nSMPL_JOINTS_FLIP_PERM = [0, 2, 1, 3, 5, 4, 6, 8, 7, 9, 11, 10, 12, 14, 13, 15, 17, 16, 19, 18, 21, 20, 23, 22]\nSMPL_POSE_FLIP_PERM = []", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "J24_TO_J17", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "J24_TO_J17 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 18, 14, 16, 17]\nJ24_TO_J14 = J24_TO_J17[:14]\nJ24_TO_J19 = J24_TO_J17[:14] + [19, 20, 21, 22, 23]\n# COCO with also 17 joints\nJ24_TO_JCOCO = [19, 20, 21, 22, 23, 9, 8, 10, 7, 11, 6, 3, 2, 4, 1, 5, 0]\n# Permutation of SMPL pose parameters when flipping the shape\nSMPL_JOINTS_FLIP_PERM = [0, 2, 1, 3, 5, 4, 6, 8, 7, 9, 11, 10, 12, 14, 13, 15, 17, 16, 19, 18, 21, 20, 23, 22]\nSMPL_POSE_FLIP_PERM = []\nfor i in SMPL_JOINTS_FLIP_PERM:\n SMPL_POSE_FLIP_PERM.append(3*i)", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": 
"J24_TO_J14", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "J24_TO_J14 = J24_TO_J17[:14]\nJ24_TO_J19 = J24_TO_J17[:14] + [19, 20, 21, 22, 23]\n# COCO with also 17 joints\nJ24_TO_JCOCO = [19, 20, 21, 22, 23, 9, 8, 10, 7, 11, 6, 3, 2, 4, 1, 5, 0]\n# Permutation of SMPL pose parameters when flipping the shape\nSMPL_JOINTS_FLIP_PERM = [0, 2, 1, 3, 5, 4, 6, 8, 7, 9, 11, 10, 12, 14, 13, 15, 17, 16, 19, 18, 21, 20, 23, 22]\nSMPL_POSE_FLIP_PERM = []\nfor i in SMPL_JOINTS_FLIP_PERM:\n SMPL_POSE_FLIP_PERM.append(3*i)\n SMPL_POSE_FLIP_PERM.append(3*i+1)", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "J24_TO_J19", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "J24_TO_J19 = J24_TO_J17[:14] + [19, 20, 21, 22, 23]\n# COCO with also 17 joints\nJ24_TO_JCOCO = [19, 20, 21, 22, 23, 9, 8, 10, 7, 11, 6, 3, 2, 4, 1, 5, 0]\n# Permutation of SMPL pose parameters when flipping the shape\nSMPL_JOINTS_FLIP_PERM = [0, 2, 1, 3, 5, 4, 6, 8, 7, 9, 11, 10, 12, 14, 13, 15, 17, 16, 19, 18, 21, 20, 23, 22]\nSMPL_POSE_FLIP_PERM = []\nfor i in SMPL_JOINTS_FLIP_PERM:\n SMPL_POSE_FLIP_PERM.append(3*i)\n SMPL_POSE_FLIP_PERM.append(3*i+1)\n SMPL_POSE_FLIP_PERM.append(3*i+2)", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "J24_TO_JCOCO", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "J24_TO_JCOCO = [19, 20, 21, 22, 23, 9, 8, 10, 7, 11, 6, 3, 2, 4, 1, 5, 0]\n# Permutation of SMPL pose parameters when flipping the shape\nSMPL_JOINTS_FLIP_PERM = [0, 2, 1, 3, 5, 4, 6, 8, 7, 9, 11, 10, 12, 14, 13, 15, 17, 16, 19, 18, 21, 20, 23, 22]\nSMPL_POSE_FLIP_PERM = []\nfor i in SMPL_JOINTS_FLIP_PERM:\n SMPL_POSE_FLIP_PERM.append(3*i)\n SMPL_POSE_FLIP_PERM.append(3*i+1)\n SMPL_POSE_FLIP_PERM.append(3*i+2)\n# Permutation indices for the 24 ground truth joints\nJ24_FLIP_PERM = [5, 4, 3, 2, 1, 0, 11, 10, 9, 8, 7, 6, 12, 13, 14, 15, 16, 17, 18, 19, 21, 20, 23, 22]", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "SMPL_JOINTS_FLIP_PERM", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "SMPL_JOINTS_FLIP_PERM = [0, 2, 1, 3, 5, 4, 6, 8, 7, 9, 11, 10, 12, 14, 13, 15, 17, 16, 19, 18, 21, 20, 23, 22]\nSMPL_POSE_FLIP_PERM = []\nfor i in SMPL_JOINTS_FLIP_PERM:\n SMPL_POSE_FLIP_PERM.append(3*i)\n SMPL_POSE_FLIP_PERM.append(3*i+1)\n SMPL_POSE_FLIP_PERM.append(3*i+2)\n# Permutation indices for the 24 ground truth joints\nJ24_FLIP_PERM = [5, 4, 3, 2, 1, 0, 11, 10, 9, 8, 7, 6, 12, 13, 14, 15, 16, 17, 18, 19, 21, 20, 23, 22]\n# Permutation indices for the full set of 49 joints\nJ49_FLIP_PERM = [0, 1, 5, 6, 7, 2, 3, 4, 8, 12, 13, 14, 9, 10, 11, 16, 15, 18, 17, 22, 23, 24, 19, 20, 21]\\", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "SMPL_POSE_FLIP_PERM", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "SMPL_POSE_FLIP_PERM = []\nfor i in SMPL_JOINTS_FLIP_PERM:\n SMPL_POSE_FLIP_PERM.append(3*i)\n SMPL_POSE_FLIP_PERM.append(3*i+1)\n SMPL_POSE_FLIP_PERM.append(3*i+2)\n# Permutation indices for the 24 ground truth joints\nJ24_FLIP_PERM = [5, 
4, 3, 2, 1, 0, 11, 10, 9, 8, 7, 6, 12, 13, 14, 15, 16, 17, 18, 19, 21, 20, 23, 22]\n# Permutation indices for the full set of 49 joints\nJ49_FLIP_PERM = [0, 1, 5, 6, 7, 2, 3, 4, 8, 12, 13, 14, 9, 10, 11, 16, 15, 18, 17, 22, 23, 24, 19, 20, 21]\\\n + [25+i for i in J24_FLIP_PERM]", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "J24_FLIP_PERM", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "J24_FLIP_PERM = [5, 4, 3, 2, 1, 0, 11, 10, 9, 8, 7, 6, 12, 13, 14, 15, 16, 17, 18, 19, 21, 20, 23, 22]\n# Permutation indices for the full set of 49 joints\nJ49_FLIP_PERM = [0, 1, 5, 6, 7, 2, 3, 4, 8, 12, 13, 14, 9, 10, 11, 16, 15, 18, 17, 22, 23, 24, 19, 20, 21]\\\n + [25+i for i in J24_FLIP_PERM]\nSMPL_J49_FLIP_PERM = [0, 1, 5, 6, 7, 2, 3, 4, 8, 12, 13, 14, 9, 10, 11, 16, 15, 18, 17, 22, 23, 24, 19, 20, 21]\\\n + [25+i for i in SMPL_JOINTS_FLIP_PERM]\nSMPLX2SMPL_J45 = [i for i in range(22)] + [30, 45] + [i for i in range(55, 55+21)]\nSMPL_PART_ID = {'rightHand': 1, 'rightUpLeg': 2, 'leftArm': 3, 'leftLeg': 4, 'leftToeBase': 5, \n 'leftFoot': 6, 'spine1': 7, 'spine2': 8, 'leftShoulder': 9, 'rightShoulder': 10, 'rightFoot': 11, \n 'head': 12, 'rightArm': 13, 'leftHandIndex1': 14, 'rightLeg': 15, 'rightHandIndex1': 16, 'leftForeArm': 17, 'rightForeArm': 18, ", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "J49_FLIP_PERM", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "J49_FLIP_PERM = [0, 1, 5, 6, 7, 2, 3, 4, 8, 12, 13, 14, 9, 10, 11, 16, 15, 18, 17, 22, 23, 24, 19, 20, 21]\\\n + [25+i for i in J24_FLIP_PERM]\nSMPL_J49_FLIP_PERM = [0, 1, 5, 6, 7, 2, 3, 4, 8, 12, 13, 14, 9, 10, 11, 16, 15, 18, 17, 22, 23, 24, 19, 20, 21]\\\n + [25+i for i in SMPL_JOINTS_FLIP_PERM]\nSMPLX2SMPL_J45 = [i for i in range(22)] + [30, 45] + [i for i in range(55, 55+21)]\nSMPL_PART_ID = {'rightHand': 1, 'rightUpLeg': 2, 'leftArm': 3, 'leftLeg': 4, 'leftToeBase': 5, \n 'leftFoot': 6, 'spine1': 7, 'spine2': 8, 'leftShoulder': 9, 'rightShoulder': 10, 'rightFoot': 11, \n 'head': 12, 'rightArm': 13, 'leftHandIndex1': 14, 'rightLeg': 15, 'rightHandIndex1': 16, 'leftForeArm': 17, 'rightForeArm': 18, \n 'neck': 19, 'rightToeBase': 20, 'spine': 21, 'leftUpLeg': 22, 'leftHand': 23, 'hips': 24}\n# MANO_NAMES = [", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "SMPL_J49_FLIP_PERM", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "SMPL_J49_FLIP_PERM = [0, 1, 5, 6, 7, 2, 3, 4, 8, 12, 13, 14, 9, 10, 11, 16, 15, 18, 17, 22, 23, 24, 19, 20, 21]\\\n + [25+i for i in SMPL_JOINTS_FLIP_PERM]\nSMPLX2SMPL_J45 = [i for i in range(22)] + [30, 45] + [i for i in range(55, 55+21)]\nSMPL_PART_ID = {'rightHand': 1, 'rightUpLeg': 2, 'leftArm': 3, 'leftLeg': 4, 'leftToeBase': 5, \n 'leftFoot': 6, 'spine1': 7, 'spine2': 8, 'leftShoulder': 9, 'rightShoulder': 10, 'rightFoot': 11, \n 'head': 12, 'rightArm': 13, 'leftHandIndex1': 14, 'rightLeg': 15, 'rightHandIndex1': 16, 'leftForeArm': 17, 'rightForeArm': 18, \n 'neck': 19, 'rightToeBase': 20, 'spine': 21, 'leftUpLeg': 22, 'leftHand': 23, 'hips': 24}\n# MANO_NAMES = [\n# 'wrist',\n# 'index1',", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "SMPLX2SMPL_J45", + "kind": 
5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "SMPLX2SMPL_J45 = [i for i in range(22)] + [30, 45] + [i for i in range(55, 55+21)]\nSMPL_PART_ID = {'rightHand': 1, 'rightUpLeg': 2, 'leftArm': 3, 'leftLeg': 4, 'leftToeBase': 5, \n 'leftFoot': 6, 'spine1': 7, 'spine2': 8, 'leftShoulder': 9, 'rightShoulder': 10, 'rightFoot': 11, \n 'head': 12, 'rightArm': 13, 'leftHandIndex1': 14, 'rightLeg': 15, 'rightHandIndex1': 16, 'leftForeArm': 17, 'rightForeArm': 18, \n 'neck': 19, 'rightToeBase': 20, 'spine': 21, 'leftUpLeg': 22, 'leftHand': 23, 'hips': 24}\n# MANO_NAMES = [\n# 'wrist',\n# 'index1',\n# 'index2',\n# 'index3',", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "SMPL_PART_ID", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "SMPL_PART_ID = {'rightHand': 1, 'rightUpLeg': 2, 'leftArm': 3, 'leftLeg': 4, 'leftToeBase': 5, \n 'leftFoot': 6, 'spine1': 7, 'spine2': 8, 'leftShoulder': 9, 'rightShoulder': 10, 'rightFoot': 11, \n 'head': 12, 'rightArm': 13, 'leftHandIndex1': 14, 'rightLeg': 15, 'rightHandIndex1': 16, 'leftForeArm': 17, 'rightForeArm': 18, \n 'neck': 19, 'rightToeBase': 20, 'spine': 21, 'leftUpLeg': 22, 'leftHand': 23, 'hips': 24}\n# MANO_NAMES = [\n# 'wrist',\n# 'index1',\n# 'index2',\n# 'index3',\n# 'middle1',", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "HAND_NAMES", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "HAND_NAMES = [\n 'wrist',\n 'thumb1',\n 'thumb2',\n 'thumb3',\n 'thumb',\n 'index1',\n 'index2',\n 'index3',\n 'index',", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "SMPLX_JOINT_NAMES", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "SMPLX_JOINT_NAMES = smplx_joint_name.JOINT_NAMES\nSMPLX_JOINT_IDS = {SMPLX_JOINT_NAMES[i]: i for i in range(len(SMPLX_JOINT_NAMES))}\nFOOT_NAMES = ['big_toe', 'small_toe', 'heel']\nFACIAL_LANDMARKS = [\n 'right_eye_brow1',\n 'right_eye_brow2',\n 'right_eye_brow3',\n 'right_eye_brow4',\n 'right_eye_brow5',\n 'left_eye_brow5',", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "SMPLX_JOINT_IDS", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "SMPLX_JOINT_IDS = {SMPLX_JOINT_NAMES[i]: i for i in range(len(SMPLX_JOINT_NAMES))}\nFOOT_NAMES = ['big_toe', 'small_toe', 'heel']\nFACIAL_LANDMARKS = [\n 'right_eye_brow1',\n 'right_eye_brow2',\n 'right_eye_brow3',\n 'right_eye_brow4',\n 'right_eye_brow5',\n 'left_eye_brow5',\n 'left_eye_brow4',", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "FOOT_NAMES", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "FOOT_NAMES = ['big_toe', 'small_toe', 'heel']\nFACIAL_LANDMARKS = [\n 'right_eye_brow1',\n 'right_eye_brow2',\n 'right_eye_brow3',\n 'right_eye_brow4',\n 'right_eye_brow5',\n 'left_eye_brow5',\n 'left_eye_brow4',\n 'left_eye_brow3',", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "FACIAL_LANDMARKS", + "kind": 
5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "FACIAL_LANDMARKS = [\n 'right_eye_brow1',\n 'right_eye_brow2',\n 'right_eye_brow3',\n 'right_eye_brow4',\n 'right_eye_brow5',\n 'left_eye_brow5',\n 'left_eye_brow4',\n 'left_eye_brow3',\n 'left_eye_brow2',", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "LRHAND_FLIP_PERM", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "LRHAND_FLIP_PERM = [i for i in range(len(HAND_NAMES), len(HAND_NAMES)*2)] + [i for i in range(len(HAND_NAMES))]\nSINGLE_HAND_FLIP_PERM = [i for i in range(len(HAND_NAMES))]\nFEEF_FLIP_PERM = [i for i in range(len(FOOT_NAMES), len(FOOT_NAMES)*2)] + [i for i in range(len(FOOT_NAMES))]\n# matchedParts = (\n# [17, 26], [18, 25], [19, 24], [20, 23], [21, 22],\n# [21],[20],[19],[18],[17],\n# [27], [28], [29], [30], \n# [31, 35], [32, 34], [33],\n# [32],[31],\n# [36, 45], [37, 44], [38, 43], [39, 42], [40, 47], [41, 46],", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "SINGLE_HAND_FLIP_PERM", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "SINGLE_HAND_FLIP_PERM = [i for i in range(len(HAND_NAMES))]\nFEEF_FLIP_PERM = [i for i in range(len(FOOT_NAMES), len(FOOT_NAMES)*2)] + [i for i in range(len(FOOT_NAMES))]\n# matchedParts = (\n# [17, 26], [18, 25], [19, 24], [20, 23], [21, 22],\n# [21],[20],[19],[18],[17],\n# [27], [28], [29], [30], \n# [31, 35], [32, 34], [33],\n# [32],[31],\n# [36, 45], [37, 44], [38, 43], [39, 42], [40, 47], [41, 46],\n# [39],[38], [37],[36],[41],[40],", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "FEEF_FLIP_PERM", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "FEEF_FLIP_PERM = [i for i in range(len(FOOT_NAMES), len(FOOT_NAMES)*2)] + [i for i in range(len(FOOT_NAMES))]\n# matchedParts = (\n# [17, 26], [18, 25], [19, 24], [20, 23], [21, 22],\n# [21],[20],[19],[18],[17],\n# [27], [28], [29], [30], \n# [31, 35], [32, 34], [33],\n# [32],[31],\n# [36, 45], [37, 44], [38, 43], [39, 42], [40, 47], [41, 46],\n# [39],[38], [37],[36],[41],[40],\n# [48, 54], [49, 53], [50, 52], [51],", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "FACE_FLIP_PERM", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "FACE_FLIP_PERM = [9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 10, 11, 12, 13, 18, 17, 16, 15, 14, 28, 27, 26, 25, 30, 29, 22, 21, 20, 19, 24, 23, 37, 36, 35, 34, 33, 32, 31, 42, 41, 40, 39, 38, 47, 46, 45, 44, 43, 50, 49, 48]\nFACE_FLIP_PERM = FACE_FLIP_PERM + [67, 66, 65, 64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51]", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "FACE_FLIP_PERM", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.constants", + "description": "modules.PyMAF.pymaf_core.constants", + "peekOfCode": "FACE_FLIP_PERM = FACE_FLIP_PERM + [67, 66, 65, 64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51]", + "detail": "modules.PyMAF.pymaf_core.constants", + "documentation": {} + }, + { + "label": "JOINT_REGRESSOR_TRAIN_EXTRA", + "kind": 5, + "importPath": 
"modules.PyMAF.pymaf_core.path_config", + "description": "modules.PyMAF.pymaf_core.path_config", + "peekOfCode": "JOINT_REGRESSOR_TRAIN_EXTRA = 'data/J_regressor_extra.npy'\nJOINT_REGRESSOR_H36M = 'data/J_regressor_h36m.npy'\nSMPL_MEAN_PARAMS = 'data/smpl_mean_params.npz'\nSMPL_MODEL_DIR = 'data/smpl'", + "detail": "modules.PyMAF.pymaf_core.path_config", + "documentation": {} + }, + { + "label": "JOINT_REGRESSOR_H36M", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.path_config", + "description": "modules.PyMAF.pymaf_core.path_config", + "peekOfCode": "JOINT_REGRESSOR_H36M = 'data/J_regressor_h36m.npy'\nSMPL_MEAN_PARAMS = 'data/smpl_mean_params.npz'\nSMPL_MODEL_DIR = 'data/smpl'", + "detail": "modules.PyMAF.pymaf_core.path_config", + "documentation": {} + }, + { + "label": "SMPL_MEAN_PARAMS", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.path_config", + "description": "modules.PyMAF.pymaf_core.path_config", + "peekOfCode": "SMPL_MEAN_PARAMS = 'data/smpl_mean_params.npz'\nSMPL_MODEL_DIR = 'data/smpl'", + "detail": "modules.PyMAF.pymaf_core.path_config", + "documentation": {} + }, + { + "label": "SMPL_MODEL_DIR", + "kind": 5, + "importPath": "modules.PyMAF.pymaf_core.path_config", + "description": "modules.PyMAF.pymaf_core.path_config", + "peekOfCode": "SMPL_MODEL_DIR = 'data/smpl'", + "detail": "modules.PyMAF.pymaf_core.path_config", + "documentation": {} + }, + { + "label": "Voxels", + "kind": 6, + "importPath": "modules.PyMAF.utils.binvox_rw", + "description": "modules.PyMAF.utils.binvox_rw", + "peekOfCode": "class Voxels(object):\n \"\"\" Holds a binvox model.\n data is either a three-dimensional numpy boolean array (dense representation)\n or a two-dimensional numpy float array (coordinate representation).\n dims, translate and scale are the model metadata.\n dims are the voxel dimensions, e.g. [32, 32, 32] for a 32x32x32 model.\n scale and translate relate the voxels to the original model coordinates.\n To translate voxel coordinates i, j, k to original coordinates x, y, z:\n x_n = (i+.5)/dims[0]\n y_n = (j+.5)/dims[1]", + "detail": "modules.PyMAF.utils.binvox_rw", + "documentation": {} + }, + { + "label": "read_header", + "kind": 2, + "importPath": "modules.PyMAF.utils.binvox_rw", + "description": "modules.PyMAF.utils.binvox_rw", + "peekOfCode": "def read_header(fp):\n \"\"\" Read binvox header. Mostly meant for internal use.\n \"\"\"\n line = fp.readline().strip()\n if not line.startswith(b'#binvox'):\n raise IOError('Not a binvox file')\n dims = [int(i) for i in fp.readline().strip().split(b' ')[1:]]\n translate = [float(i) for i in fp.readline().strip().split(b' ')[1:]]\n scale = [float(i) for i in fp.readline().strip().split(b' ')[1:]][0]\n line = fp.readline()", + "detail": "modules.PyMAF.utils.binvox_rw", + "documentation": {} + }, + { + "label": "read_as_3d_array", + "kind": 2, + "importPath": "modules.PyMAF.utils.binvox_rw", + "description": "modules.PyMAF.utils.binvox_rw", + "peekOfCode": "def read_as_3d_array(fp, fix_coords=True):\n \"\"\" Read binary binvox format as array.\n Returns the model with accompanying metadata.\n Voxels are stored in a three-dimensional numpy array, which is simple and\n direct, but may use a lot of memory for large models. (Storage requirements\n are 8*(d^3) bytes, where d is the dimensions of the binvox model. 
Numpy\n boolean arrays use a byte per element).\n Doesn't do any checks on input except for the '#binvox' line.\n \"\"\"\n dims, translate, scale = read_header(fp)", + "detail": "modules.PyMAF.utils.binvox_rw", + "documentation": {} + }, + { + "label": "read_as_coord_array", + "kind": 2, + "importPath": "modules.PyMAF.utils.binvox_rw", + "description": "modules.PyMAF.utils.binvox_rw", + "peekOfCode": "def read_as_coord_array(fp, fix_coords=True):\n \"\"\" Read binary binvox format as coordinates.\n Returns binvox model with voxels in a \"coordinate\" representation, i.e. an\n 3 x N array where N is the number of nonzero voxels. Each column\n corresponds to a nonzero voxel and the 3 rows are the (x, z, y) coordinates\n of the voxel. (The odd ordering is due to the way binvox format lays out\n data). Note that coordinates refer to the binvox voxels, without any\n scaling or translation.\n Use this to save memory if your model is very sparse (mostly empty).\n Doesn't do any checks on input except for the '#binvox' line.", + "detail": "modules.PyMAF.utils.binvox_rw", + "documentation": {} + }, + { + "label": "dense_to_sparse", + "kind": 2, + "importPath": "modules.PyMAF.utils.binvox_rw", + "description": "modules.PyMAF.utils.binvox_rw", + "peekOfCode": "def dense_to_sparse(voxel_data, dtype=np.int):\n \"\"\" From dense representation to sparse (coordinate) representation.\n No coordinate reordering.\n \"\"\"\n if voxel_data.ndim!=3:\n raise ValueError('voxel_data is wrong shape; should be 3D array.')\n return np.asarray(np.nonzero(voxel_data), dtype)\ndef sparse_to_dense(voxel_data, dims, dtype=np.bool):\n if voxel_data.ndim!=2 or voxel_data.shape[0]!=3:\n raise ValueError('voxel_data is wrong shape; should be 3xN array.')", + "detail": "modules.PyMAF.utils.binvox_rw", + "documentation": {} + }, + { + "label": "sparse_to_dense", + "kind": 2, + "importPath": "modules.PyMAF.utils.binvox_rw", + "description": "modules.PyMAF.utils.binvox_rw", + "peekOfCode": "def sparse_to_dense(voxel_data, dims, dtype=np.bool):\n if voxel_data.ndim!=2 or voxel_data.shape[0]!=3:\n raise ValueError('voxel_data is wrong shape; should be 3xN array.')\n if np.isscalar(dims):\n dims = [dims]*3\n dims = np.atleast_2d(dims).T\n # truncate to integers\n xyz = voxel_data.astype(np.int)\n # discard voxels that fall outside dims\n valid_ix = ~np.any((xyz < 0) | (xyz >= dims), 0)", + "detail": "modules.PyMAF.utils.binvox_rw", + "documentation": {} + }, + { + "label": "write", + "kind": 2, + "importPath": "modules.PyMAF.utils.binvox_rw", + "description": "modules.PyMAF.utils.binvox_rw", + "peekOfCode": "def write(voxel_model, fp):\n \"\"\" Write binary binvox format.\n Note that when saving a model in sparse (coordinate) format, it is first\n converted to dense format.\n Doesn't check if the model is 'sane'.\n \"\"\"\n if voxel_model.data.ndim==2:\n # TODO avoid conversion to dense\n dense_voxel_data = sparse_to_dense(voxel_model.data, voxel_model.dims)\n else:", + "detail": "modules.PyMAF.utils.binvox_rw", + "documentation": {} + }, + { + "label": "get_image_blob", + "kind": 2, + "importPath": "modules.PyMAF.utils.blob", + "description": "modules.PyMAF.utils.blob", + "peekOfCode": "def get_image_blob(im, target_scale, target_max_size):\n \"\"\"Convert an image into a network input.\n Arguments:\n im (ndarray): a color image in BGR order\n Returns:\n blob (ndarray): a data blob holding an image pyramid\n im_scale (float): image scale (target size) / (original size)\n im_info (ndarray)\n \"\"\"\n processed_im, im_scale = 
prep_im_for_blob(", + "detail": "modules.PyMAF.utils.blob", + "documentation": {} + }, + { + "label": "im_list_to_blob", + "kind": 2, + "importPath": "modules.PyMAF.utils.blob", + "description": "modules.PyMAF.utils.blob", + "peekOfCode": "def im_list_to_blob(ims):\n \"\"\"Convert a list of images into a network input. Assumes images were\n prepared using prep_im_for_blob or equivalent: i.e.\n - BGR channel order\n - pixel means subtracted\n - resized to the desired input size\n - float32 numpy ndarray format\n Output is a 4D HCHW tensor of the images concatenated along axis 0 with\n shape.\n \"\"\"", + "detail": "modules.PyMAF.utils.blob", + "documentation": {} + }, + { + "label": "get_max_shape", + "kind": 2, + "importPath": "modules.PyMAF.utils.blob", + "description": "modules.PyMAF.utils.blob", + "peekOfCode": "def get_max_shape(im_shapes):\n \"\"\"Calculate max spatial size (h, w) for batching given a list of image shapes\n \"\"\"\n max_shape = np.array(im_shapes).max(axis=0)\n assert max_shape.size == 2\n # Pad the image so they can be divisible by a stride\n if cfg.FPN.FPN_ON:\n stride = float(cfg.FPN.COARSEST_STRIDE)\n max_shape[0] = int(np.ceil(max_shape[0] / stride) * stride)\n max_shape[1] = int(np.ceil(max_shape[1] / stride) * stride)", + "detail": "modules.PyMAF.utils.blob", + "documentation": {} + }, + { + "label": "prep_im_for_blob", + "kind": 2, + "importPath": "modules.PyMAF.utils.blob", + "description": "modules.PyMAF.utils.blob", + "peekOfCode": "def prep_im_for_blob(im, pixel_means, target_sizes, max_size):\n \"\"\"Prepare an image for use as a network input blob. Specially:\n - Subtract per-channel pixel mean\n - Convert to float32\n - Rescale to each of the specified target size (capped at max_size)\n Returns a list of transformed images, one for each target size. 
Also returns\n the scale factors that were used to compute each returned image.\n \"\"\"\n im = im.astype(np.float32, copy=False)\n im -= pixel_means", + "detail": "modules.PyMAF.utils.blob", + "documentation": {} + }, + { + "label": "get_im_blob_sizes", + "kind": 2, + "importPath": "modules.PyMAF.utils.blob", + "description": "modules.PyMAF.utils.blob", + "peekOfCode": "def get_im_blob_sizes(im_shape, target_sizes, max_size):\n \"\"\"Calculate im blob size for multiple target_sizes given original im shape\n \"\"\"\n im_size_min = np.min(im_shape)\n im_size_max = np.max(im_shape)\n im_sizes = []\n for target_size in target_sizes:\n im_scale = get_target_scale(im_size_min, im_size_max, target_size, max_size)\n im_sizes.append(np.round(im_shape * im_scale))\n return np.array(im_sizes)", + "detail": "modules.PyMAF.utils.blob", + "documentation": {} + }, + { + "label": "get_target_scale", + "kind": 2, + "importPath": "modules.PyMAF.utils.blob", + "description": "modules.PyMAF.utils.blob", + "peekOfCode": "def get_target_scale(im_size_min, im_size_max, target_size, max_size):\n \"\"\"Calculate target resize scale\n \"\"\"\n im_scale = float(target_size) / float(im_size_min)\n # Prevent the biggest axis from being more than max_size\n if np.round(im_scale * im_size_max) > max_size:\n im_scale = float(max_size) / float(im_size_max)\n return im_scale\ndef zeros(shape, int32=False):\n \"\"\"Return a blob of all zeros of the given shape with the correct float or", + "detail": "modules.PyMAF.utils.blob", + "documentation": {} + }, + { + "label": "zeros", + "kind": 2, + "importPath": "modules.PyMAF.utils.blob", + "description": "modules.PyMAF.utils.blob", + "peekOfCode": "def zeros(shape, int32=False):\n \"\"\"Return a blob of all zeros of the given shape with the correct float or\n int data type.\n \"\"\"\n return np.zeros(shape, dtype=np.int32 if int32 else np.float32)\ndef ones(shape, int32=False):\n \"\"\"Return a blob of all ones of the given shape with the correct float or\n int data type.\n \"\"\"\n return np.ones(shape, dtype=np.int32 if int32 else np.float32)", + "detail": "modules.PyMAF.utils.blob", + "documentation": {} + }, + { + "label": "ones", + "kind": 2, + "importPath": "modules.PyMAF.utils.blob", + "description": "modules.PyMAF.utils.blob", + "peekOfCode": "def ones(shape, int32=False):\n \"\"\"Return a blob of all ones of the given shape with the correct float or\n int data type.\n \"\"\"\n return np.ones(shape, dtype=np.int32 if int32 else np.float32)\ndef serialize(obj):\n \"\"\"Serialize a Python object using pickle and encode it as an array of\n float32 values so that it can be feed into the workspace. See deserialize().\n \"\"\"\n return np.fromstring(pickle.dumps(obj), dtype=np.uint8).astype(np.float32)", + "detail": "modules.PyMAF.utils.blob", + "documentation": {} + }, + { + "label": "serialize", + "kind": 2, + "importPath": "modules.PyMAF.utils.blob", + "description": "modules.PyMAF.utils.blob", + "peekOfCode": "def serialize(obj):\n \"\"\"Serialize a Python object using pickle and encode it as an array of\n float32 values so that it can be feed into the workspace. See deserialize().\n \"\"\"\n return np.fromstring(pickle.dumps(obj), dtype=np.uint8).astype(np.float32)\ndef deserialize(arr):\n \"\"\"Unserialize a Python object from an array of float32 values fetched from\n a workspace. 
See serialize().\n \"\"\"\n return pickle.loads(arr.astype(np.uint8).tobytes())", + "detail": "modules.PyMAF.utils.blob", + "documentation": {} + }, + { + "label": "deserialize", + "kind": 2, + "importPath": "modules.PyMAF.utils.blob", + "description": "modules.PyMAF.utils.blob", + "peekOfCode": "def deserialize(arr):\n \"\"\"Unserialize a Python object from an array of float32 values fetched from\n a workspace. See serialize().\n \"\"\"\n return pickle.loads(arr.astype(np.uint8).tobytes())", + "detail": "modules.PyMAF.utils.blob", + "documentation": {} + }, + { + "label": "f_pix2vfov", + "kind": 2, + "importPath": "modules.PyMAF.utils.cam_params", + "description": "modules.PyMAF.utils.cam_params", + "peekOfCode": "def f_pix2vfov(f_pix, img_h):\n if torch.is_tensor(f_pix):\n fov = 2. * torch.arctan(img_h / (2. * f_pix))\n else:\n fov = 2. * np.arctan(img_h / (2. * f_pix))\n return fov\ndef vfov2f_pix(fov, img_h):\n if torch.is_tensor(fov):\n f_pix = img_h / 2. / torch.tan(fov / 2.)\n else:", + "detail": "modules.PyMAF.utils.cam_params", + "documentation": {} + }, + { + "label": "vfov2f_pix", + "kind": 2, + "importPath": "modules.PyMAF.utils.cam_params", + "description": "modules.PyMAF.utils.cam_params", + "peekOfCode": "def vfov2f_pix(fov, img_h):\n if torch.is_tensor(fov):\n f_pix = img_h / 2. / torch.tan(fov / 2.)\n else:\n f_pix = img_h / 2. / np.tan(fov / 2.)\n return f_pix\ndef read_cam_params(cam_params, orig_shape=None):\n # These are predicted camera parameters\n # cam_param_folder = CAM_PARAM_FOLDERS[dataset_name][cam_param_type]\n cam_pitch = cam_params['pitch'].item()", + "detail": "modules.PyMAF.utils.cam_params", + "documentation": {} + }, + { + "label": "read_cam_params", + "kind": 2, + "importPath": "modules.PyMAF.utils.cam_params", + "description": "modules.PyMAF.utils.cam_params", + "peekOfCode": "def read_cam_params(cam_params, orig_shape=None):\n # These are predicted camera parameters\n # cam_param_folder = CAM_PARAM_FOLDERS[dataset_name][cam_param_type]\n cam_pitch = cam_params['pitch'].item()\n cam_roll = cam_params['roll'].item() if 'roll' in cam_params else None\n cam_vfov = cam_params['vfov'].item() if 'vfov' in cam_params else None\n cam_focal_length = cam_params['f_pix']\n orig_shape = cam_params['orig_resolution']\n # cam_rotmat = batch_euler2matrix(torch.tensor([[cam_pitch, 0., cam_roll]]).float())[0]\n cam_rotmat = batch_euler2matrix(torch.tensor([[cam_pitch, 0., 0.]]).float())[0]", + "detail": "modules.PyMAF.utils.cam_params", + "documentation": {} + }, + { + "label": "homo_vector", + "kind": 2, + "importPath": "modules.PyMAF.utils.cam_params", + "description": "modules.PyMAF.utils.cam_params", + "peekOfCode": "def homo_vector(vector):\n \"\"\"\n vector: B x N x C\n h_vector: B x N x (C + 1)\n \"\"\"\n batch_size, n_pts = vector.shape[:2]\n h_vector = torch.cat([vector, torch.ones((batch_size, n_pts, 1)).to(vector)], dim=-1)\n return h_vector", + "detail": "modules.PyMAF.utils.cam_params", + "documentation": {} + }, + { + "label": "AttrDict", + "kind": 6, + "importPath": "modules.PyMAF.utils.collections", + "description": "modules.PyMAF.utils.collections", + "peekOfCode": "class AttrDict(dict):\n IMMUTABLE = '__immutable__'\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.__dict__[AttrDict.IMMUTABLE] = False\n def __getattr__(self, name):\n if name in self.__dict__:\n return self.__dict__[name]\n elif name in self:\n return self[name]", + "detail": "modules.PyMAF.utils.collections", + "documentation": {} + }, + { + "label": 
"colormap", + "kind": 2, + "importPath": "modules.PyMAF.utils.colormap", + "description": "modules.PyMAF.utils.colormap", + "peekOfCode": "def colormap(rgb=False):\n color_list = np.array(\n [\n 0.000, 0.447, 0.741,\n 0.850, 0.325, 0.098,\n 0.929, 0.694, 0.125,\n 0.494, 0.184, 0.556,\n 0.466, 0.674, 0.188,\n 0.301, 0.745, 0.933,\n 0.635, 0.078, 0.184,", + "detail": "modules.PyMAF.utils.colormap", + "documentation": {} + }, + { + "label": "compute_iou", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def compute_iou(occ1, occ2):\n ''' Computes the Intersection over Union (IoU) value for two sets of\n occupancy values.\n Args:\n occ1 (tensor): first set of occupancy values\n occ2 (tensor): second set of occupancy values\n '''\n occ1 = np.asarray(occ1)\n occ2 = np.asarray(occ2)\n # Put all data in second dimension", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": "rgb2gray", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def rgb2gray(rgb):\n ''' rgb of size B x h x w x 3\n '''\n r, g, b = rgb[:, :, :, 0], rgb[:, :, :, 1], rgb[:, :, :, 2]\n gray = 0.2989 * r + 0.5870 * g + 0.1140 * b\n return gray\ndef sample_patch_points(batch_size, n_points, patch_size=1,\n image_resolution=(128, 128), continuous=True):\n ''' Returns sampled points in the range [-1, 1].\n Args:", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": "sample_patch_points", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def sample_patch_points(batch_size, n_points, patch_size=1,\n image_resolution=(128, 128), continuous=True):\n ''' Returns sampled points in the range [-1, 1].\n Args:\n batch_size (int): required batch size\n n_points (int): number of points to sample\n patch_size (int): size of patch; if > 1, patches of size patch_size\n are sampled instead of individual points\n image_resolution (tuple): image resolution (required for calculating\n the pixel distances)", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": "get_proposal_points_in_unit_cube", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def get_proposal_points_in_unit_cube(ray0, ray_direction, padding=0.1,\n eps=1e-6, n_steps=40):\n ''' Returns n_steps equally spaced points inside the unit cube on the rays\n cast from ray0 with direction ray_direction.\n This function is used to get the ray marching points {p^ray_j} for a given\n camera position ray0 and\n a given ray direction ray_direction which goes from the camera_position to\n the pixel location.\n NOTE: The returned values d_proposal are the lengths of the ray:\n p^ray_j = ray0 + d_proposal_j * ray_direction", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": "check_ray_intersection_with_unit_cube", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def check_ray_intersection_with_unit_cube(ray0, ray_direction, padding=0.1,\n eps=1e-6, scale=2.0):\n ''' Checks if rays ray0 + d * ray_direction intersect with unit cube with\n padding padding.\n It returns the two intersection points as well as the sorted ray lengths d.\n Args:\n ray0 (tensor): Start positions of the rays\n ray_direction (tensor): Directions 
of rays\n padding (float): Padding which is applied to the unit cube\n eps (float): The epsilon value for numerical stability", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": "intersect_camera_rays_with_unit_cube", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def intersect_camera_rays_with_unit_cube(\n pixels, camera_mat, world_mat, scale_mat, padding=0.1, eps=1e-6,\n use_ray_length_as_depth=True):\n ''' Returns the intersection points of ray cast from camera origin to\n pixel points p on the image plane.\n The function returns the intersection points as well the depth values and\n a mask specifying which ray intersects the unit cube.\n Args:\n pixels (tensor): Pixel points on image plane (range [-1, 1])\n camera_mat (tensor): camera matrix", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": "arange_pixels", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def arange_pixels(resolution=(128, 128), batch_size=1, image_range=(-1., 1.),\n subsample_to=None):\n ''' Arranges pixels for given resolution in range image_range.\n The function returns the unscaled pixel locations as integers and the\n scaled float values.\n Args:\n resolution (tuple): image resolution\n batch_size (int): batch size\n image_range (tuple): range of output points (default [-1, 1])\n subsample_to (int): if integer and > 0, the points are randomly", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": "to_pytorch", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def to_pytorch(tensor, return_type=False):\n ''' Converts input tensor to pytorch.\n Args:\n tensor (tensor): Numpy or Pytorch tensor\n return_type (bool): whether to return input type\n '''\n is_numpy = False\n if type(tensor) == np.ndarray:\n tensor = torch.from_numpy(tensor)\n is_numpy = True", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": "get_mask", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def get_mask(tensor):\n ''' Returns mask of non-illegal values for tensor.\n Args:\n tensor (tensor): Numpy or Pytorch tensor\n '''\n tensor, is_numpy = to_pytorch(tensor, True)\n mask = ((abs(tensor) != np.inf) & (torch.isnan(tensor) == False))\n mask = mask.to(torch.bool)\n if is_numpy:\n mask = mask.numpy()", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": "transform_mesh", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def transform_mesh(mesh, transform):\n ''' Transforms a mesh with given transformation.\n Args:\n mesh (trimesh mesh): mesh\n transform (tensor): transformation matrix of size 4 x 4\n '''\n mesh = deepcopy(mesh)\n v = np.asarray(mesh.vertices).astype(np.float32)\n v_transformed = transform_pointcloud(v, transform)\n mesh.vertices = v_transformed", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": "transform_pointcloud", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def transform_pointcloud(pointcloud, transform):\n ''' Transforms a point cloud with given transformation.\n Args:\n pointcloud 
(tensor): tensor of size N x 3\n transform (tensor): transformation of size 4 x 4\n '''\n assert(transform.shape == (4, 4) and pointcloud.shape[-1] == 3)\n pcl, is_numpy = to_pytorch(pointcloud, True)\n transform = to_pytorch(transform)\n # Transform point cloud to homogen coordinate system", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": "transform_points_batch", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def transform_points_batch(p, transform):\n ''' Transform points tensor with given transform.\n Args:\n p (tensor): tensor of size B x N x 3\n transform (tensor): transformation of size B x 4 x 4\n '''\n device = p.device\n assert(transform.shape[1:] == (4, 4) and p.shape[-1]\n == 3 and p.shape[0] == transform.shape[0])\n # Transform points to homogen coordinates", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": "get_tensor_values", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def get_tensor_values(tensor, p, grid_sample=True, mode='nearest',\n with_mask=False, squeeze_channel_dim=False):\n '''\n Returns values from tensor at given location p.\n Args:\n tensor (tensor): tensor of size B x C x H x W\n p (tensor): position values scaled between [-1, 1] and\n of size B x N x 2\n grid_sample (boolean): whether to use grid sampling\n mode (string): what mode to perform grid sampling in", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": "transform_to_world", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def transform_to_world(pixels, depth, camera_mat, world_mat, scale_mat,\n invert=True):\n ''' Transforms pixel positions p with given depth value d to world coordinates.\n Args:\n pixels (tensor): pixel tensor of size B x N x 2\n depth (tensor): depth tensor of size B x N x 1\n camera_mat (tensor): camera matrix\n world_mat (tensor): world matrix\n scale_mat (tensor): scale matrix\n invert (bool): whether to invert matrices (default: true)", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": "transform_to_camera_space", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def transform_to_camera_space(p_world, camera_mat, world_mat, scale_mat):\n ''' Transforms world points to camera space.\n Args:\n p_world (tensor): world points tensor of size B x N x 3\n camera_mat (tensor): camera matrix\n world_mat (tensor): world matrix\n scale_mat (tensor): scale matrix\n '''\n batch_size, n_p, _ = p_world.shape\n device = p_world.device", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": "origin_to_world", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def origin_to_world(n_points, camera_mat, world_mat, scale_mat, invert=True):\n ''' Transforms origin (camera location) to world coordinates.\n Args:\n n_points (int): how often the transformed origin is repeated in the\n form (batch_size, n_points, 3)\n camera_mat (tensor): camera matrix\n world_mat (tensor): world matrix\n scale_mat (tensor): scale matrix\n invert (bool): whether to invert the matrices (default: true)\n '''", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": 
"image_points_to_world", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def image_points_to_world(image_points, camera_mat, world_mat, scale_mat,\n invert=True):\n ''' Transforms points on image plane to world coordinates.\n In contrast to transform_to_world, no depth value is needed as points on\n the image plane have a fixed depth of 1.\n Args:\n image_points (tensor): image points tensor of size B x N x 2\n camera_mat (tensor): camera matrix\n world_mat (tensor): world matrix\n scale_mat (tensor): scale matrix", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": "check_weights", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def check_weights(params):\n ''' Checks weights for illegal values.\n Args:\n params (tensor): parameter tensor\n '''\n for k, v in params.items():\n if torch.isnan(v).any():\n logger_py.warn('NaN Values detected in model weight %s.' % k)\ndef check_tensor(tensor, tensorname='', input_tensor=None):\n ''' Checks tensor for illegal values.", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": "check_tensor", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def check_tensor(tensor, tensorname='', input_tensor=None):\n ''' Checks tensor for illegal values.\n Args:\n tensor (tensor): tensor\n tensorname (string): name of tensor\n input_tensor (tensor): previous input\n '''\n if torch.isnan(tensor).any():\n logger_py.warn('Tensor %s contains nan values.' % tensorname)\n if input_tensor is not None:", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": "get_prob_from_logits", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def get_prob_from_logits(logits):\n ''' Returns probabilities for logits\n Args:\n logits (tensor): logits\n '''\n odds = np.exp(logits)\n probs = odds / (1 + odds)\n return probs\ndef get_logits_from_prob(probs, eps=1e-4):\n ''' Returns logits for probabilities.", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": "get_logits_from_prob", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def get_logits_from_prob(probs, eps=1e-4):\n ''' Returns logits for probabilities.\n Args:\n probs (tensor): probability tensor\n eps (float): epsilon value for numerical stability\n '''\n probs = np.clip(probs, a_min=eps, a_max=1-eps)\n logits = np.log(probs / (1 - probs))\n return logits\ndef chamfer_distance(points1, points2, use_kdtree=True, give_id=False):", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": "chamfer_distance", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def chamfer_distance(points1, points2, use_kdtree=True, give_id=False):\n ''' Returns the chamfer distance for the sets of points.\n Args:\n points1 (numpy array): first point set\n points2 (numpy array): second point set\n use_kdtree (bool): whether to use a kdtree\n give_id (bool): whether to return the IDs of nearest points\n '''\n if use_kdtree:\n return chamfer_distance_kdtree(points1, points2, give_id=give_id)", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + 
"label": "chamfer_distance_naive", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def chamfer_distance_naive(points1, points2):\n ''' Naive implementation of the Chamfer distance.\n Args:\n points1 (numpy array): first point set\n points2 (numpy array): second point set\n '''\n assert(points1.size() == points2.size())\n batch_size, T, _ = points1.size()\n points1 = points1.view(batch_size, T, 1, 3)\n points2 = points2.view(batch_size, 1, T, 3)", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": "chamfer_distance_kdtree", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def chamfer_distance_kdtree(points1, points2, give_id=False):\n ''' KD-tree based implementation of the Chamfer distance.\n Args:\n points1 (numpy array): first point set\n points2 (numpy array): second point set\n give_id (bool): whether to return the IDs of the nearest points\n '''\n # Points have size batch_size x T x 3\n batch_size = points1.size(0)\n # First convert points to numpy", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": "get_nearest_neighbors_indices_batch", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def get_nearest_neighbors_indices_batch(points_src, points_tgt, k=1):\n ''' Returns the nearest neighbors for point sets batchwise.\n Args:\n points_src (numpy array): source points\n points_tgt (numpy array): target points\n k (int): number of nearest neighbors to return\n '''\n indices = []\n distances = []\n for (p1, p2) in zip(points_src, points_tgt):", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": "normalize_imagenet", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def normalize_imagenet(x):\n ''' Normalize input images according to ImageNet standards.\n Args:\n x (tensor): input images\n '''\n x = x.clone()\n x[:, 0] = (x[:, 0] - 0.485) / 0.229\n x[:, 1] = (x[:, 1] - 0.456) / 0.224\n x[:, 2] = (x[:, 2] - 0.406) / 0.225\n return x", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": "make_3d_grid", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def make_3d_grid(bb_min, bb_max, shape):\n ''' Makes a 3D grid.\n Args:\n bb_min (tuple): bounding box minimum\n bb_max (tuple): bounding box maximum\n shape (tuple): output shape\n '''\n size = shape[0] * shape[1] * shape[2]\n pxs = torch.linspace(bb_min[0], bb_max[0], shape[0])\n pys = torch.linspace(bb_min[1], bb_max[1], shape[1])", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": "get_occupancy_loss_points", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def get_occupancy_loss_points(pixels, camera_mat, world_mat, scale_mat,\n depth_image=None, use_cube_intersection=True,\n occupancy_random_normal=False,\n depth_range=[0, 2.4]):\n ''' Returns 3D points for occupancy loss.\n Args:\n pixels (tensor): sampled pixels in range [-1, 1]\n camera_mat (tensor): camera matrix\n world_mat (tensor): world matrix\n scale_mat (tensor): scale matrix", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": 
"get_freespace_loss_points", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def get_freespace_loss_points(pixels, camera_mat, world_mat, scale_mat,\n use_cube_intersection=True, depth_range=[0, 2.4]):\n ''' Returns 3D points for freespace loss.\n Args:\n pixels (tensor): sampled pixels in range [-1, 1]\n camera_mat (tensor): camera matrix\n world_mat (tensor): world matrix\n scale_mat (tensor): scale matrix\n use_cube_intersection (bool): whether to check unit cube intersection\n depth_range (float): depth range; important when no cube", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": "normalize_tensor", + "kind": 2, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "def normalize_tensor(tensor, min_norm=1e-5, feat_dim=-1):\n ''' Normalizes the tensor.\n Args:\n tensor (tensor): tensor\n min_norm (float): minimum norm for numerical stability\n feat_dim (int): feature dimension in tensor (default: -1)\n '''\n norm_tensor = torch.clamp(torch.norm(tensor, dim=feat_dim, keepdim=True),\n min=min_norm)\n normed_tensor = tensor / norm_tensor", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": "logger_py", + "kind": 5, + "importPath": "modules.PyMAF.utils.common", + "description": "modules.PyMAF.utils.common", + "peekOfCode": "logger_py = logging.getLogger(__name__)\ndef compute_iou(occ1, occ2):\n ''' Computes the Intersection over Union (IoU) value for two sets of\n occupancy values.\n Args:\n occ1 (tensor): first set of occupancy values\n occ2 (tensor): second set of occupancy values\n '''\n occ1 = np.asarray(occ1)\n occ2 = np.asarray(occ2)", + "detail": "modules.PyMAF.utils.common", + "documentation": {} + }, + { + "label": "RandomSampler", + "kind": 6, + "importPath": "modules.PyMAF.utils.data_loader", + "description": "modules.PyMAF.utils.data_loader", + "peekOfCode": "class RandomSampler(Sampler):\n def __init__(self, data_source, checkpoint):\n self.data_source = data_source\n if checkpoint is not None and checkpoint['dataset_perm'] is not None:\n self.dataset_perm = checkpoint['dataset_perm']\n self.perm = self.dataset_perm[checkpoint['batch_size']*checkpoint['batch_idx']:]\n else:\n self.dataset_perm = torch.randperm(len(self.data_source)).tolist()\n self.perm = torch.randperm(len(self.data_source)).tolist() \n def __iter__(self):", + "detail": "modules.PyMAF.utils.data_loader", + "documentation": {} + }, + { + "label": "SequentialSampler", + "kind": 6, + "importPath": "modules.PyMAF.utils.data_loader", + "description": "modules.PyMAF.utils.data_loader", + "peekOfCode": "class SequentialSampler(Sampler):\n def __init__(self, data_source, checkpoint):\n self.data_source = data_source\n if checkpoint is not None and checkpoint['dataset_perm'] is not None:\n self.dataset_perm = checkpoint['dataset_perm']\n self.perm = self.dataset_perm[checkpoint['batch_size']*checkpoint['batch_idx']:]\n else:\n self.dataset_perm = list(range(len(self.data_source)))\n self.perm = self.dataset_perm\n def __iter__(self):", + "detail": "modules.PyMAF.utils.data_loader", + "documentation": {} + }, + { + "label": "CheckpointDataLoader", + "kind": 6, + "importPath": "modules.PyMAF.utils.data_loader", + "description": "modules.PyMAF.utils.data_loader", + "peekOfCode": "class CheckpointDataLoader(DataLoader):\n \"\"\"\n Extends torch.utils.data.DataLoader to handle resuming training from an arbitrary point within 
an epoch.\n \"\"\"\n def __init__(self, dataset, checkpoint=None, batch_size=1,\n shuffle=False, num_workers=0, pin_memory=False, drop_last=True,\n timeout=0, worker_init_fn=None):\n if shuffle:\n sampler = RandomSampler(dataset, checkpoint)\n else:", + "detail": "modules.PyMAF.utils.data_loader", + "documentation": {} + }, + { + "label": "preprocess_video", + "kind": 2, + "importPath": "modules.PyMAF.utils.demo_utils", + "description": "modules.PyMAF.utils.demo_utils", + "peekOfCode": "def preprocess_video(video, joints2d, bboxes, frames, scale=1.0, crop_size=224):\n \"\"\"\n Read the video, normalize it, and crop it according to the bounding box.\n If there are bounding box annotations, use them to crop the image.\n If no bounding box is specified but openpose detections are available, use them to get the bounding box.\n :param video (ndarray): input video\n :param joints2d (ndarray, NxJx3): openpose detections\n :param bboxes (ndarray, Nx5): bbox detections\n :param scale (float): bbox crop scaling factor\n :param crop_size (int): crop width and height", + "detail": "modules.PyMAF.utils.demo_utils", + "documentation": {} + }, + { + "label": "download_youtube_clip", + "kind": 2, + "importPath": "modules.PyMAF.utils.demo_utils", + "description": "modules.PyMAF.utils.demo_utils", + "peekOfCode": "def download_youtube_clip(url, download_folder):\n return YouTube(url).streams.first().download(output_path=download_folder)\ndef smplify_runner(\n pred_rotmat,\n pred_betas,\n pred_cam,\n j2d,\n device,\n batch_size,\n lr=1.0,", + "detail": "modules.PyMAF.utils.demo_utils", + "documentation": {} + }, + { + "label": "smplify_runner", + "kind": 2, + "importPath": "modules.PyMAF.utils.demo_utils", + "description": "modules.PyMAF.utils.demo_utils", + "peekOfCode": "def smplify_runner(\n pred_rotmat,\n pred_betas,\n pred_cam,\n j2d,\n device,\n batch_size,\n lr=1.0,\n opt_steps=1,\n use_lbfgs=True,", + "detail": "modules.PyMAF.utils.demo_utils", + "documentation": {} + }, + { + "label": "trim_videos", + "kind": 2, + "importPath": "modules.PyMAF.utils.demo_utils", + "description": "modules.PyMAF.utils.demo_utils", + "peekOfCode": "def trim_videos(filename, start_time, end_time, output_filename):\n command = ['ffmpeg',\n '-i', '\"%s\"' % filename,\n '-ss', str(start_time),\n '-t', str(end_time - start_time),\n '-c:v', 'libx264', '-c:a', 'copy',\n '-threads', '1',\n '-loglevel', 'panic',\n '\"%s\"' % output_filename]\n # command = ' '.join(command)", + "detail": "modules.PyMAF.utils.demo_utils", + "documentation": {} + }, + { + "label": "video_to_images", + "kind": 2, + "importPath": "modules.PyMAF.utils.demo_utils", + "description": "modules.PyMAF.utils.demo_utils", + "peekOfCode": "def video_to_images(vid_file, img_folder=None, return_info=False):\n if img_folder is None:\n img_folder = osp.join(osp.expanduser('~'), 'tmp', osp.basename(vid_file).replace('.', '_'))\n # img_folder = osp.join('/tmp', osp.basename(vid_file).replace('.', '_'))\n print(img_folder)\n os.makedirs(img_folder, exist_ok=True)\n command = ['ffmpeg',\n '-i', vid_file,\n '-f', 'image2',\n '-v', 'error',", + "detail": "modules.PyMAF.utils.demo_utils", + "documentation": {} + }, + { + "label": "download_url", + "kind": 2, + "importPath": "modules.PyMAF.utils.demo_utils", + "description": "modules.PyMAF.utils.demo_utils", + "peekOfCode": "def download_url(url, outdir):\n print(f'Downloading files from {url}')\n cmd = ['wget', '-c', url, '-P', outdir]\n subprocess.call(cmd)\ndef download_ckpt(outdir='data/vibe_data', use_3dpw=False):\n
os.makedirs(outdir, exist_ok=True)\n if use_3dpw:\n ckpt_file = 'data/vibe_data/vibe_model_w_3dpw.pth.tar'\n url = 'https://www.dropbox.com/s/41ozgqorcp095ja/vibe_model_w_3dpw.pth.tar'\n if not os.path.isfile(ckpt_file):", + "detail": "modules.PyMAF.utils.demo_utils", + "documentation": {} + }, + { + "label": "download_ckpt", + "kind": 2, + "importPath": "modules.PyMAF.utils.demo_utils", + "description": "modules.PyMAF.utils.demo_utils", + "peekOfCode": "def download_ckpt(outdir='data/vibe_data', use_3dpw=False):\n os.makedirs(outdir, exist_ok=True)\n if use_3dpw:\n ckpt_file = 'data/vibe_data/vibe_model_w_3dpw.pth.tar'\n url = 'https://www.dropbox.com/s/41ozgqorcp095ja/vibe_model_w_3dpw.pth.tar'\n if not os.path.isfile(ckpt_file):\n download_url(url=url, outdir=outdir)\n else:\n ckpt_file = 'data/vibe_data/vibe_model_wo_3dpw.pth.tar'\n url = 'https://www.dropbox.com/s/amj2p8bmf6g56k6/vibe_model_wo_3dpw.pth.tar'", + "detail": "modules.PyMAF.utils.demo_utils", + "documentation": {} + }, + { + "label": "images_to_video", + "kind": 2, + "importPath": "modules.PyMAF.utils.demo_utils", + "description": "modules.PyMAF.utils.demo_utils", + "peekOfCode": "def images_to_video(img_folder, output_vid_file):\n os.makedirs(img_folder, exist_ok=True)\n command = [\n 'ffmpeg', '-y', '-threads', '16', '-i', f'{img_folder}/%06d.png', '-profile:v', 'baseline',\n '-level', '3.0', '-c:v', 'libx264', '-pix_fmt', 'yuv420p', '-an', '-v', 'error', output_vid_file,\n ]\n print(f'Running \\\"{\" \".join(command)}\\\"')\n try:\n subprocess.call(command)\n except:", + "detail": "modules.PyMAF.utils.demo_utils", + "documentation": {} + }, + { + "label": "convert_crop_cam_to_orig_img", + "kind": 2, + "importPath": "modules.PyMAF.utils.demo_utils", + "description": "modules.PyMAF.utils.demo_utils", + "peekOfCode": "def convert_crop_cam_to_orig_img(cam, bbox, img_width, img_height):\n '''\n Convert predicted camera from cropped image coordinates\n to original image coordinates\n :param cam (ndarray, shape=(3,)): weak perspective camera in cropped img coordinates\n :param bbox (ndarray, shape=(4,)): bbox coordinates (c_x, c_y, h)\n :param img_width (int): original image width\n :param img_height (int): original image height\n :return:\n '''", + "detail": "modules.PyMAF.utils.demo_utils", + "documentation": {} + }, + { + "label": "prepare_rendering_results", + "kind": 2, + "importPath": "modules.PyMAF.utils.demo_utils", + "description": "modules.PyMAF.utils.demo_utils", + "peekOfCode": "def prepare_rendering_results(results_dict, nframes):\n frame_results = [{} for _ in range(nframes)]\n for person_id, person_data in results_dict.items():\n for idx, frame_id in enumerate(person_data['frame_ids']):\n frame_results[frame_id][person_id] = {\n 'verts': person_data['verts'][idx],\n 'smplx_verts': person_data['smplx_verts'][idx] if 'smplx_verts' in person_data else None,\n 'cam': person_data['orig_cam'][idx],\n 'cam_t': person_data['orig_cam_t'][idx] if 'orig_cam_t' in person_data else None,\n # 'cam': person_data['pred_cam'][idx],", + "detail": "modules.PyMAF.utils.demo_utils", + "documentation": {} + }, + { + "label": "DensePoseMethods", + "kind": 6, + "importPath": "modules.PyMAF.utils.densepose_methods", + "description": "modules.PyMAF.utils.densepose_methods", + "peekOfCode": "class DensePoseMethods:\n def __init__(self):\n #\n ALP_UV = loadmat(os.path.join('./data/UV_data', 'UV_Processed.mat'))\n self.FaceIndices = np.array(ALP_UV['All_FaceIndices']).squeeze()\n self.FacesDensePose = ALP_UV['All_Faces'] - 1\n self.U_norm = 
ALP_UV['All_U_norm'].squeeze()\n self.V_norm = ALP_UV['All_V_norm'].squeeze()\n self.All_vertices = ALP_UV['All_vertices'][0]\n ## Info to compute symmetries.", + "detail": "modules.PyMAF.utils.densepose_methods", + "documentation": {} + }, + { + "label": "batch_rodrigues", + "kind": 2, + "importPath": "modules.PyMAF.utils.geometry", + "description": "modules.PyMAF.utils.geometry", + "peekOfCode": "def batch_rodrigues(theta):\n \"\"\"Convert axis-angle representation to rotation matrix.\n Args:\n theta: size = [B, 3]\n Returns:\n Rotation matrix corresponding to the axis-angle input -- size = [B, 3, 3]\n \"\"\"\n l1norm = torch.norm(theta + 1e-8, p = 2, dim = 1)\n angle = torch.unsqueeze(l1norm, -1)\n normalized = torch.div(theta, angle)", + "detail": "modules.PyMAF.utils.geometry", + "documentation": {} + }, + { + "label": "quat_to_rotmat", + "kind": 2, + "importPath": "modules.PyMAF.utils.geometry", + "description": "modules.PyMAF.utils.geometry", + "peekOfCode": "def quat_to_rotmat(quat):\n \"\"\"Convert quaternion coefficients to rotation matrix.\n Args:\n quat: size = [B, 4] 4 <===>(w, x, y, z)\n Returns:\n Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]\n \"\"\" \n norm_quat = quat\n norm_quat = norm_quat/norm_quat.norm(p=2, dim=1, keepdim=True)\n w, x, y, z = norm_quat[:,0], norm_quat[:,1], norm_quat[:,2], norm_quat[:,3]", + "detail": "modules.PyMAF.utils.geometry", + "documentation": {} + }, + { + "label": "rotation_matrix_to_angle_axis", + "kind": 2, + "importPath": "modules.PyMAF.utils.geometry", + "description": "modules.PyMAF.utils.geometry", + "peekOfCode": "def rotation_matrix_to_angle_axis(rotation_matrix):\n \"\"\"\n This function is borrowed from https://github.com/kornia/kornia\n Convert 3x4 rotation matrix to Rodrigues vector\n Args:\n rotation_matrix (Tensor): rotation matrix.\n Returns:\n Tensor: Rodrigues vector transformation.\n Shape:\n - Input: :math:`(N, 3, 4)`", + "detail": "modules.PyMAF.utils.geometry", + "documentation": {} + }, + { + "label": "quaternion_to_angle_axis", + "kind": 2, + "importPath": "modules.PyMAF.utils.geometry", + "description": "modules.PyMAF.utils.geometry", + "peekOfCode": "def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor:\n \"\"\"\n This function is borrowed from https://github.com/kornia/kornia\n Convert quaternion vector to angle axis of rotation.\n Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h\n Args:\n quaternion (torch.Tensor): tensor with quaternions.\n Return:\n torch.Tensor: tensor with angle axis of rotation.\n Shape:", + "detail": "modules.PyMAF.utils.geometry", + "documentation": {} + }, + { + "label": "quaternion_to_angle", + "kind": 2, + "importPath": "modules.PyMAF.utils.geometry", + "description": "modules.PyMAF.utils.geometry", + "peekOfCode": "def quaternion_to_angle(quaternion: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert quaternion vector to angle of the rotation.\n Args:\n quaternion (torch.Tensor): tensor with quaternions.\n Return:\n torch.Tensor: tensor with angle axis of rotation.\n Shape:\n - Input: :math:`(*, 4)` where `*` means, any number of dimensions\n - Output: :math:`(*, 1)`", + "detail": "modules.PyMAF.utils.geometry", + "documentation": {} + }, + { + "label": "rotation_matrix_to_quaternion", + "kind": 2, + "importPath": "modules.PyMAF.utils.geometry", + "description": "modules.PyMAF.utils.geometry", + "peekOfCode": "def rotation_matrix_to_quaternion(rotation_matrix, eps=1e-6):\n \"\"\"\n This function is borrowed from
https://github.com/kornia/kornia\n Convert 3x4 rotation matrix to 4d quaternion vector\n This algorithm is based on algorithm described in\n https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py#L201\n Args:\n rotation_matrix (Tensor): the rotation matrix to convert.\n Return:\n Tensor: the rotation in quaternion", + "detail": "modules.PyMAF.utils.geometry", + "documentation": {} + }, + { + "label": "batch_euler2matrix", + "kind": 2, + "importPath": "modules.PyMAF.utils.geometry", + "description": "modules.PyMAF.utils.geometry", + "peekOfCode": "def batch_euler2matrix(r):\n return quaternion_to_rotation_matrix(euler_to_quaternion(r))\ndef euler_to_quaternion(r):\n x = r[..., 0]\n y = r[..., 1]\n z = r[..., 2]\n z = z/2.0\n y = y/2.0\n x = x/2.0\n cz = torch.cos(z)", + "detail": "modules.PyMAF.utils.geometry", + "documentation": {} + }, + { + "label": "euler_to_quaternion", + "kind": 2, + "importPath": "modules.PyMAF.utils.geometry", + "description": "modules.PyMAF.utils.geometry", + "peekOfCode": "def euler_to_quaternion(r):\n x = r[..., 0]\n y = r[..., 1]\n z = r[..., 2]\n z = z/2.0\n y = y/2.0\n x = x/2.0\n cz = torch.cos(z)\n sz = torch.sin(z)\n cy = torch.cos(y)", + "detail": "modules.PyMAF.utils.geometry", + "documentation": {} + }, + { + "label": "quaternion_to_rotation_matrix", + "kind": 2, + "importPath": "modules.PyMAF.utils.geometry", + "description": "modules.PyMAF.utils.geometry", + "peekOfCode": "def quaternion_to_rotation_matrix(quat):\n \"\"\"Convert quaternion coefficients to rotation matrix.\n Args:\n quat: size = [B, 4] 4 <===>(w, x, y, z)\n Returns:\n Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]\n \"\"\"\n norm_quat = quat\n norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True)\n w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, 2], norm_quat[:, 3]", + "detail": "modules.PyMAF.utils.geometry", + "documentation": {} + }, + { + "label": "rot6d_to_rotmat", + "kind": 2, + "importPath": "modules.PyMAF.utils.geometry", + "description": "modules.PyMAF.utils.geometry", + "peekOfCode": "def rot6d_to_rotmat(x):\n \"\"\"Convert 6D rotation representation to 3x3 rotation matrix.\n Based on Zhou et al., \"On the Continuity of Rotation Representations in Neural Networks\", CVPR 2019\n Input:\n (B,6) Batch of 6-D rotation representations\n Output:\n (B,3,3) Batch of corresponding rotation matrices\n \"\"\"\n if x.shape[-1] == 6:\n batch_size = x.shape[0]", + "detail": "modules.PyMAF.utils.geometry", + "documentation": {} + }, + { + "label": "rotmat_to_rot6d", + "kind": 2, + "importPath": "modules.PyMAF.utils.geometry", + "description": "modules.PyMAF.utils.geometry", + "peekOfCode": "def rotmat_to_rot6d(x):\n \"\"\"Convert 3x3 rotation matrix to 6D rotation representation.\n Based on Zhou et al., \"On the Continuity of Rotation Representations in Neural Networks\", CVPR 2019\n Input:\n (B,3,3) Batch of corresponding rotation matrices\n Output:\n (B,6) Batch of 6-D rotation representations\n \"\"\"\n batch_size = x.shape[0]\n x = x[:, :, :2]", + "detail": "modules.PyMAF.utils.geometry", + "documentation": {} + }, + { + "label": "rotmat_to_angle", + "kind": 2, + "importPath": "modules.PyMAF.utils.geometry", + "description": "modules.PyMAF.utils.geometry", + "peekOfCode": "def rotmat_to_angle(x):\n \"\"\"Convert rotation to one-D angle.\n Based on Zhou et al., \"On the Continuity of Rotation Representations in Neural Networks\", CVPR 2019\n Input:\n (B,2) Batch of corresponding rotation\n Output:\n (B,1) Batch of 1-D 
angle\n \"\"\"\n a = F.normalize(x)\n angle = torch.atan2(a[:, 0], a[:, 1]).unsqueeze(-1)", + "detail": "modules.PyMAF.utils.geometry", + "documentation": {} + }, + { + "label": "projection", + "kind": 2, + "importPath": "modules.PyMAF.utils.geometry", + "description": "modules.PyMAF.utils.geometry", + "peekOfCode": "def projection(pred_joints, pred_camera, retain_z=False, iwp_mode=True):\n \"\"\" Project 3D points on the image plane based on the given camera info, \n Identity rotation and Weak Perspective (IWP) camera is used when iwp_mode=True, more about camera settings:\n SPEC: Seeing People in the Wild with an Estimated Camera, ICCV 2021\n \"\"\"\n batch_size = pred_joints.shape[0]\n if iwp_mode:\n cam_sxy = pred_camera['cam_sxy']\n pred_cam_t = torch.stack([cam_sxy[:, 1],\n cam_sxy[:, 2],", + "detail": "modules.PyMAF.utils.geometry", + "documentation": {} + }, + { + "label": "perspective_projection", + "kind": 2, + "importPath": "modules.PyMAF.utils.geometry", + "description": "modules.PyMAF.utils.geometry", + "peekOfCode": "def perspective_projection(points, rotation, translation,\n focal_length=None, camera_center=None, cam_intrinsics=None, retain_z=False):\n \"\"\"\n This function computes the perspective projection of a set of points.\n Input:\n points (bs, N, 3): 3D points\n rotation (bs, 3, 3): Camera rotation\n translation (bs, 3): Camera translation\n focal_length (bs,) or scalar: Focal length\n camera_center (bs, 2): Camera center", + "detail": "modules.PyMAF.utils.geometry", + "documentation": {} + }, + { + "label": "convert_to_full_img_cam", + "kind": 2, + "importPath": "modules.PyMAF.utils.geometry", + "description": "modules.PyMAF.utils.geometry", + "peekOfCode": "def convert_to_full_img_cam(\n pare_cam, bbox_height, bbox_center,\n img_w, img_h, focal_length):\n # Converts weak perspective camera estimated by PARE in\n # bbox coords to perspective camera in full image coordinates\n # from https://arxiv.org/pdf/2009.06549.pdf\n s, tx, ty = pare_cam[:, 0], pare_cam[:, 1], pare_cam[:, 2]\n res = 224\n r = bbox_height / res\n tz = 2 * focal_length / (r * res * s)", + "detail": "modules.PyMAF.utils.geometry", + "documentation": {} + }, + { + "label": "estimate_translation_np", + "kind": 2, + "importPath": "modules.PyMAF.utils.geometry", + "description": "modules.PyMAF.utils.geometry", + "peekOfCode": "def estimate_translation_np(S, joints_2d, joints_conf, focal_length=5000, img_size=(224., 224.)):\n \"\"\"Find camera translation that brings 3D joints S closest to the corresponding 2D joints_2d.\n Input:\n S: (25, 3) 3D joint locations\n joints: (25, 3) 2D joint locations and confidence\n Returns:\n (3,) camera translation vector\n \"\"\"\n num_joints = S.shape[0]\n # focal length", + "detail": "modules.PyMAF.utils.geometry", + "documentation": {} + }, + { + "label": "estimate_translation", + "kind": 2, + "importPath": "modules.PyMAF.utils.geometry", + "description": "modules.PyMAF.utils.geometry", + "peekOfCode": "def estimate_translation(S, joints_2d, focal_length=5000., img_size=224., use_all_kps=False):\n \"\"\"Find camera translation that brings 3D joints S closest to the corresponding 2D joints_2d.\n Input:\n S: (B, 49, 3) 3D joint locations\n joints: (B, 49, 3) 2D joint locations and confidence\n Returns:\n (B, 3) camera translation vectors\n \"\"\"\n if isinstance(focal_length, numbers.Number):\n focal_length = [focal_length,] * S.shape[0]", + "detail": "modules.PyMAF.utils.geometry", + "documentation": {} + }, + { + "label": "Rot_y", + "kind": 2, + "importPath":
"modules.PyMAF.utils.geometry", + "description": "modules.PyMAF.utils.geometry", + "peekOfCode": "def Rot_y(angle, category='torch', prepend_dim=True, device=None):\n\t'''Rotate around y-axis by angle\n\tArgs:\n\t\tcategory: 'torch' or 'numpy'\n\t\tprepend_dim: prepend an extra dimension\n\tReturn: Rotation matrix with shape [1, 3, 3] (prepend_dim=True)\n\t'''\n\tm = np.array([\n\t\t\t\t\t[np.cos(angle), 0., np.sin(angle)],\n\t\t\t\t\t[0., 1., 0.],", + "detail": "modules.PyMAF.utils.geometry", + "documentation": {} + }, + { + "label": "Rot_x", + "kind": 2, + "importPath": "modules.PyMAF.utils.geometry", + "description": "modules.PyMAF.utils.geometry", + "peekOfCode": "def Rot_x(angle, category='torch', prepend_dim=True, device=None):\n\t'''Rotate around x-axis by angle\n\tArgs:\n\t\tcategory: 'torch' or 'numpy'\n\t\tprepend_dim: prepend an extra dimension\n\tReturn: Rotation matrix with shape [1, 3, 3] (prepend_dim=True)\n\t'''\n\tm = np.array([\n\t\t\t\t [1., 0., 0.],\n\t\t\t\t [0., np.cos(angle), -np.sin(angle)],", + "detail": "modules.PyMAF.utils.geometry", + "documentation": {} + }, + { + "label": "Rot_z", + "kind": 2, + "importPath": "modules.PyMAF.utils.geometry", + "description": "modules.PyMAF.utils.geometry", + "peekOfCode": "def Rot_z(angle, category='torch', prepend_dim=True, device=None):\n\t'''Rotate around z-axis by angle\n\tArgs:\n\t\tcategory: 'torch' or 'numpy'\n\t\tprepend_dim: prepend an extra dimension\n\tReturn: Rotation matrix with shape [1, 3, 3] (prepend_dim=True)\n\t'''\n\tm = np.array([\n\t\t\t\t [np.cos(angle), -np.sin(angle), 0.],\n\t\t\t\t [np.sin(angle), np.cos(angle), 0.],", + "detail": "modules.PyMAF.utils.geometry", + "documentation": {} + }, + { + "label": "compute_twist_ratation", + "kind": 2, + "importPath": "modules.PyMAF.utils.geometry", + "description": "modules.PyMAF.utils.geometry", + "peekOfCode": "def compute_twist_ratation(rotation_matrix, twist_axis):\n '''\n Compute the twist component of given rotation and twist axis\n https://stackoverflow.com/questions/3684269/component-of-a-quaternion-rotation-around-an-axis\n Parameters\n ----------\n rotation_matrix : Tensor (B, 3, 3,)\n The rotation to convert\n twist_axis : Tensor (B, 3,)\n The twist axis", + "detail": "modules.PyMAF.utils.geometry", + "documentation": {} + }, + { + "label": "\tm", + "kind": 5, + "importPath": "modules.PyMAF.utils.geometry", + "description": "modules.PyMAF.utils.geometry", + "peekOfCode": "\tm = np.array([\n\t\t\t\t\t[np.cos(angle), 0., np.sin(angle)],\n\t\t\t\t\t[0., 1., 0.],\n\t\t\t\t\t[-np.sin(angle), 0., np.cos(angle)]\n\t\t\t\t])\n\tif category == 'torch':\n\t\tif prepend_dim:\n\t\t\treturn torch.tensor(m, dtype=torch.float, device=device).unsqueeze(0)\n\t\telse:\n\t\t\treturn torch.tensor(m, dtype=torch.float, device=device)", + "detail": "modules.PyMAF.utils.geometry", + "documentation": {} + }, + { + "label": "\tm", + "kind": 5, + "importPath": "modules.PyMAF.utils.geometry", + "description": "modules.PyMAF.utils.geometry", + "peekOfCode": "\tm = np.array([\n\t\t\t\t [1., 0., 0.],\n\t\t\t\t [0., np.cos(angle), -np.sin(angle)],\n\t\t\t\t [0., np.sin(angle), np.cos(angle)]\n\t\t\t\t])\n\tif category == 'torch':\n\t\tif prepend_dim:\n\t\t\treturn torch.tensor(m, dtype=torch.float, device=device).unsqueeze(0)\n\t\telse:\n\t\t\treturn torch.tensor(m, dtype=torch.float, device=device)", + "detail": "modules.PyMAF.utils.geometry", + "documentation": {} + }, + { + "label": "\tm", + "kind": 5, + "importPath": "modules.PyMAF.utils.geometry", + "description": 
"modules.PyMAF.utils.geometry", + "peekOfCode": "\tm = np.array([\n\t\t\t\t [np.cos(angle), -np.sin(angle), 0.],\n\t\t\t\t [np.sin(angle), np.cos(angle), 0.],\n\t\t\t\t [0., 0., 1.]\n\t\t\t\t])\n\tif category == 'torch':\n\t\tif prepend_dim:\n\t\t\treturn torch.tensor(m, dtype=torch.float, device=device).unsqueeze(0)\n\t\telse:\n\t\t\treturn torch.tensor(m, dtype=torch.float, device=device)", + "detail": "modules.PyMAF.utils.geometry", + "documentation": {} + }, + { + "label": "get_transform", + "kind": 2, + "importPath": "modules.PyMAF.utils.imutils", + "description": "modules.PyMAF.utils.imutils", + "peekOfCode": "def get_transform(center, scale, res, rot=0):\n \"\"\"Generate transformation matrix.\"\"\"\n h = 200 * scale\n t = np.zeros((3, 3))\n t[0, 0] = float(res[1]) / h\n t[1, 1] = float(res[0]) / h\n t[0, 2] = res[1] * (-float(center[0]) / h + .5)\n t[1, 2] = res[0] * (-float(center[1]) / h + .5)\n t[2, 2] = 1\n if not rot == 0:", + "detail": "modules.PyMAF.utils.imutils", + "documentation": {} + }, + { + "label": "get_rot_transf", + "kind": 2, + "importPath": "modules.PyMAF.utils.imutils", + "description": "modules.PyMAF.utils.imutils", + "peekOfCode": "def get_rot_transf(res, rot):\n \"\"\"Generate rotation transformation matrix.\"\"\"\n if rot == 0:\n return np.identity(3)\n rot = -rot # To match direction of rotation from cropping\n rot_mat = np.zeros((3,3))\n rot_rad = rot * np.pi / 180\n sn,cs = np.sin(rot_rad), np.cos(rot_rad)\n rot_mat[0,:2] = [cs, -sn]\n rot_mat[1,:2] = [sn, cs]", + "detail": "modules.PyMAF.utils.imutils", + "documentation": {} + }, + { + "label": "transform", + "kind": 2, + "importPath": "modules.PyMAF.utils.imutils", + "description": "modules.PyMAF.utils.imutils", + "peekOfCode": "def transform(pt, center, scale, res, invert=0, rot=0):\n \"\"\"Transform pixel location to different reference.\"\"\"\n t = get_transform(center, scale, res, rot=rot)\n if invert:\n t = np.linalg.inv(t)\n new_pt = np.array([pt[0] - 1, pt[1] - 1, 1.]).T\n new_pt = np.dot(t, new_pt)\n return new_pt[:2].astype(int) + 1\ndef transform_pts(coords, center, scale, res, invert=0, rot=0):\n \"\"\"Transform coordinates (N x 2) to different reference.\"\"\"", + "detail": "modules.PyMAF.utils.imutils", + "documentation": {} + }, + { + "label": "transform_pts", + "kind": 2, + "importPath": "modules.PyMAF.utils.imutils", + "description": "modules.PyMAF.utils.imutils", + "peekOfCode": "def transform_pts(coords, center, scale, res, invert=0, rot=0):\n \"\"\"Transform coordinates (N x 2) to different reference.\"\"\"\n new_coords = coords.copy()\n for p in range(coords.shape[0]):\n new_coords[p, 0:2] = transform(coords[p, 0:2], center, scale, res, invert, rot)\n return new_coords\ndef crop(img, center, scale, res, rot=0):\n \"\"\"Crop image according to the supplied bounding box.\"\"\"\n # Upper left point\n ul = np.array(transform([1, 1], center, scale, res, invert=1))-1", + "detail": "modules.PyMAF.utils.imutils", + "documentation": {} + }, + { + "label": "crop", + "kind": 2, + "importPath": "modules.PyMAF.utils.imutils", + "description": "modules.PyMAF.utils.imutils", + "peekOfCode": "def crop(img, center, scale, res, rot=0):\n \"\"\"Crop image according to the supplied bounding box.\"\"\"\n # Upper left point\n ul = np.array(transform([1, 1], center, scale, res, invert=1))-1\n # Bottom right point\n br = np.array(transform([res[0]+1, \n res[1]+1], center, scale, res, invert=1))-1\n # Padding so that when rotated proper amount of context is included\n pad = int(np.linalg.norm(br - ul) / 2 - 
float(br[1] - ul[1]) / 2)\n if not rot == 0:", + "detail": "modules.PyMAF.utils.imutils", + "documentation": {} + }, + { + "label": "uncrop", + "kind": 2, + "importPath": "modules.PyMAF.utils.imutils", + "description": "modules.PyMAF.utils.imutils", + "peekOfCode": "def uncrop(img, center, scale, orig_shape, rot=0, is_rgb=True):\n \"\"\"'Undo' the image cropping/resizing.\n This function is used when evaluating mask/part segmentation.\n \"\"\"\n res = img.shape[:2]\n # Upper left point\n ul = np.array(transform([1, 1], center, scale, res, invert=1))-1\n # Bottom right point\n br = np.array(transform([res[0]+1,res[1]+1], center, scale, res, invert=1))-1\n # size of cropped image", + "detail": "modules.PyMAF.utils.imutils", + "documentation": {} + }, + { + "label": "rot_aa", + "kind": 2, + "importPath": "modules.PyMAF.utils.imutils", + "description": "modules.PyMAF.utils.imutils", + "peekOfCode": "def rot_aa(aa, rot):\n \"\"\"Rotate axis angle parameters.\"\"\"\n # pose parameters\n R = np.array([[np.cos(np.deg2rad(-rot)), -np.sin(np.deg2rad(-rot)), 0],\n [np.sin(np.deg2rad(-rot)), np.cos(np.deg2rad(-rot)), 0],\n [0, 0, 1]])\n # find the rotation of the body in camera frame\n per_rdg, _ = cv2.Rodrigues(aa)\n # apply the global rotation to the global orientation\n resrot, _ = cv2.Rodrigues(np.dot(R,per_rdg))", + "detail": "modules.PyMAF.utils.imutils", + "documentation": {} + }, + { + "label": "flip_img", + "kind": 2, + "importPath": "modules.PyMAF.utils.imutils", + "description": "modules.PyMAF.utils.imutils", + "peekOfCode": "def flip_img(img):\n \"\"\"Flip rgb images or masks.\n channels come last, e.g. (256,256,3).\n \"\"\"\n img = np.fliplr(img)\n return img\ndef flip_kp(kp, is_smpl=False, type='body'):\n \"\"\"Flip keypoints.\"\"\"\n assert type in ['body', 'hand', 'face', 'feet']\n if type == 'body':", + "detail": "modules.PyMAF.utils.imutils", + "documentation": {} + }, + { + "label": "flip_kp", + "kind": 2, + "importPath": "modules.PyMAF.utils.imutils", + "description": "modules.PyMAF.utils.imutils", + "peekOfCode": "def flip_kp(kp, is_smpl=False, type='body'):\n \"\"\"Flip keypoints.\"\"\"\n assert type in ['body', 'hand', 'face', 'feet']\n if type == 'body':\n if len(kp) == 24:\n if is_smpl:\n flipped_parts = constants.SMPL_JOINTS_FLIP_PERM\n else:\n flipped_parts = constants.J24_FLIP_PERM\n elif len(kp) == 49:", + "detail": "modules.PyMAF.utils.imutils", + "documentation": {} + }, + { + "label": "flip_pose", + "kind": 2, + "importPath": "modules.PyMAF.utils.imutils", + "description": "modules.PyMAF.utils.imutils", + "peekOfCode": "def flip_pose(pose):\n \"\"\"Flip pose.\n The flipping is based on SMPL parameters.\n \"\"\"\n flipped_parts = constants.SMPL_POSE_FLIP_PERM\n pose = pose[flipped_parts]\n # we also negate the second and the third dimension of the axis-angle\n pose[1::3] = -pose[1::3]\n pose[2::3] = -pose[2::3]\n return pose", + "detail": "modules.PyMAF.utils.imutils", + "documentation": {} + }, + { + "label": "flip_aa", + "kind": 2, + "importPath": "modules.PyMAF.utils.imutils", + "description": "modules.PyMAF.utils.imutils", + "peekOfCode": "def flip_aa(pose):\n \"\"\"Flip aa.\n \"\"\"\n # we also negate the second and the third dimension of the axis-angle\n if len(pose.shape) == 1:\n pose[1::3] = -pose[1::3]\n pose[2::3] = -pose[2::3]\n elif len(pose.shape) == 2:\n pose[:, 1::3] = -pose[:, 1::3]\n pose[:, 2::3] = -pose[:, 2::3]", + "detail": "modules.PyMAF.utils.imutils", + "documentation": {} + }, + { + "label": "normalize_2d_kp", + "kind": 2, + "importPath": 
"modules.PyMAF.utils.imutils", + "description": "modules.PyMAF.utils.imutils", + "peekOfCode": "def normalize_2d_kp(kp_2d, crop_size=224, inv=False):\n # Normalize keypoints between -1, 1\n if not inv:\n ratio = 1.0 / crop_size\n kp_2d = 2.0 * kp_2d * ratio - 1.0\n else:\n ratio = 1.0 / crop_size\n kp_2d = (kp_2d + 1.0)/(2*ratio)\n return kp_2d\ndef j2d_processing(kp, transf):", + "detail": "modules.PyMAF.utils.imutils", + "documentation": {} + }, + { + "label": "j2d_processing", + "kind": 2, + "importPath": "modules.PyMAF.utils.imutils", + "description": "modules.PyMAF.utils.imutils", + "peekOfCode": "def j2d_processing(kp, transf):\n \"\"\"Process gt 2D keypoints and apply transforms.\"\"\"\n # nparts = kp.shape[1]\n bs, npart = kp.shape[:2]\n kp_pad = torch.cat([kp, torch.ones((bs, npart, 1)).to(kp)], dim=-1)\n kp_new = torch.bmm(transf, kp_pad.transpose(1, 2))\n kp_new = kp_new.transpose(1, 2)\n kp_new[:, :, :-1] = 2.*kp_new[:, :, :-1] / constants.IMG_RES - 1.\n return kp_new[:, :, :2]\ndef generate_heatmap(joints, heatmap_size, sigma=1, joints_vis=None):", + "detail": "modules.PyMAF.utils.imutils", + "documentation": {} + }, + { + "label": "generate_heatmap", + "kind": 2, + "importPath": "modules.PyMAF.utils.imutils", + "description": "modules.PyMAF.utils.imutils", + "peekOfCode": "def generate_heatmap(joints, heatmap_size, sigma=1, joints_vis=None):\n '''\n param joints: [num_joints, 3]\n param joints_vis: [num_joints, 3]\n return: target, target_weight(1: visible, 0: invisible)\n '''\n num_joints = joints.shape[0]\n device = joints.device\n cur_device = torch.device(device.type, device.index)\n if not hasattr(heatmap_size, '__len__'):", + "detail": "modules.PyMAF.utils.imutils", + "documentation": {} + }, + { + "label": "save_object", + "kind": 2, + "importPath": "modules.PyMAF.utils.io", + "description": "modules.PyMAF.utils.io", + "peekOfCode": "def save_object(obj, file_name):\n \"\"\"Save a Python object by pickling it.\"\"\"\n file_name = os.path.abspath(file_name)\n with open(file_name, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\ndef cache_url(url_or_file, cache_dir):\n \"\"\"Download the file specified by the URL to the cache_dir and return the\n path to the cached file. If the argument is not a URL, simply return it as\n is.\n \"\"\"", + "detail": "modules.PyMAF.utils.io", + "documentation": {} + }, + { + "label": "cache_url", + "kind": 2, + "importPath": "modules.PyMAF.utils.io", + "description": "modules.PyMAF.utils.io", + "peekOfCode": "def cache_url(url_or_file, cache_dir):\n \"\"\"Download the file specified by the URL to the cache_dir and return the\n path to the cached file. 
If the argument is not a URL, simply return it as\n is.\n \"\"\"\n is_url = re.match(r'^(?:http)s?://', url_or_file, re.IGNORECASE) is not None\n if not is_url:\n return url_or_file\n url = url_or_file\n # assert url.startswith(_DETECTRON_S3_BASE_URL), \\", + "detail": "modules.PyMAF.utils.io", + "documentation": {} + }, + { + "label": "assert_cache_file_is_ok", + "kind": 2, + "importPath": "modules.PyMAF.utils.io", + "description": "modules.PyMAF.utils.io", + "peekOfCode": "def assert_cache_file_is_ok(url, file_path):\n \"\"\"Check that cache file has the correct hash.\"\"\"\n # File is already in the cache, verify that the md5sum matches and\n # return local path\n cache_file_md5sum = _get_file_md5sum(file_path)\n ref_md5sum = _get_reference_md5sum(url)\n assert cache_file_md5sum == ref_md5sum, \\\n ('Target URL {} appears to be downloaded to the local cache file '\n '{}, but the md5 hash of the local file does not match the '\n 'reference (actual: {} vs. expected: {}). You may wish to delete '", + "detail": "modules.PyMAF.utils.io", + "documentation": {} + }, + { + "label": "download_url", + "kind": 2, + "importPath": "modules.PyMAF.utils.io", + "description": "modules.PyMAF.utils.io", + "peekOfCode": "def download_url(\n url, dst_file_path, chunk_size=8192, progress_hook=_progress_bar\n):\n \"\"\"Download url and write it to dst_file_path.\n Credit:\n https://stackoverflow.com/questions/2028517/python-urllib2-progress-hook\n \"\"\"\n response = urlopen(url)\n total_size = response.info().getheader('Content-Length').strip()\n total_size = int(total_size)", + "detail": "modules.PyMAF.utils.io", + "documentation": {} + }, + { + "label": "logger", + "kind": 5, + "importPath": "modules.PyMAF.utils.io", + "description": "modules.PyMAF.utils.io", + "peekOfCode": "logger = logging.getLogger(__name__)\n_DETECTRON_S3_BASE_URL = 'https://s3-us-west-2.amazonaws.com/detectron'\ndef save_object(obj, file_name):\n \"\"\"Save a Python object by pickling it.\"\"\"\n file_name = os.path.abspath(file_name)\n with open(file_name, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\ndef cache_url(url_or_file, cache_dir):\n \"\"\"Download the file specified by the URL to the cache_dir and return the\n path to the cached file. If the argument is not a URL, simply return it as", + "detail": "modules.PyMAF.utils.io", + "documentation": {} + }, + { + "label": "_DETECTRON_S3_BASE_URL", + "kind": 5, + "importPath": "modules.PyMAF.utils.io", + "description": "modules.PyMAF.utils.io", + "peekOfCode": "_DETECTRON_S3_BASE_URL = 'https://s3-us-west-2.amazonaws.com/detectron'\ndef save_object(obj, file_name):\n \"\"\"Save a Python object by pickling it.\"\"\"\n file_name = os.path.abspath(file_name)\n with open(file_name, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\ndef cache_url(url_or_file, cache_dir):\n \"\"\"Download the file specified by the URL to the cache_dir and return the\n path to the cached file. 
If the argument is not a URL, simply return it as\n is.", + "detail": "modules.PyMAF.utils.io", + "documentation": {} + }, + { + "label": "iuvmap_clean", + "kind": 2, + "importPath": "modules.PyMAF.utils.iuvmap", + "description": "modules.PyMAF.utils.iuvmap", + "peekOfCode": "def iuvmap_clean(U_uv, V_uv, Index_UV, AnnIndex=None):\n Index_UV_max = torch.argmax(Index_UV, dim=1).float()\n recon_Index_UV = []\n for i in range(Index_UV.size(1)):\n if i == 0:\n recon_Index_UV_i = torch.min(F.threshold(Index_UV_max + 1, 0.5, 0),\n -F.threshold(-Index_UV_max - 1, -1.5, 0))\n else:\n recon_Index_UV_i = torch.min(F.threshold(Index_UV_max, i - 0.5, 0),\n -F.threshold(-Index_UV_max, -i - 0.5, 0)) / float(i)", + "detail": "modules.PyMAF.utils.iuvmap", + "documentation": {} + }, + { + "label": "iuv_map2img", + "kind": 2, + "importPath": "modules.PyMAF.utils.iuvmap", + "description": "modules.PyMAF.utils.iuvmap", + "peekOfCode": "def iuv_map2img(U_uv, V_uv, Index_UV, AnnIndex=None, uv_rois=None, ind_mapping=None, n_part=24):\n device_id = U_uv.get_device()\n batch_size = U_uv.size(0)\n K = U_uv.size(1)\n heatmap_size = U_uv.size(2)\n Index_UV_max = torch.argmax(Index_UV, dim=1)\n if AnnIndex is None:\n Index_UV_max = Index_UV_max.to(torch.int64)\n else:\n AnnIndex_max = torch.argmax(AnnIndex, dim=1)", + "detail": "modules.PyMAF.utils.iuvmap", + "documentation": {} + }, + { + "label": "iuv_img2map", + "kind": 2, + "importPath": "modules.PyMAF.utils.iuvmap", + "description": "modules.PyMAF.utils.iuvmap", + "peekOfCode": "def iuv_img2map(uvimages, uv_rois=None, new_size=None, n_part=24):\n device_id = uvimages.get_device()\n batch_size = uvimages.size(0)\n uvimg_size = uvimages.size(-1)\n Index2mask = [[0], [1, 2], [3], [4], [5], [6], [7, 9], [8, 10], [11, 13], [12, 14], [15, 17], [16, 18], [19, 21],\n [20, 22], [23, 24]]\n part_ind = torch.round(uvimages[:, 0, :, :] * n_part)\n part_u = uvimages[:, 1, :, :]\n part_v = uvimages[:, 2, :, :]\n recon_U = []", + "detail": "modules.PyMAF.utils.iuvmap", + "documentation": {} + }, + { + "label": "seg_img2map", + "kind": 2, + "importPath": "modules.PyMAF.utils.iuvmap", + "description": "modules.PyMAF.utils.iuvmap", + "peekOfCode": "def seg_img2map(segimages, uv_rois=None, new_size=None, n_part=24):\n device_id = segimages.get_device()\n batch_size = segimages.size(0)\n uvimg_size = segimages.size(-1)\n part_ind = torch.round(segimages[:, 0, :, :] * n_part)\n recon_Index_UV = []\n for i in range(n_part+1):\n if i == 0:\n recon_Index_UV_i = torch.min(F.threshold(part_ind + 1, 0.5, 0), -F.threshold(-part_ind - 1, -1.5, 0))\n else:", + "detail": "modules.PyMAF.utils.iuvmap", + "documentation": {} + }, + { + "label": "get_keypoints", + "kind": 2, + "importPath": "modules.PyMAF.utils.keypoints", + "description": "modules.PyMAF.utils.keypoints", + "peekOfCode": "def get_keypoints():\n \"\"\"Get the COCO keypoints and their left/right flip correspondence map.\"\"\"\n # Keypoints are not available in the COCO json for the test split, so we\n # provide them here.\n keypoints = [\n 'nose',\n 'left_eye',\n 'right_eye',\n 'left_ear',\n 'right_ear',", + "detail": "modules.PyMAF.utils.keypoints", + "documentation": {} + }, + { + "label": "get_person_class_index", + "kind": 2, + "importPath": "modules.PyMAF.utils.keypoints", + "description": "modules.PyMAF.utils.keypoints", + "peekOfCode": "def get_person_class_index():\n \"\"\"Index of the person class in COCO.\"\"\"\n return 1\ndef flip_keypoints(keypoints, keypoint_flip_map, keypoint_coords, width):\n \"\"\"Left/right flip
keypoint_coords. keypoints and keypoint_flip_map are\n accessible from get_keypoints().\n \"\"\"\n flipped_kps = keypoint_coords.copy()\n for lkp, rkp in keypoint_flip_map.items():\n lid = keypoints.index(lkp)", + "detail": "modules.PyMAF.utils.keypoints", + "documentation": {} + }, + { + "label": "flip_keypoints", + "kind": 2, + "importPath": "modules.PyMAF.utils.keypoints", + "description": "modules.PyMAF.utils.keypoints", + "peekOfCode": "def flip_keypoints(keypoints, keypoint_flip_map, keypoint_coords, width):\n \"\"\"Left/right flip keypoint_coords. keypoints and keypoint_flip_map are\n accessible from get_keypoints().\n \"\"\"\n flipped_kps = keypoint_coords.copy()\n for lkp, rkp in keypoint_flip_map.items():\n lid = keypoints.index(lkp)\n rid = keypoints.index(rkp)\n flipped_kps[:, :, lid] = keypoint_coords[:, :, rid]\n flipped_kps[:, :, rid] = keypoint_coords[:, :, lid]", + "detail": "modules.PyMAF.utils.keypoints", + "documentation": {} + }, + { + "label": "flip_heatmaps", + "kind": 2, + "importPath": "modules.PyMAF.utils.keypoints", + "description": "modules.PyMAF.utils.keypoints", + "peekOfCode": "def flip_heatmaps(heatmaps):\n \"\"\"Flip heatmaps horizontally.\"\"\"\n keypoints, flip_map = get_keypoints()\n heatmaps_flipped = heatmaps.copy()\n for lkp, rkp in flip_map.items():\n lid = keypoints.index(lkp)\n rid = keypoints.index(rkp)\n heatmaps_flipped[:, rid, :, :] = heatmaps[:, lid, :, :]\n heatmaps_flipped[:, lid, :, :] = heatmaps[:, rid, :, :]\n heatmaps_flipped = heatmaps_flipped[:, :, :, ::-1]", + "detail": "modules.PyMAF.utils.keypoints", + "documentation": {} + }, + { + "label": "heatmaps_to_keypoints", + "kind": 2, + "importPath": "modules.PyMAF.utils.keypoints", + "description": "modules.PyMAF.utils.keypoints", + "peekOfCode": "def heatmaps_to_keypoints(maps, rois):\n \"\"\"Extract predicted keypoint locations from heatmaps. Output has shape\n (#rois, 4, #keypoints) with the 4 rows corresponding to (x, y, logit, prob)\n for each keypoint.\n \"\"\"\n # This function converts a discrete image coordinate in a HEATMAP_SIZE x\n # HEATMAP_SIZE image to a continuous keypoint coordinate. We maintain\n # consistency with keypoints_to_heatmap_labels by using the conversion from\n # Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a\n # continuous coordinate.", + "detail": "modules.PyMAF.utils.keypoints", + "documentation": {} + }, + { + "label": "keypoints_to_heatmap_labels", + "kind": 2, + "importPath": "modules.PyMAF.utils.keypoints", + "description": "modules.PyMAF.utils.keypoints", + "peekOfCode": "def keypoints_to_heatmap_labels(keypoints, rois):\n \"\"\"Encode keypoint location in the target heatmap for use in\n SoftmaxWithLoss.\n \"\"\"\n # Maps keypoints from the half-open interval [x1, x2) on continuous image\n # coordinates to the closed interval [0, HEATMAP_SIZE - 1] on discrete image\n # coordinates. 
We use the continuous <-> discrete conversion from Heckbert\n # 1990 (\"What is the coordinate of a pixel?\"): d = floor(c) and c = d + 0.5,\n # where d is a discrete coordinate and c is a continuous coordinate.\n assert keypoints.shape[2] == cfg.KRCNN.NUM_KEYPOINTS", + "detail": "modules.PyMAF.utils.keypoints", + "documentation": {} + }, + { + "label": "scores_to_probs", + "kind": 2, + "importPath": "modules.PyMAF.utils.keypoints", + "description": "modules.PyMAF.utils.keypoints", + "peekOfCode": "def scores_to_probs(scores):\n \"\"\"Transforms CxHxW of scores to probabilities spatially.\"\"\"\n channels = scores.shape[0]\n for c in range(channels):\n temp = scores[c, :, :]\n max_score = temp.max()\n temp = np.exp(temp - max_score) / np.sum(np.exp(temp - max_score))\n scores[c, :, :] = temp\n return scores\ndef nms_oks(kp_predictions, rois, thresh):", + "detail": "modules.PyMAF.utils.keypoints", + "documentation": {} + }, + { + "label": "nms_oks", + "kind": 2, + "importPath": "modules.PyMAF.utils.keypoints", + "description": "modules.PyMAF.utils.keypoints", + "peekOfCode": "def nms_oks(kp_predictions, rois, thresh):\n \"\"\"Nms based on kp predictions.\"\"\"\n scores = np.mean(kp_predictions[:, 2, :], axis=1)\n order = scores.argsort()[::-1]\n keep = []\n while order.size > 0:\n i = order[0]\n keep.append(i)\n ovr = compute_oks(\n kp_predictions[i], rois[i], kp_predictions[order[1:]],", + "detail": "modules.PyMAF.utils.keypoints", + "documentation": {} + }, + { + "label": "compute_oks", + "kind": 2, + "importPath": "modules.PyMAF.utils.keypoints", + "description": "modules.PyMAF.utils.keypoints", + "peekOfCode": "def compute_oks(src_keypoints, src_roi, dst_keypoints, dst_roi):\n \"\"\"Compute OKS for predicted keypoints wrt gt_keypoints.\n src_keypoints: 4xK\n src_roi: 4x1\n dst_keypoints: Nx4xK\n dst_roi: Nx4\n \"\"\"\n sigmas = np.array([\n .26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, .87,\n .87, .89, .89]) / 10.0", + "detail": "modules.PyMAF.utils.keypoints", + "documentation": {} + }, + { + "label": "get_max_preds", + "kind": 2, + "importPath": "modules.PyMAF.utils.keypoints", + "description": "modules.PyMAF.utils.keypoints", + "peekOfCode": "def get_max_preds(batch_heatmaps):\n '''\n get predictions from score maps\n heatmaps: numpy.ndarray([batch_size, num_joints, height, width])\n '''\n assert isinstance(batch_heatmaps, np.ndarray), \\\n 'batch_heatmaps should be numpy.ndarray'\n assert batch_heatmaps.ndim == 4, 'batch_images should be 4-ndim'\n batch_size = batch_heatmaps.shape[0]\n num_joints = batch_heatmaps.shape[1]", + "detail": "modules.PyMAF.utils.keypoints", + "documentation": {} + }, + { + "label": "generate_3d_integral_preds_tensor", + "kind": 2, + "importPath": "modules.PyMAF.utils.keypoints", + "description": "modules.PyMAF.utils.keypoints", + "peekOfCode": "def generate_3d_integral_preds_tensor(heatmaps, num_joints, x_dim, y_dim, z_dim):\n assert isinstance(heatmaps, torch.Tensor)\n if z_dim is not None:\n heatmaps = heatmaps.reshape((heatmaps.shape[0], num_joints, z_dim, y_dim, x_dim))\n accu_x = heatmaps.sum(dim=2)\n accu_x = accu_x.sum(dim=2)\n accu_y = heatmaps.sum(dim=2)\n accu_y = accu_y.sum(dim=3)\n accu_z = heatmaps.sum(dim=3)\n accu_z = accu_z.sum(dim=3)", + "detail": "modules.PyMAF.utils.keypoints", + "documentation": {} + }, + { + "label": "softmax_integral_tensor", + "kind": 2, + "importPath": "modules.PyMAF.utils.keypoints", + "description": "modules.PyMAF.utils.keypoints", + "peekOfCode": "def softmax_integral_tensor(preds, 
num_joints, hm_width, hm_height, hm_depth=None):\n # global soft max\n preds = preds.reshape((preds.shape[0], num_joints, -1))\n preds = F.softmax(preds, 2)\n output_3d = False if hm_depth is None else True\n # integrate heatmap into joint location\n if output_3d:\n x, y, z = generate_3d_integral_preds_tensor(preds, num_joints, hm_width, hm_height, hm_depth)\n # x = x / float(hm_width) - 0.5\n # y = y / float(hm_height) - 0.5", + "detail": "modules.PyMAF.utils.keypoints", + "documentation": {} + }, + { + "label": "Generator3D", + "kind": 6, + "importPath": "modules.PyMAF.utils.mesh_generation", + "description": "modules.PyMAF.utils.mesh_generation", + "peekOfCode": "class Generator3D(object):\n ''' Generator class for DVRs.\n It provides functions to generate the final mesh as well refining options.\n Args:\n model (nn.Module): trained DVR model\n points_batch_size (int): batch size for points evaluation\n threshold (float): threshold value\n refinement_step (int): number of refinement steps\n device (device): pytorch device\n resolution0 (int): start resolution for MISE", + "detail": "modules.PyMAF.utils.mesh_generation", + "documentation": {} + }, + { + "label": "PartRenderer", + "kind": 6, + "importPath": "modules.PyMAF.utils.part_utils", + "description": "modules.PyMAF.utils.part_utils", + "peekOfCode": "class PartRenderer():\n \"\"\"Renderer used to render segmentation masks and part segmentations.\n Internally it uses the Neural 3D Mesh Renderer\n \"\"\"\n def __init__(self, focal_length=5000., render_res=224):\n # Parameters for rendering\n self.focal_length = focal_length\n self.render_res = render_res\n # We use Neural 3D mesh renderer for rendering masks and part segmentations\n self.neural_renderer = nr.Renderer(dist_coeffs=None, orig_size=self.render_res,", + "detail": "modules.PyMAF.utils.part_utils", + "documentation": {} + }, + { + "label": "run_openpose", + "kind": 2, + "importPath": "modules.PyMAF.utils.pose_tracker", + "description": "modules.PyMAF.utils.pose_tracker", + "peekOfCode": "def run_openpose(\n video_file,\n output_folder,\n staf_folder,\n vis=False,\n):\n pwd = os.getcwd()\n os.chdir(staf_folder)\n render = 1 if vis else 0\n display = 2 if vis else 0", + "detail": "modules.PyMAF.utils.pose_tracker", + "documentation": {} + }, + { + "label": "read_posetrack_keypoints", + "kind": 2, + "importPath": "modules.PyMAF.utils.pose_tracker", + "description": "modules.PyMAF.utils.pose_tracker", + "peekOfCode": "def read_posetrack_keypoints(output_folder):\n people = dict()\n for idx, result_file in enumerate(sorted(os.listdir(output_folder))):\n json_file = osp.join(output_folder, result_file)\n data = json.load(open(json_file))\n # print(idx, data)\n for person in data['people']:\n person_id = person['person_id'][0]\n joints2d = person['pose_keypoints_2d']\n if person_id in people.keys():", + "detail": "modules.PyMAF.utils.pose_tracker", + "documentation": {} + }, + { + "label": "run_posetracker", + "kind": 2, + "importPath": "modules.PyMAF.utils.pose_tracker", + "description": "modules.PyMAF.utils.pose_tracker", + "peekOfCode": "def run_posetracker(video_file, staf_folder, posetrack_output_folder='/tmp', display=False):\n posetrack_output_folder = os.path.join(\n posetrack_output_folder,\n f'{os.path.basename(video_file)}_posetrack'\n )\n # run posetrack on video\n run_openpose(\n video_file,\n posetrack_output_folder,\n vis=display,", + "detail": "modules.PyMAF.utils.pose_tracker", + "documentation": {} + }, + { + "label": "compute_similarity_transform", + "kind": 2, 
+ "importPath": "modules.PyMAF.utils.pose_utils", + "description": "modules.PyMAF.utils.pose_utils", + "peekOfCode": "def compute_similarity_transform(S1, S2):\n \"\"\"\n Computes a similarity transform (sR, t) that brings\n a set of 3D points S1 (3 x N) closest to a set of 3D points S2,\n where R is a 3x3 rotation matrix, t a 3x1 translation, s a scale.\n i.e. solves the orthogonal Procrustes problem.\n \"\"\"\n transposed = False\n if S1.shape[0] != 3 and S1.shape[0] != 2:\n S1 = S1.T", + "detail": "modules.PyMAF.utils.pose_utils", + "documentation": {} + }, + { + "label": "compute_similarity_transform_batch", + "kind": 2, + "importPath": "modules.PyMAF.utils.pose_utils", + "description": "modules.PyMAF.utils.pose_utils", + "peekOfCode": "def compute_similarity_transform_batch(S1, S2):\n \"\"\"Batched version of compute_similarity_transform.\"\"\"\n S1_hat = np.zeros_like(S1)\n for i in range(S1.shape[0]):\n S1_hat[i] = compute_similarity_transform(S1[i], S2[i])\n return S1_hat\ndef reconstruction_error(S1, S2, reduction='mean'):\n \"\"\"Do Procrustes alignment and compute reconstruction error.\"\"\"\n S1_hat = compute_similarity_transform_batch(S1, S2)\n re = np.sqrt( ((S1_hat - S2)** 2).sum(axis=-1)).mean(axis=-1)", + "detail": "modules.PyMAF.utils.pose_utils", + "documentation": {} + }, + { + "label": "reconstruction_error", + "kind": 2, + "importPath": "modules.PyMAF.utils.pose_utils", + "description": "modules.PyMAF.utils.pose_utils", + "peekOfCode": "def reconstruction_error(S1, S2, reduction='mean'):\n \"\"\"Do Procrustes alignment and compute reconstruction error.\"\"\"\n S1_hat = compute_similarity_transform_batch(S1, S2)\n re = np.sqrt( ((S1_hat - S2)** 2).sum(axis=-1)).mean(axis=-1)\n if reduction == 'mean':\n re = re.mean()\n elif reduction == 'sum':\n re = re.sum()\n return re, S1_hat\n# https://math.stackexchange.com/questions/382760/composition-of-two-axis-angle-rotations", + "detail": "modules.PyMAF.utils.pose_utils", + "documentation": {} + }, + { + "label": "axis_angle_add", + "kind": 2, + "importPath": "modules.PyMAF.utils.pose_utils", + "description": "modules.PyMAF.utils.pose_utils", + "peekOfCode": "def axis_angle_add(theta, roll_axis, alpha):\n \"\"\"Composition of two axis-angle rotations (PyTorch version)\n Args:\n theta: N x 3\n roll_axis: N x 3\n alpha: N x 1\n Returns:\n equivalent axis-angle of the composition\n \"\"\"\n alpha = alpha / 2.", + "detail": "modules.PyMAF.utils.pose_utils", + "documentation": {} + }, + { + "label": "axis_angle_add_np", + "kind": 2, + "importPath": "modules.PyMAF.utils.pose_utils", + "description": "modules.PyMAF.utils.pose_utils", + "peekOfCode": "def axis_angle_add_np(theta, roll_axis, alpha):\n \"\"\"Composition of two axis-angle rotations (NumPy version)\n Args:\n theta: N x 3\n roll_axis: N x 3\n alpha: N x 1\n Returns:\n equivalent axis-angle of the composition\n \"\"\"\n alpha = alpha / 2.", + "detail": "modules.PyMAF.utils.pose_utils", + "documentation": {} + }, + { + "label": "WeakPerspectiveCamera", + "kind": 6, + "importPath": "modules.PyMAF.utils.renderer", + "description": "modules.PyMAF.utils.renderer", + "peekOfCode": "class WeakPerspectiveCamera(pyrender.Camera):\n def __init__(self,\n scale,\n translation,\n znear=pyrender.camera.DEFAULT_Z_NEAR,\n zfar=None,\n name=None):\n super(WeakPerspectiveCamera, self).__init__(\n znear=znear,\n zfar=zfar,", + "detail": "modules.PyMAF.utils.renderer", + "documentation": {} + }, + { + "label": "PyRenderer", + "kind": 6, + "importPath": "modules.PyMAF.utils.renderer", +
"description": "modules.PyMAF.utils.renderer", + "peekOfCode": "class PyRenderer:\n def __init__(self, resolution=(224,224), orig_img=False, wireframe=False, scale_ratio=1., vis_ratio=1.):\n self.resolution = (resolution[0] * scale_ratio, resolution[1] * scale_ratio)\n # self.scale_ratio = scale_ratio\n self.faces = {'smplx': get_model_faces('smplx'),\n 'smpl': get_model_faces('smpl'),\n # 'mano': get_model_faces('mano'),\n # 'flame': get_model_faces('flame'),\n }\n self.orig_img = orig_img", + "detail": "modules.PyMAF.utils.renderer", + "documentation": {} + }, + { + "label": "OpenDRenderer", + "kind": 6, + "importPath": "modules.PyMAF.utils.renderer", + "description": "modules.PyMAF.utils.renderer", + "peekOfCode": "class OpenDRenderer:\n def __init__(self, resolution=(224, 224), ratio=1):\n self.resolution = (resolution[0] * ratio, resolution[1] * ratio)\n self.ratio = ratio\n self.focal_length = 5000.\n self.K = np.array([[self.focal_length, 0., self.resolution[1] / 2.],\n [0., self.focal_length, self.resolution[0] / 2.],\n [0., 0., 1.]])\n self.colors_dict = {\n 'red': np.array([0.5, 0.2, 0.2]),", + "detail": "modules.PyMAF.utils.renderer", + "documentation": {} + }, + { + "label": "IUV_Renderer", + "kind": 6, + "importPath": "modules.PyMAF.utils.renderer", + "description": "modules.PyMAF.utils.renderer", + "peekOfCode": "class IUV_Renderer(object):\n def __init__(self, focal_length=5000., orig_size=224, output_size=56, mode='iuv', device=torch.device('cuda'), mesh_type='smpl'):\n self.focal_length = focal_length\n self.orig_size = orig_size\n self.output_size = output_size\n if mode in ['iuv']:\n if mesh_type == 'smpl':\n DP = DensePoseMethods()\n vert_mapping = DP.All_vertices.astype('int64') - 1\n self.vert_mapping = torch.from_numpy(vert_mapping)", + "detail": "modules.PyMAF.utils.renderer", + "documentation": {} + }, + { + "label": "rotateY", + "kind": 2, + "importPath": "modules.PyMAF.utils.renderer", + "description": "modules.PyMAF.utils.renderer", + "peekOfCode": "def rotateY(points, angle):\n \"\"\"Rotate all points in a 2D array around the y axis.\"\"\"\n ry = np.array([\n [np.cos(angle), 0., np.sin(angle)],\n [0., 1., 0. ],\n [-np.sin(angle), 0., np.cos(angle)]\n ])\n return np.dot(points, ry)\ndef rotateX( points, angle ):\n \"\"\"Rotate all points in a 2D array around the x axis.\"\"\"", + "detail": "modules.PyMAF.utils.renderer", + "documentation": {} + }, + { + "label": "rotateX", + "kind": 2, + "importPath": "modules.PyMAF.utils.renderer", + "description": "modules.PyMAF.utils.renderer", + "peekOfCode": "def rotateX( points, angle ):\n \"\"\"Rotate all points in a 2D array around the x axis.\"\"\"\n rx = np.array([\n [1., 0., 0. ],\n [0., np.cos(angle), -np.sin(angle)],\n [0., np.sin(angle), np.cos(angle) ]\n ])\n return np.dot(points, rx)\ndef rotateZ( points, angle ):\n \"\"\"Rotate all points in a 2D array around the z axis.\"\"\"", + "detail": "modules.PyMAF.utils.renderer", + "documentation": {} + }, + { + "label": "rotateZ", + "kind": 2, + "importPath": "modules.PyMAF.utils.renderer", + "description": "modules.PyMAF.utils.renderer", + "peekOfCode": "def rotateZ( points, angle ):\n \"\"\"Rotate all points in a 2D array around the z axis.\"\"\"\n rz = np.array([\n [np.cos(angle), -np.sin(angle), 0. ],\n [np.sin(angle), np.cos(angle), 0. ],\n [0., 0., 1. 
]\n ])\n return np.dot(points, rz)\nclass IUV_Renderer(object):\n def __init__(self, focal_length=5000., orig_size=224, output_size=56, mode='iuv', device=torch.device('cuda'), mesh_type='smpl'):", + "detail": "modules.PyMAF.utils.renderer", + "documentation": {} + }, + { + "label": "logger", + "kind": 5, + "importPath": "modules.PyMAF.utils.renderer", + "description": "modules.PyMAF.utils.renderer", + "peekOfCode": "logger = logging.getLogger(__name__)\nclass WeakPerspectiveCamera(pyrender.Camera):\n def __init__(self,\n scale,\n translation,\n znear=pyrender.camera.DEFAULT_Z_NEAR,\n zfar=None,\n name=None):\n super(WeakPerspectiveCamera, self).__init__(\n znear=znear,", + "detail": "modules.PyMAF.utils.renderer", + "documentation": {} + }, + { + "label": "get_occ_gt", + "kind": 2, + "importPath": "modules.PyMAF.utils.sample_mesh", + "description": "modules.PyMAF.utils.sample_mesh", + "peekOfCode": "def get_occ_gt(in_path=None, vertices=None, faces=None, pts_num=1000, points_sigma=0.01, with_dp=False, points=None, extra_points=None):\n if in_path is not None:\n mesh = trimesh.load(in_path, process=False)\n print(type(mesh.vertices), mesh.vertices.shape, mesh.faces.shape)\n mesh = trimesh.Trimesh(vertices=vertices, faces=faces, process=False)\n # print('get_occ_gt', type(mesh.vertices), mesh.vertices.shape, mesh.faces.shape)\n # points_size = 100000\n points_padding = 0.1\n # points_sigma = 0.01\n points_uniform_ratio = 0.5", + "detail": "modules.PyMAF.utils.sample_mesh", + "documentation": {} + }, + { + "label": "CheckpointSaver", + "kind": 6, + "importPath": "modules.PyMAF.utils.saver", + "description": "modules.PyMAF.utils.saver", + "peekOfCode": "class CheckpointSaver():\n \"\"\"Class that handles saving and loading checkpoints during training.\"\"\"\n def __init__(self, save_dir, save_steps=1000, overwrite=False):\n self.save_dir = os.path.abspath(save_dir)\n self.save_steps = save_steps\n self.overwrite = overwrite\n if not os.path.exists(self.save_dir):\n os.makedirs(self.save_dir)\n self.get_latest_checkpoint()\n return", + "detail": "modules.PyMAF.utils.saver", + "documentation": {} + }, + { + "label": "logger", + "kind": 5, + "importPath": "modules.PyMAF.utils.saver", + "description": "modules.PyMAF.utils.saver", + "peekOfCode": "logger = logging.getLogger(__name__)\nclass CheckpointSaver():\n \"\"\"Class that handles saving and loading checkpoints during training.\"\"\"\n def __init__(self, save_dir, save_steps=1000, overwrite=False):\n self.save_dir = os.path.abspath(save_dir)\n self.save_steps = save_steps\n self.overwrite = overwrite\n if not os.path.exists(self.save_dir):\n os.makedirs(self.save_dir)\n self.get_latest_checkpoint()", + "detail": "modules.PyMAF.utils.saver", + "documentation": {} + }, + { + "label": "GetDensePoseMask", + "kind": 2, + "importPath": "modules.PyMAF.utils.segms", + "description": "modules.PyMAF.utils.segms", + "peekOfCode": "def GetDensePoseMask(Polys):\n MaskGen = np.zeros([256, 256])\n for i in range(1, 15):\n if (Polys[i - 1]):\n current_mask = mask_util.decode(Polys[i - 1])\n MaskGen[current_mask > 0] = i\n return MaskGen\ndef flip_segms(segms, height, width):\n \"\"\"Left/right flip each mask in a list of masks.\"\"\"\n def _flip_poly(poly, width):", + "detail": "modules.PyMAF.utils.segms", + "documentation": {} + }, + { + "label": "flip_segms", + "kind": 2, + "importPath": "modules.PyMAF.utils.segms", + "description": "modules.PyMAF.utils.segms", + "peekOfCode": "def flip_segms(segms, height, width):\n \"\"\"Left/right flip each mask in a 
list of masks.\"\"\"\n def _flip_poly(poly, width):\n flipped_poly = np.array(poly)\n flipped_poly[0::2] = width - np.array(poly[0::2]) - 1\n return flipped_poly.tolist()\n def _flip_rle(rle, height, width):\n if 'counts' in rle and type(rle['counts']) == list:\n # Magic RLE format handling painfully discovered by looking at the\n # COCO API showAnns function.", + "detail": "modules.PyMAF.utils.segms", + "documentation": {} + }, + { + "label": "polys_to_mask", + "kind": 2, + "importPath": "modules.PyMAF.utils.segms", + "description": "modules.PyMAF.utils.segms", + "peekOfCode": "def polys_to_mask(polygons, height, width):\n \"\"\"Convert from the COCO polygon segmentation format to a binary mask\n encoded as a 2D array of data type numpy.float32. The polygon segmentation\n is understood to be enclosed inside a height x width image. The resulting\n mask is therefore of shape (height, width).\n \"\"\"\n rle = mask_util.frPyObjects(polygons, height, width)\n mask = np.array(mask_util.decode(rle), dtype=np.float32)\n # Flatten in case polygons was a list\n mask = np.sum(mask, axis=2)", + "detail": "modules.PyMAF.utils.segms", + "documentation": {} + }, + { + "label": "mask_to_bbox", + "kind": 2, + "importPath": "modules.PyMAF.utils.segms", + "description": "modules.PyMAF.utils.segms", + "peekOfCode": "def mask_to_bbox(mask):\n \"\"\"Compute the tight bounding box of a binary mask.\"\"\"\n xs = np.where(np.sum(mask, axis=0) > 0)[0]\n ys = np.where(np.sum(mask, axis=1) > 0)[0]\n if len(xs) == 0 or len(ys) == 0:\n return None\n x0 = xs[0]\n x1 = xs[-1]\n y0 = ys[0]\n y1 = ys[-1]", + "detail": "modules.PyMAF.utils.segms", + "documentation": {} + }, + { + "label": "polys_to_mask_wrt_box", + "kind": 2, + "importPath": "modules.PyMAF.utils.segms", + "description": "modules.PyMAF.utils.segms", + "peekOfCode": "def polys_to_mask_wrt_box(polygons, box, M):\n \"\"\"Convert from the COCO polygon segmentation format to a binary mask\n encoded as a 2D array of data type numpy.float32. The polygon segmentation\n is understood to be enclosed in the given box and rasterized to an M x M\n mask. The resulting mask is therefore of shape (M, M).\n \"\"\"\n w = box[2] - box[0]\n h = box[3] - box[1]\n w = np.maximum(w, 1)\n h = np.maximum(h, 1)", + "detail": "modules.PyMAF.utils.segms", + "documentation": {} + }, + { + "label": "polys_to_boxes", + "kind": 2, + "importPath": "modules.PyMAF.utils.segms", + "description": "modules.PyMAF.utils.segms", + "peekOfCode": "def polys_to_boxes(polys):\n \"\"\"Convert a list of polygons into an array of tight bounding boxes.\"\"\"\n boxes_from_polys = np.zeros((len(polys), 4), dtype=np.float32)\n for i in range(len(polys)):\n poly = polys[i]\n x0 = min(min(p[::2]) for p in poly)\n x1 = max(max(p[::2]) for p in poly)\n y0 = min(min(p[1::2]) for p in poly)\n y1 = max(max(p[1::2]) for p in poly)\n boxes_from_polys[i, :] = [x0, y0, x1, y1]", + "detail": "modules.PyMAF.utils.segms", + "documentation": {} + }, + { + "label": "rle_mask_voting", + "kind": 2, + "importPath": "modules.PyMAF.utils.segms", + "description": "modules.PyMAF.utils.segms", + "peekOfCode": "def rle_mask_voting(top_masks,\n all_masks,\n all_dets,\n iou_thresh,\n binarize_thresh,\n method='AVG'):\n \"\"\"Returns new masks (in correspondence with `top_masks`) by combining\n multiple overlapping masks coming from the pool of `all_masks`. 
Two methods\n for combining masks are supported: 'AVG' uses a weighted average of\n overlapping mask pixels; 'UNION' takes the union of all mask pixels.", + "detail": "modules.PyMAF.utils.segms", + "documentation": {} + }, + { + "label": "rle_mask_nms", + "kind": 2, + "importPath": "modules.PyMAF.utils.segms", + "description": "modules.PyMAF.utils.segms", + "peekOfCode": "def rle_mask_nms(masks, dets, thresh, mode='IOU'):\n \"\"\"Performs greedy non-maximum suppression based on an overlap measurement\n between masks. The type of measurement is determined by `mode` and can be\n either 'IOU' (standard intersection over union) or 'IOMA' (intersection over\n minimum area).\n \"\"\"\n if len(masks) == 0:\n return []\n if len(masks) == 1:\n return [0]", + "detail": "modules.PyMAF.utils.segms", + "documentation": {} + }, + { + "label": "rle_masks_to_boxes", + "kind": 2, + "importPath": "modules.PyMAF.utils.segms", + "description": "modules.PyMAF.utils.segms", + "peekOfCode": "def rle_masks_to_boxes(masks):\n \"\"\"Computes the bounding box of each mask in a list of RLE encoded masks.\"\"\"\n if len(masks) == 0:\n return []\n decoded_masks = [\n np.array(mask_util.decode(rle), dtype=np.float32) for rle in masks\n ]\n def get_bounds(flat_mask):\n inds = np.where(flat_mask > 0)[0]\n return inds.min(), inds.max()", + "detail": "modules.PyMAF.utils.segms", + "documentation": {} + }, + { + "label": "get_smooth_bbox_params", + "kind": 2, + "importPath": "modules.PyMAF.utils.smooth_bbox", + "description": "modules.PyMAF.utils.smooth_bbox", + "peekOfCode": "def get_smooth_bbox_params(kps, vis_thresh=2, kernel_size=11, sigma=3):\n \"\"\"\n Computes smooth bounding box parameters from keypoints:\n 1. Computes bbox by rescaling the person to be around 150 px.\n 2. Linearly interpolates bbox params for missing annotations.\n 3. Median filtering\n 4.
Gaussian filtering.\n Recommended thresholds:\n * detect-and-track: 0\n * 3DPW: 0.1", + "detail": "modules.PyMAF.utils.smooth_bbox", + "documentation": {} + }, + { + "label": "kp_to_bbox_param", + "kind": 2, + "importPath": "modules.PyMAF.utils.smooth_bbox", + "description": "modules.PyMAF.utils.smooth_bbox", + "peekOfCode": "def kp_to_bbox_param(kp, vis_thresh):\n \"\"\"\n Finds the bounding box parameters from the 2D keypoints.\n Args:\n kp (Kx3): 2D Keypoints.\n vis_thresh (float): Threshold for visibility.\n Returns:\n [center_x, center_y, scale]\n \"\"\"\n if kp is None:", + "detail": "modules.PyMAF.utils.smooth_bbox", + "documentation": {} + }, + { + "label": "get_all_bbox_params", + "kind": 2, + "importPath": "modules.PyMAF.utils.smooth_bbox", + "description": "modules.PyMAF.utils.smooth_bbox", + "peekOfCode": "def get_all_bbox_params(kps, vis_thresh=2):\n \"\"\"\n Finds bounding box parameters for all keypoints.\n Look for sequences in the middle with no predictions and linearly\n interpolate the bbox params for those\n Args:\n kps (list): List of kps (Kx3) or None.\n vis_thresh (float): Threshold for visibility.\n Returns:\n bbox_params, start_index (incl), end_index (excl)", + "detail": "modules.PyMAF.utils.smooth_bbox", + "documentation": {} + }, + { + "label": "smooth_bbox_params", + "kind": 2, + "importPath": "modules.PyMAF.utils.smooth_bbox", + "description": "modules.PyMAF.utils.smooth_bbox", + "peekOfCode": "def smooth_bbox_params(bbox_params, kernel_size=11, sigma=8):\n \"\"\"\n Applies median filtering and then Gaussian filtering to bounding box\n parameters.\n Args:\n bbox_params (Nx3): [cx, cy, scale].\n kernel_size (int): Kernel size for median filtering (must be odd).\n sigma (float): Sigma for Gaussian smoothing.\n Returns:\n Smoothed bounding box parameters (Nx3).", + "detail": "modules.PyMAF.utils.smooth_bbox", + "documentation": {} + }, + { + "label": "flip_back", + "kind": 2, + "importPath": "modules.PyMAF.utils.transforms", + "description": "modules.PyMAF.utils.transforms", + "peekOfCode": "def flip_back(output_flipped, matched_parts):\n '''\n output_flipped: numpy.ndarray(batch_size, num_joints, height, width)\n '''\n assert output_flipped.ndim == 4,\\\n 'output_flipped should be [batch_size, num_joints, height, width]'\n output_flipped = output_flipped[:, :, :, ::-1]\n for pair in matched_parts:\n tmp = output_flipped[:, pair[0], :, :].copy()\n output_flipped[:, pair[0], :, :] = output_flipped[:, pair[1], :, :]", + "detail": "modules.PyMAF.utils.transforms", + "documentation": {} + }, + { + "label": "fliplr_joints", + "kind": 2, + "importPath": "modules.PyMAF.utils.transforms", + "description": "modules.PyMAF.utils.transforms", + "peekOfCode": "def fliplr_joints(joints, joints_vis, width, matched_parts):\n \"\"\"\n flip coords\n \"\"\"\n # Flip horizontal\n joints[:, 0] = width - joints[:, 0] - 1\n # Change left-right parts\n for pair in matched_parts:\n joints[pair[0], :], joints[pair[1], :] = \\\n joints[pair[1], :], joints[pair[0], :].copy()", + "detail": "modules.PyMAF.utils.transforms", + "documentation": {} + }, + { + "label": "transform_preds", + "kind": 2, + "importPath": "modules.PyMAF.utils.transforms", + "description": "modules.PyMAF.utils.transforms", + "peekOfCode": "def transform_preds(coords, center, scale, output_size):\n target_coords = np.zeros(coords.shape)\n trans = get_affine_transform(center, scale, 0, output_size, inv=1)\n for p in range(coords.shape[0]):\n target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans)\n return
target_coords\ndef get_affine_transform(\n center, scale, rot, output_size,\n shift=np.array([0, 0], dtype=np.float32), inv=0\n):", + "detail": "modules.PyMAF.utils.transforms", + "documentation": {} + }, + { + "label": "get_affine_transform", + "kind": 2, + "importPath": "modules.PyMAF.utils.transforms", + "description": "modules.PyMAF.utils.transforms", + "peekOfCode": "def get_affine_transform(\n center, scale, rot, output_size,\n shift=np.array([0, 0], dtype=np.float32), inv=0\n):\n if not isinstance(scale, np.ndarray) and not isinstance(scale, list):\n # print(scale)\n scale = np.array([scale, scale])\n scale_tmp = scale * 200.0\n src_w = scale_tmp[0]\n dst_w = output_size[0]", + "detail": "modules.PyMAF.utils.transforms", + "documentation": {} + }, + { + "label": "affine_transform", + "kind": 2, + "importPath": "modules.PyMAF.utils.transforms", + "description": "modules.PyMAF.utils.transforms", + "peekOfCode": "def affine_transform(pt, t):\n new_pt = np.array([pt[0], pt[1], 1.]).T\n new_pt = np.dot(t, new_pt)\n return new_pt[:2]\ndef get_3rd_point(a, b):\n direct = a - b\n return b + np.array([-direct[1], direct[0]], dtype=np.float32)\ndef get_dir(src_point, rot_rad):\n sn, cs = np.sin(rot_rad), np.cos(rot_rad)\n src_result = [0, 0]", + "detail": "modules.PyMAF.utils.transforms", + "documentation": {} + }, + { + "label": "get_3rd_point", + "kind": 2, + "importPath": "modules.PyMAF.utils.transforms", + "description": "modules.PyMAF.utils.transforms", + "peekOfCode": "def get_3rd_point(a, b):\n direct = a - b\n return b + np.array([-direct[1], direct[0]], dtype=np.float32)\ndef get_dir(src_point, rot_rad):\n sn, cs = np.sin(rot_rad), np.cos(rot_rad)\n src_result = [0, 0]\n src_result[0] = src_point[0] * cs - src_point[1] * sn\n src_result[1] = src_point[0] * sn + src_point[1] * cs\n return src_result\ndef crop(img, center, scale, output_size, rot=0):", + "detail": "modules.PyMAF.utils.transforms", + "documentation": {} + }, + { + "label": "get_dir", + "kind": 2, + "importPath": "modules.PyMAF.utils.transforms", + "description": "modules.PyMAF.utils.transforms", + "peekOfCode": "def get_dir(src_point, rot_rad):\n sn, cs = np.sin(rot_rad), np.cos(rot_rad)\n src_result = [0, 0]\n src_result[0] = src_point[0] * cs - src_point[1] * sn\n src_result[1] = src_point[0] * sn + src_point[1] * cs\n return src_result\ndef crop(img, center, scale, output_size, rot=0):\n trans = get_affine_transform(center, scale, rot, output_size)\n dst_img = cv2.warpAffine(\n img, trans, (int(output_size[0]), int(output_size[1])),", + "detail": "modules.PyMAF.utils.transforms", + "documentation": {} + }, + { + "label": "crop", + "kind": 2, + "importPath": "modules.PyMAF.utils.transforms", + "description": "modules.PyMAF.utils.transforms", + "peekOfCode": "def crop(img, center, scale, output_size, rot=0):\n trans = get_affine_transform(center, scale, rot, output_size)\n dst_img = cv2.warpAffine(\n img, trans, (int(output_size[0]), int(output_size[1])),\n flags=cv2.INTER_LINEAR\n )\n return dst_img", + "detail": "modules.PyMAF.utils.transforms", + "documentation": {} + }, + { + "label": "iuv_map2img", + "kind": 2, + "importPath": "modules.PyMAF.utils.uv_vis", + "description": "modules.PyMAF.utils.uv_vis", + "peekOfCode": "def iuv_map2img(U_uv, V_uv, Index_UV, AnnIndex=None, uv_rois=None, ind_mapping=None):\n device_id = U_uv.get_device()\n batch_size = U_uv.size(0)\n K = U_uv.size(1)\n heatmap_size = U_uv.size(2)\n Index_UV_max = torch.argmax(Index_UV, dim=1)\n if AnnIndex is None:\n Index_UV_max = 
Index_UV_max.to(torch.int64)\n else:\n AnnIndex_max = torch.argmax(AnnIndex, dim=1)", + "detail": "modules.PyMAF.utils.uv_vis", + "documentation": {} + }, + { + "label": "vis_smpl_iuv", + "kind": 2, + "importPath": "modules.PyMAF.utils.uv_vis", + "description": "modules.PyMAF.utils.uv_vis", + "peekOfCode": "def vis_smpl_iuv(image, cam_pred, vert_pred, face, pred_uv, vert_errors_batch, image_name, save_path, opt, ratio=1):\n # save_path = os.path.join('./notebooks/output/demo_results-wild', ids[f_id][0])\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n # dr_render = OpenDRenderer(ratio=ratio)\n dr_render = PyRenderer()\n focal_length = 5000.\n orig_size = 224.\n if pred_uv is not None:\n iuv_img = iuv_map2img(*pred_uv)", + "detail": "modules.PyMAF.utils.uv_vis", + "documentation": {} + }, + { + "label": "get_colors", + "kind": 2, + "importPath": "modules.PyMAF.utils.vis", + "description": "modules.PyMAF.utils.vis", + "peekOfCode": "def get_colors():\n colors = {\n 'pink': np.array([197, 27, 125]), # L lower leg\n 'light_pink': np.array([233, 163, 201]), # L upper leg\n 'light_green': np.array([161, 215, 106]), # L lower arm\n 'green': np.array([77, 146, 33]), # L upper arm\n 'red': np.array([215, 48, 39]), # head\n 'light_red': np.array([252, 146, 114]), # head\n 'light_orange': np.array([252, 141, 89]), # chest\n 'purple': np.array([118, 42, 131]), # R lower leg", + "detail": "modules.PyMAF.utils.vis", + "documentation": {} + }, + { + "label": "kp_connections", + "kind": 2, + "importPath": "modules.PyMAF.utils.vis", + "description": "modules.PyMAF.utils.vis", + "peekOfCode": "def kp_connections(keypoints):\n kp_lines = [\n [keypoints.index('left_eye'), keypoints.index('right_eye')],\n [keypoints.index('left_eye'), keypoints.index('nose')],\n [keypoints.index('right_eye'), keypoints.index('nose')],\n [keypoints.index('right_eye'), keypoints.index('right_ear')],\n [keypoints.index('left_eye'), keypoints.index('left_ear')],\n [keypoints.index('right_shoulder'), keypoints.index('right_elbow')],\n [keypoints.index('right_elbow'), keypoints.index('right_wrist')],\n [keypoints.index('left_shoulder'), keypoints.index('left_elbow')],", + "detail": "modules.PyMAF.utils.vis", + "documentation": {} + }, + { + "label": "convert_from_cls_format", + "kind": 2, + "importPath": "modules.PyMAF.utils.vis", + "description": "modules.PyMAF.utils.vis", + "peekOfCode": "def convert_from_cls_format(cls_boxes, cls_segms, cls_keyps):\n \"\"\"Convert from the class boxes/segms/keyps format generated by the testing\n code.\n \"\"\"\n box_list = [b for b in cls_boxes if len(b) > 0]\n if len(box_list) > 0:\n boxes = np.concatenate(box_list)\n else:\n boxes = None\n if cls_segms is not None:", + "detail": "modules.PyMAF.utils.vis", + "documentation": {} + }, + { + "label": "vis_bbox_opencv", + "kind": 2, + "importPath": "modules.PyMAF.utils.vis", + "description": "modules.PyMAF.utils.vis", + "peekOfCode": "def vis_bbox_opencv(img, bbox, thick=1):\n \"\"\"Visualizes a bounding box.\"\"\"\n (x0, y0, w, h) = bbox\n x1, y1 = int(x0 + w), int(y0 + h)\n x0, y0 = int(x0), int(y0)\n cv2.rectangle(img, (x0, y0), (x1, y1), _GREEN, thickness=thick)\n return img\ndef get_class_string(class_index, score, dataset):\n class_text = dataset.classes[class_index] if dataset is not None else \\\n 'id{:d}'.format(class_index)", + "detail": "modules.PyMAF.utils.vis", + "documentation": {} + }, + { + "label": "get_class_string", + "kind": 2, + "importPath": "modules.PyMAF.utils.vis", + "description": "modules.PyMAF.utils.vis", + 
"peekOfCode": "def get_class_string(class_index, score, dataset):\n class_text = dataset.classes[class_index] if dataset is not None else \\\n 'id{:d}'.format(class_index)\n return class_text + ' {:0.2f}'.format(score).lstrip('0')\ndef vis_one_image(\n im, im_name, output_dir, boxes, segms=None, keypoints=None, body_uv=None, thresh=0.9,\n kp_thresh=2, dpi=200, box_alpha=0.0, dataset=None, show_class=False,\n ext='pdf'):\n \"\"\"Visual debugging of detections.\"\"\"\n if not os.path.exists(output_dir):", + "detail": "modules.PyMAF.utils.vis", + "documentation": {} + }, + { + "label": "vis_one_image", + "kind": 2, + "importPath": "modules.PyMAF.utils.vis", + "description": "modules.PyMAF.utils.vis", + "peekOfCode": "def vis_one_image(\n im, im_name, output_dir, boxes, segms=None, keypoints=None, body_uv=None, thresh=0.9,\n kp_thresh=2, dpi=200, box_alpha=0.0, dataset=None, show_class=False,\n ext='pdf'):\n \"\"\"Visual debugging of detections.\"\"\"\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n if isinstance(boxes, list):\n boxes, segms, keypoints, classes = convert_from_cls_format(\n boxes, segms, keypoints)", + "detail": "modules.PyMAF.utils.vis", + "documentation": {} + }, + { + "label": "vis_batch_image_with_joints", + "kind": 2, + "importPath": "modules.PyMAF.utils.vis", + "description": "modules.PyMAF.utils.vis", + "peekOfCode": "def vis_batch_image_with_joints(batch_image, batch_joints, batch_joints_vis,\n file_name=None, nrow=8, padding=0, pad_value=1, add_text=True):\n '''\n batch_image: [batch_size, channel, height, width]\n batch_joints: [batch_size, num_joints, 3],\n batch_joints_vis: [batch_size, num_joints, 1],\n }\n '''\n grid = torchvision.utils.make_grid(batch_image, nrow, padding, True, pad_value=pad_value)\n ndarr = grid.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()", + "detail": "modules.PyMAF.utils.vis", + "documentation": {} + }, + { + "label": "vis_img_3Djoint", + "kind": 2, + "importPath": "modules.PyMAF.utils.vis", + "description": "modules.PyMAF.utils.vis", + "peekOfCode": "def vis_img_3Djoint(batch_img, joints, pairs=None, joint_group=None):\n n_sample = joints.shape[0]\n max_show = 2\n if n_sample > max_show:\n if batch_img is not None:\n batch_img = batch_img[:max_show]\n joints = joints[:max_show]\n n_sample = max_show\n color = ['#00B0F0', '#00B050', '#DC6464', '#207070', '#BC4484']\n # color = ['g', 'b', 'r']", + "detail": "modules.PyMAF.utils.vis", + "documentation": {} + }, + { + "label": "vis_img_2Djoint", + "kind": 2, + "importPath": "modules.PyMAF.utils.vis", + "description": "modules.PyMAF.utils.vis", + "peekOfCode": "def vis_img_2Djoint(batch_img, joints, pairs=None, joint_group=None):\n n_sample = joints.shape[0]\n max_show = 2\n if n_sample > max_show:\n if batch_img is not None:\n batch_img = batch_img[:max_show]\n joints = joints[:max_show]\n n_sample = max_show\n color = ['#00B0F0', '#00B050', '#DC6464', '#207070', '#BC4484']\n # color = ['g', 'b', 'r']", + "detail": "modules.PyMAF.utils.vis", + "documentation": {} + }, + { + "label": "draw_skeleton", + "kind": 2, + "importPath": "modules.PyMAF.utils.vis", + "description": "modules.PyMAF.utils.vis", + "peekOfCode": "def draw_skeleton(image, kp_2d, dataset='common', unnormalize=True, thickness=2):\n if unnormalize:\n kp_2d[:,:2] = normalize_2d_kp(kp_2d[:,:2], 224, inv=True)\n kp_2d[:,2] = kp_2d[:,2] > 0.3\n kp_2d = np.array(kp_2d, dtype=int)\n rcolor = get_colors()['red'].tolist()\n pcolor = get_colors()['green'].tolist()\n lcolor = get_colors()['blue'].tolist()\n 
common_lr = [0,0,1,1,0,0,0,0,1,0,0,1,1,1,0]\n for idx,pt in enumerate(kp_2d):", + "detail": "modules.PyMAF.utils.vis", + "documentation": {} + }, + { + "label": "set_axes_equal", + "kind": 2, + "importPath": "modules.PyMAF.utils.vis", + "description": "modules.PyMAF.utils.vis", + "peekOfCode": "def set_axes_equal(ax):\n '''Make axes of a 3D plot have equal scale so that spheres appear as spheres,\n cubes as cubes, etc. This is one possible solution to Matplotlib's\n ax.set_aspect('equal') and ax.axis('equal') not working for 3D.\n Input\n ax: a matplotlib axis, e.g., as output from plt.gca().\n '''\n x_limits = ax.get_xlim3d()\n y_limits = ax.get_ylim3d()\n z_limits = ax.get_zlim3d()", + "detail": "modules.PyMAF.utils.vis", + "documentation": {} + }, + { + "label": "plt.rcParams['pdf.fonttype']", + "kind": 5, + "importPath": "modules.PyMAF.utils.vis", + "description": "modules.PyMAF.utils.vis", + "peekOfCode": "plt.rcParams['pdf.fonttype'] = 42 # For editing in Adobe Illustrator\n_GRAY = (218, 227, 218)\n_GREEN = (18, 127, 15)\n_WHITE = (255, 255, 255)\ndef get_colors():\n colors = {\n 'pink': np.array([197, 27, 125]), # L lower leg\n 'light_pink': np.array([233, 163, 201]), # L upper leg\n 'light_green': np.array([161, 215, 106]), # L lower arm\n 'green': np.array([77, 146, 33]), # L upper arm", + "detail": "modules.PyMAF.utils.vis", + "documentation": {} + }, + { + "label": "_GRAY", + "kind": 5, + "importPath": "modules.PyMAF.utils.vis", + "description": "modules.PyMAF.utils.vis", + "peekOfCode": "_GRAY = (218, 227, 218)\n_GREEN = (18, 127, 15)\n_WHITE = (255, 255, 255)\ndef get_colors():\n colors = {\n 'pink': np.array([197, 27, 125]), # L lower leg\n 'light_pink': np.array([233, 163, 201]), # L upper leg\n 'light_green': np.array([161, 215, 106]), # L lower arm\n 'green': np.array([77, 146, 33]), # L upper arm\n 'red': np.array([215, 48, 39]), # head", + "detail": "modules.PyMAF.utils.vis", + "documentation": {} + }, + { + "label": "_GREEN", + "kind": 5, + "importPath": "modules.PyMAF.utils.vis", + "description": "modules.PyMAF.utils.vis", + "peekOfCode": "_GREEN = (18, 127, 15)\n_WHITE = (255, 255, 255)\ndef get_colors():\n colors = {\n 'pink': np.array([197, 27, 125]), # L lower leg\n 'light_pink': np.array([233, 163, 201]), # L upper leg\n 'light_green': np.array([161, 215, 106]), # L lower arm\n 'green': np.array([77, 146, 33]), # L upper arm\n 'red': np.array([215, 48, 39]), # head\n 'light_red': np.array([252, 146, 114]), # head", + "detail": "modules.PyMAF.utils.vis", + "documentation": {} + }, + { + "label": "_WHITE", + "kind": 5, + "importPath": "modules.PyMAF.utils.vis", + "description": "modules.PyMAF.utils.vis", + "peekOfCode": "_WHITE = (255, 255, 255)\ndef get_colors():\n colors = {\n 'pink': np.array([197, 27, 125]), # L lower leg\n 'light_pink': np.array([233, 163, 201]), # L upper leg\n 'light_green': np.array([161, 215, 106]), # L lower arm\n 'green': np.array([77, 146, 33]), # L upper arm\n 'red': np.array([215, 48, 39]), # head\n 'light_red': np.array([252, 146, 114]), # head\n 'light_orange': np.array([252, 141, 89]), # chest", + "detail": "modules.PyMAF.utils.vis", + "documentation": {} + }, + { + "label": "video", + "kind": 2, + "importPath": "modules.PyMAF.pymaf_x_demo", + "description": "modules.PyMAF.pymaf_x_demo", + "peekOfCode": "def video(path):\n mp4 = open(path,'rb').read()\n data_url = \"data:video/mp4;base64,\" + b64encode(mp4).decode()\n # rebuild the inline HTML5 video tag that was stripped from this snippet\n return HTML('<video width=400 controls><source src=\"%s\" type=\"video/mp4\"></video>' % data_url)\nvideo('output/dancer/dancer_result.mp4')", + "detail": "modules.PyMAF.pymaf_x_demo", +
"documentation": {} + }, + { + "label": "hand_pca_to_axis", + "kind": 2, + "importPath": "SHOW.datasets.model_func_atach", + "description": "SHOW.datasets.model_func_atach", + "peekOfCode": "def hand_pca_to_axis(self, lhand_pca, rhand_pca):\n # device=self.left_hand_mean.device\n lhand_axis = torch.einsum('bi,ij->bj', [lhand_pca, self.left_hand_components])\n rhand_axis = torch.einsum('bi,ij->bj', [rhand_pca, self.right_hand_components])\n if not self.flat_hand_mean:\n lhand_axis=lhand_axis+self.left_hand_mean\n rhand_axis=rhand_axis+self.right_hand_mean\n return lhand_axis,rhand_axis\ndef hand_axis_to_pca(self, lhand_axis, rhand_axis):\n device=self.left_hand_mean.device", + "detail": "SHOW.datasets.model_func_atach", + "documentation": {} + }, + { + "label": "hand_axis_to_pca", + "kind": 2, + "importPath": "SHOW.datasets.model_func_atach", + "description": "SHOW.datasets.model_func_atach", + "peekOfCode": "def hand_axis_to_pca(self, lhand_axis, rhand_axis):\n device=self.left_hand_mean.device\n if isinstance(lhand_axis, np.ndarray):\n lhand_axis = torch.from_numpy(lhand_axis)\n if isinstance(rhand_axis, np.ndarray):\n rhand_axis = torch.from_numpy(rhand_axis)\n lhand_axis = lhand_axis.reshape(-1, 45).to(device)\n rhand_axis = rhand_axis.reshape(-1, 45).to(device)\n if not self.flat_hand_mean:\n lhand_axis=lhand_axis-self.left_hand_mean", + "detail": "SHOW.datasets.model_func_atach", + "documentation": {} + }, + { + "label": "atach_model_func", + "kind": 2, + "importPath": "SHOW.datasets.model_func_atach", + "description": "SHOW.datasets.model_func_atach", + "peekOfCode": "def atach_model_func(model):\n if not hasattr(model, 'hand_axis_to_pca'):\n setattr(model, 'hand_axis_to_pca',hand_axis_to_pca)\n if not hasattr(model, 'hand_pca_to_axis'):\n setattr(model, 'hand_pca_to_axis',hand_pca_to_axis)\n if not hasattr(model, 'l_comp'):\n l_comp = torch.linalg.pinv(model.left_hand_components)\n r_comp = torch.linalg.pinv(model.right_hand_components)\n setattr(model, 'l_comp', l_comp)\n setattr(model, 'r_comp', r_comp)", + "detail": "SHOW.datasets.model_func_atach", + "documentation": {} + }, + { + "label": "op_base", + "kind": 6, + "importPath": "SHOW.datasets.op_base", + "description": "SHOW.datasets.op_base", + "peekOfCode": "class op_base(object):\n def get_joint_weights(self) -> torch.Tensor:\n # @return optim_weights: [1,135,1]\n self.optim_weights = torch.ones(\n self.num_joints +\n 2 * self.use_hands +\n 51 * self.use_face +\n 17 * self.use_face_contour,\n dtype=self.dtype\n ).to(self.device)", + "detail": "SHOW.datasets.op_base", + "documentation": {} + }, + { + "label": "return_item_tuple", + "kind": 5, + "importPath": "SHOW.datasets.op_base", + "description": "SHOW.datasets.op_base", + "peekOfCode": "return_item_tuple = namedtuple(\n 'return_item_tuple',\n ['keypoints_2d', 'gender_gt']\n)\nreturn_item_tuple.__new__.__defaults__ = (None,)*len(return_item_tuple._fields)\nclass op_base(object):\n def get_joint_weights(self) -> torch.Tensor:\n # @return optim_weights: [1,135,1]\n self.optim_weights = torch.ones(\n self.num_joints +", + "detail": "SHOW.datasets.op_base", + "documentation": {} + }, + { + "label": "return_item_tuple.__new__.__defaults__", + "kind": 5, + "importPath": "SHOW.datasets.op_base", + "description": "SHOW.datasets.op_base", + "peekOfCode": "return_item_tuple.__new__.__defaults__ = (None,)*len(return_item_tuple._fields)\nclass op_base(object):\n def get_joint_weights(self) -> torch.Tensor:\n # @return optim_weights: [1,135,1]\n self.optim_weights = torch.ones(\n 
self.num_joints +\n 2 * self.use_hands +\n 51 * self.use_face +\n 17 * self.use_face_contour,\n dtype=self.dtype", + "detail": "SHOW.datasets.op_base", + "documentation": {} + }, + { + "label": "op_dataset", + "kind": 6, + "importPath": "SHOW.datasets.op_dataset", + "description": "SHOW.datasets.op_dataset", + "peekOfCode": "class op_dataset(op_base):\n NUM_BODY_JOINTS = 25\n NUM_HAND_JOINTS = 20\n def __init__(\n self,\n dtype=torch.float32,\n device='cpu',\n batch_size=-1,\n config=None,\n face_ider=None,", + "detail": "SHOW.datasets.op_dataset", + "documentation": {} + }, + { + "label": "op_post_process", + "kind": 6, + "importPath": "SHOW.datasets.op_post_process", + "description": "SHOW.datasets.op_post_process", + "peekOfCode": "class op_post_process(object):\n def __init__(self, all_processed_item, device, dtype):\n self.all_processed_item = all_processed_item\n self.device = device\n self.dtype = dtype\n def run(self):\n self.parse_batch()\n self.merge_list_to_tensor()\n return self.parse_data\n def check_valid(self):", + "detail": "SHOW.datasets.op_post_process", + "documentation": {} + }, + { + "label": "deca_exp_to_smplx", + "kind": 2, + "importPath": "SHOW.datasets.pre_dataset", + "description": "SHOW.datasets.pre_dataset", + "peekOfCode": "def deca_exp_to_smplx(e_deca):\n e_deca = np.concatenate([e_deca, np.zeros(50)])\n e_smplx = deca_exp_to_smplx_X.dot(e_deca)\n e_smplx = e_smplx[:50]\n return e_smplx\ndef read_mp(mp_npz_file, height, width):\n try:\n mp_npz = np.load(mp_npz_file, allow_pickle=True)\n except:\n import traceback", + "detail": "SHOW.datasets.pre_dataset", + "documentation": {} + }, + { + "label": "read_mp", + "kind": 2, + "importPath": "SHOW.datasets.pre_dataset", + "description": "SHOW.datasets.pre_dataset", + "peekOfCode": "def read_mp(mp_npz_file, height, width):\n try:\n mp_npz = np.load(mp_npz_file, allow_pickle=True)\n except:\n import traceback\n traceback.print_exc()\n return None\n mp_npz = list(mp_npz.values())\n return_list = []\n for ret in mp_npz:", + "detail": "SHOW.datasets.pre_dataset", + "documentation": {} + }, + { + "label": "read_pixie", + "kind": 2, + "importPath": "SHOW.datasets.pre_dataset", + "description": "SHOW.datasets.pre_dataset", + "peekOfCode": "def read_pixie(pixie_mat_file, height, width, cvt_hand_func=None):\n pixie_ret_list = mmcv.load(pixie_mat_file)\n assert cvt_hand_func, 'cvt_hand_func must set'\n return_list = []\n for ret in pixie_ret_list:\n for key, val in ret.items():\n if isinstance(val, np.ndarray) and val.shape[0] == 1:\n ret[key] = ret[key][0]\n face_bbox = lmk2d_to_bbox(ret['face_kpt'], height, width)\n if 1:", + "detail": "SHOW.datasets.pre_dataset", + "documentation": {} + }, + { + "label": "read_deca", + "kind": 2, + "importPath": "SHOW.datasets.pre_dataset", + "description": "SHOW.datasets.pre_dataset", + "peekOfCode": "def read_deca(deca_mat_file):\n assert(osp.exists(deca_mat_file))\n deca_ret_list = mmcv.load(deca_mat_file)\n assert(deca_ret_list != [])\n return_list = []\n for ret in deca_ret_list:\n for key, val in ret.items():\n if isinstance(val, np.ndarray) and val.shape[0] == 1:\n ret[key] = ret[key][0]\n deca_lmk = torch.tensor(ret['landmarks2d'])", + "detail": "SHOW.datasets.pre_dataset", + "documentation": {} + }, + { + "label": "deca_exp_to_smplx_X", + "kind": 5, + "importPath": "SHOW.datasets.pre_dataset", + "description": "SHOW.datasets.pre_dataset", + "peekOfCode": "deca_exp_to_smplx_X = np.load(\n osp.join(os.path.dirname(__file__),\n '../../../data/flame2020to2019_exp_trafo.npy')\n)\ndef 
deca_exp_to_smplx(e_deca):\n e_deca = np.concatenate([e_deca, np.zeros(50)])\n e_smplx = deca_exp_to_smplx_X.dot(e_deca)\n e_smplx = e_smplx[:50]\n return e_smplx\ndef read_mp(mp_npz_file, height, width):", + "detail": "SHOW.datasets.pre_dataset", + "documentation": {} + }, + { + "label": "path_enter", + "kind": 6, + "importPath": "SHOW.datasets.pre_runner", + "description": "SHOW.datasets.pre_runner", + "peekOfCode": "class path_enter(object):\n def __init__(self,target_path=None):\n self.origin_path=None\n self.target_path=target_path\n def __enter__(self):\n if sys.path[0]!=self.target_path:\n sys.path.insert(\n 0,self.target_path\n )\n if self.target_path:", + "detail": "SHOW.datasets.pre_runner", + "documentation": {} + }, + { + "label": "run_smplifyx_org", + "kind": 2, + "importPath": "SHOW.datasets.pre_runner", + "description": "SHOW.datasets.pre_runner", + "peekOfCode": "def run_smplifyx_org(\n image_folder, \n output_folder,\n smplifyx_code_dir,\n log_cmds=True,\n **kwargs,\n): \n with path_enter(smplifyx_code_dir):\n data_folder=os.path.dirname(image_folder)\n cmds=[", + "detail": "SHOW.datasets.pre_runner", + "documentation": {} + }, + { + "label": "run_pymafx", + "kind": 2, + "importPath": "SHOW.datasets.pre_runner", + "description": "SHOW.datasets.pre_runner", + "peekOfCode": "def run_pymafx(\n image_folder, \n output_folder,\n pymaf_code_dir,\n log_cmds=True,\n no_render=True,\n):\n with path_enter(pymaf_code_dir):\n cmds=[\n 'python apps/demo_smplx.py',", + "detail": "SHOW.datasets.pre_runner", + "documentation": {} + }, + { + "label": "run_psfr", + "kind": 2, + "importPath": "SHOW.datasets.pre_runner", + "description": "SHOW.datasets.pre_runner", + "peekOfCode": "def run_psfr(\n image_folder,\n image_sup_folder,\n log_cmds=True,\n):\n psfr_code_dir=os.path.join(os.path.dirname(__file__),'../../modules/PSFRGAN')\n with path_enter(psfr_code_dir):\n cmds=[\n 'python test_enhance_dir_unalign.py',\n '--src_dir',f'\"{image_folder}\"', ", + "detail": "SHOW.datasets.pre_runner", + "documentation": {} + }, + { + "label": "FaceDetector", + "kind": 6, + "importPath": "SHOW.detector.face_detector", + "description": "SHOW.detector.face_detector", + "peekOfCode": "class FaceDetector:\n def __init__(self, type='google', device='cpu'):\n self.type = type\n self.detector = mp_face_mesh.FaceMesh(\n static_image_mode=False,\n max_num_faces=3,\n refine_landmarks=True,\n min_detection_confidence=0.5,\n min_tracking_confidence=0.5)\n def dense_multi_face(self, image):", + "detail": "SHOW.detector.face_detector", + "documentation": {} + }, + { + "label": "mp_drawing", + "kind": 5, + "importPath": "SHOW.detector.face_detector", + "description": "SHOW.detector.face_detector", + "peekOfCode": "mp_drawing = mp.solutions.drawing_utils\nmp_drawing_styles = mp.solutions.drawing_styles\nmp_face_mesh = mp.solutions.face_mesh\nclass FaceDetector:\n def __init__(self, type='google', device='cpu'):\n self.type = type\n self.detector = mp_face_mesh.FaceMesh(\n static_image_mode=False,\n max_num_faces=3,\n refine_landmarks=True,", + "detail": "SHOW.detector.face_detector", + "documentation": {} + }, + { + "label": "mp_drawing_styles", + "kind": 5, + "importPath": "SHOW.detector.face_detector", + "description": "SHOW.detector.face_detector", + "peekOfCode": "mp_drawing_styles = mp.solutions.drawing_styles\nmp_face_mesh = mp.solutions.face_mesh\nclass FaceDetector:\n def __init__(self, type='google', device='cpu'):\n self.type = type\n self.detector = mp_face_mesh.FaceMesh(\n static_image_mode=False,\n 
max_num_faces=3,\n refine_landmarks=True,\n min_detection_confidence=0.5,", + "detail": "SHOW.detector.face_detector", + "documentation": {} + }, + { + "label": "mp_face_mesh", + "kind": 5, + "importPath": "SHOW.detector.face_detector", + "description": "SHOW.detector.face_detector", + "peekOfCode": "mp_face_mesh = mp.solutions.face_mesh\nclass FaceDetector:\n def __init__(self, type='google', device='cpu'):\n self.type = type\n self.detector = mp_face_mesh.FaceMesh(\n static_image_mode=False,\n max_num_faces=3,\n refine_landmarks=True,\n min_detection_confidence=0.5,\n min_tracking_confidence=0.5)", + "detail": "SHOW.detector.face_detector", + "documentation": {} + }, + { + "label": "FAN_Detector", + "kind": 6, + "importPath": "SHOW.detector.fan_detector", + "description": "SHOW.detector.fan_detector", + "peekOfCode": "class FAN_Detector(object):\n def __init__(self,device='cuda'):\n if self.__dict__.get('face_detector',None) is None:\n import face_alignment\n self.face_detector = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, device=device)\n def predict(self,\n img_folder,fan_npy_folder,\n save_vis,fan_vis_dir):\n if save_vis:\n os.makedirs(fan_vis_dir,exist_ok=True)", + "detail": "SHOW.detector.fan_detector", + "documentation": {} + }, + { + "label": "pifpaf_detector", + "kind": 6, + "importPath": "SHOW.detector.pifpaf_detector", + "description": "SHOW.detector.pifpaf_detector", + "peekOfCode": "class pifpaf_detector(object):\n # chk_type:str = 'shufflenetv2k30-wholebody'\n # def __post_init__(self):\n def __init__(self):\n chk_type:str = 'shufflenetv2k30-wholebody'\n self.predictor = openpifpaf.Predictor(checkpoint=chk_type)\n def process(self,full_file_name,out_file_name,out_img_name):\n pil_im = PIL.Image.open(full_file_name).convert('RGB')\n predictions, _, _ = self.predictor.pil_image(pil_im)\n # sample_img=cv2.cvtColor(sample_img,cv2.COLOR_BGR2RGB)", + "detail": "SHOW.detector.pifpaf_detector", + "documentation": {} + }, + { + "label": "arcface_ider", + "kind": 6, + "importPath": "SHOW.face_iders.arcface_ider", + "description": "SHOW.face_iders.arcface_ider", + "peekOfCode": "class arcface_ider(ider_base):\n def __init__(self,\n weight=default_weight_path,\n name='r100', fp16=True, \n det='fan', threshold=0.45, **kwargs\n ):\n self.threshold = threshold\n self.det = det\n from modules.arcface_torch.backbones import get_model\n self.net = get_model(name, fp16=fp16)", + "detail": "SHOW.face_iders.arcface_ider", + "documentation": {} + }, + { + "label": "ider_base", + "kind": 6, + "importPath": "SHOW.face_iders.base", + "description": "SHOW.face_iders.base", + "peekOfCode": "class ider_base(object):\n def get_all_emb(self, im: np.ndarray = None) -> np.ndarray:\n # im:bgr\n faces = self.app.get(im)\n return faces\n def get_face_emb(self, im: np.ndarray = None) -> np.ndarray:\n # im:bgr\n faces = self.app.get(im)\n emb = faces[0].normed_embedding\n return emb", + "detail": "SHOW.face_iders.base", + "documentation": {} + }, + { + "label": "insightface_ider", + "kind": 6, + "importPath": "SHOW.face_iders.base", + "description": "SHOW.face_iders.base", + "peekOfCode": "class insightface_ider(ider_base):\n def __init__(self, threshold=0.6, **kwargs):\n self.threshold = threshold\n from insightface.app import FaceAnalysis\n self.app = FaceAnalysis(providers=['CUDAExecutionProvider'])\n self.app.prepare(ctx_id=-1, det_size=(640, 640))", + "detail": "SHOW.face_iders.base", + "documentation": {} + }, + { + "label": "build_ider", + "kind": 2, + "importPath": 
"SHOW.face_iders.builder", + "description": "SHOW.face_iders.builder", + "peekOfCode": "def build_ider(config):\n return mmcv.build_from_cfg(config,IDER)\ndef build_ider2(config):\n return IDER.build(config)", + "detail": "SHOW.face_iders.builder", + "documentation": {} + }, + { + "label": "build_ider2", + "kind": 2, + "importPath": "SHOW.face_iders.builder", + "description": "SHOW.face_iders.builder", + "peekOfCode": "def build_ider2(config):\n return IDER.build(config)", + "detail": "SHOW.face_iders.builder", + "documentation": {} + }, + { + "label": "IDER", + "kind": 5, + "importPath": "SHOW.face_iders.builder", + "description": "SHOW.face_iders.builder", + "peekOfCode": "IDER = Registry('ider')\ndef build_ider(config):\n return mmcv.build_from_cfg(config,IDER)\ndef build_ider2(config):\n return IDER.build(config)", + "detail": "SHOW.face_iders.builder", + "documentation": {} + }, + { + "label": "match_faces", + "kind": 2, + "importPath": "SHOW.face_iders.utils", + "description": "SHOW.face_iders.utils", + "peekOfCode": "def match_faces(img, face_ider, person_face_emb):\n # img: bgr,hw3,uint8\n faces = face_ider.get(img)\n if faces is None:\n return None, None\n # face_ider: 1.func:get(np_img) --> {2.normed_embedding,3.bbox}\n for face in faces:\n cur_emb = face.normed_embedding\n sim = face_ider.cal_emb_sim(cur_emb, person_face_emb)\n if sim >= face_ider.threshold:", + "detail": "SHOW.face_iders.utils", + "documentation": {} + }, + { + "label": "Struct", + "kind": 6, + "importPath": "SHOW.flame.FLAME", + "description": "SHOW.flame.FLAME", + "peekOfCode": "class Struct(object):\n def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)\ndef rot_mat_to_euler(rot_mats):\n # Calculates rotation matrix to euler angles\n # Careful for extreme cases of eular angles like [0.0, pi, 0.0]\n sy = torch.sqrt(rot_mats[:, 0, 0] * rot_mats[:, 0, 0] +\n rot_mats[:, 1, 0] * rot_mats[:, 1, 0])\n return torch.atan2(-rot_mats[:, 2, 0], sy)", + "detail": "SHOW.flame.FLAME", + "documentation": {} + }, + { + "label": "FLAME", + "kind": 6, + "importPath": "SHOW.flame.FLAME", + "description": "SHOW.flame.FLAME", + "peekOfCode": "class FLAME(nn.Module):\n \"\"\"\n borrowed from https://github.com/soubhiksanyal/FLAME_PyTorch/blob/master/FLAME.py\n Given FLAME parameters for shape, pose, and expression, this class generates a differentiable FLAME function\n which outputs the a mesh and 2D/3D facial landmarks\n \"\"\"\n def __init__(self, config):\n super(FLAME, self).__init__()\n print(\"Creating the FLAME Decoder\")\n with open(config.flame_geom_path, 'rb') as f:", + "detail": "SHOW.flame.FLAME", + "documentation": {} + }, + { + "label": "FLAMETex", + "kind": 6, + "importPath": "SHOW.flame.FLAME", + "description": "SHOW.flame.FLAME", + "peekOfCode": "class FLAMETex(nn.Module):\n \"\"\"\n current FLAME texture are adapted from BFM Texture Model\n \"\"\"\n def __init__(self, config):\n super(FLAMETex, self).__init__()\n tex_space = np.load(config.tex_space_path)\n mu_key = 'MU'\n pc_key = 'PC' \n n_pc = 199", + "detail": "SHOW.flame.FLAME", + "documentation": {} + }, + { + "label": "to_tensor", + "kind": 2, + "importPath": "SHOW.flame.FLAME", + "description": "SHOW.flame.FLAME", + "peekOfCode": "def to_tensor(array, dtype=torch.float32):\n if 'torch.tensor' not in str(type(array)):\n return torch.tensor(array, dtype=dtype)\ndef to_np(array, dtype=np.float32):\n if 'scipy.sparse' in str(type(array)):\n array = array.todense()\n return np.array(array, dtype=dtype)\nclass 
Struct(object):\n def __init__(self, **kwargs):\n for key, val in kwargs.items():", + "detail": "SHOW.flame.FLAME", + "documentation": {} + }, + { + "label": "to_np", + "kind": 2, + "importPath": "SHOW.flame.FLAME", + "description": "SHOW.flame.FLAME", + "peekOfCode": "def to_np(array, dtype=np.float32):\n if 'scipy.sparse' in str(type(array)):\n array = array.todense()\n return np.array(array, dtype=dtype)\nclass Struct(object):\n def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)\ndef rot_mat_to_euler(rot_mats):\n # Calculates rotation matrix to Euler angles", + "detail": "SHOW.flame.FLAME", + "documentation": {} + }, + { + "label": "rot_mat_to_euler", + "kind": 2, + "importPath": "SHOW.flame.FLAME", + "description": "SHOW.flame.FLAME", + "peekOfCode": "def rot_mat_to_euler(rot_mats):\n # Calculates rotation matrix to Euler angles\n # Careful for extreme cases of Euler angles like [0.0, pi, 0.0]\n sy = torch.sqrt(rot_mats[:, 0, 0] * rot_mats[:, 0, 0] +\n rot_mats[:, 1, 0] * rot_mats[:, 1, 0])\n return torch.atan2(-rot_mats[:, 2, 0], sy)\nclass FLAME(nn.Module):\n \"\"\"\n borrowed from https://github.com/soubhiksanyal/FLAME_PyTorch/blob/master/FLAME.py\n Given FLAME parameters for shape, pose, and expression, this class generates a differentiable FLAME function", + "detail": "SHOW.flame.FLAME", + "documentation": {} + }, + { + "label": "stereographic_unproject_old", + "kind": 2, + "importPath": "SHOW.flame.lbs", + "description": "SHOW.flame.lbs", + "peekOfCode": "def stereographic_unproject_old(a):\n s2 = torch.pow(a, 2).sum(1) # batch\n unproj = 2 * a / (s2 + 1).view(-1, 1).repeat(1, 5) # batch*5\n w = (s2 - 1) / (s2 + 1) # batch\n out = torch.cat((unproj, w.view(-1, 1)), 1) # batch*6\n return out\n# in a batch*5, axis int\ndef stereographic_unproject(a, axis=None):\n \"\"\"\n\tInverse of stereographic projection: increases dimension by one.", + "detail": "SHOW.flame.lbs", + "documentation": {} + }, + { + "label": "stereographic_unproject", + "kind": 2, + "importPath": "SHOW.flame.lbs", + "description": "SHOW.flame.lbs", + "peekOfCode": "def stereographic_unproject(a, axis=None):\n \"\"\"\n\tInverse of stereographic projection: increases dimension by one.\n\t\"\"\"\n batch = a.shape[0]\n if axis is None:\n axis = a.shape[1]\n s2 = torch.pow(a, 2).sum(1) # batch\n ans = torch.autograd.Variable(torch.zeros(batch, a.shape[1] + 1).cuda()) # batch*6\n unproj = 2 * a / (s2 + 1).view(batch, 1).repeat(1, a.shape[1]) # batch*5", + "detail": "SHOW.flame.lbs", + "documentation": {} + }, + { + "label": "rot_mat_to_euler", + "kind": 2, + "importPath": "SHOW.flame.lbs", + "description": "SHOW.flame.lbs", + "peekOfCode": "def rot_mat_to_euler(rot_mats):\n # Calculates rotation matrix to Euler angles\n # Careful for extreme cases of Euler angles like [0.0, pi, 0.0]\n sy = torch.sqrt(rot_mats[:, 0, 0] * rot_mats[:, 0, 0] +\n rot_mats[:, 1, 0] * rot_mats[:, 1, 0])\n return torch.atan2(-rot_mats[:, 2, 0], sy)\ndef find_dynamic_lmk_idx_and_bcoords(vertices, pose, dynamic_lmk_faces_idx,\n dynamic_lmk_b_coords,\n neck_kin_chain, dtype=torch.float32):\n ''' Compute the faces, barycentric coordinates for the dynamic landmarks", + "detail": "SHOW.flame.lbs", + "documentation": {} + }, + { + "label": "find_dynamic_lmk_idx_and_bcoords", + "kind": 2, + "importPath": "SHOW.flame.lbs", + "description": "SHOW.flame.lbs", + "peekOfCode": "def find_dynamic_lmk_idx_and_bcoords(vertices, pose, dynamic_lmk_faces_idx,\n dynamic_lmk_b_coords,\n neck_kin_chain, dtype=torch.float32):\n
''' Compute the faces, barycentric coordinates for the dynamic landmarks\n To do so, we first compute the rotation of the neck around the y-axis\n and then use a pre-computed look-up table to find the faces and the\n barycentric coordinates that will be used.\n Special thanks to Soubhik Sanyal (soubhik.sanyal@tuebingen.mpg.de)\n for providing the original TensorFlow implementation and for the LUT.\n Parameters", + "detail": "SHOW.flame.lbs", + "documentation": {} + }, + { + "label": "vertices2landmarks", + "kind": 2, + "importPath": "SHOW.flame.lbs", + "description": "SHOW.flame.lbs", + "peekOfCode": "def vertices2landmarks(vertices, faces, lmk_faces_idx, lmk_bary_coords):\n ''' Calculates landmarks by barycentric interpolation\n Parameters\n ----------\n vertices: torch.tensor BxVx3, dtype = torch.float32\n The tensor of input vertices\n faces: torch.tensor Fx3, dtype = torch.long\n The faces of the mesh\n lmk_faces_idx: torch.tensor L, dtype = torch.long\n The tensor with the indices of the faces used to calculate the", + "detail": "SHOW.flame.lbs", + "documentation": {} + }, + { + "label": "lbs", + "kind": 2, + "importPath": "SHOW.flame.lbs", + "description": "SHOW.flame.lbs", + "peekOfCode": "def lbs(betas, pose, v_template, shapedirs, posedirs, J_regressor, parents,\n lbs_weights, pose2rot=True, dtype=torch.float32):\n ''' Performs Linear Blend Skinning with the given shape and pose parameters\n Parameters\n ----------\n betas : torch.tensor BxNB\n The tensor of shape parameters\n pose : torch.tensor Bx(J + 1) * 3\n The pose parameters in axis-angle format\n v_template torch.tensor BxVx3", + "detail": "SHOW.flame.lbs", + "documentation": {} + }, + { + "label": "vertices2joints", + "kind": 2, + "importPath": "SHOW.flame.lbs", + "description": "SHOW.flame.lbs", + "peekOfCode": "def vertices2joints(J_regressor, vertices):\n ''' Calculates the 3D joint locations from the vertices\n Parameters\n ----------\n J_regressor : torch.tensor JxV\n The regressor array that is used to calculate the joints from the\n position of the vertices\n vertices : torch.tensor BxVx3\n The tensor of mesh vertices\n Returns", + "detail": "SHOW.flame.lbs", + "documentation": {} + }, + { + "label": "blend_shapes", + "kind": 2, + "importPath": "SHOW.flame.lbs", + "description": "SHOW.flame.lbs", + "peekOfCode": "def blend_shapes(betas, shape_disps):\n ''' Calculates the per vertex displacement due to the blend shapes\n Parameters\n ----------\n betas : torch.tensor Bx(num_betas)\n Blend shape coefficients\n shape_disps: torch.tensor Vx3x(num_betas)\n Blend shapes\n Returns\n -------", + "detail": "SHOW.flame.lbs", + "documentation": {} + }, + { + "label": "transform_mat", + "kind": 2, + "importPath": "SHOW.flame.lbs", + "description": "SHOW.flame.lbs", + "peekOfCode": "def transform_mat(R, t):\n ''' Creates a batch of transformation matrices\n Args:\n - R: Bx3x3 array of a batch of rotation matrices\n - t: Bx3x1 array of a batch of translation vectors\n Returns:\n - T: Bx4x4 Transformation matrix\n '''\n # No padding left or right, only add an extra row\n return torch.cat([F.pad(R, [0, 0, 0, 1]),", + "detail": "SHOW.flame.lbs", + "documentation": {} + }, + { + "label": "batch_rigid_transform", + "kind": 2, + "importPath": "SHOW.flame.lbs", + "description": "SHOW.flame.lbs", + "peekOfCode": "def batch_rigid_transform(rot_mats, joints, parents, dtype=torch.float32):\n \"\"\"\n Applies a batch of rigid transformations to the joints\n Parameters\n ----------\n rot_mats : torch.tensor BxNx3x3\n Tensor of rotation 
matrices\n joints : torch.tensor BxNx3\n Locations of joints\n parents : torch.tensor BxN", + "detail": "SHOW.flame.lbs", + "documentation": {} + }, + { + "label": "my_logger", + "kind": 6, + "importPath": "SHOW.loggers.base", + "description": "SHOW.loggers.base", + "peekOfCode": "class my_logger(metaclass=ABCMeta):\n # ABCMeta must be passed as the metaclass; the Py2-style __metaclass__ attribute is a no-op in Py3\n @abstractmethod\n def log(self,*args,**kwargs):\n pass\n @abstractmethod\n def log_bs(self,*args,**kwargs):\n pass\n @abstractmethod\n def update_config(self,*args,**kwargs):", + "detail": "SHOW.loggers.base", + "documentation": {} + }, + { + "label": "img_preprocess", + "kind": 2, + "importPath": "SHOW.loggers.base", + "description": "SHOW.loggers.base", + "peekOfCode": "def img_preprocess(img):\n # img: 0-1\n # img: tensor or ndarray\n # img: (1,c,h,w)\n # img: (c,h,w)\n # img: (h,w,c)\n # img: cpu or gpu\n # img: grad or no_grad\n if isinstance(img,torch.Tensor):\n img=img.cpu().detach()", + "detail": "SHOW.loggers.base", + "documentation": {} + }, + { + "label": "batch_logger", + "kind": 6, + "importPath": "SHOW.loggers.builder", + "description": "SHOW.loggers.builder", + "peekOfCode": "class batch_logger:\n def __init__(self, loggers=[]):\n self.loggers = loggers\n self.warn_filter_list = []\n def __iter__(self):\n pass\n def __getstate__(self):\n pass\n def __getattr__(self, name):\n # print(f'__getattr__:{name}')", + "detail": "SHOW.loggers.builder", + "documentation": {} + }, + { + "label": "build_my_logger", + "kind": 2, + "importPath": "SHOW.loggers.builder", + "description": "SHOW.loggers.builder", + "peekOfCode": "def build_my_logger(log_config, init_run=True):\n logger_list = []\n log_interval = log_config['interval']\n for info in log_config['hooks']:\n logger_hook = mmcv.build_from_cfg(\n info, MMYLOGGER, default_args=dict(interval=log_interval))\n if init_run:\n # logger_hook.before_run(None)\n logger_hook.initialize()\n logger_list.append(logger_hook)", + "detail": "SHOW.loggers.builder", + "documentation": {} + }, + { + "label": "MMYLOGGER", + "kind": 5, + "importPath": "SHOW.loggers.builder", + "description": "SHOW.loggers.builder", + "peekOfCode": "MMYLOGGER = Registry('mylogger')\nclass batch_logger:\n def __init__(self, loggers=[]):\n self.loggers = loggers\n self.warn_filter_list = []\n def __iter__(self):\n pass\n def __getstate__(self):\n pass\n def __getattr__(self, name):", + "detail": "SHOW.loggers.builder", + "documentation": {} + }, + { + "label": "StreamToLoguru", + "kind": 6, + "importPath": "SHOW.loggers.logger", + "description": "SHOW.loggers.logger", + "peekOfCode": "class StreamToLoguru:\n \"\"\"\n stream object that redirects writes to a logger instance.\n \"\"\"\n def __init__(self, level=\"INFO\", caller_names=(\"apex\", \"pycocotools\")):\n \"\"\"\n Args:\n level(str): log level string of loguru. Default value: \"INFO\".\n caller_names(tuple): caller names of redirected module.\n Default value: (apex, pycocotools).", + "detail": "SHOW.loggers.logger", + "documentation": {} + }, + { + "label": "get_caller_name", + "kind": 2, + "importPath": "SHOW.loggers.logger", + "description": "SHOW.loggers.logger", + "peekOfCode": "def get_caller_name(depth=0):\n \"\"\"\n Args:\n depth (int): Depth of caller context, use 0 for caller depth. 
Default value: 0.\n Returns:\n str: module name of the caller\n \"\"\"\n # the following logic is a little bit faster than inspect.stack() logic\n frame = inspect.currentframe().f_back\n for _ in range(depth):", + "detail": "SHOW.loggers.logger", + "documentation": {} + }, + { + "label": "setup_logger", + "kind": 2, + "importPath": "SHOW.loggers.logger", + "description": "SHOW.loggers.logger", + "peekOfCode": "def setup_logger(save_dir, distributed_rank=0, filename=\"log.txt\", mode=\"a\"):\n \"\"\"setup logger for training and testing.\n Args:\n save_dir(str): location to save log file\n distributed_rank(int): device rank when multi-gpu environment\n filename (string): log save name.\n mode(str): log file write mode, `append` or `override`. Default is `a`.\n Return:\n logger instance.\n \"\"\"", + "detail": "SHOW.loggers.logger", + "documentation": {} + }, + { + "label": "MyNeptuneLogger", + "kind": 6, + "importPath": "SHOW.loggers.MyNeptuneLogger", + "description": "SHOW.loggers.MyNeptuneLogger", + "peekOfCode": "class MyNeptuneLogger(NeptuneLoggerHook):\n def __init__(self,*args, **kwargs):\n super().__init__(*args, **kwargs)\n from neptune.new.types import File\n self.File=File\n def initialize(self):\n if self.init_kwargs:\n self.run = self.neptune.init(**self.init_kwargs)\n else:\n self.run = self.neptune.init()", + "detail": "SHOW.loggers.MyNeptuneLogger", + "documentation": {} + }, + { + "label": "MyTextLogger", + "kind": 6, + "importPath": "SHOW.loggers.MyTextLogger", + "description": "SHOW.loggers.MyTextLogger", + "peekOfCode": "class MyTextLogger(object):\n def __init__(self,save_dir,filename=\"log.txt\", mode=\"a\",*args, **kwargs):\n from SHOW.loggers.logger import setup_logger\n setup_logger(save_dir, filename=filename, mode=mode)\n @logger.catch\n def log(self, tag_name:str, tag_value,print_to_screen=False,**kwargs):\n logger.info(f\"{tag_name}:{tag_value}\")\n @logger.catch\n def log_bs(self,append=True,print_to_screen=False,**kwargs):\n for key,val in kwargs.items():", + "detail": "SHOW.loggers.MyTextLogger", + "documentation": {} + }, + { + "label": "MyTFLogger", + "kind": 6, + "importPath": "SHOW.loggers.MyTFLogger", + "description": "SHOW.loggers.MyTFLogger", + "peekOfCode": "class MyTFLogger(TensorboardLoggerHook,my_logger):\n def __init__(self,*args, **kwargs):\n super().__init__(*args, **kwargs)\n if Path(self.log_dir).exists():\n shutil.rmtree(self.log_dir)\n @logger.catch\n def log(self, tags:dict,iters=0):\n for tag, val in tags.items():\n if isinstance(val, str):\n self.writer.add_text(tag, val, iters)", + "detail": "SHOW.loggers.MyTFLogger", + "documentation": {} + }, + { + "label": "MyWandbLogger", + "kind": 6, + "importPath": "SHOW.loggers.MyWandbLogger", + "description": "SHOW.loggers.MyWandbLogger", + "peekOfCode": "class MyWandbLogger(WandbLoggerHook):\n def __init__(self,wandb_key,wandb_name,*args, **kwargs):\n os.environ['WANDB_API_KEY'] = wandb_key\n os.environ['WANDB_NAME'] = wandb_name\n super().__init__(*args, **kwargs)\n def initialize(self):\n if self.wandb is None:\n self.import_wandb()\n if self.init_kwargs:\n self.wandb.init(**self.init_kwargs)", + "detail": "SHOW.loggers.MyWandbLogger", + "documentation": {} + }, + { + "label": "GMoF", + "kind": 6, + "importPath": "SHOW.losses.losses", + "description": "SHOW.losses.losses", + "peekOfCode": "class GMoF(nn.Module):\n def __init__(self, rho=100):\n super().__init__()\n self.rho = rho\n def extra_repr(self):\n return 'rho = {}'.format(self.rho)\n def forward(self, residual):\n squared_res = residual ** 2\n dist
= torch.div(squared_res, squared_res + self.rho ** 2)\n return self.rho ** 2 * dist", + "detail": "SHOW.losses.losses", + "documentation": {} + }, + { + "label": "temporary_loss", + "kind": 2, + "importPath": "SHOW.losses.losses", + "description": "SHOW.losses.losses", + "peekOfCode": "def temporary_loss(o_w, i_w, gmof, param):\n return (o_w ** 2) * (gmof(\n i_w*(param[2:, ...] + param[:-2, ...] - 2 * param[1:-1, ...]))).mean()\ndef cal_deg_delta(deta, deg_interval):\n # deg_loss=0\n theta = torch.arctan((deta[:, 2]) / (deta[:, 1]))\n theta_deg = torch.rad2deg(theta)\n diff_up = theta_deg - deg_interval[1]\n diff_up = torch.where(diff_up > 0, diff_up, torch.zeros(\n diff_up.shape, device=diff_up.device))", + "detail": "SHOW.losses.losses", + "documentation": {} + }, + { + "label": "cal_deg_delta", + "kind": 2, + "importPath": "SHOW.losses.losses", + "description": "SHOW.losses.losses", + "peekOfCode": "def cal_deg_delta(deta, deg_interval):\n # deg_loss=0\n theta = torch.arctan((deta[:, 2]) / (deta[:, 1]))\n theta_deg = torch.rad2deg(theta)\n diff_up = theta_deg - deg_interval[1]\n diff_up = torch.where(diff_up > 0, diff_up, torch.zeros(\n diff_up.shape, device=diff_up.device))\n diff_down = deg_interval[0] - theta_deg\n diff_down = torch.where(diff_down > 0, diff_down, torch.zeros(\n diff_down.shape, device=diff_down.device))", + "detail": "SHOW.losses.losses", + "documentation": {} + }, + { + "label": "get_body_height", + "kind": 2, + "importPath": "SHOW.losses.losses", + "description": "SHOW.losses.losses", + "peekOfCode": "def get_body_height(verts, faces):\n if verts.shape[0] == 0:\n return 0.0\n else:\n head_faces = faces[2581]\n head_verts = verts[head_faces]\n top_head_v = 0.8277337276382795 * head_verts[0] + 0.1422200962169292 * head_verts[1] + \\\n 0.030046176144791284 * head_verts[2]\n feet_faces = faces[15605]\n feet_verts = verts[feet_faces]", + "detail": "SHOW.losses.losses", + "documentation": {} + }, + { + "label": "compute_mass", + "kind": 2, + "importPath": "SHOW.losses.losses", + "description": "SHOW.losses.losses", + "peekOfCode": "def compute_mass(tris, DENSITY=1):\n ''' Computes the mass from volume and average body density\n '''\n x = tris[:, :, :, 0]\n y = tris[:, :, :, 1]\n z = tris[:, :, :, 2]\n volume = (\n -x[:, :, 2] * y[:, :, 1] * z[:, :, 0] +\n x[:, :, 1] * y[:, :, 2] * z[:, :, 0] +\n x[:, :, 2] * y[:, :, 0] * z[:, :, 1] -", + "detail": "SHOW.losses.losses", + "documentation": {} + }, + { + "label": "cvt_dict_to_grad", + "kind": 2, + "importPath": "SHOW.losses.losses", + "description": "SHOW.losses.losses", + "peekOfCode": "def cvt_dict_to_grad(params,device,dtype):\n # params:{ np.ndarray/torch.Tensor(cpu/gpu) }\n for key in params.keys():\n if isinstance(params[key],np.ndarray):\n params[key]=torch.from_numpy(params[key])\n params[key]=nn.Parameter(params[key].clone().detach().to(device).type(dtype))\n return params\ndef cal_model_output(vposer=None,body_model=None,body_params=None):\n cur_pose = None\n if vposer is not None:", + "detail": "SHOW.losses.losses", + "documentation": {} + }, + { + "label": "cal_model_output", + "kind": 2, + "importPath": "SHOW.losses.losses", + "description": "SHOW.losses.losses", + "peekOfCode": "def cal_model_output(vposer=None,body_model=None,body_params=None):\n cur_pose = None\n if vposer is not None:\n cur_pose = vposer.decode(\n body_params['pose_embedding'],\n output_type='aa'\n ).view(-1,63)\n cur_bs=cur_pose.shape[0]\n model_output = body_model(\n return_verts=True,", + "detail": "SHOW.losses.losses", + 
"documentation": {} + }, + { + "label": "get_tpose_vertice", + "kind": 2, + "importPath": "SHOW.losses.losses", + "description": "SHOW.losses.losses", + "peekOfCode": "def get_tpose_vertice(body_model,betas,**kwargs):\n batch_size=body_model.batch_size\n tpose_body = body_model(\n return_verts=True,\n body_pose=torch.zeros(batch_size, 63).type_as(betas),\n betas=betas.expand(batch_size, -1),\n **kwargs\n )\n tpose_vertices = tpose_body.vertices[0]\n return tpose_vertices", + "detail": "SHOW.losses.losses", + "documentation": {} + }, + { + "label": "cal_smplx_head_transl", + "kind": 2, + "importPath": "SHOW.losses.losses", + "description": "SHOW.losses.losses", + "peekOfCode": "def cal_smplx_head_transl(tpose_vertices,smplx2flame_idx):\n pre_flame_vertices = torch.index_select(\n tpose_vertices, 0, smplx2flame_idx)\n smplx_shape_mean = pre_flame_vertices.mean(0)\n return smplx_shape_mean\nclass GMoF(nn.Module):\n def __init__(self, rho=100):\n super().__init__()\n self.rho = rho\n def extra_repr(self):", + "detail": "SHOW.losses.losses", + "documentation": {} + }, + { + "label": "create_closure", + "kind": 2, + "importPath": "SHOW.losses.losses", + "description": "SHOW.losses.losses", + "peekOfCode": "def create_closure(\n optimizer,\n vposer,body_model, \n body_params,\n camera_org,\n lmk_faces_idx,\n lmk_bary_coords,\n op_2dkpts,op_j_weight,\n op_gt_conf,op_valid_flag,\n robustifier,", + "detail": "SHOW.losses.losses", + "documentation": {} + }, + { + "label": "attr_dict", + "kind": 6, + "importPath": "SHOW.utils.attrdict", + "description": "SHOW.utils.attrdict", + "peekOfCode": "class attr_dict(dict):\n def __init__(self, d=None, **kwargs):\n if d is None:\n d = {}\n if kwargs:\n d.update(**kwargs)\n for k, v in d.items():\n setattr(self, k, v)\n # Class attributes\n for k in self.__class__.__dict__.keys():", + "detail": "SHOW.utils.attrdict", + "documentation": {} + }, + { + "label": "msk_to_xywh", + "kind": 2, + "importPath": "SHOW.utils.bbox", + "description": "SHOW.utils.bbox", + "peekOfCode": "def msk_to_xywh(msk):\n \"\"\"\n calculate box [left upper width height] from mask.\n :param msk: nd.array, single-channel or 3-channels mask\n :return: float, iou score \n \"\"\"\n if len(msk.shape) > 2:\n msk = msk[..., 0]\n nonzeros = np.nonzero(msk.astype(np.uint8))\n u, l = np.min(nonzeros, axis=1)", + "detail": "SHOW.utils.bbox", + "documentation": {} + }, + { + "label": "msk_to_xyxy", + "kind": 2, + "importPath": "SHOW.utils.bbox", + "description": "SHOW.utils.bbox", + "peekOfCode": "def msk_to_xyxy(msk):\n \"\"\"\n calculate box [left upper right bottom] from mask.\n :param msk: nd.array, single-channel or 3-channels mask\n :return: float, iou score \n \"\"\"\n if len(msk.shape) > 2:\n msk = msk[..., 0]\n nonzeros = np.nonzero(msk.astype(np.uint8))\n u, l = np.min(nonzeros, axis=1)", + "detail": "SHOW.utils.bbox", + "documentation": {} + }, + { + "label": "get_edges", + "kind": 2, + "importPath": "SHOW.utils.bbox", + "description": "SHOW.utils.bbox", + "peekOfCode": "def get_edges(msk):\n \"\"\"\n get edge from mask\n :param msk: nd.array, single-channel or 3-channel mask\n :return: edges: nd.array, edges with same shape with mask\n \"\"\"\n msk_sp = msk.shape\n if len(msk_sp) == 2:\n c = 1 # single channel\n elif (len(msk_sp) == 3) and (msk_sp[2] == 3):", + "detail": "SHOW.utils.bbox", + "documentation": {} + }, + { + "label": "prj_vtx_cam", + "kind": 2, + "importPath": "SHOW.utils.cam", + "description": "SHOW.utils.cam", + "peekOfCode": "def prj_vtx_cam(vtx, cam_K):\n \"\"\"\n 
project 3D vertices to 2-dimensional image plane\n :param vtx: (N, 3), vertices\n :param cam_K: (3, 3), intrinsic camera parameter\n :return: pts_2D: (N, 2), pixel coordinates; z: (N,), depth\n \"\"\"\n pts_3d_c = np.matmul(cam_K, vtx.T) \n pts_2d = pts_3d_c[:2] / pts_3d_c[2]\n z = pts_3d_c[2]", + "detail": "SHOW.utils.cam", + "documentation": {} + }, + { + "label": "prj_vtx_pose", + "kind": 2, + "importPath": "SHOW.utils.cam", + "description": "SHOW.utils.cam", + "peekOfCode": "def prj_vtx_pose(vtx, pose, cam_K):\n \"\"\"\n project 3D vertices to 2-dimensional image plane by pose\n :param vtx: (N, 3), vertices\n :param pose: (3, 4)\n :param cam_K: (3, 3), intrinsic camera parameter\n :return: pts_2D: (N, 2), pixel coordinates; z: (N,), depth\n \"\"\"\n # pts_3d_w = torch.mm(pose[:, :3], vtx.t) + pose[:, 3].reshape((3, 1)) # (3, N)\n # pts_3d_c = torch.mm(cam_K, pts_3d_w) ", + "detail": "SHOW.utils.cam", + "documentation": {} + }, + { + "label": "show_im", + "kind": 2, + "importPath": "SHOW.utils.colab", + "description": "SHOW.utils.colab", + "peekOfCode": "def show_im():\n from IPython.display import Image, display\n import tempfile\n import os.path as osp\n with tempfile.TemporaryDirectory() as tmpdir:\n file_name = osp.join(tmpdir, 'pose_results.png')\n cv2.imwrite(file_name, vis_result)\n display(Image(file_name))\ndef video(path):\n mp4 = open(path,'rb').read()", + "detail": "SHOW.utils.colab", + "documentation": {} + }, + { + "label": "video", + "kind": 2, + "importPath": "SHOW.utils.colab", + "description": "SHOW.utils.colab", + "peekOfCode": "def video(path):\n mp4 = open(path,'rb').read()\n data_url = \"data:video/mp4;base64,\" + b64encode(mp4).decode()\n return HTML('<video controls><source src=\"%s\" type=\"video/mp4\"></video>' % data_url)", + "detail": "SHOW.utils.colab", + "documentation": {} + }, + { + "label": "try_statement", + "kind": 2, + "importPath": "SHOW.utils.decorator", + "description": "SHOW.utils.decorator", + "peekOfCode": "def try_statement(func):\n def wrapper(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except:\n import traceback\n traceback.print_exc()\n return wrapper \n# convert a function into recursive style to \n# handle nested dict/list/tuple variables", + "detail": "SHOW.utils.decorator", + "documentation": {} + }, + { + "label": "make_recursive_func", + "kind": 2, + "importPath": "SHOW.utils.decorator", + "description": "SHOW.utils.decorator", + "peekOfCode": "def make_recursive_func(func):\n def wrapper(vars):\n if isinstance(vars, list):\n return [wrapper(x) for x in vars]\n elif isinstance(vars, tuple):\n return tuple([wrapper(x) for x in vars])\n elif isinstance(vars, dict):\n return {k: wrapper(v) for k, v in vars.items()}\n else:\n return func(vars)", + "detail": "SHOW.utils.decorator", + "documentation": {} + }, + { + "label": "tensor2float", + "kind": 2, + "importPath": "SHOW.utils.decorator", + "description": "SHOW.utils.decorator", + "peekOfCode": "def tensor2float(vars):\n if isinstance(vars, float) or isinstance(vars, int):\n return vars\n elif isinstance(vars, torch.Tensor):\n return vars.data.item()\n else:\n raise NotImplementedError(\"invalid input type {} for tensor2float\".format(type(vars)))\n@make_recursive_func\ndef tensor2numpy(vars):\n if isinstance(vars, np.ndarray):", + "detail": "SHOW.utils.decorator", + "documentation": {} + }, + { + "label": "tensor2numpy", + "kind": 2, + "importPath": "SHOW.utils.decorator", + "description": "SHOW.utils.decorator", + "peekOfCode": "def tensor2numpy(vars):\n if isinstance(vars, np.ndarray):\n return vars\n elif isinstance(vars,
torch.Tensor):\n return vars.detach().cpu().numpy().copy()\n else:\n raise NotImplementedError(\"invalid input type {} for tensor2numpy\".format(type(vars)))\n@make_recursive_func\ndef to_tensor(vars):\n if isinstance(vars, np.ndarray):", + "detail": "SHOW.utils.decorator", + "documentation": {} + }, + { + "label": "to_tensor", + "kind": 2, + "importPath": "SHOW.utils.decorator", + "description": "SHOW.utils.decorator", + "peekOfCode": "def to_tensor(vars):\n if isinstance(vars, np.ndarray):\n return torch.from_numpy(vars)\n elif isinstance(vars, (int,float)):\n return torch.tensor(vars)\n else:\n return vars\n@make_recursive_func\ndef tocuda(vars):\n if isinstance(vars, torch.Tensor):", + "detail": "SHOW.utils.decorator", + "documentation": {} + }, + { + "label": "tocuda", + "kind": 2, + "importPath": "SHOW.utils.decorator", + "description": "SHOW.utils.decorator", + "peekOfCode": "def tocuda(vars):\n if isinstance(vars, torch.Tensor):\n return vars.cuda()\n elif isinstance(vars, str):\n return vars\n else:\n raise NotImplementedError(\"invalid input type {} for tocuda\".format(type(vars)))", + "detail": "SHOW.utils.decorator", + "documentation": {} + }, + { + "label": "img_preprocess", + "kind": 2, + "importPath": "SHOW.utils.disp_img", + "description": "SHOW.utils.disp_img", + "peekOfCode": "def img_preprocess(img):\n # img: 0-1\n # img: tensor or ndarray\n # img: (1,c,h,w)\n # img: (c,h,w)\n # img: (h,w,c)\n # img: cpu or gpu\n # img: grad or no_grad\n if isinstance(img,torch.Tensor):\n img=img.cpu().detach()", + "detail": "SHOW.utils.disp_img", + "documentation": {} + }, + { + "label": "show_PIL_im_window", + "kind": 2, + "importPath": "SHOW.utils.disp_img", + "description": "SHOW.utils.disp_img", + "peekOfCode": "def show_PIL_im_window(tensor):\n import PIL\n img=img_preprocess(tensor)\n if isinstance(img,torch.Tensor):\n img=img.numpy()\n scale=255/img.max()\n img = (img.copy() * scale)\n if img.shape[-1]==3:\n img=img[:, :, [2, 1, 0]]\n if img.shape[-1]==1:", + "detail": "SHOW.utils.disp_img", + "documentation": {} + }, + { + "label": "save_tensor_to_file", + "kind": 2, + "importPath": "SHOW.utils.disp_img", + "description": "SHOW.utils.disp_img", + "peekOfCode": "def save_tensor_to_file(tensor, path='tensor.jpg'):\n if isinstance(tensor, torch.Tensor):\n tensor = tensor[0].detach().cpu().numpy()\n img = (tensor.transpose(1, 2, 0).copy() * 255)[:, :, [2, 1, 0]]\n img = np.minimum(np.maximum(img, 0), 255).astype(np.uint8)\n cv2.imwrite(path, img)\ndef show_plt_fig_face(datas):\n plt.figure()\n ax = plt.gca()\n ax.xaxis.set_ticks_position('top')", + "detail": "SHOW.utils.disp_img", + "documentation": {} + }, + { + "label": "show_plt_fig_face", + "kind": 2, + "importPath": "SHOW.utils.disp_img", + "description": "SHOW.utils.disp_img", + "peekOfCode": "def show_plt_fig_face(datas):\n plt.figure()\n ax = plt.gca()\n ax.xaxis.set_ticks_position('top')\n ax.invert_yaxis()\n plt.axis('equal')\n plt.scatter(datas[:, 0], datas[:, 1])\n for i, p in enumerate(datas):\n plt.annotate(str(i), p)\n plt.show()", + "detail": "SHOW.utils.disp_img", + "documentation": {} + }, + { + "label": "show_plt_fig_im", + "kind": 2, + "importPath": "SHOW.utils.disp_img", + "description": "SHOW.utils.disp_img", + "peekOfCode": "def show_plt_fig_im(X):\n X = X[0].detach().cpu().numpy()*255\n X = X.astype(np.uint8).transpose(1, 2, 0)\n plt.imshow(X)\ndef gen_cheers_board(\n im_size = 8,\n im_size2 = 8\n):\n a=np.zeros([im_size,im_size2])\n b=np.zeros([im_size,im_size2])", + "detail": "SHOW.utils.disp_img", + 
"documentation": {} + }, + { + "label": "gen_cheers_board", + "kind": 2, + "importPath": "SHOW.utils.disp_img", + "description": "SHOW.utils.disp_img", + "peekOfCode": "def gen_cheers_board(\n im_size = 8,\n im_size2 = 8\n):\n a=np.zeros([im_size,im_size2])\n b=np.zeros([im_size,im_size2])\n for i in range(a.shape[0]):\n if i%2==0:\n a[i,:]=1\n else:", + "detail": "SHOW.utils.disp_img", + "documentation": {} + }, + { + "label": "fig2data", + "kind": 2, + "importPath": "SHOW.utils.disp_img", + "description": "SHOW.utils.disp_img", + "peekOfCode": "def fig2data(fig):\n \"\"\"\n fig = plt.figure()\n image = fig2data(fig)\n @brief Convert a Matplotlib figure to a 4D numpy array with RGBA channels and return it\n @param fig a matplotlib figure\n @return a numpy 3D array of RGBA values\n \"\"\"\n import PIL.Image as Image\n # draw the renderer", + "detail": "SHOW.utils.disp_img", + "documentation": {} + }, + { + "label": "ffmpeg_merge_api", + "kind": 2, + "importPath": "SHOW.utils.ffmpeg", + "description": "SHOW.utils.ffmpeg", + "peekOfCode": "def ffmpeg_merge_api(\n print_cmd=False,\n loglevel='error',\n fontcolor='red',\n out_name='out.mp4',\n ffmpeg_path='ffmpeg',\n font_file_path=DEFAULT_FONT_FILE_PATH,\n title2vpath_map=None,\n resolution=None,\n fontsize = 'h/15',", + "detail": "SHOW.utils.ffmpeg", + "documentation": {} + }, + { + "label": "temporal_concat_video", + "kind": 2, + "importPath": "SHOW.utils.ffmpeg", + "description": "SHOW.utils.ffmpeg", + "peekOfCode": "def temporal_concat_video(\n input_path_list: List[str],\n input_title_list: List[str],\n output_path: str,\n resolution=(720, 1080 * 3),\n remove_raw_files: bool = False,\n disable_log: bool = False,\n font_file_path=DEFAULT_FONT_FILE_PATH,\n fontcolor='red',\n fontsize = 'h/20',", + "detail": "SHOW.utils.ffmpeg", + "documentation": {} + }, + { + "label": "DEFAULT_FONT_FILE_PATH", + "kind": 5, + "importPath": "SHOW.utils.ffmpeg", + "description": "SHOW.utils.ffmpeg", + "peekOfCode": "DEFAULT_FONT_FILE_PATH = os.path.join(\n os.path.dirname(__file__), '../../../data/AdobeHeitiStd-Regular2.otf')\ndef ffmpeg_merge_api(\n print_cmd=False,\n loglevel='error',\n fontcolor='red',\n out_name='out.mp4',\n ffmpeg_path='ffmpeg',\n font_file_path=DEFAULT_FONT_FILE_PATH,\n title2vpath_map=None,", + "detail": "SHOW.utils.ffmpeg", + "documentation": {} + }, + { + "label": "func_factory", + "kind": 6, + "importPath": "SHOW.utils.fun_factory", + "description": "SHOW.utils.fun_factory", + "peekOfCode": "class func_factory(object):\n def __init__(self,factory_name='task'):\n self.factory_name=factory_name\n self.__func_name_to_mem_map={}\n def register_module(self):\n def decorator(func):\n logger.info(f'{func.__name__} is registered')\n self.__func_name_to_mem_map[func.__name__] = func\n def wrapper(*args, **kwargs):\n func(*args, **kwargs)", + "detail": "SHOW.utils.fun_factory", + "documentation": {} + }, + { + "label": "dict2obj", + "kind": 2, + "importPath": "SHOW.utils.fun_factory", + "description": "SHOW.utils.fun_factory", + "peekOfCode": "def dict2obj(d):\n if isinstance(d, list):\n d = [dict2obj(x) for x in d]\n if not isinstance(d, dict):\n return d\n class C(object):\n pass\n o = C()\n for k in d:\n o.__dict__[k] = dict2obj(d[k])", + "detail": "SHOW.utils.fun_factory", + "documentation": {} + }, + { + "label": "AverageMeter", + "kind": 6, + "importPath": "SHOW.utils.metric", + "description": "SHOW.utils.metric", + "peekOfCode": "class AverageMeter:\n \"\"\"Track a series of values and provide access to smoothed values over a\n window 
or the global series average.\n \"\"\"\n def __init__(self, window_size=50):\n self._deque = deque(maxlen=window_size)\n self._total = 0.0\n self._count = 0\n def update(self, value):\n self._deque.append(value)", + "detail": "SHOW.utils.metric", + "documentation": {} + }, + { + "label": "MeterBuffer", + "kind": 6, + "importPath": "SHOW.utils.metric", + "description": "SHOW.utils.metric", + "peekOfCode": "class MeterBuffer(defaultdict):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self, window_size=20):\n factory = functools.partial(AverageMeter, window_size=window_size)\n super().__init__(factory)\n def reset(self):\n for v in self.values():\n v.reset()\n def get_filtered_meter(self, filter_key=\"time\"):\n return {k: v for k, v in self.items() if filter_key in k}", + "detail": "SHOW.utils.metric", + "documentation": {} + }, + { + "label": "get_total_and_free_memory_in_Mb", + "kind": 2, + "importPath": "SHOW.utils.metric", + "description": "SHOW.utils.metric", + "peekOfCode": "def get_total_and_free_memory_in_Mb(cuda_device):\n devices_info_str = os.popen(\n \"nvidia-smi --query-gpu=memory.total,memory.used --format=csv,nounits,noheader\"\n )\n devices_info = devices_info_str.read().strip().split(\"\\n\")\n total, used = devices_info[int(cuda_device)].split(\",\")\n return int(total), int(used)\ndef occupy_mem(cuda_device, mem_ratio=0.9):\n \"\"\"\n pre-allocate gpu memory for training to avoid memory fragmentation.", + "detail": "SHOW.utils.metric", + "documentation": {} + }, + { + "label": "occupy_mem", + "kind": 2, + "importPath": "SHOW.utils.metric", + "description": "SHOW.utils.metric", + "peekOfCode": "def occupy_mem(cuda_device, mem_ratio=0.9):\n \"\"\"\n pre-allocate gpu memory for training to avoid memory fragmentation.\n \"\"\"\n total, used = get_total_and_free_memory_in_Mb(cuda_device)\n max_mem = int(total * mem_ratio)\n block_mem = max_mem - used\n x = torch.cuda.FloatTensor(256, 1024, block_mem)\n del x\n time.sleep(5)", + "detail": "SHOW.utils.metric", + "documentation": {} + }, + { + "label": "gpu_mem_usage", + "kind": 2, + "importPath": "SHOW.utils.metric", + "description": "SHOW.utils.metric", + "peekOfCode": "def gpu_mem_usage():\n \"\"\"\n Compute the GPU memory usage for the current device (MB).\n \"\"\"\n mem_usage_bytes = torch.cuda.max_memory_allocated()\n return mem_usage_bytes / (1024 * 1024)\nclass AverageMeter:\n \"\"\"Track a series of values and provide access to smoothed values over a\n window or the global series average.\n \"\"\"", + "detail": "SHOW.utils.metric", + "documentation": {} + }, + { + "label": "__all__", + "kind": 5, + "importPath": "SHOW.utils.metric", + "description": "SHOW.utils.metric", + "peekOfCode": "__all__ = [\n \"AverageMeter\",\n \"MeterBuffer\",\n \"get_total_and_free_memory_in_Mb\",\n \"occupy_mem\",\n \"gpu_mem_usage\",\n]\ndef get_total_and_free_memory_in_Mb(cuda_device):\n devices_info_str = os.popen(\n \"nvidia-smi --query-gpu=memory.total,memory.used --format=csv,nounits,noheader\"", + "detail": "SHOW.utils.metric", + "documentation": {} + }, + { + "label": "replace_spec_code", + "kind": 2, + "importPath": "SHOW.utils.misc", + "description": "SHOW.utils.misc", + "peekOfCode": "def replace_spec_code(in_name):\n disp_name=re.sub(\n re.compile(\n r'[^a-zA-Z0-9]'\n # r'[-,$()#+&*]'\n ),\n \"_\",\n in_name)\n return disp_name\ndef cvt_dict_to_tensor(data,device,dtype):", + "detail": "SHOW.utils.misc", + "documentation": {} + }, + { + "label": "cvt_dict_to_tensor", + "kind": 2, + "importPath": 
"SHOW.utils.misc", + "description": "SHOW.utils.misc", + "peekOfCode": "def cvt_dict_to_tensor(data,device,dtype):\n if isinstance(data,list):\n return [cvt_dict_to_tensor(v,device,dtype) for v in data]\n elif isinstance(data,dict):\n return {k : (cvt_dict_to_tensor(v,device,dtype) if k!='seg_stack' else v)\n for k,v in data.items()}\n else:\n if isinstance(data,np.ndarray):\n return torch.from_numpy(data).to(device=device,dtype=dtype)\n elif isinstance(data,torch.Tensor):", + "detail": "SHOW.utils.misc", + "documentation": {} + }, + { + "label": "expand_var_shape", + "kind": 2, + "importPath": "SHOW.utils.misc", + "description": "SHOW.utils.misc", + "peekOfCode": "def expand_var_shape(var,target_len=300,expand_axis=-1):\n if isinstance(var,np.ndarray):\n org_len=var.shape[expand_axis]\n if target_len==org_len:\n return\n oth_len=var.shape[:expand_axis]\n new_var=np.concatenate([var,np.zeros(*oth_len,target_len-org_len)],axis=expand_axis)\n return new_var\n if isinstance(var,torch.Tensor):\n org_len=var.shape[expand_axis]", + "detail": "SHOW.utils.misc", + "documentation": {} + }, + { + "label": "str_to_torch_dtype", + "kind": 2, + "importPath": "SHOW.utils.misc", + "description": "SHOW.utils.misc", + "peekOfCode": "def str_to_torch_dtype(s):\n dtype = torch.float32\n if s == 'float64':\n dtype = torch.float64\n elif s == 'float32':\n dtype = torch.float32\n return dtype\ndef reload_module(s:str='SHOW.smplx_dataset'):\n import imp\n eval(f'import {s} as target')", + "detail": "SHOW.utils.misc", + "documentation": {} + }, + { + "label": "reload_module", + "kind": 2, + "importPath": "SHOW.utils.misc", + "description": "SHOW.utils.misc", + "peekOfCode": "def reload_module(s:str='SHOW.smplx_dataset'):\n import imp\n eval(f'import {s} as target')\n imp.reload(locals()['target'])\ndef print_args(args:dict):\n print(\"################################ args ################################\")\n for k, v in args.__dict__.items():\n print(\"{0: <10}\\t{1: <30}\\t{2: <20}\".format(k, str(v), str(type(v))))\n print(\"########################################################################\")\ndef print_dict_losses(losses):", + "detail": "SHOW.utils.misc", + "documentation": {} + }, + { + "label": "print_args", + "kind": 2, + "importPath": "SHOW.utils.misc", + "description": "SHOW.utils.misc", + "peekOfCode": "def print_args(args:dict):\n print(\"################################ args ################################\")\n for k, v in args.__dict__.items():\n print(\"{0: <10}\\t{1: <30}\\t{2: <20}\".format(k, str(v), str(type(v))))\n print(\"########################################################################\")\ndef print_dict_losses(losses):\n return reduce(\n lambda a, b: a + f' {b}={round(losses[b].item(), 4)}', \n [\"\"] + list(losses.keys())\n )", + "detail": "SHOW.utils.misc", + "documentation": {} + }, + { + "label": "print_dict_losses", + "kind": 2, + "importPath": "SHOW.utils.misc", + "description": "SHOW.utils.misc", + "peekOfCode": "def print_dict_losses(losses):\n return reduce(\n lambda a, b: a + f' {b}={round(losses[b].item(), 4)}', \n [\"\"] + list(losses.keys())\n )\ndef platform_init():\n import platform\n if platform.system() == \"Linux\":\n os.environ['PYOPENGL_PLATFORM'] = 'egl'\n else:", + "detail": "SHOW.utils.misc", + "documentation": {} + }, + { + "label": "platform_init", + "kind": 2, + "importPath": "SHOW.utils.misc", + "description": "SHOW.utils.misc", + "peekOfCode": "def platform_init():\n import platform\n if platform.system() == \"Linux\":\n 
os.environ['PYOPENGL_PLATFORM'] = 'egl'\n else:\n if 'PYOPENGL_PLATFORM' in os.environ:\n os.environ.__delitem__('PYOPENGL_PLATFORM')\ndef work_seek_init(rank = 42):\n import torch\n import torch.backends.cudnn", + "detail": "SHOW.utils.misc", + "documentation": {} + }, + { + "label": "work_seek_init", + "kind": 2, + "importPath": "SHOW.utils.misc", + "description": "SHOW.utils.misc", + "peekOfCode": "def work_seek_init(rank = 42):\n import torch\n import torch.backends.cudnn\n import numpy as np\n os.environ[\"OPENCV_IO_ENABLE_OPENEXR\"] = \"1\"\n torch.manual_seed(rank)\n torch.cuda.manual_seed(rank)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n np.random.seed(rank)", + "detail": "SHOW.utils.misc", + "documentation": {} + }, + { + "label": "get_gpu_info", + "kind": 2, + "importPath": "SHOW.utils.misc", + "description": "SHOW.utils.misc", + "peekOfCode": "def get_gpu_info():\n try:\n import pynvml\n pynvml.nvmlInit()\n gpu_count= pynvml.nvmlDeviceGetCount()\n handle = pynvml.nvmlDeviceGetHandleByIndex(0)\n gpu_name=pynvml.nvmlDeviceGetName(handle)\n gpu_version=pynvml.nvmlSystemGetDriverVersion()\n info = pynvml.nvmlDeviceGetMemoryInfo(handle)\n B_to_MB=1024*1024", + "detail": "SHOW.utils.misc", + "documentation": {} + }, + { + "label": "get_machine_info", + "kind": 2, + "importPath": "SHOW.utils.misc", + "description": "SHOW.utils.misc", + "peekOfCode": "def get_machine_info():\n host_name=platform.node()\n gpu_info = get_gpu_info()\n machine_info=dict(\n host_name=host_name,\n gpu_info=gpu_info,\n )\n return machine_info", + "detail": "SHOW.utils.misc", + "documentation": {} + }, + { + "label": "ObjectFactory", + "kind": 6, + "importPath": "SHOW.utils.obj_factory", + "description": "SHOW.utils.obj_factory", + "peekOfCode": "class ObjectFactory:\n def add_to_parser(self, parser):\n raise NotImplementedError()\n def from_dict(self, arguments):\n raise NotImplementedError()\nclass EmptyFactory(ObjectFactory):\n \"\"\"An EmptyFactory simply returns the passed in object.\"\"\"\n def __init__(self, _object):\n self._object = _object\n def add_to_parser(self, parser):", + "detail": "SHOW.utils.obj_factory", + "documentation": {} + }, + { + "label": "EmptyFactory", + "kind": 6, + "importPath": "SHOW.utils.obj_factory", + "description": "SHOW.utils.obj_factory", + "peekOfCode": "class EmptyFactory(ObjectFactory):\n \"\"\"An EmptyFactory simply returns the passed in object.\"\"\"\n def __init__(self, _object):\n self._object = _object\n def add_to_parser(self, parser):\n pass\n def from_dict(self, arguments):\n return self._object\nclass FactoryList(ObjectFactory):\n def __init__(self, factories):", + "detail": "SHOW.utils.obj_factory", + "documentation": {} + }, + { + "label": "FactoryList", + "kind": 6, + "importPath": "SHOW.utils.obj_factory", + "description": "SHOW.utils.obj_factory", + "peekOfCode": "class FactoryList(ObjectFactory):\n def __init__(self, factories):\n self.factories = factories\n def add_to_parser(self, parser):\n for f in self.factories:\n f.add_to_parser(parser)\n def from_dict(self, arguments):\n return [\n f.from_dict(arguments)\n for f in self.factories", + "detail": "SHOW.utils.obj_factory", + "documentation": {} + }, + { + "label": "CallableFactory", + "kind": 6, + "importPath": "SHOW.utils.obj_factory", + "description": "SHOW.utils.obj_factory", + "peekOfCode": "class CallableFactory(ObjectFactory):\n \"\"\"CallableFactory creates an ObjectFactory instance from a callable using\n Python's reflection to define the arguments, 
argument types and default\n parameters.\"\"\"\n def __init__(self, func, namespace=\"\"):\n self._func = func\n self._signature = signature(self._func)\n self._namespace = namespace\n @property\n def arg_pattern(self):", + "detail": "SHOW.utils.obj_factory", + "documentation": {} + }, + { + "label": "run_openpose", + "kind": 2, + "importPath": "SHOW.utils.op_utils", + "description": "SHOW.utils.op_utils", + "peekOfCode": "def run_openpose(\n openpose_root_path,\n openpose_bin_path,\n img_dir,\n out_dir,\n video_out=None,\n img_out=None,\n low_res=False,\n limit_num=False\n):", + "detail": "SHOW.utils.op_utils", + "documentation": {} + }, + { + "label": "set_op_build_version", + "kind": 2, + "importPath": "SHOW.utils.op_utils", + "description": "SHOW.utils.op_utils", + "peekOfCode": "def set_op_build_version(cfg,version='build_Q6000'):\n cfg.openpose_bin_path=os.path.join(\n cfg.openpose_root_path,version,cfg.openpose_bin_path_str\n )\n # assert(Path(cfg.openpose_bin_path).exists())\n if not Path(cfg.openpose_bin_path).exists():\n logger.warning(f'openpose_bin_path not exist: {cfg.openpose_bin_path}')\ndef set_op_path_by_gpu_name(cfg,gpu_name='Q6000'):\n for k,v in cfg.gpu_name2op_build_map.items():\n if k in gpu_name:", + "detail": "SHOW.utils.op_utils", + "documentation": {} + }, + { + "label": "set_op_path_by_gpu_name", + "kind": 2, + "importPath": "SHOW.utils.op_utils", + "description": "SHOW.utils.op_utils", + "peekOfCode": "def set_op_path_by_gpu_name(cfg,gpu_name='Q6000'):\n for k,v in cfg.gpu_name2op_build_map.items():\n if k in gpu_name:\n set_op_build_version(cfg,v)\n return\n # raise RuntimeError \n logger.warning(f'gpu_name has not build version: {gpu_name}')", + "detail": "SHOW.utils.op_utils", + "documentation": {} + }, + { + "label": "get_file_size", + "kind": 2, + "importPath": "SHOW.utils.paths", + "description": "SHOW.utils.paths", + "peekOfCode": "def get_file_size(fpath):\n statinfo = os.stat(fpath)\n size = statinfo.st_size\n return size\ndef recursive_walk(rootdir):\n \"\"\"\n Yields:\n str: All files in rootdir, recursively.\n \"\"\"\n for r, dirs, files in os.walk(rootdir):", + "detail": "SHOW.utils.paths", + "documentation": {} + }, + { + "label": "recursive_walk", + "kind": 2, + "importPath": "SHOW.utils.paths", + "description": "SHOW.utils.paths", + "peekOfCode": "def recursive_walk(rootdir):\n \"\"\"\n Yields:\n str: All files in rootdir, recursively.\n \"\"\"\n for r, dirs, files in os.walk(rootdir):\n for f in files:\n yield os.path.join(r, f)\ndef glob_exts_in_path(path, img_ext=['png', 'jpg']):\n return reduce(", + "detail": "SHOW.utils.paths", + "documentation": {} + }, + { + "label": "glob_exts_in_path", + "kind": 2, + "importPath": "SHOW.utils.paths", + "description": "SHOW.utils.paths", + "peekOfCode": "def glob_exts_in_path(path, img_ext=['png', 'jpg']):\n return reduce(\n lambda before, ext: before+glob.glob(\n os.path.join(path, f'*.{ext}')\n ),\n [[]]+img_ext)\ndef find_full_impath_by_name(root, name):\n for ext in ['jpg', 'png', 'bmp', 'jpeg']:\n input_img = os.path.join(root, f'{name}.{ext}')\n if Path(input_img).exists():", + "detail": "SHOW.utils.paths", + "documentation": {} + }, + { + "label": "find_full_impath_by_name", + "kind": 2, + "importPath": "SHOW.utils.paths", + "description": "SHOW.utils.paths", + "peekOfCode": "def find_full_impath_by_name(root, name):\n for ext in ['jpg', 'png', 'bmp', 'jpeg']:\n input_img = os.path.join(root, f'{name}.{ext}')\n if Path(input_img).exists():\n return input_img\n return None\ndef 
files_num_in_dir(dir_name):\n if not Path(dir_name).exists():\n return -1\n return len(os.listdir(dir_name))", + "detail": "SHOW.utils.paths", + "documentation": {} + }, + { + "label": "files_num_in_dir", + "kind": 2, + "importPath": "SHOW.utils.paths", + "description": "SHOW.utils.paths", + "peekOfCode": "def files_num_in_dir(dir_name):\n if not Path(dir_name).exists():\n return -1\n return len(os.listdir(dir_name))\ndef ext_files_num_in_dir(dir_name, exts=['*.pkl', '*.pkl.empty']):\n if not Path(dir_name).exists():\n return -1\n all_list = []\n for ext in exts:\n all_list += glob.glob(os.path.join(dir_name, ext))", + "detail": "SHOW.utils.paths", + "documentation": {} + }, + { + "label": "ext_files_num_in_dir", + "kind": 2, + "importPath": "SHOW.utils.paths", + "description": "SHOW.utils.paths", + "peekOfCode": "def ext_files_num_in_dir(dir_name, exts=['*.pkl', '*.pkl.empty']):\n if not Path(dir_name).exists():\n return -1\n all_list = []\n for ext in exts:\n all_list += glob.glob(os.path.join(dir_name, ext))\n return len(all_list)\ndef img_files_num_in_dir(dir_name):\n if not Path(dir_name).exists():\n return -1", + "detail": "SHOW.utils.paths", + "documentation": {} + }, + { + "label": "img_files_num_in_dir", + "kind": 2, + "importPath": "SHOW.utils.paths", + "description": "SHOW.utils.paths", + "peekOfCode": "def img_files_num_in_dir(dir_name):\n if not Path(dir_name).exists():\n return -1\n i = glob.glob(os.path.join(dir_name, '*.jpg')) +\\\n glob.glob(os.path.join(dir_name, '*.png'))\n return len(i)\ndef is_empty_dir(dir_name):\n if not os.path.exists(dir_name):\n return 1\n return int(files_num_in_dir(dir_name) == 0)", + "detail": "SHOW.utils.paths", + "documentation": {} + }, + { + "label": "is_empty_dir", + "kind": 2, + "importPath": "SHOW.utils.paths", + "description": "SHOW.utils.paths", + "peekOfCode": "def is_empty_dir(dir_name):\n if not os.path.exists(dir_name):\n return 1\n return int(files_num_in_dir(dir_name) == 0)\ndef is_notexist_file(dir_name):\n return 0 if os.path.exists(dir_name) else 1\ndef purge_dir(target_dir):\n if os.path.exists(target_dir):\n if os.path.isfile(target_dir):\n os.remove(target_dir)", + "detail": "SHOW.utils.paths", + "documentation": {} + }, + { + "label": "is_notexist_file", + "kind": 2, + "importPath": "SHOW.utils.paths", + "description": "SHOW.utils.paths", + "peekOfCode": "def is_notexist_file(dir_name):\n return 0 if os.path.exists(dir_name) else 1\ndef purge_dir(target_dir):\n if os.path.exists(target_dir):\n if os.path.isfile(target_dir):\n os.remove(target_dir)\n else:\n shutil.rmtree(target_dir)\n # os.makedirs(target_dir, exist_ok=True)\ndef check_makedir(dir):", + "detail": "SHOW.utils.paths", + "documentation": {} + }, + { + "label": "purge_dir", + "kind": 2, + "importPath": "SHOW.utils.paths", + "description": "SHOW.utils.paths", + "peekOfCode": "def purge_dir(target_dir):\n if os.path.exists(target_dir):\n if os.path.isfile(target_dir):\n os.remove(target_dir)\n else:\n shutil.rmtree(target_dir)\n # os.makedirs(target_dir, exist_ok=True)\ndef check_makedir(dir):\n dir = osp.abspath(dir)\n root = osp.dirname(dir)", + "detail": "SHOW.utils.paths", + "documentation": {} + }, + { + "label": "check_makedir", + "kind": 2, + "importPath": "SHOW.utils.paths", + "description": "SHOW.utils.paths", + "peekOfCode": "def check_makedir(dir):\n dir = osp.abspath(dir)\n root = osp.dirname(dir)\n if not os.path.exists(root):\n os.makedirs(root, exist_ok=True)\ndef remake_dir(root):\n if os.path.exists(root):\n shutil.rmtree(root)\n 
os.makedirs(root, exist_ok=True)\ndef parse_abs_path(cureent_file_path, path):", + "detail": "SHOW.utils.paths", + "documentation": {} + }, + { + "label": "remake_dir", + "kind": 2, + "importPath": "SHOW.utils.paths", + "description": "SHOW.utils.paths", + "peekOfCode": "def remake_dir(root):\n if os.path.exists(root):\n shutil.rmtree(root)\n os.makedirs(root, exist_ok=True)\ndef parse_abs_path(cureent_file_path, path):\n return os.path.abspath(\n os.path.join(\n os.path.dirname(cureent_file_path), path)\n )\ndef from_rela_path(cureent_file_path, path):", + "detail": "SHOW.utils.paths", + "documentation": {} + }, + { + "label": "parse_abs_path", + "kind": 2, + "importPath": "SHOW.utils.paths", + "description": "SHOW.utils.paths", + "peekOfCode": "def parse_abs_path(cureent_file_path, path):\n return os.path.abspath(\n os.path.join(\n os.path.dirname(cureent_file_path), path)\n )\ndef from_rela_path(cureent_file_path, path):\n return mmcv.Config.fromfile(\n parse_abs_path(cureent_file_path, path)\n )", + "detail": "SHOW.utils.paths", + "documentation": {} + }, + { + "label": "from_rela_path", + "kind": 2, + "importPath": "SHOW.utils.paths", + "description": "SHOW.utils.paths", + "peekOfCode": "def from_rela_path(cureent_file_path, path):\n return mmcv.Config.fromfile(\n parse_abs_path(cureent_file_path, path)\n )", + "detail": "SHOW.utils.paths", + "documentation": {} + }, + { + "label": "View", + "kind": 6, + "importPath": "SHOW.utils.render", + "description": "SHOW.utils.render", + "peekOfCode": "class View(Enum):\n GROUND_TRUTH = 1\n COLOR_OVERLAY = 2\n SHAPE_OVERLAY = 4\n SHAPE = 8\n LANDMARKS = 16\n HEATMAP = 32\n DEPTH = 64\ndef render_shape(vertices, flame_faces, mesh_rasterizer, debug_renderer, cameras, white=True):\n # mesh_file=('./../data/head_template_mesh.obj')", + "detail": "SHOW.utils.render", + "documentation": {} + }, + { + "label": "render_shape", + "kind": 2, + "importPath": "SHOW.utils.render", + "description": "SHOW.utils.render", + "peekOfCode": "def render_shape(vertices, flame_faces, mesh_rasterizer, debug_renderer, cameras, white=True):\n # mesh_file=('./../data/head_template_mesh.obj')\n # flame_faces = load_obj(mesh_file)[1]\n # mesh_rasterizer: MeshRasterizer\n # debug_renderer: MeshRenderer\n # cameras: PerspectiveCameras\n B = vertices.shape[0]\n V = vertices.shape[1]\n faces = flame_faces.verts_idx.cuda()[None].repeat(B, 1, 1)\n if not white:", + "detail": "SHOW.utils.render", + "documentation": {} + }, + { + "label": "IterTimer", + "kind": 6, + "importPath": "SHOW.utils.timer", + "description": "SHOW.utils.timer", + "peekOfCode": "class IterTimer:\n def __init__(self, name='time', sync=True, enabled=True,print_block=True):\n self.ll=20\n self.name = name\n self.times = []\n self.timer = Timer(start=False)\n self.sync = sync\n self.enabled = enabled\n self.print_block=print_block\n def __enter__(self):", + "detail": "SHOW.utils.timer", + "documentation": {} + }, + { + "label": "IterTimers", + "kind": 6, + "importPath": "SHOW.utils.timer", + "description": "SHOW.utils.timer", + "peekOfCode": "class IterTimers(dict):\n def __init__(self, *args, **kwargs):\n super(IterTimers, self).__init__(*args, **kwargs)\n # self.register_list=[]\n def disable_all(self):\n for timer in self.values():\n timer.enabled = False\n def enable_all(self):\n for timer in self.values():\n timer.enabled = True", + "detail": "SHOW.utils.timer", + "documentation": {} + }, + { + "label": "tic", + "kind": 2, + "importPath": "SHOW.utils.timer", + "description": "SHOW.utils.timer", + 
"peekOfCode": "def tic():\n global start_time\n start_time = time.time()\n return start_time\ndef toc():\n if 'start_time' in globals():\n end_time = time.time()\n return end_time - start_time\n else:\n return None", + "detail": "SHOW.utils.timer", + "documentation": {} + }, + { + "label": "toc", + "kind": 2, + "importPath": "SHOW.utils.timer", + "description": "SHOW.utils.timer", + "peekOfCode": "def toc():\n if 'start_time' in globals():\n end_time = time.time()\n return end_time - start_time\n else:\n return None\n@contextlib.contextmanager\ndef test_time():\n st=time.time()\n yield True", + "detail": "SHOW.utils.timer", + "documentation": {} + }, + { + "label": "test_time", + "kind": 2, + "importPath": "SHOW.utils.timer", + "description": "SHOW.utils.timer", + "peekOfCode": "def test_time():\n st=time.time()\n yield True\n et=time.time()\n logger.info(f'used time: {et-st}')\ndef timeit(func):\n def _warp(*args, **kwargs):\n start_time = time.time()\n result = func(*args, **kwargs)\n elastic_time = time.time() - start_time", + "detail": "SHOW.utils.timer", + "documentation": {} + }, + { + "label": "timeit", + "kind": 2, + "importPath": "SHOW.utils.timer", + "description": "SHOW.utils.timer", + "peekOfCode": "def timeit(func):\n def _warp(*args, **kwargs):\n start_time = time.time()\n result = func(*args, **kwargs)\n elastic_time = time.time() - start_time\n print(\"The execution time of the function '%s' is %.6fs\\n\" % (\n func.__name__, elastic_time))\n return result\n return _warp\nclass IterTimer:", + "detail": "SHOW.utils.timer", + "documentation": {} + }, + { + "label": "default_timers", + "kind": 5, + "importPath": "SHOW.utils.timer", + "description": "SHOW.utils.timer", + "peekOfCode": "default_timers = IterTimers()", + "detail": "SHOW.utils.timer", + "documentation": {} + }, + { + "label": "video", + "kind": 2, + "importPath": "SHOW.utils.video", + "description": "SHOW.utils.video", + "peekOfCode": "def video():\n for actor in tqdm(filter(os.path.isdir, glob(f'./test_results/*_rgb_*'))):\n os.system(f'ffmpeg -y -framerate 30 -pattern_type glob -i \\'{actor}/video/*.jpg\\' -c:v libx264 {actor}.mp4')\n@logger.catch\ndef video_to_images(\n vid_file,\n prefix='',\n start=0,\n duration=10,\n img_folder=None,", + "detail": "SHOW.utils.video", + "documentation": {} + }, + { + "label": "video_to_images", + "kind": 2, + "importPath": "SHOW.utils.video", + "description": "SHOW.utils.video", + "peekOfCode": "def video_to_images(\n vid_file,\n prefix='',\n start=0,\n duration=10,\n img_folder=None,\n return_info=False,\n fps=15\n):\n '''", + "detail": "SHOW.utils.video", + "documentation": {} + }, + { + "label": "frame_to_video", + "kind": 2, + "importPath": "SHOW.utils.video", + "description": "SHOW.utils.video", + "peekOfCode": "def frame_to_video(\n image_path,\n video_name,\n fps=9,\n cut=-1,\n size = (960,540)\n):\n # fps=15,\n # size = (960,540)\n # size = (480,720)", + "detail": "SHOW.utils.video", + "documentation": {} + }, + { + "label": "images_to_sorted_images", + "kind": 2, + "importPath": "SHOW.utils.video", + "description": "SHOW.utils.video", + "peekOfCode": "def images_to_sorted_images(input_folder, output_folder, img_format='%06d'):\n \"\"\"Copy and rename a folder of images into a new folder following the\n `img_format`.\n Args:\n input_folder (str): input folder.\n output_folder (str): output folder.\n img_format (str, optional): image format name, do not need extension.\n Defaults to '%06d'.\n Returns:\n str: image format of the rename images.", + "detail": 
"SHOW.utils.video", + "documentation": {} + }, + { + "label": "images_to_video", + "kind": 2, + "importPath": "SHOW.utils.video", + "description": "SHOW.utils.video", + "peekOfCode": "def images_to_video(input_folder: str,\n output_path: str,\n remove_raw_file: bool = False,\n img_format: str = '%06d.png',\n fps: Union[int, float] = 30,\n resolution: Optional[Union[Tuple[int, int],\n Tuple[float, float]]] = None,\n start: int = 0,\n end: Optional[int] = None,\n disable_log: bool = False) -> None:", + "detail": "SHOW.utils.video", + "documentation": {} + }, + { + "label": "deeplab_seg", + "kind": 6, + "importPath": "SHOW.video_filter.deeplab_seg", + "description": "SHOW.video_filter.deeplab_seg", + "peekOfCode": "class deeplab_seg(object):\n def __init__(self):\n self.device = torch.device(\n 'cuda:0') if torch.cuda.is_available() else torch.device('cpu')\n self.deeplab_model = torch.hub.load('pytorch/vision:v0.6.0',\n 'deeplabv3_resnet101',\n pretrained=True).to(self.device)\n self.deeplab_model.eval()\n self.deeplab_preprocess = transforms.Compose([\n transforms.ToTensor(),", + "detail": "SHOW.video_filter.deeplab_seg", + "documentation": {} + }, + { + "label": "MMPoseAnalyzer", + "kind": 6, + "importPath": "SHOW.video_filter.MMposer", + "description": "SHOW.video_filter.MMposer", + "peekOfCode": "class MMPoseAnalyzer():\n def __init__(self):\n from mmpose.apis import (inference_top_down_pose_model, init_pose_model,\n vis_pose_result, process_mmdet_results)\n from mmdet.apis import inference_detector, init_detector\n import os\n mmpose_root = os.environ.get('mmpose_root')\n pose_config = os.path.join(mmpose_root,'configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/hrnet_w48_coco_256x192.py')\n det_config = os.path.join(mmpose_root,'demo/mmdetection_cfg/faster_rcnn_r50_fpn_coco.py')\n pose_checkpoint = 'https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth'", + "detail": "SHOW.video_filter.MMposer", + "documentation": {} + }, + { + "label": "sitting_pose", + "kind": 5, + "importPath": "SHOW.constants", + "description": "SHOW.constants", + "peekOfCode": "sitting_pose = [\n 0.0, 0.0, 0.0, -1.1826512813568115, 0.23866955935955048, 0.15146760642528534, -1.2604516744613647,\n -0.3160211145877838, -0.1603458970785141, 0.0, 0.0, 0.0, 1.1654603481292725, 0.0, 0.0,\n 1.2521806955337524, 0.041598282754421234, -0.06312154978513718, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,", + "detail": "SHOW.constants", + "documentation": {} + }, + { + "label": "lmk2d_to_bbox", + "kind": 2, + "importPath": "SHOW.image", + "description": "SHOW.image", + "peekOfCode": "def lmk2d_to_bbox(lmks,h,w,bb_scale=2.0):\n # lmks:68,2\n x_min, x_max, y_min, y_max = np.min(lmks[:, 0]), np.max(lmks[:, 0]), np.min(lmks[:, 1]), np.max(lmks[:, 1])\n x_center, y_center = int((x_max + x_min) / 2.0), int((y_max + y_min) / 2.0)\n size = int(bb_scale * 2 * max(x_center - x_min, y_center - y_min))\n xb_min, xb_max, 
yb_min, yb_max = max(x_center - size // 2, 0), min(x_center + size // 2, w - 1), \\\n max(y_center - size // 2, 0), min(y_center + size // 2, h - 1)\n yb_max = min(yb_max, h-1)\n xb_max = min(xb_max, w-1)\n yb_min = max(yb_min, 0)", + "detail": "SHOW.image", + "documentation": {} + }, + { + "label": "landmark_crop", + "kind": 2, + "importPath": "SHOW.image", + "description": "SHOW.image", + "peekOfCode": "def landmark_crop(image, lmks, dense_lmk, bb_scale=2.0):\n h, w = image.shape[1:]\n xb_min,yb_min, xb_max,yb_max=lmk2d_to_bbox(lmks,h,w,bb_scale=bb_scale)\n if (xb_max - xb_min) % 2 != 0:\n xb_min += 1\n if (yb_max - yb_min) % 2 != 0:\n yb_min += 1\n cropped_image = crop_image(image, xb_min, yb_min, xb_max, yb_max)\n cropped_image_lmks = np.vstack((lmks[:, 0] - xb_min, lmks[:, 1] - yb_min)).T\n cropped_dense_lmk = np.vstack((dense_lmk[:, 0] - xb_min, dense_lmk[:, 1] - yb_min)).T", + "detail": "SHOW.image", + "documentation": {} + }, + { + "label": "crop_image", + "kind": 2, + "importPath": "SHOW.image", + "description": "SHOW.image", + "peekOfCode": "def crop_image(image, x_min, y_min, x_max, y_max):\n # image:C,H,W or c,y,x\n return image[:, max(y_min, 0):min(y_max, image.shape[1] - 1), max(x_min, 0):min(x_max, image.shape[2] - 1)]\ndef squarefiy(image, lmk, dense_lmk, size=512):\n _, h, w = image.shape\n px = py = 0\n max_wh = max(w, h)\n if w != h:\n px = int((max_wh - w) / 2)\n py = int((max_wh - h) / 2)", + "detail": "SHOW.image", + "documentation": {} + }, + { + "label": "squarefiy", + "kind": 2, + "importPath": "SHOW.image", + "description": "SHOW.image", + "peekOfCode": "def squarefiy(image, lmk, dense_lmk, size=512):\n _, h, w = image.shape\n px = py = 0\n max_wh = max(w, h)\n if w != h:\n px = int((max_wh - w) / 2)\n py = int((max_wh - h) / 2)\n image = F.pad(image, (px, px, py, py), 'constant', 0)\n img = F.interpolate(image[None], (size, size), mode='bilinear', align_corners=False)[0]\n if False:", + "detail": "SHOW.image", + "documentation": {} + }, + { + "label": "tensor2im", + "kind": 2, + "importPath": "SHOW.image", + "description": "SHOW.image", + "peekOfCode": "def tensor2im(input_image, imtype=np.uint8):\n if isinstance(input_image, torch.Tensor):\n input_image = torch.clamp(input_image, -1.0, 1.0)\n image_tensor = input_image.data\n else:\n return input_image.reshape(3, 512, 512).transpose()\n image_numpy = image_tensor[0].cpu().float().numpy()\n if image_numpy.shape[0] == 1:\n image_numpy = np.tile(image_numpy, (3, 1, 1))\n image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0", + "detail": "SHOW.image", + "documentation": {} + }, + { + "label": "get_heatmap", + "kind": 2, + "importPath": "SHOW.image", + "description": "SHOW.image", + "peekOfCode": "def get_heatmap( values):\n import cv2\n l2 = tensor2im(values)\n l2 = cv2.cvtColor(l2, cv2.COLOR_RGB2BGR)\n l2 = cv2.normalize(l2, None, 0, 255, cv2.NORM_MINMAX)\n heatmap = cv2.applyColorMap(l2, cv2.COLORMAP_JET)\n heatmap = cv2.cvtColor(cv2.addWeighted(heatmap, 0.75, l2, 0.25, 0).astype(np.uint8), cv2.COLOR_BGR2RGB) / 255.\n heatmap = torch.from_numpy(heatmap).permute(2, 0, 1)\n return heatmap\ndef crop_image_bbox(image, lmks, dense_lmk, bbox):", + "detail": "SHOW.image", + "documentation": {} + }, + { + "label": "crop_image_bbox", + "kind": 2, + "importPath": "SHOW.image", + "description": "SHOW.image", + "peekOfCode": "def crop_image_bbox(image, lmks, dense_lmk, bbox):\n xb_min = bbox['xb_min']\n yb_min = bbox['yb_min']\n xb_max = bbox['xb_max']\n yb_max = bbox['yb_max']\n cropped = crop_image(image, 
xb_min, yb_min, xb_max, yb_max)\n cropped_image_lmks = np.vstack((lmks[:, 0] - xb_min, lmks[:, 1] - yb_min)).T\n cropped_image_dense_lmk = np.vstack((dense_lmk[:, 0] - xb_min, dense_lmk[:, 1] - yb_min)).T\n return cropped_image_lmks, cropped_image_dense_lmk, cropped", + "detail": "SHOW.image", + "documentation": {} + }, + { + "label": "get_possible_person", + "kind": 2, + "importPath": "SHOW.load_assets", + "description": "SHOW.load_assets", + "peekOfCode": "def get_possible_person(poser, template_im):\n def is_small_person(bbox, org_im: np.ndarray):\n img_height, img_width, _ = org_im.shape\n box_height = bbox[3] - bbox[1]\n box_width = bbox[2] - bbox[0]\n is_small_person = 0\n if ((box_height / img_height) < 0.4 or (box_width / img_width) < 0.3):\n is_small_person = 1\n return is_small_person\n def is_kpts_whole(kpts):", + "detail": "SHOW.load_assets", + "documentation": {} + }, + { + "label": "read_shape", + "kind": 2, + "importPath": "SHOW.load_assets", + "description": "SHOW.load_assets", + "peekOfCode": "def read_shape(speaker_ply_file_path):\n # return: (5023, 3)\n ply_data = PlyData.read(speaker_ply_file_path)\n speaker_shape = np.stack([\n ply_data['vertex']['x'], ply_data['vertex']['y'],\n ply_data['vertex']['z']\n ], 1)\n return speaker_shape\ndef load_assets(config, face_ider=None, template_im=None, **kwargs):\n assets_root = config.assets_root", + "detail": "SHOW.load_assets", + "documentation": {} + }, + { + "label": "load_assets", + "kind": 2, + "importPath": "SHOW.load_assets", + "description": "SHOW.load_assets", + "peekOfCode": "def load_assets(config, face_ider=None, template_im=None, **kwargs):\n assets_root = config.assets_root\n dtype = config.dtype\n device = config.device\n ret_dict = EasyDict({})\n shape_res_factory_dir = f'{assets_root}/id_pic/shape_factory2'\n emb_res_factory_dir = f'{assets_root}/id_pic/{config.ider_cfg.npy_folder_name}'\n emb_res_factory_path = f'{assets_root}/id_pic/emb_factory2.pkl'\n emb_res_factory_is_changed_flag = False\n if Path(emb_res_factory_path).exists():", + "detail": "SHOW.load_assets", + "documentation": {} + }, + { + "label": "JointMapper", + "kind": 6, + "importPath": "SHOW.load_models", + "description": "SHOW.load_models", + "peekOfCode": "class JointMapper(nn.Module):\n def __init__(self, joint_maps=None):\n super().__init__()\n self.register_buffer('joint_maps',\n torch.tensor(joint_maps, dtype=torch.long))\n def forward(self, joints, **kwargs):\n return torch.index_select(joints, 1, self.joint_maps)\ndef load_save_pkl(ours_pkl_file_path, device='cuda'):\n data = mmcv.load(ours_pkl_file_path)[0]\n for key in data.keys():", + "detail": "SHOW.load_models", + "documentation": {} + }, + { + "label": "load_save_pkl", + "kind": 2, + "importPath": "SHOW.load_models", + "description": "SHOW.load_models", + "peekOfCode": "def load_save_pkl(ours_pkl_file_path, device='cuda'):\n data = mmcv.load(ours_pkl_file_path)[0]\n for key in data.keys():\n if isinstance(data[key], np.ndarray):\n data[key] = torch.from_numpy(data[key]).to(device)\n data['batch_size'] = data['expression'].shape[0]\n return data\ndef load_smplx_model(device='cuda', **kwargs):\n body_model = smplx.create(joint_mapper=JointMapper(\n op_dataset.smpl_to_openpose()),", + "detail": "SHOW.load_models", + "documentation": {} + }, + { + "label": "load_smplx_model", + "kind": 2, + "importPath": "SHOW.load_models", + "description": "SHOW.load_models", + "peekOfCode": "def load_smplx_model(device='cuda', **kwargs):\n body_model = smplx.create(joint_mapper=JointMapper(\n 
op_dataset.smpl_to_openpose()),\n **DEFAULT_SMPLX_CONFIG,\n **kwargs).to(device=device)\n return body_model\ndef load_vposer_model(device='cuda', vposer_ckpt=''):\n vposer_ckpt = osp.expandvars(vposer_ckpt)\n vposer, _ = load_vposer(vposer_ckpt, vp_model='snapshot')\n vposer = vposer.to(device=device)", + "detail": "SHOW.load_models", + "documentation": {} + }, + { + "label": "load_vposer_model", + "kind": 2, + "importPath": "SHOW.load_models", + "description": "SHOW.load_models", + "peekOfCode": "def load_vposer_model(device='cuda', vposer_ckpt=''):\n vposer_ckpt = osp.expandvars(vposer_ckpt)\n vposer, _ = load_vposer(vposer_ckpt, vp_model='snapshot')\n vposer = vposer.to(device=device)\n vposer.eval()\n return vposer", + "detail": "SHOW.load_models", + "documentation": {} + }, + { + "label": "DEFAULT_SMPLX_CONFIG2", + "kind": 5, + "importPath": "SHOW.load_models", + "description": "SHOW.load_models", + "peekOfCode": "DEFAULT_SMPLX_CONFIG2 = dict(\n dtype=torch.float32,\n num_betas=200,\n num_expression_coeffs=50,\n num_pca_comps=12,\n flat_hand_mean=True,\n use_pca=True,\n model_type='smplx',\n use_face_contour=True,\n)", + "detail": "SHOW.load_models", + "documentation": {} + }, + { + "label": "DEFAULT_SMPLX_CONFIG", + "kind": 5, + "importPath": "SHOW.load_models", + "description": "SHOW.load_models", + "peekOfCode": "DEFAULT_SMPLX_CONFIG = dict(\n create_global_orient=True,\n create_body_pose=True,\n create_betas=True,\n create_left_hand_pose=True,\n create_right_hand_pose=True,\n create_expression=True,\n create_jaw_pose=True,\n create_leye_pose=True,\n create_reye_pose=True,", + "detail": "SHOW.load_models", + "documentation": {} + }, + { + "label": "Struct", + "kind": 6, + "importPath": "SHOW.masking", + "description": "SHOW.masking", + "peekOfCode": "class Struct(object):\n def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)\nclass Masking(nn.Module):\n def __init__(self):\n dir = os.path.abspath(os.path.join(os.path.dirname(__file__),'..'))\n super(Masking, self).__init__()\n with open(f'{dir}/../data/FLAME_masks.pkl', 'rb') as f:\n ss = pickle.load(f, encoding='latin1')", + "detail": "SHOW.masking", + "documentation": {} + }, + { + "label": "Masking", + "kind": 6, + "importPath": "SHOW.masking", + "description": "SHOW.masking", + "peekOfCode": "class Masking(nn.Module):\n def __init__(self):\n dir = os.path.abspath(os.path.join(os.path.dirname(__file__),'..'))\n super(Masking, self).__init__()\n with open(f'{dir}/../data/FLAME_masks.pkl', 'rb') as f:\n ss = pickle.load(f, encoding='latin1')\n self.masks = Struct(**ss)\n with open(f'{dir}/../data/generic_model.pkl', 'rb') as f:\n ss = pickle.load(f, encoding='latin1')\n flame_model = Struct(**ss)", + "detail": "SHOW.masking", + "documentation": {} + }, + { + "label": "to_tensor", + "kind": 2, + "importPath": "SHOW.masking", + "description": "SHOW.masking", + "peekOfCode": "def to_tensor(array, dtype=torch.float32):\n if 'torch.tensor' not in str(type(array)):\n return torch.tensor(array, dtype=dtype)\n return array\ndef to_np(array, dtype=np.float32):\n if 'scipy.sparse' in str(type(array)):\n array = array.todense()\n return np.array(array, dtype=dtype)\nclass Struct(object):\n def __init__(self, **kwargs):", + "detail": "SHOW.masking", + "documentation": {} + }, + { + "label": "to_np", + "kind": 2, + "importPath": "SHOW.masking", + "description": "SHOW.masking", + "peekOfCode": "def to_np(array, dtype=np.float32):\n if 'scipy.sparse' in str(type(array)):\n array = 
array.todense()\n return np.array(array, dtype=dtype)\nclass Struct(object):\n def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)\nclass Masking(nn.Module):\n def __init__(self):", + "detail": "SHOW.masking", + "documentation": {} + }, + { + "label": "parse_config_to_weight_dict", + "kind": 2, + "importPath": "SHOW.parse_weight", + "description": "SHOW.parse_weight", + "peekOfCode": "def parse_config_to_weight_dict(config)->dict:\n opt_weights_dict=config.opt_weights_dict\n if config.use_pre_compute_betas:\n opt_weights_dict.update(config.pre_compute_betas_weight)\n len_set=[len(val) for _,val in opt_weights_dict.items()]\n # assert(len(set(len_set))==1)\n max_weight_len=max(len_set)\n for k,v in opt_weights_dict.items():\n assert isinstance(v,list), \"Invalid weights type\"\n if len(v) < max_weight_len:", + "detail": "SHOW.parse_weight", + "documentation": {} + }, + { + "label": "parse_weight_dict_to_list", + "kind": 2, + "importPath": "SHOW.parse_weight", + "description": "SHOW.parse_weight", + "peekOfCode": "def parse_weight_dict_to_list(opt_weights_dict,device,dtype)->list:\n keys = opt_weights_dict.keys()\n opt_weights = [dict(zip(keys, vals)) for vals in\n zip(*(opt_weights_dict[k] for k in keys))]\n for weight_list in opt_weights:\n for key in weight_list:\n weight_list[key] = torch.tensor(\n weight_list[key], device=device, dtype=dtype)\n return opt_weights\ndef parse_weight(config,device,dtype)->list:", + "detail": "SHOW.parse_weight", + "documentation": {} + }, + { + "label": "parse_weight", + "kind": 2, + "importPath": "SHOW.parse_weight", + "description": "SHOW.parse_weight", + "peekOfCode": "def parse_weight(config,device,dtype)->list:\n ret=parse_config_to_weight_dict(config)\n ret=parse_weight_dict_to_list(ret,device,dtype)\n return ret", + "detail": "SHOW.parse_weight", + "documentation": {} + }, + { + "label": "L2Prior", + "kind": 6, + "importPath": "SHOW.prior", + "description": "SHOW.prior", + "peekOfCode": "class L2Prior(nn.Module):\n def __init__(self,**kwargs):\n super().__init__()\n def forward(self, module_input):\n return torch.sum(module_input.pow(2))\n@PRIOR.register_module()\nclass SMPLifyAnglePrior(nn.Module):\n def __init__(self, dtype=torch.float32, **kwargs):\n super().__init__()\n # 55: left elbow, 90deg bend at -np.pi/2", + "detail": "SHOW.prior", + "documentation": {} + }, + { + "label": "SMPLifyAnglePrior", + "kind": 6, + "importPath": "SHOW.prior", + "description": "SHOW.prior", + "peekOfCode": "class SMPLifyAnglePrior(nn.Module):\n def __init__(self, dtype=torch.float32, **kwargs):\n super().__init__()\n # 55: left elbow, 90deg bend at -np.pi/2\n # 58: right elbow, 90deg bend at np.pi/2\n # 12: left knee, 90deg bend at np.pi/2\n # 15: right knee, 90deg bend at np.pi/2\n angle_prior_idxs = torch.tensor([55, 58, 12, 15], dtype=torch.long)\n self.register_buffer('angle_prior_idxs', angle_prior_idxs)\n angle_prior_signs = torch.tensor([1, -1, -1, -1], dtype=dtype)", + "detail": "SHOW.prior", + "documentation": {} + }, + { + "label": "MaxMixturePrior", + "kind": 6, + "importPath": "SHOW.prior", + "description": "SHOW.prior", + "peekOfCode": "class MaxMixturePrior(nn.Module):\n def __init__(\n self, \n prior_folder='prior',\n num_gaussians=6, \n dtype=torch.float32, \n epsilon=1e-16,\n use_merged=True,\n **kwargs\n ):", + "detail": "SHOW.prior", + "documentation": {} + }, + { + "label": "build_prior", + "kind": 2, + "importPath": "SHOW.prior", + "description": "SHOW.prior", + "peekOfCode": "def build_prior(cfg):\n return PRIOR.build(cfg)\n@PRIOR.register_module()\nclass L2Prior(nn.Module):\n def __init__(self,**kwargs):\n super().__init__()\n def forward(self, module_input):\n return torch.sum(module_input.pow(2))\n@PRIOR.register_module()\nclass SMPLifyAnglePrior(nn.Module):", + "detail": "SHOW.prior",
"documentation": {} + }, + { + "label": "PRIOR", + "kind": 5, + "importPath": "SHOW.prior", + "description": "SHOW.prior", + "peekOfCode": "PRIOR = Registry('PRIOR')\ndef build_prior(cfg):\n return PRIOR.build(cfg)\n@PRIOR.register_module()\nclass L2Prior(nn.Module):\n def __init__(self,**kwargs):\n super().__init__()\n def forward(self, module_input):\n return torch.sum(module_input.pow(2))\n@PRIOR.register_module()", + "detail": "SHOW.prior", + "documentation": {} + }, + { + "label": "Renderer", + "kind": 6, + "importPath": "SHOW.renderer", + "description": "SHOW.renderer", + "peekOfCode": "class Renderer(nn.Module):\n def __init__(self, image_size, obj_filename, uv_size=512, flip=False):\n super(Renderer, self).__init__()\n self.image_size = image_size\n self.uv_size = uv_size\n verts, faces, aux = load_obj(obj_filename)\n uvcoords = aux.verts_uvs[None, ...] # (N, V, 2)\n uvfaces = faces.textures_idx[None, ...] # (N, F, 3)\n faces = faces.verts_idx[None, ...]\n mask = torch.from_numpy(imread(parse_abs_path(__file__,'../../data/uv_mask_eyes.jpg')) / 255.).permute(2, 0, 1).cuda()[0:3, :, :]", + "detail": "SHOW.renderer", + "documentation": {} + }, + { + "label": "apply_gamma", + "kind": 2, + "importPath": "SHOW.renderer", + "description": "SHOW.renderer", + "peekOfCode": "def apply_gamma(rgb, gamma=\"srgb\"):\n if gamma == \"srgb\":\n T = 0.0031308\n rgb1 = torch.max(rgb, rgb.new_tensor(T))\n return torch.where(rgb < T, 12.92 * rgb, (1.055 * torch.pow(torch.abs(rgb1), 1 / 2.4) - 0.055))\n elif gamma is None:\n return rgb\n else:\n return torch.pow(torch.max(rgb, rgb.new_tensor(0.0)), 1.0 / gamma)\ndef remove_gamma(rgb, gamma=\"srgb\"):", + "detail": "SHOW.renderer", + "documentation": {} + }, + { + "label": "remove_gamma", + "kind": 2, + "importPath": "SHOW.renderer", + "description": "SHOW.renderer", + "peekOfCode": "def remove_gamma(rgb, gamma=\"srgb\"):\n if gamma == \"srgb\":\n T = 0.04045\n rgb1 = torch.max(rgb, rgb.new_tensor(T))\n return torch.where(rgb < T, rgb / 12.92, torch.pow(torch.abs(rgb1 + 0.055) / 1.055, 2.4))\n elif gamma is None:\n return rgb\n else:\n res = torch.pow(torch.max(rgb, rgb.new_tensor(0.0)), gamma) + torch.min(rgb, rgb.new_tensor(0.0))\n return res", + "detail": "SHOW.renderer", + "documentation": {} + }, + { + "label": "sky", + "kind": 5, + "importPath": "SHOW.renderer", + "description": "SHOW.renderer", + "peekOfCode": "sky = torch.from_numpy(np.array([80, 140, 200]) / 255.).cuda()\ndef apply_gamma(rgb, gamma=\"srgb\"):\n if gamma == \"srgb\":\n T = 0.0031308\n rgb1 = torch.max(rgb, rgb.new_tensor(T))\n return torch.where(rgb < T, 12.92 * rgb, (1.055 * torch.pow(torch.abs(rgb1), 1 / 2.4) - 0.055))\n elif gamma is None:\n return rgb\n else:\n return torch.pow(torch.max(rgb, rgb.new_tensor(0.0)), 1.0 / gamma)", + "detail": "SHOW.renderer", + "documentation": {} + }, + { + "label": "save_one_results", + "kind": 2, + "importPath": "SHOW.save_results", + "description": "SHOW.save_results", + "peekOfCode": "def save_one_results(\n vertices,faces,\n img_size,#(height,width)\n center,#(cx,cy)\n focal_length,#(focalx,focaly)\n camera_pose,#K:(4,4)\n meta_data={},\n color_type='sky',\n input_renderer=None,\n):", + "detail": "SHOW.save_results", + "documentation": {} + }, + { + "label": "colors_dict", + "kind": 5, + "importPath": "SHOW.save_results", + "description": "SHOW.save_results", + "peekOfCode": "colors_dict = {\n 'red': np.array([0.5, 0.2, 0.2]),\n 'pink': np.array([0.7, 0.5, 0.5]),\n 'neutral': np.array([0.7, 0.7, 0.6]),\n # 'purple': np.array([0.5, 0.5, 
0.7]),\n 'purple': np.array([0.55, 0.4, 0.9]),\n 'green': np.array([0.5, 0.55, 0.3]),\n 'sky': np.array([0.3, 0.5, 0.55]),\n 'white': np.array([1.0, 0.98, 0.94]),\n }", + "detail": "SHOW.save_results", + "documentation": {} + }, + { + "label": "save_tracker", + "kind": 2, + "importPath": "SHOW.save_tracker", + "description": "SHOW.save_tracker", + "peekOfCode": "def save_tracker(\n img,valid_bool,valid_bs,\n ops,vertices,cameras,image_lmks,proj_lmks,\n flame_faces,mesh_rasterizer,debug_renderer,\n save_callback,\n):\n with torch.no_grad():\n images=img[valid_bool.bool(),...]\n mask = SHOW.utils.parse_mask(ops)\n gt_images = images", + "detail": "SHOW.save_tracker", + "documentation": {} + }, + { + "label": "ImagesDataset", + "kind": 6, + "importPath": "SHOW.smplx_dataset", + "description": "SHOW.smplx_dataset", + "peekOfCode": "class ImagesDataset(Dataset):\n def __init__(\n self,\n config,\n start_frame=0,\n bbox=None,\n person_face_emb: np.ndarray = None,\n face_ider=None,\n face_detector=None,\n face_detector_mediapipe=None,", + "detail": "SHOW.smplx_dataset", + "documentation": {} + }, + { + "label": "TrackerRasterizer", + "kind": 6, + "importPath": "SHOW.tracker_rasterizer", + "description": "SHOW.tracker_rasterizer", + "peekOfCode": "class TrackerRasterizer(MeshRasterizer):\n def __init__(self, image_size, cameras) -> None:\n settings = RasterizationSettings()\n settings.image_size = (image_size, image_size)\n settings.perspective_correct = True\n settings.cull_backfaces = True\n super().__init__(cameras, settings)\n self.reset()\n def reset(self):\n self.bary_coords = None", + "detail": "SHOW.tracker_rasterizer", + "documentation": {} + }, + { + "label": "RadToDeg", + "kind": 6, + "importPath": "conversions", + "description": "conversions", + "peekOfCode": "class RadToDeg(nn.Module):\n r\"\"\"Creates an object that converts angles from radians to degrees.\n Args:\n tensor (Tensor): Tensor of arbitrary shape.\n Returns:\n Tensor: Tensor with same shape as input.\n Examples::\n >>> input = tgm.pi * torch.rand(1, 3, 3)\n >>> output = tgm.RadToDeg()(input)\n \"\"\"", + "detail": "conversions", + "documentation": {} + }, + { + "label": "DegToRad", + "kind": 6, + "importPath": "conversions", + "description": "conversions", + "peekOfCode": "class DegToRad(nn.Module):\n r\"\"\"Function that converts angles from degrees to radians.\n Args:\n tensor (Tensor): Tensor of arbitrary shape.\n Returns:\n Tensor: Tensor with same shape as input.\n Examples::\n >>> input = 360. 
* torch.rand(1, 3, 3)\n >>> output = tgm.DegToRad()(input)\n \"\"\"", + "detail": "conversions", + "documentation": {} + }, + { + "label": "ConvertPointsFromHomogeneous", + "kind": 6, + "importPath": "conversions", + "description": "conversions", + "peekOfCode": "class ConvertPointsFromHomogeneous(nn.Module):\n r\"\"\"Creates a transformation that converts points from homogeneous to\n Euclidean space.\n Args:\n points (Tensor): tensor of N-dimensional points.\n Returns:\n Tensor: tensor of N-1-dimensional points.\n Shape:\n - Input: :math:`(B, D, N)` or :math:`(D, N)`\n - Output: :math:`(B, D, N + 1)` or :math:`(D, N + 1)`", + "detail": "conversions", + "documentation": {} + }, + { + "label": "ConvertPointsToHomogeneous", + "kind": 6, + "importPath": "conversions", + "description": "conversions", + "peekOfCode": "class ConvertPointsToHomogeneous(nn.Module):\n r\"\"\"Creates a transformation to convert points from Euclidean to\n homogeneous space.\n Args:\n points (Tensor): tensor of N-dimensional points.\n Returns:\n Tensor: tensor of N+1-dimensional points.\n Shape:\n - Input: :math:`(B, D, N)` or :math:`(D, N)`\n - Output: :math:`(B, D, N + 1)` or :math:`(D, N + 1)`", + "detail": "conversions", + "documentation": {} + }, + { + "label": "rad2deg", + "kind": 2, + "importPath": "conversions", + "description": "conversions", + "peekOfCode": "def rad2deg(tensor):\n r\"\"\"Function that converts angles from radians to degrees.\n See :class:`~torchgeometry.RadToDeg` for details.\n Args:\n tensor (Tensor): Tensor of arbitrary shape.\n Returns:\n Tensor: Tensor with same shape as input.\n Example:\n >>> input = tgm.pi * torch.rand(1, 3, 3)\n >>> output = tgm.rad2deg(input)", + "detail": "conversions", + "documentation": {} + }, + { + "label": "deg2rad", + "kind": 2, + "importPath": "conversions", + "description": "conversions", + "peekOfCode": "def deg2rad(tensor):\n r\"\"\"Function that converts angles from degrees to radians.\n See :class:`~torchgeometry.DegToRad` for details.\n Args:\n tensor (Tensor): Tensor of arbitrary shape.\n Returns:\n Tensor: Tensor with same shape as input.\n Examples::\n >>> input = 360. * torch.rand(1, 3, 3)\n >>> output = tgm.deg2rad(input)", + "detail": "conversions", + "documentation": {} + }, + { + "label": "convert_points_from_homogeneous", + "kind": 2, + "importPath": "conversions", + "description": "conversions", + "peekOfCode": "def convert_points_from_homogeneous(points):\n r\"\"\"Function that converts points from homogeneous to Euclidean space.\n See :class:`~torchgeometry.ConvertPointsFromHomogeneous` for details.\n Examples::\n >>> input = torch.rand(2, 4, 3) # BxNx3\n >>> output = tgm.convert_points_from_homogeneous(input) # BxNx2\n \"\"\"\n if not torch.is_tensor(points):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(\n type(points)))", + "detail": "conversions", + "documentation": {} + }, + { + "label": "convert_points_to_homogeneous", + "kind": 2, + "importPath": "conversions", + "description": "conversions", + "peekOfCode": "def convert_points_to_homogeneous(points):\n r\"\"\"Function that converts points from Euclidean to homogeneous space.\n See :class:`~torchgeometry.ConvertPointsToHomogeneous` for details.\n Examples::\n >>> input = torch.rand(2, 4, 3) # BxNx3\n >>> output = tgm.convert_points_to_homogeneous(input) # BxNx4\n \"\"\"\n if not torch.is_tensor(points):\n raise TypeError(\"Input type is not a torch.Tensor. 
Got {}\".format(\n type(points)))", + "detail": "conversions", + "documentation": {} + }, + { + "label": "angle_axis_to_rotation_matrix", + "kind": 2, + "importPath": "conversions", + "description": "conversions", + "peekOfCode": "def angle_axis_to_rotation_matrix(angle_axis):\n \"\"\"Convert 3d vector of axis-angle rotation to 4x4 rotation matrix\n Args:\n angle_axis (Tensor): tensor of 3d vector of axis-angle rotations.\n Returns:\n Tensor: tensor of 4x4 rotation matrices.\n Shape:\n - Input: :math:`(N, 3)`\n - Output: :math:`(N, 4, 4)`\n Example:", + "detail": "conversions", + "documentation": {} + }, + { + "label": "rtvec_to_pose", + "kind": 2, + "importPath": "conversions", + "description": "conversions", + "peekOfCode": "def rtvec_to_pose(rtvec):\n \"\"\"\n Convert axis-angle rotation and translation vector to 4x4 pose matrix\n Args:\n rtvec (Tensor): Rodrigues vector transformations\n Returns:\n Tensor: transformation matrices\n Shape:\n - Input: :math:`(N, 6)`\n - Output: :math:`(N, 4, 4)`", + "detail": "conversions", + "documentation": {} + }, + { + "label": "rotation_matrix_to_angle_axis", + "kind": 2, + "importPath": "conversions", + "description": "conversions", + "peekOfCode": "def rotation_matrix_to_angle_axis(rotation_matrix):\n \"\"\"Convert 3x4 rotation matrix to Rodrigues vector\n Args:\n rotation_matrix (Tensor): rotation matrix.\n Returns:\n Tensor: Rodrigues vector transformation.\n Shape:\n - Input: :math:`(N, 3, 4)`\n - Output: :math:`(N, 3)`\n Example:", + "detail": "conversions", + "documentation": {} + }, + { + "label": "rotation_matrix_to_quaternion", + "kind": 2, + "importPath": "conversions", + "description": "conversions", + "peekOfCode": "def rotation_matrix_to_quaternion(rotation_matrix, eps=1e-6):\n \"\"\"Convert 3x4 rotation matrix to 4d quaternion vector\n This algorithm is based on algorithm described in\n https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py#L201\n Args:\n rotation_matrix (Tensor): the rotation matrix to convert.\n Return:\n Tensor: the rotation in quaternion\n Shape:\n - Input: :math:`(N, 3, 4)`", + "detail": "conversions", + "documentation": {} + }, + { + "label": "quaternion_to_angle_axis", + "kind": 2, + "importPath": "conversions", + "description": "conversions", + "peekOfCode": "def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor:\n \"\"\"Convert quaternion vector to angle axis of rotation.\n Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h\n Args:\n quaternion (torch.Tensor): tensor with quaternions.\n Return:\n torch.Tensor: tensor with angle axis of rotation.\n Shape:\n - Input: :math:`(*, 4)` where `*` means, any number of dimensions\n - Output: :math:`(*, 3)`", + "detail": "conversions", + "documentation": {} + }, + { + "label": "angle_axis_to_quaternion", + "kind": 2, + "importPath": "conversions", + "description": "conversions", + "peekOfCode": "def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor:\n \"\"\"Convert an angle axis to a quaternion.\n Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h\n Args:\n angle_axis (torch.Tensor): tensor with angle axis.\n Return:\n torch.Tensor: tensor with quaternion.\n Shape:\n - Input: :math:`(*, 3)` where `*` means, any number of dimensions\n - Output: :math:`(*, 4)`", + "detail": "conversions", + "documentation": {} + }, + { + "label": "__all__", + "kind": 5, + "importPath": "conversions", + "description": "conversions", + "peekOfCode": "__all__ = [\n # functional api\n \"pi\",\n 
\"rad2deg\",\n \"deg2rad\",\n \"convert_points_from_homogeneous\",\n \"convert_points_to_homogeneous\",\n \"angle_axis_to_rotation_matrix\",\n \"rotation_matrix_to_angle_axis\",\n \"rotation_matrix_to_quaternion\",", + "detail": "conversions", + "documentation": {} + }, + { + "label": "pi", + "kind": 5, + "importPath": "conversions", + "description": "conversions", + "peekOfCode": "pi = torch.Tensor([3.14159265358979323846])\ndef rad2deg(tensor):\n r\"\"\"Function that converts angles from radians to degrees.\n See :class:`~torchgeometry.RadToDeg` for details.\n Args:\n tensor (Tensor): Tensor of arbitrary shape.\n Returns:\n Tensor: Tensor with same shape as input.\n Example:\n >>> input = tgm.pi * torch.rand(1, 3, 3)", + "detail": "conversions", + "documentation": {} + }, + { + "label": "parser", + "kind": 5, + "importPath": "download_youtube", + "description": "download_youtube", + "peekOfCode": "parser = argparse.ArgumentParser()\nparser.add_argument('-base_path', '--base_path', help='base folder path of dataset')\nparser.add_argument('-speaker', '--speaker',\n help='download videos of a specific speaker {oliver, jon, conan, rock, chemistry, ellen, almaram, angelica, seth, shelly}')\nargs = parser.parse_args()\nBASE_PATH = args.base_path\ndf = pd.read_csv(os.path.join(BASE_PATH, \"videos_links.csv\"))\nif args.speaker:\n df = df[df['speaker'] == args.speaker]\ntemp_output_path = './tmp2/temp_video.mp4'", + "detail": "download_youtube", + "documentation": {} + }, + { + "label": "args", + "kind": 5, + "importPath": "download_youtube", + "description": "download_youtube", + "peekOfCode": "args = parser.parse_args()\nBASE_PATH = args.base_path\ndf = pd.read_csv(os.path.join(BASE_PATH, \"videos_links.csv\"))\nif args.speaker:\n df = df[df['speaker'] == args.speaker]\ntemp_output_path = './tmp2/temp_video.mp4'\nfor _, row in tqdm(df.iterrows(), total=df.shape[0]):\n i, name, link = row\n if 'youtube' in link:\n try:", + "detail": "download_youtube", + "documentation": {} + }, + { + "label": "BASE_PATH", + "kind": 5, + "importPath": "download_youtube", + "description": "download_youtube", + "peekOfCode": "BASE_PATH = args.base_path\ndf = pd.read_csv(os.path.join(BASE_PATH, \"videos_links.csv\"))\nif args.speaker:\n df = df[df['speaker'] == args.speaker]\ntemp_output_path = './tmp2/temp_video.mp4'\nfor _, row in tqdm(df.iterrows(), total=df.shape[0]):\n i, name, link = row\n if 'youtube' in link:\n try:\n output_path = os.path.join(BASE_PATH, row[\"speaker\"], \"videos\", row[\"video_fn\"])", + "detail": "download_youtube", + "documentation": {} + }, + { + "label": "df", + "kind": 5, + "importPath": "download_youtube", + "description": "download_youtube", + "peekOfCode": "df = pd.read_csv(os.path.join(BASE_PATH, \"videos_links.csv\"))\nif args.speaker:\n df = df[df['speaker'] == args.speaker]\ntemp_output_path = './tmp2/temp_video.mp4'\nfor _, row in tqdm(df.iterrows(), total=df.shape[0]):\n i, name, link = row\n if 'youtube' in link:\n try:\n output_path = os.path.join(BASE_PATH, row[\"speaker\"], \"videos\", row[\"video_fn\"])\n if not (os.path.exists(os.path.dirname(output_path))):", + "detail": "download_youtube", + "documentation": {} + }, + { + "label": "temp_output_path", + "kind": 5, + "importPath": "download_youtube", + "description": "download_youtube", + "peekOfCode": "temp_output_path = './tmp2/temp_video.mp4'\nfor _, row in tqdm(df.iterrows(), total=df.shape[0]):\n i, name, link = row\n if 'youtube' in link:\n try:\n output_path = os.path.join(BASE_PATH, row[\"speaker\"], 
\"videos\", row[\"video_fn\"])\n if not (os.path.exists(os.path.dirname(output_path))):\n os.makedirs(os.path.dirname(output_path))\n command = 'yt-dlp -o {temp_path} -f mp4 {link}'.format(link=link, temp_path=temp_output_path)\n res1 = call(command, shell=True)", + "detail": "download_youtube", + "documentation": {} + }, + { + "label": "my_cmd", + "kind": 5, + "importPath": "download_youtube", + "description": "download_youtube", + "peekOfCode": "my_cmd = 'ls ' + os.path.join(BASE_PATH, row[\"speaker\"], \"videos\") + ' | wc -l'\nos.system(my_cmd)", + "detail": "download_youtube", + "documentation": {} + }, + { + "label": "cvt_cfg", + "kind": 2, + "importPath": "main", + "description": "main", + "peekOfCode": "def cvt_cfg(val):\n if val == 'True':\n return True\n elif val == 'False':\n return False\n elif val.isdigit():\n return int(val)\n else:\n return val\ndef parse_other_Cfg(other_cfg: list):", + "detail": "main", + "documentation": {} + }, + { + "label": "parse_other_Cfg", + "kind": 2, + "importPath": "main", + "description": "main", + "peekOfCode": "def parse_other_Cfg(other_cfg: list):\n parse_dict = {}\n parse_key = None\n parse_val = None\n def reg_key():\n nonlocal parse_dict\n nonlocal parse_key\n nonlocal parse_val\n if parse_key != None:\n parse_dict[parse_key] = parse_val", + "detail": "main", + "documentation": {} + }, + { + "label": "parse_overwrite_flag", + "kind": 2, + "importPath": "main", + "description": "main", + "peekOfCode": "def parse_overwrite_flag(over_write_cfg):\n over_write_cfg = over_write_cfg.strip(',')\n over_write_cfg = over_write_cfg.split(',')\n ret = {}\n for i in over_write_cfg:\n k, v = i.split('=')\n ret[k] = cvt_cfg(v)\n return ret\nif __name__ == '__main__':\n SHOW.utils.work_seek_init()", + "detail": "main", + "documentation": {} + }, + { + "label": "render_pkl_api", + "kind": 2, + "importPath": "pkl2img", + "description": "pkl2img", + "peekOfCode": "def render_pkl_api(\n img_folder,\n ours_pkl_file_path,\n ours_images_path,\n output_video_path=None,\n output_obj_path=None,\n mica_all_dir=None,\n replace_from_mica=False,\n # use_npy_betas_ver='betas_2019_male',\n model_path='../models/smplx/SMPLX_NEUTRAL_2020_org.npz',", + "detail": "pkl2img", + "documentation": {} + }, + { + "label": "global_orient[:,:]", + "kind": 5, + "importPath": "post_process", + "description": "post_process", + "peekOfCode": "global_orient[:,:] = global_orient[0,:]\ntransl[:,:] = transl[0,:]\nif (\n speaker == \"oliver\" or\n speaker == \"seth\" or\n speaker == \"chemistry\"\n):\n pose_type='sitting'\nelse:\n pose_type='standing'", + "detail": "post_process", + "documentation": {} + }, + { + "label": "transl[:,:]", + "kind": 5, + "importPath": "post_process", + "description": "post_process", + "peekOfCode": "transl[:,:] = transl[0,:]\nif (\n speaker == \"oliver\" or\n speaker == \"seth\" or\n speaker == \"chemistry\"\n):\n pose_type='sitting'\nelse:\n pose_type='standing'\nif pose_type == 'standing':", + "detail": "post_process", + "documentation": {} + }, + { + "label": "body_pose", + "kind": 5, + "importPath": "post_process", + "description": "post_process", + "peekOfCode": "body_pose = body_pose_axis.reshape(bs, 63)\nfor i in [1, 2, 4, 5, 7, 8, 10, 11]:\n body_pose[:, (i - 1) * 3 + 0] = ref_pose[(i) * 3 + 0]\n body_pose[:, (i - 1) * 3 + 1] = ref_pose[(i) * 3 + 1]\n body_pose[:, (i - 1) * 3 + 2] = ref_pose[(i) * 3 + 2]", + "detail": "post_process", + "documentation": {} + }, + { + "label": "save_one_results", + "kind": 2, + "importPath": "render_pkl_release", + 
"description": "render_pkl_release", + "peekOfCode": "def save_one_results(\n vertices,\n faces,\n img_size, #(height,width)\n center, #(cx,cy)\n focal_length, #(focalx,focaly)\n camera_pose, #K:(4,4)\n meta_data={},\n color_type='sky',\n input_renderer=None,", + "detail": "render_pkl_release", + "documentation": {} + }, + { + "label": "render_pkl_api", + "kind": 2, + "importPath": "render_pkl_release", + "description": "render_pkl_release", + "peekOfCode": "def render_pkl_api(\n pkl_file_path, # pkl path\n out_images_path, # image path folder\n output_video_path=None, # endwith .mp4\n smplx_model_path='../models/smplx/SMPLX_NEUTRAL_2020_org.npz', #smplx neutral 2020 npz file\n **kwargs):\n dtype = torch.float32\n smplx_model_path = os.path.abspath(\n os.path.join(os.path.dirname(__file__), smplx_model_path))\n all_var = mmcv.load(pkl_file_path)", + "detail": "render_pkl_release", + "documentation": {} + }, + { + "label": "DEFAULT_SMPLX_CONFIG", + "kind": 5, + "importPath": "render_pkl_release", + "description": "render_pkl_release", + "peekOfCode": "DEFAULT_SMPLX_CONFIG = dict(\n create_global_orient=True,\n create_body_pose=True,\n create_betas=True,\n create_left_hand_pose=True,\n create_right_hand_pose=True,\n create_expression=True,\n create_jaw_pose=True,\n create_leye_pose=True,\n create_reye_pose=True,", + "detail": "render_pkl_release", + "documentation": {} + }, + { + "label": "colors_dict", + "kind": 5, + "importPath": "render_pkl_release", + "description": "render_pkl_release", + "peekOfCode": "colors_dict = {\n 'red': np.array([0.5, 0.2, 0.2]),\n 'pink': np.array([0.7, 0.5, 0.5]),\n 'neutral': np.array([0.7, 0.7, 0.6]),\n # 'purple': np.array([0.5, 0.5, 0.7]),\n 'purple': np.array([0.55, 0.4, 0.9]),\n 'green': np.array([0.5, 0.55, 0.3]),\n 'sky': np.array([0.3, 0.5, 0.55]),\n 'white': np.array([1.0, 0.98, 0.94]),\n}", + "detail": "render_pkl_release", + "documentation": {} + }, + { + "label": "SHOW_stage1", + "kind": 2, + "importPath": "stage1_main", + "description": "stage1_main", + "peekOfCode": "def SHOW_stage1(*args, **kwargs):\n machine_info = SHOW.get_machine_info()\n import pprint\n pprint.pprint(f'machine_info: {machine_info}')\n smplifyx_cfg = SHOW.utils.from_rela_path(\n __file__, './configs/mmcv_smplifyx_config.py')\n smplifyx_cfg.merge_from_dict(kwargs)\n def update_betas_name_cfg():\n smplifyx_cfg.save_betas_name = smplifyx_cfg.betas_ver_temp.format(\n smplifyx_cfg.speaker_name)", + "detail": "stage1_main", + "documentation": {} + }, + { + "label": "SHOW_stage2", + "kind": 2, + "importPath": "stage2_main", + "description": "stage2_main", + "peekOfCode": "def SHOW_stage2(*args, **kwargs):\n machine_info = SHOW.get_machine_info()\n import pprint\n pprint.pprint(f'machine_info: {machine_info}')\n loggers = kwargs.get('loggers', None)\n tracker_cfg = SHOW.from_rela_path(__file__,\n './configs/mmcv_tracker_config.py')\n tracker_cfg.update(**kwargs)\n tracker_cfg.merge_from_dict(condor_cfg)\n if tracker_cfg.get('over_write_cfg', None):", + "detail": "stage2_main", + "documentation": {} + } +] \ No newline at end of file