# Uploaded with huggingface_hub by FelixzeroSun (commit 19c1f58, verified).
# Core package metadata (PEP 621).
[project]
name = "nnunetv2"
version = "2.5"
requires-python = ">=3.9"
description = "nnU-Net_translation is an adapted nnUNet for medical image translation"
readme = "README.md"
license = { file = "LICENSE" }
authors = [
    { name = "Bowen Xin", email = "bowen.xin@csiro.au" },
]
classifiers = [
    "Development Status :: 5 - Production/Stable",
    "Intended Audience :: Developers",
    "Intended Audience :: Science/Research",
    "Intended Audience :: Healthcare Industry",
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: Apache Software License",
    "Topic :: Scientific/Engineering :: Artificial Intelligence",
    "Topic :: Scientific/Engineering :: Image Recognition",
    "Topic :: Scientific/Engineering :: Medical Science Apps.",
]
keywords = [
    "deep learning",
    "image segmentation",
    "semantic segmentation",
    "medical image analysis",
    "medical image segmentation",
    "nnU-Net",
    "nnunet",
    "image translation",
    "image synthesis",
    "medical image translation",
]
dependencies = [
    # NOTE(review): upper bound presumably guards against a breaking
    # acvl-utils 0.3 release — confirm it is still required.
    "acvl-utils>=0.2,<0.3",
    "matplotlib",
    "seaborn",
]

# Project URLs displayed on PyPI / package indexes.
[project.urls]
homepage = "https://github.com/bowenxin/nnsyn"
repository = "https://github.com/bowenxin/nnsyn"
# Console entry points installed with the package; each maps a CLI command
# to a "module:function" callable.
[project.scripts]
# nnU-Net v2 commands: planning/preprocessing, training, inference,
# evaluation, post-processing, model sharing, and dataset conversion.
nnUNetv2_plan_and_preprocess = "nnunetv2.experiment_planning.plan_and_preprocess_entrypoints:plan_and_preprocess_entry"
nnUNetv2_extract_fingerprint = "nnunetv2.experiment_planning.plan_and_preprocess_entrypoints:extract_fingerprint_entry"
nnUNetv2_plan_experiment = "nnunetv2.experiment_planning.plan_and_preprocess_entrypoints:plan_experiment_entry"
nnUNetv2_preprocess = "nnunetv2.experiment_planning.plan_and_preprocess_entrypoints:preprocess_entry"
nnUNetv2_train = "nnunetv2.run.run_training:run_training_entry"
nnUNetv2_unpack = "nnunetv2.run.run_training:run_unpacking_entry"
nnUNetv2_predict_from_modelfolder = "nnunetv2.inference.predict_from_raw_data:predict_entry_point_modelfolder"
nnUNetv2_predict = "nnunetv2.inference.predict_from_raw_data:predict_entry_point"
nnUNetv2_convert_old_nnUNet_dataset = "nnunetv2.dataset_conversion.convert_raw_dataset_from_old_nnunet_format:convert_entry_point"
nnUNetv2_find_best_configuration = "nnunetv2.evaluation.find_best_configuration:find_best_configuration_entry_point"
nnUNetv2_determine_postprocessing = "nnunetv2.postprocessing.remove_connected_components:entry_point_determine_postprocessing_folder"
nnUNetv2_apply_postprocessing = "nnunetv2.postprocessing.remove_connected_components:entry_point_apply_postprocessing"
nnUNetv2_ensemble = "nnunetv2.ensembling.ensemble:entry_point_ensemble_folders"
nnUNetv2_accumulate_crossval_results = "nnunetv2.evaluation.find_best_configuration:accumulate_crossval_results_entry_point"
nnUNetv2_plot_overlay_pngs = "nnunetv2.utilities.overlay_plots:entry_point_generate_overlay"
nnUNetv2_download_pretrained_model_by_url = "nnunetv2.model_sharing.entry_points:download_by_url"
nnUNetv2_install_pretrained_model_from_zip = "nnunetv2.model_sharing.entry_points:install_from_zip_entry_point"
nnUNetv2_export_model_to_zip = "nnunetv2.model_sharing.entry_points:export_pretrained_model_entry"
nnUNetv2_move_plans_between_datasets = "nnunetv2.experiment_planning.plans_for_pretraining.move_plans_between_datasets:entry_point_move_plans_between_datasets"
nnUNetv2_evaluate_folder = "nnunetv2.evaluation.evaluate_predictions:evaluate_folder_entry_point"
nnUNetv2_evaluate_simple = "nnunetv2.evaluation.evaluate_predictions:evaluate_simple_entry_point"
nnUNetv2_convert_MSD_dataset = "nnunetv2.dataset_conversion.convert_MSD_dataset:entry_point"
# nnsyn commands: the image-translation workflow added by this fork.
nnsyn_plan_and_preprocess = "nnunetv2.nnsyn.nnsyn_preprocessing_entrypoints:nnsyn_plan_and_preprocess_entry"
nnsyn_plan_and_preprocess_seg = "nnunetv2.nnsyn.nnsyn_preprocessing_entrypoints:nnsyn_plan_and_preprocess_seg_entry"
# Alias: same target callable as nnUNetv2_train above.
nnsyn_train = "nnunetv2.run.run_training:run_training_entry"
nnsyn_predict = "nnunetv2.nnsyn.nnsyn_predict_entrypoints:nnsyn_predict_entry"
# Extras: `pip install nnunetv2[dev]` installs the developer tooling.
[project.optional-dependencies]
dev = [
    "black",
    "pre-commit",
    "ruff",
]

# PEP 517/518 build configuration (setuptools backend).
[build-system]
requires = ["setuptools>=67.8.0"]
build-backend = "setuptools.build_meta"
# Codespell (source spell-checker) configuration.
[tool.codespell]
skip = '.git,*.pdf,*.svg'
# Uncomment to suppress specific false positives:
# ignore-words-list = ''