file_path stringlengths 3 280 | file_language stringclasses 66 values | content stringlengths 1 1.04M | repo_name stringlengths 5 92 | repo_stars int64 0 154k | repo_description stringlengths 0 402 | repo_primary_language stringclasses 108 values | developer_username stringlengths 1 25 | developer_name stringlengths 0 30 | developer_company stringlengths 0 82 |
|---|---|---|---|---|---|---|---|---|---|
tests/resolvers/test_gitlab.py | Python | import pytest
from yarl import URL
from repoproviders.resolvers.base import MaybeExists
from repoproviders.resolvers.git import Git, GitLabResolver
from repoproviders.resolvers.repos import GitLabURL
@pytest.mark.parametrize(
("url", "expected"),
(
# GitLab URLs that are not repos
(
GitLabURL(URL("https://gitlab.com"), URL("https://gitlab.com/mosaik/")),
None,
),
(
GitLabURL(
URL("https://gitlab.com"),
URL("https://gitlab.com/groups/mosaik/-/packages"),
),
None,
),
(
GitLabURL(
URL("https://gitlab.com"),
# Put a - at the end to test for edge cases
URL("https://gitlab.com/groups/mosaik/something/something/-"),
),
None,
),
(
GitLabURL(
URL("https://gitlab.com"),
URL("https://gitlab.com/mosaik/mosaik/-/merge_requests/194"),
),
None,
),
# Simple GitLab repo URL
(
GitLabURL(
URL("https://gitlab.com"), URL("https://gitlab.com/mosaik/mosaik")
),
MaybeExists(Git("https://gitlab.com/mosaik/mosaik", "HEAD")),
),
# blobs and tree
(
GitLabURL(
URL("https://gitlab.com"),
URL(
"https://gitlab.com/mosaik/examples/mosaik-tutorials-on-binder/-/blob/b2c44e7f804dc1634681540582b1731e0393f69b/03_Same_Time_Loop/_01_controller_master.ipynb?ref_type=heads"
),
),
MaybeExists(
Git(
"https://gitlab.com/mosaik/examples/mosaik-tutorials-on-binder",
"b2c44e7f804dc1634681540582b1731e0393f69b",
)
),
),
(
GitLabURL(
URL("https://gitlab.com"),
URL(
"https://gitlab.com/mosaik/examples/mosaik-tutorials-on-binder/-/tree/b2c44e7f804dc1634681540582b1731e0393f69b/03_Same_Time_Loop?ref_type=heads"
),
),
MaybeExists(
Git(
"https://gitlab.com/mosaik/examples/mosaik-tutorials-on-binder",
"b2c44e7f804dc1634681540582b1731e0393f69b",
)
),
),
(
GitLabURL(
URL("https://gitlab.wikimedia.org"),
URL(
"https://gitlab.wikimedia.org/toolforge-repos/toolviews/-/tree/main/toolviews?ref_type=heads"
),
),
MaybeExists(
Git("https://gitlab.wikimedia.org/toolforge-repos/toolviews", "main")
),
),
(
GitLabURL(
URL("https://gitlab.wikimedia.org"),
URL(
"https://gitlab.wikimedia.org/toolforge-repos/toolviews/-/blob/main/toolviews/__init__.py?ref_type=heads"
),
),
MaybeExists(
Git("https://gitlab.wikimedia.org/toolforge-repos/toolviews", "main")
),
),
(
GitLabURL(
URL("https://gitlab.com"),
URL(
"https://gitlab.com/mosaik/examples/mosaik-tutorials-on-binder/-/tree/b2c44e7f804dc1634681540582b1731e0393f69b/03_Same_Time_Loop?ref_type=heads"
),
),
MaybeExists(
Git(
"https://gitlab.com/mosaik/examples/mosaik-tutorials-on-binder",
"b2c44e7f804dc1634681540582b1731e0393f69b",
)
),
),
(
GitLabURL(
URL("https://gitlab.com"),
URL("https://gitlab.com/yuvipanda/does-not-exist-e43"),
),
MaybeExists(
Git(repo="https://gitlab.com/yuvipanda/does-not-exist-e43", ref="HEAD")
),
),
# Non repo URLs should simply be detected to be not a repo
(
GitLabURL(
URL("https://gitlab.wikimedia.org"),
URL(
"https://gitlab.wikimedia.org/toolforge-repos/toolviews/-/merge_requests/11"
),
),
None,
),
),
)
async def test_gitlab(url, expected, log):
gl = GitLabResolver()
assert await gl.resolve(url, log) == expected
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
tests/resolvers/test_giturl.py | Python | import pytest
from yarl import URL
from repoproviders.resolvers.base import MaybeExists
from repoproviders.resolvers.git import Git, GitUrlResolver
@pytest.mark.parametrize(
("url", "expected"),
(
# Not a git url
("https://example.com/something", None),
# Not a real repo, but looks like one
(
"git+https://example.com/something",
MaybeExists(Git("https://example.com/something", "HEAD")),
),
(
"git+ssh://example.com/something",
MaybeExists(Git("ssh://example.com/something", "HEAD")),
),
# With a ref
(
"git+https://github.com/yuvipanda/requirements@main",
MaybeExists(Git("https://github.com/yuvipanda/requirements", "main")),
),
# With edge case refs
(
"git+https://github.com/yuvipanda/requirements@tag/something",
MaybeExists(
Git("https://github.com/yuvipanda/requirements", "tag/something")
),
),
(
"git+https://yuvipanda@github.com/yuvipanda/requirements@tag/something",
MaybeExists(
Git(
"https://yuvipanda@github.com/yuvipanda/requirements",
"tag/something",
)
),
),
(
"git+https://yuvipanda@github.com/yuvipanda/requirements@tag@something",
MaybeExists(
Git(
"https://yuvipanda@github.com/yuvipanda/requirements",
"tag@something",
)
),
),
),
)
async def test_giturl(url, expected, log):
gu = GitUrlResolver()
assert await gu.resolve(URL(url), log) == expected
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
tests/resolvers/test_google_drive.py | Python | import pytest
from repoproviders.resolvers.base import DoesNotExist, Exists
from repoproviders.resolvers.rclone import (
GoogleDriveFolder,
GoogleDriveFolderResolver,
ImmutableGoogleDriveFolder,
)
@pytest.mark.parametrize(
("url", "expected"),
(
# Immutable directory
(
GoogleDriveFolder(id="1OBwu72mlrWymv8DLepOwPY-GWHPrgYN8"),
Exists(
ImmutableGoogleDriveFolder(
id="1OBwu72mlrWymv8DLepOwPY-GWHPrgYN8",
dir_hash="hHea2vn_tW34FEXigXp2Eurs3adDso6XYgdeRhzPWPo=",
)
),
),
# Empty but public directory
(
GoogleDriveFolder("1sokbMclA4UaiXPdBBGyThlBEGv961psp"),
DoesNotExist(
GoogleDriveFolder,
"The Google Drive Folder either does not exist, is empty or is not public",
),
),
# Invalid ID
(
GoogleDriveFolder(id="1OBwu72mlrWymv8DLepOwPY-GWHPr8"),
DoesNotExist(
GoogleDriveFolder,
"NOTICE: Failed to lsjson: error in ListJSON: couldn't list directory: googleapi: Error 404: File not found: ., notFound",
),
),
# Private unshared folder
(
GoogleDriveFolder(id="1lvm0Co_aYa0iC__5QLyeL2ptcCNFpsIL"),
DoesNotExist(
GoogleDriveFolder,
"The Google Drive Folder either does not exist, is empty or is not public",
),
),
),
)
async def test_gist(url, expected, log):
gh = GoogleDriveFolderResolver()
assert await gh.resolve(url, log) == expected
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
tests/resolvers/test_immutablegit.py | Python | from logging import Logger
import pytest
from repoproviders.resolvers.base import DoesNotExist, Exists, MaybeExists
from repoproviders.resolvers.git import Git, ImmutableGit, ImmutableGitResolver
@pytest.mark.parametrize(
("question", "expected"),
(
# Random URL, not a git repo
(
Git("https://example.com/something", "HEAD"),
DoesNotExist(
ImmutableGit,
"Could not access git repository at https://example.com/something",
),
),
# Resolve a tag
(
Git("https://github.com/jupyterhub/zero-to-jupyterhub-k8s", "0.8.0"),
Exists(
ImmutableGit(
"https://github.com/jupyterhub/zero-to-jupyterhub-k8s",
"ada2170a2181ae1760d85eab74e5264d0c6bb67f",
)
),
),
# Resolve a commit we know exists, although this isn't verified
(
Git(
"https://github.com/jupyterhub/zero-to-jupyterhub-k8s",
"f7f3ff6d1bf708bdc12e5f10e18b2a90a4795603",
),
MaybeExists(
ImmutableGit(
"https://github.com/jupyterhub/zero-to-jupyterhub-k8s",
"f7f3ff6d1bf708bdc12e5f10e18b2a90a4795603",
)
),
),
# Repo doesn't exist
(
Git(repo="https://github.com/yuvipanda/does-not-exist-e43", ref="HEAD"),
DoesNotExist(
ImmutableGit,
"Could not access git repository at https://github.com/yuvipanda/does-not-exist-e43",
),
),
# Ref doesn't exist
(
Git(
"https://github.com/jupyterhub/zero-to-jupyterhub-k8s", "does-not-exist"
),
DoesNotExist(
ImmutableGit,
"No ref does-not-exist found in repo https://github.com/jupyterhub/zero-to-jupyterhub-k8s",
),
),
),
)
async def test_immutable_git(question, expected, log):
ig = ImmutableGitResolver()
assert await ig.resolve(question, log) == expected
async def test_immutable_git_HEAD(log: Logger):
"""
Extra test to test resolving HEAD, making sure it's the same as resolving "main".
This can't be fixtured because HEAD and `main` will keep moving.
"""
ig = ImmutableGitResolver()
assert (
await ig.resolve(
Git("https://github.com/jupyterhub/zero-to-jupyterhub-k8s", "HEAD"), log
)
) == (
await ig.resolve(
Git("https://github.com/jupyterhub/zero-to-jupyterhub-k8s", "main"), log
)
)
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
tests/resolvers/test_resolve.py | Python | import pytest
from yarl import URL
from repoproviders.resolvers import resolve
from repoproviders.resolvers.base import DoesNotExist, Exists, MaybeExists
from repoproviders.resolvers.repos import (
DataverseDataset,
DataverseURL,
Doi,
FigshareDataset,
FigshareInstallation,
FigshareURL,
GistURL,
Git,
GitHubPR,
GitHubURL,
GitLabURL,
ImmutableFigshareDataset,
ImmutableGit,
ZenodoDataset,
ZenodoURL,
)
@pytest.mark.parametrize(
("url", "expected"),
(
("https://example.com/something", []),
# GitHub URLs that are not repos, but are still GitHub URLs
(
"https://github.com/pyOpenSci",
[
MaybeExists(
GitHubURL(
URL("https://github.com"), URL("https://github.com/pyOpenSci")
)
)
],
),
(
"https://github.com/yuvipanda/repoproviders/actions/runs/12552733471/job/34999118812",
[
MaybeExists(
GitHubURL(
URL("https://github.com"),
URL(
"https://github.com/yuvipanda/repoproviders/actions/runs/12552733471/job/34999118812"
),
)
)
],
),
(
"https://github.com/yuvipanda/repoproviders/settings",
[
MaybeExists(
GitHubURL(
URL("https://github.com"),
URL("https://github.com/yuvipanda/repoproviders/settings"),
)
)
],
),
(
"https://github.com/jupyter/docker-stacks/pull/2194",
[
MaybeExists(
GitHubURL(
URL("https://github.com"),
URL("https://github.com/jupyter/docker-stacks/pull/2194"),
)
)
],
),
# Simple github repo URL that are actual repos
(
GitHubURL(
URL("https://github.com"),
URL("https://github.com/pyOpenSci/pyos-package-template"),
),
[
MaybeExists(
Git("https://github.com/pyOpenSci/pyos-package-template", "HEAD")
)
],
),
# Trailing slash normalized?
(
GitHubURL(
URL("https://github.com"),
URL("https://github.com/pyOpenSci/pyos-package-template/"),
),
[
MaybeExists(
Git("https://github.com/pyOpenSci/pyos-package-template", "HEAD")
)
],
),
# blobs and tree
(
GitHubURL(
URL("https://github.com"),
URL(
"https://github.com/pyOpenSci/pyos-package-template/tree/main/includes/licenses"
),
),
[
MaybeExists(
Git("https://github.com/pyOpenSci/pyos-package-template", "main")
)
],
),
(
GitHubURL(
URL("https://github.com"),
URL(
"https://github.com/pyOpenSci/pyos-package-template/tree/original-cookie/docs"
),
),
[
MaybeExists(
Git(
"https://github.com/pyOpenSci/pyos-package-template",
"original-cookie",
)
)
],
),
(
GitHubURL(
URL("https://github.com"),
URL(
"https://github.com/pyOpenSci/pyos-package-template/blob/b912433bfae541972c83529359f4181ef0fe9b67/README.md"
),
),
[
MaybeExists(
Git(
"https://github.com/pyOpenSci/pyos-package-template",
ref="b912433bfae541972c83529359f4181ef0fe9b67",
)
)
],
),
# Random URL, not a git repo
(
Git("https://example.com/something", "HEAD"),
[
DoesNotExist(
ImmutableGit,
"Could not access git repository at https://example.com/something",
)
],
),
# Resolve a tag
(
Git("https://github.com/jupyterhub/zero-to-jupyterhub-k8s", "0.8.0"),
[
Exists(
ImmutableGit(
"https://github.com/jupyterhub/zero-to-jupyterhub-k8s",
"ada2170a2181ae1760d85eab74e5264d0c6bb67f",
)
)
],
),
# Resolve a commit we know exists, although this isn't verified
(
Git(
"https://github.com/jupyterhub/zero-to-jupyterhub-k8s",
"f7f3ff6d1bf708bdc12e5f10e18b2a90a4795603",
),
[
MaybeExists(
ImmutableGit(
"https://github.com/jupyterhub/zero-to-jupyterhub-k8s",
"f7f3ff6d1bf708bdc12e5f10e18b2a90a4795603",
)
)
],
),
(
Git(
"https://github.com/jupyterhub/zero-to-jupyterhub-k8s", "does-not-exist"
),
[
DoesNotExist(
ImmutableGit,
"No ref does-not-exist found in repo https://github.com/jupyterhub/zero-to-jupyterhub-k8s",
)
],
),
),
)
async def test_resolve(url, expected, log):
assert await resolve(url, False, log) == expected
@pytest.mark.parametrize(
("url", "expected"),
(
("https://example.com/something", []),
# doi schema'd URI
(
"doi:10.7910/DVN/6ZXAGT/3YRRYJ",
[
Exists(
Doi(
URL(
"https://dataverse.harvard.edu/file.xhtml?persistentId=doi:10.7910/DVN/6ZXAGT/3YRRYJ"
)
)
)
],
),
# handle schema'd URI
(
"hdl:11529/10016",
[
Exists(
Doi(
URL(
"https://data.cimmyt.org/dataset.xhtml?persistentId=hdl:11529/10016"
)
)
)
],
),
# For convenience, we do accept DOIs without a scheme
(
"10.7910/DVN/6ZXAGT/3YRRYJ",
[
Exists(
Doi(
URL(
"https://dataverse.harvard.edu/file.xhtml?persistentId=doi:10.7910/DVN/6ZXAGT/3YRRYJ"
)
)
)
],
),
# But not handles without a scheme
("11529/10016", []),
# Three DOI resolution URLs
(
"https://doi.org/10.7910/DVN/6ZXAGT/3YRRYJ",
[
Exists(
Doi(
URL(
"https://dataverse.harvard.edu/file.xhtml?persistentId=doi:10.7910/DVN/6ZXAGT/3YRRYJ"
)
)
)
],
),
(
"https://www.doi.org/10.7910/DVN/6ZXAGT/3YRRYJ",
[
Exists(
Doi(
URL(
"https://dataverse.harvard.edu/file.xhtml?persistentId=doi:10.7910/DVN/6ZXAGT/3YRRYJ"
)
)
)
],
),
(
"https://hdl.handle.net/10.7910/DVN/6ZXAGT/3YRRYJ",
[
Exists(
Doi(
URL(
"https://dataverse.harvard.edu/file.xhtml?persistentId=doi:10.7910/DVN/6ZXAGT/3YRRYJ"
)
)
)
],
),
),
)
async def test_norecurse(url, expected, log):
assert await resolve(URL(url), False, log) == expected
@pytest.mark.parametrize(
("url", "expected"),
(
# doi schema'd URI
(
"doi:10.7910/DVN/6ZXAGT/3YRRYJ",
[
Exists(
Doi(
URL(
"https://dataverse.harvard.edu/file.xhtml?persistentId=doi:10.7910/DVN/6ZXAGT/3YRRYJ"
)
)
),
MaybeExists(
DataverseURL(
URL("https://dataverse.harvard.edu"),
URL(
"https://dataverse.harvard.edu/file.xhtml?persistentId=doi:10.7910/DVN/6ZXAGT/3YRRYJ"
),
),
),
Exists(
DataverseDataset(
URL("https://dataverse.harvard.edu"), "doi:10.7910/DVN/6ZXAGT"
)
),
],
),
# handle schema'd URI
(
"hdl:11529/10016",
[
Exists(
Doi(
URL(
"https://data.cimmyt.org/dataset.xhtml?persistentId=hdl:11529/10016"
)
)
),
MaybeExists(
DataverseURL(
URL("https://data.cimmyt.org"),
URL(
"https://data.cimmyt.org/dataset.xhtml?persistentId=hdl:11529/10016"
),
),
),
Exists(
DataverseDataset(URL("https://data.cimmyt.org"), "hdl:11529/10016")
),
],
),
# For convenience, we do accept DOIs without a scheme
(
"10.7910/DVN/6ZXAGT",
[
Exists(
Doi(
URL(
"https://dataverse.harvard.edu/citation?persistentId=doi:10.7910/DVN/6ZXAGT"
)
)
),
MaybeExists(
DataverseURL(
URL("https://dataverse.harvard.edu"),
URL(
"https://dataverse.harvard.edu/citation?persistentId=doi:10.7910/DVN/6ZXAGT"
),
),
),
Exists(
DataverseDataset(
URL("https://dataverse.harvard.edu"), "doi:10.7910/DVN/6ZXAGT"
)
),
],
),
# Something that's only a DOI, and won't resolve further
(
"10.1126/science.aar3646",
[Exists(Doi(URL("https://www.science.org/doi/10.1126/science.aar3646")))],
),
# GitHub URLs that recurse into ImmutableGit
(
"https://github.com/jupyterhub/zero-to-jupyterhub-k8s/tree/f7f3ff6d1bf708bdc12e5f10e18b2a90a4795603",
[
MaybeExists(
GitHubURL(
URL("https://github.com"),
URL(
"https://github.com/jupyterhub/zero-to-jupyterhub-k8s/tree/f7f3ff6d1bf708bdc12e5f10e18b2a90a4795603"
),
)
),
MaybeExists(
Git(
"https://github.com/jupyterhub/zero-to-jupyterhub-k8s",
"f7f3ff6d1bf708bdc12e5f10e18b2a90a4795603",
)
),
MaybeExists(
ImmutableGit(
"https://github.com/jupyterhub/zero-to-jupyterhub-k8s",
"f7f3ff6d1bf708bdc12e5f10e18b2a90a4795603",
)
),
],
),
(
"https://github.com/jupyterhub/zero-to-jupyterhub-k8s/tree/0.8.0",
[
MaybeExists(
GitHubURL(
URL("https://github.com"),
URL(
"https://github.com/jupyterhub/zero-to-jupyterhub-k8s/tree/0.8.0"
),
)
),
MaybeExists(
Git(
"https://github.com/jupyterhub/zero-to-jupyterhub-k8s",
"0.8.0",
)
),
Exists(
ImmutableGit(
"https://github.com/jupyterhub/zero-to-jupyterhub-k8s",
"ada2170a2181ae1760d85eab74e5264d0c6bb67f",
)
),
],
),
# Git URLs that recurse into ImmutableGit
(
"git+https://github.com/jupyterhub/zero-to-jupyterhub-k8s@f7f3ff6d1bf708bdc12e5f10e18b2a90a4795603",
[
MaybeExists(
Git(
"https://github.com/jupyterhub/zero-to-jupyterhub-k8s",
"f7f3ff6d1bf708bdc12e5f10e18b2a90a4795603",
)
),
MaybeExists(
ImmutableGit(
"https://github.com/jupyterhub/zero-to-jupyterhub-k8s",
"f7f3ff6d1bf708bdc12e5f10e18b2a90a4795603",
)
),
],
),
(
"git+https://github.com/jupyterhub/zero-to-jupyterhub-k8s@0.8.0",
[
MaybeExists(
Git(
"https://github.com/jupyterhub/zero-to-jupyterhub-k8s",
"0.8.0",
)
),
Exists(
ImmutableGit(
"https://github.com/jupyterhub/zero-to-jupyterhub-k8s",
"ada2170a2181ae1760d85eab74e5264d0c6bb67f",
)
),
],
),
(
"10.5281/zenodo.3232985",
[
Exists(Doi(URL("https://zenodo.org/record/3232985"))),
MaybeExists(
ZenodoURL(
URL("https://zenodo.org"),
URL("https://zenodo.org/record/3232985"),
)
),
MaybeExists(ZenodoDataset(URL("https://zenodo.org/"), "3232985")),
],
),
(
"https://doi.org/10.6084/m9.figshare.9782777.v3",
[
Exists(
Doi(
URL(
"https://figshare.com/articles/Binder-ready_openSenseMap_Analysis/9782777/3"
)
)
),
MaybeExists(
FigshareURL(
FigshareInstallation(
URL("https://figshare.com/"),
URL("https://api.figshare.com/v2/"),
),
URL(
"https://figshare.com/articles/Binder-ready_openSenseMap_Analysis/9782777/3"
),
)
),
MaybeExists(
FigshareDataset(
FigshareInstallation(
URL("https://figshare.com/"),
URL("https://api.figshare.com/v2/"),
),
9782777,
3,
)
),
MaybeExists(
ImmutableFigshareDataset(
FigshareInstallation(
URL("https://figshare.com/"),
URL("https://api.figshare.com/v2/"),
),
9782777,
3,
)
),
],
),
(
"https://figshare.com/articles/Binder-ready_openSenseMap_Analysis/9782777",
[
MaybeExists(
FigshareURL(
FigshareInstallation(
URL("https://figshare.com/"),
URL("https://api.figshare.com/v2/"),
),
URL(
"https://figshare.com/articles/Binder-ready_openSenseMap_Analysis/9782777"
),
)
),
MaybeExists(
FigshareDataset(
FigshareInstallation(
URL("https://figshare.com/"),
URL("https://api.figshare.com/v2/"),
),
9782777,
None,
)
),
Exists(
ImmutableFigshareDataset(
FigshareInstallation(
URL("https://figshare.com/"),
URL("https://api.figshare.com/v2/"),
),
9782777,
3,
)
),
],
),
# A zenodo doi that uses the /doi redirect
(
"https://doi.org/10.5281/zenodo.805993",
[
Exists(Doi(url=URL("https://zenodo.org/doi/10.5281/zenodo.805993"))),
MaybeExists(
ZenodoURL(
URL("https://zenodo.org"),
URL("https://zenodo.org/doi/10.5281/zenodo.805993"),
)
),
MaybeExists(ZenodoDataset(URL("https://zenodo.org/"), "14007206")),
],
),
# A bare git URL, that we'll have to have guessed
(
# Using this as HEAD hasn't changed in 16 years
"https://git.kernel.org/pub/scm/fs/fat/fatattr/fatattr.git/",
[
Exists(
repo=Git(
"https://git.kernel.org/pub/scm/fs/fat/fatattr/fatattr.git/",
ref="HEAD",
)
),
Exists(
repo=ImmutableGit(
"https://git.kernel.org/pub/scm/fs/fat/fatattr/fatattr.git/",
"3df926a6a9ad5ea02c9f63157a0588125f046441",
)
),
],
),
# A dataverse URL that does *not* exist in our list of well known dataverse installations
(
"https://demo.dataverse.org/dataset.xhtml?persistentId=doi:10.70122/FK2/MBQA9G",
[
MaybeExists(
DataverseURL(
URL("https://demo.dataverse.org"),
URL(
"https://demo.dataverse.org/dataset.xhtml?persistentId=doi:10.70122/FK2/MBQA9G"
),
)
),
Exists(
DataverseDataset(
URL("https://demo.dataverse.org"), "doi:10.70122/FK2/MBQA9G"
)
),
],
),
# A GitLab URL on GitLab
(
"https://gitlab.com/mosaik/examples/mosaik-tutorials-on-binder/-/blob/b2c44e7f804dc1634681540582b1731e0393f69b/03_Same_Time_Loop/_01_controller_master.ipynb?ref_type=heads",
[
MaybeExists(
repo=GitLabURL(
installation=URL("https://gitlab.com"),
url=URL(
"https://gitlab.com/mosaik/examples/mosaik-tutorials-on-binder/-/blob/b2c44e7f804dc1634681540582b1731e0393f69b/03_Same_Time_Loop/_01_controller_master.ipynb?ref_type=heads"
),
)
),
MaybeExists(
repo=Git(
repo="https://gitlab.com/mosaik/examples/mosaik-tutorials-on-binder",
ref="b2c44e7f804dc1634681540582b1731e0393f69b",
)
),
MaybeExists(
repo=ImmutableGit(
repo="https://gitlab.com/mosaik/examples/mosaik-tutorials-on-binder",
ref="b2c44e7f804dc1634681540582b1731e0393f69b",
)
),
],
),
# A GitLab URL that isn't on gitlab.com
(
"https://gitlab.wikimedia.org/toolforge-repos/toolviews/-/tree/bb42ab4dc4ddf0712f83ec4add58005a3ae75de5/toolviews?ref_type=heads",
[
MaybeExists(
repo=GitLabURL(
installation=URL("https://gitlab.wikimedia.org/"),
url=URL(
"https://gitlab.wikimedia.org/toolforge-repos/toolviews/-/tree/bb42ab4dc4ddf0712f83ec4add58005a3ae75de5/toolviews?ref_type=heads"
),
)
),
MaybeExists(
repo=Git(
repo="https://gitlab.wikimedia.org/toolforge-repos/toolviews",
ref="bb42ab4dc4ddf0712f83ec4add58005a3ae75de5",
)
),
MaybeExists(
repo=ImmutableGit(
repo="https://gitlab.wikimedia.org/toolforge-repos/toolviews",
ref="bb42ab4dc4ddf0712f83ec4add58005a3ae75de5",
)
),
],
),
# A gist URL
(
"https://gist.github.com/JakeWharton/5423616",
[
MaybeExists(
GistURL(
installation=URL("https://gist.github.com"),
url=URL("https://gist.github.com/JakeWharton/5423616"),
)
),
MaybeExists(
repo=Git(
repo="https://gist.github.com/JakeWharton/5423616", ref="HEAD"
)
),
Exists(
repo=ImmutableGit(
repo="https://gist.github.com/JakeWharton/5423616",
ref="76d24e01654211591b7bea8ae4557f6ff5283343",
)
),
],
),
# A github PR that has been merged and branch deleted
(
"https://github.com/yuvipanda/repoproviders/pull/1",
[
MaybeExists(
repo=GitHubURL(
installation=URL("https://github.com"),
url=URL("https://github.com/yuvipanda/repoproviders/pull/1"),
)
),
MaybeExists(
repo=GitHubPR(
installation=URL("https://github.com"),
url=URL("https://github.com/yuvipanda/repoproviders/pull/1"),
)
),
MaybeExists(
repo=Git(
repo="https://github.com/yuvipanda/repoproviders", ref="types-1"
)
),
DoesNotExist(
kind=ImmutableGit,
message="No ref types-1 found in repo https://github.com/yuvipanda/repoproviders",
),
],
),
),
)
async def test_recurse(url, expected, log):
assert await resolve(URL(url), True, log) == expected
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
tests/resolvers/test_serialize.py | Python | import json
import pytest
from repoproviders import resolve
from repoproviders.resolvers.serialize import to_json
@pytest.mark.parametrize(
("url", "expected"),
(
("https://example.com", None),
(
"https://github.com/pyOpenSci",
{
"certainity": "MaybeExists",
"kind": "GitHubURL",
"data": {
"installation": "https://github.com",
"url": "https://github.com/pyOpenSci",
},
},
),
(
"https://github.com/pyOpenSci/pyos-package-template/tree/main/includes/licenses",
{
"certainity": "Exists",
"kind": "ImmutableGit",
"data": {
"repo": "https://github.com/pyOpenSci/pyos-package-template",
"ref": "c77ad6399f713ee3a021ef52b069e56b17de24a7",
},
},
),
(
"doi:10.7910/DVN/6ZXAGT/3YRRYJ",
{
"certainity": "Exists",
"kind": "DataverseDataset",
"data": {
"installationUrl": "https://dataverse.harvard.edu",
"persistentId": "doi:10.7910/DVN/6ZXAGT",
},
},
),
(
"https://figshare.com/articles/Binder-ready_openSenseMap_Analysis/9782777",
{
"certainity": "Exists",
"kind": "ImmutableFigshareDataset",
"data": {
"installation": {
"url": "https://figshare.com/",
"apiUrl": "https://api.figshare.com/v2/",
},
"articleId": 9782777,
"version": 3,
},
},
),
(
"https://github.com/yuvipanda/does-not-exist-e43",
{
"certainity": "DoesNotExist",
"kind": "ImmutableGit",
"data": {
"kind": "ImmutableGit",
"message": "Could not access git repository at https://github.com/yuvipanda/does-not-exist-e43",
},
},
),
),
)
async def test_to_json(url, expected, log):
# This also tests to_dict anyway
answers = await resolve(url, True, log)
if expected is None:
assert answers == []
else:
assert answers is not None
assert to_json(answers[-1]) == json.dumps(expected)
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
tests/resolvers/test_wellknown.py | Python | import pytest
from yarl import URL
from repoproviders.resolvers.base import MaybeExists
from repoproviders.resolvers.rclone import GoogleDriveFolder
from repoproviders.resolvers.repos import (
DataverseURL,
Doi,
FigshareInstallation,
FigshareURL,
GistURL,
GitHubURL,
GitLabURL,
HydroshareDataset,
ZenodoURL,
)
from repoproviders.resolvers.wellknown import WellKnownProvidersResolver
@pytest.mark.parametrize(
("question", "expected"),
(
("https://example.com/something", None),
# Try a raw git repo - this should be only detected by feature detector
(
"https://git.kernel.org/pub/scm/virt/kvm/kvm.git/",
None,
),
# A github URL even if it's not actually a valid repo
(
"https://github.com/settings",
MaybeExists(
GitHubURL(URL("https://github.com"), URL("https://github.com/settings"))
),
),
# A dataverse URL that doesn't exist in our well known set, so should be None
(
"https://demo.dataverse.org/dataset.xhtml?persistentId=doi:10.70122/FK2/MBQA9G",
None,
),
(
"https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/TJCLKP",
MaybeExists(
DataverseURL(
URL("https://dataverse.harvard.edu"),
URL(
"https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/TJCLKP"
),
)
),
),
(
Doi(URL("https://zenodo.org/record/3232985")),
MaybeExists(
ZenodoURL(
URL("https://zenodo.org"),
URL("https://zenodo.org/record/3232985"),
)
),
),
(
"https://zenodo.org/settings",
MaybeExists(
ZenodoURL(
URL("https://zenodo.org"),
URL("https://zenodo.org/settings"),
)
),
),
(
"https://figshare.com/browse",
MaybeExists(
FigshareURL(
FigshareInstallation(
URL("https://figshare.com/"),
URL("https://api.figshare.com/v2/"),
),
URL("https://figshare.com/browse"),
)
),
),
(
"https://gitlab.com/browse",
MaybeExists(
GitLabURL(URL("https://gitlab.com"), URL("https://gitlab.com/browse"))
),
),
(
"https://gist.github.com/yuvipanda",
MaybeExists(
GistURL(
URL("https://gist.github.com"),
URL("https://gist.github.com/yuvipanda"),
)
),
),
(
"https://gist.github.com/JakeWharton/5423616",
MaybeExists(
GistURL(
URL("https://gist.github.com"),
URL("https://gist.github.com/JakeWharton/5423616"),
)
),
),
(
# We support directory links
"https://drive.google.com/drive/folders/1o3okM5hYOgUGHYipyjiblEzbp29UX9cF",
MaybeExists(GoogleDriveFolder("1o3okM5hYOgUGHYipyjiblEzbp29UX9cF")),
),
(
# We don't support file links
"https://drive.google.com/file/d/110LCoTV6NM7YpMc7MqooQ9pJ0PhsOzFY/view?usp=drive_link",
None,
),
(
# Support URLs without www.
"https://hydroshare.org/resource/76502ab28c5744f98e2bbad5155e39c7/",
MaybeExists(HydroshareDataset("76502ab28c5744f98e2bbad5155e39c7")),
),
(
# Support URLs with www.
"https://www.hydroshare.org/resource/76502ab28c5744f98e2bbad5155e39c7/",
MaybeExists(HydroshareDataset("76502ab28c5744f98e2bbad5155e39c7")),
),
(
# Handle lack of trailing /
"https://www.hydroshare.org/resource/76502ab28c5744f98e2bbad5155e39c7",
MaybeExists(HydroshareDataset("76502ab28c5744f98e2bbad5155e39c7")),
),
(
# Random hydroshare URLs don't work
"https://hydroshare.org/search/",
None,
),
),
)
async def test_doi(question, expected, log):
wk = WellKnownProvidersResolver()
if isinstance(question, str):
question = URL(question)
assert await wk.resolve(question, log) == expected
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
tests/resolvers/test_zenodo.py | Python | import pytest
from yarl import URL
from repoproviders.resolvers.base import DoesNotExist, MaybeExists
from repoproviders.resolvers.doi import ZenodoResolver
from repoproviders.resolvers.repos import ZenodoDataset, ZenodoURL
@pytest.mark.parametrize(
("url", "expected"),
(
# A valid Zenodo URL that isn't actually a dataset
(
ZenodoURL(URL("https://zenodo.org"), URL("https://zenodo.org/communities")),
None,
),
# Simple /record and /records
(
ZenodoURL(
URL("https://zenodo.org"), URL("https://zenodo.org/record/3232985")
),
MaybeExists(ZenodoDataset(URL("https://zenodo.org/"), "3232985")),
),
(
ZenodoURL(
URL("https://zenodo.org"), URL("https://zenodo.org/records/3232985")
),
MaybeExists(ZenodoDataset(URL("https://zenodo.org/"), "3232985")),
),
# Note we normalize output to have the HTTPS URL, even if we're passed in the HTTP URL
(
ZenodoURL(
URL("https://zenodo.org"), URL("http://zenodo.org/record/3232985")
),
MaybeExists(ZenodoDataset(URL("https://zenodo.org/"), "3232985")),
),
(
ZenodoURL(
URL("https://zenodo.org"), URL("https://zenodo.org/records/3232985")
),
MaybeExists(ZenodoDataset(URL("https://zenodo.org/"), "3232985")),
),
# A non-zenodo.org URL
(
ZenodoURL(
URL("https://data.caltech.edu"),
URL("https://data.caltech.edu/records/996aw-mf266"),
),
MaybeExists(ZenodoDataset(URL("https://data.caltech.edu/"), "996aw-mf266")),
),
# A doi reference
(
ZenodoURL(
URL("https://zenodo.org"),
URL("https://zenodo.org/doi/10.5281/zenodo.805993"),
),
MaybeExists(ZenodoDataset(URL("https://zenodo.org/"), recordId="14007206")),
),
# A doi reference to a bad doi
(
ZenodoURL(
URL("https://zenodo.org"),
URL("https://zenodo.org/doi/10.5281/zdo.805993"),
),
DoesNotExist(
ZenodoDataset,
"https://zenodo.org/doi/10.5281/zdo.805993 is not a valid Zenodo DOI URL",
),
),
),
)
async def test_zenodo(url, expected, log):
zr = ZenodoResolver()
assert await zr.resolve(url, log) == expected
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
tests/test_utils.py | Python | import asyncio
import secrets
import socket
import sys
import tempfile
from logging import Logger
from pathlib import Path
import aiohttp
from yarl import URL
from repoproviders.utils import download_file
def random_port() -> int:
"""
Get a single random port that is *probably* unused
"""
sock = socket.socket()
sock.bind(("", 0))
port = sock.getsockname()[1]
sock.close()
return port
async def test_download_file(log: Logger):
port = random_port()
with tempfile.TemporaryDirectory() as src, tempfile.TemporaryDirectory() as dest:
test_file = Path(src) / secrets.token_hex(8)
test_contents = secrets.token_hex(8)
proc = await asyncio.create_subprocess_exec(
sys.executable, "-m", "http.server", str(port), "-d", src, "-b", "127.0.0.1"
)
try:
# FIXME: Do this a little more dynamically?
# Wait for the HTTP server to come up
await asyncio.sleep(1)
test_file.write_text(test_contents)
# Create a nested subdirectory
dest_file = Path(dest) / secrets.token_hex(8) / secrets.token_hex(8)
async with aiohttp.ClientSession() as session:
await download_file(
session,
URL(f"http://127.0.0.1:{port}/{test_file.name}"),
dest_file,
log,
)
assert dest_file.exists()
assert dest_file.read_text() == test_contents
finally:
proc.kill()
await proc.wait()
| yuvipanda/repoproviders | 1 | Detect, resolve and fetch repositories of content | Python | yuvipanda | Yuvi | 2i2c-org |
src/ruamelfmt/__main__.py | Python | import argparse
import sys
from ruamel.yaml import YAML
from ruamel.yaml.constructor import DuplicateKeyError
from ruamel.yaml.scanner import ScannerError
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--line-length', default=sys.maxsize, type=int)
parser.add_argument('filepath', nargs='+')
args = parser.parse_args()
yaml = YAML(typ='rt')
yaml.width = args.line_length
should_fail = False
for filepath in args.filepath:
with open(filepath) as f:
try:
data = yaml.load(f)
except (ScannerError, DuplicateKeyError) as e:
print(f"Error {str(e.problem_mark).strip()}: {e.problem.strip()}")
should_fail = True
# Continue formatting other files
continue
with open(filepath, 'w') as f:
yaml.dump(data, f)
if should_fail:
sys.exit(1)
if __name__ == '__main__':
main() | yuvipanda/ruamelfmt | 2 | Autoformat YAML files as ruamel.yaml does | Python | yuvipanda | Yuvi | 2i2c-org |
tests/system/test_import.py | Python | # Copyright (c) 2024 Yuvi
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Test that the package can be imported."""
def test_import():
"""Test that the package can be imported."""
import ruamelfmt # noqa: F401
| yuvipanda/ruamelfmt | 2 | Autoformat YAML files as ruamel.yaml does | Python | yuvipanda | Yuvi | 2i2c-org |
src/Setup.lhs | Haskell | This file was generated by `cargo-cabal`, its goal is to define few hooks to
call `cargo` on the fly and link correctly the generated library.
While it's an acceptable hack as this project is currently a prototype, this
should be removed before `cargo-cabal` stable release.
> import Data.Maybe
> import qualified Distribution.PackageDescription as PD
> import Distribution.Simple
> ( Args,
> UserHooks (confHook, preConf),
> defaultMainWithHooks,
> simpleUserHooks,
> )
> import Distribution.Simple.LocalBuildInfo
> ( LocalBuildInfo (localPkgDescr),
> )
> import Distribution.Simple.Setup
> ( BuildFlags (buildVerbosity),
> ConfigFlags (configVerbosity),
> fromFlag,
> )
> import Distribution.Simple.UserHooks
> ( UserHooks (buildHook, confHook),
> )
> import Distribution.Simple.Utils (rawSystemExit)
> import System.Directory (getCurrentDirectory)
>
> main :: IO ()
> main =
> defaultMainWithHooks
> simpleUserHooks
> { confHook = rustConfHook
> -- , buildHook = rustBuildHook
> }
This hook could be removed at some point, likely if this issue is resolved
https://github.com/haskell/cabal/issues/2641
> rustConfHook ::
> (PD.GenericPackageDescription, PD.HookedBuildInfo) ->
> ConfigFlags ->
> IO LocalBuildInfo
> rustConfHook (description, buildInfo) flags = do
> localBuildInfo <- confHook simpleUserHooks (description, buildInfo) flags
> let packageDescription = localPkgDescr localBuildInfo
> library = fromJust $ PD.library packageDescription
> libraryBuildInfo = PD.libBuildInfo library
> dir <- getCurrentDirectory
> return localBuildInfo
> { localPkgDescr = packageDescription
> { PD.library = Just $ library
> { PD.libBuildInfo = libraryBuildInfo
> { PD.extraLibDirs = (dir ++ "/target/release") :
> (dir ++ "/target/debug") :
> PD.extraLibDirs libraryBuildInfo
> } } } }
It would be nice to remove this hook at some point, e.g., if this RFC is merged
in Cabal https://github.com/haskell/cabal/issues/7906
% rustBuildHook ::
% PD.PackageDescription ->
% LocalBuildInfo ->
% UserHooks ->
% BuildFlags ->
% IO ()
% rustBuildHook description localBuildInfo hooks flags = do
% putStrLn "******************************************************************"
% putStrLn "Call `cargo build --release` to build a dependency written in Rust"
% -- FIXME: add `--target $TARGET` flag to support cross-compiling to $TARGET
% rawSystemExit (fromFlag $ buildVerbosity flags) "cargo" ["build","--release"]
% putStrLn "... `rustc` compilation seems to succeed 🦀! Back to Cabal build:"
% putStrLn "******************************************************************"
% putStrLn "Back to Cabal build"
% buildHook simpleUserHooks description localBuildInfo hooks flags
This handy automation (particularly useful when you want to quickly prototype
without having to spawn manually `cargo` commands) is disabled by default.
Feel free to re-enable it while debugging your library, but I discourage you
strongly to publish anything on Hackage that contains this hook!
| yvan-sraka/cargo-cabal | 109 | A tool that helps you to turn in one command a Rust crate into a Haskell Cabal library! | Rust | yvan-sraka | Yvan Sraka | |
src/build.rs | Rust | //! Support to dynamic library, require in user crate a small `build.rs` script,
//! that link to generated Rust library a filename with GHC version suffix, e.g.
//! `libNAME-ghcVERSION.so`, `libNAME-ghcVERSION.dylib` or `NAME-ghcVERSION.dll`
//!
//! Version is the one of `ghc` found in `$PATH`, but can be overridden with
//! `$CABAL_PACK_GHC_VERSION` env variable!
//!
//! This build file was written with the constraint in mind of no-dependency to
//! keep user setup simple and be easily mergeable with an existing `build.rs`
//! automation!
fn main() {
let path = format!("target/{}", std::env::var("PROFILE").unwrap());
#[cfg(target_family = "windows")]
let prefix = "";
#[cfg(target_family = "unix")]
let prefix = "lib";
let name = env!("CARGO_PKG_NAME");
let suffix = format!(
"-ghc{}",
std::env::var("CABAL_PACK_GHC_VERSION")
.or_else(|_| String::from_utf8(
std::process::Command::new("ghc")
.arg("--version")
.output()
.unwrap()
.stdout
))
.unwrap()
.trim()
.strip_prefix("The Glorious Glasgow Haskell Compilation System, version ")
.unwrap()
);
#[cfg(target_os = "windows")]
let ext = "dll";
#[cfg(target_os = "macos")]
let ext = "dylib";
#[cfg(target_os = "linux")]
let ext = "so";
let source = format!("{prefix}{name}.{ext}");
let target = format!("{prefix}{name}{suffix}.{ext}");
if !std::path::Path::new(&format!("{path}/{target}")).exists() {
std::env::set_current_dir(path).unwrap();
std::fs::OpenOptions::new()
.create(true)
.write(true)
.open(&source)
.unwrap();
#[cfg(target_family = "windows")]
std::os::windows::fs::symlink_file(source, target).unwrap();
#[cfg(target_family = "unix")]
std::os::unix::fs::symlink(source, target).unwrap();
}
}
| yvan-sraka/cargo-cabal | 109 | A tool that helps you to turn in one command a Rust crate into a Haskell Cabal library! | Rust | yvan-sraka | Yvan Sraka | |
src/cabal.rs | Rust | /// Generate user `.cabal`, taking `--enable-nix` option into account
pub(crate) fn generate(name: &str, module: &str, version: &str, enable_nix: bool) -> String {
let build_type = if enable_nix {
"
build-type: Simple"
} else {
"
-- This let us hook Cabal steps to Setup.lhs script.
build-type: Custom
custom-setup
setup-depends: Cabal, base, directory, process"
};
let lib_name = name.replace('-', "_"); // In library generated by Cargo, '-' is replaced by '_'
let package_name = name.replace('_', "-"); // Cabal does not expect '_' for packages names
let extra = if enable_nix {
format!(
"
-- `haskell.nix` tell GHC linker where to find the `libNAME.a` by setting
-- automatically `extra-lib-dirs`:
-- https://input-output-hk.github.io/haskell.nix/tutorials/pkg-map.html
extra-libraries: {lib_name}
-- Cross-compilation to target `x86_64-w64-mingw32-cc` thrown a lot of
-- `undefined reference to 'X'` errors during linking stage ...
if os(windows)
extra-libraries: userenv ws2_32 bcrypt
-- Here is a mapping between library names and missing symbols:
-- `bcrypt` -> `BCryptGenRandom`
-- `userenv` -> `GetUserProfileDirectoryW`
-- `ws2_32` -> `freeaddrinfo getaddrinfo WSASend WSARecv WSASocketW`"
)
} else {
format!(
"
-- Libraries that are bundled with the package.
extra-bundled-libraries: {lib_name}"
)
};
format!(
"cabal-version: 2.4
-- The cabal-version field refers to the version of the .cabal specification,
-- and can be different from the cabal-install (the tool) version and the
-- Cabal (the library) version you are using. As such, the Cabal (the library)
-- version used must be equal or greater than the version stated in this field.
-- Starting from the specification version 2.2, the cabal-version field must be
-- the first thing in the cabal file.
-- Initial package description generated by 'cabal init'. For further
-- documentation, see: http://haskell.org/cabal/users-guide/
--
-- The name of the package.
name: {package_name}
-- The package version.
-- See the Haskell package versioning policy (PVP) for standards
-- guiding when and how versions should be incremented.
-- https://pvp.haskell.org
-- PVP summary: +-+------- breaking API changes
-- | | +----- non-breaking API additions
-- | | | +--- code changes with no API change
version: {version}
-- A short (one-line) description of the package.
-- synopsis:
-- A longer description of the package.
-- description:
-- The license under which the package is released.
-- license:
-- The package author(s).
-- author:
-- An email address to which users can send suggestions, bug reports, and
-- patches.
-- maintainer:
-- A copyright notice.
-- copyright:
{build_type}
-- Extra doc files to be distributed with the package, such as a CHANGELOG or a
-- README.
-- extra-doc-files:
-- Extra source files to be distributed with the package, such as examples, or
-- a tutorial module.
-- extra-source-files:
--
-- FIXME: It's still unclear to me what would be the best strategy to let users
-- publish packages generated by `cargo-cabal` on Hackage. While it is pretty
-- hazardous to put Rust code in sdist archive (because that would require that
-- the library end-user have a Rust developer environment on this machine and
-- that wouldn't play well with cross-compilation), is it a good idea to
-- package generated platform-dependent library as source?
common warnings
ghc-options: -Wall
library
-- Import common warning flags.
import: warnings
-- Modules exported by the library.
exposed-modules: {module}
-- Modules included in this library but not exported.
-- other-modules:
-- LANGUAGE extensions used by modules in this package.
-- other-extensions:
-- Other library packages from which modules are imported.
build-depends: base
-- Directories containing source files.
hs-source-dirs: src
-- Base language which the package is written in.
default-language: Haskell2010
{extra}
-- This file was generated by `cargo-cabal`, but please don't hesitate to edit it!
-- We would rather rely on `cabal init --non-interactive` to generate this file
-- but there is no CLI arg to set `build-type: Custom` on which it sadly
-- currently have to rely on."
)
}
| yvan-sraka/cargo-cabal | 109 | A tool that helps you to turn in one command a Rust crate into a Haskell Cabal library! | Rust | yvan-sraka | Yvan Sraka | |
src/cargo.rs | Rust | //! This module defines data-structure into which user `Cargo.toml` is parsed
use serde::Deserialize;
#[derive(Clone, Deserialize)]
pub(crate) struct Root {
pub(crate) package: Option<Package>,
pub(crate) lib: Option<Lib>,
}
#[derive(Clone, Deserialize)]
pub(crate) struct Package {
pub(crate) name: Option<String>,
pub(crate) version: Option<String>,
}
#[derive(Clone, Deserialize)]
pub(crate) struct Lib {
#[serde(alias = "crate-type")]
pub(crate) crate_type: Option<Vec<String>>,
}
/// We allow `staticlib` and `cdylib` target only since `dylib` doesn't offer
/// the same ABI stability guarantees:
///
/// - https://users.rust-lang.org/t/what-is-the-difference-between-dylib-and-cdylib/28847
/// - https://users.rust-lang.org/t/abi-stability-guarantee-of-dylib-vs-cdylib/50879
#[derive(Debug, PartialEq)]
pub(crate) enum CrateType {
/// `staticlib` target, which is what you want (really)
StaticLib,
/// `cdylib` target (overide `staticlib` target since `staticlib` require no
/// `cargo-cabal` extra step that wouldn't require `cdylib`)
DynLib,
}
/// From a list a targets return the one that represent the strategy used by
/// `cargo-cabal`, return `None` when there is no target usable by `cargo-cabal`
/// like `rlib` or `dylib`.
pub(crate) fn get_crate_type(cargo: Root) -> Option<CrateType> {
let crate_type = cargo.lib?.crate_type?;
crate_type
.contains(&"cdylib".to_string())
.then_some(CrateType::DynLib)
.or_else(|| {
crate_type
.contains(&"staticlib".to_string())
.then_some(CrateType::StaticLib)
})
}
| yvan-sraka/cargo-cabal | 109 | A tool that helps you to turn in one command a Rust crate into a Haskell Cabal library! | Rust | yvan-sraka | Yvan Sraka | |
src/errors.rs | Rust | use displaydoc::Display;
use thiserror::Error;
/// CLI errors displayed by `cargo-cabal` to help end-users to set up correctly
/// their Rust project!
#[derive(Display, Error, Debug)]
pub enum Error {
/** Fail to read content of `Cargo.toml` file
* n.b. you have to run the command from the root folder of your Rust project
*/
NoCargoToml,
/// Fail to parse TOML content of `Cargo.toml` file
WrongCargoToml,
/** Your `Cargo.toml` file should contain a [package] section
* n.b. Cargo Workspace aren't currently supported by `cargo-cabal`
*/
NotCargoPackage,
/// Your `Cargo.toml` [package] section should contain a `name` field
NoCargoNameField,
/** Your `Cargo.toml` file should contain a [lib] section with a `crate-type` field
* that contains either `staticlib` or `cdylib` value, e.g.:
*
* [lib]
* crate-type = ["staticlib"]
*/
NoCargoLibTarget,
/// Fail to write `{0}` file
FailedToWriteFile(String),
/** `{0}.cabal`, `hsbindgen.toml`, `Setup.hs` or `Setup.lhs` file already exist,
* please back up it before re-running `cargo cabal init --overwrite` command
*/
CabalFilesExist(String),
/** `build.rs` file already exist, but `crates-type = [ "cdylib" ]` target
* need to generate one, please either remove this option or back up it
* before re-running `cargo cabal init --overwrite` command
*/
BuildFileExist,
/** `flake.nix` file already exist, but `--enable-nix` option need to
* generate one, please either remove this CLI arg or back up it before
* re-running `cargo cabal init --overwrite` command
*/
FlakeFileExist,
}
| yvan-sraka/cargo-cabal | 109 | A tool that helps you to turn in one command a Rust crate into a Haskell Cabal library! | Rust | yvan-sraka | Yvan Sraka | |
src/flake.rs | Rust | /// Generate content of a `flake.nix` using `haskell.nix` and `naersk` and which
/// is a good alternative to hacking with a custom `Setup.lhs`! This file is
/// generated by the `--enable-nix` CLI option.
pub(crate) fn generate(name: &str) -> String {
format!(
"{{
inputs = {{
haskell-nix.url = \"github:input-output-hk/haskell.nix\";
nixpkgs.follows = \"haskell-nix/nixpkgs-unstable\";
utils.url = \"github:numtide/flake-utils\";
naersk.url = \"github:nix-community/naersk/master\";
}};
outputs = {{ self, nixpkgs, utils, haskell-nix, naersk }}:
utils.lib.eachDefaultSystem (system:
let
naersk' = pkgs.callPackage naersk {{ }};
overlays = [
haskell-nix.overlay
(final: prev: {{
# Add `extra-libraries` dependencies
{name} = naersk'.buildPackage {{
src = ./.;
copyLibs = true;
}};
# This overlay adds our project to pkgs
project = final.haskell-nix.project' {{
src = ./.;
compiler-nix-name = \"ghc924\";
# This is used by `nix develop .` to open a shell for use with
# `cabal`, `hlint` and `haskell-language-server`
shell.tools = {{
cabal = \"latest\";
hlint = \"latest\";
haskell-language-server = \"latest\";
}};
# Non-Haskell shell tools go here
shell.buildInputs = [ ];
}};
}})
];
pkgs = import nixpkgs {{
inherit system overlays;
inherit (haskell-nix) config;
}};
flake = pkgs.project.flake {{ }};
in flake // {{
# Built by `nix build .`
packages.default = flake.packages.\"{name}:lib:{name}\";
}});
}}"
)
}
| yvan-sraka/cargo-cabal | 109 | A tool that helps you to turn in one command a Rust crate into a Haskell Cabal library! | Rust | yvan-sraka | Yvan Sraka | |
src/hsbindgen.rs | Rust | // FIXME: rather than living in this custom file, these options could be moved
// under an `[hs-bindgen]` manifest key directly in `Cargo.toml` (even if this
// would trigger a `unused manifest key` warning at `cargo build`)?
const VERSION: &str = "0.8.0";
/// Generate content of `hsbindgen.toml` file, a neat way to share config option
/// between `hs-bindgen` and `cargo-cabal`!
pub(crate) fn generate(module: &str) -> String {
format!(
"# Since the only `.cabal` format parser implementation and specification live
# in Cabal itself ... this deadly simple config file is used by `hs-bindgen`
# Rust crate to get needed data (like default exposed module name).
default = \"{module}\"
# There is an unlikely future where instead we have a Rust `.cabal` parser,
# that most likely would rely under the hood on a Haskell static lib wrapper
# of `Cabal.Parse` or https://hackage.haskell.org/package/Cabal-syntax library.
# But even in this case, it would be nice to know the `cargo-cabal` version that
# generated the `.cabal` file used.
version = \"{VERSION}\"",
)
}
| yvan-sraka/cargo-cabal | 109 | A tool that helps you to turn in one command a Rust crate into a Haskell Cabal library! | Rust | yvan-sraka | Yvan Sraka | |
src/lib.rs | Rust | mod cabal;
mod cargo;
#[macro_use]
mod errors;
mod flake;
mod hsbindgen;
use cargo::{get_crate_type, CrateType};
use clap::{arg, Parser, Subcommand};
use errors::Error;
use std::{fs, path::Path};
/// A tool that helps you to turn in one command a Rust crate into a Haskell Cabal library
#[derive(Parser)]
#[command(version)]
struct Args {
#[command(subcommand)]
cabal: Wrapper,
}
#[derive(Subcommand)]
enum Wrapper {
#[command(subcommand)]
Cabal(Commands),
}
#[derive(Subcommand)]
enum Commands {
/// Initialize the project by generating custom Cabal files
Init {
/// Generate a haskell.nix / naersk based flake.nix
#[arg(long)]
enable_nix: bool,
/// Run a clean before generating files
#[arg(long)]
overwrite: bool,
},
/// Remove files generated by cargo-cabal, except flake.nix
Clean,
}
// TODO: rather use https://crates.io/crates/cargo_metadata?!
struct CargoMetadata {
root: cargo::Root,
version: String,
name: String,
module: String,
}
/// Parse Cargo.toml file content ...
fn parse_cargo_toml() -> Result<CargoMetadata, Error> {
let cargo = fs::read_to_string("Cargo.toml").or(Err(Error::NoCargoToml))?;
let root: cargo::Root = toml::from_str(&cargo).or(Err(Error::WrongCargoToml))?;
let package = root.clone().package.ok_or(Error::NotCargoPackage)?;
let version = package.version.unwrap_or_else(|| "0.1.0.0".to_owned());
let name = package.name.ok_or(Error::NoCargoNameField)?;
let module = name
.split(&['-', '_'])
.map(|s| format!("{}{}", &s[..1].to_uppercase(), &s[1..]))
.collect::<Vec<String>>()
.join("");
Ok(CargoMetadata {
root,
version,
name,
module,
})
}
/// Parse `cargo-cabal` CLI arguments
pub fn parse_cli_args(args: Vec<String>) -> Result<(), Error> {
let metadata = parse_cargo_toml()?;
match Args::parse_from(args).cabal {
Wrapper::Cabal(command) => match command {
Commands::Init { .. } => cmd_init(command, metadata),
Commands::Clean => cmd_clean(&metadata.name),
},
}
}
/// Initialize the project by generating custom Cabal files
fn cmd_init(args: Commands, metadata: CargoMetadata) -> Result<(), Error> {
let Commands::Init {
enable_nix,
overwrite,
} = args else { unreachable!() };
let CargoMetadata {
root,
version,
name,
module,
} = metadata;
// `cargo cabal init --overwrite` == `cargo cabal clean && cargo cabal init`
if overwrite {
cmd_clean(&name)?;
}
// Check that project have a `crate-type` target ...
let crate_type = get_crate_type(root).ok_or(Error::NoCargoLibTarget)?;
// Check that `cargo cabal init` have not been already run ...
let cabal = format!("{name}.cabal");
(!(Path::new(&cabal).exists()
|| Path::new(".hsbindgen").exists()
|| Path::new("hsbindgen.toml").exists()
|| Path::new("Setup.hs").exists()
|| Path::new("Setup.lhs").exists()))
.then_some(())
.ok_or_else(|| Error::CabalFilesExist(name.to_owned()))?;
// ... and that no existing file would conflict ...
if crate_type == CrateType::DynLib {
(!Path::new("build.rs").exists())
.then_some(())
.ok_or(Error::BuildFileExist)?;
}
if enable_nix {
(!Path::new("flake.rs").exists())
.then_some(())
.ok_or(Error::FlakeFileExist)?;
}
// Generate wanted files from templates ... starting by a `.cabal` ...
fs::write(
cabal.clone(),
cabal::generate(&name, &module, &version, enable_nix),
)
.or(Err(Error::FailedToWriteFile(cabal)))?;
// `hsbindgen.toml` is a config file readed by `#[hs_bindgen]` proc macro ...
fs::write("hsbindgen.toml", hsbindgen::generate(&module))
.map_err(|_| Error::FailedToWriteFile("hsbindgen.toml".to_owned()))?;
// If `crate-type = [ "cdylib" ]` then a custom `build.rs` is needed ...
if crate_type == CrateType::DynLib {
fs::write("build.rs", include_str!("build.rs"))
.map_err(|_| Error::FailedToWriteFile("build.rs".to_owned()))?;
}
// `--enable-nix` CLI option generate a `flake.nix` rather than a `Setup.lhs`
if enable_nix {
fs::write("flake.nix", flake::generate(&name))
.map_err(|_| Error::FailedToWriteFile("flake.nix".to_owned()))?;
} else {
fs::write("Setup.lhs", include_str!("Setup.lhs"))
.map_err(|_| Error::FailedToWriteFile("Setup.lhs".to_owned()))?;
}
println!(
"\
Cabal files generated!
**********************
You should now be able to compile your library with `cabal build` and should
add `hs-bindgen` to your crate dependencies list and decorate the Rust function
you want to expose with `#[hs_bindgen]` attribute macro."
);
Ok(())
}
/// Remove files generated by cargo-cabal, except flake.nix
fn cmd_clean(name: &str) -> Result<(), Error> {
let _ = fs::remove_file(format!("{name}.cabal"));
let _ = fs::remove_file(".hsbindgen");
let _ = fs::remove_file("hsbindgen.toml");
let _ = fs::remove_file("Setup.hs");
let _ = fs::remove_file("Setup.lhs");
Ok(())
}
| yvan-sraka/cargo-cabal | 109 | A tool that helps you to turn in one command a Rust crate into a Haskell Cabal library! | Rust | yvan-sraka | Yvan Sraka | |
src/main.rs | Rust | //! # `cargo-cabal`
//!
//! A tool that helps you to turn in one command a Rust crate into a Haskell
//! Cabal library!
//!
//! To generate bindings, you need to annotate the Rust function you want to
//! expose with [`hs-bindgen`](https://github.com/yvan-sraka/hs-bindgen) macro.
//!
//! ## Getting started
//!
//! Here a little screencast demonstrating how it works (commands walkthrough
//! are just pasted below):
//!
//! 
//!
//! > **N.B.** You need in your `$PATH` a working Rust and Haskell environment,
//! > if you use [Nix](https://nixos.org) you can just enter:
//! > `nix-shell -p cabal-install ghc cargo rustc`
//!
//! ---
//!
//! Welcome in this little `cargo-cabal` / `hs-bindgen` demo 🙂
//!
//! Let's start by creating a dumb Rust library!
//!
//! ```text
//! $ cargo new --lib greetings
//! Created library `greetings` package
//!
//! $ tree greetings
//! greetings
//! ├── Cargo.toml
//! └── src
//! └── lib.rs
//!
//! 1 directory, 2 files
//!
//! $ cd greetings
//! ```
//!
//! Add `hs-bindgen` to the dependencies list:
//!
//! ```text
//! $ cargo add hs-bindgen --features full
//! Updating crates.io index
//! Adding hs-bindgen v0.8.0 to dependencies.
//! Features:
//! + antlion
//! + full
//! + std
//! ```
//!
//! And use it to decorate the function we want to expose:
//!
//! * `src/lib.rs`:
//!
//! ```rust
//! use hs_bindgen::*;
//!
//! #[hs_bindgen]
//! fn hello(name: &str) {
//! println!("Hello, {name}!");
//! }
//! ```
//!
//! ```text
//! $ cargo build
//! Compiling proc-macro2 v1.0.47
//! Compiling quote v1.0.21
//! Compiling unicode-ident v1.0.5
//! Compiling syn v1.0.105
//! Compiling serde_derive v1.0.149
//! Compiling semver-parser v0.7.0
//! Compiling serde v1.0.149
//! Compiling thiserror v1.0.37
//! Compiling antlion v0.3.1
//! Compiling semver v0.9.0
//! Compiling semver v1.0.14
//! Compiling lazy_static v1.4.0
//! Compiling hs-bindgen-traits v0.8.0
//! Compiling rustc_version v0.2.3
//! Compiling hs-bindgen-attribute v0.7.2
//! Compiling thiserror-impl v1.0.37
//! Compiling displaydoc v0.2.3
//! Compiling hs-bindgen-types v0.8.0
//! Compiling toml v0.5.9
//! Compiling hs-bindgen v0.8.0
//! Compiling greetings v0.1.0 (/Users/yvan/demo/greetings)
//! error: custom attribute panicked
//! --> src/lib.rs:3:1
//! |
//! 3 | #[hs_bindgen]
//! | ^^^^^^^^^^^^^
//! |
//! = help: message: fail to read content of `hsbindgen.toml` configuration file
//! n.b. you have to run the command `cargo-cabal` to generate it: Os { code: 2, kind: NotFound, message: "No such file or directory" }
//!
//! error: could not compile `greetings` due to previous error
//! ```
//!
//! So, we will use `cargo-cabal` to check our setup and generate Cabal files:
//!
//! ```text
//! $ cargo install cargo-cabal
//! Updating crates.io index
//! Ignored package `cargo-cabal v0.7.0` is already installed, use --force to override
//!
//! $ cargo cabal init
//! Error: Your `Cargo.toml` file should contain a [lib] section with a `crate-type` field
//! that contains either `staticlib` or `cdylib` value, e.g.:
//!
//! [lib]
//! crate-type = ["staticlib"]
//! ```
//!
//! > **N.B.** if you're a Nix user, rather than rely on impure `cargo install`,
//! > feel free to just `nix run github:yvan-sraka/cargo-cabal -- cabal init`
//!
//! Right, we edit the `Cargo.toml` accordingly:
//!
//! * `Cargo.toml`:
//!
//! ```toml
//! [package]
//! name = "greetings"
//! version = "0.1.0"
//! edition = "2021"
//!
//! # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
//!
//! [dependencies]
//! hs-bindgen = { version = "0.8.0", features = ["full"] }
//!
//! [lib]
//! crate-type = ["staticlib"]
//! ```
//!
//! ```text
//! $ cargo cabal init
//! Cabal files generated!
//! **********************
//! You should now be able to compile your library with `cabal build` and should
//! add `hs-bindgen` to your crate dependencies list and decorate the Rust function
//! you want to expose with `#[hs_bindgen]` attribute macro.
//!
//! $ ls
//! Cargo.lock Cargo.toml Setup.lhs greetings.cabal src target
//! ```
//!
//! ```text
//! $ cargo build
//! Compiling greetings v0.1.0 (/Users/yvan/demo/greetings)
//! Finished dev [unoptimized + debuginfo] target(s) in 1.06s
//!
//! $ cabal build
//! Build profile: -w ghc-9.0.2 -O1
//! In order, the following will be built (use -v for more details):
//! - greetings-0.1.0 (lib:greetings) (first run)
//! [1 of 1] Compiling Main ( omitted ... )
//! Linking /Users/yvan/demo/dist-newstyle/build/aarch64-osx/ghc-9.0.2/greetings-0.1.0/setup/setup ...
//! Configuring greetings-0.1.0...
//! Preprocessing library for greetings-0.1.0..
//! Building library for greetings-0.1.0..
//! [1 of 1] Compiling Greetings ( src/Greetings.hs, omitted ... )
//! ```
//!
//! It works! And so `cargo build` too if you just want to use the library in a
//! Rust project!
//!
//! ---
//!
//! Now let's try to use our freshly generated library in an Haskell app 😉
//!
//! ```text
//! $ cd ..
//! $ cabal init --non-interactive test
//! [Log] Guessing dependencies...
//! [Log] Using cabal specification: 3.8
//! [Warning] unknown license type, you must put a copy in LICENSE yourself.
//! [Log] Creating fresh file CHANGELOG.md...
//! [Log] Creating fresh directory ./app...
//! [Log] Creating fresh file app/Main.hs...
//! [Log] Creating fresh file test.cabal...
//! [Warning] No synopsis given. You should edit the .cabal file and add one.
//! [Info] You may want to edit the .cabal file and add a Description field.
//!
//! $ tree test
//! test
//! ├── app
//! │ └── Main.hs
//! ├── CHANGELOG.md
//! └── test.cabal
//!
//! 1 directory, 3 files
//! ```
//!
//! We create a `cabal.project` (equivalent to cargo workspace) to perform a
//! local test without having to upload `greetings` on hackage:
//!
//! * `cabal.project`:
//!
//! ```cabal
//! packages: ./greetings ./test
//! ```
//!
//! We edit `test.cabal` to make it depends on `greetings` library:
//!
//! * `test/test.cabal` (content partially omitted):
//!
//! ```cabal
//! executable test
//! -- Other library packages from which modules are imported.
//! build-depends: base, greetings
//! ```
//!
//! We write a minimalist `main` function that will make call `hello` from
//! `Greetings` module
//!
//! * `test/app/Main.hs`:
//!
//! ```haskell
//! module Main where
//!
//! import Foreign.C.String
//! import Greetings
//!
//! main :: IO ()
//! main = withCString "Rust 🦀" hello
//! ```
//!
//! Let's check if everything works as expected:
//!
//! ```text
//! $ cabal run test
//! Build profile: -w ghc-9.0.2 -O1
//! In order, the following will be built (use -v for more details):
//! - test-0.1.0.0 (exe:test) (first run)
//! Configuring executable 'test' for test-0.1.0.0..
//! Preprocessing executable 'test' for test-0.1.0.0..
//! Building executable 'test' for test-0.1.0.0..
//! [1 of 1] Compiling Main ( app/Main.hs, omitted ... )
//! Linking /Users/yvan/demo/dist-newstyle/build/aarch64-osx/ghc-9.0.2/test-0.1.0.0/x/test/build/test/test ...
//! Hello, Rust 🦀!
//! ```
//!
//! Now let's see if we can use the GHCi repl to call the functions defined in Rust.
//!
//! ```text
//! λ> withCString "aaa" hello
//! ghc-9.4.8: ^^ Could not load '__c_hello', dependency unresolved. See top entry above.
//!
//!
//! GHC.ByteCode.Linker: can't find label
//! During interactive linking, GHCi couldn't find the following symbol:
//! __c_hello
//! This may be due to you not asking GHCi to load extra object files,
//! archives or DLLs needed by your current session. Restart GHCi, specifying
//! the missing library using the -L/path/to/object/dir and -lmissinglibname
//! flags, or simply by naming the relevant files on the GHCi command line.
//! Alternatively, this link failure might indicate a bug in GHCi.
//! If you suspect the latter, please report this as a GHC bug:
//! https://www.haskell.org/ghc/reportabug
//! ```
//!
//! It seems like GHCi doesn't know how to link the library containing external
//! functions. To fix this we first need to change the type of the Rust crate to
//! `cdylib` in `Cargo.toml`. The reason why we need to do this is that GHCi can
//! only load dynamic libraries, not static ones.
//!
//! ```toml
//! [lib]
//! crate-type = ["cdylib"]
//! ```
//! Now we need to tell GHCi explicitly where to look for those libraries. We can
//! do that by specifying the path in a `.ghci` file at the root of the project.
//!
//! ```ghci
//! :set -Ltarget/debug -lgreetings
//! ```
//!
//! After rebuilding the project with the necessary changes we can try once again:
//!
//! ```text
//! $ cabal repl
//! Build profile: -w ghc-9.4.8 -O1
//! In order, the following will be built (use -v for more details):
//! - greetings-0.1.0 (ephemeral targets)
//! Preprocessing library for greetings-0.1.0..
//! GHCi, version 9.4.8: https://www.haskell.org/ghc/ :? for help
//! Loaded GHCi configuration from /path/to/project/greetings/.ghci
//! [1 of 1] Compiling Greetings ( src/Greetings.hs, interpreted )
//! Ok, one module loaded.
//! λ> withCString "Rust" hello
//! Hello, Rust!
//! ```
//!
//! That's all folks! Happy hacking 🙂
//!
//! ## Nix support
//!
//! The `--enable-nix` CLI arg makes `cargo-cabal` generate a
//! [haskell.nix](https://github.com/input-output-hk/haskell.nix) /
//! [naersk](https://github.com/nix-community/naersk) based `flake.nix` rather
//! than the `Setup.lhs`.
//!
//! > **N.B.** when first working with `hs-bindgen` and Nix flakes, checking if
//! > `Cargo.lock` isn't in `.gitignore` and running `cargo build` and
//! > `git add --all` before `nix build`, will save you a lot of pain 😉
//!
//! ## Acknowledgments
//!
//! ⚠️ This is still a working experiment, not yet production ready.
//!
//! `cargo-cabal` was heavily inspired by other interoperability initiatives, as
//! [`wasm-pack`](https://github.com/rustwasm/wasm-pack) and
//! [`Maturin`](https://github.com/PyO3/maturin).
//!
//! This project was part of a work assignment as an
//! [IOG](https://github.com/input-output-hk) contractor.
//!
//! ## License
//!
//! Licensed under either of [Apache License](LICENSE-APACHE), Version 2.0 or
//! [MIT license](LICENSE-MIT) at your option.
//!
//! Unless you explicitly state otherwise, any contribution intentionally submitted
//! for inclusion in this project by you, as defined in the Apache-2.0 license,
//! shall be dual licensed as above, without any additional terms or conditions.
#![forbid(unsafe_code)]
fn main() {
let args: Vec<String> = std::env::args().collect();
if let Err(e) = lib::parse_cli_args(args) {
println!("{}{}", ansi_term::Colour::Red.bold().paint("Error: "), e);
}
}
| yvan-sraka/cargo-cabal | 109 | A tool that helps you to turn in one command a Rust crate into a Haskell Cabal library! | Rust | yvan-sraka | Yvan Sraka | |
tests/greetings/src/lib.rs | Rust | use hs_bindgen::*;
// Expose `hello` to Haskell: the attribute generates a `__c_hello` C-ABI
// wrapper and appends a matching `foreign import` to the generated module.
#[hs_bindgen]
fn hello(name: &str) {
    println!("Hello, {name}!");
}
| yvan-sraka/cargo-cabal | 109 | A tool that helps you to turn in one command a Rust crate into a Haskell Cabal library! | Rust | yvan-sraka | Yvan Sraka | |
tests/haskell-nix.sh | Shell | #! /usr/bin/env nix-shell
#! nix-shell -i bash -p cargo rustc
# Fail fast: -e abort on error, -u on undefined vars, -x trace, pipefail
set -euxo pipefail
# Work inside the `greetings` example crate
pushd greetings
# Add the hs-bindgen dependency with the `full` feature (type inference)
cargo add hs-bindgen --features full
# Generate Cabal packaging plus a Nix flake (overwriting previous output)
../../result/bin/cargo-cabal cabal init --overwrite --enable-nix
# Nix flakes only see files tracked by git, so stage the generated flake
git add flake.nix
nix build
| yvan-sraka/cargo-cabal | 109 | A tool that helps you to turn in one command a Rust crate into a Haskell Cabal library! | Rust | yvan-sraka | Yvan Sraka | |
tests/simple.sh | Shell | #! /usr/bin/env nix-shell
#! nix-shell -i bash -p cargo rustc cabal-install ghc
# Fail fast: -e abort on error, -u on undefined vars, -x trace, pipefail
set -euxo pipefail
# Work inside the `greetings` example crate
pushd greetings
# Start from a clean slate so the whole pipeline is exercised
cargo clean
# Add the hs-bindgen dependency with the `full` feature (type inference)
cargo add hs-bindgen --features full
# Generate the Cabal packaging files (overwriting previous output)
../../result/bin/cargo-cabal cabal init --overwrite
# Build the crate so the library exists for GHC to link against
cargo build
popd
# Build and run the Haskell test executable against the generated bindings
cabal clean
cabal run test
| yvan-sraka/cargo-cabal | 109 | A tool that helps you to turn in one command a Rust crate into a Haskell Cabal library! | Rust | yvan-sraka | Yvan Sraka | |
tests/test/app/Main.hs | Haskell | module Main where
import Foreign.C.String
import Greetings
-- | Entry point: marshal the Haskell string into a C string and pass it to
-- the Rust-exported `hello` binding.
main :: IO ()
main = withCString "Rust 🦀" hello
| yvan-sraka/cargo-cabal | 109 | A tool that helps you to turn in one command a Rust crate into a Haskell Cabal library! | Rust | yvan-sraka | Yvan Sraka | |
src/lib.rs | Rust | //! # `hs-bindgen`
//!
//! Handy macro to generate C-FFI bindings to Rust for Haskell.
//!
//! This library is intended to work best in a project configured by
//! [`cargo-cabal`](https://github.com/yvan-sraka/cargo-cabal).
//!
//! **N.B.** The MSRV is **1.64.0** since it uses the `core_ffi_c` feature.
//!
//! ## Examples
//!
//! A minimal example would be to have a function annotated like this:
//!
//! ```rust
//! use hs_bindgen::*;
//!
//! /// Haskell type signatures are auto-magically inferred from Rust function
//! /// types! This feature could slow down compilation, and be enabled with:
//! /// `hs-bindgen = { ..., features = [ "full" ] }`
//! #[hs_bindgen]
//! fn greetings(name: &str) {
//! println!("Hello, {name}!");
//! }
//! ```
//!
//! This will be expanded to (you can try yourself with `cargo expand`):
//!
//! ```rust
//! use hs_bindgen::*;
//!
//! fn greetings(name: &str) {
//! println!("Hello, {name}!");
//! }
//!
//! #[no_mangle] // Mangling makes symbol names more difficult to predict.
//! // We disable it to ensure that the resulting symbol is really `__c_greetings`.
//! extern "C" fn __c_greetings(__0: *const core::ffi::c_char) -> () {
//! // `traits` module is `hs-bindgen::hs-bindgen-traits`
//! // n.b. do not forget to import it, e.g., with `use hs-bindgen::*`
//! traits::FromReprC::from(greetings(traits::FromReprRust::from(__0),))
//! }
//! ```
//!
//! A more complete example, that use `borsh` to serialize ADT from Rust to Haskell
//! can be found [here](https://github.com/yvan-sraka/hs-bindgen-borsh-example).
//!
//! ## Design
//!
//! First, I would thank [Michael Gattozzi](https://twitter.com/mgattozzi) who
//! implement [a (no longer maintained) implementation](https://github.com/mgattozzi/curryrs)
//! to binding generation between Rust and Haskell and
//! [his writings](https://blog.mgattozzi.dev/haskell-rust/) and guidance
//! really help me to quick start this project.
//!
//! I try to architect `hs-bindgen` with these core design principles:
//!
//! - **Simplicity:** as KISS UNIX philosophy of minimalism, meaning here I
//! tried to never re-implement feature already handled by Rust programming
//! language (parsing code, infer types, etc.), I rather rely on capabilities
//! of macro and trait systems. E.g. the only bit of parsing left in this
//!   code is the Haskell function signature (which is trivial given the feature
//! set of authorized C-FFI safe types) ;
//!
//! - **Modularity:** this library is designed to work in a broader range
//! of usage, so this library should work in `#[no_std]` setting and most
//! features could be opt-out. E.g. the type inference offered by
//! [`antlion`](https://github.com/yvan-sraka/antlion) library is optional ;
//!
//! - **Stability:** this library implements no trick outside the scope of
//! stable C ABI (with well-defined memory layout convention), and ensure to
//! provide ergonomics without breaking this safety rule of thumb. There is
//!   no magic that could be broken by any `rustc` or GHC update!
//!
//! ## Acknowledgments
//!
//! ⚠️ This is still a working experiment, not yet production ready.
//!
//! `hs-bindgen` was heavily inspired by other interoperability initiatives, as
//! [`wasm-bindgen`](https://github.com/rustwasm/wasm-bindgen) and
//! [`PyO3`](https://github.com/PyO3/pyo3).
//!
//! This project was part of a work assignment as an
//! [IOG](https://github.com/input-output-hk) contractor.
//!
//! ## License
//!
//! Licensed under either of [Apache License](LICENSE-APACHE), Version 2.0 or
//! [MIT license](LICENSE-MIT) at your option.
//!
//! Unless you explicitly state otherwise, any contribution intentionally submitted
//! for inclusion in this project by you, as defined in the Apache-2.0 license,
//! shall be dual licensed as above, without any additional terms or conditions.
#![forbid(unsafe_code)]
pub use hs_bindgen_attribute::hs_bindgen;
pub use hs_bindgen_traits as traits;
| yvan-sraka/hs-bindgen | 70 | Handy macro to generate C-FFI bindings to Rust for Haskell | Rust | yvan-sraka | Yvan Sraka | |
build.rs | Rust | //! Enable proc-macro diagnostics by default when toolchain is set on nightly!
/// Emit `cargo:rustc-cfg=DIAGNOSTICS` when compiling on a nightly toolchain,
/// so nightly-only proc-macro diagnostics can be cfg-gated in the crate.
fn main() {
    let nightly = rustc_version::version_meta()
        .map(|meta| meta.channel == rustc_version::Channel::Nightly)
        .unwrap_or(false);
    if nightly {
        println!("cargo:rustc-cfg=DIAGNOSTICS");
    }
}
| yvan-sraka/hs-bindgen-attribute | 1 | Handy macro to generate C-FFI bindings from Rust to Haskell. | Rust | yvan-sraka | Yvan Sraka | |
src/haskell.rs | Rust | use displaydoc::Display;
use hs_bindgen_types::{ArrowIter, HsType};
use std::str::FromStr;
use thiserror::Error;
/// Produce the content of `lib/{module}.hs` given a list of Signature
pub(crate) fn template(module: &str, signatures: &[Signature]) -> String {
    // Nested module paths use `/` on the Rust side; Haskell module names
    // want dots.
    let modulename = module.replace("/", ".");
    // Export list of the generated Haskell module: one name per signature.
    let names = signatures
        .iter()
        .map(|x| x.fn_name.clone())
        .collect::<Vec<String>>()
        .join(", ");
    // One `foreign import ccall` line per exposed Rust function, binding the
    // `__c_{name}` symbol emitted by the Rust wrapper.
    let imports = signatures
        .iter()
        .map(|sig| {
            format!(
                "foreign import ccall {} \"__c_{}\" {sig}",
                if sig.fn_safe {
                    "safe"
                } else {
                    // `unsafe` imports lock the GHC runtime during the call:
                    // warn the user about what they opted into.
                    warning(sig);
                    "unsafe"
                },
                sig.fn_name
            )
        })
        .collect::<Vec<String>>()
        .join("\n");
    format!(
        "-- This file was generated by `hs-bindgen` crate and contains C FFI bindings
-- wrappers for every Rust function annotated with `#[hs_bindgen]`
{{-# LANGUAGE ForeignFunctionInterface #-}}
-- Why not rather using `{{-# LANGUAGE CApiFFI #-}}` language extension?
--
-- * Because it's GHC specific and not part of the Haskell standard:
--   https://ghc.gitlab.haskell.org/ghc/doc/users_guide/exts/ffi.html ;
--
-- * Because the capabilities it gave (by rather works on top of symbols of a C
--   header file) can't work in our case. Maybe we want a future with an
--   {{-# LANGUAGE RustApiFFI #-}} language extension that would enable us to
--   work on top of a `.rs` source file (or a `.rlib`, but this is unlikely as
--   this format has purposely no public specification).
{{-# OPTIONS_GHC -Wno-unused-imports #-}}
module {modulename} ({names}) where
import Data.Int
import Data.Word
import Foreign.C.String
import Foreign.C.Types
import Foreign.Ptr
{imports}"
    )
}
/// Warn user about what Haskell `unsafe` keyword does ...
pub(crate) fn warning(_sig: &Signature) {
    // `proc_macro::Diagnostic` is nightly-only; on stable toolchains the
    // DIAGNOSTICS cfg (set by build.rs) is absent and this is a no-op.
    #[cfg(DIAGNOSTICS)]
    proc_macro::Diagnostic::spanned(
        [proc_macro::Span::call_site()].as_ref(),
        proc_macro::Level::Warning,
        format!(
            "Using: `foreign import ccall unsafe __c_, {} {_sig}`
means that Haskell Garbage-Collector will be locked during the foreign call.
/!\\ Do not use it for long computations in a multithreaded application or
it will slow down a lot your whole program ...",
            _sig.fn_name
        ),
    )
    .emit();
}
// NOTE: `displaydoc::Display` derives each variant's `Display` output from
// its doc comment — the `/**` text below IS the user-facing error message,
// so edit it only deliberately.
#[derive(Display, Error, Debug)]
pub enum Error {
    /** you should provide targeted Haskell type signature as attribute:
     * `#[hs_bindgen(HS SIGNATURE)]`
     */
    MissingSig,
    /** given Haskell function definition is `{0}` but should have the form:
     * `NAME :: TYPE`
     *
     * n.b. you can prefix function name like "unsafe NAME :: TYPE" and it will
     * expand as: foreign import ccall unsafe __c_NAME NAME :: TYPE (knowing it
     * default to foreign import ccall safe __c_NAME NAME :: TYPE ) ...
     * ... /!\ Hope you know what you're doing!
     */
    MalformedSig(String),
    /// Haskell type error: {0}
    HsType(String),
}
/// Data structure that represent an Haskell function signature:
/// {fn_name} :: {fn_type[0]} -> {fn_type[1]} -> ... -> {fn_type[n-1]}
///
/// FIXME: consider moving this struct and its traits' implementation into
/// `hs-bindgen-types`
pub(crate) struct Signature {
    /// Exposed function name (without the `__c_` C-symbol prefix).
    pub(crate) fn_name: String,
    /// `true` -> `foreign import ccall safe`; `false` -> `unsafe` import.
    pub(crate) fn_safe: bool,
    /// Arrow chain of types: arguments followed by the return type.
    pub(crate) fn_type: Vec<HsType>,
}
impl std::fmt::Display for Signature {
    /// Render the signature as Haskell source: `name :: T1 -> T2 -> ... -> Tn`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let rendered: Vec<String> = self.fn_type.iter().map(|ty| ty.to_string()).collect();
        write!(f, "{} :: {}", self.fn_name, rendered.join(" -> "))
    }
}
impl FromStr for Signature {
    type Err = Error;
    // Parse `[unsafe |safe ]NAME :: TYPE -> ... -> TYPE` into a `Signature`.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // An empty attribute means the user gave no Haskell signature at all.
        (!s.is_empty()).then_some(()).ok_or(Error::MissingSig)?;
        let mut x = s.split("::");
        let fn_name = x.next().ok_or(Error::MissingSig)?.trim();
        // The default is a `safe` foreign import; an explicit `unsafe `
        // prefix opts out (see `warning` above for the consequences).
        let fn_safe = !fn_name.starts_with("unsafe ");
        let fn_name = if fn_safe {
            fn_name.trim_start_matches("safe ")
        } else {
            fn_name.trim_start_matches("unsafe ")
        }
        .trim_start()
        .to_string();
        // Split the arrow chain at the top level only (`ArrowIter` keeps
        // parenthesized arrows intact) and parse each piece as an `HsType`.
        let fn_type = ArrowIter::from(x.next().ok_or_else(|| Error::MalformedSig(s.to_string()))?)
            .map(|ty| {
                ty.parse::<HsType>()
                    .map_err(|ty| Error::HsType(ty.to_string()))
            })
            .collect::<Result<Vec<HsType>, Error>>()?;
        // A second `::` is malformed; panicking is the proc-macro error path.
        assert!(x.next().is_none(), "{}", Error::MalformedSig(s.to_string()));
        Ok(Signature {
            fn_name,
            fn_safe,
            fn_type,
        })
    }
}
| yvan-sraka/hs-bindgen-attribute | 1 | Handy macro to generate C-FFI bindings from Rust to Haskell. | Rust | yvan-sraka | Yvan Sraka | |
src/lib.rs | Rust | //! # `hs-bindgen-attribute`
//!
//! This library define the `#[hs_bindgen]` procedural macro used by
//! [`hs-bindgen`](https://github.com/yvan-sraka/hs-bindgen) library.
//!
//! ## Acknowledgments
//!
//! ⚠️ This is still a working experiment, not yet production ready.
//!
//! This project was part of a work assignment as an
//! [IOG](https://github.com/input-output-hk) contractor.
//!
//! ## License
//!
//! Licensed under either of [Apache License](LICENSE-APACHE), Version 2.0 or
//! [MIT license](LICENSE-MIT) at your option.
//!
//! Unless you explicitly state otherwise, any contribution intentionally submitted
//! for inclusion in this project by you, as defined in the Apache-2.0 license,
//! shall be dual licensed as above, without any additional terms or conditions.
#![forbid(unsafe_code)]
#![cfg_attr(DIAGNOSTICS, feature(proc_macro_diagnostic))]
use proc_macro::TokenStream;
use std::{fs, path::Path, sync::Mutex};
mod haskell;
mod reflexive;
mod rust;
mod toml;
#[proc_macro_attribute]
pub fn hs_bindgen(attrs: TokenStream, input: TokenStream) -> TokenStream {
    // Keep the original item untouched: the macro only *appends* a wrapper.
    let mut output = input.clone();
    let item_fn: syn::ItemFn = syn::parse(input)
        .expect("failed to parse as Rust code the content of `#[hs_bindgen]` macro");
    // Generate extra Rust code that wrap our exposed function ...
    let (signature, extern_c_wrapper) = rust::generate(attrs, item_fn);
    // Neat hack to keep track of all exposed functions ...
    // NOTE(review): this `static` persists across macro invocations within
    // one compiler process — presumably expansion order is stable enough for
    // the generated file; confirm behavior under incremental compilation.
    static SIGNATURES: Mutex<Vec<haskell::Signature>> = Mutex::new(vec![]);
    let signatures = &mut *SIGNATURES.lock().unwrap();
    signatures.push(signature);
    // Generate Haskell bindings into module defined in `hsbindgen.toml` config ...
    let module = toml::config()
        .default
        .expect("your `hsbindgen.toml` file should contain a `default` field");
    let cargo_manifest_dir = std::env::var("CARGO_MANIFEST_DIR")
        .expect("environment variable `CARGO_MANIFEST_DIR` must be set");
    // The full accumulated list is re-rendered on every invocation, so
    // `src/{module}.hs` always reflects every function seen so far.
    let path = Path::new(&cargo_manifest_dir).join(format!("src/{}.hs", module));
    fs::write(&path, haskell::template(&module, signatures))
        .unwrap_or_else(|_| panic!("fail to write `{}` file", path.display()));
    output.extend(extern_c_wrapper);
    output
}
| yvan-sraka/hs-bindgen-attribute | 1 | Handy macro to generate C-FFI bindings from Rust to Haskell. | Rust | yvan-sraka | Yvan Sraka | |
src/reflexive.rs | Rust | use crate::haskell;
#[cfg(feature = "reflexive")]
use hs_bindgen_types::HsType;
// One shared scratch project, created lazily and reused by every
// `#[hs_bindgen]` expansion to evaluate `ReprHs` type reflection.
#[cfg(feature = "reflexive")]
lazy_static::lazy_static! {
    static ref SANDBOX: reflexive::Sandbox =
        reflexive::Sandbox::new("hs-bindgen")
            .unwrap()
            .deps(&["hs-bindgen-types@0.8"])
            .unwrap()
        ;
}
/// Use Rust type inference (inside a `reflexive` sandbox) to deduce targeted
/// Haskell type signature that match a given `TokenStream` of a Rust `fn`
pub(crate) trait Eval<T> {
    /// Deduce a value of `Self` from `T` (a local stand-in for `From` so the
    /// conversion can be feature-gated and allowed to panic).
    fn from(_: T) -> Self;
}
impl Eval<&syn::ItemFn> for haskell::Signature {
    // With `reflexive`: infer the Haskell signature by evaluating each Rust
    // argument type's `ReprHs` impl inside the shared sandbox.
    #[cfg(feature = "reflexive")]
    fn from(item_fn: &syn::ItemFn) -> Self {
        let fn_name = item_fn.sig.ident.to_string();
        // Inferred signatures always default to a `safe` foreign import.
        let fn_safe = true;
        let mut fn_type = vec![];
        for arg in &item_fn.sig.inputs {
            fn_type.push(<HsType as Eval<&syn::Type>>::from(match arg {
                syn::FnArg::Typed(p) => &p.ty,
                _ => panic!("functions using `self` are not supported by `hs-bindgen`"),
            }));
        }
        // The return type is wrapped in `IO` (FFI calls are effectful); a
        // missing return type maps to `IO ()`.
        fn_type.push(HsType::IO(Box::new(match &item_fn.sig.output {
            syn::ReturnType::Type(_, p) => <HsType as Eval<&syn::Type>>::from(p),
            _ => HsType::Empty,
        })));
        haskell::Signature {
            fn_name,
            fn_safe,
            fn_type,
        }
    }
    // Without `reflexive` the caller requires an explicit attribute
    // signature and never takes this path, hence `unreachable!`.
    #[cfg(not(feature = "reflexive"))]
    fn from(_: &syn::ItemFn) -> Self {
        unreachable!()
    }
}
#[cfg(feature = "reflexive")]
impl Eval<&syn::Type> for HsType {
    // Evaluate `<T as ReprHs>::into()` in the sandbox crate to recover the
    // matching Haskell type for an arbitrary Rust type token at expansion
    // time; panics (proc-macro error path) when `T` lacks a `ReprHs` impl.
    fn from(ty: &syn::Type) -> HsType {
        use quote::quote;
        SANDBOX
            .eval(quote! {
                <#ty as hs_bindgen_types::ReprHs>::into()
            })
            .unwrap_or_else(|_| {
                panic!(
                    "type `{}` doesn't implement `ReprHs` trait
consider opening an issue https://github.com/yvan-sraka/hs_bindgen_types
n.b. if you trying to use a custom defined type, you need to specify the
Haskell type signature of your binding: #[hs_bindgen(HASKELL TYPE SIGNATURE)]",
                    quote! { #ty }
                )
            })
    }
}
/// Warn user about the build-time cost of relying on `reflexive` ...
///
/// n.b. proc-macro diagnostics require nightly `proc_macro_diagnostic` feature
pub(crate) fn warning(_sig: &haskell::Signature) {
    // Diagnostics are nightly-only (DIAGNOSTICS cfg set by build.rs); on
    // stable toolchains this function is a silent no-op.
    #[cfg(DIAGNOSTICS)]
    proc_macro::Diagnostic::spanned(
        [proc_macro::Span::call_site()].as_ref(),
        proc_macro::Level::Warning,
        format!(
            "Implicit Haskell signature declaration could slow down compilation,
rather derive it as: #[hs_bindgen({_sig})]"
        ),
    )
    .emit();
}
| yvan-sraka/hs-bindgen-attribute | 1 | Handy macro to generate C-FFI bindings from Rust to Haskell. | Rust | yvan-sraka | Yvan Sraka | |
src/rust.rs | Rust | use crate::{haskell, reflexive};
use hs_bindgen_types::HsType;
use proc_macro::TokenStream;
use quote::{format_ident, quote};
/// Generate extra Rust code that wrap our exposed function
pub(crate) fn generate(
    attrs: TokenStream,
    item_fn: syn::ItemFn,
) -> (haskell::Signature, TokenStream) {
    let rust_fn = format_ident!("{}", item_fn.sig.ident.to_string());
    // Parse targeted Haskell function signature either from proc macro
    // attributes or either from types from Rust `fn` item (using feature
    // `reflexive` which is enabled by default) ...
    let mut sig = {
        let s = attrs.to_string();
        if cfg!(feature = "reflexive") && s.is_empty() {
            let sig = <haskell::Signature as reflexive::Eval<&syn::ItemFn>>::from(&item_fn);
            reflexive::warning(&sig);
            sig
        } else {
            s.parse().unwrap_or_else(|e| panic!("{e}"))
        }
    };
    // Ensure that signature not contain too much args ...
    // NOTE(review): `fn_type` still includes the return type here, so `> 8`
    // actually rejects more than 7 *arguments* — confirm whether the limit
    // is meant to count the return slot.
    if sig.fn_type.len() > 8 {
        panic!(
            "Too many arguments! GHC C-ABI implementation does not currently behave well \
            with function with more than 8 arguments on platforms apart from x86_64 ..."
        )
    }
    // Pop the return type off the arrow chain, unwrapping an explicit `IO`.
    let ret = match sig.fn_type.pop().unwrap_or(HsType::Empty) {
        HsType::IO(x) => x,
        x => Box::new(x),
    };
    // Iterate through function argument types ...
    let mut c_fn_args = quote! {};
    let mut rust_fn_values = quote! {};
    for (i, hs_c_ffi_type) in sig.fn_type.iter().enumerate() {
        let arg = format_ident!("__{i}");
        let c_ffi_safe_type = hs_c_ffi_type.quote();
        c_fn_args.extend(quote! { #arg: #c_ffi_safe_type, });
        rust_fn_values.extend(quote! { traits::FromReprRust::from(#arg), });
    }
    // Generate C-FFI wrapper of Rust function ...
    let c_fn = format_ident!("__c_{}", sig.fn_name);
    let c_ret = ret.quote();
    let extern_c_wrapper = quote! {
        #[no_mangle] // Mangling makes symbol names more difficult to predict.
        // We disable it to ensure that the resulting symbol is really `#c_fn`.
        extern "C" fn #c_fn(#c_fn_args) -> #c_ret {
            // `traits` module is `hs-bindgen::hs-bindgen-traits`
            // n.b. do not forget to import it, e.g., with `use hs-bindgen::*`
            traits::FromReprC::from(#rust_fn(#rust_fn_values))
        }
    };
    // Re-append the (IO-wrapped) return type so callers see the full chain.
    sig.fn_type.push(HsType::IO(ret));
    (sig, extern_c_wrapper.into())
}
| yvan-sraka/hs-bindgen-attribute | 1 | Handy macro to generate C-FFI bindings from Rust to Haskell. | Rust | yvan-sraka | Yvan Sraka | |
src/toml.rs | Rust | use semver::{Version, VersionReq};
use serde::Deserialize;
use std::{env, fs, path::Path};
/// Struct that maps the content of the `hsbindgen.toml` config file
#[derive(Deserialize)]
pub(crate) struct Config {
    /// Name of the Haskell module to write the generated bindings into.
    pub(crate) default: Option<String>,
    /// Version (SemVer) of the `cargo-cabal` that generated the file.
    pub(crate) version: Option<String>,
}
/// Read `hsbindgen.toml` config file generated by `cargo-cabal`
pub(crate) fn config() -> Config {
    // `CARGO_MANIFEST_DIR` points at the crate invoking the proc macro, so
    // the config is looked up next to that crate's `Cargo.toml`.
    let cargo_manifest_dir = env::var("CARGO_MANIFEST_DIR")
        .expect("environment variable `CARGO_MANIFEST_DIR` must be set");
    let cfg_path = Path::new(&cargo_manifest_dir).join("hsbindgen.toml");
    let cfg = fs::read_to_string(cfg_path).expect(
        "fail to read content of `hsbindgen.toml` configuration file
n.b. you have to run the command `cargo-cabal` to generate it",
    );
    let cfg = toml::from_str(&cfg).expect("fail to parse TOML content of `hsbindgen.toml` file");
    // Refuse to run against a config written by an incompatible cargo-cabal.
    check_version(&cfg);
    cfg
}
/// Compatibility constraints on `cargo-cabal` version used
fn check_version(config: &Config) {
    // This hs-bindgen release only understands configs from cargo-cabal <= 0.8.
    let req = VersionReq::parse("<=0.8").unwrap();
    let version = config
        .version
        .as_ref()
        .expect("a version field is required in `hsbindgen.toml`");
    let version = Version::parse(version)
        .expect("version field of `hsbindgen.toml` does not follow SemVer format");
    // Panicking is the proc-macro error path: abort compilation on mismatch.
    assert!(
        req.matches(&version),
        "incompatible versions of `cargo-cabal`/`hs-bindgen` used, please update"
    );
}
| yvan-sraka/hs-bindgen-attribute | 1 | Handy macro to generate C-FFI bindings from Rust to Haskell. | Rust | yvan-sraka | Yvan Sraka | |
src/fun.rs | Rust | use crate::{private, FromReprRust};
// Generate `FromReprRust` impls that wrap a C function pointer coming from
// Haskell into a boxed Rust closure, one impl per arity.
macro_rules! repr_rust_fn {
    // Base case: zero-argument function pointers.
    () => {
        impl<Output> FromReprRust<unsafe extern "C" fn() -> Output> for Box<dyn Fn() -> Output>
        where
            Output: private::CFFISafe + 'static,
        {
            fn from(f: unsafe extern "C" fn() -> Output) -> Self {
                unsafe { Box::new(move || f())}
            }
        }
    };
    // Recursive case: peel one (type ident, binder ident) pair per step, so
    // an impl is generated for every arity from 0 up to the seed list below.
    ($x:ident, $y:ident $(,$xs:ident, $ys: ident)*) => {
        repr_rust_fn!($( $xs, $ys ),*);
        impl<$x, $($xs,)* Output> FromReprRust<unsafe extern "C" fn($x $(,$xs)*) -> Output> for Box<dyn Fn($x $(,$xs)*) -> Output>
        where
            Output: private::CFFISafe + 'static,
            $x: private::CFFISafe + 'static$(,
            $xs: private::CFFISafe + 'static)*
        {
            fn from(f: unsafe extern "C" fn($x $(, $xs )*) -> Output) -> Self {
                unsafe { Box::new(move |$y $(,$ys)*| f($y $(,$ys)*))}
            }
        }
    };
}
// Seed: arities 0..=6 (type params A..F paired with closure binders a..f).
repr_rust_fn!(A, a, B, b, C, c, D, d, E, e, F, f);
| yvan-sraka/hs-bindgen-traits | 2 | Utility traits behind hs-bindgen ergonomics | Rust | yvan-sraka | Yvan Sraka | |
src/lib.rs | Rust | //! # `hs-bingen-traits`
//!
//! Utility traits behind [`hs-bindgen`](https://github.com/yvan-sraka/hs-bindgen)
//! ergonomics. It helps user to easily define wrapper function to derive a Rust
//! type from and into a C-FFI safe target type (that match the memory layout of
//! an Haskell type).
//!
//! ## What's this library for?
//!
//! [Does `repr(C)` define a trait I can use to check structs were declared with `#repr(C)`?](https://users.rust-lang.org/t/16323)
//! The answer is sadly no ... that's what this library is trying to provide, like
//! what [`safer_ffi`](https://docs.rs/safer-ffi/latest/safer_ffi/layout/trait.ReprC.html)
//! does, but in a simpler and more minimal way, since the goal here is only to
//! target Haskell FFI.
//!
//! ## Acknowledgments
//!
//! ⚠️ This is still a working experiment, not yet production ready.
//!
//! This project was part of a work assignment as an
//! [IOG](https://github.com/input-output-hk) contractor.
//!
//! ## License
//!
//! Licensed under either of [Apache License](LICENSE-APACHE), Version 2.0 or
//! [MIT license](LICENSE-MIT) at your option.
//!
//! Unless you explicitly state otherwise, any contribution intentionally submitted
//! for inclusion in this project by you, as defined in the Apache-2.0 license,
//! shall be dual licensed as above, without any additional terms or conditions.
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(not(feature = "std"), forbid(unsafe_code))]
#[cfg(feature = "std")]
mod fun;
#[cfg(feature = "std")]
mod str;
#[cfg(feature = "std")]
mod vec;
#[cfg(feature = "std")]
pub use self::{str::*, vec::*};
/// Generate C-FFI cast from a given Rust type.
///
/// `impl FromReprC<Foo> for Bar` -> means `from` Rust `Foo` type into C `Bar` repr
pub trait FromReprC<T>: private::CFFISafe {
    #[must_use]
    fn from(_: T) -> Self;
}
/// `impl IntoReprC<Foo> for Bar` -> means `from` C `Foo` type into Rust `Bar` repr
pub trait IntoReprC<T> {
    #[must_use]
    fn into(self) -> T;
}
// Blanket impl: any `FromReprC` conversion automatically provides the
// mirrored `IntoReprC` method (same shape as std's `From`/`Into` pair).
impl<T, U> IntoReprC<U> for T
where
    U: FromReprC<T>,
    T: private::CFFISafe,
{
    #[inline]
    fn into(self) -> U {
        U::from(self)
    }
}
/// Generate safe Rust wrapper from a given C-FFI type.
///
/// `impl FromReprRust<Foo> for Bar` -> means `from` C `Foo` type into Rust `Bar` repr
pub trait FromReprRust<T: private::CFFISafe> {
    #[must_use]
    fn from(_: T) -> Self;
}
/// `impl IntoReprRust<Foo> for Bar` -> means `from` Rust `Foo` type into C `Bar` repr
pub trait IntoReprRust<T> {
    #[must_use]
    fn into(self) -> T;
}
// Blanket impl mirroring the `IntoReprC` one, for the Rust-facing direction.
impl<T, U> IntoReprRust<U> for T
where
    U: FromReprRust<T>,
    T: private::CFFISafe,
{
    fn into(self) -> U {
        U::from(self)
    }
}
mod private {
    /// The trait `CFFISafe` is sealed and cannot be implemented for types outside this crate.
    /// c.f. https://rust-lang.github.io/api-guidelines/future-proofing.html#c-sealed
    pub trait CFFISafe {}
    // Mark each listed scalar type, and a `*const` to it, as C-FFI safe.
    macro_rules! c_ffi_safe {
        ($($ty:ty),*) => {$(
            impl CFFISafe for $ty {}
            // `*const T` is C-FFI safe if `T` is C-FFI safe
            impl CFFISafe for *const $ty {}
        )*};
    }
    // C-FFI safe types (the previous macro avoid redundant code)
    c_ffi_safe![(), i8, i16, i32, i64, u8, u16, u32, u64, f32, f64];
    // Recursively mark `extern "C"` function pointers of arity 0..=6 as
    // C-FFI safe, provided every argument and the return type are safe.
    macro_rules! c_ffi_safe_fun {
        () => {
            impl<Output: CFFISafe> CFFISafe for unsafe extern "C" fn() -> Output {}
        };
        ($x:ident $(,$xs:ident)*) => {
            c_ffi_safe_fun!($( $xs ),*);
            impl<$x $(,$xs)*, Output> CFFISafe for unsafe extern "C" fn($x, $($xs),*) -> Output
            where
                Output: CFFISafe,
                $x: CFFISafe,
                $($xs: CFFISafe),
            * {}
        };
    }
    c_ffi_safe_fun!(A, B, C, D, E, F);
}
// For types that are already C-FFI safe, conversion in either direction is
// the identity: implement all four pass-through impls per scalar type.
macro_rules! transparent {
    ($($ty:ty),*) => {$(
        impl FromReprRust<$ty> for $ty {
            #[inline]
            fn from(x: $ty) -> Self { x }
        }
        impl FromReprC<$ty> for $ty {
            #[inline]
            fn from(x: $ty) -> Self { x }
        }
        impl FromReprRust<*const $ty> for *const $ty {
            #[inline]
            fn from(x: *const $ty) -> Self { x }
        }
        impl FromReprC<*const $ty> for *const $ty {
            #[inline]
            fn from(x: *const $ty) -> Self { x }
        }
    )*};
}
// C-FFI safe type trivially implement both traits
transparent![i8, i16, i32, i64, u8, u16, u32, u64, f32, f64];
/// This is used by Rust function that doesn’t return any value
/// (`void` C equivalent).
impl FromReprC<()> for () {
    #[inline]
    fn from(_: ()) -> Self {}
}
// Let a Rust wrapper accept a mutable pointer where C hands over `*const T`
// (a plain cast — no aliasing or mutability guarantees are checked here).
impl<T> FromReprRust<*const T> for *mut T
where
    *const T: private::CFFISafe,
{
    #[inline]
    fn from(x: *const T) -> Self {
        x as *mut T
    }
}
| yvan-sraka/hs-bindgen-traits | 2 | Utility traits behind hs-bindgen ergonomics | Rust | yvan-sraka | Yvan Sraka | |
src/str.rs | Rust | //! This module defines convenient traits to let user-defined function take as
//! argument or return type either `CString`, `&CStr`, `String` or `&str`
use crate::{FromReprC, FromReprRust};
use std::ffi::{c_char, CStr, CString};
impl FromReprRust<*const c_char> for CString {
    #[inline]
    fn from(ptr: *const c_char) -> Self {
        // Round-trip through `&str` so invalid UTF-8 is rejected (unwrap).
        let r: &str = FromReprRust::from(ptr);
        CString::new(r).unwrap()
    }
}
impl FromReprRust<*const c_char> for &CStr {
    #[inline]
    #[allow(clippy::not_unsafe_ptr_arg_deref)]
    fn from(ptr: *const c_char) -> Self {
        // Caller contract: `ptr` is a valid NUL-terminated string that the
        // Haskell side keeps alive for the duration of the call.
        unsafe { CStr::from_ptr(ptr) }
    }
}
impl FromReprRust<*const c_char> for String {
    #[inline]
    fn from(ptr: *const c_char) -> Self {
        let r: &str = FromReprRust::from(ptr);
        r.to_string()
    }
}
impl FromReprRust<*const c_char> for &str {
    #[inline]
    fn from(ptr: *const c_char) -> Self {
        // Panics (unwrap) if the C string is not valid UTF-8.
        let r: &CStr = FromReprRust::from(ptr);
        r.to_str().unwrap()
    }
}
impl FromReprC<CString> for *const c_char {
    #[inline]
    fn from(s: CString) -> Self {
        let x = s.as_ptr();
        // FIXME: this pattern is somehow duplicated in `vec` module and should
        // rather live behind in a `AsPtr` trait, similar to the one defined by
        // https://crates.io/crates/ptrplus
        // `forget` keeps the allocation alive for the Haskell side (leaks
        // unless the foreign runtime eventually frees it — see `vec` FIXME).
        std::mem::forget(s);
        x
    }
}
impl FromReprC<String> for *const c_char {
    #[inline]
    fn from(s: String) -> Self {
        FromReprC::from(CString::new(s).unwrap())
    }
}
// Round-trip sanity check: String -> *const c_char -> &str.
#[test]
fn _1() {
    let x = "hello"; // FIXME: use Arbitrary crate
    let y: &str = FromReprRust::from(FromReprC::from(x.to_string()));
    assert!(x == y);
}
| yvan-sraka/hs-bindgen-traits | 2 | Utility traits behind hs-bindgen ergonomics | Rust | yvan-sraka | Yvan Sraka | |
src/vec.rs | Rust | use crate::{private, FromReprC, FromReprRust};
// FIXME: study what could be a good `Vec<T>`/`&[T]` traits ergonomics ...
// n.b. the concept of `slice` have no C equivalent ...
// https://users.rust-lang.org/t/55118
impl<T, const N: usize> FromReprRust<*const T> for &[T; N]
where
    *const T: private::CFFISafe,
{
    #[inline]
    #[allow(clippy::not_unsafe_ptr_arg_deref)]
    fn from(ptr: *const T) -> Self {
        // Caller contract: `ptr` points at (at least) `N` valid elements
        // that stay alive for the duration of the call.
        let s = unsafe { std::slice::from_raw_parts(ptr, N) };
        s.try_into().unwrap_or_else(|_| {
            let ty = std::any::type_name::<T>();
            panic!("impossible to convert &[{ty}] into &[{ty}; {N}]");
        })
    }
}
impl<T> FromReprC<Vec<T>> for *const T
where
    *const T: private::CFFISafe,
{
    #[inline]
    fn from(v: Vec<T>) -> Self {
        let x: *const T = v.as_ptr();
        // since the value is passed to Haskell runtime we want Rust to never
        // drop it!
        std::mem::forget(v);
        // FIXME: I should double-check that this does not leak memory and
        // that the value is well handled by GHC tracing Garbage Collector
        x
        // if not, we should export a utility function to let user drop
        // the value, this technique was suggested e.g. here:
        // https://stackoverflow.com/questions/39224904
    }
}
// Round-trip sanity check: Vec<i32> -> *const i32 -> &[i32; 3].
#[test]
fn _1() {
    let x = &[1, 2, 3]; // FIXME: use Arbitrary crate
    let y: &[i32; 3] = FromReprRust::from(FromReprC::from(x.to_vec()));
    assert!(x == y);
}
| yvan-sraka/hs-bindgen-traits | 2 | Utility traits behind hs-bindgen ergonomics | Rust | yvan-sraka | Yvan Sraka | |
src/lib.rs | Rust | use cfg_if::cfg_if;
use core::ffi::*;
use displaydoc::Display;
use proc_macro2::TokenStream;
use quote::quote;
use thiserror::Error;
/// Enumeration of all Haskell C-FFI safe types as the string representation of
/// their token in Haskell.
///
/// Each variant's doc comment names the Haskell type it is an alias of.
///
/// FIXME: `Errno(c_int)` should be implemented as a Rust `enum` ...
/// https://hackage.haskell.org/package/base/docs/Foreign-C-Error.html
/// ... using `#[repr(i32)]` https://doc.rust-lang.org/nomicon/other-reprs.html
#[non_exhaustive]
pub enum HsType {
    /// `Int32`
    CInt,
    /// `Int8`
    CChar,
    /// `Int8`
    CSChar,
    /// `Word8`
    CUChar,
    /// `Int16`
    CShort,
    /// `Word16`
    CUShort,
    /// `Word32`
    CUInt,
    /// `Int64`
    CLong,
    /// `Word64`
    CULong,
    /// `Int64`
    CLLong,
    /// `Word64`
    CULLong,
    /// `Word8`
    CBool,
    /// `Ptr CChar`
    CString,
    /// `Double`
    CDouble,
    /// `Float`
    CFloat,
    /// `()`
    Empty,
    /// `Ptr T`
    Ptr(Box<HsType>),
    /// `IO T`
    IO(Box<HsType>),
    /// FunPtr (S -> T) — the last element of the `Vec` is the return type
    FunPtr(Vec<HsType>),
}
impl std::fmt::Display for HsType {
    /// Render the Haskell source token for this type, e.g. `Ptr (CChar)`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let token: String = match self {
            HsType::CBool => "CBool".into(),
            HsType::CChar => "CChar".into(),
            HsType::CDouble => "CDouble".into(),
            HsType::CFloat => "CFloat".into(),
            HsType::CInt => "CInt".into(),
            HsType::CLLong => "CLLong".into(),
            HsType::CLong => "CLong".into(),
            HsType::CSChar => "CSChar".into(),
            HsType::CShort => "CShort".into(),
            HsType::CString => "CString".into(),
            HsType::CUChar => "CUChar".into(),
            HsType::CUInt => "CUInt".into(),
            HsType::CULLong => "CULLong".into(),
            HsType::CULong => "CULong".into(),
            HsType::CUShort => "CUShort".into(),
            HsType::Empty => "()".into(),
            HsType::Ptr(inner) => format!("Ptr ({inner})"),
            HsType::IO(inner) => format!("IO ({inner})"),
            HsType::FunPtr(sig) => {
                // Recursively render the argument/return chain.
                let chain = sig
                    .iter()
                    .map(|part| part.to_string())
                    .collect::<Vec<String>>()
                    .join(" -> ");
                format!("FunPtr({chain})")
            }
        };
        f.write_str(&token)
    }
}
// NOTE: `displaydoc::Display` derives each variant's `Display` output from
// its doc comment — the `/**` / `///` text below IS the user-facing error
// message, so edit it only deliberately.
#[derive(Debug, Display, Error)]
pub enum Error {
    /** type `{0}` isn't in the list of supported Haskell C-FFI types.
     * Consider opening an issue https://github.com/yvan-sraka/hs-bindgen-types
     *
     * The list of available Haskell C-FFI types could be found here:
     * https://hackage.haskell.org/package/base/docs/Foreign-C.html
     */
    UnsupportedHsType(String),
    /// found an open `(` without the matching closing `)`
    UnmatchedParenthesis,
    /// FunPtr is missing type parameter
    FunPtrWithoutTypeArgument,
}
/// Iterator that splits a Haskell type expression on *top-level* `->`
/// arrows, leaving parenthesized sub-signatures (e.g. `FunPtr (A -> B)`)
/// intact. Yielded segments keep their surrounding whitespace.
pub struct ArrowIter<'a> {
    remaining: &'a str,
}
impl<'a> Iterator for ArrowIter<'a> {
    type Item = &'a str;
    fn next(&mut self) -> Option<&'a str> {
        let src = self.remaining;
        // A blank tail means the previous arrow was the last one.
        if src.trim().is_empty() {
            return None;
        }
        let mut depth = 0i32;
        let mut pos = 0;
        let mut segment: &str = "";
        for ch in src.chars() {
            match ch {
                '(' => depth += 1,
                ')' => depth -= 1,
                _ if depth == 0 && src[pos..].starts_with("->") => {
                    // Top-level arrow found: yield everything before it and
                    // skip over the two arrow characters.
                    segment = &src[..pos];
                    pos += 2;
                    break;
                }
                _ => {}
            }
            pos += ch.len_utf8();
            segment = &src[..pos];
        }
        self.remaining = &src[pos..];
        Some(segment)
    }
}
impl<'a> From<&'a str> for ArrowIter<'a> {
    fn from(value: &'a str) -> Self {
        ArrowIter { remaining: value }
    }
}
impl std::str::FromStr for HsType {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let s = s.trim();
if s == "()" {
Ok(HsType::Empty)
} else if !s.is_empty() && &s[..1] == "(" {
Ok(s[1..]
.strip_suffix(')')
.ok_or(Error::UnmatchedParenthesis)?
.parse()?)
} else if s.len() >= 2 && &s[..2] == "IO" {
Ok(HsType::IO(Box::new(s[2..].parse()?)))
} else if s.len() >= 3 && &s[..3] == "Ptr" {
Ok(HsType::Ptr(Box::new(s[3..].parse()?)))
} else if s.len() >= 6 && &s[..6] == "FunPtr" {
let mut s = s[6..].trim();
if let Some('(') = s.chars().next() {
s = s[1..]
.strip_suffix(')')
.ok_or(Error::UnmatchedParenthesis)?;
}
let types: Vec<_> = ArrowIter { remaining: s }
.map(|s| s.parse::<Self>())
.collect::<Result<_, _>>()?;
if types.is_empty() {
return Err(Error::FunPtrWithoutTypeArgument);
}
Ok(HsType::FunPtr(types))
} else {
match s {
"CBool" => Ok(HsType::CBool),
"CChar" => Ok(HsType::CChar),
"CDouble" => Ok(HsType::CDouble),
"CFloat" => Ok(HsType::CFloat),
"CInt" => Ok(HsType::CInt),
"CLLong" => Ok(HsType::CLLong),
"CLong" => Ok(HsType::CLong),
"CSChar" => Ok(HsType::CSChar),
"CShort" => Ok(HsType::CShort),
"CString" => Ok(HsType::CString),
"CUChar" => Ok(HsType::CUChar),
"CUInt" => Ok(HsType::CUInt),
"CULLong" => Ok(HsType::CULLong),
"CULong" => Ok(HsType::CULong),
"CUShort" => Ok(HsType::CUShort),
ty => Err(Error::UnsupportedHsType(ty.to_string())),
}
}
}
}
impl HsType {
    /// Get the C-FFI Rust type that match the memory layout of a given HsType.
    ///
    /// This function return a `OUTPUT: proc_macro2::TokenStream` that should
    /// be valid (considered as FFI-safe by `rustc`) in the context of a block
    /// of form: `quote! { extern C fn _(_: #OUTPUT) {} }`
    ///
    /// c.f. https://doc.rust-lang.org/core/ffi/
    pub fn quote(&self) -> TokenStream {
        match self {
            // FIXME: add https://doc.rust-lang.org/core/ffi/enum.c_void.html
            HsType::CBool => quote! { bool },
            HsType::CChar => quote! { core::ffi::c_char },
            HsType::CDouble => quote! { core::ffi::c_double },
            HsType::CFloat => quote! { core::ffi::c_float },
            HsType::CInt => quote! { core::ffi::c_int },
            HsType::CLLong => quote! { core::ffi::c_longlong },
            HsType::CLong => quote! { core::ffi::c_long },
            HsType::CSChar => quote! { core::ffi::c_schar },
            HsType::CShort => quote! { core::ffi::c_short },
            // `CString` is just an alias of `Ptr CChar` on the C side.
            HsType::CString => HsType::Ptr(Box::new(HsType::CChar)).quote(),
            HsType::CUChar => quote! { core::ffi::c_uchar },
            HsType::CUInt => quote! { core::ffi::c_uint },
            HsType::CULLong => quote! { core::ffi::c_ulonglong },
            HsType::CULong => quote! { core::ffi::c_ulong },
            HsType::CUShort => quote! { core::ffi::c_ushort },
            HsType::Empty => quote! { () },
            HsType::Ptr(x) => {
                let ty = x.quote();
                quote! { *const #ty }
            }
            // `IO` has no C-level counterpart: erase the wrapper.
            HsType::IO(x) => x.quote(),
            HsType::FunPtr(types) => {
                // Last element of the chain is the return type; the rest
                // are the arguments of the C function pointer.
                let ret = types.last().unwrap().quote();
                let args: Vec<_> = types[..types.len() - 1]
                    .iter()
                    .map(|arg| arg.quote())
                    .collect();
                quote!(unsafe extern "C" fn(#(#args),*) -> #ret)
            }
        }
    }
}
/// Turn a given Rust type into its `HsType` target.
///
/// Deducing the right Haskell type target for an arbitrary Rust type is
/// provided by the `reflexive` feature of `hs-bindgen-derive` and relies
/// mostly on Rust type inference through this trait.
pub trait ReprHs {
    /// Return the `HsType` matching the implementing Rust type.
    ///
    /// NOTE: this is an associated function (it takes no `self`); it is
    /// unrelated to `std::convert::Into::into`.
    fn into() -> HsType;
}
/// Generate a [`ReprHs`] impl for each `$ty => $ident` pair, mapping the
/// Rust type `$ty` to the `HsType::$ident` variant.
///
/// Usage: `repr_hs! { c_int => CInt, c_float => CFloat, }`
/// (the trailing comma after each pair is required).
macro_rules! repr_hs {
    ($($ty:ty => $ident:ident,)*) => {$(
        impl ReprHs for $ty {
            fn into() -> HsType {
                HsType::$ident
            }
        }
    )*};
}
// Re-export so sibling modules of this crate can invoke the macro.
pub(crate) use repr_hs;
// Platform-independent mappings from C scalar aliases (and the unit
// type) to their Haskell C-FFI counterparts.
repr_hs! {
    c_char => CChar,
    c_double => CDouble,
    c_float => CFloat,
    c_int => CInt,
    c_short => CShort,
    c_uchar => CUChar,
    c_uint => CUInt,
    c_ushort => CUShort,
    () => Empty,
}
// Long/long-long mappings are platform-dependent: on a given target some
// of the `c_*` aliases resolve to the same primitive type, and defining
// `ReprHs` for both would be a conflicting (overlapping) impl. Only the
// alias pair that does not collide with the impls above is implemented
// on each target.
// NOTE(review): presumably on LP64 (64-bit, non-Windows) `c_longlong`
// aliases `c_long`, while elsewhere `c_long` aliases `c_int` — confirm
// against `core::ffi` for the supported targets.
cfg_if! {
    if #[cfg(all(target_pointer_width = "64", not(windows)))] {
        repr_hs! {
            c_long => CLong,
            c_ulong => CULong,
        }
    } else {
        repr_hs! {
            c_longlong => CLLong,
            c_ulonglong => CULLong,
        }
    }
}
impl<T> ReprHs for *const T
where
    T: ReprHs,
{
    /// A const raw pointer maps to `Ptr a`, where `a` is the pointee's
    /// Haskell representation.
    fn into() -> HsType {
        let pointee = T::into();
        HsType::Ptr(Box::new(pointee))
    }
}
impl<T> ReprHs for *mut T
where
    T: ReprHs,
{
    /// A mutable raw pointer maps to `Ptr a` exactly like `*const T`
    /// (mutability is not reflected on the Haskell side).
    fn into() -> HsType {
        HsType::Ptr(T::into().into())
    }
}
/* ********** Vector & Slices ********** */
impl<T> ReprHs for Vec<T>
where
    T: ReprHs,
{
    /// A `Vec<T>` maps to `Ptr a`, `a` being the element's Haskell
    /// representation.
    fn into() -> HsType {
        let element = T::into();
        HsType::Ptr(Box::new(element))
    }
}
impl<T, const N: usize> ReprHs for &[T; N]
where
    T: ReprHs,
{
    /// A borrowed fixed-size array maps to `Ptr a`, `a` being the
    /// element's Haskell representation (the length `N` is erased).
    fn into() -> HsType {
        HsType::Ptr(T::into().into())
    }
}
/* ********** Strings ********** */
use std::ffi::CString;
// Every supported Rust string flavor is surfaced to Haskell as
// `CString` (which `HsType::quote` lowers to `*const c_char`).
repr_hs! {
    CString => CString,
    &CStr => CString,
    String => CString,
    &str => CString,
}
| yvan-sraka/hs-bindgen-types | 1 | Rust | yvan-sraka | Yvan Sraka | ||
dataLoader/__init__.py | Python | from .blender import BlenderDataset
from .ord import ORD
from .tensoIR_rotation_setting import TensoIR_Dataset_unknown_rotated_lights
from .tensoIR_relighting_test import tensoIR_Relighting_test
from .tensoIR_simple import TensoIR_Dataset_simple
from .tensoIR_material_editing_test import tensoIR_Material_Editing_test
from .tensoIR_general_multi_lights import TensoIR_Dataset_unknown_general_multi_lights
dataset_dict = {'ord': ORD,
'blender': BlenderDataset,
'tensoIR_unknown_rotated_lights':TensoIR_Dataset_unknown_rotated_lights,
'tensoIR_unknown_general_multi_lights': TensoIR_Dataset_unknown_general_multi_lights,
'tensoIR_relighting_test':tensoIR_Relighting_test,
'tensoIR_material_editing_test':tensoIR_Material_Editing_test,
'tensoIR_simple':TensoIR_Dataset_simple,
}
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
dataLoader/blender.py | Python | import torch,cv2
from torch.utils.data import Dataset
import json
from tqdm import tqdm
import os
from PIL import Image
from torchvision import transforms as T
from dataLoader.ray_utils import *
from dataLoader.plotter import plot_cameras_and_scene_bbox
import camtools as ct
class BlenderDataset(Dataset):
def __init__(self, datadir, split='train', downsample=1.0, is_stack=False, N_vis=-1, **kwargs):
self.N_vis = N_vis
self.root_dir = datadir
self.split = split
self.is_stack = is_stack
self.img_wh = (int(800/downsample),int(800/downsample))
self.define_transforms()
self.scene_bbox = torch.tensor([[-1.5, -1.5, -1.5], [1.5, 1.5, 1.5]])
self.blender2opencv = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
self.read_meta()
self.define_proj_mat()
self.white_bg = True
self.near_far = [2.0,6.0]
self.center = torch.mean(self.scene_bbox, axis=0).float().view(1, 1, 3)
self.radius = (self.scene_bbox[1] - self.center).float().view(1, 1, 3)
self.downsample=downsample
# Print all properties.
import ipdb; ipdb.set_trace(); pass
all_properties = [
"N_vis",
"all_depth",
"all_light_idx",
"all_masks",
"all_rays",
"all_rgbs",
"blender2opencv",
"center",
"directions",
"downsample",
"focal",
"image_paths",
"img_wh",
"intrinsics",
"is_stack",
"meta",
"near_far",
"poses",
"proj_mat",
"radius",
"root_dir",
"scene_bbox",
"split",
"transform",
"white_bg",
]
for key in all_properties:
try:
val = getattr(self, key)
except:
val = None
if isinstance(val, torch.Tensor):
print(f"{key}: {val.shape}, {val.dtype}")
elif key == "image_paths":
print(f"{key}: {len(val)} image paths")
elif key == "meta":
print(f"{key}: with keys {val.keys()}")
else:
print(f"{key}: {val}")
import ipdb; ipdb.set_trace(); pass
def read_depth(self, filename):
depth = np.array(read_pfm(filename)[0], dtype=np.float32) # (800, 800)
return depth
def read_meta(self):
with open(os.path.join(self.root_dir, f"transforms_{self.split}.json"), 'r') as f:
self.meta = json.load(f)
w, h = self.img_wh
self.focal = 0.5 * 800 / np.tan(0.5 * self.meta['camera_angle_x']) # original focal length, fov -> focal
self.focal *= self.img_wh[0] / 800 # modify focal length to match size self.img_wh
# ray directions for all pixels, same for all images (same H, W, focal)
self.directions = get_ray_directions(h, w, [self.focal,self.focal]) # (h, w, 3)
self.directions = self.directions / torch.norm(self.directions, dim=-1, keepdim=True)
self.intrinsics = torch.tensor([[self.focal,0,w/2],[0,self.focal,h/2],[0,0,1]]).float()
self.image_paths = []
self.poses = []
self.all_rays = []
self.all_rgbs = []
self.all_masks = []
self.all_depth = []
self.downsample=1.0
img_eval_interval = 1 if self.N_vis < 0 else len(self.meta['frames']) // self.N_vis
idxs = list(range(0, len(self.meta['frames']), img_eval_interval))
for i in tqdm(idxs, desc=f'Loading data {self.split} ({len(idxs)})'):#img_list:#
frame = self.meta['frames'][i]
pose = np.array(frame['transform_matrix']) @ self.blender2opencv
c2w = torch.FloatTensor(pose)
self.poses += [c2w]
image_path = os.path.join(self.root_dir, f"{frame['file_path']}.png")
self.image_paths += [image_path]
img = Image.open(image_path)
if self.downsample!=1.0:
img = img.resize(self.img_wh, Image.LANCZOS)
img = self.transform(img) # (4, h, w)
img = img.view(4, -1).permute(1, 0) # (h*w, 4) RGBA
img = img[:, :3] * img[:, -1:] + (1 - img[:, -1:]) # blend A to RGB
self.all_rgbs += [img]
img_mask = ~(img[:, -1:] == 0)
self.all_masks += [img_mask.squeeze(0)]
rays_o, rays_d = get_rays(self.directions, c2w) # both (h*w, 3)
self.all_rays += [torch.cat([rays_o, rays_d], 1)] # (h*w, 6)
self.poses = torch.stack(self.poses)
if not self.is_stack:
self.all_rays = torch.cat(self.all_rays, 0) # (len(self.meta['frames'])*h*w, 6)
self.all_rgbs = torch.cat(self.all_rgbs, 0) # (len(self.meta['frames'])*h*w, 3)
self.all_masks = torch.cat(self.all_masks, 0) # (len(self.meta['frames'])*h*w, 1)
# self.all_depth = torch.cat(self.all_depth, 0) # (len(self.meta['frames'])*h*w, 3)
self.all_light_idx = torch.zeros((*self.all_rays.shape[:-1], 1),dtype=torch.long)
else:
self.all_rays = torch.stack(self.all_rays, 0) # (len(self.meta['frames']),h*w, 6)
self.all_rgbs = torch.stack(self.all_rgbs, 0).reshape(-1,*self.img_wh[::-1], 3) # (len(self.meta['frames']),h,w,3)
self.all_masks = torch.stack(self.all_masks, 0).reshape(-1,*self.img_wh[::-1]) # (len(self.meta['frames']),h,w,1)
self.all_light_idx = torch.zeros((*self.all_rays.shape[:-1], 1),dtype=torch.long).reshape(-1,*self.img_wh[::-1])
# Try plotting with camtools
if False:
plot_cameras_and_scene_bbox(
Ks=[self.intrinsics.cpu().numpy() for _ in range(len(self.poses))],
Ts=[ct.convert.pose_to_T(pose) for pose in self.poses.cpu().numpy()],
scene_bbox=self.scene_bbox.cpu().numpy(),
)
def define_transforms(self):
self.transform = T.ToTensor()
def define_proj_mat(self):
self.proj_mat = self.intrinsics.unsqueeze(0) @ torch.inverse(self.poses)[:,:3]
def world2ndc(self,points,lindisp=None):
device = points.device
return (points - self.center.to(device)) / self.radius.to(device)
def __len__(self):
"""
Returns the number of images.
"""
if self.split == "train":
raise NotImplementedError("In train, you should not call __len__")
num_rays = len(self.all_rgbs) # (len(self.meta['frames'])*h*w, 3)
width, height = self.img_wh
num_images = int(num_rays / (width * height))
return num_images
def __getitem__(self, idx):
print(f"BlenderDataset.__getitem__(): {idx}")
# use data in the buffers
if self.split == 'train':
sample = {
'rays': self.all_rays[idx],
'rgbs': self.all_rgbs[idx]
}
raise NotImplementedError("In train, you should not call __getitem__")
# create data for each image separately
else:
width, height = self.img_wh
wth = width * height
num_images = self.__len__()
# [128000000, 3] -> [200, 800 * 800, 3]
all_rgbs = self.all_rgbs.reshape(num_images, height * width, 3)
# [128000000, 6] -> [200, 800 * 800, 6]
all_rays = self.all_rays.reshape(num_images, height * width, 6)
# [128000000, 1] -> [200, 800 * 800, 1]
all_masks = self.all_masks.reshape(num_images, height * width, 1)
# [128000000, 1] -> [200, 800 * 800, 1]
all_light_idx = self.all_light_idx.reshape(num_images, height * width, 1)
sample = {
'img_wh': self.img_wh, # (int, int)
'light_idx': all_light_idx[idx].view(-1, wth, 1), # [light_num, H*W, 1]
'rays': all_rays[idx], # [H*W, 6]
'rgbs': all_rgbs[idx].view(-1, wth, 3), # [light_num, H*W, 3]
'rgbs_mask': all_masks[idx] # [H*W, 1]
}
print(f"light_idx.shape: {sample['light_idx'].shape}")
print(f"rays.shape : {sample['rays'].shape}")
print(f"rgbs.shape : {sample['rgbs'].shape}")
print(f"rgbs_mask.shape: {sample['rgbs_mask'].shape}")
return sample
if __name__ == '__main__':
dataset = BlenderDataset(datadir='../data/nerf_synthetic/lego')
item = dataset.__getitem__(0)
for key, value in item.items():
if type(value) == torch.Tensor:
print(f'key:{key} tensor.shape:{value.shape}')
else:
print(f'key:{key} value:{value.shape}')
print(f'rays.shape {dataset.all_rays.shape}') # [640000, 6]
print(f'rgbs.shape : {dataset.all_rgbs.shape}') # [640000, 3]
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
dataLoader/colmap2nerf.py | Python | #!/usr/bin/env python3
# Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import argparse
import os
from pathlib import Path, PurePosixPath
import numpy as np
import json
import sys
import math
import cv2
import os
import shutil
def parse_args():
parser = argparse.ArgumentParser(description="convert a text colmap export to nerf format transforms.json; optionally convert video to images, and optionally run colmap in the first place")
parser.add_argument("--video_in", default="", help="run ffmpeg first to convert a provided video file into a set of images. uses the video_fps parameter also")
parser.add_argument("--video_fps", default=2)
parser.add_argument("--time_slice", default="", help="time (in seconds) in the format t1,t2 within which the images should be generated from the video. eg: \"--time_slice '10,300'\" will generate images only from 10th second to 300th second of the video")
parser.add_argument("--run_colmap", action="store_true", help="run colmap first on the image folder")
parser.add_argument("--colmap_matcher", default="sequential", choices=["exhaustive","sequential","spatial","transitive","vocab_tree"], help="select which matcher colmap should use. sequential for videos, exhaustive for adhoc images")
parser.add_argument("--colmap_db", default="colmap.db", help="colmap database filename")
parser.add_argument("--images", default="images", help="input path to the images")
parser.add_argument("--text", default="colmap_text", help="input path to the colmap text files (set automatically if run_colmap is used)")
parser.add_argument("--aabb_scale", default=16, choices=["1","2","4","8","16"], help="large scene scale factor. 1=scene fits in unit cube; power of 2 up to 16")
parser.add_argument("--skip_early", default=0, help="skip this many images from the start")
parser.add_argument("--out", default="transforms.json", help="output path")
args = parser.parse_args()
return args
def do_system(arg):
print(f"==== running: {arg}")
err = os.system(arg)
if err:
print("FATAL: command failed")
sys.exit(err)
def run_ffmpeg(args):
if not os.path.isabs(args.images):
args.images = os.path.join(os.path.dirname(args.video_in), args.images)
images = args.images
video = args.video_in
fps = float(args.video_fps) or 1.0
print(f"running ffmpeg with input video file={video}, output image folder={images}, fps={fps}.")
if (input(f"warning! folder '{images}' will be deleted/replaced. continue? (Y/n)").lower().strip()+"y")[:1] != "y":
sys.exit(1)
try:
shutil.rmtree(images)
except:
pass
do_system(f"mkdir {images}")
time_slice_value = ""
time_slice = args.time_slice
if time_slice:
start, end = time_slice.split(",")
time_slice_value = f",select='between(t\,{start}\,{end})'"
do_system(f"ffmpeg -i {video} -qscale:v 1 -qmin 1 -vf \"fps={fps}{time_slice_value}\" {images}/%04d.jpg")
def run_colmap(args):
db=args.colmap_db
images=args.images
db_noext=str(Path(db).with_suffix(""))
if args.text=="text":
args.text=db_noext+"_text"
text=args.text
sparse=db_noext+"_sparse"
print(f"running colmap with:\n\tdb={db}\n\timages={images}\n\tsparse={sparse}\n\ttext={text}")
if (input(f"warning! folders '{sparse}' and '{text}' will be deleted/replaced. continue? (Y/n)").lower().strip()+"y")[:1] != "y":
sys.exit(1)
if os.path.exists(db):
os.remove(db)
do_system(f"colmap feature_extractor --ImageReader.camera_model OPENCV --SiftExtraction.estimate_affine_shape=true --SiftExtraction.domain_size_pooling=true --ImageReader.single_camera 1 --database_path {db} --image_path {images}")
do_system(f"colmap {args.colmap_matcher}_matcher --SiftMatching.guided_matching=true --database_path {db}")
try:
shutil.rmtree(sparse)
except:
pass
do_system(f"mkdir {sparse}")
do_system(f"colmap mapper --database_path {db} --image_path {images} --output_path {sparse}")
do_system(f"colmap bundle_adjuster --input_path {sparse}/0 --output_path {sparse}/0 --BundleAdjustment.refine_principal_point 1")
try:
shutil.rmtree(text)
except:
pass
do_system(f"mkdir {text}")
do_system(f"colmap model_converter --input_path {sparse}/0 --output_path {text} --output_type TXT")
def variance_of_laplacian(image):
return cv2.Laplacian(image, cv2.CV_64F).var()
def sharpness(imagePath):
image = cv2.imread(imagePath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
fm = variance_of_laplacian(gray)
return fm
def qvec2rotmat(qvec):
return np.array([
[
1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,
2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],
2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]
], [
2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],
1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,
2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]
], [
2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],
2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],
1 - 2 * qvec[1]**2 - 2 * qvec[2]**2
]
])
def rotmat(a, b):
a, b = a / np.linalg.norm(a), b / np.linalg.norm(b)
v = np.cross(a, b)
c = np.dot(a, b)
s = np.linalg.norm(v)
kmat = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
return np.eye(3) + kmat + kmat.dot(kmat) * ((1 - c) / (s ** 2 + 1e-10))
def closest_point_2_lines(oa, da, ob, db): # returns point closest to both rays of form o+t*d, and a weight factor that goes to 0 if the lines are parallel
da = da / np.linalg.norm(da)
db = db / np.linalg.norm(db)
c = np.cross(da, db)
denom = np.linalg.norm(c)**2
t = ob - oa
ta = np.linalg.det([t, db, c]) / (denom + 1e-10)
tb = np.linalg.det([t, da, c]) / (denom + 1e-10)
if ta > 0:
ta = 0
if tb > 0:
tb = 0
return (oa+ta*da+ob+tb*db) * 0.5, denom
if __name__ == "__main__":
args = parse_args()
if args.video_in != "":
run_ffmpeg(args)
if args.run_colmap:
run_colmap(args)
AABB_SCALE = int(args.aabb_scale)
SKIP_EARLY = int(args.skip_early)
IMAGE_FOLDER = args.images
TEXT_FOLDER = args.text
OUT_PATH = args.out
print(f"outputting to {OUT_PATH}...")
with open(os.path.join(TEXT_FOLDER,"cameras.txt"), "r") as f:
angle_x = math.pi / 2
for line in f:
# 1 SIMPLE_RADIAL 2048 1536 1580.46 1024 768 0.0045691
# 1 OPENCV 3840 2160 3178.27 3182.09 1920 1080 0.159668 -0.231286 -0.00123982 0.00272224
# 1 RADIAL 1920 1080 1665.1 960 540 0.0672856 -0.0761443
if line[0] == "#":
continue
els = line.split(" ")
w = float(els[2])
h = float(els[3])
fl_x = float(els[4])
fl_y = float(els[4])
k1 = 0
k2 = 0
p1 = 0
p2 = 0
cx = w / 2
cy = h / 2
if els[1] == "SIMPLE_PINHOLE":
cx = float(els[5])
cy = float(els[6])
elif els[1] == "PINHOLE":
fl_y = float(els[5])
cx = float(els[6])
cy = float(els[7])
elif els[1] == "SIMPLE_RADIAL":
cx = float(els[5])
cy = float(els[6])
k1 = float(els[7])
elif els[1] == "RADIAL":
cx = float(els[5])
cy = float(els[6])
k1 = float(els[7])
k2 = float(els[8])
elif els[1] == "OPENCV":
fl_y = float(els[5])
cx = float(els[6])
cy = float(els[7])
k1 = float(els[8])
k2 = float(els[9])
p1 = float(els[10])
p2 = float(els[11])
else:
print("unknown camera model ", els[1])
# fl = 0.5 * w / tan(0.5 * angle_x);
angle_x = math.atan(w / (fl_x * 2)) * 2
angle_y = math.atan(h / (fl_y * 2)) * 2
fovx = angle_x * 180 / math.pi
fovy = angle_y * 180 / math.pi
print(f"camera:\n\tres={w,h}\n\tcenter={cx,cy}\n\tfocal={fl_x,fl_y}\n\tfov={fovx,fovy}\n\tk={k1,k2} p={p1,p2} ")
with open(os.path.join(TEXT_FOLDER,"images.txt"), "r") as f:
i = 0
bottom = np.array([0.0, 0.0, 0.0, 1.0]).reshape([1, 4])
out = {
"camera_angle_x": angle_x,
"camera_angle_y": angle_y,
"fl_x": fl_x,
"fl_y": fl_y,
"k1": k1,
"k2": k2,
"p1": p1,
"p2": p2,
"cx": cx,
"cy": cy,
"w": w,
"h": h,
"aabb_scale": AABB_SCALE,
"frames": [],
}
up = np.zeros(3)
for line in f:
line = line.strip()
if line[0] == "#":
continue
i = i + 1
if i < SKIP_EARLY*2:
continue
if i % 2 == 1:
elems=line.split(" ") # 1-4 is quat, 5-7 is trans, 9ff is filename (9, if filename contains no spaces)
#name = str(PurePosixPath(Path(IMAGE_FOLDER, elems[9])))
# why is this requireing a relitive path while using ^
image_rel = os.path.relpath(IMAGE_FOLDER)
name = str(f"./{image_rel}/{'_'.join(elems[9:])}")
b=sharpness(name)
print(name, "sharpness=",b)
image_id = int(elems[0])
qvec = np.array(tuple(map(float, elems[1:5])))
tvec = np.array(tuple(map(float, elems[5:8])))
R = qvec2rotmat(-qvec)
t = tvec.reshape([3,1])
m = np.concatenate([np.concatenate([R, t], 1), bottom], 0)
c2w = np.linalg.inv(m)
c2w[0:3,2] *= -1 # flip the y and z axis
c2w[0:3,1] *= -1
c2w = c2w[[1,0,2,3],:] # swap y and z
c2w[2,:] *= -1 # flip whole world upside down
up += c2w[0:3,1]
frame={"file_path":name,"sharpness":b,"transform_matrix": c2w}
out["frames"].append(frame)
nframes = len(out["frames"])
up = up / np.linalg.norm(up)
print("up vector was", up)
R = rotmat(up,[0,0,1]) # rotate up vector to [0,0,1]
R = np.pad(R,[0,1])
R[-1, -1] = 1
for f in out["frames"]:
f["transform_matrix"] = np.matmul(R, f["transform_matrix"]) # rotate up to be the z axis
# find a central point they are all looking at
print("computing center of attention...")
totw = 0.0
totp = np.array([0.0, 0.0, 0.0])
for f in out["frames"]:
mf = f["transform_matrix"][0:3,:]
for g in out["frames"]:
mg = g["transform_matrix"][0:3,:]
p, w = closest_point_2_lines(mf[:,3], mf[:,2], mg[:,3], mg[:,2])
if w > 0.01:
totp += p*w
totw += w
totp /= totw
print(totp) # the cameras are looking at totp
for f in out["frames"]:
f["transform_matrix"][0:3,3] -= totp
avglen = 0.
for f in out["frames"]:
avglen += np.linalg.norm(f["transform_matrix"][0:3,3])
avglen /= nframes
print("avg camera distance from origin", avglen)
for f in out["frames"]:
f["transform_matrix"][0:3,3] *= 4.0 / avglen # scale to "nerf sized"
for f in out["frames"]:
f["transform_matrix"] = f["transform_matrix"].tolist()
print(nframes,"frames")
print(f"writing {OUT_PATH}")
with open(OUT_PATH, "w") as outfile:
json.dump(out, outfile, indent=2) | yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
dataLoader/ord.py | Python | import json
import os
from pathlib import Path
from typing import List
import camtools as ct
import cv2
import numpy as np
import open3d as o3d
import torch
from matplotlib import pyplot as plt
from torch.utils.data import Dataset
from tqdm import tqdm
from dataLoader.plotter import plot_cameras_and_scene_bbox, plot_rays
from dataLoader.ray_utils import get_ray_directions, get_rays
class ORD(Dataset):
def __init__(
self,
scene_dir: Path,
split="train",
downsample=1.0,
light_name=None, # Ignored
light_rotation=None, # Ignored
scene_bbox=None, # Ignored
is_stack=None, # Ignored
random_test=None, # Ignored
):
# Remember inputs.
self.scene_dir = scene_dir
self.split = split
self.downsample = downsample
# Read entire dataset.
result_dict = ORD.parse_ord_dataset(self.scene_dir, self.downsample)
# Unpack result_dict
if self.split == "train":
Ks = result_dict["train_Ks"]
Ts = result_dict["train_Ts"]
im_rgbs = result_dict["train_im_rgbs"]
im_masks = result_dict["train_im_masks"]
elif self.split == "test" or self.split == "val":
Ks = result_dict["test_Ks"]
Ts = result_dict["test_Ts"]
im_rgbs = result_dict["test_im_rgbs"]
im_masks = result_dict["test_im_masks"]
self.light_names = result_dict["light_names"]
else:
raise ValueError(f"split must be train, test or val, got {split}.")
# Use im_masks to set im_rgbs's background to white.
im_rgbs = im_rgbs * im_masks.unsqueeze(3) + (1 - im_masks.unsqueeze(3))
# plt.imshow(im_rgbs[0].numpy())
# plt.show()
num_images = len(im_rgbs)
self.img_wh = (im_rgbs[0].shape[1], im_rgbs[0].shape[0])
scene_bbox = result_dict["scene_bbox"]
near_far = result_dict["near_far"]
# Compute directions
w, h = self.img_wh # Pay attention to the order.
fx = Ks[0][0, 0]
fy = Ks[0][1, 1]
self.directions = get_ray_directions(h, w, [fx, fy]) # (h, w, 3)
self.directions = self.directions / torch.norm(
self.directions, dim=-1, keepdim=True)
# Compute rays
self.all_rays = []
for i in range(num_images):
c2w = torch.linalg.inv(Ts[i])
rays_o, rays_d = get_rays(self.directions, c2w) # both (h*w, 3)
self.all_rays += [torch.cat([rays_o, rays_d], 1)] # (h*w, 6)
self.all_rays = torch.cat(self.all_rays, 0)
# T = result_dict["train_Ts"][0].astype(np.float32)
# c2w = torch.tensor(ct.convert.T_to_pose(T))
# rays_o, rays_d = get_rays(self.directions, c2w)
# All properties, some are not needed.
# All tensors are stored in Torch on CPU
# Below are the values from Blender mic scene.
# - N_vis: -1
# - all_depth: []
# - all_light_idx: torch.Size([64000000, 1]), torch.int64
# - all_masks: torch.Size([64000000, 1]), torch.bool
# - all_rays: torch.Size([64000000, 6]), torch.float32
# - all_rgbs: torch.Size([64000000, 3]), torch.float32
# - blender2opencv: [[ 1 0 0 0]
# - [ 0 -1 0 0]
# - [ 0 0 -1 0]
# - [ 0 0 0 1]]
# - center: torch.Size([1, 1, 3]), torch.float32
# - directions: torch.Size([800, 800, 3]), torch.float32
# - downsample: 1.0
# - focal: 1111.1110311937682
# - image_paths: 100 image paths
# - img_wh: (800, 800)
# - intrinsics: torch.Size([3, 3]), torch.float32
# - is_stack: False
# - meta: with keys dict_keys(['camera_angle_x', 'frames'])
# - near_far: [2.0, 6.0]
# - poses: torch.Size([100, 4, 4]), torch.float32
# - proj_mat: torch.Size([100, 3, 4]), torch.float32
# - radius: torch.Size([1, 1, 3]), torch.float32
# - root_dir: ./data/nerf_synthetic/mic/
# - scene_bbox: torch.Size([2, 3]), torch.float32
# - split: train
# - transform: ToTensor()
# - white_bg: True
total_num_pixels = num_images * h * w
self.N_vis = -1
self.all_depth = []
self.all_light_idx = torch.zeros((total_num_pixels, 1),
dtype=torch.long)
all_masks = im_masks.reshape((total_num_pixels, -1))
all_masks[all_masks > 0.5] = 1
all_masks[all_masks <= 0.5] = 0
all_masks = all_masks.bool()
self.all_masks = all_masks
self.all_rays = self.all_rays
self.all_rgbs = im_rgbs.reshape((total_num_pixels, -1))
self.blender2opencv = None
self.scene_bbox = scene_bbox
self.center = torch.mean(self.scene_bbox, axis=0).float().view(1, 1, 3)
self.radius = (self.scene_bbox[1] - self.center).float().view(1, 1, 3)
self.directions = self.directions
self.downsample = downsample
self.focal = fx
self.image_paths = None
self.img_wh = self.img_wh
self.intrinsics = Ks[0]
self.is_stack = False
self.meta = None
self.near_far = near_far
self.poses = torch.tensor(
np.array([ct.convert.T_to_pose(T.numpy()) for T in Ts])).float()
self.proj_mat = None
self.root_dir = None
self.split = self.split
self.transform = None
self.white_bg = True
# Visualize.
if False:
plot_cameras_and_scene_bbox(
Ks=[
self.intrinsics.cpu().numpy()
for _ in range(len(self.poses))
],
Ts=[
ct.convert.pose_to_T(pose)
for pose in self.poses.cpu().numpy()
],
scene_bbox=self.scene_bbox.cpu().numpy(),
mesh=result_dict["mesh"],
camera_size=self.near_far[0] / 5,
)
# plot_rays(
# ray_os=self.all_rays[:h * w, :3].cpu().numpy(),
# ray_ds=self.all_rays[:h * w, 3:].cpu().numpy(),
# # near=self.near_far[0],
# # far=self.near_far[1],
# sample_rate=0.01,
# near=0.01,
# far=1.0,
# )
def __len__(self):
"""
Returns the number of images.
"""
if self.split == "train":
raise NotImplementedError("In train, you should not call __len__")
num_rays = len(self.all_rgbs) # (len(self.meta['frames'])*h*w, 3)
width, height = self.img_wh
num_images = int(num_rays / (width * height))
# assert num_images == len(self.meta['frames'])
return num_images
def __getitem__(self, idx):
print(f"BlenderDataset.__getitem__(): {idx}")
# use data in the buffers
if self.split == 'train':
sample = {'rays': self.all_rays[idx], 'rgbs': self.all_rgbs[idx]}
raise NotImplementedError(
"In train, you should not call __getitem__")
# create data for each image separately
else:
width, height = self.img_wh
wth = width * height
num_images = self.__len__()
# [128000000, 3] -> [200, 800 * 800, 3]
all_rgbs = self.all_rgbs.reshape(num_images, height * width, 3)
# [128000000, 6] -> [200, 800 * 800, 6]
all_rays = self.all_rays.reshape(num_images, height * width, 6)
# [128000000, 1] -> [200, 800 * 800, 1]
all_masks = self.all_masks.reshape(num_images, height * width, 1)
# [128000000, 1] -> [200, 800 * 800, 1]
all_light_idx = self.all_light_idx.reshape(num_images,
height * width, 1)
sample = {
# (int, int)
'img_wh': self.img_wh,
# [light_num, H*W, 1]
'light_idx': all_light_idx[idx].view(-1, wth, 1),
# [H*W, 6]
'rays': all_rays[idx],
# [light_num, H*W, 3]
'rgbs': all_rgbs[idx].view(-1, wth, 3),
# [H*W, 1]
'rgbs_mask': all_masks[idx],
# str, currently, lights for test views are hard-coded
'light_name': self.light_names[idx],
}
print(f"light_idx.shape: {sample['light_idx'].shape}")
print(f"rays.shape : {sample['rays'].shape}")
print(f"rgbs.shape : {sample['rgbs'].shape}")
print(f"rgbs_mask.shape: {sample['rgbs_mask'].shape}")
return sample
@staticmethod
def read_camera_txt(camera_path: Path):
# Must be .txt
assert camera_path.suffix == ".txt"
params = np.loadtxt(camera_path)
K, R, t, (width, height, channels) = (
params[:3],
params[3:6],
params[6],
params[7].astype(int),
)
T = ct.convert.R_t_to_T(R, t)
return K, T
@staticmethod
def read_cameras_from_txts(camera_paths: List[Path]):
cameras = [
ORD.read_camera_txt(camera_path) for camera_path in camera_paths
]
Ks = [K for K, _ in cameras]
Ts = [T for _, T in cameras]
return Ks, Ts
@staticmethod
def transform_T_with_normalize_mat(T, normalize_mat):
"""
Transform T with normalize_mat computed by ct.normalize.compute_normalize_mat().
"""
C = ct.convert.T_to_C(T)
C_new = ct.transform.transform_points(C.reshape((-1, 3)),
normalize_mat).flatten()
pose_new = np.linalg.inv(T)
pose_new[:3, 3] = C_new
T_new = ct.convert.pose_to_T(pose_new)
return T_new
@staticmethod
def parse_ord_dataset(scene_dir, downsample=1.0):
"""
Parse train, test, and env light data from the scene directory.
Args:
- scene_dir: Scene directory, containing both train and test.
Return:
- result_dict["train_Ks"] : (num_train, 3, 3).
- result_dict["train_Ts"] : (num_train, 4, 4).
- result_dict["train_im_rgbs"] : (num_train, height, width, 3).
- result_dict["train_im_masks"]: (num_train, height, width), 0-1, float.
- result_dict["test_Ks"] : (num_test, 3, 3).
- result_dict["test_Ts"] : (num_test, 4, 4).
- result_dict["test_im_rgbs"] : (num_test, height, width, 3).
- result_dict["test_im_masks"] : (num_test, height, width), 0-1, float.
- result_dict["scene_bbox"] : [[x_min, y_min, z_min],
[x_max, y_max, z_max]].
- result_dict["light_names"] : (num_env_lights, 3).
- result_dict["mesh"] : open3d.geometry.TriangleMesh GT mesh.
"""
scene_dir = Path(scene_dir)
if not scene_dir.is_dir():
raise ValueError(f"scene_dir {scene_dir} is not a directory.")
# Guess the dataset name for scene_dir. Of course, this is not robust,
# but it is enough for now.
# TODO: improve this.
if scene_dir.name == "test":
dataset_name = scene_dir.parent.parent.name
else:
dataset_name = scene_dir.parent.name
print(f"Parsed dataset name from scene_dir: {dataset_name}")
# Load the ground-truth mesh, this is privileged information, it is only
# used for scaling the scene, and it shall NOT be used for training.
mesh_path = scene_dir / "neus_mesh.ply"
if not mesh_path.is_file():
raise ValueError(f"mesh_path {mesh_path} does not exist.")
mesh = o3d.io.read_triangle_mesh(str(mesh_path))
print(f"Loaded mesh from {mesh_path}")
mesh.compute_vertex_normals()
points = np.array(mesh.vertices)
normalize_mat = ct.normalize.compute_normalize_mat(points)
if dataset_name == "dtu" or dataset_name == "bmvs":
print("Normalize mesh with normalize_mat")
points_normalized = ct.transform.transform_points(points, normalize_mat)
mesh.vertices = o3d.utility.Vector3dVector(points_normalized)
# Load the training set: {scene_dir}/inputs.
inputs_dir = scene_dir / "inputs"
if not inputs_dir.is_dir():
raise ValueError(f"inputs_dir {inputs_dir} is not a directory.")
train_camera_paths = sorted(inputs_dir.glob("camera_*.txt"))
train_im_rgb_paths = sorted(inputs_dir.glob("image_*.png"))
train_im_mask_paths = sorted(inputs_dir.glob("mask_*.png"))
num_train = len(train_camera_paths)
assert num_train == len(train_camera_paths)
assert num_train == len(train_im_rgb_paths)
assert num_train == len(train_im_mask_paths)
train_Ks, train_Ts = ORD.read_cameras_from_txts(train_camera_paths)
if dataset_name == "dtu" or dataset_name == "bmvs":
print("Normalize train_Ts with normalize_mat.")
train_Ts = [
ORD.transform_T_with_normalize_mat(train_T, normalize_mat)
for train_T in train_Ts
]
# (num_train, h, w)
train_im_rgbs = np.array([ct.io.imread(p) for p in train_im_rgb_paths])
# (num_train, 1165, 1746), float, from 0-1
train_im_masks = np.array(
[ct.io.imread(p) for p in train_im_mask_paths])
train_im_masks[train_im_masks < 0.5] = 0.0
train_im_masks[train_im_masks >= 0.5] = 1.0
assert train_im_masks.ndim == 3
assert train_im_masks.shape[0] == num_train
print(f"Num train images: {num_train}")
# Load test set: {scene_dir}.
test_camera_paths = sorted(scene_dir.glob("gt_camera_*.txt"))
test_im_rgb_paths = sorted(scene_dir.glob("gt_image_*.png"))
test_im_mask_paths = sorted(scene_dir.glob("gt_mask_*.png"))
num_test = len(test_camera_paths)
assert num_test == len(test_camera_paths)
assert num_test == len(test_im_rgb_paths)
assert num_test == len(test_im_mask_paths)
test_Ks, test_Ts = ORD.read_cameras_from_txts(test_camera_paths)
if dataset_name == "dtu" or dataset_name == "bmvs":
print("Normalize test_Ts with normalize_mat.")
test_Ts = [
ORD.transform_T_with_normalize_mat(test_T, normalize_mat)
for test_T in test_Ts
]
# (num_test, h, w, 3)
test_im_rgbs = np.array([ct.io.imread(p) for p in test_im_rgb_paths])
# (num_test, 1165, 1746), float, from 0-1
test_im_masks = np.array([ct.io.imread(p) for p in test_im_mask_paths])
test_im_masks[test_im_masks < 0.5] = 0.0
test_im_masks[test_im_masks >= 0.5] = 1.0
assert (test_im_masks.shape[-1] == 3)
test_im_masks = test_im_masks[..., 0]
print(f"Num test images: {num_test}")
# For every image in the test set, there is one corresponding
# environment light. The environment light may be shared across
# multiple images, which is dataset dependent.
if dataset_name == "ord":
light_names = [
"gt_env_512_rotated_0000",
"gt_env_512_rotated_0001",
"gt_env_512_rotated_0002",
"gt_env_512_rotated_0003",
"gt_env_512_rotated_0004",
"gt_env_512_rotated_0005",
"gt_env_512_rotated_0006",
"gt_env_512_rotated_0007",
"gt_env_512_rotated_0008",
]
elif dataset_name == "synth4relight_subsampled":
light_names = []
light_names += ["gt_env_512_rotated_0000"] * 13
light_names += ["gt_env_512_rotated_0013"] * 13
light_names += ["gt_env_512_rotated_0026"] * 13
elif dataset_name == "dtu":
light_names = [None] * num_test
elif dataset_name == "bmvs":
light_names = [None] * num_test
else:
raise ValueError(f"Unknown dataset type: {dataset_name}")
# Print.
print("Found test lights:")
for test_im_rgb_path, light_name in zip(test_im_rgb_paths,
light_names):
print(f"- {test_im_rgb_path.name}: {light_name}")
# Check numbers matching.
if num_test != len(light_names):
raise ValueError(
f"num_test ({num_test}) != len(light_names) ({len(light_names)})."
)
# Check .hdr file exist.
for light_name in light_names:
if light_name is None:
continue
light_path = scene_dir / f"{light_name}.hdr"
if not light_path.is_file():
raise ValueError(f"Light path {light_path} does not exist.")
# Downsample: changes the image and intrinsics
if downsample != 1.0:
assert downsample in {2.0, 4.0}
def downsample_K(K):
K_new = K.copy()
K_new[0, 0] /= downsample
K_new[1, 1] /= downsample
K_new[0, 2] /= downsample
K_new[1, 2] /= downsample
return K_new
def downsample_image(im):
width = int(im.shape[1] / downsample)
height = int(im.shape[0] / downsample)
return ct.image.resize(im, shape_wh=(width, height))
train_Ks = np.array([downsample_K(K) for K in train_Ks])
train_im_rgbs = np.array(
[downsample_image(im) for im in train_im_rgbs])
train_im_masks = np.array(
[downsample_image(im) for im in train_im_masks])
test_Ks = np.array([downsample_K(K) for K in test_Ks])
test_im_rgbs = np.array(
[downsample_image(im) for im in test_im_rgbs])
test_im_masks = np.array(
[downsample_image(im) for im in test_im_masks])
# Read bounding boxes.
# dataset/antman/test/inputs/object_bounding_box.txt
# xmin xmax ymin ymax zmin zmax, one value per line
bbox_path = scene_dir / "inputs" / "object_bounding_box.txt"
bbox = np.loadtxt(bbox_path)
x_min, x_max, y_min, y_max, z_min, z_max = bbox
if dataset_name == "dtu" or dataset_name == "bmvs":
print("Normalize the bounding box.")
bbox_diag_vertices = np.array([
[x_min, y_min, z_min],
[x_max, y_max, z_max],
])
bbox_diag_vertices = ct.transform.transform_points(bbox_diag_vertices,
normalize_mat)
x_min, y_min, z_min = bbox_diag_vertices[0]
x_max, y_max, z_max = bbox_diag_vertices[1]
# Compute min/max distance to bounding box vertex to get near far estimate.
# Camera centers (N, 3)
train_Cs = np.array([ct.convert.T_to_C(T) for T in train_Ts])
bbox_vertices = np.array([
[x_min, y_min, z_min],
[x_min, y_min, z_max],
[x_min, y_max, z_min],
[x_min, y_max, z_max],
[x_max, y_min, z_min],
[x_max, y_min, z_max],
[x_max, y_max, z_min],
[x_max, y_max, z_max],
])
distances = np.linalg.norm(train_Cs[:, None, :] -
bbox_vertices[None, :, :],
axis=-1)
estimated_near = float(np.min(distances))
estimated_far = float(np.max(distances))
print(f"Estimated near: {estimated_near:.3f}, "
f"far: {estimated_far:.3f}")
# Give it some slacks.
scene_bbox_from_config = np.array([[x_min, y_min, z_min],
[x_max, y_max, z_max]])
print(f"scene_bbox_from_config:\n{scene_bbox_from_config}")
if dataset_name == "ord":
scene_bbox = np.array([[-1.5, -1.5, -1.5], [1.5, 1.5, 1.5]])
elif dataset_name == "synth4relight_subsampled":
scene_bbox = np.array([[-1.5, -1.5, -1.5], [1.5, 1.5, 1.5]])
elif dataset_name == "dtu":
scene_bbox = np.array([[-1.5, -1.5, -1.5], [1.5, 1.5, 1.5]])
elif dataset_name == "bmvs":
scene_bbox = np.array([[-1.5, -1.5, -1.5], [1.5, 1.5, 1.5]])
else:
raise ValueError(f"Unknown dataset type: {dataset_name}")
print(f"scene_bbox :\n{scene_bbox} (actually used)")
# Write to result_dict
result_dict = {}
result_dict["train_Ks"] = torch.tensor(train_Ks).float()
result_dict["train_Ts"] = torch.tensor(train_Ts).float()
result_dict["train_im_rgbs"] = torch.tensor(train_im_rgbs).float()
result_dict["train_im_masks"] = torch.tensor(train_im_masks).float()
result_dict["test_Ks"] = torch.tensor(test_Ks).float()
result_dict["test_Ts"] = torch.tensor(test_Ts).float()
result_dict["test_im_rgbs"] = torch.tensor(test_im_rgbs).float()
result_dict["test_im_masks"] = torch.tensor(test_im_masks).float()
result_dict["scene_bbox"] = torch.tensor(scene_bbox).float()
result_dict["near_far"] = [estimated_near, estimated_far]
result_dict["light_names"] = light_names
result_dict["mesh"] = mesh
return result_dict
def main():
    """Smoke-test entry point: load the 'antman' scene through ORD.

    Returns the constructed dataset object so it can be inspected
    interactively; callers that ignore the return value are unaffected.
    """
    scene_name = "antman"
    # Renamed from `ord`, which shadowed the builtin `ord()` and was unused.
    dataset = ORD(scene_dir=scene_name)
    return dataset


if __name__ == "__main__":
    main()
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
dataLoader/plotter.py | Python | import os
import pickle
import tempfile
from pathlib import Path
from typing import Any, Dict, List, Tuple, Type
import camtools as ct
import numpy as np
import open3d as o3d
# Process-wide cache file for geometries shown by this module.
# - _geometries_cache_path will be set by get_geometries_cache_path() in the
#   first run. Example of the path: /tmp/open3d_lf05831.pkl
# - Every time get_geometries_cache_path() is called, it will return the same
#   value; this value persists for the whole execution of the program.
# - Tempfiles are stored in the /tmp folder and are NOT cleared by this
#   module. Typically, the /tmp folder is cleared by the OS on the next reboot.
# - To clear the cache manually, run: rm -rf /tmp/open3d_*.pkl
_geometries_cache_path = None
def plot_geometries(
    geometries: List[o3d.geometry.Geometry],
    load_cache: bool = True,
    update_cache: bool = True,
) -> None:
    """
    Visualize geometries with Open3D, optionally merging in the cached
    geometries from disk and refreshing that cache afterwards.
    """
    to_draw = list(geometries)
    if load_cache:
        # Previously cached geometries are drawn first, then the new ones.
        to_draw = load_geometries(get_geometries_cache_path()) + to_draw
    if update_cache:
        save_geometries(get_geometries_cache_path(), to_draw)
    o3d.visualization.draw_geometries(to_draw)
def get_geometries_cache_path() -> Path:
    """
    Return the process-wide geometry cache path, creating an empty cache
    file in the system temp directory on first use.
    """
    global _geometries_cache_path
    if _geometries_cache_path is not None:
        return _geometries_cache_path
    tmp_file = tempfile.NamedTemporaryFile(delete=False,
                                           prefix="open3d",
                                           suffix=".pkl")
    _geometries_cache_path = Path(tmp_file.name)
    # Seed the cache with an empty geometry list so readers never fail.
    save_geometries(_geometries_cache_path, [])
    print(f"[plotter] Geometries cache created: {_geometries_cache_path}")
    return _geometries_cache_path
def save_geometries(path: Path,
                    geometries: List[o3d.geometry.Geometry]) -> None:
    """
    Serialize geometries to `path` with pickle.

    Attributes stored per type:
        PointCloud  : points, colors, normals
        TriangleMesh: vertices, triangles, vertex_colors, vertex_normals,
                      triangle_normals
        LineSet     : points, lines, colors
    """
    # (geometry class, type tag, attribute names to persist) — checked in
    # the same order as the original isinstance chain.
    specs = (
        (o3d.geometry.PointCloud, "PointCloud",
         ("points", "colors", "normals")),
        (o3d.geometry.TriangleMesh, "TriangleMesh",
         ("vertices", "triangles", "vertex_colors", "vertex_normals",
          "triangle_normals")),
        (o3d.geometry.LineSet, "LineSet",
         ("points", "lines", "colors")),
    )
    data = []
    for geometry in geometries:
        for cls, tag, attrs in specs:
            if isinstance(geometry, cls):
                record = {"type": tag}
                for attr in attrs:
                    record[attr] = np.asarray(getattr(geometry, attr))
                data.append(record)
                break
        else:
            raise NotImplementedError("Unsupported geometry type.")
    with open(path, "wb") as f:
        pickle.dump(data, f)
    print(f"[plotter] Saved {len(data)} geometries to {path}")
def load_geometries(path: Path) -> List[o3d.geometry.Geometry]:
    """
    Deserialize geometries previously written by `save_geometries`.
    """
    with open(path, "rb") as f:
        records = pickle.load(f)

    geometries = []
    for record in records:
        kind = record["type"]
        if kind == "PointCloud":
            geometry = o3d.geometry.PointCloud()
            geometry.points = o3d.utility.Vector3dVector(record["points"])
            geometry.colors = o3d.utility.Vector3dVector(record["colors"])
            geometry.normals = o3d.utility.Vector3dVector(record["normals"])
        elif kind == "TriangleMesh":
            geometry = o3d.geometry.TriangleMesh()
            geometry.vertices = o3d.utility.Vector3dVector(record["vertices"])
            geometry.triangles = o3d.utility.Vector3iVector(record["triangles"])
            geometry.vertex_colors = o3d.utility.Vector3dVector(
                record["vertex_colors"])
            geometry.vertex_normals = o3d.utility.Vector3dVector(
                record["vertex_normals"])
            geometry.triangle_normals = o3d.utility.Vector3dVector(
                record["triangle_normals"])
        elif kind == "LineSet":
            geometry = o3d.geometry.LineSet()
            geometry.points = o3d.utility.Vector3dVector(record["points"])
            geometry.lines = o3d.utility.Vector2iVector(record["lines"])
            geometry.colors = o3d.utility.Vector3dVector(record["colors"])
        else:
            raise NotImplementedError("Unsupported geometry type.")
        geometries.append(geometry)
    print(f"[plotter] Loaded {len(geometries)} geometries from {path}")
    return geometries
def plot_cameras_and_scene_bbox(
    Ks,
    Ts,
    scene_bbox,
    mesh=None,
    camera_size=None,
    load_cache: bool = True,
    update_cache: bool = True,
):
    """
    Visualize camera frames together with a red wireframe of the scene
    bounding box (and optionally a mesh), using the same cache mechanism
    as `plot_geometries`.

    Ks: list of intrinsics, (N, 3, 3).
    Ts: list of extrinsics, (N, 4, 4).
    scene_bbox: [[x_min, y_min, z_min], [x_max, y_max, z_max]], (2, 3).
    """
    frame_size = 0.1 if camera_size is None else camera_size
    camera_frames = ct.camera.create_camera_frames(Ks,
                                                   Ts,
                                                   size=frame_size,
                                                   center_line=False)

    # 8 box corners, enumerated x-major so indices match the edge list below.
    x_min, y_min, z_min = scene_bbox[0]
    x_max, y_max, z_max = scene_bbox[1]
    corners = np.array([[x, y, z]
                        for x in (x_min, x_max)
                        for y in (y_min, y_max)
                        for z in (z_min, z_max)])
    # 12 edges of the axis-aligned box.
    edges = np.array([
        [0, 1], [0, 2], [0, 4],
        [1, 3], [1, 5],
        [2, 3], [2, 6],
        [3, 7],
        [4, 5], [4, 6],
        [5, 7],
        [6, 7],
    ])
    box_frame = o3d.geometry.LineSet()
    box_frame.points = o3d.utility.Vector3dVector(corners)
    box_frame.lines = o3d.utility.Vector2iVector(edges)
    box_frame.colors = o3d.utility.Vector3dVector(
        np.array([[1, 0, 0]] * len(edges)))

    geometries = [box_frame, camera_frames]
    if mesh is not None:
        mesh.compute_vertex_normals()
        geometries.append(mesh)
    # Handle cache.
    if load_cache:
        geometries = load_geometries(get_geometries_cache_path()) + geometries
    if update_cache:
        save_geometries(get_geometries_cache_path(), geometries)
    o3d.visualization.draw_geometries(geometries)
def plot_rays(ray_os,
              ray_ds,
              near,
              far,
              sample_rate=0.001,
              load_cache: bool = True,
              update_cache: bool = True):
    """
    Visualize an evenly-spaced subsample of rays as red segments spanning
    [near, far] along each ray.

    ray_os: (N, 3) ray origins.
    ray_ds: (N, 3) ray directions.
    """
    # Evenly subsample the rays so the plot stays light.
    total = len(ray_os)
    keep = np.linspace(0, total - 1, int(total * sample_rate)).astype(int)
    ray_os = ray_os[keep]
    ray_ds = ray_ds[keep]

    # One line segment per kept ray: from the near point to the far point.
    num_kept = len(ray_os)
    starts = ray_os + ray_ds * near
    ends = ray_os + ray_ds * far
    line_set = o3d.geometry.LineSet()
    line_set.points = o3d.utility.Vector3dVector(
        np.concatenate([starts, ends], axis=0))
    line_set.lines = o3d.utility.Vector2iVector(
        np.array([[i, i + num_kept] for i in range(num_kept)]))
    line_set.colors = o3d.utility.Vector3dVector(
        np.array([[1, 0, 0]] * num_kept))

    geometries = [line_set]
    # Handle cache.
    if load_cache:
        geometries = load_geometries(get_geometries_cache_path()) + geometries
    if update_cache:
        save_geometries(get_geometries_cache_path(), geometries)
    o3d.visualization.draw_geometries(geometries)
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
dataLoader/ray_utils.py | Python | import torch, re
import numpy as np
from torch import searchsorted
from kornia import create_meshgrid
import torch.nn.functional as F
# from utils import index_point_feature
def depth2dist(z_vals, cos_angle):
    """
    Convert per-sample ray depths into per-segment lengths.

    z_vals: [N_ray, N_sample] sample depths along each ray.
    cos_angle: [N_ray] per-ray scale factor (presumably cos of the ray
        angle to the optical axis — confirm with callers).
    Returns: [N_ray, N_sample] segment lengths; the last segment is padded
        with a huge value (1e10) so the final sample covers the remainder.
    """
    device = z_vals.device
    deltas = z_vals[..., 1:] - z_vals[..., :-1]
    tail = torch.Tensor([1e10]).to(device).expand(deltas[..., :1].shape)
    deltas = torch.cat([deltas, tail], -1)  # [N_rays, N_samples]
    return deltas * cos_angle.unsqueeze(-1)
def ndc2dist(ndc_pts, cos_angle):
    """
    Euclidean distances between consecutive NDC-space sample points,
    padded with a huge last segment (1e10 * cos_angle).
    """
    seg_lengths = torch.norm(ndc_pts[:, 1:] - ndc_pts[:, :-1], dim=-1)
    tail = 1e10 * cos_angle.unsqueeze(-1)
    return torch.cat([seg_lengths, tail], -1)  # [N_rays, N_samples]
def get_ray_directions(H, W, focal, center=None):
    """
    Get ray directions for all pixels in camera coordinate (z forward).
    Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/
               ray-tracing-generating-camera-rays/standard-coordinate-systems
    Inputs:
        H, W: image height and width
        focal: (fx, fy) focal lengths
        center: optional (cx, cy) principal point; defaults to image center
    Outputs:
        directions: (H, W, 3), the direction of the rays in camera
        coordinate (not normalized)
    """
    # +0.5 shifts the sample locations to pixel centers.
    grid = create_meshgrid(H, W, normalized_coordinates=False)[0] + 0.5  # 1xHxWx2
    u, v = grid.unbind(-1)
    cx, cy = center if center is not None else [W / 2, H / 2]
    return torch.stack(
        [(u - cx) / focal[0], (v - cy) / focal[1], torch.ones_like(u)],
        -1)  # (H, W, 3)
def get_ray_directions_blender(H, W, focal, center=None):
    """
    Ray directions for all pixels with flipped y and -z forward (the
    Blender/OpenGL camera convention, per the function name).
    Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/
               ray-tracing-generating-camera-rays/standard-coordinate-systems
    Inputs:
        H, W: image height and width
        focal: (fx, fy) focal lengths
        center: optional (cx, cy) principal point; defaults to image center
    Outputs:
        directions: (H, W, 3), the direction of the rays in camera
        coordinate (not normalized)
    """
    grid = create_meshgrid(H, W, normalized_coordinates=False)[0] + 0.5  # +0.5 pixel centering
    u, v = grid.unbind(-1)
    cx, cy = center if center is not None else [W / 2, H / 2]
    return torch.stack(
        [(u - cx) / focal[0], -(v - cy) / focal[1], -torch.ones_like(u)],
        -1)  # (H, W, 3)
def get_rays(directions, c2w):
    """
    Get ray origins and directions in world coordinate for all pixels of
    one image.
    Reference: https://www.scratchapixel.com/lessons/3d-basic-rendering/
               ray-tracing-generating-camera-rays/standard-coordinate-systems
    Inputs:
        directions: (H, W, 3) precomputed ray directions in camera coordinate
        c2w: (3, 4) transformation matrix from camera coordinate to world
             coordinate
    Outputs:
        rays_o: (H*W, 3), the origin of the rays in world coordinate
        rays_d: (H*W, 3), the direction of the rays in world coordinate
                (NOT normalized — normalization is left to callers)
    """
    # Rotate camera-frame directions into the world frame, flattened.
    rays_d = (directions @ c2w[:3, :3].T).view(-1, 3)
    # Every ray starts at the camera center (the translation column).
    rays_o = c2w[:3, 3].expand(rays_d.shape)
    return rays_o, rays_d
def ndc_rays_blender(H, W, focal, near, rays_o, rays_d):
    """
    Project rays into normalized device coordinates (negative-sign /
    Blender-style variant; compare `ndc_rays`).
    """
    # Slide each origin along its ray onto the near plane first.
    t_near = -(near + rays_o[..., 2]) / rays_d[..., 2]
    rays_o = rays_o + t_near[..., None] * rays_d

    ox, oy, oz = rays_o[..., 0], rays_o[..., 1], rays_o[..., 2]
    dx, dy, dz = rays_d[..., 0], rays_d[..., 1], rays_d[..., 2]
    sx = -1. / (W / (2. * focal))
    sy = -1. / (H / (2. * focal))

    ndc_o = torch.stack(
        [sx * ox / oz, sy * oy / oz, 1. + 2. * near / oz], -1)
    ndc_d = torch.stack(
        [sx * (dx / dz - ox / oz), sy * (dy / dz - oy / oz),
         -2. * near / oz], -1)
    return ndc_o, ndc_d
def ndc_rays(H, W, focal, near, rays_o, rays_d):
    """
    Project rays into normalized device coordinates (positive-sign
    variant; compare `ndc_rays_blender`).
    """
    # Slide each origin along its ray onto the near plane first.
    t_near = (near - rays_o[..., 2]) / rays_d[..., 2]
    rays_o = rays_o + t_near[..., None] * rays_d

    ox, oy, oz = rays_o[..., 0], rays_o[..., 1], rays_o[..., 2]
    dx, dy, dz = rays_d[..., 0], rays_d[..., 1], rays_d[..., 2]
    sx = 1. / (W / (2. * focal))
    sy = 1. / (H / (2. * focal))

    ndc_o = torch.stack(
        [sx * ox / oz, sy * oy / oz, 1. - 2. * near / oz], -1)
    ndc_d = torch.stack(
        [sx * (dx / dz - ox / oz), sy * (dy / dz - oy / oz),
         2. * near / oz], -1)
    return ndc_o, ndc_d
# Hierarchical sampling (section 5.2)
def sample_pdf(bins, weights, N_samples, det=False, pytest=False):
    """
    Inverse-transform sample N_samples positions per ray from the discrete
    distribution defined by `weights` over the intervals in `bins`.

    bins: (batch, M) interval edges; M matches the padded CDF length below.
    weights: (batch, M-1) unnormalized per-interval weights.
    N_samples: number of samples to draw per batch row.
    det: if True, use evenly spaced quantiles instead of random ones.
    pytest: if True, replace the quantiles with NumPy's seeded RNG output
        so results are reproducible in tests.
    Returns:
        samples: (batch, N_samples) positions distributed according to
        the weights.
    """
    device = weights.device
    # Get pdf; the 1e-5 pad keeps all-zero weight rows from producing NaNs.
    weights = weights + 1e-5  # prevent nans
    pdf = weights / torch.sum(weights, -1, keepdim=True)
    cdf = torch.cumsum(pdf, -1)
    cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], -1)  # (batch, len(bins))
    # Take uniform samples (stratified when det=True)
    if det:
        u = torch.linspace(0., 1., steps=N_samples, device=device)
        u = u.expand(list(cdf.shape[:-1]) + [N_samples])
    else:
        u = torch.rand(list(cdf.shape[:-1]) + [N_samples], device=device)
    # Pytest, overwrite u with numpy's fixed random numbers
    if pytest:
        np.random.seed(0)
        new_shape = list(cdf.shape[:-1]) + [N_samples]
        if det:
            u = np.linspace(0., 1., N_samples)
            u = np.broadcast_to(u, new_shape)
        else:
            u = np.random.rand(*new_shape)
        u = torch.Tensor(u)
    # Invert CDF: locate the CDF segment each quantile falls into.
    u = u.contiguous()
    inds = searchsorted(cdf.detach(), u, right=True)
    below = torch.max(torch.zeros_like(inds - 1), inds - 1)
    above = torch.min((cdf.shape[-1] - 1) * torch.ones_like(inds), inds)
    inds_g = torch.stack([below, above], -1)  # (batch, N_samples, 2)
    # Gather the CDF values and bin edges bracketing each quantile.
    matched_shape = [inds_g.shape[0], inds_g.shape[1], cdf.shape[-1]]
    cdf_g = torch.gather(cdf.unsqueeze(1).expand(matched_shape), 2, inds_g)
    bins_g = torch.gather(bins.unsqueeze(1).expand(matched_shape), 2, inds_g)
    # Linear interpolation within the bracketing segment; guard against
    # zero-width CDF segments to avoid division by ~0.
    denom = (cdf_g[..., 1] - cdf_g[..., 0])
    denom = torch.where(denom < 1e-5, torch.ones_like(denom), denom)
    t = (u - cdf_g[..., 0]) / denom
    samples = bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0])
    return samples
def dda(rays_o, rays_d, bbox_3D):
    """
    Ray/AABB intersection via the slab method.

    rays_o, rays_d: (N_rays, 3) origins and directions.
    bbox_3D: (2, 3) min/max corners of the box.
    Returns (t_min, t_max), each (N_rays, 1): entry and exit parameters.
    """
    # Small epsilon keeps the reciprocal finite for axis-aligned rays.
    inv_dir = 1.0 / (rays_d + 1e-6)
    t_lo = (bbox_3D[:1] - rays_o) * inv_dir  # N_rays 3
    t_hi = (bbox_3D[1:] - rays_o) * inv_dir
    slabs = torch.stack((t_lo, t_hi))  # 2 N_rays 3
    t_min = torch.max(torch.min(slabs, dim=0)[0], dim=-1, keepdim=True)[0]
    t_max = torch.min(torch.max(slabs, dim=0)[0], dim=-1, keepdim=True)[0]
    return t_min, t_max
def ray_marcher(rays,
                N_samples=64,
                lindisp=False,
                perturb=0,
                bbox_3D=None):
    """
    Sample 3D points along each ray.

    Inputs:
        rays: (N_rays, 8) packed as [origin(3), direction(3), near(1), far(1)]
        N_samples: number of depth samples per ray
        lindisp: if True, sample linearly in disparity instead of depth
        perturb: if > 0, jitter each depth inside its interval
        bbox_3D: optional (2, 3) AABB; when given, near/far are replaced by
            the ray/box intersection interval
    Returns:
        (points (N_rays, N_samples, 3), rays_o, rays_d, z_vals)
    """
    num_rays = rays.shape[0]
    rays_o, rays_d = rays[:, 0:3], rays[:, 3:6]  # both (N_rays, 3)
    near, far = rays[:, 6:7], rays[:, 7:8]  # both (N_rays, 1)
    if bbox_3D is not None:
        # Tighten the sampling interval to the box intersection.
        near, far = dda(rays_o, rays_d, bbox_3D)

    steps = torch.linspace(0, 1, N_samples, device=rays.device)  # (N_samples)
    if lindisp:
        # Linear in disparity (1/depth).
        z_vals = 1 / (1 / near * (1 - steps) + 1 / far * steps)
    else:
        # Linear in depth.
        z_vals = near * (1 - steps) + far * steps
    z_vals = z_vals.expand(num_rays, N_samples)

    if perturb > 0:
        # Jitter each sample within the interval bounded by midpoints.
        mids = 0.5 * (z_vals[:, :-1] + z_vals[:, 1:])  # (N_rays, N_samples-1)
        upper = torch.cat([mids, z_vals[:, -1:]], -1)
        lower = torch.cat([z_vals[:, :1], mids], -1)
        jitter = perturb * torch.rand(z_vals.shape, device=rays.device)
        z_vals = lower + (upper - lower) * jitter

    points = rays_o.unsqueeze(1) + \
             rays_d.unsqueeze(1) * z_vals.unsqueeze(2)  # (N_rays, N_samples, 3)
    return points, rays_o, rays_d, z_vals
def read_pfm(filename):
    """
    Read a PFM (Portable Float Map) image.

    Returns:
        (data, scale): data is an (H, W, 3) float array for color ('PF')
        files or (H, W) for grayscale ('Pf'); scale is the absolute scale
        factor from the header.
    Raises:
        Exception: if the header is not a valid PFM header.
    """
    # Context manager fixes the original's file-handle leak: on a malformed
    # header the function raised without ever closing the file.
    with open(filename, 'rb') as file:
        header = file.readline().decode('utf-8').rstrip()
        if header == 'PF':
            color = True
        elif header == 'Pf':
            color = False
        else:
            raise Exception('Not a PFM file.')

        dim_match = re.match(r'^(\d+)\s(\d+)\s$',
                             file.readline().decode('utf-8'))
        if not dim_match:
            raise Exception('Malformed PFM header.')
        width, height = map(int, dim_match.groups())

        # A negative scale marks little-endian pixel data.
        scale = float(file.readline().rstrip())
        if scale < 0:
            endian = '<'  # little-endian
            scale = -scale
        else:
            endian = '>'  # big-endian

        data = np.fromfile(file, endian + 'f')

    shape = (height, width, 3) if color else (height, width)
    # PFM stores rows bottom-to-top; flip to conventional top-to-bottom.
    data = np.flipud(np.reshape(data, shape))
    return data, scale
def ndc_bbox(all_rays):
    """
    Axis-aligned bounding box covering both the ray origins ("near" points)
    and origin + direction ("far" points) of all rays.

    all_rays: (..., 6+) with origins in [..., :3], directions in [..., 3:6].
    Returns a (2, 3) tensor [min_corner, max_corner].
    """
    origins = all_rays[..., :3].reshape(-1, 3)
    endpoints = (all_rays[..., :3] + all_rays[..., 3:6]).reshape(-1, 3)
    near_min = origins.min(dim=0)[0]
    near_max = origins.max(dim=0)[0]
    far_min = endpoints.min(dim=0)[0]
    far_max = endpoints.max(dim=0)[0]
    print(f'===> ndc bbox near_min:{near_min} near_max:{near_max} far_min:{far_min} far_max:{far_max}')
    return torch.stack((torch.minimum(near_min, far_min),
                        torch.maximum(near_max, far_max)))
def safe_l2_normalize(x, dim=None, eps=1e-6):
    """L2-normalize `x` along `dim`; `eps` floors the norm to avoid division by zero."""
    return F.normalize(x, p=2, dim=dim, eps=eps)
dataLoader/tensoIR_general_multi_lights.py | Python | import os, random
import json
from pathlib import Path
import numpy as np
from PIL import Image
import cv2
from tqdm import tqdm
import torch
from torch.utils.data import Dataset
from torchvision import transforms as T
from dataLoader.ray_utils import *
from models.relight_utils import read_hdr
class TensoIR_Dataset_unknown_general_multi_lights(Dataset):
    """Multi-light TensoIR dataset: each view is rendered under several named
    environment lights (rgba_<light>.png per view folder), plus per-view
    albedo and normal maps and a metadata.json with camera parameters."""

    # NOTE(review): `light_name_list` is a mutable default argument shared
    # across calls — consider a None default initialized in the body.
    def __init__(self,
                 root_dir,
                 hdr_dir,
                 split='train',
                 random_test=False,
                 N_vis=-1,
                 downsample=1.0,
                 sub=0,
                 light_name_list=["sunset", "snow", "courtyard"],
                 **temp
                 ):
        """
        @param root_dir: str | Root path of dataset folder
        @param hdr_dir: str | Root path for HDR folder
        @param split: str | e.g. 'train' / 'test'
        @param random_test: bool | Whether to randomly select a test view and a lighting
            (when False, the split list is sorted for deterministic order)
        @param N_vis: int | If N_vis > 0, select N_vis frames from the dataset, else (-1) import entire dataset
        @param downsample: float | Downsample ratio for input rgb images
        @param sub: int | If > 0, keep only the first `sub` items of the split
        @param light_name_list: list[str] | Environment light names to load
        """
        assert split in ['train', 'test']
        self.N_vis = N_vis
        self.root_dir = Path(root_dir)
        self.split = split
        # One folder per view, prefixed by the split name (train_*/test_*).
        self.split_list = [x for x in self.root_dir.iterdir() if x.stem.startswith(self.split)]
        if not random_test:
            self.split_list.sort() # to render video
        if sub > 0:
            self.split_list = self.split_list[:sub]
        self.img_wh = (int(800 / downsample), int(800 / downsample))
        self.white_bg = True
        self.downsample = downsample
        self.transform = self.define_transforms()
        self.light_name_list = light_name_list
        self.near_far = [2.0, 6.0]
        self.scene_bbox = torch.tensor([[-1.5, -1.5, -1.5], [1.5, 1.5, 1.5]]) * self.downsample
        self.center = torch.mean(self.scene_bbox, axis=0).float().view(1, 1, 3)
        self.radius = (self.scene_bbox[1] - self.center).float().view(1, 1, 3)
        # Blender (y-up, -z forward) to OpenCV camera convention.
        self.blender2opencv = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
        # HDR configs
        self.scan = self.root_dir.stem # Scan name e.g. 'lego', 'hotdog'
        self.light_num = len(self.light_name_list)
        ## Load light data
        self.hdr_dir = Path(hdr_dir)
        self.read_lights()
        # When training, preload all rays and rgbs into flat tensors.
        if split == 'train':
            self.read_all_frames()

    def define_transforms(self):
        """Return the PIL-image-to-tensor transform used for all frames."""
        transforms = T.Compose([
            T.ToTensor(),
        ])
        return transforms

    def read_lights(self):
        """
        Read each named environment map (<name>.hdr) from `self.hdr_dir`
        into `self.lights_probes` as a flattened (H*W, 3) float tensor.
        Missing files are silently skipped.
        """
        self.lights_probes = dict()
        for light_name in self.light_name_list:
            hdr_path = self.hdr_dir / f'{light_name}.hdr'
            if os.path.exists(hdr_path):
                light_rgb = read_hdr(hdr_path)
                self.envir_map_h, self.envir_map_w = light_rgb.shape[:2]
                light_rgb = light_rgb.reshape(-1, 3)
                light_rgb = torch.from_numpy(light_rgb).float()
                self.lights_probes[light_name] = light_rgb
    def read_all_frames(self):
        """Preload every (view, light) pair into flat per-pixel tensors:
        all_rays [N*H*W, 6], all_rgbs [N*H*W, 3], all_light_idx [N*H*W, 1]."""
        self.all_rays = []
        self.all_rgbs = []
        self.all_masks = []
        self.all_light_idx = []
        for idx in tqdm(range(self.__len__()), desc=f'Loading {self.split} data, view number: {self.__len__()}, lighting number: {self.light_num}'):
            item_path = self.split_list[idx]
            item_meta_path = item_path / 'metadata.json'
            with open(item_meta_path, 'r') as f:
                meta = json.load(f)
            img_wh = (int(meta['imw'] / self.downsample), int(meta['imh'] / self.downsample))
            # Get ray directions for all pixels, same for all images (with same H, W, focal)
            focal = 0.5 * int(meta['imw']) / np.tan(0.5 * meta['cam_angle_x']) # fov -> focal length
            focal *= img_wh[0] / meta['imw']
            directions = get_ray_directions(img_wh[1], img_wh[0], [focal, focal]) # [H, W, 3]
            directions = directions / torch.norm(directions, dim=-1, keepdim=True)
            cam_trans = np.array(list(map(float, meta["cam_transform_mat"].split(',')))).reshape(4, 4)
            pose = cam_trans @ self.blender2opencv
            c2w = torch.FloatTensor(pose) # [4, 4]
            w2c = torch.linalg.inv(c2w) # [4, 4]
            # Read ray data — shared by every lighting of this view.
            rays_o, rays_d = get_rays(directions, c2w)
            rays = torch.cat([rays_o, rays_d], 1) # [H*W, 6]
            # light_kind_to_choose = int(np.random.randint(len(self.light_name_list))) # temp
            for light_kind_idx in range(len(self.light_name_list)):
                # # used to control the number of input images (limited general multi-light)
                # if light_kind_to_choose != light_kind_idx:
                #     continue
                # #
                # Read RGB data
                light_name = self.light_name_list[light_kind_idx]
                relight_img_path = item_path / f'rgba_{light_name}.png'
                relight_img = Image.open(relight_img_path)
                if self.downsample != 1.0:
                    relight_img = relight_img.resize(img_wh, Image.Resampling.LANCZOS)
                relight_img = self.transform(relight_img) # [4, H, W]
                relight_img = relight_img.view(4, -1).permute(1, 0) # [H*W, 4]
                ## Blend RGBA to RGB against a white background.
                relight_rgbs = relight_img[:, :3] * relight_img[:, -1:] + (1 - relight_img[:, -1:]) # [H*W, 3]
                light_idx = torch.tensor(light_kind_idx, dtype=torch.int8).repeat((img_wh[0] * img_wh[1], 1)).to(torch.int8) # [H*W, 1], stored as int8 to save memory
                self.all_rays.append(rays)
                self.all_rgbs.append(relight_rgbs)
                # self.all_masks.append(relight_mask)
                self.all_light_idx.append(light_idx)
        self.all_rays = torch.cat(self.all_rays, dim=0)  # [N*H*W, 6]
        self.all_rgbs = torch.cat(self.all_rgbs, dim=0)  # [N*H*W, 3]
        # self.all_masks = torch.cat(self.all_masks, dim=0)  # [N*H*W, 1]
        self.all_light_idx = torch.cat(self.all_light_idx, dim=0)  # [N*H*W, 1]

    def world2ndc(self, points, lindisp=None):
        """Map world-space points into the normalized cube defined by
        `self.center` and `self.radius` (lindisp is currently unused)."""
        device = points.device
        return (points - self.center.to(device)) / self.radius.to(device)

    def read_stack(self):
        """Stack per-view rays/rgbs from __getitem__ into
        [len(self), H*W, *] tensors (appends to all_rays/all_rgbs)."""
        for idx in range(self.__len__()):
            item = self.__getitem__(idx)
            rays = item['rays']
            rgbs = item['rgbs']
            self.all_rays += [rays]
            self.all_rgbs += [rgbs]
        self.all_rays = torch.stack(self.all_rays, 0)  # [len(self), H*W, 6]
        self.all_rgbs = torch.stack(self.all_rgbs, 0)  # [len(self), H*W, 3]

    def __len__(self):
        # One item per view folder in the split.
        return len(self.split_list)

    def __getitem__(self, idx):
        """Load one view: rays, per-light RGB stacks, albedo, normals and
        camera matrices (see the returned dict for shapes)."""
        item_path = self.split_list[idx]
        item_meta_path = item_path / 'metadata.json'
        with open(item_meta_path, 'r') as f:
            meta = json.load(f)
        img_wh = (int(meta['imw'] / self.downsample), int(meta['imh'] / self.downsample))
        # Get ray directions for all pixels, same for all images (with same H, W, focal)
        focal = 0.5 * int(meta['imw']) / np.tan(0.5 * meta['cam_angle_x']) # fov -> focal length
        focal *= img_wh[0] / meta['imw']
        directions = get_ray_directions(img_wh[1], img_wh[0], [focal, focal]) # [H, W, 3]
        directions = directions / torch.norm(directions, dim=-1, keepdim=True)
        cam_trans = np.array(list(map(float, meta["cam_transform_mat"].split(',')))).reshape(4, 4)
        pose = cam_trans @ self.blender2opencv
        c2w = torch.FloatTensor(pose) # [4, 4]
        w2c = torch.linalg.inv(c2w) # [4, 4]
        relight_rgbs_list = []
        light_idx_list = []
        for light_kind_idx in range(len(self.light_name_list)):
            # Read RGB data
            light_name = self.light_name_list[light_kind_idx]
            relight_img_path = item_path / f'rgba_{light_name}.png'
            relight_img = Image.open(relight_img_path)
            if self.downsample != 1.0:
                relight_img = relight_img.resize(img_wh, Image.Resampling.LANCZOS)
            relight_img = self.transform(relight_img) # [4, H, W]
            relight_img = relight_img.view(4, -1).permute(1, 0) # [H*W, 4]
            ## Blend RGBA to RGB against a white background.
            relight_rgbs = relight_img[:, :3] * relight_img[:, -1:] + (1 - relight_img[:, -1:]) # [H*W, 3]
            light_idx = torch.tensor(light_kind_idx, dtype=torch.int8).repeat((img_wh[0] * img_wh[1], 1)).to(torch.int8) # [H*W, 1], stored as int8 to save memory
            relight_rgbs_list.append(relight_rgbs)
            light_idx_list.append(light_idx)
        relight_rgbs = torch.stack(relight_rgbs_list, dim=0) # [rotation_num, H*W, 3]
        light_idx = torch.stack(light_idx_list, dim=0) # [rotation_num, H*W, 1]
        ## Obtain background mask, bg = False
        # NOTE(review): uses the alpha of the *last* light's image; this is
        # only correct if all lights share one alpha channel — confirm.
        relight_mask = ~(relight_img[:, -1:] == 0)
        # Read albedo image
        albedo_path = item_path / f'albedo.png'
        albedo = Image.open(albedo_path)
        if self.downsample != 1.0:
            albedo = albedo.resize(img_wh, Image.Resampling.LANCZOS)
        albedo = self.transform(albedo)
        albedo = albedo.view(4, -1).permute(1, 0)
        ## Blend A to RGB (white background)
        albedo = albedo[:, :3] * albedo[:, -1:] + (1 - albedo[:, -1:]) # [H*W, 3]
        # Read ray data
        rays_o, rays_d = get_rays(directions, c2w)
        rays = torch.cat([rays_o, rays_d], 1) # [H*W, 6]
        # Read normal data: RGB in [0,1] remapped to directions in (-1,1),
        # with the background filled by the +z normal.
        normal_path = item_path / 'normal.png'
        normal_img = Image.open(normal_path)
        normal = np.array(normal_img)[..., :3] / 255 # [H, W, 3] in range [0, 1]
        normal = (normal - 0.5) * 2.0 # [H, W, 3] in range (-1, 1)
        normal_bg = np.array([0.0, 0.0, 1.0])
        normal_alpha = np.array(normal_img)[..., [-1]] / 255 # [H, W, 1] in range [0, 1]
        normal = normal * normal_alpha + normal_bg * (1.0 - normal_alpha) # [H, W, 3]
        ## Downsample
        if self.downsample != 1.0:
            normal = cv2.resize(normal, img_wh[::-1], interpolation=cv2.INTER_NEAREST)
        normal = torch.FloatTensor(normal) # [H, W, 3]
        normal = normal / torch.norm(normal, dim=-1, keepdim=True)
        normals = normal.view(-1, 3) # [H*W, 3]
        item = {
            'img_wh': img_wh,  # (int, int)
            'light_idx': light_idx,  # [rotation_num, H*W, 1]
            'rgbs': relight_rgbs,  # [rotation_num, H*W, 3]
            'rgbs_mask': relight_mask,  # [H*W, 1]
            'albedo': albedo,  # [H*W, 3]
            'rays': rays,  # [H*W, 6]
            'normals': normals,  # [H*W, 3]
            'c2w': c2w,  # [4, 4]
            'w2c': w2c  # [4, 4]
        }
        return item
# Ad-hoc smoke test for this dataset class (requires the local dataset
# paths below to exist).
if __name__ == "__main__":
    from opt import config_parser
    args = config_parser()
    dataset = TensoIR_Dataset_unknown_general_multi_lights(
        root_dir='/home/haian/Dataset/NeRF_DATA/hotdog_rotate',
        hdr_dir='/home/haian/Dataset/light_probes/',
        split='test',
        random_test=False,
        downsample=1.0
    )
    # Test 1: Get single item
    item = dataset.__getitem__(0)
    print(item['albedo'].shape)
    print(item['rgbs_mask'].shape)
    # import ipdb; ipdb.set_trace()
    # Test 2: Iteration
    # train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, num_workers=1, drop_last=True, shuffle=True)
    # train_iter = iter(train_dataloader)
    # for i in range(20):
    #     try:
    #         item = next(train_iter)
    #         print(item.keys())
    #         print(item['rays'].shape)
    #     except StopIteration:
    #         print('Start a new iteration from the dataloader')
    #         train_iter = iter(train_dataloader)
    # Test 3: Test dataset all stack
    # test_dataset = TensoRFactorDataset(
    #     root_dir='/code/MVSNeRFactor/data/nerfactor_synthesis/hotdog',
    #     hdr_dir='/code/MVSNeRFactor/data/low_res_envmaps_32_16',
    #     split='test',
    #     downsample=1.0,
    #     is_stack=True
    # )
    # print(test_dataset.all_rays.shape)  # [4, 640000, 6]
    # print(test_dataset.all_rgbs.shape)  # [4, 640000, 3]
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
dataLoader/tensoIR_material_editing_test.py | Python |
import os, random
import json
from pathlib import Path
import numpy as np
from PIL import Image
import cv2
from tqdm import tqdm
import torch
from torch.utils.data import Dataset
from torchvision import transforms as T
from dataLoader.ray_utils import *
import torch.nn as nn
class tensoIR_Material_Editing_test(Dataset):
    # NOTE(review): `light_rotation` and `light_names` are mutable default
    # arguments shared across calls — consider None defaults.
    def __init__(self,
                 root_dir,
                 hdr_dir,
                 split='train',
                 random_test=True,
                 N_vis=-1,
                 downsample=1.0,
                 sub=0,
                 light_rotation=['000', '045', '090', '135', '180', '225', '270', '315'],
                 light_names=["sunset"]
                 ):
        """
        @param root_dir: str | Root path of dataset folder
        @param hdr_dir: str | Root path for HDR folder
        @param split: str | e.g. 'train' / 'test'
        @param random_test: bool | Whether to randomly select a test view and a lighting
            (when False, the split list is sorted for deterministic order)
        @param N_vis: int | If N_vis > 0, select N_vis frames from the dataset, else (-1) import entire dataset
        @param downsample: float | Downsample ratio for input rgb images
        @param sub: int | If > 0, keep only the first `sub` items of the split
        @param light_rotation: list[str] | Rotation angle tags of the lighting
        @param light_names: list[str] | Environment light names to load
        """
        assert split in ['train', 'test']
        self.N_vis = N_vis
        self.root_dir = Path(root_dir)
        self.split = split
        # One folder per view, prefixed by the split name (train_*/test_*).
        self.split_list = [x for x in self.root_dir.iterdir() if x.stem.startswith(self.split)]
        if not random_test:
            self.split_list.sort() # to render video
        if sub > 0:
            self.split_list = self.split_list[:sub]
        self.img_wh = (int(800 / downsample), int(800 / downsample))
        self.white_bg = True
        self.downsample = downsample
        self.transform = self.define_transforms()
        self.light_names = light_names
        self.near_far = [2.0, 6.0]
        self.scene_bbox = torch.tensor([[-1.5, -1.5, -1.5], [1.5, 1.5, 1.5]]) * self.downsample
        self.center = torch.mean(self.scene_bbox, axis=0).float().view(1, 1, 3)
        self.radius = (self.scene_bbox[1] - self.center).float().view(1, 1, 3)
        # Blender (y-up, -z forward) to OpenCV camera convention.
        self.blender2opencv = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
        # HDR configs
        self.scan = self.root_dir.stem # Scan name e.g. 'lego', 'hotdog'
        self.light_rotation = light_rotation
        self.light_num = len(self.light_rotation)
        ## Load light data
        self.hdr_dir = Path(hdr_dir)
def define_transforms(self):
transforms = T.Compose([
T.ToTensor(),
])
return transforms
def read_all_frames(self):
self.all_rays = []
self.all_rgbs = []
self.all_masks = []
self.all_light_idx = []
for idx in tqdm(range(self.__len__()), desc=f'Loading {self.split} data, view number: {self.__len__()}, rotaion number: {self.light_num}'):
item_path = self.split_list[idx]
item_meta_path = item_path / 'metadata.json'
with open(item_meta_path, 'r') as f:
meta = json.load(f)
img_wh = (int(meta['imw'] / self.downsample), int(meta['imh'] / self.downsample))
# Get ray directions for all pixels, same for all images (with same H, W, focal)
focal = 0.5 * int(meta['imw']) / np.tan(0.5 * meta['cam_angle_x']) # fov -> focal length
focal *= img_wh[0] / meta['imw']
directions = get_ray_directions(img_wh[1], img_wh[0], [focal, focal]) # [H, W, 3]
directions = directions / torch.norm(directions, dim=-1, keepdim=True)
cam_trans = np.array(list(map(float, meta["cam_transform_mat"].split(',')))).reshape(4, 4)
pose = cam_trans @ self.blender2opencv
c2w = torch.FloatTensor(pose) # [4, 4]
w2c = torch.linalg.inv(c2w) # [4, 4]
# Read ray data
rays_o, rays_d = get_rays(directions, c2w)
rays = torch.cat([rays_o, rays_d], 1) # [H*W, 6]
for light_name_idx in range(len(self.light_names)):
# Read RGB data
cur_light_name = self.light_names[light_name_idx]
relight_img_path = item_path / f'rgba_{cur_light_name}.png'
relight_img = Image.open(relight_img_path)
if self.downsample != 1.0:
relight_img = relight_img.resize(img_wh, Image.Resampling.LANCZOS)
relight_img = self.transform(relight_img) # [4, H, W]
relight_img = relight_img.view(4, -1).permute(1, 0) # [H*W, 4]
## Blend RGBA to RGB
relight_rgbs = relight_img[:, :3] * relight_img[:, -1:] + (1 - relight_img[:, -1:]) # [H*W, 3]
## Obtain background mask, bg = False
relight_mask = (~(relight_img[:, -1:] == 0)).to(torch.bool) # [H*W, 1]
light_idx = torch.tensor(0, dtype=torch.int8).repeat((img_wh[0] * img_wh[1], 1)).to(torch.int8) # [H*W, 1], transform to in8 to save memory
self.all_rays.append(rays)
self.all_rgbs.append(relight_rgbs)
self.all_masks.append(relight_mask)
self.all_light_idx.append(light_idx)
self.all_rays = torch.cat(self.all_rays, dim=0) # [N*H*W, 6]
self.all_rgbs = torch.cat(self.all_rgbs, dim=0) # [N*H*W, 3]
# self.all_masks = torch.cat(self.all_masks, dim=0) # [N*H*W, 1]
self.all_light_idx = torch.cat(self.all_light_idx, dim=0) # [N*H*W, 1]
def world2ndc(self, points, lindisp=None):
device = points.device
return (points - self.center.to(device)) / self.radius.to(device)
def __len__(self):
return len(self.split_list)
def __getitem__(self, idx):
item_path = self.split_list[idx]
item_meta_path = item_path / 'metadata.json'
with open(item_meta_path, 'r') as f:
meta = json.load(f)
img_wh = (int(meta['imw'] / self.downsample), int(meta['imh'] / self.downsample))
# Get ray directions for all pixels, same for all images (with same H, W, focal)
focal = 0.5 * int(meta['imw']) / np.tan(0.5 * meta['cam_angle_x']) # fov -> focal length
focal *= img_wh[0] / meta['imw']
directions = get_ray_directions(img_wh[1], img_wh[0], [focal, focal]) # [H, W, 3]
directions = directions / torch.norm(directions, dim=-1, keepdim=True)
cam_trans = np.array(list(map(float, meta["cam_transform_mat"].split(',')))).reshape(4, 4)
pose = cam_trans @ self.blender2opencv
c2w = torch.FloatTensor(pose) # [4, 4]
w2c = torch.linalg.inv(c2w) # [4, 4]
relight_rgbs_list = []
light_idx_list = []
for light_name_idx in range(len(self.light_names)):
# Read RGB data
cur_light_name = self.light_names[light_name_idx]
relight_img_path = item_path / 'rgba_city.png'
relight_img = Image.open(relight_img_path)
if self.downsample != 1.0:
relight_img = relight_img.resize(img_wh, Image.Resampling.LANCZOS)
relight_img = self.transform(relight_img) # [4, H, W]
relight_img = relight_img.view(4, -1).permute(1, 0) # [H*W, 4]
## Blend A to RGB
relight_rgbs = relight_img[:, :3] * relight_img[:, -1:] + (1 - relight_img[:, -1:]) # [H*W, 3]
light_idx = torch.tensor(0, dtype=torch.int).repeat((img_wh[0] * img_wh[1], 1)) # [H*W, 1]
relight_rgbs_list.append(relight_rgbs)
light_idx_list.append(light_idx)
relight_rgbs = torch.stack(relight_rgbs_list, dim=0) # [rotation_num, H*W, 3]
light_idx = torch.stack(light_idx_list, dim=0) # [rotation_num, H*W, 1]
## Obtain background mask, bg = False
relight_mask = ~(relight_img[:, -1:] == 0)
# Read albedo image
albedo_path = item_path / f'albedo.png'
albedo = Image.open(albedo_path)
if self.downsample != 1.0:
albedo = albedo.resize(img_wh, Image.Resampling.LANCZOS)
albedo = self.transform(albedo)
albedo = albedo.view(4, -1).permute(1, 0)
## Blend A to RGB
albedo = albedo[:, :3] * albedo[:, -1:] + (1 - albedo[:, -1:]) # [H*W, 3]
# Read ray data
rays_o, rays_d = get_rays(directions, c2w)
rays = torch.cat([rays_o, rays_d], 1) # [H*W, 6]
# Read normal data
normal_path = item_path / 'normal.png'
normal_img = Image.open(normal_path)
normal = np.array(normal_img)[..., :3] / 255 # [H, W, 3] in range [0, 1]
normal = (normal - 0.5) * 2.0 # [H, W, 3] in range (-1, 1)
normal_bg = np.array([0.0, 0.0, 1.0])
normal_alpha = np.array(normal_img)[..., [-1]] / 255 # [H, W, 1] in range [0, 1]
normal = normal * normal_alpha + normal_bg * (1.0 - normal_alpha) # [H, W, 3]
## Downsample
if self.downsample != 1.0:
normal = cv2.resize(normal, img_wh[::-1], interpolation=cv2.INTER_NEAREST)
normal = torch.FloatTensor(normal) # [H, W, 3]
normal = normal / torch.norm(normal, dim=-1, keepdim=True)
normals = normal.view(-1, 3) # [H*W, 3]
item = {
'img_wh': img_wh, # (int, int)
'light_idx': light_idx, # [light_num, H*W, 1]
'rgbs': relight_rgbs, # [light_num, H*W, 3],
'rgbs_mask': relight_mask, # [H*W, 1]
'albedo': albedo, # [H*W, 3]
'rays': rays, # [H*W, 6]
'normals': normals, # [H*W, 3],
'c2w': c2w, # [4, 4]
'w2c': w2c # [4, 4]
}
return item
if __name__ == "__main__":
    from opt import config_parser
    args = config_parser()
    # NOTE: 'forest' appears twice in this list — likely a copy/paste slip; kept as-is.
    light_names= ['bridge','city', 'courtyard', 'forest', 'fireplace', 'forest', 'interior', 'museum', 'night', 'snow', 'square', 'studio',
                  'sunrise', 'sunset', 'tunnel']
    # BUGFIX: this module defines `tensoIR_Material_Editing_test`, not
    # `tensoIR_Relighting_test` (which lives in dataLoader/tensoIR_relighting_test.py),
    # so the previous instantiation raised NameError when this file was run as a script.
    dataset = tensoIR_Material_Editing_test(
        root_dir='/home/haian/Dataset/NeRF_DATA/Eight_Rotation/hotdog_rotate/',
        hdr_dir='/home/haian/Dataset/light_probes/low_res_envmaps/',
        split='test',
        random_test=False,
        light_names=light_names,
        downsample=1.0
    )
    # Test 1: Get single item and sanity-check tensor shapes.
    item = dataset.__getitem__(0)
    print(item['albedo'].shape)
    print(item['rgbs_mask'].shape)
    import ipdb; ipdb.set_trace()
    # Test 2: Iteration
    # train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, num_workers=1, drop_last=True, shuffle=True)
    # train_iter = iter(train_dataloader)
    # for i in range(20):
    #     try:
    #         item = next(train_iter)
    #         print(item.keys())
    #         print(item['rays'].shape)
    #     except StopIteration:
    #         print('Start a new iteration from the dataloader')
    #         train_iter = iter(train_dataloader)
    # Test 3: Test dataset all stack
    # test_dataset = TensoRFactorDataset(
    #     root_dir='/code/MVSNeRFactor/data/nerfactor_synthesis/hotdog',
    #     hdr_dir='/code/MVSNeRFactor/data/low_res_envmaps_32_16',
    #     split='test',
    #     downsample=1.0,
    #     is_stack=True
    # )
    # print(test_dataset.all_rays.shape) # [4, 640000, 6]
    # print(test_dataset.all_rgbs.shape) # [4, 640000, 3]
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
dataLoader/tensoIR_relighting_test.py | Python | import os, random
import json
from pathlib import Path
import numpy as np
from PIL import Image
import cv2
from tqdm import tqdm
import torch
from torch.utils.data import Dataset
from torchvision import transforms as T
from dataLoader.ray_utils import *
class tensoIR_Relighting_test(Dataset):
    """Test-split loader used for TensoIR relighting evaluation.

    Each frame lives in its own sub-directory of ``root_dir`` whose name starts
    with the split name and is expected to contain ``metadata.json``, one
    ``rgba_<light_name>.png`` render per entry of ``light_names``,
    ``albedo.png`` and ``normal.png``.
    """
    def __init__(self,
                 root_dir,
                 hdr_dir,
                 split='train',
                 random_test=True,
                 N_vis=-1,
                 downsample=1.0,
                 sub=0,
                 light_rotation=['000', '045', '090', '135', '180', '225', '270', '315'],
                 light_names=["sunrise"]
                 ):
        """
        @param root_dir: str | Root path of dataset folder
        @param hdr_dir: str | Root path for HDR folder
        @param split: str | e.g. 'train' / 'test'
        @param random_test: bool | Whether to randomly select a test view and a lighting
                                   else [frames, h*w, 6]
        @param N_vis: int | If N_vis > 0, select N_vis frames from the dataset, else (-1) import entire dataset
        @param downsample: float | Downsample ratio for input rgb images
        @param sub: int | If sub > 0, keep only the first `sub` frame folders
        @param light_rotation: list[str] | Light-rotation angle names (degrees, as strings)
        @param light_names: list[str] | Environment-light names to load per frame
        """
        # NOTE(review): `light_rotation` and `light_names` are mutable default
        # arguments; they are only read here, but callers must not mutate them.
        assert split in ['train', 'test']
        self.N_vis = N_vis
        self.root_dir = Path(root_dir)
        self.split = split
        # One folder per frame; folder names are prefixed with the split name.
        self.split_list = [x for x in self.root_dir.iterdir() if x.stem.startswith(self.split)]
        if not random_test:
            self.split_list.sort() # to render video
        if sub > 0:
            self.split_list = self.split_list[:sub]
        self.img_wh = (int(800 / downsample), int(800 / downsample))
        self.white_bg = True
        self.downsample = downsample
        self.transform = self.define_transforms()
        self.light_names = light_names
        self.near_far = [2.0, 6.0]
        # Axis-aligned scene bounds, scaled by the downsample factor.
        self.scene_bbox = torch.tensor([[-1.5, -1.5, -1.5], [1.5, 1.5, 1.5]]) * self.downsample
        self.center = torch.mean(self.scene_bbox, axis=0).float().view(1, 1, 3)
        self.radius = (self.scene_bbox[1] - self.center).float().view(1, 1, 3)
        # Blender (OpenGL-style) camera -> OpenCV camera axis flip.
        self.blender2opencv = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
        # HDR configs
        self.scan = self.root_dir.stem # Scan name e.g. 'lego', 'hotdog'
        self.light_rotation = light_rotation
        self.light_num = len(self.light_rotation)
        ## Load light data
        self.hdr_dir = Path(hdr_dir)
    def define_transforms(self):
        """Return the torchvision transform applied to every loaded image (PIL -> [C, H, W] float tensor in [0, 1])."""
        transforms = T.Compose([
            T.ToTensor(),
        ])
        return transforms
    def read_all_frames(self):
        """Load every frame's rays/RGB/mask/light-index tensors into memory.

        Populates ``self.all_rays`` [N*H*W, 6], ``self.all_rgbs`` [N*H*W, 3]
        and ``self.all_light_idx`` [N*H*W, 1]; per-frame masks are collected
        but their concatenation is commented out.
        """
        self.all_rays = []
        self.all_rgbs = []
        self.all_masks = []
        self.all_light_idx = []
        for idx in tqdm(range(self.__len__()), desc=f'Loading {self.split} data, view number: {self.__len__()}, rotaion number: {self.light_num}'):
            item_path = self.split_list[idx]
            item_meta_path = item_path / 'metadata.json'
            with open(item_meta_path, 'r') as f:
                meta = json.load(f)
            img_wh = (int(meta['imw'] / self.downsample), int(meta['imh'] / self.downsample))
            # Get ray directions for all pixels, same for all images (with same H, W, focal)
            focal = 0.5 * int(meta['imw']) / np.tan(0.5 * meta['cam_angle_x']) # fov -> focal length
            focal *= img_wh[0] / meta['imw']
            directions = get_ray_directions(img_wh[1], img_wh[0], [focal, focal]) # [H, W, 3]
            # Normalize to unit-length viewing directions.
            directions = directions / torch.norm(directions, dim=-1, keepdim=True)
            cam_trans = np.array(list(map(float, meta["cam_transform_mat"].split(',')))).reshape(4, 4)
            pose = cam_trans @ self.blender2opencv
            c2w = torch.FloatTensor(pose) # [4, 4]
            w2c = torch.linalg.inv(c2w) # [4, 4]
            # Read ray data
            rays_o, rays_d = get_rays(directions, c2w)
            rays = torch.cat([rays_o, rays_d], 1) # [H*W, 6]
            for light_name_idx in range(len(self.light_names)):
                # Read RGB data
                cur_light_name = self.light_names[light_name_idx]
                relight_img_path = item_path / f'rgba_{cur_light_name}.png'
                relight_img = Image.open(relight_img_path)
                if self.downsample != 1.0:
                    relight_img = relight_img.resize(img_wh, Image.Resampling.LANCZOS)
                relight_img = self.transform(relight_img) # [4, H, W]
                relight_img = relight_img.view(4, -1).permute(1, 0) # [H*W, 4]
                ## Blend A to RGB (alpha-composite over a white background)
                relight_rgbs = relight_img[:, :3] * relight_img[:, -1:] + (1 - relight_img[:, -1:]) # [H*W, 3]
                ## Obtain background mask, bg = False
                relight_mask = (~(relight_img[:, -1:] == 0)).to(torch.bool) # [H*W, 1]
                light_idx = torch.tensor(0, dtype=torch.int8).repeat((img_wh[0] * img_wh[1], 1)).to(torch.int8) # [H*W, 1], transform to int8 to save memory
                # Rays are appended once per light so indices stay aligned with rgbs.
                self.all_rays.append(rays)
                self.all_rgbs.append(relight_rgbs)
                self.all_masks.append(relight_mask)
                self.all_light_idx.append(light_idx)
        self.all_rays = torch.cat(self.all_rays, dim=0) # [N*H*W, 6]
        self.all_rgbs = torch.cat(self.all_rgbs, dim=0) # [N*H*W, 3]
        # self.all_masks = torch.cat(self.all_masks, dim=0) # [N*H*W, 1]
        self.all_light_idx = torch.cat(self.all_light_idx, dim=0) # [N*H*W, 1]
    def world2ndc(self, points, lindisp=None):
        """Map world-space points into the bbox-normalized cube.

        `lindisp` is accepted for interface compatibility but unused here.
        """
        device = points.device
        return (points - self.center.to(device)) / self.radius.to(device)
    def __len__(self):
        # One dataset item per frame folder.
        return len(self.split_list)
    def __getitem__(self, idx):
        """Load one frame: rays, per-light RGB, masks, albedo, normals, camera matrices."""
        item_path = self.split_list[idx]
        item_meta_path = item_path / 'metadata.json'
        with open(item_meta_path, 'r') as f:
            meta = json.load(f)
        img_wh = (int(meta['imw'] / self.downsample), int(meta['imh'] / self.downsample))
        # Get ray directions for all pixels, same for all images (with same H, W, focal)
        focal = 0.5 * int(meta['imw']) / np.tan(0.5 * meta['cam_angle_x']) # fov -> focal length
        focal *= img_wh[0] / meta['imw']
        directions = get_ray_directions(img_wh[1], img_wh[0], [focal, focal]) # [H, W, 3]
        directions = directions / torch.norm(directions, dim=-1, keepdim=True)
        cam_trans = np.array(list(map(float, meta["cam_transform_mat"].split(',')))).reshape(4, 4)
        pose = cam_trans @ self.blender2opencv
        c2w = torch.FloatTensor(pose) # [4, 4]
        w2c = torch.linalg.inv(c2w) # [4, 4]
        relight_rgbs_list = []
        light_idx_list = []
        for light_name_idx in range(len(self.light_names)):
            # Read RGB data
            cur_light_name = self.light_names[light_name_idx]
            relight_img_path = item_path / f'rgba_{cur_light_name}.png'
            # relight_img_path = item_path / f'rgba_city.png'
            relight_img = Image.open(relight_img_path)
            if self.downsample != 1.0:
                relight_img = relight_img.resize(img_wh, Image.Resampling.LANCZOS)
            relight_img = self.transform(relight_img) # [4, H, W]
            relight_img = relight_img.view(4, -1).permute(1, 0) # [H*W, 4]
            ## Blend A to RGB |<- white background ->|
            relight_rgbs = relight_img[:, :3] * relight_img[:, -1:] + (1 - relight_img[:, -1:]) # [H*W, 3]
            # relight_rgbs = relight_img[:, :3]
            light_idx = torch.tensor(0, dtype=torch.int).repeat((img_wh[0] * img_wh[1], 1)) # [H*W, 1]
            relight_rgbs_list.append(relight_rgbs)
            light_idx_list.append(light_idx)
        relight_rgbs = torch.stack(relight_rgbs_list, dim=0) # [rotation_num, H*W, 3]
        light_idx = torch.stack(light_idx_list, dim=0) # [rotation_num, H*W, 1]
        ## Obtain background mask, bg = False (mask comes from the last loaded image's alpha)
        relight_mask = ~(relight_img[:, -1:] == 0)
        # Read albedo image
        albedo_path = item_path / f'albedo.png'
        albedo = Image.open(albedo_path)
        if self.downsample != 1.0:
            albedo = albedo.resize(img_wh, Image.Resampling.LANCZOS)
        albedo = self.transform(albedo)
        albedo = albedo.view(4, -1).permute(1, 0)
        ## Blend A to RGB (alpha-composite over a white background)
        albedo = albedo[:, :3] * albedo[:, -1:] + (1 - albedo[:, -1:]) # [H*W, 3]
        # Read ray data
        rays_o, rays_d = get_rays(directions, c2w)
        rays = torch.cat([rays_o, rays_d], 1) # [H*W, 6]
        # Read normal data
        normal_path = item_path / 'normal.png'
        normal_img = Image.open(normal_path)
        normal = np.array(normal_img)[..., :3] / 255 # [H, W, 3] in range [0, 1]
        normal = (normal - 0.5) * 2.0 # [H, W, 3] in range (-1, 1)
        # Background pixels get the +Z normal via alpha blending.
        normal_bg = np.array([0.0, 0.0, 1.0])
        normal_alpha = np.array(normal_img)[..., [-1]] / 255 # [H, W, 1] in range [0, 1]
        normal = normal * normal_alpha + normal_bg * (1.0 - normal_alpha) # [H, W, 3]
        # Second composite over white, used for visualization ('wihte' is a typo in
        # the identifier, kept for byte-compatibility).
        normal_wihte_bg = np.array([1.0, 1.0, 1.0])
        normal_alpha = np.array(normal_img)[..., [-1]] / 255 # [H, W, 1] in range [0, 1]
        normal_white = normal * normal_alpha + normal_wihte_bg * (1.0 - normal_alpha) # [H, W, 3]
        ## Downsample
        # NOTE(review): only `normal` is resized here; `normal_white` keeps the
        # original resolution when downsample != 1.0 — confirm this is intended.
        if self.downsample != 1.0:
            normal = cv2.resize(normal, img_wh[::-1], interpolation=cv2.INTER_NEAREST)
        normal = torch.FloatTensor(normal) # [H, W, 3]
        # Re-normalize after blending/resizing so normals are unit length.
        normal = normal / torch.norm(normal, dim=-1, keepdim=True)
        normals = normal.view(-1, 3) # [H*W, 3]
        normal_white = torch.FloatTensor(normal_white) # [H, W, 3]
        # normal_white = normal_white / torch.norm(normal_white, dim=-1, keepdim=True)
        normals_white = normal_white.view(-1, 3) # [H*W, 3]
        item = {
            'img_wh': img_wh, # (int, int)
            'light_idx': light_idx, # [light_num, H*W, 1]
            'rgbs': relight_rgbs, # [light_num, H*W, 3],
            'rgbs_mask': relight_mask, # [H*W, 1]
            'albedo': albedo, # [H*W, 3]
            'rays': rays, # [H*W, 6]
            'normals': normals, # [H*W, 3],
            'normals_white': normals_white, # [H*W, 3],
            'c2w': c2w, # [4, 4]
            'w2c': w2c # [4, 4]
        }
        return item
if __name__ == "__main__":
    from opt import config_parser
    args = config_parser()
    # Full set of environment lights used for the relighting evaluation.
    light_names = [
        'bridge', 'city', 'courtyard', 'forest', 'fireplace', 'forest', 'interior',
        'museum', 'night', 'snow', 'square', 'studio', 'sunrise', 'sunset', 'tunnel',
    ]
    dataset_kwargs = dict(
        root_dir='/home/haian/Dataset/NeRF_DATA/Eight_Rotation/hotdog_rotate/',
        hdr_dir='/home/haian/Dataset/light_probes/low_res_envmaps/',
        split='test',
        random_test=False,
        light_names=light_names,
        downsample=1.0,
    )
    dataset = tensoIR_Relighting_test(**dataset_kwargs)
    # Smoke-test: fetch one item and inspect tensor shapes, then drop into a debugger.
    sample = dataset[0]
    print(sample['albedo'].shape)
    print(sample['rgbs_mask'].shape)
    import ipdb
    ipdb.set_trace()
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
dataLoader/tensoIR_rotation_setting.py | Python | import os, random
import json
from pathlib import Path
import numpy as np
from PIL import Image
import cv2
from tqdm import tqdm
import torch
from torch.utils.data import Dataset
from torchvision import transforms as T
from dataLoader.ray_utils import *
from models.relight_utils import read_hdr
import torch.nn as nn
class TensoIR_Dataset_unknown_rotated_lights(Dataset):
    """TensoIR dataset for the rotated-lights setting.

    One environment light (``light_name``) is rendered under several rotation
    angles (``light_rotation``); each frame folder contains
    ``rgba_<light_name>_<rotation>.png`` per rotation, plus ``metadata.json``,
    ``albedo.png`` and ``normal.png``.  For the ``train`` split, all frames are
    eagerly loaded into flat tensors via :meth:`read_all_frames`.
    """
    def __init__(self,
                 root_dir,
                 hdr_dir,
                 split='train',
                 random_test=False,
                 N_vis=-1,
                 downsample=1.0,
                 sub=0,
                 light_rotation=['000', '045', '090', '135', '180', '225', '270', '315'],
                 light_name="sunset",
                 **temp,
                 ):
        """
        @param root_dir: str | Root path of dataset folder
        @param hdr_dir: str | Root path for HDR folder
        @param split: str | e.g. 'train' / 'test'
        @param random_test: bool | Whether to randomly select a test view and a lighting
                                   else [frames, h*w, 6]
        @param N_vis: int | If N_vis > 0, select N_vis frames from the dataset, else (-1) import entire dataset
        @param downsample: float | Downsample ratio for input rgb images
        @param sub: int | If sub > 0, keep only the first `sub` frame folders
        @param light_rotation: list[str] | Light-rotation angle names (degrees, as strings)
        @param light_name: str | Name of the environment light (HDR file stem)
        @param temp: dict | Extra keyword arguments, silently ignored
        """
        # NOTE(review): `light_rotation` is a mutable default argument; it is
        # only read here, but callers must not mutate it.
        assert split in ['train', 'test']
        self.N_vis = N_vis
        self.root_dir = Path(root_dir)
        self.split = split
        # One folder per frame; folder names are prefixed with the split name.
        self.split_list = [x for x in self.root_dir.iterdir() if x.stem.startswith(self.split)]
        if not random_test:
            self.split_list.sort() # to render video
        if sub > 0:
            self.split_list = self.split_list[:sub]
        self.img_wh = (int(800 / downsample), int(800 / downsample))
        self.white_bg = True
        self.downsample = downsample
        self.transform = self.define_transforms()
        self.light_name = light_name
        self.near_far = [2.0, 6.0]
        # Axis-aligned scene bounds, scaled by the downsample factor.
        self.scene_bbox = torch.tensor([[-1.5, -1.5, -1.5], [1.5, 1.5, 1.5]]) * self.downsample
        self.center = torch.mean(self.scene_bbox, axis=0).float().view(1, 1, 3)
        self.radius = (self.scene_bbox[1] - self.center).float().view(1, 1, 3)
        # Blender (OpenGL-style) camera -> OpenCV camera axis flip.
        self.blender2opencv = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
        # HDR configs
        self.scan = self.root_dir.stem # Scan name e.g. 'lego', 'hotdog'
        self.light_rotation = light_rotation
        self.light_num = len(self.light_rotation)
        ## Load light data
        self.hdr_dir = Path(hdr_dir)
        self.read_lights()
        # when trainning, we will load all the rays and rgbs
        if split == 'train':
            self.read_all_frames()
    def define_transforms(self):
        """Return the torchvision transform applied to every loaded image (PIL -> [C, H, W] float tensor in [0, 1])."""
        transforms = T.Compose([
            T.ToTensor(),
        ])
        return transforms
    def read_lights(self):
        """
        Read hdr file from local path

        Sets ``self.lights_probes`` to a flattened [H*W, 3] float tensor (and
        records the envmap resolution), or leaves it ``None`` if the HDR file
        does not exist.
        """
        self.lights_probes = None
        hdr_path = self.hdr_dir / f'{self.light_name}.hdr'
        if os.path.exists(hdr_path):
            light_rgb = read_hdr(hdr_path)
            self.envir_map_h, self.envir_map_w = light_rgb.shape[:2]
            light_rgb = light_rgb.reshape(-1, 3)
            light_rgb = torch.from_numpy(light_rgb).float()
            self.lights_probes = light_rgb
    def read_all_frames(self):
        """Load every frame under every light rotation into flat tensors.

        Populates ``self.all_rays`` [N*H*W, 6], ``self.all_rgbs`` [N*H*W, 3]
        and ``self.all_light_idx`` [N*H*W, 1] (one entry per frame x rotation);
        mask handling is commented out.
        """
        self.all_rays = []
        self.all_rgbs = []
        self.all_masks = []
        self.all_light_idx = []
        for idx in tqdm(range(self.__len__()), desc=f'Loading {self.split} data, view number: {self.__len__()}, rotaion number: {self.light_num}'):
            item_path = self.split_list[idx]
            item_meta_path = item_path / 'metadata.json'
            with open(item_meta_path, 'r') as f:
                meta = json.load(f)
            img_wh = (int(meta['imw'] / self.downsample), int(meta['imh'] / self.downsample))
            # Get ray directions for all pixels, same for all images (with same H, W, focal)
            focal = 0.5 * int(meta['imw']) / np.tan(0.5 * meta['cam_angle_x']) # fov -> focal length
            focal *= img_wh[0] / meta['imw']
            directions = get_ray_directions(img_wh[1], img_wh[0], [focal, focal]) # [H, W, 3]
            # Normalize to unit-length viewing directions.
            directions = directions / torch.norm(directions, dim=-1, keepdim=True)
            cam_trans = np.array(list(map(float, meta["cam_transform_mat"].split(',')))).reshape(4, 4)
            pose = cam_trans @ self.blender2opencv
            c2w = torch.FloatTensor(pose) # [4, 4]
            w2c = torch.linalg.inv(c2w) # [4, 4]
            # Read ray data
            rays_o, rays_d = get_rays(directions, c2w)
            rays = torch.cat([rays_o, rays_d], 1) # [H*W, 6]
            for light_rotation_idx in range(len(self.light_rotation)):
                # Read RGB data
                light_rotation = self.light_rotation[light_rotation_idx]
                relight_img_path = item_path / f'rgba_{self.light_name}_{light_rotation}.png'
                relight_img = Image.open(relight_img_path)
                if self.downsample != 1.0:
                    relight_img = relight_img.resize(img_wh, Image.Resampling.LANCZOS)
                relight_img = self.transform(relight_img) # [4, H, W]
                relight_img = relight_img.view(4, -1).permute(1, 0) # [H*W, 4]
                ## Blend RGBA to RGB (alpha-composite over a white background)
                relight_rgbs = relight_img[:, :3] * relight_img[:, -1:] + (1 - relight_img[:, -1:]) # [H*W, 3]
                ## Obtain background mask, bg = False
                # relight_mask = (~(relight_img[:, -1:] == 0)).to(torch.bool) # [H*W, 1]
                light_idx = torch.tensor(light_rotation_idx, dtype=torch.int8).repeat((img_wh[0] * img_wh[1], 1)).to(torch.int8) # [H*W, 1], transform to int8 to save memory
                # Rays are appended once per rotation so indices stay aligned with rgbs.
                self.all_rays.append(rays)
                self.all_rgbs.append(relight_rgbs)
                # self.all_masks.append(relight_mask)
                self.all_light_idx.append(light_idx)
        self.all_rays = torch.cat(self.all_rays, dim=0) # [N*H*W, 6]
        self.all_rgbs = torch.cat(self.all_rgbs, dim=0) # [N*H*W, 3]
        # self.all_masks = torch.cat(self.all_masks, dim=0) # [N*H*W, 1]
        self.all_light_idx = torch.cat(self.all_light_idx, dim=0) # [N*H*W, 1]
    def world2ndc(self, points, lindisp=None):
        """Map world-space points into the bbox-normalized cube.

        `lindisp` is accepted for interface compatibility but unused here.
        """
        device = points.device
        return (points - self.center.to(device)) / self.radius.to(device)
    def read_stack(self):
        """Stack every item's rays/rgbs into [len(self), H*W, *] tensors.

        NOTE(review): this appends to ``self.all_rays`` / ``self.all_rgbs``
        without re-initializing them; it assumes they are still Python lists.
        Calling it after ``read_all_frames`` (which replaces them with tensors)
        would misbehave — confirm intended call order.
        """
        for idx in range(self.__len__()):
            item = self.__getitem__(idx)
            rays = item['rays']
            rgbs = item['rgbs']
            self.all_rays += [rays]
            self.all_rgbs += [rgbs]
        self.all_rays = torch.stack(self.all_rays, 0) # [len(self), H*W, 6]
        self.all_rgbs = torch.stack(self.all_rgbs, 0) # [len(self), H*W, 3]
    def __len__(self):
        # One dataset item per frame folder.
        return len(self.split_list)
    def __getitem__(self, idx):
        """Load one frame: rays, per-rotation RGB, masks, albedo, normals, camera matrices."""
        item_path = self.split_list[idx]
        item_meta_path = item_path / 'metadata.json'
        with open(item_meta_path, 'r') as f:
            meta = json.load(f)
        img_wh = (int(meta['imw'] / self.downsample), int(meta['imh'] / self.downsample))
        # Get ray directions for all pixels, same for all images (with same H, W, focal)
        focal = 0.5 * int(meta['imw']) / np.tan(0.5 * meta['cam_angle_x']) # fov -> focal length
        focal *= img_wh[0] / meta['imw']
        directions = get_ray_directions(img_wh[1], img_wh[0], [focal, focal]) # [H, W, 3]
        directions = directions / torch.norm(directions, dim=-1, keepdim=True)
        cam_trans = np.array(list(map(float, meta["cam_transform_mat"].split(',')))).reshape(4, 4)
        pose = cam_trans @ self.blender2opencv
        c2w = torch.FloatTensor(pose) # [4, 4]
        w2c = torch.linalg.inv(c2w) # [4, 4]
        relight_rgbs_list = []
        light_idx_list = []
        for light_rotation_idx in range(len(self.light_rotation)):
            # Read RGB data
            light_rotation = self.light_rotation[light_rotation_idx]
            relight_img_path = item_path / f'rgba_{self.light_name}_{light_rotation}.png'
            relight_img = Image.open(relight_img_path)
            if self.downsample != 1.0:
                relight_img = relight_img.resize(img_wh, Image.Resampling.LANCZOS)
            relight_img = self.transform(relight_img) # [4, H, W]
            relight_img = relight_img.view(4, -1).permute(1, 0) # [H*W, 4]
            ## Blend A to RGB (alpha-composite over a white background)
            relight_rgbs = relight_img[:, :3] * relight_img[:, -1:] + (1 - relight_img[:, -1:]) # [H*W, 3]
            light_idx = torch.tensor(light_rotation_idx, dtype=torch.int).repeat((img_wh[0] * img_wh[1], 1)) # [H*W, 1]
            relight_rgbs_list.append(relight_rgbs)
            light_idx_list.append(light_idx)
        relight_rgbs = torch.stack(relight_rgbs_list, dim=0) # [rotation_num, H*W, 3]
        light_idx = torch.stack(light_idx_list, dim=0) # [rotation_num, H*W, 1]
        ## Obtain background mask, bg = False (mask comes from the last loaded image's alpha)
        relight_mask = ~(relight_img[:, -1:] == 0)
        # Read albedo image
        albedo_path = item_path / f'albedo.png'
        albedo = Image.open(albedo_path)
        if self.downsample != 1.0:
            albedo = albedo.resize(img_wh, Image.Resampling.LANCZOS)
        albedo = self.transform(albedo)
        albedo = albedo.view(4, -1).permute(1, 0)
        ## Blend A to RGB (alpha-composite over a white background)
        albedo = albedo[:, :3] * albedo[:, -1:] + (1 - albedo[:, -1:]) # [H*W, 3]
        # Read ray data
        rays_o, rays_d = get_rays(directions, c2w)
        rays = torch.cat([rays_o, rays_d], 1) # [H*W, 6]
        # Read normal data
        normal_path = item_path / 'normal.png'
        normal_img = Image.open(normal_path)
        normal = np.array(normal_img)[..., :3] / 255 # [H, W, 3] in range [0, 1]
        normal = (normal - 0.5) * 2.0 # [H, W, 3] in range (-1, 1)
        # Background pixels get the +Z normal via alpha blending.
        normal_bg = np.array([0.0, 0.0, 1.0])
        normal_alpha = np.array(normal_img)[..., [-1]] / 255 # [H, W, 1] in range [0, 1]
        normal = normal * normal_alpha + normal_bg * (1.0 - normal_alpha) # [H, W, 3]
        ## Downsample
        if self.downsample != 1.0:
            normal = cv2.resize(normal, img_wh[::-1], interpolation=cv2.INTER_NEAREST)
        normal = torch.FloatTensor(normal) # [H, W, 3]
        # Re-normalize after blending/resizing so normals are unit length.
        normal = normal / torch.norm(normal, dim=-1, keepdim=True)
        normals = normal.view(-1, 3) # [H*W, 3]
        item = {
            'img_wh': img_wh, # (int, int)
            'light_idx': light_idx, # [rotation_num, H*W, 1]
            'rgbs': relight_rgbs, # [rotation_num, H*W, 3],
            'rgbs_mask': relight_mask, # [H*W, 1]
            'albedo': albedo, # [H*W, 3]
            'rays': rays, # [H*W, 6]
            'normals': normals, # [H*W, 3],
            'c2w': c2w, # [4, 4]
            'w2c': w2c # [4, 4]
        }
        return item
if __name__ == "__main__":
    from opt import config_parser
    args = config_parser()
    dataset_kwargs = dict(
        root_dir='/home/haian/Dataset/NeRF_DATA/hotdog_rotate',
        hdr_dir='/home/haian/Dataset/light_probes/low_res_envmaps_rotated/',
        split='test',
        random_test=False,
        downsample=1.0,
    )
    dataset = TensoIR_Dataset_unknown_rotated_lights(**dataset_kwargs)
    # Smoke-test: fetch one item and inspect tensor shapes, then drop into a debugger.
    sample = dataset[0]
    print(sample['albedo'].shape)
    print(sample['rgbs_mask'].shape)
    import ipdb
    ipdb.set_trace()
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
dataLoader/tensoIR_simple.py | Python |
import os
import json
from pathlib import Path
import numpy as np
from PIL import Image
from tqdm import tqdm
import torch
from torch.utils.data import Dataset
from torchvision import transforms as T
from dataLoader.ray_utils import *
from models.relight_utils import read_hdr
import torch.nn as nn
class TensoIR_Dataset_simple(Dataset):
def __init__(self,
root_dir=None,
# hdr_dir=None,
split='train',
random_test = True,
light_names=[],
N_vis=-1,
downsample=1.0,
sub=0,
light_rotation=['000', '120', '240'],
light_name="sunset",
scene_bbox=[[-1.5, -1.5, -1.5], [1.5, 1.5, 1.5]],
img_height=900,
img_width=1200,
near=1.0,
far=12.0,
test_new_pose=False,
**kwargs
):
assert split in ['train', 'test']
self.N_vis = N_vis
self.root_dir = Path(root_dir)
transforms_file_path = os.path.join(self.root_dir, f'transforms_{split}.json')
with open(transforms_file_path, 'r') as f:
self.transforms_json = json.load(f)
self.split = split
self.light_rotation = light_rotation
self.light_names = light_names
self.light_num = len(self.light_rotation)
self.split_list = []
self.chosen_frame_idx = []
for idx, x in enumerate(self.transforms_json['frames']):
if self.transforms_json['frames'][x]['light_idx'] < self.light_num:
self.split_list.append(self.transforms_json['frames'][x]['file_path'])
self.chosen_frame_idx.append(idx)
if not random_test:
# sort split_list and chosen_frame_idx according to the file name
sorted_idx = np.argsort(self.split_list)
self.split_list = [self.split_list[i] for i in sorted_idx]
self.chosen_frame_idx = [self.chosen_frame_idx[i] for i in sorted_idx]
if sub > 0:
self.split_list = self.split_list[:sub]
self.img_wh = (int(int(img_width) / downsample), int(int(img_height) / downsample))
self.white_bg = True
self.downsample = downsample
self.transform = self.define_transforms()
self.light_name = light_name
self.near_far = [near, far]
scene_bbox = [eval(item) for item in scene_bbox]
self.scene_bbox = torch.tensor(scene_bbox)
self.center = torch.mean(self.scene_bbox, axis=0).float().view(1, 1, 3)
self.radius = (self.scene_bbox[1] - self.center).float().view(1, 1, 3)
## Load light data
# self.hdr_dir = Path(hdr_dir)
# self.read_lights()
if split == 'train':
self.read_stack()
if split == 'test' and test_new_pose:
def normalize(x: np.ndarray) -> np.ndarray:
"""Normalization helper function."""
return x / np.linalg.norm(x)
self.read_stack()
poses = self.all_poses.numpy()
centroid = poses[:,:3,3].mean(0)
radcircle = 1.0 * np.linalg.norm(poses[:,:3,3] - centroid, axis=-1).mean()
centroid[0] += 0
centroid[1] += 0
centroid[2] += 0.5 # dog
# centroid[2] += 0.4 # bread
new_up_rad = 30 * np.pi / 180
target_z = radcircle * np.tan(new_up_rad) * (-1)
render_poses = []
for th in np.linspace(0., 2.*np.pi, 150):
camorigin = np.array([radcircle * np.cos(th), radcircle * np.sin(th), 0])
up = np.array([0,0,1])
vec2 = normalize(camorigin)
vec0 = normalize(np.cross(up, vec2))
vec1 = normalize(np.cross(vec2, vec0))
pos = camorigin + centroid
# rotate to align with new pitch rotation
lookat = -camorigin
# rotate the Z axis to the target_z
lookat[2] = target_z
lookat = normalize(lookat)
lookat *= -1
vec2 = lookat
vec1 = normalize(np.cross(vec2, vec0))
p = np.stack([vec0, vec1, vec2, pos], 1)
render_poses.append(p)
render_poses = np.stack(render_poses, 0)
render_poses = np.concatenate([render_poses, np.broadcast_to(poses[0,:3,-1:], render_poses[:,:3,-1:].shape)], -1)
render_poses = render_poses[...,:4]
img_wh = self.img_wh
# Get ray directions for all pixels, same for all images (with same H, W, focal)
fov = self.transforms_json["camera_angle_x"]
focal = 0.5 * int(img_wh[0]) / np.tan(0.5 * fov) # fov -> focal length
# directions = get_ray_directions_blender(img_wh[1], img_wh[0], [focal, focal]) # [H, W, 3]
directions = get_ray_directions_blender(img_wh[1], img_wh[0], [focal, focal]) # [H, W, 3]
directions = directions / torch.norm(directions, dim=-1, keepdim=True)
self.test_rays = []
self.test_w2c = []
for pose_idx in tqdm(range(render_poses.shape[0])):
pose = render_poses[pose_idx]
pose = torch.from_numpy(pose).float()
c2w = torch.cat([pose, torch.tensor([[0, 0, 0, 1]])], dim=0)
# c2w = torch.from_numpy(pose)
# import ipdb; ipdb.set_trace()
rays_o, rays_d = get_rays(directions, c2w)
rays = torch.cat([rays_o, rays_d], 1) # [H*W, 6]
self.test_rays.append(rays)
w2c = torch.inverse(c2w)
self.test_w2c.append(w2c)
self.test_rays = torch.stack(self.test_rays, dim=0)
self.test_w2c = torch.stack(self.test_w2c, dim=0)
del self.all_rays, self.all_rgbs, self.all_light_idx, self.all_masks, self.all_poses
def define_transforms(self):
    """Build the image preprocessing pipeline (PIL image -> float tensor)."""
    return T.Compose([T.ToTensor()])
def read_lights(self):
    """Load the environment map for ``self.light_name`` if the file exists.

    Sets ``self.lights_probes`` to a flattened [H*W, 3] float tensor and
    records the map resolution in ``self.envir_map_h`` / ``self.envir_map_w``.
    When no ``.hdr`` file is found, ``self.lights_probes`` stays ``None``.
    """
    self.lights_probes = None
    hdr_path = self.hdr_dir / f'{self.light_name}.hdr'
    if not os.path.exists(hdr_path):
        return
    light_rgb = read_hdr(hdr_path)
    # Record resolution before flattening to [H*W, 3].
    self.envir_map_h, self.envir_map_w = light_rgb.shape[:2]
    flattened = torch.from_numpy(light_rgb.reshape(-1, 3)).float()
    self.lights_probes = flattened
def world2ndc(self, points, lindisp=None):
    """Map world-space points into the normalized scene cube.

    Subtracts ``self.center`` and divides by ``self.radius`` (both moved to
    the points' device). ``lindisp`` is accepted for API compatibility but
    is unused here.
    """
    centered = points - self.center.to(points.device)
    return centered / self.radius.to(points.device)
def read_stack(self):
    """Materialize the whole split: run __getitem__ for every frame and
    concatenate rays / colors / light indices / masks / poses into flat
    per-pixel tensors stored on self.
    """
    self.all_rays = []
    self.all_rgbs = []
    self.all_light_idx = []
    self.all_masks = []
    self.all_poses = []
    for idx in tqdm(range(self.__len__())):
        item = self.__getitem__(idx)
        rays = item['rays']
        rgbs = item['rgbs']
        light_idx = item['light_idx']
        # rgbs/light_idx carry a leading batch dim of 1 (see __getitem__);
        # squeeze it so the final cat runs over pixels.
        self.all_rays += [rays]
        self.all_rgbs += [rgbs.squeeze(0)]
        self.all_light_idx += [light_idx.squeeze(0)]
        self.all_masks += [item['rgbs_mask'].squeeze(0)]
        self.all_poses += [item['c2w'].squeeze(0)]
    self.all_rays = torch.cat(self.all_rays, dim=0)  # [N*H*W, 6]
    self.all_rgbs = torch.cat(self.all_rgbs, dim=0)  # [N*H*W, 3]
    self.all_light_idx = torch.cat(self.all_light_idx, dim=0)  # [N*H*W, 1]
    self.all_masks = torch.cat(self.all_masks, dim=0)  # [N*H*W, 1]
    self.all_poses = torch.stack(self.all_poses, dim=0)  # [N, 4, 4]
def __len__(self):
    # One sample per entry in the split file list.
    return len(self.split_list)
def __getitem__(self, idx):
    """Load one frame: per-pixel rays, RGB (alpha-composited onto white),
    foreground mask, per-pixel light index, and camera matrices.
    """
    item_path = self.split_list[idx]
    # Normalize a leading "./" so os.path.join stays clean.
    if item_path.startswith('./'):
        item_path = item_path[2:]
    frame_idx = self.chosen_frame_idx[idx]
    img_wh = self.img_wh
    # Get ray directions for all pixels, same for all images (with same H, W, focal)
    fov = self.transforms_json["camera_angle_x"]
    focal = 0.5 * int(img_wh[0]) / np.tan(0.5 * fov)  # fov -> focal length
    directions = get_ray_directions_blender(img_wh[1], img_wh[0], [focal, focal])  # [H, W, 3]
    directions = directions / torch.norm(directions, dim=-1, keepdim=True)
    cam_trans = self.transforms_json['frames'][str(frame_idx)]["transform_matrix"]
    cam_trans = np.array(cam_trans).reshape(4, 4)
    c2w = torch.FloatTensor(cam_trans)  # [4, 4]
    w2c = torch.linalg.inv(c2w)  # [4, 4]
    light_idx = self.transforms_json['frames'][str(frame_idx)]["light_idx"]
    # light_idx = 0
    # Broadcast the per-frame light index to every pixel.
    light_idx = torch.tensor(light_idx, dtype=torch.int).repeat((img_wh[0] * img_wh[1], 1))  # [H*W, 1]
    img_path = os.path.join(self.root_dir, item_path)
    img = Image.open(img_path)
    img = self.transform(img)  # [4, H, W] — assumes RGBA input; TODO confirm
    img = img.view(4, -1).permute(1, 0)  # [H*W, 4]
    ## Blend A to RGB: composite onto a white background via the alpha channel.
    img_rgbs = img[:, :3] * img[:, -1:] + (1 - img[:, -1:])  # [H*W, 3]
    ## Obtain background mask, bg = False
    img_mask = ~(img[:, -1:] == 0)
    rays_o, rays_d = get_rays(directions, c2w)
    rays = torch.cat([rays_o, rays_d], 1)  # [H*W, 6]
    item = {
        'img_wh': img_wh,  # (int, int)
        'light_idx': light_idx.view(1, -1, 1),  # [1, H*W, 1]
        'rgbs': img_rgbs.view(1, -1, 3),  # [1, H*W, 3]
        'rgbs_mask': img_mask,  # [H*W, 1]
        'rays': rays,  # [H*W, 6]
        'c2w': c2w,  # [4, 4]
        'w2c': w2c  # [4, 4]
    }
    return item
# Smoke test: instantiate the dataset with local data paths.
if __name__ == "__main__":
    from opt import config_parser
    args = config_parser()
    # NOTE(review): these absolute paths are machine-specific; adjust locally.
    dataset = TensoIR_Dataset_simple(
        root_dir='/home/haian/Dataset/real_captured/dog_all_colmap/images',
        hdr_dir='/home/haian/Dataset/light_probes/low_res_envmaps_rotated/',
        split='train',
        random_test=False,
        downsample=1.0,
        light_rotation=['000']
    )
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
docker_patch.sh | Shell | #!/bin/bash
# Replace any existing camtools install with the latest from the mirror.
pip uninstall camtools -y
pip install git+https://gitee.com/yxlao/camtools.git -U
# Runtime dependencies. (Fix: "lpips" was listed twice on this line.)
pip install tqdm scikit-image opencv-python configargparse lpips imageio-ffmpeg kornia tensorboard loguru plyfile
# Pinned versions required by the training code.
pip install setuptools==59.5.0 imageio==2.11.1 yapf==0.30.0
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
eval/prepare_eval.py | Python | from pathlib import Path
import json
import shutil
import camtools as ct
script_dir = Path(__file__).parent.absolute()
def all_eval_items_are_valid(eval_items):
    """Validate eval items before any copying happens.

    eval_items: list of dicts with keys "gt_path", "pd_src_path",
    "pd_dst_path". Checks that every gt/pd_src path exists on disk and that
    no path (including destinations) appears twice. Prints one line per
    problem found. Returns True only when everything is valid.
    """
    seen_paths = set()
    missing_found = False
    duplicate_found = False

    def _check_exists(path):
        # gt and pd_src must already exist; pd_dst is created later.
        nonlocal missing_found
        if not path.exists():
            missing_found = True
            print(f"{path} does not exist.")

    def _check_unique(path):
        nonlocal duplicate_found
        if path in seen_paths:
            duplicate_found = True
            print(f"{path} is duplicated.")
        else:
            seen_paths.add(path)

    for eval_item in eval_items:
        gt_path = Path(eval_item["gt_path"])
        pd_src_path = Path(eval_item["pd_src_path"])
        pd_dst_path = Path(eval_item["pd_dst_path"])
        _check_exists(gt_path)
        _check_exists(pd_src_path)
        for path in (gt_path, pd_src_path, pd_dst_path):
            _check_unique(path)
    return not (missing_found or duplicate_found)
def prepare_relight(json_path):
    """Copy relighting predictions into their evaluation locations.

    Reads eval items (gt_path / pd_src_path / pd_dst_path) from json_path,
    validates every path first, then copies each pd_src_path to its
    pd_dst_path, creating destination directories as needed.
    """
    eval_items = json.loads(Path(json_path).read_text())
    # Validate everything up front so a half-finished copy never happens.
    if not all_eval_items_are_valid(eval_items):
        print("Aborted.")
        return
    for eval_item in eval_items:
        pd_src_path = Path(eval_item["pd_src_path"])
        pd_dst_path = Path(eval_item["pd_dst_path"])
        pd_dst_path.parent.mkdir(parents=True, exist_ok=True)
        shutil.copy(pd_src_path, pd_dst_path)
        print(f"Copy {pd_src_path} -> {pd_dst_path}")
def prepare_nvs(json_path):
    """Copy NVS predictions into their evaluation locations, cropped.

    Same flow as prepare_relight, except each prediction image is cropped
    to its left third before being written to pd_dst_path.
    """
    eval_items = json.loads(Path(json_path).read_text())
    # Validate everything up front so a half-finished copy never happens.
    if not all_eval_items_are_valid(eval_items):
        print("Aborted.")
        return
    for eval_item in eval_items:
        pd_src_path = Path(eval_item["pd_src_path"])
        pd_dst_path = Path(eval_item["pd_dst_path"])
        pd_dst_path.parent.mkdir(parents=True, exist_ok=True)
        im_pd = ct.io.imread(pd_src_path)
        src_shape = im_pd.shape
        # Keep only the leftmost width // 3 columns of the prediction.
        im_pd = im_pd[:, :src_shape[1] // 3, :]
        dst_shape = im_pd.shape
        ct.io.imwrite(pd_dst_path, im_pd)
        print(f"Copy {pd_src_path} -> {pd_dst_path}, "
              f"shape: {src_shape} -> {dst_shape}")
def main():
    """Rebuild eval_nvs/ and eval_relight/ from scratch for the ORD dataset."""
    eval_nvs_dir = script_dir.parent / "eval_nvs"
    eval_relight_dir = script_dir.parent / "eval_relight"
    # Start clean: stale outputs from a previous run would mix with new ones.
    if eval_nvs_dir.is_dir():
        print(f"Removing {eval_nvs_dir}")
        shutil.rmtree(eval_nvs_dir)
    if eval_relight_dir.is_dir():
        print(f"Removing {eval_relight_dir}")
        shutil.rmtree(eval_relight_dir)
    prepare_relight(script_dir / "ord_relight.json")
    prepare_nvs(script_dir / "ord_nvs.json")
    # Other datasets, currently disabled:
    # prepare_relight(script_dir / "synth4relight_relight.json")
    # prepare_nvs(script_dir / "synth4relight_nvs.json")
    # prepare_nvs(script_dir / "dtu_nvs.json")
    # prepare_nvs(script_dir / "bmvs_nvs.json")
# Script entry point.
if __name__ == "__main__":
    main()
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
gen_commands.py | Python | from pathlib import Path
# (dataset, scene) pairs to generate commands for.
# Commented-out entries are datasets that are currently disabled.
_datasets_scenes = [
    ("ord", "antman"),
    ("ord", "apple"),
    ("ord", "chest"),
    ("ord", "gamepad"),
    ("ord", "ping_pong_racket"),
    ("ord", "porcelain_mug"),
    ("ord", "tpiece"),
    ("ord", "wood_bowl"),
    # ("synth4relight_subsampled", "air_baloons"),
    # ("synth4relight_subsampled", "chair"),
    # ("synth4relight_subsampled", "hotdog"),
    # ("synth4relight_subsampled", "jugs"),
    # ("bmvs", "bear"),
    # ("bmvs", "clock"),
    # ("bmvs", "dog"),
    # ("bmvs", "durian"),
    # ("bmvs", "jade"),
    # ("bmvs", "man"),
    # ("bmvs", "sculpture"),
    # ("bmvs", "stone"),
    # ("dtu", "scan37"),
    # ("dtu", "scan40"),
    # ("dtu", "scan55"),
    # ("dtu", "scan63"),
    # ("dtu", "scan65"),
    # ("dtu", "scan69"),
    # ("dtu", "scan83"),
    # ("dtu", "scan97"),
]
def get_latest_checkpoint_path(dataset, scene):
    """Return the checkpoint Path with the highest iteration number.

    Searches every log/<dataset>_<scene>* experiment directory for
    checkpoints/*.th files and returns the one with the largest iteration.

    Example checkpoint path:
        "log/ord_antman-20230530-181906/checkpoints/ord_antman_70000.th"

    Raises:
        ValueError: when no experiment directory or no checkpoint exists.
    """
    print(f"Getting the latest checkpoint path for {dataset}_{scene}...")
    log_dir = Path("log")
    dataset_scene = f"{dataset}_{scene}"
    exp_dirs = [
        d for d in log_dir.iterdir() if d.is_dir() and d.name.startswith(dataset_scene)
    ]
    if len(exp_dirs) == 0:
        raise ValueError(f"No experiment directory found for {dataset_scene}.")

    def _ckpt_iter(path):
        # Checkpoint filenames end with "_<iteration>.th".
        return int(path.stem.split("_")[-1])

    max_ckpt_iter = 0
    ckpt_path = None
    for exp_dir in exp_dirs:
        ckpt_dir = exp_dir / "checkpoints"
        if not ckpt_dir.is_dir():
            # Experiment may have crashed before saving any checkpoint.
            continue
        ckpt_paths = [
            p for p in ckpt_dir.iterdir() if p.is_file() and p.suffix == ".th"
        ]
        if len(ckpt_paths) == 0:
            continue
        # BUGFIX: compare by numeric iteration, not lexicographic filename
        # order — as strings, "..._9000.th" sorts AFTER "..._10000.th", so the
        # previous `sorted(...)[-1]` could pick an older checkpoint.
        best = max(ckpt_paths, key=_ckpt_iter)
        if _ckpt_iter(best) > max_ckpt_iter:
            max_ckpt_iter = _ckpt_iter(best)
            ckpt_path = best
    if max_ckpt_iter == 0:
        raise ValueError(f"No checkpoint found for {dataset_scene}, "
                         f"searched {exp_dirs}.")
    return ckpt_path
def gen_commands(dataset, scene):
    """
    Generate train, nvs render, and relighting commands.

    Returns a 4-tuple of shell-command strings:
    (train_cmd, render_cmd, relight_cmd, render_relight_cmd), where the
    last one chains render and relight with "&&". All non-train commands
    embed the latest checkpoint found under log/.

    # Example train command:
    python train_ord.py \
        --config ./configs/single_light/ord.txt \
        --datadir ./data/ord/antman/test \
        --expname ord_antman
    # Example render command:
    python train_ord.py \
        --config ./configs/single_light/ord.txt \
        --datadir ./data/ord/antman/test \
        --expname ord_antman \
        --render_only 1 \
        --render_test 1 \
        --ckpt log/ord_antman-20230531-013113/checkpoints/ord_antman_10000.th
    # Example relighting command:
    python scripts/relight_ord.py \
        --config configs/relighting_test/ord_relight.txt \
        --batch_size 800 \
        --datadir ./data/ord/antman/test \
        --hdrdir ./data/ord/antman/test \
        --geo_buffer_path ./relighting/ord_antman \
        --ckpt log/ord_antman-20230531-013113/checkpoints/ord_antman_10000.th
    """
    # Get the latest checkpoint path.
    ckpt_path = get_latest_checkpoint_path(dataset, scene)
    print(f"{dataset}_{scene}: {ckpt_path}")
    # ORD scenes keep their data under a "test" subdirectory; others do not.
    test_suffix = "test" if dataset == "ord" else ""
    # Train.
    train_cmd = (f"python train_ord.py "
                 f"--config ./configs/single_light/ord.txt "
                 f"--datadir ./data/{dataset}/{scene}/{test_suffix} "
                 f"--expname {dataset}_{scene}")
    # Render.
    render_cmd = (f"python train_ord.py "
                  f"--config ./configs/single_light/ord.txt "
                  f"--datadir ./data/{dataset}/{scene}/{test_suffix} "
                  f"--expname {dataset}_{scene} "
                  f"--render_only 1 "
                  f"--render_test 1 "
                  f"--ckpt {ckpt_path}")
    # Relighting.
    relight_cmd = (f"python scripts/relight_ord.py "
                   f"--config configs/relighting_test/ord_relight.txt "
                   f"--batch_size 800 "
                   f"--datadir ./data/{dataset}/{scene}/{test_suffix} "
                   f"--hdrdir ./data/{dataset}/{scene}/{test_suffix} "
                   f"--geo_buffer_path ./relighting/{dataset}_{scene} "
                   f"--ckpt {ckpt_path}")
    # Render and relight in one command.
    render_relight_cmd = (
        f"python train_ord.py "
        f"--config ./configs/single_light/ord.txt "
        f"--datadir ./data/{dataset}/{scene}/{test_suffix} "
        f"--expname {dataset}_{scene} "
        f"--render_only 1 "
        f"--render_test 1 "
        f"--ckpt {ckpt_path} && "
        f"python scripts/relight_ord.py "
        f"--config configs/relighting_test/ord_relight.txt "
        f"--batch_size 800 "
        f"--datadir ./data/{dataset}/{scene}/{test_suffix} "
        f"--hdrdir ./data/{dataset}/{scene}/{test_suffix} "
        f"--geo_buffer_path ./relighting/{dataset}_{scene} "
        f"--ckpt {ckpt_path}")
    return train_cmd, render_cmd, relight_cmd, render_relight_cmd
def main():
    """Generate commands for every configured scene and write the render and
    relight commands (not train) to commands.txt, one per line."""
    all_cmds = []
    for dataset, scene in _datasets_scenes:
        cmds = gen_commands(dataset, scene)
        train_cmd, render_cmd, relight_cmd, _render_relight_cmd = cmds
        print("######################")
        print(f"{dataset}_{scene}:")
        for cmd in (train_cmd, render_cmd, relight_cmd):
            print(cmd)
        # all_cmds.append(train_cmd)
        all_cmds.extend([render_cmd, relight_cmd])
    with open("commands.txt", "w") as f:
        f.write("\n".join(all_cmds))
# Script entry point.
if __name__ == "__main__":
    main()
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
models/relight_utils.py | Python |
import numpy as np
import cv2
from loguru import logger
import torch
import torch.nn.functional as F
from models.relight_utils import *
from models.tensoRF_init import raw2alpha
import os
from pathlib import Path
def safe_l2_normalize(x, dim=None, eps=1e-6):
    # L2-normalize along `dim`; F.normalize clamps the norm by `eps`,
    # avoiding division by zero on (near-)zero vectors.
    return F.normalize(x, p=2, dim=dim, eps=eps)
def GGX_specular(
    normal,
    pts2c,
    pts2l,
    roughness,
    fresnel
):
    """Evaluate a GGX-style microfacet specular lobe per ray and per light.

    Args:
        normal: [nrays, 3] surface normals.
        pts2c: [nrays, 3] surface-to-camera (view) directions.
        pts2l: [nrays, nlights, 3] surface-to-light directions.
        roughness: per-ray roughness; inline comments say [nrays, 3] —
            presumably [nrays, 1]; TODO confirm at call sites.
        fresnel: per-ray Fresnel reflectance at normal incidence (F0).
    Returns:
        [nrays, nlights, 3] specular response (no cosine/light terms).
    """
    L = F.normalize(pts2l, dim=-1)  # [nrays, nlights, 3]
    V = F.normalize(pts2c, dim=-1)  # [nrays, 3]
    H = F.normalize((L + V[:, None, :]) / 2.0, dim=-1)  # [nrays, nlights, 3] half vectors
    N = F.normalize(normal, dim=-1)  # [nrays, 3]

    NoV = torch.sum(V * N, dim=-1, keepdim=True)  # [nrays, 1]
    # Flip normals that face away from the camera.
    N = N * NoV.sign()  # [nrays, 3]

    NoL = torch.sum(N[:, None, :] * L, dim=-1, keepdim=True).clamp_(1e-6, 1)  # [nrays, nlights, 1] TODO check broadcast
    NoV = torch.sum(N * V, dim=-1, keepdim=True).clamp_(1e-6, 1)  # [nrays, 1]
    NoH = torch.sum(N[:, None, :] * H, dim=-1, keepdim=True).clamp_(1e-6, 1)  # [nrays, nlights, 1]
    VoH = torch.sum(V[:, None, :] * H, dim=-1, keepdim=True).clamp_(1e-6, 1)  # [nrays, nlights, 1]

    alpha = roughness * roughness  # [nrays, 3]
    alpha2 = alpha * alpha  # [nrays, 3]
    k = (alpha + 2 * roughness + 1.0) / 8.0
    # Spherical-gaussian approximation of the Schlick Fresnel exponent.
    FMi = ((-5.55473) * VoH - 6.98316) * VoH
    frac0 = fresnel[:, None, :] + (1 - fresnel[:, None, :]) * torch.pow(2.0, FMi)  # [nrays, nlights, 3]
    frac = frac0 * alpha2[:, None, :]  # [nrays, 1]
    nom0 = NoH * NoH * (alpha2[:, None, :] - 1) + 1
    nom1 = NoV * (1 - k) + k
    nom2 = NoL * (1 - k[:, None, :]) + k[:, None, :]
    # Clamp denominator to keep the ratio finite and bounded.
    nom = (4 * np.pi * nom0 * nom0 * nom1[:, None, :] * nom2).clamp_(1e-6, 4 * np.pi)
    spec = frac / nom
    return spec
# Alias so call sites can refer to the specular BRDF generically.
brdf_specular = GGX_specular
def grid_sample(image, optical):
    """Pure-PyTorch bilinear grid sampling.

    Args:
        image: [N, C, IH, IW] input feature map.
        optical: [N, H, W, 2] sampling grid in [-1, 1], (x, y) order.
    Returns:
        [N, C, H, W] sampled output. Corner indices are clamped to the
        image border; the interpolation weights keep gradients w.r.t. the
        sampling grid.
    """
    N, C, IH, IW = image.shape
    _, H, W, _ = optical.shape

    ix = optical[..., 0]
    iy = optical[..., 1]

    # Map normalized [-1, 1] coords to pixel coordinates
    # (align_corners=True convention).
    ix = ((ix + 1) / 2) * (IW-1)
    iy = ((iy + 1) / 2) * (IH-1)
    with torch.no_grad():
        # Integer corners of the bilinear footprint (nw/ne/sw/se).
        ix_nw = torch.floor(ix)
        iy_nw = torch.floor(iy)
        ix_ne = ix_nw + 1
        iy_ne = iy_nw
        ix_sw = ix_nw
        iy_sw = iy_nw + 1
        ix_se = ix_nw + 1
        iy_se = iy_nw + 1

    # Bilinear weights, computed WITH grad so sampling stays differentiable
    # w.r.t. `optical` even though gather indices below are detached.
    nw = (ix_se - ix) * (iy_se - iy)
    ne = (ix - ix_sw) * (iy_sw - iy)
    sw = (ix_ne - ix) * (iy - iy_ne)
    se = (ix - ix_nw) * (iy - iy_nw)

    with torch.no_grad():
        # Clamp corner indices to the image border (in-place).
        torch.clamp(ix_nw, 0, IW-1, out=ix_nw)
        torch.clamp(iy_nw, 0, IH-1, out=iy_nw)

        torch.clamp(ix_ne, 0, IW-1, out=ix_ne)
        torch.clamp(iy_ne, 0, IH-1, out=iy_ne)

        torch.clamp(ix_sw, 0, IW-1, out=ix_sw)
        torch.clamp(iy_sw, 0, IH-1, out=iy_sw)

        torch.clamp(ix_se, 0, IW-1, out=ix_se)
        torch.clamp(iy_se, 0, IH-1, out=iy_se)

    # Gather the four neighbors by flat index and blend.
    image = image.contiguous().view(N, C, IH * IW)

    nw_val = torch.gather(image, 2, (iy_nw * IW + ix_nw).long().view(N, 1, H * W).repeat(1, C, 1))
    ne_val = torch.gather(image, 2, (iy_ne * IW + ix_ne).long().view(N, 1, H * W).repeat(1, C, 1))
    sw_val = torch.gather(image, 2, (iy_sw * IW + ix_sw).long().view(N, 1, H * W).repeat(1, C, 1))
    se_val = torch.gather(image, 2, (iy_se * IW + ix_se).long().view(N, 1, H * W).repeat(1, C, 1))

    out_val = (nw_val.view(N, C, H, W) * nw.view(N, 1, H, W) +
               ne_val.view(N, C, H, W) * ne.view(N, 1, H, W) +
               sw_val.view(N, C, H, W) * sw.view(N, 1, H, W) +
               se_val.view(N, C, H, W) * se.view(N, 1, H, W))

    return out_val
class Environment_Light():
    """Named HDR environment maps with per-texel directions and PDFs for
    importance (and uniform) sampling of incident light directions.
    """

    def __init__(self, hdr_directory, light_names, device='cuda'):
        """Load `<light_name>.hdr` (and its `gt_exposure_*.txt` EV file)
        from hdr_directory for every name in light_names."""
        # transverse the hdr image to get the environment light
        files = os.listdir(hdr_directory)
        self.hdr_rgbs = dict()        # name -> [H, W, 3] radiance
        self.hdr_pdf_sample = dict()  # name -> texel-sampling PDF [H, W, 1]
        self.hdr_pdf_return = dict()  # name -> PDF in solid-angle measure
        self.hdr_dir = dict()         # name -> per-texel world directions
        # Get hdr paths.
        hdr_directory = Path(hdr_directory)
        hdr_paths = [hdr_directory / f"{light_name}.hdr"
                     for light_name in light_names]
        for hdr_path in hdr_paths:
            if not hdr_path.is_file():
                raise ValueError(f"hdr_path {hdr_path} does not exist.")
        # TODO: This part is hard-coded.
        # hdr_path = gt_env_512_rotated_0000.hdr
        # exposure_path = gt_exposure_0000.txt
        hdr_idx = [int(hdr_path.stem[-4:]) for hdr_path in hdr_paths]
        exposure_paths = [hdr_directory / f"gt_exposure_{idx:04d}.txt"
                          for idx in hdr_idx]
        for exposure_path in exposure_paths:
            if not exposure_path.is_file():
                raise ValueError(f"exposure_path {exposure_path} does not exist.")
        if not len(light_names) == len(hdr_paths) == len(exposure_paths):
            raise ValueError(f"light_names, hdr_paths, and exposure_paths "
                             f"should have the same length, but got "
                             f"{len(light_names)}, {len(hdr_paths)}, and "
                             f"{len(exposure_paths)} respectively.")
        for light_name, hdr_path, exposure_path in zip(light_names, hdr_paths, exposure_paths):
            light_rgbs = read_hdr(hdr_path)
            light_rgbs = torch.from_numpy(light_rgbs)
            # Apply exposure compensation.
            exposure_comp = float(exposure_path.read_text())
            print(f"Envmap: {light_name}, EV: {exposure_comp}")
            light_rgbs = light_rgbs * (2.0 ** exposure_comp)
            self.hdr_rgbs[light_name] = light_rgbs.to(device)

            # compute the pdf of importance sampling of the environment map
            light_intensity = torch.sum(light_rgbs, dim=2, keepdim=True)  # [H, W, 1]
            env_map_h, env_map_w, _ = light_intensity.shape
            h_interval = 1.0 / env_map_h
            # sin(theta) weighting accounts for texel solid angle in the
            # equirectangular parameterization.
            sin_theta = torch.sin(torch.linspace(0 + 0.5 * h_interval, np.pi - 0.5 * h_interval, env_map_h))
            pdf = light_intensity * sin_theta.view(-1, 1, 1)  # [H, W, 1]
            pdf = pdf / torch.sum(pdf)
            pdf_return = pdf * env_map_h * env_map_w / (2 * np.pi * np.pi * sin_theta.view(-1, 1, 1))
            self.hdr_pdf_sample[light_name] = pdf.to(device)
            self.hdr_pdf_return[light_name] = pdf_return.to(device)

            # Per-texel world-space direction for the equirectangular grid.
            lat_step_size = np.pi / env_map_h
            lng_step_size = 2 * np.pi / env_map_w
            phi, theta = torch.meshgrid([torch.linspace(np.pi / 2 - 0.5 * lat_step_size, -np.pi / 2 + 0.5 * lat_step_size, env_map_h),
                                         torch.linspace(np.pi - 0.5 * lng_step_size, -np.pi + 0.5 * lng_step_size, env_map_w)], indexing='ij')
            view_dirs = torch.stack([torch.cos(theta) * torch.cos(phi),
                                     torch.sin(theta) * torch.cos(phi),
                                     torch.sin(phi)], dim=-1).view(env_map_h, env_map_w, 3)  # [envH, envW, 3]
            self.hdr_dir[light_name] = view_dirs.to(device)

            # Uniform-sampling PDFs (same sin-theta solid-angle weighting);
            # NOTE(review): overwritten each iteration, so they end up
            # matching the resolution of the LAST light — confirm intended.
            self.envir_map_uniform_pdf = torch.ones_like(light_intensity) * sin_theta.view(-1, 1, 1) / (env_map_h * env_map_w)
            self.envir_map_uniform_pdf = (self.envir_map_uniform_pdf / torch.sum(self.envir_map_uniform_pdf)).to(device)
            self.envir_map_uniform_pdf_return = self.envir_map_uniform_pdf * env_map_h * env_map_w / (2 * np.pi * np.pi * sin_theta.view(-1, 1, 1).to(device))

    @torch.no_grad()
    def sample_light(self, light_name, bs, num_samples, sample_type="importance"):
        '''
        - Args:
            - light_name: the name of the light
            - bs: batch size
            - num_samples: the number of samples
            - sample_type: "importance" (PDF ∝ intensity * sin(theta)) or
              "uniform"; any other value leaves the outputs unbound
              (NOTE(review): would raise UnboundLocalError).
        - Returns:
            - light_dir: the direction of the light [bs, num_samples, 3]
            - light_rgb: the rgb of the light [bs, num_samples, 3]
            - light_pdf: the pdf of the light [bs, num_samples, 1]
        '''
        if sample_type == "importance":
            environment_map = self.hdr_rgbs[light_name]
            environment_map_pdf_sample = self.hdr_pdf_sample[light_name].view(-1).expand(bs, -1)  # [bs, env_map_h * env_map_w]
            environment_map_pdf_return = self.hdr_pdf_return[light_name].view(-1).expand(bs, -1)  # [bs, env_map_h * env_map_w]
            environment_map_dir = self.hdr_dir[light_name].view(-1, 3).expand(bs, -1, -1)  # [bs, env_map_h * env_map_w, 3]
            environment_map_rgb = environment_map.view(-1, 3).expand(bs, -1, -1)  # [bs, env_map_h * env_map_w, 3]
            # sampled the light directions
            light_dir_idx = torch.multinomial(environment_map_pdf_sample, num_samples, replacement=True)  # [bs, num_samples]
            light_dir = environment_map_dir.gather(1, light_dir_idx.unsqueeze(-1).expand(-1, -1, 3)).view(bs, num_samples, 3)  # [bs, num_samples, 3]
            # sampled light rgbs and pdfs
            light_rgb = environment_map_rgb.gather(1, light_dir_idx.unsqueeze(-1).expand(-1, -1, 3)).view(bs, num_samples, 3)  # [bs, num_samples, 3]
            light_pdf = environment_map_pdf_return.gather(1, light_dir_idx).unsqueeze(-1)  # [bs, num_samples, 1]
        elif sample_type == "uniform":
            environment_map = self.hdr_rgbs[light_name]
            environment_map_pdf_sample = self.envir_map_uniform_pdf.view(-1).expand(bs, -1)  # [bs, env_map_h * env_map_w]
            environment_map_pdf_return = self.envir_map_uniform_pdf_return.view(-1).expand(bs, -1)  # [bs, env_map_h * env_map_w]
            environment_map_dir = self.hdr_dir[light_name].view(-1, 3).expand(bs, -1, -1)  # [bs, env_map_h * env_map_w, 3]
            environment_map_rgb = environment_map.view(-1, 3).expand(bs, -1, -1)  # [bs, env_map_h * env_map_w, 3]
            # sampled the light directions
            light_dir_idx = torch.multinomial(environment_map_pdf_sample, num_samples, replacement=True)  # [bs, num_samples]
            light_dir = environment_map_dir.gather(1, light_dir_idx.unsqueeze(-1).expand(-1, -1, 3)).view(bs, num_samples, 3)  # [bs, num_samples, 3]
            # sampled light rgbs and pdfs
            light_rgb = environment_map_rgb.gather(1, light_dir_idx.unsqueeze(-1).expand(-1, -1, 3)).view(bs, num_samples, 3)  # [bs, num_samples, 3]
            light_pdf = environment_map_pdf_return.gather(1, light_dir_idx).unsqueeze(-1)  # [bs, num_samples, 1]
        return light_dir, light_rgb, light_pdf

    def get_light(self, light_name, incident_dir):
        """Look up envmap radiance along each incident direction [N, 3]
        via bilinear sampling of the equirectangular map; returns [N, 3]."""
        envir_map = self.hdr_rgbs[light_name]
        envir_map = envir_map.permute(2, 0, 1).unsqueeze(0)  # [1, 3, H, W]
        # Convert directions to (phi, theta) spherical angles.
        phi = torch.arccos(incident_dir[:, 2]).reshape(-1) - 1e-6
        theta = torch.atan2(incident_dir[:, 1], incident_dir[:, 0]).reshape(-1)
        # normalize to [-1, 1]
        query_y = (phi / np.pi) * 2 - 1
        query_x = - theta / np.pi
        grid = torch.stack((query_x, query_y)).permute(1, 0).unsqueeze(0).unsqueeze(0)
        light_rgbs = F.grid_sample(envir_map, grid, align_corners=True).squeeze().permute(1, 0).reshape(-1, 3)
        return light_rgbs
def predict_visibility_by_chunk(vis_model,
                                surface_pts,
                                surf2light,
                                chunk_size=40960,
                                device='cuda'):
    '''predict visibility for each point at each direction using visbility network
    - args:
        - vis_model: visibility network used to predict visibility for each point at each direction
        - surface_pts: [N, 3] surface points
        - surf2light: [N, 3], light incident direction for each surface point, pointing from surface to light
        - chunk_size: how many points to feed the network per forward pass
          (bounds peak memory)
    - return:
        - visibility: [N, 1] visibility for each point at each direction
    '''
    n_pts = surface_pts.shape[0]
    visibility = torch.zeros((n_pts, 1), dtype=torch.float32).to(device)  # [N, 1]
    # Feed the network in fixed-size chunks and scatter results back in place.
    for idxs in torch.split(torch.arange(n_pts), chunk_size):
        visibility[idxs] = vis_model(surface_pts[idxs], surf2light[idxs])
    return visibility
@torch.no_grad()
def get_visibility_and_indirect_light( visibility_net,
                                       tensoIR,
                                       surface_pts,
                                       surf2light,
                                       light_idx,
                                       nSample=96,
                                       vis_near=0.05,
                                       vis_far=1.5,
                                       chunk_size=40960,
                                       device='cuda'):
    '''predict visibility for each point at each direction using visbility network
    - args:
        - visibility_net: visibility network used to predict visibility for each point at each direction
        - tensoIR: tensoIR model is used to compute the visibility and indirect lighting
        - surface_pts: [N, 3] surface points location
        - surf2light: [N, 3], light incident direction for each surface point, pointing from surface to light
        - light_idx: [N, 1], index of lighitng
        - nSample: number of samples for each ray along incident light direction
    - return:
        - visibility_predict: [N, 1] visibility result from the visibility net
        - visibility_compute: [N, 1] visibility result by choosing some directions and then computing the density
        - indirect_light: [N, 3] indirect light in the corresponding direction
        - computed_visbility_mask: [N, 1] mask indicating whether the direction is invisible to the direct light
    '''
    visibility_predict = torch.zeros((surface_pts.shape[0]), dtype=torch.float32).to(device)  # [N, 1]
    visibility_compute = torch.zeros((surface_pts.shape[0]), dtype=torch.float32).to(device)  # [N, 1]
    indirect_light = torch.zeros((surface_pts.shape[0], 3), dtype=torch.float32).to(device)  # [N, 1]
    # Grad is re-enabled for the network forward pass only (the function is
    # otherwise under @torch.no_grad()).
    with torch.enable_grad():
        chunk_idxs_vis_predict = torch.split(torch.arange(surface_pts.shape[0]), 40960)  # TODO: chunk size should be configurable
        # predict all directions
        for chunk_idx in chunk_idxs_vis_predict:
            chunk_surf2light = surf2light[chunk_idx]
            chunk_surface_pts = surface_pts[chunk_idx]
            chunk_visibility = visibility_net(chunk_surface_pts, chunk_surf2light)  # [N, 1]
            visibility_predict[chunk_idx] = chunk_visibility.squeeze(-1)  # [N, ]
    invisibile_to_direct_light_mask = visibility_predict < 0.5  # [N, 1] index of ray where the direct light is not visible
    visibility_predict = visibility_predict.reshape(-1, 1)  # [N, 1]
    # For rays the net deems occluded, recompute visibility and gather
    # indirect radiance by marching density along the light direction.
    surface_pts_masked = surface_pts[invisibile_to_direct_light_mask]  # [masked(N), 3]
    surf2light_masked = surf2light[invisibile_to_direct_light_mask]  # [masked(N), 3]
    light_idx_masked = light_idx[invisibile_to_direct_light_mask]  # [masked(N), 1]
    visibility_masked = torch.zeros((surface_pts_masked.shape[0]), dtype=torch.float32).to(device)  # [masked(N), 1]
    indirect_light_masked = torch.zeros((surface_pts_masked.shape[0], 3), dtype=torch.float32).to(device)  # [masked(N), 1]
    chunk_idxs_vis_compute = torch.split(torch.arange(surface_pts_masked.shape[0]), 20480)  # TODO: chunk size should be configurable
    # compute the directions where the direct light is not visible
    for chunk_idx in chunk_idxs_vis_compute:
        chunk_surface_pts = surface_pts_masked[chunk_idx]  # [chunk_size, 3]
        chunk_surf2light = surf2light_masked[chunk_idx]  # [chunk_size, 3]
        chunk_light_idx = light_idx_masked[chunk_idx]  # [chunk_size, 1]
        nerv_vis_chunk, nerfactor_vis_chunk, indirect_light_chunk = compute_radiance(tensoIR=tensoIR,
                                                                                     surf_pts=chunk_surface_pts,
                                                                                     light_in_dir=chunk_surf2light,
                                                                                     light_idx=chunk_light_idx,
                                                                                     nSample=nSample,
                                                                                     vis_near=vis_near,
                                                                                     vis_far=vis_far,
                                                                                     device=device
                                                                                     )
        visibility_chunk = nerv_vis_chunk
        visibility_masked[chunk_idx] = visibility_chunk
        indirect_light_masked[chunk_idx] = indirect_light_chunk
    visibility_compute[invisibile_to_direct_light_mask] = visibility_masked
    # randomly sample some rays uniformly to make supervision of visibility more robust
    uniform_random_sample_mask = (torch.rand(invisibile_to_direct_light_mask.shape, device=device) < 0.4)  # [N, 1]
    recompute_visibility_mask = torch.logical_and(uniform_random_sample_mask, ~invisibile_to_direct_light_mask)  # [N, 1]
    surface_pts_masked = surface_pts[recompute_visibility_mask]  # [masked(N), 3]
    surf2light_masked = surf2light[recompute_visibility_mask]  # [masked(N), 3]
    recompute_visibility_masked = torch.zeros((surface_pts_masked.shape[0]), dtype=torch.float32).to(device)  # [masked(N), 1]
    chunk_idxs_vis_recompute = torch.split(torch.arange(surface_pts_masked.shape[0]), 20480)  # to save memory; TODO: chunk size should be configurable
    # Transmittance only (no indirect light) for the uniformly re-sampled rays.
    for chunk_idx in chunk_idxs_vis_recompute:
        chunk_surface_pts = surface_pts_masked[chunk_idx]  # [chunk_size, 3]
        chunk_surf2light = surf2light_masked[chunk_idx]  # [chunk_size, 3]
        nerv_vis, nerfactor_vis = compute_transmittance(tensoIR=tensoIR,
                                                        surf_pts=chunk_surface_pts,
                                                        light_in_dir=chunk_surf2light,
                                                        nSample=nSample,
                                                        vis_near=vis_near,
                                                        vis_far=vis_far,
                                                        device=device
                                                        )  # [N, 1]
        chunk_visibility = nerv_vis
        recompute_visibility_masked[chunk_idx] = chunk_visibility
    visibility_compute[recompute_visibility_mask] = recompute_visibility_masked
    visibility_compute = visibility_compute.reshape(-1, 1)  # [N, 1]
    indirect_light[invisibile_to_direct_light_mask] = indirect_light_masked  # [N, 3]
    indirect_light = indirect_light.reshape(-1, 3)  # [N, 3]
    computed_visbility_mask = torch.logical_or(invisibile_to_direct_light_mask, uniform_random_sample_mask)  # [N, 1]
    computed_visbility_mask = computed_visbility_mask.reshape(-1, 1)  # [N, 1]
    return visibility_predict, visibility_compute, indirect_light, computed_visbility_mask
@torch.no_grad()
def compute_secondary_shading_effects(
        tensoIR,
        surface_pts,
        surf2light,
        light_idx,
        nSample=96,
        vis_near=0.05,
        vis_far=1.5,
        chunk_size=15000,
        device='cuda'
):
    '''compute visibility for each point at each direction without visbility network
    - args:
        - tensoIR: tensoIR model is used to compute the visibility and indirect lighting
        - surface_pts: [N, 3] surface points location
        - surf2light: [N, 3], light incident direction for each surface point, pointing from surface to light
        - light_idx: [N, 1], index of lighitng
        - nSample: number of samples for each ray along incident light direction
        - chunk_size: rays per compute_radiance call (bounds peak memory)
    - return:
        - visibility_compute: [N, 1] visibility result by choosing some directions and then computing the density
        - indirect_light: [N, 3] indirect light in the corresponding direction
    '''
    visibility_compute = torch.zeros((surface_pts.shape[0]), dtype=torch.float32).to(device)  # [N, 1]
    indirect_light = torch.zeros((surface_pts.shape[0], 3), dtype=torch.float32).to(device)  # [N, 1]
    # March density along every incident direction in memory-bounded chunks.
    chunk_idxs_vis_compute = torch.split(torch.arange(surface_pts.shape[0]), chunk_size)
    for chunk_idx in chunk_idxs_vis_compute:
        chunk_surface_pts = surface_pts[chunk_idx]  # [chunk_size, 3]
        chunk_surf2light = surf2light[chunk_idx]  # [chunk_size, 3]
        chunk_light_idx = light_idx[chunk_idx]  # [chunk_size, 1]
        nerv_vis_chunk, nerfactor_vis_chunk, indirect_light_chunk = compute_radiance(
            tensoIR=tensoIR,
            surf_pts=chunk_surface_pts,
            light_in_dir=chunk_surf2light,
            light_idx=chunk_light_idx,
            nSample=nSample,
            vis_near=vis_near,
            vis_far=vis_far,
            device=device
        )
        # Use the NeRV-style transmittance as the visibility estimate.
        visibility_chunk = nerv_vis_chunk
        visibility_compute[chunk_idx] = visibility_chunk
        indirect_light[chunk_idx] = indirect_light_chunk
    visibility_compute = visibility_compute.reshape(-1, 1)  # [N, 1]
    indirect_light = indirect_light.reshape(-1, 3)  # [N, 3]
    return visibility_compute, indirect_light
def render_with_BRDF(
        depth_map,
        normal_map,
        albedo_map,
        roughness_map,
        fresnel_map,
        rays,
        tensoIR,
        light_idx,
        sample_method='fixed_envirmap',
        chunk_size=15000,
        device='cuda',
        use_linear2srgb=True,
        args=None
):
    """Physically-based relighting: evaluate the rendering equation per ray
    from predicted G-buffers (depth/normal/albedo/roughness/fresnel).

    Args:
        depth_map: [bs] ray termination depths.
        normal_map / albedo_map / fresnel_map: [bs, 3] per-ray G-buffers.
        roughness_map: per-ray roughness (shape per caller convention).
        rays: [bs, 6] concatenated ray origins and directions.
        light_idx: per-ray lighting index used to select the environment map.
        sample_method: incident-direction sampling scheme for tensoIR.
        args: provides second_nSample / second_near / second_far for the
            secondary visibility pass.
    Returns:
        [bs, 3] shaded RGB, clamped to [0, 1] (sRGB-encoded when
        use_linear2srgb is True).
    """
    # Relight module
    ## Sample surface points using depth prediction
    surface_z = depth_map  # [bs,]
    rays_o, rays_d = rays[..., :3].to(device), rays[..., 3:].to(device)  # [bs, 3]
    surface_xyz = rays_o + (surface_z).unsqueeze(-1) * rays_d  # [bs, 3]

    ## Get incident light direction
    light_area_weight = tensoIR.light_area_weight.to(device)  # [envW * envH, ]
    incident_light_dirs = tensoIR.gen_light_incident_dirs(method=sample_method).to(device)  # [envW * envH, 3]
    surf2l = incident_light_dirs.reshape(1, -1, 3).repeat(surface_xyz.shape[0], 1, 1)  # [bs, envW * envH, 3]
    surf2c = -rays_d  # [bs, 3]
    surf2c = safe_l2_normalize(surf2c, dim=-1)  # [bs, 3]

    ## get visibilty map from visibility network or compute it using density
    cosine = torch.einsum("ijk,ik->ij", surf2l, normal_map)  # surf2l:[bs, envW * envH, 3] * normal_map:[bs, 3] -> cosine:[bs, envW * envH]
    cosine = torch.clamp(cosine, min=0.0)  # [bs, envW * envH]
    cosine_mask = (cosine > 1e-6)  # [bs, envW * envH], mask half of the incident light that is behind the surface
    visibility_compute = torch.zeros((*cosine_mask.shape, 1), device=device)  # [bs, envW * envH, 1]
    indirect_light = torch.zeros((*cosine_mask.shape, 3), device=device)  # [bs, envW * envH, 3]
    # Only front-facing (cosine > 0) directions need the secondary pass.
    visibility_compute[cosine_mask], \
        indirect_light[cosine_mask] = compute_secondary_shading_effects(
            tensoIR=tensoIR,
            surface_pts=surface_xyz.unsqueeze(1).expand(-1, surf2l.shape[1], -1)[cosine_mask],
            surf2light=surf2l[cosine_mask],
            light_idx=light_idx.view(-1, 1, 1).expand((*cosine_mask.shape, 1))[cosine_mask],
            nSample=args.second_nSample,
            vis_near=args.second_near,
            vis_far=args.second_far,
            chunk_size=chunk_size,
            device=device
        )
    visibility_to_use = visibility_compute

    ## Get BRDF specs
    nlights = surf2l.shape[1]
    specular = brdf_specular(normal_map, surf2c, surf2l, roughness_map, fresnel_map)  # [bs, envW * envH, 3]
    # Lambertian diffuse (albedo / pi) plus the GGX specular lobe.
    surface_brdf = albedo_map.unsqueeze(1).expand(-1, nlights, -1) / np.pi + specular  # [bs, envW * envH, 3]

    ## Compute rendering equation
    envir_map_light_rgbs = tensoIR.get_light_rgbs(incident_light_dirs, device=device).to(device)  # [light_num, envW * envH, 3]
    direct_light_rgbs = torch.index_select(envir_map_light_rgbs, dim=0, index=light_idx.squeeze(-1)).to(device)  # [bs, envW * envH, 3]
    light_rgbs = visibility_to_use * direct_light_rgbs + indirect_light  # [bs, envW * envH, 3]

    # # no visibility and indirect light
    # light_rgbs = direct_light_rgbs
    # # # no indirect light
    # light_rgbs = visibility_to_use * direct_light_rgbs # [bs, envW * envH, 3]
    if sample_method == 'stratifed_sample_equal_areas':
        # Equal-area stratified samples: Monte Carlo mean over 4*pi sphere.
        rgb_with_brdf = torch.mean(4 * torch.pi * surface_brdf * light_rgbs * cosine[:, :, None], dim=1)  # [bs, 3]
    else:
        # Fixed envmap grid: quadrature weighted by per-texel solid angle.
        light_pix_contrib = surface_brdf * light_rgbs * cosine[:, :, None] * light_area_weight[None, :, None]  # [bs, envW * envH, 3]
        rgb_with_brdf = torch.sum(light_pix_contrib, dim=1)  # [bs, 3]

    ### Tonemapping
    rgb_with_brdf = torch.clamp(rgb_with_brdf, min=0.0, max=1.0)
    ### Colorspace transform
    if use_linear2srgb and rgb_with_brdf.shape[0] > 0:
        rgb_with_brdf = linear2srgb_torch(rgb_with_brdf)

    return rgb_with_brdf
def linear2srgb_torch(tensor_0to1):
    """Convert linear-RGB values in [0, 1] to sRGB.

    Accepts either a ``torch.Tensor`` or a ``numpy.ndarray``. The input is
    first clipped back into [0, 1] (with a debug log if clipping occurs),
    then the piecewise sRGB transfer curve is applied.

    Note: the 1e-6 offset inside the power term guards against the infinite
    gradient of x**(1/2.4) at x == 0.
    """
    # Select the pow/where implementations matching the input's array library.
    if isinstance(tensor_0to1, torch.Tensor):
        pow_func, where_func = torch.pow, torch.where
    elif isinstance(tensor_0to1, np.ndarray):
        pow_func, where_func = np.power, np.where
    else:
        raise NotImplementedError(f'Do not support dtype {type(tensor_0to1)}')
    # Standard sRGB transfer-curve constants.
    linear_thres = 0.0031308
    linear_coeff = 12.92
    exp_coeff = 1.055
    exponent = 2.4
    tensor_0to1 = _clip_0to1_warn_torch(tensor_0to1)
    below_knee = tensor_0to1 * linear_coeff
    above_knee = exp_coeff * pow_func(tensor_0to1 + 1e-6, 1 / exponent) - (exp_coeff - 1)
    return where_func(tensor_0to1 <= linear_thres, below_knee, above_knee)
def _clip_0to1_warn_torch(tensor_0to1):
"""Enforces [0, 1] on a tensor/array that should be already [0, 1].
"""
msg = "Some values outside [0, 1], so clipping happened"
if isinstance(tensor_0to1, torch.Tensor):
if torch.min(tensor_0to1) < 0 or torch.max(tensor_0to1) > 1:
logger.debug(msg)
tensor_0to1 = torch.clamp(
tensor_0to1, min=0, max=1)
elif isinstance(tensor_0to1, np.ndarray):
if tensor_0to1.min() < 0 or tensor_0to1.max() > 1:
logger.debug(msg)
tensor_0to1 = np.clip(tensor_0to1, 0, 1)
else:
raise NotImplementedError(f'Do not support dtype {type(tensor_0to1)}')
return tensor_0to1
def _convert_sph_conventions(pts_r_angle1_angle2, what2what):
"""Internal function converting between different conventions for
spherical coordinates. See :func:`cart2sph` for conventions.
"""
if what2what == 'lat-lng_to_theta-phi':
pts_r_theta_phi = np.zeros(pts_r_angle1_angle2.shape)
# Radius is the same
pts_r_theta_phi[:, 0] = pts_r_angle1_angle2[:, 0]
# Angle 1
pts_r_theta_phi[:, 1] = np.pi / 2 - pts_r_angle1_angle2[:, 1]
# Angle 2
ind = pts_r_angle1_angle2[:, 2] < 0
pts_r_theta_phi[ind, 2] = 2 * np.pi + pts_r_angle1_angle2[ind, 2]
pts_r_theta_phi[np.logical_not(ind), 2] = \
pts_r_angle1_angle2[np.logical_not(ind), 2]
return pts_r_theta_phi
if what2what == 'theta-phi_to_lat-lng':
pts_r_lat_lng = np.zeros(pts_r_angle1_angle2.shape)
# Radius is the same
pts_r_lat_lng[:, 0] = pts_r_angle1_angle2[:, 0]
# Angle 1
pts_r_lat_lng[:, 1] = np.pi / 2 - pts_r_angle1_angle2[:, 1]
# Angle 2
ind = pts_r_angle1_angle2[:, 2] > np.pi
pts_r_lat_lng[ind, 2] = pts_r_angle1_angle2[ind, 2] - 2 * np.pi
pts_r_lat_lng[np.logical_not(ind), 2] = \
pts_r_angle1_angle2[np.logical_not(ind), 2]
return pts_r_lat_lng
raise NotImplementedError(what2what)
def sph2cart(pts_sph: np.ndarray, convention='lat-lng'):
    """Convert spherical coordinates to Cartesian coordinates.

    Args:
        pts_sph: (n, 3) array of (radius, angle1, angle2), angles in radians.
        convention: 'lat-lng' (angle1 = latitude, angle2 = longitude) or
            'theta-phi' (angle1 = polar angle, angle2 = azimuth).

    Returns:
        numpy.ndarray: (n, 3) array of (x, y, z).
    """
    # Check input shape
    assert (pts_sph.ndim == 2 and pts_sph.shape[-1] == 3), "Shape of input must be (n, 3)"
    # Check angle range
    assert (np.abs(pts_sph[:, 1:]) <= 2 * np.pi).all(), "Input degree falls out of [-2pi, 2pi]"
    # Convert to lat-lng convention
    if convention == 'lat-lng':
        pts_r_lat_lng = pts_sph
    elif convention == 'theta-phi':
        pts_r_lat_lng = _convert_sph_conventions(pts_sph, 'theta-phi_to_lat-lng')
    else:
        raise NotImplementedError(convention)
    # Compute xyz coordinates from the lat-lng parameterization:
    # z is along the pole, (x, y) span the equatorial plane.
    r = pts_r_lat_lng[:, 0]
    lat = pts_r_lat_lng[:, 1]
    lng = pts_r_lat_lng[:, 2]
    z = r * np.sin(lat)
    x = r * np.cos(lat) * np.cos(lng)
    y = r * np.cos(lat) * np.sin(lng)
    pts_cart = np.stack((x, y, z), axis=-1)
    return pts_cart
def read_hdr(path):
    """Reads an HDR map from disk.

    Args:
        path (str): Path to the .hdr file.

    Returns:
        numpy.ndarray: Loaded (float) HDR map with RGB channels in order.
    """
    # Decode the raw file bytes with OpenCV (which yields BGR channel order),
    # then swap to RGB.
    with open(path, 'rb') as file_handle:
        raw_bytes = np.frombuffer(file_handle.read(), np.uint8)
    bgr = cv2.imdecode(raw_bytes, cv2.IMREAD_UNCHANGED)
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
@torch.no_grad()
def compute_visibility(tensoIR, pts, light_xyz, nSample, vis_near, vis_far, args, device='cuda'):
    '''Compute visibility for each surface point at each incident-light
    direction by compositing density along secondary rays.
    - args:
        - tensoIR: base model
        - pts: [N, 3] surface points
        - light_xyz: [predicted_light_num, 3], locations for each pixel in the environment map
        - nSample: number of samples for each ray along the incident light direction
        - vis_near / vis_far: near / far bounds for secondary-ray sampling
        - args: config namespace; args.vis_equation selects 'nerv' or 'nerfactor'
    - return:
        - visibility: [N, predicted_light_num, 1] visibility for each point at each direction
    '''
    surf2light = light_xyz[None, :, :] - pts[:, None, :]  # [N, light_num, 3]
    surf2light = safe_l2_normalize(surf2light, dim=-1)  # [N, light_num, 3]
    # Expand points so that each (point, light) pair becomes one secondary ray.
    surface_pts = pts.unsqueeze(1).expand(-1, light_xyz.shape[0], -1)  # [N, light_num, 3]
    surface_pts = surface_pts.reshape(-1, 3)  # [N*light_num, 3]
    surf2light = surf2light.reshape(-1, 3)  # [N*light_num, 3]
    visibility = torch.zeros((surface_pts.shape[0]), dtype=torch.float32).to(device)  # [N*light_num]
    # Process rays in chunks to bound peak memory.
    # TODO: chunk size should be configurable
    chunk_idxs = torch.split(torch.arange(surface_pts.shape[0]), 81920)
    for chunk_idx in chunk_idxs:
        chunk_surface_pts = surface_pts[chunk_idx]  # [chunk_size, 3]
        chunk_surf2light = surf2light[chunk_idx]  # [chunk_size, 3]
        nerv_vis, nerfactor_vis = compute_transmittance(tensoIR=tensoIR,
                                                        surf_pts=chunk_surface_pts,
                                                        light_in_dir=chunk_surf2light,
                                                        nSample=nSample,
                                                        vis_near=vis_near,
                                                        vis_far=vis_far,
                                                        device=device
                                                        )  # [chunk_size,]
        if args.vis_equation == 'nerv':
            chunk_visibility = nerv_vis
        elif args.vis_equation == 'nerfactor':
            chunk_visibility = nerfactor_vis
        else:
            # Fail loudly instead of hitting an UnboundLocalError below.
            raise ValueError(f"Unknown args.vis_equation: {args.vis_equation!r}")
        visibility[chunk_idx] = chunk_visibility
    visibility = visibility.reshape(-1, light_xyz.shape[0], 1)  # [N, light_num, 1]
    return visibility
@torch.no_grad()
def compute_transmittance(tensoIR, surf_pts, light_in_dir, nSample=128, vis_near=0.1, vis_far=2, device='cuda'):
    '''Accumulate transmittance along secondary rays, the same way as in NeRV.
    - args:
        - tensoIR: base model
        - surf_pts: [N, 3] surface points locations
        - light_in_dir: [N, 3], normalized light incident direction, pointing from surface to light
        - nSample: number of samples for each ray along incident light direction
        - vis_near: sampling begins at this distance
        - vis_far: sampling ends at this distance
    - return:
        - nerv_vis: [N,] transmittance per ray, using the equation mentioned in NeRV
        - nerfactor_vis: [N,] transmittance per ray, using the equation implemented in the code of NeRFactor
    '''
    # Equally-spaced samples along each secondary ray; ray_valid marks the
    # samples lying inside the model's AABB.
    xyz_sampled, z_vals, ray_valid = sample_ray_equally(tensoIR,
                                                        surf_pts,
                                                        light_in_dir,
                                                        nSample=nSample,
                                                        vis_near=vis_near,
                                                        vis_far=vis_far
                                                        )
    # Per-sample segment lengths; the last segment gets length 0.
    dists = torch.cat((z_vals[:, 1:] - z_vals[:, :-1], torch.zeros_like(z_vals[:, :1])), dim=-1)
    if tensoIR.alphaMask is not None:
        # Additionally invalidate samples whose cached alpha is zero
        # (known-empty space), keeping ray_valid consistent.
        alphas = tensoIR.alphaMask.sample_alpha(xyz_sampled[ray_valid])
        alpha_mask = alphas > 0
        ray_invalid = ~ray_valid
        ray_invalid[ray_valid] |= (~alpha_mask)
        ray_valid = ~ray_invalid
    # Create empty tensor to store sigma and rgb
    sigma = torch.zeros(xyz_sampled.shape[:-1], device=xyz_sampled.device)
    if ray_valid.any():
        # Density is only evaluated for valid samples; invalid ones stay 0.
        xyz_sampled = tensoIR.normalize_coord(xyz_sampled)
        sigma_feature = tensoIR.compute_densityfeature(xyz_sampled[ray_valid])  # [..., 1] # detach unremoved
        validsigma = tensoIR.feature2density(sigma_feature)
        sigma[ray_valid] = validsigma
    alpha, weight, transimittance = raw2alpha(sigma, dists * tensoIR.distance_scale)
    acc_map = torch.sum(weight, -1)  # [N, ]
    nerv_vis = transimittance.squeeze(-1)  # NeRV-style: remaining transmittance
    nerfactor_vis = 1 - acc_map  # NeRFactor-style: one minus accumulated opacity
    return nerv_vis, nerfactor_vis
@torch.no_grad()
def sample_ray_equally(tensoIR, rays_o, rays_d, nSample=-1, vis_near=0.03, vis_far=1.5, device='cuda'):
    '''Sample points at strictly equal spacing along rays.

    Differs from the model's own sample_ray in two ways:
    1. near/far come in as parameters instead of being fixed on the model.
    2. Samples are equally spaced with no perturbation.

    Returns (rays_pts [N, nSample, 3], z_vals [1, nSample],
    in-bounds mask [N, nSample]).
    '''
    # Depth values linearly interpolated between vis_near and vis_far.
    fractions = torch.linspace(0., 1., nSample, device=device)  # [nSample,]
    z_vals = (vis_near * (1. - fractions) + vis_far * fractions).unsqueeze(0)  # [1, nSample]
    rays_pts = rays_o[..., None, :] + rays_d[..., None, :] * z_vals.view(1, -1, 1)  # [N, nSample, 3]
    # A sample is valid when it lies inside the model's axis-aligned bounding box.
    below_min = tensoIR.aabb[0] > rays_pts
    above_max = rays_pts > tensoIR.aabb[1]
    inside_bbox = ~((below_min | above_max).any(dim=-1))
    return rays_pts, z_vals, inside_bbox
@torch.no_grad()
def compute_visibility_and_indirect_light(tensoIR, pts, light_xyz, light_idx, nSample, vis_near, vis_far, args, device='cuda'):
    '''Compute visibility and indirect light (represented as a radiance field)
    for each point at each direction by compositing density along secondary rays.
    - args:
        - tensoIR: base model
        - pts: [N, 3] surface points
        - light_xyz: [predicted_light_num, 3], locations for each pixel in the environment map
        - light_idx: [N, 1], index for each pixel in the environment map
        - nSample: number of samples for each ray along the incident light direction
        - vis_near / vis_far: near / far bounds for secondary-ray sampling
        - args: config namespace; args.vis_equation selects 'nerv' or 'nerfactor'
    - return:
        - visibility: [N, predicted_light_num, 1] visibility for each point at each direction
        - indirect_light: [N, predicted_light_num, 3] indirect radiance for each point at each direction
    '''
    surf2light = light_xyz[None, :, :] - pts[:, None, :]  # [N, light_num, 3]
    surf2light = safe_l2_normalize(surf2light, dim=-1)  # [N, light_num, 3]
    # Broadcast the light indices so each (point, light) ray carries its index.
    light_idx = light_idx.view(-1, 1, 1).expand((-1, surf2light.shape[1], 1)).reshape(-1, 1)  # [N*light_num, 1]
    # Expand points so that each (point, light) pair becomes one secondary ray.
    surface_pts = pts.unsqueeze(1).expand(-1, light_xyz.shape[0], -1)  # [N, light_num, 3]
    surface_pts = surface_pts.reshape(-1, 3)  # [N*light_num, 3]
    surf2light = surf2light.reshape(-1, 3)  # [N*light_num, 3]
    visibility = torch.zeros((surface_pts.shape[0]), dtype=torch.float32).to(device)  # [N*light_num]
    indirect_light = torch.zeros((surface_pts.shape[0], 3), dtype=torch.float32).to(device)  # [N*light_num, 3]
    # Process rays in chunks to bound peak memory.
    # TODO: chunk size should be configurable
    chunk_idxs = torch.split(torch.arange(surface_pts.shape[0]), 81920)
    for chunk_idx in chunk_idxs:
        chunk_surface_pts = surface_pts[chunk_idx]  # [chunk_size, 3]
        chunk_surf2light = surf2light[chunk_idx]  # [chunk_size, 3]
        chunk_light_idx = light_idx[chunk_idx]  # [chunk_size, 1]
        nerv_vis_chunk, nerfactor_vis_chunk, indirect_light_chunk = compute_radiance(tensoIR=tensoIR,
                                                                                     surf_pts=chunk_surface_pts,
                                                                                     light_in_dir=chunk_surf2light,
                                                                                     light_idx=chunk_light_idx,
                                                                                     nSample=nSample,
                                                                                     vis_near=vis_near,
                                                                                     vis_far=vis_far,
                                                                                     device=device
                                                                                     )  # [chunk_size,] x2, [chunk_size, 3]
        if args.vis_equation == 'nerv':
            visibility_chunk = nerv_vis_chunk
        elif args.vis_equation == 'nerfactor':
            visibility_chunk = nerfactor_vis_chunk
        else:
            # Fail loudly instead of hitting an UnboundLocalError below.
            raise ValueError(f"Unknown args.vis_equation: {args.vis_equation!r}")
        visibility[chunk_idx] = visibility_chunk
        indirect_light[chunk_idx] = indirect_light_chunk
    visibility = visibility.reshape(-1, light_xyz.shape[0], 1)  # [N, light_num, 1]
    indirect_light = indirect_light.reshape(-1, light_xyz.shape[0], 3)  # [N, light_num, 3]
    return visibility, indirect_light
@torch.no_grad()
def compute_radiance(tensoIR, surf_pts, light_in_dir, light_idx, nSample=128, vis_near=0.05, vis_far=1.5, device=None):
    '''Composite transmittance and indirect radiance along secondary rays.
    - args:
        - tensoIR: base model
        - surf_pts: [N, 3] surface points locations
        - light_in_dir: [N, 3], normalized light incident direction, pointing from surface to light
        - light_idx: [N, 1], index of light
        - nSample: number of samples for each ray along incident light direction
        - vis_near: sampling begins at this distance
        - vis_far: sampling ends at this distance
    - return:
        - nerv_vis: [N,] transmittance accumulated the NeRV way
        - nerfactor_vis: [N,] transmittance accumulated the NeRFactor way
        - indirect_light: [N, 3] indirect light in the corresponding direction
    '''
    # Equally-spaced samples along each secondary ray; ray_valid marks the
    # samples lying inside the model's AABB.
    xyz_sampled, z_vals, ray_valid = sample_ray_equally(tensoIR,
                                                        surf_pts,
                                                        light_in_dir,
                                                        nSample=nSample,
                                                        vis_near=vis_near,
                                                        vis_far=vis_far
                                                        )
    # Per-sample segment lengths; the last segment gets length 0.
    dists = torch.cat((z_vals[:, 1:] - z_vals[:, :-1], torch.zeros_like(z_vals[:, :1])), dim=-1)
    light_idx = light_idx.view(-1, 1, 1).expand((*xyz_sampled.shape[:-1], 1)) # (batch_N, n_sample, 1)
    viewdirs = light_in_dir.view(-1, 1, 3).expand(xyz_sampled.shape)  # (batch_N, N_samples, 3)
    if tensoIR.alphaMask is not None:
        # Additionally invalidate samples whose cached alpha is zero
        # (known-empty space), keeping ray_valid consistent.
        alphas = tensoIR.alphaMask.sample_alpha(xyz_sampled[ray_valid])
        alpha_mask = alphas > 0
        ray_invalid = ~ray_valid
        ray_invalid[ray_valid] |= (~alpha_mask)
        ray_valid = ~ray_invalid
    # Create empty tensor to store sigma and rgb
    sigma = torch.zeros(xyz_sampled.shape[:-1], device=xyz_sampled.device)
    indirect_light = torch.zeros((*xyz_sampled.shape[:2], 3), device=xyz_sampled.device)
    if ray_valid.any():
        # Density is only evaluated for valid samples; invalid ones stay 0.
        xyz_sampled = tensoIR.normalize_coord(xyz_sampled)
        sigma_feature = tensoIR.compute_densityfeature(xyz_sampled[ray_valid])  # [..., 1]
        sigma[ray_valid] = tensoIR.feature2density(sigma_feature)
    alpha, weight, transmittance = raw2alpha(sigma, dists * tensoIR.distance_scale)
    # Only query the (expensive) appearance decoder where a sample actually
    # contributes to the ray.
    app_mask = weight > tensoIR.rayMarch_weight_thres
    if app_mask.any():
        radiance_field_feat = tensoIR.compute_appfeature(xyz_sampled[app_mask], light_idx[app_mask])
        indirect_light[app_mask] = tensoIR.renderModule(xyz_sampled[app_mask], viewdirs[app_mask], radiance_field_feat)
    acc_map = torch.sum(weight, -1)  # [N, ]
    nerv_vis = transmittance.squeeze(-1)  # NeRV's way to accumulate visibility
    nerfactor_vis = 1 - acc_map  # nerfactor's way to accumulate visibility
    # Alpha-composite the per-sample radiance into one value per ray.
    indirect_light = torch.sum(weight[..., None] * indirect_light, -2)
    return nerv_vis, nerfactor_vis, indirect_light
# No CLI behavior: this module is meant to be imported for its utilities.
if __name__ == "__main__":
    pass
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
models/sh.py | Python | import torch
################## sh function ##################
# Hard-coded normalization constants of the real spherical-harmonic (SH)
# basis, grouped by degree (C0: degree 0, C1: degree 1, C2..C4: degrees 2-4).
# Consumed by eval_sh / eval_sh_bases below; list order matches the SH basis
# index order used there within each degree.
C0 = 0.28209479177387814
C1 = 0.4886025119029199
C2 = [
    1.0925484305920792,
    -1.0925484305920792,
    0.31539156525252005,
    -1.0925484305920792,
    0.5462742152960396
]
C3 = [
    -0.5900435899266435,
    2.890611442640554,
    -0.4570457994644658,
    0.3731763325901154,
    -0.4570457994644658,
    1.445305721320277,
    -0.5900435899266435
]
C4 = [
    2.5033429417967046,
    -1.7701307697799304,
    0.9461746957575601,
    -0.6690465435572892,
    0.10578554691520431,
    -0.6690465435572892,
    0.47308734787878004,
    -1.7701307697799304,
    0.6258357354491761,
]
def eval_sh(deg, sh, dirs):
    """
    Evaluate spherical harmonics at unit directions
    using hardcoded SH polynomials.
    Works with torch/np/jnp.
    ... Can be 0 or more batch dimensions.
    :param deg: int SH max degree. Currently, 0-4 supported
    :param sh: torch.Tensor SH coeffs (..., C, (max degree + 1) ** 2)
    :param dirs: torch.Tensor unit directions (..., 3)
    :return: (..., C)
    """
    assert deg <= 4 and deg >= 0
    assert (deg + 1) ** 2 == sh.shape[-1]
    C = sh.shape[-2]
    # Degree-0 (constant) band.
    result = C0 * sh[..., 0]
    if deg > 0:
        # Slice with 0:1 (not 0) to keep a trailing singleton axis, so x/y/z
        # of shape (..., 1) broadcast against sh[..., i] of shape (..., C).
        x, y, z = dirs[..., 0:1], dirs[..., 1:2], dirs[..., 2:3]
        result = (result -
                  C1 * y * sh[..., 1] +
                  C1 * z * sh[..., 2] -
                  C1 * x * sh[..., 3])
        if deg > 1:
            xx, yy, zz = x * x, y * y, z * z
            xy, yz, xz = x * y, y * z, x * z
            result = (result +
                      C2[0] * xy * sh[..., 4] +
                      C2[1] * yz * sh[..., 5] +
                      C2[2] * (2.0 * zz - xx - yy) * sh[..., 6] +
                      C2[3] * xz * sh[..., 7] +
                      C2[4] * (xx - yy) * sh[..., 8])
            if deg > 2:
                result = (result +
                          C3[0] * y * (3 * xx - yy) * sh[..., 9] +
                          C3[1] * xy * z * sh[..., 10] +
                          C3[2] * y * (4 * zz - xx - yy)* sh[..., 11] +
                          C3[3] * z * (2 * zz - 3 * xx - 3 * yy) * sh[..., 12] +
                          C3[4] * x * (4 * zz - xx - yy) * sh[..., 13] +
                          C3[5] * z * (xx - yy) * sh[..., 14] +
                          C3[6] * x * (xx - 3 * yy) * sh[..., 15])
                if deg > 3:
                    result = (result + C4[0] * xy * (xx - yy) * sh[..., 16] +
                              C4[1] * yz * (3 * xx - yy) * sh[..., 17] +
                              C4[2] * xy * (7 * zz - 1) * sh[..., 18] +
                              C4[3] * yz * (7 * zz - 3) * sh[..., 19] +
                              C4[4] * (zz * (35 * zz - 30) + 3) * sh[..., 20] +
                              C4[5] * xz * (7 * zz - 3) * sh[..., 21] +
                              C4[6] * (xx - yy) * (7 * zz - 1) * sh[..., 22] +
                              C4[7] * xz * (xx - 3 * yy) * sh[..., 23] +
                              C4[8] * (xx * (xx - 3 * yy) - yy * (3 * xx - yy)) * sh[..., 24])
    return result
def eval_sh_bases(deg, dirs):
    """
    Evaluate spherical harmonics bases at unit directions,
    without taking linear combination.
    At each point, the final result may then be
    obtained through simple multiplication.
    :param deg: int SH max degree. Currently, 0-4 supported
    :param dirs: torch.Tensor (..., 3) unit directions
    :return: torch.Tensor (..., (deg+1) ** 2)
    """
    assert deg <= 4 and deg >= 0
    result = torch.empty((*dirs.shape[:-1], (deg + 1) ** 2), dtype=dirs.dtype, device=dirs.device)
    # Degree-0 (constant) band.
    result[..., 0] = C0
    if deg > 0:
        x, y, z = dirs.unbind(-1)
        result[..., 1] = -C1 * y
        result[..., 2] = C1 * z
        result[..., 3] = -C1 * x
        if deg > 1:
            xx, yy, zz = x * x, y * y, z * z
            xy, yz, xz = x * y, y * z, x * z
            result[..., 4] = C2[0] * xy
            result[..., 5] = C2[1] * yz
            result[..., 6] = C2[2] * (2.0 * zz - xx - yy)
            result[..., 7] = C2[3] * xz
            result[..., 8] = C2[4] * (xx - yy)
            if deg > 2:
                result[..., 9] = C3[0] * y * (3 * xx - yy)
                result[..., 10] = C3[1] * xy * z
                result[..., 11] = C3[2] * y * (4 * zz - xx - yy)
                result[..., 12] = C3[3] * z * (2 * zz - 3 * xx - 3 * yy)
                result[..., 13] = C3[4] * x * (4 * zz - xx - yy)
                result[..., 14] = C3[5] * z * (xx - yy)
                result[..., 15] = C3[6] * x * (xx - 3 * yy)
                if deg > 3:
                    result[..., 16] = C4[0] * xy * (xx - yy)
                    result[..., 17] = C4[1] * yz * (3 * xx - yy)
                    result[..., 18] = C4[2] * xy * (7 * zz - 1)
                    result[..., 19] = C4[3] * yz * (7 * zz - 3)
                    result[..., 20] = C4[4] * (zz * (35 * zz - 30) + 3)
                    result[..., 21] = C4[5] * xz * (7 * zz - 3)
                    result[..., 22] = C4[6] * (xx - yy) * (7 * zz - 1)
                    result[..., 23] = C4[7] * xz * (xx - 3 * yy)
                    result[..., 24] = C4[8] * (xx * (xx - 3 * yy) - yy * (3 * xx - yy))
    return result
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
models/tensoRF_general_multi_lights.py | Python | from .tensorBase_general_multi_lights import *
from .relight_utils import grid_sample
class TensorVMSplit(TensorBase):
    """TensoRF model with VM (vector-matrix / plane-line) decomposition,
    extended with a per-light embedding (``light_line``) so appearance can be
    factorized across multiple lighting conditions.
    """
    def __init__(self, aabb, gridSize, device, **kargs):
        super(TensorVMSplit, self).__init__(aabb, gridSize, device, **kargs)
    def init_svd_volume(self, res, device):
        """Create the plane/line factor grids for density and appearance,
        plus the linear layer that mixes appearance components."""
        self.density_plane, self.density_line = self.init_one_svd(self.density_n_comp, self.gridSize, 0.1, device)
        self.app_plane, self.app_line = self.init_one_svd(self.app_n_comp, self.gridSize, 0.1, device)
        self.basis_mat = torch.nn.Linear(sum(self.app_n_comp), self.app_dim, bias=False).to(device)
        # used for factorize the radiance fields under different lighting conditions
        self.light_line = torch.nn.Embedding(self.light_num, sum(self.app_n_comp)).to(device) # (light_num, sum(self.app_n_comp)), such as (10, 16+16+16)
    def init_one_svd(self, n_component, gridSize, scale, device):
        """Build one plane (matrix) + line (vector) parameter pair per
        spatial mode, initialized with scaled Gaussian noise."""
        plane_coef, line_coef = [], []
        for i in range(len(self.vecMode)):
            vec_id = self.vecMode[i]
            mat_id_0, mat_id_1 = self.matMode[i]
            plane_coef.append(torch.nn.Parameter(
                scale * torch.randn((1, n_component[i], gridSize[mat_id_1], gridSize[mat_id_0]))))
            line_coef.append(
                torch.nn.Parameter(scale * torch.randn((1, n_component[i], gridSize[vec_id], 1))))
        return torch.nn.ParameterList(plane_coef).to(device), torch.nn.ParameterList(line_coef).to(device)
    def get_optparam_groups(self, lr_init_spatialxyz = 0.02, lr_init_network = 0.001):
        """Assemble optimizer parameter groups: spatial factors use
        ``lr_init_spatialxyz``, networks use ``lr_init_network``."""
        grad_vars = [ {'params': self.density_line, 'lr': lr_init_spatialxyz},
                      {'params': self.density_plane, 'lr': lr_init_spatialxyz},
                      {'params': self.app_line, 'lr': lr_init_spatialxyz},
                      {'params': self.app_plane, 'lr': lr_init_spatialxyz},
                      {'params': self.basis_mat.parameters(), 'lr':lr_init_network}]
        # TODO: merge the learing rates of the following two groups into config file
        grad_vars += [ {'params': self.light_line.parameters(), 'lr':0.001}]
        if self.light_kind == 'pixel':
            grad_vars += [{'params': self._light_rgbs, 'lr':0.001}]
        elif self.light_kind == 'sg':
            for i in range(self.light_num):
                grad_vars += [{'params': self.lgtSGs_list[i], 'lr':0.001}]
        if isinstance(self.renderModule, torch.nn.Module):
            grad_vars += [{'params':self.renderModule.parameters(), 'lr':lr_init_network}]
        if isinstance(self.renderModule_brdf, torch.nn.Module):
            grad_vars += [{'params':self.renderModule_brdf.parameters(), 'lr':lr_init_network}]
        if (self.normals_kind == "purely_predicted" or self.normals_kind == "derived_plus_predicted" or self.normals_kind == "residue_prediction") \
                and isinstance(self.renderModule_normal, torch.nn.Module):
            grad_vars += [{'params':self.renderModule_normal.parameters(), 'lr':lr_init_network}]
        return grad_vars
    def vectorDiffs(self, vector_comps):
        """Mean absolute off-diagonal entry of the Gram matrix of each line
        factor (penalizes correlated vector components)."""
        total = 0
        for idx in range(len(vector_comps)):
            n_comp, n_size = vector_comps[idx].shape[1:-1]
            dotp = torch.matmul(vector_comps[idx].view(n_comp,n_size), vector_comps[idx].view(n_comp,n_size).transpose(-1,-2))
            # Reshape trick that drops the diagonal of an (n_comp, n_comp) matrix.
            non_diagonal = dotp.view(-1)[1:].view(n_comp-1, n_comp+1)[...,:-1]
            total = total + torch.mean(torch.abs(non_diagonal))
        return total
    def vector_comp_diffs(self):
        """Orthogonality regularizer over density and appearance lines."""
        return self.vectorDiffs(self.density_line) + self.vectorDiffs(self.app_line)
    def density_L1(self):
        """L1 sparsity regularizer on the density plane/line factors."""
        total = 0
        for idx in range(len(self.density_plane)):
            total = total + torch.mean(torch.abs(self.density_plane[idx])) + torch.mean(torch.abs(self.density_line[idx]))# + torch.mean(torch.abs(self.app_plane[idx])) + torch.mean(torch.abs(self.density_plane[idx]))
        return total
    def TV_loss_density(self, reg):
        """Total-variation regularizer on the density planes (lines are
        currently excluded, see the commented variant)."""
        total = 0
        for idx in range(len(self.density_plane)):
            total = total + reg(self.density_plane[idx]) * 1e-2 #+ reg(self.density_line[idx]) * 1e-3
            # total = total + reg(self.density_plane[idx]) * 1e-2 + reg(self.density_line[idx]) * 1e-3
        return total
    def TV_loss_app(self, reg):
        """Total-variation regularizer on the appearance planes (lines are
        currently excluded, see the commented variant)."""
        total = 0
        for idx in range(len(self.app_plane)):
            total = total + reg(self.app_plane[idx]) * 1e-2 #+ reg(self.app_line[idx]) * 1e-3
            # total = total + reg(self.app_plane[idx]) * 1e-2 + reg(self.app_line[idx]) * 1e-3
        return total
    def compute_densityfeature(self, xyz_sampled):
        """Density feature per sampled point: sum over the three modes of
        plane-sample * line-sample. Coordinates are detached, so no gradient
        flows back to xyz here."""
        # plane + line basis
        coordinate_plane = torch.stack((xyz_sampled[..., self.matMode[0]], xyz_sampled[..., self.matMode[1]], xyz_sampled[..., self.matMode[2]])).detach().view(3, -1, 1, 2)
        coordinate_line = torch.stack((xyz_sampled[..., self.vecMode[0]], xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., self.vecMode[2]]))
        coordinate_line = torch.stack((torch.zeros_like(coordinate_line), coordinate_line), dim=-1).detach().view(3, -1, 1, 2)
        sigma_feature = torch.zeros((xyz_sampled.shape[0],), device=xyz_sampled.device)
        for idx_plane in range(len(self.density_plane)):
            plane_coef_point = F.grid_sample(self.density_plane[idx_plane], coordinate_plane[[idx_plane]],
                                                align_corners=True).view(-1, *xyz_sampled.shape[:1]) # [16, h*w*n] [component_num, point_num]
            line_coef_point = F.grid_sample(self.density_line[idx_plane], coordinate_line[[idx_plane]],
                                                align_corners=True).view(-1, *xyz_sampled.shape[:1]) # [16, h*w*n]
            sigma_feature = sigma_feature + torch.sum(plane_coef_point * line_coef_point, dim=0)
        return sigma_feature
    def compute_densityfeature_with_xyz_grad(self, xyz_sampled):
        # the diffenrence this function and compute_densityfeature() is .detach() is removed,
        # and this function replace F.grid_sample with a rewritten function, which support second order gradient
        # plane + line basis
        coordinate_plane = torch.stack((xyz_sampled[..., self.matMode[0]], xyz_sampled[..., self.matMode[1]], xyz_sampled[..., self.matMode[2]])).view(3, -1, 1, 2)
        coordinate_line = torch.stack((xyz_sampled[..., self.vecMode[0]], xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., self.vecMode[2]]))
        coordinate_line = torch.stack((torch.zeros_like(coordinate_line), coordinate_line), dim=-1).view(3, -1, 1, 2)
        sigma_feature = torch.zeros((xyz_sampled.shape[0],), device=xyz_sampled.device)
        for idx_plane in range(len(self.density_plane)):
            # F.grid_sample does not support double backward, hence the custom grid_sample
            plane_coef_point = grid_sample(self.density_plane[idx_plane], coordinate_plane[[idx_plane]]).view(-1, *xyz_sampled.shape[:1]) # [16, h*w*n] [component_num, point_num]
            line_coef_point = grid_sample(self.density_line[idx_plane], coordinate_line[[idx_plane]]).view(-1, *xyz_sampled.shape[:1]) # [16, h*w*n]
            sigma_feature = sigma_feature + torch.sum(plane_coef_point * line_coef_point, dim=0)
        return sigma_feature
    def compute_bothfeature(self, xyz_sampled, light_idx):
        '''
        Appearance features under a specific light AND under the mean light
        embedding (the latter serves as a lighting-averaged feature).
        args:
            xyz_sampled: (sampled_pts, 3)
            light_idx: (sampled_pts, )
        '''
        # plane + line basis
        coordinate_plane = torch.stack((xyz_sampled[..., self.matMode[0]], xyz_sampled[..., self.matMode[1]], xyz_sampled[..., self.matMode[2]])).detach().view(3, -1, 1, 2)
        coordinate_line = torch.stack((xyz_sampled[..., self.vecMode[0]], xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., self.vecMode[2]]))
        coordinate_line = torch.stack((torch.zeros_like(coordinate_line), coordinate_line), dim=-1).detach().view(3, -1, 1, 2)
        plane_coef_point, line_coef_point = [], []
        for idx_plane in range(len(self.app_plane)):
            plane_coef_point.append(F.grid_sample(self.app_plane[idx_plane], coordinate_plane[[idx_plane]],
                                                  align_corners=True).view(-1, *xyz_sampled.shape[:1]))
            line_coef_point.append(F.grid_sample(self.app_line[idx_plane], coordinate_line[[idx_plane]],
                                                 align_corners=True).view(-1, *xyz_sampled.shape[:1]))
        plane_coef_point, line_coef_point = torch.cat(plane_coef_point), torch.cat(line_coef_point)
        # Per-sample light embedding modulates the appearance components.
        light_coef_point = self.light_line(light_idx.to(xyz_sampled.device)).squeeze(1).permute(1,0)
        radiance_field_feat = self.basis_mat((plane_coef_point * line_coef_point * light_coef_point).T)
        # Mean over all light embeddings -> lighting-averaged feature.
        static_index = torch.arange(self.light_num).to(xyz_sampled.device, dtype=torch.int32) # [light_num, ]
        mean_weight = torch.mean(self.light_line(static_index), dim=0).unsqueeze(-1).expand_as(light_coef_point)
        intrinsic_feat = self.basis_mat((plane_coef_point * line_coef_point * mean_weight).T)
        return radiance_field_feat, intrinsic_feat
    def compute_intrinfeature(self, xyz_sampled):
        '''
        Appearance features under the mean light embedding only.
        args:
            xyz_sampled: (sampled_pts, 3)
        '''
        # plane + line basis
        coordinate_plane = torch.stack((xyz_sampled[..., self.matMode[0]], xyz_sampled[..., self.matMode[1]], xyz_sampled[..., self.matMode[2]])).detach().view(3, -1, 1, 2)
        coordinate_line = torch.stack((xyz_sampled[..., self.vecMode[0]], xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., self.vecMode[2]]))
        coordinate_line = torch.stack((torch.zeros_like(coordinate_line), coordinate_line), dim=-1).detach().view(3, -1, 1, 2)
        plane_coef_point, line_coef_point = [], []
        for idx_plane in range(len(self.app_plane)):
            plane_coef_point.append(F.grid_sample(self.app_plane[idx_plane], coordinate_plane[[idx_plane]],
                                                  align_corners=True).view(-1, *xyz_sampled.shape[:1]))
            line_coef_point.append(F.grid_sample(self.app_line[idx_plane], coordinate_line[[idx_plane]],
                                                 align_corners=True).view(-1, *xyz_sampled.shape[:1]))
        plane_coef_point, line_coef_point = torch.cat(plane_coef_point), torch.cat(line_coef_point)
        static_index = torch.arange(self.light_num).to(xyz_sampled.device, dtype=torch.int32) # [light_num, ]
        mean_weight = torch.mean(self.light_line(static_index), dim=0).unsqueeze(-1).expand_as(plane_coef_point)
        intrinsic_feat = self.basis_mat((plane_coef_point * line_coef_point * mean_weight).T)
        return intrinsic_feat
    def compute_appfeature(self, xyz_sampled, light_idx):
        '''
        Appearance features under the given light only.
        args:
            xyz_sampled: (sampled_pts, 3)
            light_idx: (sampled_pts, )
        '''
        # plane + line basis
        coordinate_plane = torch.stack((xyz_sampled[..., self.matMode[0]], xyz_sampled[..., self.matMode[1]], xyz_sampled[..., self.matMode[2]])).detach().view(3, -1, 1, 2)
        coordinate_line = torch.stack((xyz_sampled[..., self.vecMode[0]], xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., self.vecMode[2]]))
        coordinate_line = torch.stack((torch.zeros_like(coordinate_line), coordinate_line), dim=-1).detach().view(3, -1, 1, 2)
        plane_coef_point, line_coef_point = [], []
        for idx_plane in range(len(self.app_plane)):
            plane_coef_point.append(F.grid_sample(self.app_plane[idx_plane], coordinate_plane[[idx_plane]],
                                                  align_corners=True).view(-1, *xyz_sampled.shape[:1]))
            line_coef_point.append(F.grid_sample(self.app_line[idx_plane], coordinate_line[[idx_plane]],
                                                 align_corners=True).view(-1, *xyz_sampled.shape[:1]))
        plane_coef_point, line_coef_point = torch.cat(plane_coef_point), torch.cat(line_coef_point)
        light_coef_point = self.light_line(light_idx.to(xyz_sampled.device)).squeeze(1).permute(1,0)
        radiance_field_feat = self.basis_mat((plane_coef_point * line_coef_point * light_coef_point).T)
        return radiance_field_feat
    @torch.no_grad()
    def up_sampling_VM(self, plane_coef, line_coef, res_target):
        """Bilinearly upsample one set of plane/line grids to res_target."""
        for i in range(len(self.vecMode)):
            vec_id = self.vecMode[i]
            mat_id_0, mat_id_1 = self.matMode[i]
            plane_coef[i] = torch.nn.Parameter(
                F.interpolate(plane_coef[i].data, size=(res_target[mat_id_1], res_target[mat_id_0]), mode='bilinear',
                              align_corners=True))
            line_coef[i] = torch.nn.Parameter(
                F.interpolate(line_coef[i].data, size=(res_target[vec_id], 1), mode='bilinear', align_corners=True))
        return plane_coef, line_coef
    @torch.no_grad()
    def upsample_volume_grid(self, res_target):
        """Upsample appearance and density grids, then refresh step size."""
        self.app_plane, self.app_line = self.up_sampling_VM(self.app_plane, self.app_line, res_target)
        self.density_plane, self.density_line = self.up_sampling_VM(self.density_plane, self.density_line, res_target)
        self.update_stepSize(res_target)
        print(f'upsamping to {res_target}')
    @torch.no_grad()
    def shrink(self, new_aabb):
        """Crop the factor grids to a tighter bounding box and update the
        model's aabb/step size accordingly."""
        print("====> shrinking ...")
        xyz_min, xyz_max = new_aabb
        t_l, b_r = (xyz_min - self.aabb[0]) / self.units, (xyz_max - self.aabb[0]) / self.units
        # print(new_aabb, self.aabb)
        # print(t_l, b_r,self.alphaMask.alpha_volume.shape)
        # NOTE(review): t_l is rounded twice; the outer round is a no-op.
        t_l, b_r = torch.round(torch.round(t_l)).long(), torch.round(b_r).long() + 1
        b_r = torch.stack([b_r, self.gridSize]).amin(0)
        for i in range(len(self.vecMode)):
            mode0 = self.vecMode[i]
            self.density_line[i] = torch.nn.Parameter(
                self.density_line[i].data[...,t_l[mode0]:b_r[mode0],:]
            )
            self.app_line[i] = torch.nn.Parameter(
                self.app_line[i].data[...,t_l[mode0]:b_r[mode0],:]
            )
            mode0, mode1 = self.matMode[i]
            self.density_plane[i] = torch.nn.Parameter(
                self.density_plane[i].data[...,t_l[mode1]:b_r[mode1],t_l[mode0]:b_r[mode0]]
            )
            self.app_plane[i] = torch.nn.Parameter(
                self.app_plane[i].data[...,t_l[mode1]:b_r[mode1],t_l[mode0]:b_r[mode0]]
            )
        if not torch.all(self.alphaMask.gridSize == self.gridSize):
            # Snap the requested aabb to the voxel boundaries actually kept.
            t_l_r, b_r_r = t_l / (self.gridSize-1), (b_r-1) / (self.gridSize-1)
            correct_aabb = torch.zeros_like(new_aabb)
            correct_aabb[0] = (1-t_l_r)*self.aabb[0] + t_l_r*self.aabb[1]
            correct_aabb[1] = (1-b_r_r)*self.aabb[0] + b_r_r*self.aabb[1]
            print("aabb", new_aabb, "\ncorrect aabb", correct_aabb)
            new_aabb = correct_aabb
        newSize = b_r - t_l
        self.aabb = new_aabb
        self.update_stepSize((newSize[0], newSize[1], newSize[2]))
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
models/tensoRF_init.py | Python | from .tensorBase_init import *
class TensorVM(TensorBase_Init):
def __init__(self, aabb, gridSize, device, **kargs):
super(TensorVM, self).__init__(aabb, gridSize, device, **kargs)
def init_svd_volume(self, res, device):
self.plane_coef = torch.nn.Parameter(
0.1 * torch.randn((3, self.app_n_comp + self.density_n_comp, res, res), device=device))
self.line_coef = torch.nn.Parameter(
0.1 * torch.randn((3, self.app_n_comp + self.density_n_comp, res, 1), device=device))
self.basis_mat = torch.nn.Linear(self.app_n_comp * 3, self.app_dim, bias=False, device=device)
def get_optparam_groups(self, lr_init_spatialxyz = 0.02, lr_init_network = 0.001):
grad_vars = [{'params': self.line_coef, 'lr': lr_init_spatialxyz}, {'params': self.plane_coef, 'lr': lr_init_spatialxyz},
{'params': self.basis_mat.parameters(), 'lr':lr_init_network}]
if isinstance(self.renderModule, torch.nn.Module):
grad_vars += [{'params':self.renderModule.parameters(), 'lr':lr_init_network}]
return grad_vars
def compute_features(self, xyz_sampled):
coordinate_plane = torch.stack((xyz_sampled[..., self.matMode[0]], xyz_sampled[..., self.matMode[1]], xyz_sampled[..., self.matMode[2]])).detach()
coordinate_line = torch.stack((xyz_sampled[..., self.vecMode[0]], xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., self.vecMode[2]]))
coordinate_line = torch.stack((torch.zeros_like(coordinate_line), coordinate_line), dim=-1).detach()
plane_feats = F.grid_sample(self.plane_coef[:, -self.density_n_comp:], coordinate_plane, align_corners=True).view(
-1, *xyz_sampled.shape[:1])
line_feats = F.grid_sample(self.line_coef[:, -self.density_n_comp:], coordinate_line, align_corners=True).view(
-1, *xyz_sampled.shape[:1])
sigma_feature = torch.sum(plane_feats * line_feats, dim=0)
plane_feats = F.grid_sample(self.plane_coef[:, :self.app_n_comp], coordinate_plane, align_corners=True).view(3 * self.app_n_comp, -1)
line_feats = F.grid_sample(self.line_coef[:, :self.app_n_comp], coordinate_line, align_corners=True).view(3 * self.app_n_comp, -1)
app_features = self.basis_mat((plane_feats * line_feats).T)
return sigma_feature, app_features
def compute_densityfeature(self, xyz_sampled):
    """Return (N,) density features for xyz_sampled (N, 3).

    Samples the trailing density_n_comp components of the shared plane/line grids
    and sums the per-component plane*line products.
    """
    planes = torch.stack(
        (xyz_sampled[..., self.matMode[0]],
         xyz_sampled[..., self.matMode[1]],
         xyz_sampled[..., self.matMode[2]])).detach().view(3, -1, 1, 2)
    lines = torch.stack(
        (xyz_sampled[..., self.vecMode[0]],
         xyz_sampled[..., self.vecMode[1]],
         xyz_sampled[..., self.vecMode[2]]))
    lines = torch.stack((torch.zeros_like(lines), lines), dim=-1).detach().view(3, -1, 1, 2)
    n_pts = xyz_sampled.shape[0]
    plane_feats = F.grid_sample(self.plane_coef[:, -self.density_n_comp:], planes,
                                align_corners=True).view(-1, n_pts)
    line_feats = F.grid_sample(self.line_coef[:, -self.density_n_comp:], lines,
                               align_corners=True).view(-1, n_pts)
    return (plane_feats * line_feats).sum(dim=0)
def compute_appfeature(self, xyz_sampled):
    """Return (N, app_dim) appearance features for xyz_sampled (N, 3).

    Samples the leading app_n_comp components of the shared grids and mixes the
    plane*line products through basis_mat.
    """
    planes = torch.stack(
        (xyz_sampled[..., self.matMode[0]],
         xyz_sampled[..., self.matMode[1]],
         xyz_sampled[..., self.matMode[2]])).detach().view(3, -1, 1, 2)
    lines = torch.stack(
        (xyz_sampled[..., self.vecMode[0]],
         xyz_sampled[..., self.vecMode[1]],
         xyz_sampled[..., self.vecMode[2]]))
    lines = torch.stack((torch.zeros_like(lines), lines), dim=-1).detach().view(3, -1, 1, 2)
    width = 3 * self.app_n_comp
    plane_feats = F.grid_sample(self.plane_coef[:, :self.app_n_comp], planes,
                                align_corners=True).view(width, -1)
    line_feats = F.grid_sample(self.line_coef[:, :self.app_n_comp], lines,
                               align_corners=True).view(width, -1)
    return self.basis_mat((plane_feats * line_feats).T)
def vectorDiffs(self, vector_comps):
    """Orthogonality penalty on line factors.

    For each component set (n_comp, n_size, 1), computes the Gram matrix of the
    flattened components and returns the summed mean absolute off-diagonal entry.
    """
    total = 0
    for comp in vector_comps:
        n_comp, n_size = comp.shape[:-1]
        flat = comp.view(n_comp, n_size)
        gram = flat @ flat.transpose(-1, -2)
        # Drop the first entry, fold into (n_comp-1, n_comp+1), drop the last
        # column: this strips exactly the diagonal of the Gram matrix.
        off_diag = gram.view(-1)[1:].view(n_comp - 1, n_comp + 1)[..., :-1]
        total = total + off_diag.abs().mean()
    return total
def vector_comp_diffs(self):
    """Orthogonality penalty over both the density and the appearance line factors."""
    density_part = self.vectorDiffs(self.line_coef[:, -self.density_n_comp:])
    app_part = self.vectorDiffs(self.line_coef[:, :self.app_n_comp])
    return density_part + app_part
@torch.no_grad()
def up_sampling_VM(self, plane_coef, line_coef, res_target):
    """Bilinearly upsample each per-axis plane/line factor to res_target.

    Args:
        plane_coef, line_coef: indexable containers of factor tensors, one entry
            per axis (indexed via self.matMode / self.vecMode).
        res_target: target grid size per axis.
    Returns:
        The (mutated in place) plane_coef and line_coef containers holding new
        Parameters at the target resolution.
    """
    # The large commented-out per-axis unrolled version that used to live here
    # was dead code duplicating this loop and has been removed.
    for i in range(len(self.vecMode)):
        vec_id = self.vecMode[i]
        mat_id_0, mat_id_1 = self.matMode[i]
        plane_coef[i] = torch.nn.Parameter(
            F.interpolate(plane_coef[i].data,
                          size=(res_target[mat_id_1], res_target[mat_id_0]),
                          mode='bilinear', align_corners=True))
        line_coef[i] = torch.nn.Parameter(
            F.interpolate(line_coef[i].data, size=(res_target[vec_id], 1),
                          mode='bilinear', align_corners=True))
    return plane_coef, line_coef
@torch.no_grad()
def upsample_volume_grid(self, res_target):
    """Upsample the shared plane/line grids to res_target and refresh the step size.

    Assumes a cubic grid: all three axes use res_target[0].
    """
    scale = res_target[0] / self.line_coef.shape[2]  # assuming xyz have the same scale
    plane_coef = F.interpolate(self.plane_coef.detach().data, scale_factor=scale,
                               mode='bilinear', align_corners=True)
    line_coef = F.interpolate(self.line_coef.detach().data, size=(res_target[0], 1),
                              mode='bilinear', align_corners=True)
    self.plane_coef, self.line_coef = torch.nn.Parameter(plane_coef), torch.nn.Parameter(line_coef)
    # Fixed: this previously called self.compute_stepSize, a name no sibling class
    # in this file defines or uses -- every other subclass calls the base-class
    # update_stepSize after a resolution change.
    self.update_stepSize(res_target)
    print(f'upsamping to {res_target}')
class TensorVMSplit(TensorBase_Init):
    """TensoRF vector-matrix (VM) decomposition with split density/appearance factors.

    Each axis pair owns a 2-D plane factor and each axis a 1-D line factor; a point's
    feature is the product of its sampled plane and line values, summed over components.
    Density and appearance each hold their own plane/line banks.
    """

    def __init__(self, aabb, gridSize, device, **kargs):
        super(TensorVMSplit, self).__init__(aabb, gridSize, device, **kargs)

    def init_svd_volume(self, res, device):
        """Allocate independent density/appearance factor banks and the appearance basis."""
        self.density_plane, self.density_line = self.init_one_svd(self.density_n_comp, self.gridSize, 0.1, device)
        self.app_plane, self.app_line = self.init_one_svd(self.app_n_comp, self.gridSize, 0.1, device)
        # basis_mat mixes the concatenated appearance components down to app_dim.
        self.basis_mat = torch.nn.Linear(sum(self.app_n_comp), self.app_dim, bias=False).to(device)

    def init_one_svd(self, n_component, gridSize, scale, device):
        """Create one bank of per-axis plane/line Parameters with N(0, scale^2) init."""
        plane_coef, line_coef = [], []
        for i in range(len(self.vecMode)):
            vec_id = self.vecMode[i]
            mat_id_0, mat_id_1 = self.matMode[i]
            plane_coef.append(torch.nn.Parameter(
                scale * torch.randn((1, n_component[i], gridSize[mat_id_1], gridSize[mat_id_0]))))
            line_coef.append(
                torch.nn.Parameter(scale * torch.randn((1, n_component[i], gridSize[vec_id], 1))))
        return torch.nn.ParameterList(plane_coef).to(device), torch.nn.ParameterList(line_coef).to(device)

    def get_optparam_groups(self, lr_init_spatialxyz = 0.02, lr_init_network = 0.001):
        """Optimizer groups: spatial factor grids vs. network (basis / render) weights."""
        grad_vars = [{'params': self.density_line, 'lr': lr_init_spatialxyz}, {'params': self.density_plane, 'lr': lr_init_spatialxyz},
                     {'params': self.app_line, 'lr': lr_init_spatialxyz}, {'params': self.app_plane, 'lr': lr_init_spatialxyz},
                     {'params': self.basis_mat.parameters(), 'lr':lr_init_network}]
        if isinstance(self.renderModule, torch.nn.Module):
            grad_vars += [{'params':self.renderModule.parameters(), 'lr':lr_init_network}]
        return grad_vars

    def vectorDiffs(self, vector_comps):
        """Mean |off-diagonal| of each line factor's Gram matrix (orthogonality penalty)."""
        total = 0
        for idx in range(len(vector_comps)):
            n_comp, n_size = vector_comps[idx].shape[1:-1]
            dotp = torch.matmul(vector_comps[idx].view(n_comp,n_size), vector_comps[idx].view(n_comp,n_size).transpose(-1,-2))
            # Drop the first element, fold, drop the last column: strips the diagonal.
            non_diagonal = dotp.view(-1)[1:].view(n_comp-1, n_comp+1)[...,:-1]
            total = total + torch.mean(torch.abs(non_diagonal))
        return total

    def vector_comp_diffs(self):
        """Orthogonality penalty over density and appearance line factors."""
        return self.vectorDiffs(self.density_line) + self.vectorDiffs(self.app_line)

    def density_L1(self):
        """L1 regularizer over all density plane and line factors."""
        total = 0
        for idx in range(len(self.density_plane)):
            total = total + torch.mean(torch.abs(self.density_plane[idx])) + torch.mean(torch.abs(self.density_line[idx]))
        return total

    def TV_loss_density(self, reg):
        """Total-variation loss on density factors (planes weighted 1e-2, lines 1e-3)."""
        total = 0
        for idx in range(len(self.density_plane)):
            total = total + reg(self.density_plane[idx]) * 1e-2 + reg(self.density_line[idx]) * 1e-3
        return total

    def TV_loss_app(self, reg):
        """Total-variation loss on appearance factors (planes weighted 1e-2, lines 1e-3)."""
        total = 0
        for idx in range(len(self.app_plane)):
            total = total + reg(self.app_plane[idx]) * 1e-2 + reg(self.app_line[idx]) * 1e-3
        return total

    def compute_densityfeature(self, xyz_sampled):
        """Sum of plane*line density factor products at xyz_sampled (N, 3); returns (N,)."""
        # plane + line basis
        coordinate_plane = torch.stack((xyz_sampled[..., self.matMode[0]], xyz_sampled[..., self.matMode[1]], xyz_sampled[..., self.matMode[2]])).detach().view(3, -1, 1, 2)
        coordinate_line = torch.stack((xyz_sampled[..., self.vecMode[0]], xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., self.vecMode[2]]))
        coordinate_line = torch.stack((torch.zeros_like(coordinate_line), coordinate_line), dim=-1).detach().view(3, -1, 1, 2)
        sigma_feature = torch.zeros((xyz_sampled.shape[0],), device=xyz_sampled.device)
        for idx_plane in range(len(self.density_plane)):
            plane_coef_point = F.grid_sample(self.density_plane[idx_plane], coordinate_plane[[idx_plane]],
                                             align_corners=True).view(-1, *xyz_sampled.shape[:1])
            line_coef_point = F.grid_sample(self.density_line[idx_plane], coordinate_line[[idx_plane]],
                                            align_corners=True).view(-1, *xyz_sampled.shape[:1])
            sigma_feature = sigma_feature + torch.sum(plane_coef_point * line_coef_point, dim=0)
        return sigma_feature

    def compute_appfeature(self, xyz_sampled):
        """Appearance features (N, app_dim): concatenated factor products through basis_mat."""
        # plane + line basis
        coordinate_plane = torch.stack((xyz_sampled[..., self.matMode[0]], xyz_sampled[..., self.matMode[1]], xyz_sampled[..., self.matMode[2]])).detach().view(3, -1, 1, 2)
        coordinate_line = torch.stack((xyz_sampled[..., self.vecMode[0]], xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., self.vecMode[2]]))
        coordinate_line = torch.stack((torch.zeros_like(coordinate_line), coordinate_line), dim=-1).detach().view(3, -1, 1, 2)
        plane_coef_point,line_coef_point = [],[]
        for idx_plane in range(len(self.app_plane)):
            plane_coef_point.append(F.grid_sample(self.app_plane[idx_plane], coordinate_plane[[idx_plane]],
                                                  align_corners=True).view(-1, *xyz_sampled.shape[:1]))
            line_coef_point.append(F.grid_sample(self.app_line[idx_plane], coordinate_line[[idx_plane]],
                                                 align_corners=True).view(-1, *xyz_sampled.shape[:1]))
        plane_coef_point, line_coef_point = torch.cat(plane_coef_point), torch.cat(line_coef_point)
        return self.basis_mat((plane_coef_point * line_coef_point).T)

    @torch.no_grad()
    def up_sampling_VM(self, plane_coef, line_coef, res_target):
        """Bilinearly upsample each plane/line factor to the per-axis res_target sizes."""
        for i in range(len(self.vecMode)):
            vec_id = self.vecMode[i]
            mat_id_0, mat_id_1 = self.matMode[i]
            plane_coef[i] = torch.nn.Parameter(
                F.interpolate(plane_coef[i].data, size=(res_target[mat_id_1], res_target[mat_id_0]), mode='bilinear',
                              align_corners=True))
            line_coef[i] = torch.nn.Parameter(
                F.interpolate(line_coef[i].data, size=(res_target[vec_id], 1), mode='bilinear', align_corners=True))
        return plane_coef, line_coef

    @torch.no_grad()
    def upsample_volume_grid(self, res_target):
        """Upsample both factor banks and refresh the ray-marching step size."""
        self.app_plane, self.app_line = self.up_sampling_VM(self.app_plane, self.app_line, res_target)
        self.density_plane, self.density_line = self.up_sampling_VM(self.density_plane, self.density_line, res_target)
        self.update_stepSize(res_target)
        print(f'upsamping to {res_target}')

    @torch.no_grad()
    def shrink(self, new_aabb):
        """Crop all factor grids to new_aabb (converted to voxel indices) and update aabb.

        Requires self.alphaMask to be set (its gridSize decides whether the aabb is
        snapped to the actually-cropped voxel boundaries).
        """
        print("====> shrinking ...")
        xyz_min, xyz_max = new_aabb
        t_l, b_r = (xyz_min - self.aabb[0]) / self.units, (xyz_max - self.aabb[0]) / self.units
        # NOTE(review): the inner torch.round is redundant (round of a round).
        t_l, b_r = torch.round(torch.round(t_l)).long(), torch.round(b_r).long() + 1
        b_r = torch.stack([b_r, self.gridSize]).amin(0)
        for i in range(len(self.vecMode)):
            mode0 = self.vecMode[i]
            self.density_line[i] = torch.nn.Parameter(
                self.density_line[i].data[...,t_l[mode0]:b_r[mode0],:]
            )
            self.app_line[i] = torch.nn.Parameter(
                self.app_line[i].data[...,t_l[mode0]:b_r[mode0],:]
            )
            mode0, mode1 = self.matMode[i]
            self.density_plane[i] = torch.nn.Parameter(
                self.density_plane[i].data[...,t_l[mode1]:b_r[mode1],t_l[mode0]:b_r[mode0]]
            )
            self.app_plane[i] = torch.nn.Parameter(
                self.app_plane[i].data[...,t_l[mode1]:b_r[mode1],t_l[mode0]:b_r[mode0]]
            )
        # When the alpha-mask grid disagrees with the feature grid, recompute the aabb
        # from the integer crop so it matches the kept voxels exactly.
        if not torch.all(self.alphaMask.gridSize == self.gridSize):
            t_l_r, b_r_r = t_l / (self.gridSize-1), (b_r-1) / (self.gridSize-1)
            correct_aabb = torch.zeros_like(new_aabb)
            correct_aabb[0] = (1-t_l_r)*self.aabb[0] + t_l_r*self.aabb[1]
            correct_aabb[1] = (1-b_r_r)*self.aabb[0] + b_r_r*self.aabb[1]
            print("aabb", new_aabb, "\ncorrect aabb", correct_aabb)
            new_aabb = correct_aabb
        newSize = b_r - t_l
        self.aabb = new_aabb
        self.update_stepSize((newSize[0], newSize[1], newSize[2]))
class TensorCP(TensorBase_Init):
    """TensoRF CP decomposition: one 1-D line factor per axis.

    A point's feature is the elementwise product of the three sampled line factors,
    summed (density) or mixed through basis_mat (appearance). No plane factors.
    """

    def __init__(self, aabb, gridSize, device, **kargs):
        super(TensorCP, self).__init__(aabb, gridSize, device, **kargs)

    def init_svd_volume(self, res, device):
        # CP uses a single scalar component count shared across the three axes.
        self.density_line = self.init_one_svd(self.density_n_comp[0], self.gridSize, 0.2, device)
        self.app_line = self.init_one_svd(self.app_n_comp[0], self.gridSize, 0.2, device)
        self.basis_mat = torch.nn.Linear(self.app_n_comp[0], self.app_dim, bias=False).to(device)

    def init_one_svd(self, n_component, gridSize, scale, device):
        """Allocate per-axis 1-D line Parameters with N(0, scale^2) init."""
        line_coef = []
        for i in range(len(self.vecMode)):
            vec_id = self.vecMode[i]
            line_coef.append(
                torch.nn.Parameter(scale * torch.randn((1, n_component, gridSize[vec_id], 1))))
        return torch.nn.ParameterList(line_coef).to(device)

    def get_optparam_groups(self, lr_init_spatialxyz = 0.02, lr_init_network = 0.001):
        """Optimizer groups: line factors vs. network (basis / render) weights."""
        grad_vars = [{'params': self.density_line, 'lr': lr_init_spatialxyz},
                     {'params': self.app_line, 'lr': lr_init_spatialxyz},
                     {'params': self.basis_mat.parameters(), 'lr':lr_init_network}]
        if isinstance(self.renderModule, torch.nn.Module):
            grad_vars += [{'params':self.renderModule.parameters(), 'lr':lr_init_network}]
        return grad_vars

    def compute_densityfeature(self, xyz_sampled):
        """Density feature (N,) at xyz_sampled (N, 3): product of the three line samples, summed."""
        coordinate_line = torch.stack((xyz_sampled[..., self.vecMode[0]], xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., self.vecMode[2]]))
        coordinate_line = torch.stack((torch.zeros_like(coordinate_line), coordinate_line), dim=-1).detach().view(3, -1, 1, 2)
        line_coef_point = F.grid_sample(self.density_line[0], coordinate_line[[0]],
                                        align_corners=True).view(-1, *xyz_sampled.shape[:1])
        line_coef_point = line_coef_point * F.grid_sample(self.density_line[1], coordinate_line[[1]],
                                                          align_corners=True).view(-1, *xyz_sampled.shape[:1])
        line_coef_point = line_coef_point * F.grid_sample(self.density_line[2], coordinate_line[[2]],
                                                          align_corners=True).view(-1, *xyz_sampled.shape[:1])
        sigma_feature = torch.sum(line_coef_point, dim=0)
        return sigma_feature

    def compute_appfeature(self, xyz_sampled):
        """Appearance features (N, app_dim): per-component line products through basis_mat."""
        coordinate_line = torch.stack(
            (xyz_sampled[..., self.vecMode[0]], xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., self.vecMode[2]]))
        coordinate_line = torch.stack((torch.zeros_like(coordinate_line), coordinate_line), dim=-1).detach().view(3, -1, 1, 2)
        line_coef_point = F.grid_sample(self.app_line[0], coordinate_line[[0]],
                                        align_corners=True).view(-1, *xyz_sampled.shape[:1])
        line_coef_point = line_coef_point * F.grid_sample(self.app_line[1], coordinate_line[[1]],
                                                          align_corners=True).view(-1, *xyz_sampled.shape[:1])
        line_coef_point = line_coef_point * F.grid_sample(self.app_line[2], coordinate_line[[2]],
                                                          align_corners=True).view(-1, *xyz_sampled.shape[:1])
        return self.basis_mat(line_coef_point.T)

    @torch.no_grad()
    def up_sampling_Vector(self, density_line_coef, app_line_coef, res_target):
        """Bilinearly upsample both line-factor banks to the per-axis res_target sizes."""
        for i in range(len(self.vecMode)):
            vec_id = self.vecMode[i]
            density_line_coef[i] = torch.nn.Parameter(
                F.interpolate(density_line_coef[i].data, size=(res_target[vec_id], 1), mode='bilinear', align_corners=True))
            app_line_coef[i] = torch.nn.Parameter(
                F.interpolate(app_line_coef[i].data, size=(res_target[vec_id], 1), mode='bilinear', align_corners=True))
        return density_line_coef, app_line_coef

    @torch.no_grad()
    def upsample_volume_grid(self, res_target):
        """Upsample the line factors and refresh the ray-marching step size."""
        self.density_line, self.app_line = self.up_sampling_Vector(self.density_line, self.app_line, res_target)
        self.update_stepSize(res_target)
        print(f'upsamping to {res_target}')

    @torch.no_grad()
    def shrink(self, new_aabb):
        """Crop line factors to new_aabb (converted to voxel indices) and update aabb.

        Requires self.alphaMask to be set for the grid-size comparison below.
        """
        print("====> shrinking ...")
        xyz_min, xyz_max = new_aabb
        t_l, b_r = (xyz_min - self.aabb[0]) / self.units, (xyz_max - self.aabb[0]) / self.units
        # NOTE(review): the inner torch.round is redundant (round of a round).
        t_l, b_r = torch.round(torch.round(t_l)).long(), torch.round(b_r).long() + 1
        b_r = torch.stack([b_r, self.gridSize]).amin(0)
        for i in range(len(self.vecMode)):
            mode0 = self.vecMode[i]
            self.density_line[i] = torch.nn.Parameter(
                self.density_line[i].data[...,t_l[mode0]:b_r[mode0],:]
            )
            self.app_line[i] = torch.nn.Parameter(
                self.app_line[i].data[...,t_l[mode0]:b_r[mode0],:]
            )
        # Snap the aabb to the actually-cropped voxel boundaries when the alpha-mask
        # grid resolution differs from the feature grid.
        if not torch.all(self.alphaMask.gridSize == self.gridSize):
            t_l_r, b_r_r = t_l / (self.gridSize-1), (b_r-1) / (self.gridSize-1)
            correct_aabb = torch.zeros_like(new_aabb)
            correct_aabb[0] = (1-t_l_r)*self.aabb[0] + t_l_r*self.aabb[1]
            correct_aabb[1] = (1-b_r_r)*self.aabb[0] + b_r_r*self.aabb[1]
            print("aabb", new_aabb, "\ncorrect aabb", correct_aabb)
            new_aabb = correct_aabb
        newSize = b_r - t_l
        self.aabb = new_aabb
        self.update_stepSize((newSize[0], newSize[1], newSize[2]))

    def density_L1(self):
        """L1 regularizer over the density line factors."""
        total = 0
        for idx in range(len(self.density_line)):
            total = total + torch.mean(torch.abs(self.density_line[idx]))
        return total

    def TV_loss_density(self, reg):
        """Total-variation loss on the density line factors."""
        total = 0
        for idx in range(len(self.density_line)):
            total = total + reg(self.density_line[idx]) * 1e-3
        return total

    def TV_loss_app(self, reg):
        """Total-variation loss on the appearance line factors."""
        total = 0
        for idx in range(len(self.app_line)):
            total = total + reg(self.app_line[idx]) * 1e-3
        return total
models/tensoRF_rotated_lights.py | Python | from .tensorBase_rotated_lights import *
from .relight_utils import grid_sample
class TensorVMSplit(TensorBase):
    """VM-decomposed TensoRF variant for multi-light relighting.

    Extends the split plane/line factorization with a per-light embedding
    (light_line) that modulates the appearance components, so the same spatial
    factors can explain the scene under several lighting conditions.
    """

    def __init__(self, aabb, gridSize, device, **kargs):
        super(TensorVMSplit, self).__init__(aabb, gridSize, device, **kargs)

    def init_svd_volume(self, res, device):
        """Allocate density/appearance factor banks, the appearance basis, and the light embedding."""
        self.density_plane, self.density_line = self.init_one_svd(self.density_n_comp, self.gridSize, 0.1, device)
        self.app_plane, self.app_line = self.init_one_svd(self.app_n_comp, self.gridSize, 0.1, device)
        self.basis_mat = torch.nn.Linear(sum(self.app_n_comp), self.app_dim, bias=False).to(device)
        # used for factorize the radiance fields under different lighting conditions
        self.light_line = torch.nn.Embedding(self.light_num, sum(self.app_n_comp)).to(device)  # (light_num, sum(self.app_n_comp)), such as (10, 16+16+16)

    def init_one_svd(self, n_component, gridSize, scale, device):
        """Create one bank of per-axis plane/line Parameters with N(0, scale^2) init."""
        plane_coef, line_coef = [], []
        for i in range(len(self.vecMode)):
            vec_id = self.vecMode[i]
            mat_id_0, mat_id_1 = self.matMode[i]
            plane_coef.append(torch.nn.Parameter(
                scale * torch.randn((1, n_component[i], gridSize[mat_id_1], gridSize[mat_id_0]))))
            line_coef.append(
                torch.nn.Parameter(scale * torch.randn((1, n_component[i], gridSize[vec_id], 1))))
        return torch.nn.ParameterList(plane_coef).to(device), torch.nn.ParameterList(line_coef).to(device)

    def get_optparam_groups(self, lr_init_spatialxyz = 0.02, lr_init_network = 0.001):
        """Optimizer groups: spatial grids, light embedding/lights, and the render networks."""
        grad_vars = [ {'params': self.density_line, 'lr': lr_init_spatialxyz},
                      {'params': self.density_plane, 'lr': lr_init_spatialxyz},
                      {'params': self.app_line, 'lr': lr_init_spatialxyz},
                      {'params': self.app_plane, 'lr': lr_init_spatialxyz},
                      {'params': self.basis_mat.parameters(), 'lr':lr_init_network}]
        # TODO: merge the learing rates of the following two groups into config file
        grad_vars += [ {'params': self.light_line.parameters(), 'lr':0.001}]
        if self.light_kind == 'pixel':
            grad_vars += [{'params': self._light_rgbs, 'lr':0.001}]
        elif self.light_kind == 'sg':
            grad_vars += [{'params': self.lgtSGs, 'lr':0.001}]
        if isinstance(self.renderModule, torch.nn.Module):
            grad_vars += [{'params':self.renderModule.parameters(), 'lr':lr_init_network}]
        if isinstance(self.renderModule_brdf, torch.nn.Module):
            grad_vars += [{'params':self.renderModule_brdf.parameters(), 'lr':lr_init_network}]
        if (self.normals_kind == "purely_predicted" or self.normals_kind == "derived_plus_predicted" or self.normals_kind == "residue_prediction") \
                and isinstance(self.renderModule_normal, torch.nn.Module):
            grad_vars += [{'params':self.renderModule_normal.parameters(), 'lr':lr_init_network}]
        return grad_vars

    def vectorDiffs(self, vector_comps):
        """Mean |off-diagonal| of each line factor's Gram matrix (orthogonality penalty)."""
        total = 0
        for idx in range(len(vector_comps)):
            n_comp, n_size = vector_comps[idx].shape[1:-1]
            dotp = torch.matmul(vector_comps[idx].view(n_comp,n_size), vector_comps[idx].view(n_comp,n_size).transpose(-1,-2))
            # Drop the first element, fold, drop the last column: strips the diagonal.
            non_diagonal = dotp.view(-1)[1:].view(n_comp-1, n_comp+1)[...,:-1]
            total = total + torch.mean(torch.abs(non_diagonal))
        return total

    def vector_comp_diffs(self):
        """Orthogonality penalty over density and appearance line factors."""
        return self.vectorDiffs(self.density_line) + self.vectorDiffs(self.app_line)

    def density_L1(self):
        """L1 regularizer over all density plane and line factors."""
        total = 0
        for idx in range(len(self.density_plane)):
            total = total + torch.mean(torch.abs(self.density_plane[idx])) + torch.mean(torch.abs(self.density_line[idx]))
        return total

    def TV_loss_density(self, reg):
        """Total-variation loss on density planes only (line term intentionally disabled)."""
        total = 0
        for idx in range(len(self.density_plane)):
            total = total + reg(self.density_plane[idx]) * 1e-2 #+ reg(self.density_line[idx]) * 1e-3
        return total

    def TV_loss_app(self, reg):
        """Total-variation loss on appearance planes only (line term intentionally disabled)."""
        total = 0
        for idx in range(len(self.app_plane)):
            total = total + reg(self.app_plane[idx]) * 1e-2 #+ reg(self.app_line[idx]) * 1e-3
        return total

    def compute_densityfeature(self, xyz_sampled):
        """Sum of plane*line density factor products at xyz_sampled (N, 3); returns (N,)."""
        # plane + line basis
        coordinate_plane = torch.stack((xyz_sampled[..., self.matMode[0]], xyz_sampled[..., self.matMode[1]], xyz_sampled[..., self.matMode[2]])).detach().view(3, -1, 1, 2)
        coordinate_line = torch.stack((xyz_sampled[..., self.vecMode[0]], xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., self.vecMode[2]]))
        coordinate_line = torch.stack((torch.zeros_like(coordinate_line), coordinate_line), dim=-1).detach().view(3, -1, 1, 2)
        sigma_feature = torch.zeros((xyz_sampled.shape[0],), device=xyz_sampled.device)
        for idx_plane in range(len(self.density_plane)):
            plane_coef_point = F.grid_sample(self.density_plane[idx_plane], coordinate_plane[[idx_plane]],
                                             align_corners=True).view(-1, *xyz_sampled.shape[:1])  # [16, h*w*n] [component_num, point_num]
            line_coef_point = F.grid_sample(self.density_line[idx_plane], coordinate_line[[idx_plane]],
                                            align_corners=True).view(-1, *xyz_sampled.shape[:1])  # [16, h*w*n]
            sigma_feature = sigma_feature + torch.sum(plane_coef_point * line_coef_point, dim=0)
        return sigma_feature

    def compute_densityfeature_with_xyz_grad(self, xyz_sampled):
        """Density features that stay differentiable w.r.t. xyz_sampled.

        Same computation as compute_densityfeature(), but without .detach() on the
        coordinates and with the rewritten grid_sample, which supports second-order
        gradients (needed to derive normals from the density field).
        """
        # plane + line basis
        coordinate_plane = torch.stack((xyz_sampled[..., self.matMode[0]], xyz_sampled[..., self.matMode[1]], xyz_sampled[..., self.matMode[2]])).view(3, -1, 1, 2)
        coordinate_line = torch.stack((xyz_sampled[..., self.vecMode[0]], xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., self.vecMode[2]]))
        coordinate_line = torch.stack((torch.zeros_like(coordinate_line), coordinate_line), dim=-1).view(3, -1, 1, 2)
        sigma_feature = torch.zeros((xyz_sampled.shape[0],), device=xyz_sampled.device)
        for idx_plane in range(len(self.density_plane)):
            # PyTorch's F.grid_sample lacks the double-backward needed here.
            plane_coef_point = grid_sample(self.density_plane[idx_plane], coordinate_plane[[idx_plane]]).view(-1, *xyz_sampled.shape[:1])  # [16, h*w*n] [component_num, point_num]
            line_coef_point = grid_sample(self.density_line[idx_plane], coordinate_line[[idx_plane]]).view(-1, *xyz_sampled.shape[:1])  # [16, h*w*n]
            sigma_feature = sigma_feature + torch.sum(plane_coef_point * line_coef_point, dim=0)
        return sigma_feature

    def compute_bothfeature(self, xyz_sampled, light_idx):
        '''
        Compute both the light-conditioned radiance feature and the light-averaged
        intrinsic feature at the sampled points.

        args:
            xyz_sampled: (sampled_pts, 3)
            light_idx: (sampled_pts, ) index of the lighting condition per point
        '''
        # plane + line basis
        coordinate_plane = torch.stack((xyz_sampled[..., self.matMode[0]], xyz_sampled[..., self.matMode[1]], xyz_sampled[..., self.matMode[2]])).detach().view(3, -1, 1, 2)
        coordinate_line = torch.stack((xyz_sampled[..., self.vecMode[0]], xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., self.vecMode[2]]))
        coordinate_line = torch.stack((torch.zeros_like(coordinate_line), coordinate_line), dim=-1).detach().view(3, -1, 1, 2)
        plane_coef_point, line_coef_point = [], []
        for idx_plane in range(len(self.app_plane)):
            plane_coef_point.append(F.grid_sample(self.app_plane[idx_plane], coordinate_plane[[idx_plane]],
                                                  align_corners=True).view(-1, *xyz_sampled.shape[:1]))
            line_coef_point.append(F.grid_sample(self.app_line[idx_plane], coordinate_line[[idx_plane]],
                                                 align_corners=True).view(-1, *xyz_sampled.shape[:1]))
        plane_coef_point, line_coef_point = torch.cat(plane_coef_point), torch.cat(line_coef_point)
        # Per-point light weights modulate the appearance components.
        light_coef_point = self.light_line(light_idx.to(xyz_sampled.device)).squeeze(1).permute(1,0)
        radiance_field_feat = self.basis_mat((plane_coef_point * line_coef_point * light_coef_point).T)
        # Intrinsic feature: replace the per-light weight with its mean over all lights.
        static_index = torch.arange(self.light_num).to(xyz_sampled.device, dtype=torch.int32)  # [light_num, ]
        mean_weight = torch.mean(self.light_line(static_index), dim=0).unsqueeze(-1).expand_as(light_coef_point)
        intrinsic_feat = self.basis_mat((plane_coef_point * line_coef_point * mean_weight).T)
        return radiance_field_feat, intrinsic_feat

    def compute_intrinfeature(self, xyz_sampled):
        '''
        Compute the light-averaged intrinsic feature only.

        args:
            xyz_sampled: (sampled_pts, 3)
        '''
        # plane + line basis
        coordinate_plane = torch.stack((xyz_sampled[..., self.matMode[0]], xyz_sampled[..., self.matMode[1]], xyz_sampled[..., self.matMode[2]])).detach().view(3, -1, 1, 2)
        coordinate_line = torch.stack((xyz_sampled[..., self.vecMode[0]], xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., self.vecMode[2]]))
        coordinate_line = torch.stack((torch.zeros_like(coordinate_line), coordinate_line), dim=-1).detach().view(3, -1, 1, 2)
        plane_coef_point, line_coef_point = [], []
        for idx_plane in range(len(self.app_plane)):
            plane_coef_point.append(F.grid_sample(self.app_plane[idx_plane], coordinate_plane[[idx_plane]],
                                                  align_corners=True).view(-1, *xyz_sampled.shape[:1]))
            line_coef_point.append(F.grid_sample(self.app_line[idx_plane], coordinate_line[[idx_plane]],
                                                 align_corners=True).view(-1, *xyz_sampled.shape[:1]))
        plane_coef_point, line_coef_point = torch.cat(plane_coef_point), torch.cat(line_coef_point)
        static_index = torch.arange(self.light_num).to(xyz_sampled.device, dtype=torch.int32)  # [light_num, ]
        mean_weight = torch.mean(self.light_line(static_index), dim=0).unsqueeze(-1).expand_as(plane_coef_point)
        intrinsic_feat = self.basis_mat((plane_coef_point * line_coef_point * mean_weight).T)
        return intrinsic_feat

    def compute_appfeature(self, xyz_sampled, light_idx):
        '''
        Compute the light-conditioned radiance feature at the sampled points.

        args:
            xyz_sampled: (sampled_pts, 3)
            light_idx: (sampled_pts, ) index of the lighting condition per point
        '''
        # plane + line basis
        coordinate_plane = torch.stack((xyz_sampled[..., self.matMode[0]], xyz_sampled[..., self.matMode[1]], xyz_sampled[..., self.matMode[2]])).detach().view(3, -1, 1, 2)
        coordinate_line = torch.stack((xyz_sampled[..., self.vecMode[0]], xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., self.vecMode[2]]))
        coordinate_line = torch.stack((torch.zeros_like(coordinate_line), coordinate_line), dim=-1).detach().view(3, -1, 1, 2)
        plane_coef_point, line_coef_point = [], []
        for idx_plane in range(len(self.app_plane)):
            plane_coef_point.append(F.grid_sample(self.app_plane[idx_plane], coordinate_plane[[idx_plane]],
                                                  align_corners=True).view(-1, *xyz_sampled.shape[:1]))
            line_coef_point.append(F.grid_sample(self.app_line[idx_plane], coordinate_line[[idx_plane]],
                                                 align_corners=True).view(-1, *xyz_sampled.shape[:1]))
        plane_coef_point, line_coef_point = torch.cat(plane_coef_point), torch.cat(line_coef_point)
        light_coef_point = self.light_line(light_idx.to(xyz_sampled.device)).squeeze(1).permute(1,0)
        radiance_field_feat = self.basis_mat((plane_coef_point * line_coef_point * light_coef_point).T)
        return radiance_field_feat

    @torch.no_grad()
    def up_sampling_VM(self, plane_coef, line_coef, res_target):
        """Bilinearly upsample each plane/line factor to the per-axis res_target sizes."""
        for i in range(len(self.vecMode)):
            vec_id = self.vecMode[i]
            mat_id_0, mat_id_1 = self.matMode[i]
            plane_coef[i] = torch.nn.Parameter(
                F.interpolate(plane_coef[i].data, size=(res_target[mat_id_1], res_target[mat_id_0]), mode='bilinear',
                              align_corners=True))
            line_coef[i] = torch.nn.Parameter(
                F.interpolate(line_coef[i].data, size=(res_target[vec_id], 1), mode='bilinear', align_corners=True))
        return plane_coef, line_coef

    @torch.no_grad()
    def upsample_volume_grid(self, res_target):
        """Upsample both factor banks and refresh the ray-marching step size."""
        self.app_plane, self.app_line = self.up_sampling_VM(self.app_plane, self.app_line, res_target)
        self.density_plane, self.density_line = self.up_sampling_VM(self.density_plane, self.density_line, res_target)
        self.update_stepSize(res_target)
        print(f'upsamping to {res_target}')

    @torch.no_grad()
    def shrink(self, new_aabb):
        """Crop all factor grids to new_aabb (converted to voxel indices) and update aabb.

        Requires self.alphaMask to be set (its gridSize decides whether the aabb is
        snapped to the actually-cropped voxel boundaries).
        """
        print("====> shrinking ...")
        xyz_min, xyz_max = new_aabb
        t_l, b_r = (xyz_min - self.aabb[0]) / self.units, (xyz_max - self.aabb[0]) / self.units
        # NOTE(review): the inner torch.round is redundant (round of a round).
        t_l, b_r = torch.round(torch.round(t_l)).long(), torch.round(b_r).long() + 1
        b_r = torch.stack([b_r, self.gridSize]).amin(0)
        for i in range(len(self.vecMode)):
            mode0 = self.vecMode[i]
            self.density_line[i] = torch.nn.Parameter(
                self.density_line[i].data[...,t_l[mode0]:b_r[mode0],:]
            )
            self.app_line[i] = torch.nn.Parameter(
                self.app_line[i].data[...,t_l[mode0]:b_r[mode0],:]
            )
            mode0, mode1 = self.matMode[i]
            self.density_plane[i] = torch.nn.Parameter(
                self.density_plane[i].data[...,t_l[mode1]:b_r[mode1],t_l[mode0]:b_r[mode0]]
            )
            self.app_plane[i] = torch.nn.Parameter(
                self.app_plane[i].data[...,t_l[mode1]:b_r[mode1],t_l[mode0]:b_r[mode0]]
            )
        # Snap the aabb to the actually-cropped voxel boundaries when the alpha-mask
        # grid resolution differs from the feature grid.
        if not torch.all(self.alphaMask.gridSize == self.gridSize):
            t_l_r, b_r_r = t_l / (self.gridSize-1), (b_r-1) / (self.gridSize-1)
            correct_aabb = torch.zeros_like(new_aabb)
            correct_aabb[0] = (1-t_l_r)*self.aabb[0] + t_l_r*self.aabb[1]
            correct_aabb[1] = (1-b_r_r)*self.aabb[0] + b_r_r*self.aabb[1]
            print("aabb", new_aabb, "\ncorrect aabb", correct_aabb)
            new_aabb = correct_aabb
        newSize = b_r - t_l
        self.aabb = new_aabb
        self.update_stepSize((newSize[0], newSize[1], newSize[2]))
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
models/tensorBase_general_multi_lights.py | Python | import torch
import torch.nn as nn
import torch.nn.functional as F
from .sh import eval_sh_bases
import numpy as np
import time
from models.relight_utils import linear2srgb_torch
from dataLoader.ray_utils import safe_l2_normalize
# from torch_efficient_distloss import eff_distloss, eff_distloss_native, flatten_eff_distloss
def positional_encoding(positions, freqs):
    """Sinusoidal positional encoding: (..., D) -> (..., 2 * D * freqs)."""
    octaves = (2 ** torch.arange(freqs).float()).to(positions.device)  # (freqs,)
    scaled = (positions[..., None] * octaves).reshape(
        positions.shape[:-1] + (freqs * positions.shape[-1],))  # (..., D*freqs)
    return torch.cat((torch.sin(scaled), torch.cos(scaled)), dim=-1)
def raw2alpha(sigma, dist):
    """Volume-rendering conversion from density to opacity.

    sigma, dist: [N_rays, N_samples]
    Returns:
        alpha: per-sample opacity 1 - exp(-sigma * dist).
        weights: alpha compositing weights (alpha * accumulated transmittance).
        T_final: [N_rays, 1] transmittance remaining after the last sample.
    """
    alpha = 1.0 - torch.exp(-sigma * dist)
    leading_ones = torch.ones(alpha.shape[0], 1).to(alpha.device)
    # 1e-10 keeps cumprod away from exact zeros.
    trans = torch.cumprod(torch.cat([leading_ones, 1.0 - alpha + 1e-10], -1), -1)
    weights = alpha * trans[:, :-1]  # [N_rays, N_samples]
    return alpha, weights, trans[:, -1:]
def SHRender(xyz_sampled, viewdirs, features):
    """Decode per-point degree-2 SH coefficients into view-dependent RGB."""
    basis = eval_sh_bases(2, viewdirs)[:, None]
    coeffs = features.view(-1, 3, basis.shape[-1])
    # Shifted ReLU keeps colors non-negative around a 0.5 baseline.
    return torch.relu((basis * coeffs).sum(dim=-1) + 0.5)
def RGBRender(xyz_sampled, viewdirs, features):
    """Identity shading: the sampled features already are RGB values."""
    return features
def compute_energy(lgtSGs):
    """Integrated energy of each spherical Gaussian light.

    lgtSGs: (M, 7) rows of lobe axis (3) | sharpness (1) | amplitude (3).
    Returns (M, 3): amplitude * 2*pi/sharpness * (1 - exp(-2*sharpness)).
    """
    sharpness = torch.abs(lgtSGs[:, 3:4])
    amplitude = torch.abs(lgtSGs[:, 4:])
    return amplitude * 2.0 * np.pi / sharpness * (1.0 - torch.exp(-2.0 * sharpness))
def fibonacci_sphere(samples=1):
    '''
    Uniformly distribute points on the unit sphere via the golden-angle spiral.

    reference: https://github.com/Kai-46/PhySG/blob/master/code/model/sg_envmap_material.py

    Args:
        samples: number of points (>= 1).
    Returns:
        (samples, 3) numpy array of unit vectors.
    '''
    # Guard samples == 1: the loop below divides by (samples - 1), so the original
    # crashed with ZeroDivisionError at its own default argument. A single sample
    # is the i = 0 limit point (0, 0, 1).
    if samples <= 1:
        return np.array([[0.0, 0.0, 1.0]])
    points = []
    phi = np.pi * (3. - np.sqrt(5.))  # golden angle in radians
    for i in range(samples):
        z = 1 - (i / float(samples - 1)) * 2  # z goes from 1 to -1
        radius = np.sqrt(1 - z * z)  # ring radius at height z
        theta = phi * i  # golden angle increment
        x = np.cos(theta) * radius
        y = np.sin(theta) * radius
        points.append([x, y, z])
    points = np.array(points)
    return points
def render_envmap_sg(lgtSGs, viewdirs):
    """Evaluate a bank of spherical Gaussians along viewing directions.

    lgtSGs: (M, 7) rows of lobe axis (3) | sharpness (1) | amplitude (3).
    viewdirs: (..., 3); returns (..., 3) RGB summed over the M lobes.
    """
    viewdirs = viewdirs.to(lgtSGs.device).unsqueeze(-2)  # [..., 1, 3]
    batch_shape = list(viewdirs.shape[:-2])
    num_sg = lgtSGs.shape[0]
    # Broadcast the SG bank over all batch dimensions: [M, 7] -> [..., M, 7].
    sgs = lgtSGs.view([1] * len(batch_shape) + [num_sg, 7]).expand(batch_shape + [num_sg, 7])
    lobes = sgs[..., :3] / (torch.norm(sgs[..., :3], dim=-1, keepdim=True))
    sharpness = torch.abs(sgs[..., 3:4])
    amplitude = torch.abs(sgs[..., -3:])
    cos_minus_one = torch.sum(viewdirs * lobes, dim=-1, keepdim=True) - 1.0
    per_lobe = amplitude * torch.exp(sharpness * cos_minus_one)  # [..., M, 3]
    return torch.sum(per_lobe, dim=-2)  # [..., 3]
def compute_envmap(lgtSGs, H, W, tensorfactor):
    '''
    Compute an H x W environment map by evaluating the spherical Gaussian
    light sources along the model's fixed per-texel view directions.
    '''
    radiance = render_envmap_sg(lgtSGs, tensorfactor.fixed_viewdirs)
    return radiance.reshape((H, W, 3))
class AlphaGridMask(torch.nn.Module):
    """Dense occupancy grid used to skip empty space during ray sampling."""

    def __init__(self, device, aabb, alpha_volume):
        super(AlphaGridMask, self).__init__()
        self.device = device
        self.aabb = aabb.to(self.device)
        self.aabbSize = self.aabb[1] - self.aabb[0]
        self.invgridSize = 1.0 / self.aabbSize * 2
        # stored as [1, 1, D, H, W] so it can feed F.grid_sample directly
        self.alpha_volume = alpha_volume.view(1, 1, *alpha_volume.shape[-3:])
        self.gridSize = torch.LongTensor(
            [alpha_volume.shape[-1], alpha_volume.shape[-2], alpha_volume.shape[-3]]
        ).to(self.device)

    def sample_alpha(self, xyz_sampled):
        """Trilinearly sample the alpha grid at world-space points; returns [N]."""
        coords = self.normalize_coord(xyz_sampled)
        return F.grid_sample(self.alpha_volume, coords.view(1, -1, 1, 1, 3),
                             align_corners=True).view(-1)

    def normalize_coord(self, xyz_sampled):
        """Map world coordinates inside the aabb to [-1, 1]."""
        return (xyz_sampled - self.aabb[0]) * self.invgridSize - 1
class MLPRender_Fea(torch.nn.Module):
    """View-dependent RGB decoder: features + view directions, both with
    optional sin/cos positional encoding, through a 3-layer MLP + sigmoid."""

    def __init__(self, inChanel, viewpe=6, feape=6, featureC=128):
        super(MLPRender_Fea, self).__init__()
        self.viewpe = viewpe
        self.feape = feape
        # raw features + raw viewdirs + their encodings
        self.in_mlpC = 2 * viewpe * 3 + 2 * feape * inChanel + 3 + inChanel
        self.mlp = torch.nn.Sequential(
            torch.nn.Linear(self.in_mlpC, featureC),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(featureC, featureC),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(featureC, 3),
        )
        torch.nn.init.constant_(self.mlp[-1].bias, 0)

    def forward(self, pts, viewdirs, features):
        parts = [features, viewdirs]
        if self.feape > 0:
            parts.append(positional_encoding(features, self.feape))
        if self.viewpe > 0:
            parts.append(positional_encoding(viewdirs, self.viewpe))
        return torch.sigmoid(self.mlp(torch.cat(parts, dim=-1)))
class MLPBRDF_Fea(torch.nn.Module):
    """Per-point property decoder (features + view directions -> `outc`
    channels) with a configurable output activation."""

    def __init__(self, inChanel, viewpe=6, feape=6, featureC=128, outc=1, act_net=nn.Sigmoid()):
        super(MLPBRDF_Fea, self).__init__()
        self.viewpe = viewpe
        self.feape = feape
        self.outc = outc
        self.act_net = act_net
        self.in_mlpC = 2 * viewpe * 3 + 2 * feape * inChanel + 3 + inChanel
        self.mlp = torch.nn.Sequential(
            torch.nn.Linear(self.in_mlpC, featureC),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(featureC, featureC),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(featureC, outc),
        )
        torch.nn.init.constant_(self.mlp[-1].bias, 0)

    def forward(self, pts, viewdirs, features):
        parts = [features, viewdirs]
        if self.feape > 0:
            parts.append(positional_encoding(features, self.feape))
        if self.viewpe > 0:
            parts.append(positional_encoding(viewdirs, self.viewpe))
        return self.act_net(self.mlp(torch.cat(parts, dim=-1)))
class MLPBRDF_PEandFeature(torch.nn.Module):
    """Per-point property decoder conditioned on positions and features,
    with positional encoding on both; output activation is configurable."""

    def __init__(self, inChanel, pospe=6, feape=6, featureC=128, outc=1, act_net=nn.Sigmoid()):
        super(MLPBRDF_PEandFeature, self).__init__()
        self.pospe = pospe
        self.feape = feape
        self.outc = outc
        self.act_net = act_net
        self.in_mlpC = 2 * pospe * 3 + 2 * feape * inChanel + 3 + inChanel
        self.mlp = torch.nn.Sequential(
            torch.nn.Linear(self.in_mlpC, featureC),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(featureC, featureC),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(featureC, outc),
        )
        torch.nn.init.constant_(self.mlp[-1].bias, 0)

    def forward(self, pts, features):
        parts = [features, pts]
        if self.feape > 0:
            parts.append(positional_encoding(features, self.feape))
        if self.pospe > 0:
            parts.append(positional_encoding(pts, self.pospe))
        return self.act_net(self.mlp(torch.cat(parts, dim=-1)))
class MLPNormal_normal_and_xyz(torch.nn.Module):
    """Normal decoder conditioned on raw position, an input normal, and
    features (with optional positional encoding of the features only)."""

    def __init__(self, inChanel, feape=6, featureC=128, outc=1, act_net=nn.Sigmoid()):
        super(MLPNormal_normal_and_xyz, self).__init__()
        self.feape = feape
        self.outc = outc
        self.act_net = act_net
        # features (+ encoding) + xyz (3) + input normal (3)
        self.in_mlpC = 2 * feape * inChanel + inChanel + 3 + 3
        self.mlp = torch.nn.Sequential(
            torch.nn.Linear(self.in_mlpC, featureC),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(featureC, featureC),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(featureC, outc),
        )
        torch.nn.init.constant_(self.mlp[-1].bias, 0)

    def forward(self, pts, normal, features):
        parts = [pts, normal, features]
        if self.feape > 0:
            parts.append(positional_encoding(features, self.feape))
        return self.act_net(self.mlp(torch.cat(parts, dim=-1)))
class MLPNormal_normal_and_PExyz(torch.nn.Module):
    """Normal decoder conditioned on position, an input normal and features,
    with positional encoding on both positions and features."""

    def __init__(self, inChanel, pospe=6, feape=6, featureC=128, outc=1, act_net=nn.Sigmoid()):
        super(MLPNormal_normal_and_PExyz, self).__init__()
        self.feape = feape
        self.pospe = pospe
        self.outc = outc
        self.act_net = act_net
        # PE(xyz) + PE(features) + xyz (3) + features + input normal (3)
        self.in_mlpC = 2 * pospe * 3 + 2 * feape * inChanel + 3 + inChanel + 3
        self.mlp = torch.nn.Sequential(
            torch.nn.Linear(self.in_mlpC, featureC),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(featureC, featureC),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(featureC, outc),
        )
        torch.nn.init.constant_(self.mlp[-1].bias, 0)

    def forward(self, pts, normal, features):
        parts = [pts, normal, features]
        if self.feape > 0:
            parts.append(positional_encoding(features, self.feape))
        if self.pospe > 0:
            parts.append(positional_encoding(pts, self.pospe))
        return self.act_net(self.mlp(torch.cat(parts, dim=-1)))
class MLPBRDF_onlyFeature(torch.nn.Module):
    """Per-point property decoder that uses only the features (pts and
    viewdirs are accepted for interface compatibility but ignored)."""

    def __init__(self, inChanel, pospe=6, feape=6, featureC=128, outc=1, act_net=nn.Sigmoid()):
        super(MLPBRDF_onlyFeature, self).__init__()
        self.pospe = pospe
        self.feape = feape
        self.outc = outc
        self.act_net = act_net
        self.in_mlpC = 2 * feape * inChanel + inChanel
        self.mlp = torch.nn.Sequential(
            torch.nn.Linear(self.in_mlpC, featureC),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(featureC, featureC),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(featureC, outc),
        )
        torch.nn.init.constant_(self.mlp[-1].bias, 0)

    def forward(self, pts, viewdirs, features):
        parts = [features]
        if self.feape > 0:
            parts.append(positional_encoding(features, self.feape))
        return self.act_net(self.mlp(torch.cat(parts, dim=-1)))
class MLPRender_PE(torch.nn.Module):
    """View-dependent RGB decoder with positional encoding on both the
    sample positions and the view directions.

    Input layout: features | viewdirs | pts | PE(pts) | PE(viewdirs).
    """

    def __init__(self, inChanel, viewpe=6, pospe=6, featureC=128):
        super(MLPRender_PE, self).__init__()
        # (raw viewdirs + encoding) + (raw pts + encoding) + features
        self.in_mlpC = (3 + 2 * viewpe * 3) + (3 + 2 * pospe * 3) + inChanel
        self.viewpe = viewpe
        self.pospe = pospe
        layer1 = torch.nn.Linear(self.in_mlpC, featureC)
        layer2 = torch.nn.Linear(featureC, featureC)
        layer3 = torch.nn.Linear(featureC, 3)
        self.mlp = torch.nn.Sequential(layer1, torch.nn.ReLU(inplace=True), layer2, torch.nn.ReLU(inplace=True), layer3)
        torch.nn.init.constant_(self.mlp[-1].bias, 0)

    def forward(self, pts, viewdirs, features):
        # BUGFIX: in_mlpC budgets 3 channels for the raw `pts`, but the
        # original forward never appended pts, so the first Linear always
        # failed with a shape mismatch. Include pts to match in_mlpC.
        indata = [features, viewdirs, pts]
        if self.pospe > 0:
            indata += [positional_encoding(pts, self.pospe)]
        if self.viewpe > 0:
            indata += [positional_encoding(viewdirs, self.viewpe)]
        mlp_in = torch.cat(indata, dim=-1)
        rgb = self.mlp(mlp_in)
        rgb = torch.sigmoid(rgb)
        return rgb
class MLPRender(torch.nn.Module):
    """View-dependent RGB decoder: features + (optionally encoded) view
    directions through a 3-layer MLP with a sigmoid output."""

    def __init__(self, inChanel, viewpe=6, featureC=128):
        super(MLPRender, self).__init__()
        self.viewpe = viewpe
        self.in_mlpC = (3 + 2 * viewpe * 3) + inChanel
        self.mlp = torch.nn.Sequential(
            torch.nn.Linear(self.in_mlpC, featureC),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(featureC, featureC),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(featureC, 3),
        )
        torch.nn.init.constant_(self.mlp[-1].bias, 0)

    def forward(self, pts, viewdirs, features):
        parts = [features, viewdirs]
        if self.viewpe > 0:
            parts.append(positional_encoding(viewdirs, self.viewpe))
        return torch.sigmoid(self.mlp(torch.cat(parts, dim=-1)))
class TensorBase(torch.nn.Module):
    """Base class for factorized tensorial radiance fields with relighting
    extensions (normal prediction, BRDF decoding, environment lighting).

    Concrete tensor decompositions override the `init_svd_volume` /
    `compute_*feature` hooks defined further down.
    """

    def __init__(self, aabb, gridSize, device,
                 density_n_comp=8,
                 appearance_n_comp=24,
                 app_dim=27,
                 shadingMode='MLP_PE',
                 alphaMask=None,
                 near_far=[2.0, 6.0],
                 density_shift=-10,
                 alphaMask_thres=0.001,
                 distance_scale=25,
                 rayMarch_weight_thres=0.0001,
                 pos_pe=2, view_pe=2, fea_pe=2,
                 featureC=128,
                 step_ratio=2.0,
                 fea2denseAct='softplus',
                 normals_kind="purely_predicted",
                 light_rotation=['000', '120', '240'],
                 light_name_list = ["sunset", "snow", "courtyard"],
                 envmap_w=32,
                 envmap_h=16,
                 light_kind='pixel',
                 dataset= None,
                 numLgtSGs=128,
                 fixed_fresnel= 0.04,
                 **kwargs
                 ):
        # NOTE(review): the mutable defaults (near_far, light_rotation,
        # light_name_list) are shared across instances; harmless only if
        # never mutated in place -- confirm.
        super(TensorBase, self).__init__()
        self.density_n_comp = density_n_comp
        self.app_n_comp = appearance_n_comp
        self.app_dim = app_dim
        self.aabb = aabb
        self.alphaMask = alphaMask
        self.device = device
        # parameters of the feature->density mapping (see feature2density)
        self.density_shift = density_shift
        self.alphaMask_thres = alphaMask_thres
        self.distance_scale = distance_scale
        self.rayMarch_weight_thres = rayMarch_weight_thres
        self.fea2denseAct = fea2denseAct
        self.near_far = near_far
        self.step_ratio = step_ratio
        self.shadingMode, self.normals_kind, self.pos_pe, self.view_pe, self.fea_pe, self.featureC = shadingMode, normals_kind, pos_pe, view_pe, fea_pe, featureC
        # lighting setup: one light per entry of light_name_list
        self.light_num = len(light_name_list)
        self.light_rotation = [int(rotation) for rotation in light_rotation]
        self.light_name_list = light_name_list
        self.envmap_w = envmap_w
        self.envmap_h = envmap_h
        self.dataset = dataset
        self.light_kind = light_kind
        self.numLgtSGs = numLgtSGs
        self.fixed_fresnel = fixed_fresnel
        self.update_stepSize(gridSize)
        # NOTE(review): presumably the plane index pairs / line axes of the
        # tensor factorization consumed by subclasses -- confirm there.
        self.matMode = [[0, 1], [0, 2], [1, 2]]
        self.vecMode = [2, 1, 0]
        self.comp_w = [1, 1, 1]
        self.init_svd_volume(gridSize[0], device)
        self.init_render_func(shadingMode, pos_pe, view_pe, fea_pe, featureC, device)
        self.init_light()
    def init_render_func(self, shadingMode, pos_pe, view_pe, fea_pe, featureC, device):
        """Instantiate the radiance decoder plus the normal and BRDF MLPs
        according to `shadingMode` and `normals_kind`."""
        if shadingMode == 'MLP_PE':
            self.renderModule = MLPRender_PE(self.app_dim, view_pe, pos_pe, featureC).to(device)
        elif shadingMode == 'MLP_Fea':
            self.renderModule = MLPRender_Fea(self.app_dim, view_pe, fea_pe, featureC).to(device)
        elif shadingMode == 'MLP':
            self.renderModule = MLPRender(self.app_dim, view_pe, featureC).to(device)
        elif shadingMode == 'SH':
            self.renderModule = SHRender
        elif shadingMode == 'RGB':
            assert self.app_dim == 3
            self.renderModule = RGBRender
        else:
            # NOTE(review): aborts the whole process on a bad config value;
            # raising ValueError would be friendlier to callers.
            print("Unrecognized shading module")
            exit()
        print("pos_pe", pos_pe, "view_pe", view_pe, "fea_pe", fea_pe)
        if self.normals_kind == "purely_predicted" or self.normals_kind == "derived_plus_predicted":
            self.renderModule_normal = MLPBRDF_PEandFeature(self.app_dim, pos_pe, fea_pe, featureC, outc=3,
                                                            act_net=nn.Tanh()).to(device)
        elif self.normals_kind == "residue_prediction":
            self.renderModule_normal = MLPNormal_normal_and_PExyz(self.app_dim, pos_pe, fea_pe, featureC, outc=3,
                                                                  act_net=nn.Tanh()).to(device)
        # 4 = 3 + 1: albedo (rgb) + roughness
        self.renderModule_brdf= MLPBRDF_PEandFeature(self.app_dim, pos_pe, fea_pe, featureC, outc=4,
                                                     act_net=nn.Sigmoid()).to(device)
        print("renderModule_brdf", self.renderModule_brdf)
def generate_envir_map_dir(self, envmap_h, envmap_w, is_jittor=False):
lat_step_size = np.pi / envmap_h
lng_step_size = 2 * np.pi / envmap_w
phi, theta = torch.meshgrid([torch.linspace(np.pi / 2 - 0.5 * lat_step_size, -np.pi / 2 + 0.5 * lat_step_size, envmap_h),
torch.linspace(np.pi - 0.5 * lng_step_size, -np.pi + 0.5 * lng_step_size, envmap_w)], indexing='ij')
sin_phi = torch.sin(torch.pi / 2 - phi) # [envH, envW]
light_area_weight = 4 * torch.pi * sin_phi / torch.sum(sin_phi) # [envH, envW]
assert 0 not in light_area_weight, "There shouldn't be light pixel that doesn't contribute"
light_area_weight = light_area_weight.to(torch.float32).reshape(-1) # [envH * envW, ]
if is_jittor:
phi_jittor, theta_jittor = lat_step_size * (torch.rand_like(phi) - 0.5), lng_step_size * (torch.rand_like(theta) - 0.5)
phi, theta = phi + phi_jittor, theta + theta_jittor
view_dirs = torch.stack([ torch.cos(theta) * torch.cos(phi),
torch.sin(theta) * torch.cos(phi),
torch.sin(phi)], dim=-1).view(-1, 3) # [envH * envW, 3]
return light_area_weight, view_dirs
    def init_light(self):
        """Create the learnable lighting representation.

        'pixel': one free RGB value per environment-map texel.
        'sg': one bank of `numLgtSGs` spherical Gaussians per entry of
        `light_name_list`, energy-normalized and with lobes placed on a
        Fibonacci sphere (two SGs share each lobe direction).
        """
        self.light_area_weight, self.fixed_viewdirs = self.generate_envir_map_dir(self.envmap_h, self.envmap_w)
        nlights = self.envmap_w * self.envmap_h
        if self.light_kind == 'pixel':
            self._light_rgbs = torch.nn.Parameter(torch.FloatTensor(nlights, 3).uniform_(0, 3).to(torch.float32))  # [envH * envW, 3]
        elif self.light_kind == 'sg':
            # NOTE(review): a plain Python list of Parameters is not
            # registered with the Module (won't appear in state_dict());
            # confirm these are handed to the optimizer explicitly.
            self.lgtSGs_list = []
            for i in range(self.light_num):
                lgtSGs = nn.Parameter(torch.randn(self.numLgtSGs, 7), requires_grad=True)  # [M, 7]; lobe + lambda + mu
                # gray (equal-channel) amplitude initialization
                lgtSGs.data[:, -2:] = lgtSGs.data[:, -3:-2].expand((-1, 2))
                # make sure lambda is not too close to zero
                lgtSGs.data[:, 3:4] = 10. + torch.abs(lgtSGs.data[:, 3:4] * 20.)
                # init envmap energy
                energy = compute_energy(lgtSGs.data)
                lgtSGs.data[:, 4:] = torch.abs(lgtSGs.data[:, 4:]) / torch.sum(energy, dim=0, keepdim=True) * 2. * np.pi * 0.8
                energy = compute_energy(lgtSGs.data)
                # deterministicly initialize lobes
                lobes = fibonacci_sphere(self.numLgtSGs//2).astype(np.float32)
                lgtSGs.data[:self.numLgtSGs//2, :3] = torch.from_numpy(lobes)
                lgtSGs.data[self.numLgtSGs//2:, :3] = torch.from_numpy(lobes)
                self.lgtSGs_list.append(lgtSGs)
def gen_light_incident_dirs(self, sample_number=-1, method='fixed_envirmap', device='cuda'):
''' This function is used to generate light incident directions per iteraration,
and this function is used for the light kind of 'sg'
- args:
- sample_number: sampled incident light directions, this argumet is not always used
- method:
'fixed_envirmap': generate light incident directions on the fixed center points of the environment map
'uniform_sample': sample incident direction uniformly on the unit sphere, sample number is specified by sample_number
'stratified_sampling': random sample incident direction on each grid of envirment map
'importance_sample': sample based on light energy
- return:
- light_incident_directions: [out_putsample_number, 3]
'''
if method == 'fixed_envirmap':
light_incident_directions = self.fixed_viewdirs
elif method == 'uniform_sample':
# uniform sampling 'sample_number' points on a unit sphere
pass # TODO
elif method == 'stratified_sampling':
lat_step_size = np.pi / self.envmap_h
lng_step_size = 2 * np.pi / self.envmap_w
phi_begin, theta_begin = torch.meshgrid([
torch.linspace(np.pi / 2 - 0.5 * lat_step_size, -np.pi / 2 + 0.5 * lat_step_size, self.envmap_h),
torch.linspace(np.pi - 0.5 * lng_step_size, -np.pi + 0.5 * lng_step_size, self.envmap_w)
],
indexing='ij')
phi_jittor, theta_jittor = lat_step_size * (torch.rand_like(phi_begin) - 0.5), lng_step_size * (torch.rand_like(theta_begin) - 0.5)
phi, theta = phi_begin + phi_jittor, theta_begin + theta_jittor
light_incident_directions = torch.stack([torch.cos(theta) * torch.cos(phi),
torch.sin(theta) * torch.cos(phi),
torch.sin(phi)], dim=-1) # [H, W, 3]
elif method == 'stratifed_sample_equal_areas':
sin_phi_size = 2 / self.envmap_h
lng_step_size = 2 * np.pi / self.envmap_w
sin_phi_begin, theta_begin = torch.meshgrid([torch.linspace(1 - 0.5 * sin_phi_size, -1 + 0.5 * sin_phi_size, self.envmap_h),
torch.linspace(np.pi - 0.5 * lng_step_size, -np.pi + 0.5 * lng_step_size, self.envmap_w)], indexing='ij')
sin_phi_jittor, theta_jittor = sin_phi_size * (torch.rand_like(sin_phi_begin) - 0.5), lng_step_size * (torch.rand_like(theta_begin) - 0.5)
sin_phi, theta = sin_phi_begin + sin_phi_jittor, theta_begin + theta_jittor
phi = torch.asin(sin_phi)
light_incident_directions = torch.stack([torch.cos(theta) * torch.cos(phi),
torch.sin(theta) * torch.cos(phi),
torch.sin(phi)], dim=-1) # [H, W, 3]
elif method == 'importance_sample':
_, view_dirs = self.generate_envir_map_dir(128, 256, is_jittor=True)
envir_map = self.get_light_rgbs(view_dirs.reshape(-1, 3).to(device))[0]
with torch.no_grad():
envir_map = envir_map.reshape(128, 256, 3)
# compute the pdf of importance sampling of the environment map
light_intensity = torch.sum(envir_map, dim=2, keepdim=True) # [H, W, 1]
env_map_h, env_map_w, _ = light_intensity.shape
h_interval = 1.0 / env_map_h
sin_theta = torch.sin(torch.linspace(0 + 0.5 * h_interval, np.pi - 0.5 * h_interval, env_map_h)).to(device) # [H, ]
pdf = light_intensity * sin_theta.view(-1, 1, 1) # [H, W, 1]
pdf_to_sample = pdf / torch.sum(pdf) # [H, W, 1]
pdf_to_compute = pdf_to_sample * env_map_h * env_map_w / (2 * np.pi * np.pi * sin_theta.view(-1, 1, 1))
light_dir_idx = torch.multinomial(pdf_to_sample.view(-1), sample_number, replacement=True) # [sample_number, ]
envir_map_dir = view_dirs.view(-1, 3).to(device)
light_dir = envir_map_dir.gather(0, light_dir_idx.unsqueeze(-1).expand(-1, 3)).view(-1, 3) # [num_samples, 3]
# sample the light rgbs
envir_map_rgb = envir_map.view(-1, 3)
light_rgb = envir_map_rgb.gather(0, light_dir_idx.unsqueeze(-1).expand(-1, 3)).view(-1, 3) # [num_samples, 3]
envir_map_pdf = pdf_to_compute.view(-1, 1)
light_pdf = envir_map_pdf.gather(0, light_dir_idx.unsqueeze(-1).expand(-1, 1)).view(-1, 1) # [num_samples, 1]
return light_dir, light_rgb, light_pdf
return light_incident_directions.reshape(-1, 3) # [output_sample_number, 3]
    def get_light_rgbs(self, incident_light_directions=None, device='cuda'):
        '''
        Evaluate the scene's light sources along the given directions.

        - args:
            - incident_light_directions: [sample_number, 3]
              NOTE(review): the default None crashes on `.to(device)` below;
              callers must always pass directions.
        - return:
            - light_rgbs: [light_num, sample_number, 3]
              NOTE(review): for any light_kind other than 'sg' the variable
              `light_rgbs` is never assigned and the return raises NameError;
              the commented-out code below sketches the intended pixel/gt path.
        '''
        init_light_directions = incident_light_directions.to(device).reshape(-1, 3)  # [sample_number, 3]
        if self.light_kind == 'sg':
            light_rgbs_list = []
            for light_kind_idx in range(self.light_num):
                cur_light_rgbs = render_envmap_sg(self.lgtSGs_list[light_kind_idx].to(device), init_light_directions).reshape(-1, 3)  # [sample_number, 3]
                light_rgbs_list.append(cur_light_rgbs)
            light_rgbs = torch.stack(light_rgbs_list, dim=0)  # [light_num, sample_number, 3]
        else:
            pass
            # if self.light_kind == 'pixel':
            #     environment_map = torch.nn.functional.softplus(self._light_rgbs, beta=5).reshape(self.envmap_h, self.envmap_w, 3).to(device)  # [H, W, 3]
            # elif self.light_kind == 'gt':
            #     environment_map = self.dataset.lights_probes.requires_grad_(False).reshape(self.envmap_h, self.envmap_w, 3).to(device)  # [H, W, 3]
            # else:
            #     print("Illegal light kind: {}".format(self.light_kind))
            #     exit(1)
            # environment_map = environment_map.permute(2, 0, 1).unsqueeze(0)  # [1, 3, H, W]
            # phi = torch.arccos(remapped_light_directions[:, 2]).reshape(-1) - 1e-6
            # theta = torch.atan2(remapped_light_directions[:, 1], remapped_light_directions[:, 0]).reshape(-1)
            # # normalize to [-1, 1]
            # query_y = (phi / np.pi) * 2 - 1
            # query_x = - theta / np.pi
            # grid = torch.stack((query_x, query_y)).permute(1, 0).unsqueeze(0).unsqueeze(0)
            # light_rgbs = F.grid_sample(environment_map, grid, align_corners=False).squeeze().permute(1, 0).reshape(self.light_num, -1, 3)
        return light_rgbs
def update_stepSize(self, gridSize):
print("aabb", self.aabb.view(-1))
print("grid size", gridSize)
self.aabbSize = self.aabb[1] - self.aabb[0]
self.invaabbSize = 2.0 / self.aabbSize
self.gridSize = torch.LongTensor(gridSize).to(self.device)
self.units = self.aabbSize / (self.gridSize - 1)
self.stepSize = torch.mean(self.units) * self.step_ratio
self.aabbDiag = torch.sqrt(torch.sum(torch.square(self.aabbSize)))
self.nSamples = int((self.aabbDiag / self.stepSize).item()) + 1
print("sampling step size: ", self.stepSize)
print("sampling number: ", self.nSamples)
    def init_svd_volume(self, res, device):
        """Hook: allocate the factorized density/appearance tensors
        (no-op in the base class; implemented by subclasses)."""
        pass

    def compute_densityfeature(self, xyz_sampled):
        """Hook: density feature at normalized sample points (no-op here)."""
        pass

    def compute_densityfeature_with_xyz_grad(self, xyz_sampled):
        """Hook: like compute_densityfeature, but keeps the graph w.r.t. the
        input coordinates (used by compute_derived_normals); no-op here."""
        pass

    def compute_bothfeature(self, xyz_sampled, light_idx_sampled):
        """Hook: (radiance feature, intrinsic feature) at sample points
        (no-op here)."""
        pass

    def compute_intrinfeature(self, xyz_sampled):
        """Hook: intrinsic (BRDF) feature at sample points (no-op here)."""
        pass

    def compute_appfeature(self, xyz_sampled, light_idx_sampled):
        """Hook: appearance feature at sample points (no-op here)."""
        pass
def normalize_coord(self, xyz_sampled):
return (xyz_sampled - self.aabb[0]) * self.invaabbSize - 1
    def get_optparam_groups(self, lr_init_spatial=0.02, lr_init_network=0.001):
        """Hook: per-parameter-group learning rates for the optimizer
        (no-op in the base class; implemented by subclasses)."""
        pass
    def get_kwargs(self):
        """Constructor kwargs required to re-instantiate this model from a
        checkpoint (consumed by save/load)."""
        return {
            'aabb': self.aabb,
            'gridSize': self.gridSize.tolist(),
            'density_n_comp': self.density_n_comp,
            'appearance_n_comp': self.app_n_comp,
            'app_dim': self.app_dim,
            'density_shift': self.density_shift,
            'alphaMask_thres': self.alphaMask_thres,
            'distance_scale': self.distance_scale,
            'rayMarch_weight_thres': self.rayMarch_weight_thres,
            'fea2denseAct': self.fea2denseAct,
            'near_far': self.near_far,
            'step_ratio': self.step_ratio,
            'shadingMode': self.shadingMode,
            'pos_pe': self.pos_pe,
            'view_pe': self.view_pe,
            'fea_pe': self.fea_pe,
            'featureC': self.featureC,
            'normals_kind': self.normals_kind,
            'light_num': self.light_num,
            'light_kind':self.light_kind,
            'numLgtSGs':self.numLgtSGs,
            'light_name_list':self.light_name_list,
        }
    def save(self, path):
        """Serialize weights plus re-creation kwargs (and the alpha mask,
        if any) to `path` via torch.save."""
        kwargs = self.get_kwargs()
        ckpt = {'kwargs': kwargs, 'state_dict': self.state_dict()}
        if self.alphaMask is not None:
            alpha_volume = self.alphaMask.alpha_volume.bool().cpu().numpy()
            ckpt.update({'alphaMask.shape': alpha_volume.shape})
            # bit-pack the boolean grid to keep checkpoints small
            ckpt.update({'alphaMask.mask': np.packbits(alpha_volume.reshape(-1))})
            ckpt.update({'alphaMask.aabb': self.alphaMask.aabb.cpu()})
        torch.save(ckpt, path)
    def load(self, ckpt):
        """Restore model weights from a checkpoint dict; rebuilds the
        AlphaGridMask from its bit-packed form when present."""
        if 'alphaMask.aabb' in ckpt.keys():
            length = np.prod(ckpt['alphaMask.shape'])
            alpha_volume = torch.from_numpy(
                np.unpackbits(ckpt['alphaMask.mask'])[:length].reshape(ckpt['alphaMask.shape']))
            self.alphaMask = AlphaGridMask(self.device, ckpt['alphaMask.aabb'].to(self.device),
                                           alpha_volume.float().to(self.device))
        self.load_state_dict(ckpt['state_dict'])
def sample_ray_ndc(self, rays_o, rays_d, is_train=True, N_samples=-1):
N_samples = N_samples if N_samples > 0 else self.nSamples
near, far = self.near_far
interpx = torch.linspace(near, far, N_samples).unsqueeze(0).to(rays_o)
if is_train:
interpx += torch.rand_like(interpx).to(rays_o) * ((far - near) / N_samples)
rays_pts = rays_o[..., None, :] + rays_d[..., None, :] * interpx[..., None]
mask_outbbox = ((self.aabb[0] > rays_pts) | (rays_pts > self.aabb[1])).any(dim=-1)
return rays_pts, interpx, ~mask_outbbox
def sample_ray(self, rays_o, rays_d, is_train=True, N_samples=-1):
N_samples = N_samples if N_samples > 0 else self.nSamples
stepsize = self.stepSize
near, far = self.near_far
vec = torch.where(rays_d == 0, torch.full_like(rays_d, 1e-6), rays_d)
rate_a = (self.aabb[1] - rays_o) / vec
rate_b = (self.aabb[0] - rays_o) / vec
t_min = torch.minimum(rate_a, rate_b).amax(-1).clamp(min=near, max=far)
rng = torch.arange(N_samples)[None].float()
if is_train:
rng = rng.repeat(rays_d.shape[-2], 1)
rng += torch.rand_like(rng[:, [0]])
step = stepsize * rng.to(rays_o.device)
interpx = (t_min[..., None] + step)
rays_pts = rays_o[..., None, :] + rays_d[..., None, :] * interpx[..., None]
mask_outbbox = ((self.aabb[0] > rays_pts) | (rays_pts > self.aabb[1])).any(dim=-1)
return rays_pts, interpx, ~mask_outbbox
    def get_mid_and_interval(self, batch_size, N_samples=-1):
        """Midpoints of N uniform bins on [0, 1], repeated per batch row,
        plus the common bin width.

        Returns (m [batch_size, N_samples], interval scalar)."""
        N_samples = N_samples if N_samples > 0 else self.nSamples
        # NOTE(review): hard-codes CUDA; this breaks on CPU-only runs.
        s = torch.linspace(0, 1, N_samples+1).cuda()
        m = (s[1:] + s[:-1]) * 0.5
        m = m[None].repeat(batch_size,1)
        interval = 1 / N_samples
        return m , interval
    def shrink(self, new_aabb, voxel_size):
        """Hook: crop the factorized volumes to `new_aabb`
        (no-op in the base class; implemented by subclasses)."""
        pass
    @torch.no_grad()
    def getDenseAlpha(self, gridSize=None):
        """Evaluate opacity on a dense grid spanning the aabb.

        Returns (alpha [X, Y, Z], dense_xyz [X, Y, Z, 3])."""
        gridSize = self.gridSize if gridSize is None else gridSize
        samples = torch.stack(torch.meshgrid(
            torch.linspace(0, 1, gridSize[0]),
            torch.linspace(0, 1, gridSize[1]),
            torch.linspace(0, 1, gridSize[2]),
        ), -1).to(self.device)
        # lerp grid coordinates into world space
        dense_xyz = self.aabb[0] * (1 - samples) + self.aabb[1] * samples
        # dense_xyz = dense_xyz
        # print(self.stepSize, self.distance_scale*self.aabbDiag)
        alpha = torch.zeros_like(dense_xyz[..., 0])
        # evaluate slice by slice to bound peak memory
        for i in range(gridSize[0]):
            alpha[i] = self.compute_alpha(dense_xyz[i].view(-1, 3), self.stepSize).view((gridSize[1], gridSize[2]))
        return alpha, dense_xyz
    @torch.no_grad()
    def updateAlphaMask(self, gridSize=(200, 200, 200)):
        """Rebuild the binary alpha mask from the current density field and
        return a tightened aabb around the occupied voxels."""
        alpha, dense_xyz = self.getDenseAlpha(gridSize)
        dense_xyz = dense_xyz.transpose(0, 2).contiguous()
        alpha = alpha.clamp(0, 1).transpose(0, 2).contiguous()[None, None]
        total_voxels = gridSize[0] * gridSize[1] * gridSize[2]
        ks = 3
        # dilate occupancy so thin structures are not clipped by the mask
        alpha = F.max_pool3d(alpha, kernel_size=ks, padding=ks // 2, stride=1).view(gridSize[::-1])
        alpha[alpha >= self.alphaMask_thres] = 1
        alpha[alpha < self.alphaMask_thres] = 0
        self.alphaMask = AlphaGridMask(self.device, self.aabb, alpha)
        valid_xyz = dense_xyz[alpha > 0.5]
        xyz_min = valid_xyz.amin(0)
        xyz_max = valid_xyz.amax(0)
        new_aabb = torch.stack((xyz_min, xyz_max))
        total = torch.sum(alpha)
        print(f"bbox: {xyz_min, xyz_max} alpha rest %%%f" % (total / total_voxels * 100))
        return new_aabb
    @torch.no_grad()
    def filtering_rays(self, all_rays, N_samples=256, chunk=10240 * 5, bbox_only=False):
        """Keep only rays that can hit the scene, processed in chunks.

        bbox_only=True uses a slab test against the aabb; otherwise samples
        along each ray are checked against the alpha mask.
        Returns (filtered_rays, mask)."""
        print('========> filtering rays ...')
        tt = time.time()
        N = torch.tensor(all_rays.shape[:-1]).prod()
        mask_filtered = []
        idx_chunks = torch.split(torch.arange(N), chunk)
        for idx_chunk in idx_chunks:
            rays_chunk = all_rays[idx_chunk].to(self.device)
            rays_o, rays_d = rays_chunk[..., :3], rays_chunk[..., 3:6]
            if bbox_only:
                # slab test: a ray hits the aabb iff exit time > entry time
                vec = torch.where(rays_d == 0, torch.full_like(rays_d, 1e-6), rays_d)
                rate_a = (self.aabb[1] - rays_o) / vec
                rate_b = (self.aabb[0] - rays_o) / vec
                t_min = torch.minimum(rate_a, rate_b).amax(-1)  # .clamp(min=near, max=far)
                t_max = torch.maximum(rate_a, rate_b).amin(-1)  # .clamp(min=near, max=far)
                mask_inbbox = t_max > t_min
            else:
                xyz_sampled, _,_ = self.sample_ray(rays_o, rays_d, N_samples=N_samples, is_train=False)
                mask_inbbox= (self.alphaMask.sample_alpha(xyz_sampled).view(xyz_sampled.shape[:-1]) > 0).any(-1)
            mask_filtered.append(mask_inbbox.cpu())
        mask_filtered = torch.cat(mask_filtered).view(all_rays.shape[:-1])
        print(f'Ray filtering done! takes {time.time()-tt} s. ray mask ratio: {torch.sum(mask_filtered) / N}')
        return all_rays[mask_filtered], mask_filtered
def feature2density(self, density_features):
if self.fea2denseAct == "softplus":
return F.softplus(density_features + self.density_shift)
elif self.fea2denseAct == "relu":
return F.relu(density_features)
    def compute_alpha(self, xyz_locs, length=1):
        """Opacity alpha = 1 - exp(-sigma * length) at world-space points,
        consulting the alpha mask first to skip known-empty locations."""
        if self.alphaMask is not None:
            alphas = self.alphaMask.sample_alpha(xyz_locs)
            alpha_mask = alphas > 0
        else:
            alpha_mask = torch.ones_like(xyz_locs[:, 0], dtype=bool)
        sigma = torch.zeros(xyz_locs.shape[:-1], device=xyz_locs.device)
        if alpha_mask.any():
            # only query the density field where the mask says it matters
            xyz_sampled = self.normalize_coord(xyz_locs[alpha_mask])
            sigma_feature = self.compute_densityfeature(xyz_sampled)
            validsigma = self.feature2density(sigma_feature)
            sigma[alpha_mask] = validsigma
        alpha = 1 - torch.exp(-sigma * length).view(xyz_locs.shape[:-1])
        return alpha
    @torch.enable_grad()
    def compute_derived_normals(self, xyz_locs):
        """Density-derived surface normals: the negated, normalized gradient
        of sigma with respect to position. Returns [N, 3]."""
        xyz_locs.requires_grad_(True)
        sigma_feature = self.compute_densityfeature_with_xyz_grad(xyz_locs)  # [..., 1] detach() removed in that function
        sigma = self.feature2density(sigma_feature)
        d_output = torch.ones_like(sigma, requires_grad=False, device=sigma.device)
        # create_graph=True so losses defined on the normals can still
        # backpropagate into the density field
        gradients = torch.autograd.grad(
            outputs=sigma,
            inputs=xyz_locs,
            grad_outputs=d_output,
            create_graph=True,
            retain_graph=True,
            only_inputs=True
        )[0]
        derived_normals = -safe_l2_normalize(gradients, dim=-1)
        derived_normals = derived_normals.view(-1, 3)
        return derived_normals
def compute_relative_smoothness_loss(self, values, values_jittor):
base = torch.maximum(values, values_jittor).clip(min=1e-6)
difference = torch.sum(((values - values_jittor) / base)**2, dim=-1, keepdim=True) # [..., 1]
return difference
    def forward(self, rays_chunk, light_idx, white_bg=True, is_train=False, ndc_ray=False, is_relight=True, N_samples=-1):
        '''
        Volume-render one chunk of rays; with is_relight=True also decode
        and composite normal / albedo / roughness / fresnel maps.

        - args:
            - rays_chunk: (batch_N, 6), batch_N is the number of rays in a batch
            - light_idx: (batch_N, 1) the index of light in the scene
        - return: a 12-tuple; when is_relight is False the relight-only
          entries are returned as None.
        '''
        viewdirs = rays_chunk[:, 3:6]  # (batch_N, 3)
        if ndc_ray:
            xyz_sampled, z_vals, ray_valid = self.sample_ray_ndc(rays_chunk[:, :3], viewdirs, is_train=is_train,
                                                                 N_samples=N_samples)
            dists = torch.cat((z_vals[:, 1:] - z_vals[:, :-1], torch.zeros_like(z_vals[:, :1])),
                              dim=-1)  # dist between 2 consecutive points along a ray
            rays_norm = torch.norm(viewdirs, dim=-1, keepdim=True)
            dists = dists * rays_norm  # [1, n_sample]
            viewdirs = viewdirs / rays_norm
        else:
            xyz_sampled, z_vals, ray_valid = self.sample_ray(rays_chunk[:, :3], viewdirs, is_train=is_train,
                                                             N_samples=N_samples)
            dists = torch.cat((z_vals[:, 1:] - z_vals[:, :-1], torch.zeros_like(z_vals[:, :1])), dim=-1)
        viewdirs = viewdirs.view(-1, 1, 3).expand(xyz_sampled.shape)  # (batch_N, N_samples, 3)
        light_idx = light_idx.view(-1, 1, 1).expand((*xyz_sampled.shape[:-1], 1))  # (batch_N, n_sammple, 1)
        if self.alphaMask is not None:
            # additionally drop samples the cached occupancy grid marks empty
            alphas = self.alphaMask.sample_alpha(xyz_sampled[ray_valid])
            alpha_mask = alphas > 0
            ray_invalid = ~ray_valid
            ray_invalid[ray_valid] |= (~alpha_mask)
            ray_valid = ~ray_invalid
        # Create empty tensor to store sigma and rgb
        sigma = torch.zeros(xyz_sampled.shape[:-1], device=xyz_sampled.device)
        rgb = torch.zeros((*xyz_sampled.shape[:2], 3), device=xyz_sampled.device)
        # Create empty tensor to store normal, roughness, fresnel
        normal = torch.zeros((*xyz_sampled.shape[:2], 3), device=xyz_sampled.device)
        albedo = torch.zeros((*xyz_sampled.shape[:2], 3), device=xyz_sampled.device)
        roughness = torch.zeros((*xyz_sampled.shape[:-1], 1), device=xyz_sampled.device)
        albedo_smoothness_cost = torch.zeros((*xyz_sampled.shape[:2], 1), device=xyz_sampled.device)
        roughness_smoothness_cost = torch.zeros((*xyz_sampled.shape[:-1], 1), device=xyz_sampled.device)
        normals_diff = torch.zeros((*xyz_sampled.shape[:2], 1), device=xyz_sampled.device)
        normals_orientation_loss = torch.zeros((*xyz_sampled.shape[:2], 1), device=xyz_sampled.device)
        if ray_valid.any():
            xyz_sampled = self.normalize_coord(xyz_sampled)
            sigma_feature = self.compute_densityfeature(xyz_sampled[ray_valid])
            validsigma = self.feature2density(sigma_feature)
            sigma[ray_valid] = validsigma
        alpha, weight, bg_weight = raw2alpha(sigma, dists * self.distance_scale)
        # only shade samples that contribute noticeably to their pixel
        app_mask = weight > self.rayMarch_weight_thres
        if app_mask.any():
            radiance_field_feat, intrinsic_feat = self.compute_bothfeature(xyz_sampled[app_mask], light_idx[app_mask])
            # RGB
            rgb[app_mask] = self.renderModule(xyz_sampled[app_mask], viewdirs[app_mask], radiance_field_feat)
            if is_relight:
                # BRDF; roughness is remapped from (0, 1) to (0.09, 0.99)
                valid_brdf = self.renderModule_brdf(xyz_sampled[app_mask], intrinsic_feat)
                valid_albedo, valid_roughness = valid_brdf[..., :3], (valid_brdf[..., 3:4] * 0.9 + 0.09)
                albedo[app_mask] = valid_albedo  # [..., 3]
                roughness[app_mask] = valid_roughness  # [..., 1]
                # smoothness priors: decode again at jittered points and compare
                xyz_sampled_jittor = xyz_sampled[app_mask] + torch.randn_like(xyz_sampled[app_mask]) * 0.01
                intrinsic_feat_jittor = self.compute_intrinfeature(xyz_sampled_jittor)
                valid_brdf_jittor = self.renderModule_brdf(xyz_sampled_jittor, intrinsic_feat_jittor)
                valid_albedo_jittor, valid_roughness_jittor = valid_brdf_jittor[..., :3], (valid_brdf_jittor[..., 3:4] * 0.9 + 0.09)
                albedo_smoothness_cost[app_mask] = self.compute_relative_smoothness_loss(valid_albedo, valid_albedo_jittor)  # [..., 1]
                roughness_smoothness_cost[app_mask] = self.compute_relative_smoothness_loss(valid_roughness, valid_roughness_jittor)  # [..., 1]
                # Normal
                if self.normals_kind == "purely_predicted":
                    valid_normals = self.renderModule_normal(xyz_sampled[app_mask], intrinsic_feat)
                elif self.normals_kind == "purely_derived":
                    valid_normals = self.compute_derived_normals(xyz_sampled[app_mask])
                elif self.normals_kind == "gt_normals":
                    valid_normals = torch.zeros_like(xyz_sampled[app_mask])  # useless
                elif self.normals_kind == "derived_plus_predicted":
                    # use the predicted normals and penalize the difference between the predicted normals and derived normals at the same time
                    derived_normals = self.compute_derived_normals(xyz_sampled[app_mask])
                    predicted_normals = self.renderModule_normal(xyz_sampled[app_mask], intrinsic_feat)
                    valid_normals = predicted_normals
                    normals_diff[app_mask] = torch.sum(torch.pow(predicted_normals - derived_normals, 2), dim=-1, keepdim=True)
                    normals_orientation_loss[app_mask] = torch.sum(viewdirs[app_mask] * predicted_normals, dim=-1, keepdim=True).clamp(min=0)
                elif self.normals_kind == "residue_prediction":
                    derived_normals = self.compute_derived_normals(xyz_sampled[app_mask])
                    predicted_normals = self.renderModule_normal(xyz_sampled[app_mask], derived_normals, intrinsic_feat)
                    valid_normals = predicted_normals
                    normals_diff[app_mask] = torch.sum(torch.pow(predicted_normals - derived_normals, 2), dim=-1, keepdim=True)
                    normals_orientation_loss[app_mask] = torch.sum(viewdirs[app_mask] * predicted_normals, dim=-1, keepdim=True).clamp(min=0)
                normal[app_mask] = valid_normals
        # alpha composition
        acc_map = torch.sum(weight, -1)
        depth_map = torch.sum(weight * z_vals, -1)
        rgb_map = torch.sum(weight[..., None] * rgb, -2)
        if not is_relight:
            if white_bg or (is_train and torch.rand((1,)) < 0.5):
                depth_map = depth_map + (1. - acc_map) * rays_chunk[..., -1]
                rgb_map = rgb_map + (1. - acc_map[..., None])
            return rgb_map, depth_map, None, \
                   None, None, None, \
                   acc_map, None, None, None, \
                   None, None
        else:
            normal_map = torch.sum(weight[..., None] * normal, -2)
            normals_diff_map = torch.sum(weight[..., None] * normals_diff, -2)
            normals_orientation_loss_map = torch.sum(weight[..., None] * normals_orientation_loss, -2)
            albedo_map = torch.sum(weight[..., None] * albedo, -2)  # [..., 3]
            roughness_map = torch.sum(weight[..., None] * roughness, -2)  # [..., ]
            fresnel_map = torch.zeros_like(albedo_map).fill_(self.fixed_fresnel)  # [..., 3]
            albedo_smoothness_cost_map = torch.sum(weight[..., None] * albedo_smoothness_cost, -2)  # [..., 1]
            roughness_smoothness_cost_map = torch.sum(weight[..., None] * roughness_smoothness_cost, -2)  # [..., 1]
            albedo_smoothness_loss = torch.mean(albedo_smoothness_cost_map)
            roughness_smoothness_loss = torch.mean(roughness_smoothness_cost_map)
            if white_bg or (is_train and torch.rand((1,)) < 0.5):
                depth_map = depth_map + (1. - acc_map) * rays_chunk[..., -1]
                rgb_map = rgb_map + (1. - acc_map[..., None])
                normal_map = normal_map + (1 - acc_map[..., None]) * torch.tensor([0.0, 0.0, 1.0],
                                                                                  device=normal_map.device)  # Background normal
                # normal_map = normal_map
                albedo_map = albedo_map + (1 - acc_map[..., None])  # Albedo background should be white
                roughness_map = roughness_map + (1 - acc_map[..., None])
                fresnel_map = fresnel_map + (1 - acc_map[..., None])
            # tone mapping & gamma correction
            rgb_map = rgb_map.clamp(0, 1)
            # Tone mapping to make sure the output of self.renderModule() is in linear space,
            # and the rgb_map output of this forward() is in sRGB space.
            # By doing this, we can use the output of self.renderModule() to better
            # represent the indirect illumination, which is implemented in another function.
            if rgb_map.shape[0] > 0:
                rgb_map = linear2srgb_torch(rgb_map)
            albedo_map = albedo_map.clamp(0, 1)
            fresnel_map = fresnel_map.clamp(0, 1)
            roughness_map = roughness_map.clamp(0, 1)
            normal_map = safe_l2_normalize(normal_map, dim=-1)
            acc_mask = acc_map > 0.5  # where there may be intersected surface points
            return rgb_map, depth_map, normal_map, \
                   albedo_map, roughness_map, fresnel_map, \
                   acc_map, normals_diff_map, normals_orientation_loss_map, acc_mask, \
                   albedo_smoothness_loss, roughness_smoothness_loss
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
models/tensorBase_init.py | Python | import torch
import torch.nn
import torch.nn.functional as F
from .sh import eval_sh_bases
import numpy as np
import time
def positional_encoding(positions, freqs):
    """Fourier positional encoding.

    Scales each coordinate by powers of two (2**0 .. 2**(freqs-1)) and returns
    the concatenated [sin, cos] features, shape (..., 2 * freqs * C).
    """
    bands = (2 ** torch.arange(freqs).float()).to(positions.device)  # (F,)
    scaled = (positions[..., None] * bands).reshape(
        positions.shape[:-1] + (freqs * positions.shape[-1], ))  # (..., F*C)
    return torch.cat([torch.sin(scaled), torch.cos(scaled)], dim=-1)
def raw2alpha(sigma, dist):
    """Volume-rendering weights from raw densities.

    sigma, dist: [N_rays, N_samples]. Returns (alpha, weights, background
    transmittance T after the last sample).
    """
    alpha = 1. - torch.exp(-sigma * dist)
    leading_ones = torch.ones(alpha.shape[0], 1).to(alpha.device)
    # Cumulative transmittance; 1e-10 keeps the product from collapsing to zero.
    transmittance = torch.cumprod(
        torch.cat([leading_ones, 1. - alpha + 1e-10], -1), -1)
    weights = alpha * transmittance[:, :-1]  # [N_rays, N_samples]
    return alpha, weights, transmittance[:, -1:]
def SHRender(xyz_sampled, viewdirs, features):
    """Decode per-point features as degree-2 spherical-harmonic RGB for the given view dirs."""
    basis = eval_sh_bases(2, viewdirs)[:, None]
    coeffs = features.view(-1, 3, basis.shape[-1])
    return torch.relu(torch.sum(basis * coeffs, dim=-1) + 0.5)
def RGBRender(xyz_sampled, viewdirs, features):
    """Identity shading head: the stored features are already RGB."""
    return features
class AlphaGridMask(torch.nn.Module):
    """Occupancy grid used to skip empty space when sampling rays."""

    def __init__(self, device, aabb, alpha_volume):
        super(AlphaGridMask, self).__init__()
        self.device = device
        self.aabb = aabb.to(self.device)
        self.aabbSize = self.aabb[1] - self.aabb[0]
        self.invgridSize = 1.0 / self.aabbSize * 2
        # Stored as (1, 1, D, H, W) so it can be fed straight into F.grid_sample.
        self.alpha_volume = alpha_volume.view(1, 1, *alpha_volume.shape[-3:])
        self.gridSize = torch.LongTensor(
            [alpha_volume.shape[-1], alpha_volume.shape[-2], alpha_volume.shape[-3]]
        ).to(self.device)

    def sample_alpha(self, xyz_sampled):
        """Trilinearly sample the alpha grid at world-space points; returns a flat tensor."""
        grid_coords = self.normalize_coord(xyz_sampled)
        return F.grid_sample(self.alpha_volume, grid_coords.view(1, -1, 1, 1, 3),
                             align_corners=True).view(-1)

    def normalize_coord(self, xyz_sampled):
        """Map world coordinates inside the aabb to the [-1, 1] range grid_sample expects."""
        return (xyz_sampled - self.aabb[0]) * self.invgridSize - 1
class MLPRender_Fea(torch.nn.Module):
    """RGB decoder conditioned on features and view direction, both optionally PE-encoded."""

    def __init__(self, inChanel, viewpe=6, feape=6, featureC=128):
        super(MLPRender_Fea, self).__init__()
        # Raw features + raw viewdirs plus their positional encodings.
        self.in_mlpC = 2 * viewpe * 3 + 2 * feape * inChanel + 3 + inChanel
        self.viewpe = viewpe
        self.feape = feape
        layer1 = torch.nn.Linear(self.in_mlpC, featureC)
        layer2 = torch.nn.Linear(featureC, featureC)
        layer3 = torch.nn.Linear(featureC, 3)
        self.mlp = torch.nn.Sequential(layer1, torch.nn.ReLU(inplace=True),
                                       layer2, torch.nn.ReLU(inplace=True), layer3)
        torch.nn.init.constant_(self.mlp[-1].bias, 0)

    def forward(self, pts, viewdirs, features):
        """Return sigmoid-squashed RGB in (0, 1); `pts` is unused here."""
        parts = [features, viewdirs]
        if self.feape > 0:
            parts.append(positional_encoding(features, self.feape))
        if self.viewpe > 0:
            parts.append(positional_encoding(viewdirs, self.viewpe))
        return torch.sigmoid(self.mlp(torch.cat(parts, dim=-1)))
class MLPRender_PE(torch.nn.Module):
def __init__(self,inChanel, viewpe=6, pospe=6, featureC=128):
super(MLPRender_PE, self).__init__()
self.in_mlpC = (3+2*viewpe*3)+ (3+2*pospe*3) + inChanel #
self.viewpe = viewpe
self.pospe = pospe
layer1 = torch.nn.Linear(self.in_mlpC, featureC)
layer2 = torch.nn.Linear(featureC, featureC)
layer3 = torch.nn.Linear(featureC,3)
self.mlp = torch.nn.Sequential(layer1, torch.nn.ReLU(inplace=True), layer2, torch.nn.ReLU(inplace=True), layer3)
torch.nn.init.constant_(self.mlp[-1].bias, 0)
def forward(self, pts, viewdirs, features):
indata = [features, viewdirs]
if self.pospe > 0:
indata += [positional_encoding(pts, self.pospe)]
if self.viewpe > 0:
indata += [positional_encoding(viewdirs, self.viewpe)]
mlp_in = torch.cat(indata, dim=-1)
rgb = self.mlp(mlp_in)
rgb = torch.sigmoid(rgb)
return rgb
class MLPRender(torch.nn.Module):
    """RGB decoder conditioned on features plus (optionally PE-encoded) view direction."""

    def __init__(self, inChanel, viewpe=6, featureC=128):
        super(MLPRender, self).__init__()
        self.in_mlpC = (3 + 2 * viewpe * 3) + inChanel
        self.viewpe = viewpe
        layer1 = torch.nn.Linear(self.in_mlpC, featureC)
        layer2 = torch.nn.Linear(featureC, featureC)
        layer3 = torch.nn.Linear(featureC, 3)
        self.mlp = torch.nn.Sequential(layer1, torch.nn.ReLU(inplace=True),
                                       layer2, torch.nn.ReLU(inplace=True), layer3)
        torch.nn.init.constant_(self.mlp[-1].bias, 0)

    def forward(self, pts, viewdirs, features):
        """Return sigmoid-squashed RGB in (0, 1); `pts` is unused here."""
        parts = [features, viewdirs]
        if self.viewpe > 0:
            parts.append(positional_encoding(viewdirs, self.viewpe))
        return torch.sigmoid(self.mlp(torch.cat(parts, dim=-1)))
class TensorBase_Init(torch.nn.Module):
    """Base class for a tensor-factorized radiance field (TensoRF-style).

    Owns the scene bounding box, ray sampling, the alpha (occupancy) mask and
    the shading head.  The actual factorized volume is supplied by subclasses
    through the `init_svd_volume` / `compute_*feature` / `shrink` hooks, which
    are no-ops here.
    """

    def __init__(self, aabb, gridSize, device, density_n_comp = 8, appearance_n_comp = 24, app_dim = 27,
                    shadingMode = 'MLP_PE', alphaMask = None, near_far=[2.0,6.0],
                    density_shift = -10, alphaMask_thres=0.001, distance_scale=25, rayMarch_weight_thres=0.0001,
                    pos_pe = 6, view_pe = 6, fea_pe = 6, featureC=128, step_ratio=2.0,
                    fea2denseAct = 'softplus'):
        """Store configuration, derive step sizes and build the shading head.

        Args:
            aabb: (2, 3) scene bounding box [min, max].
            gridSize: resolution of the feature grid per axis.
            density_n_comp / appearance_n_comp: number of factor components.
            app_dim: appearance feature channels fed to the shading head.
            density_shift: added to the density feature before softplus
                (see feature2density), biasing initial density towards zero.
            alphaMask_thres: threshold used when (re)building the alpha mask.
            rayMarch_weight_thres: samples below this weight are not shaded.
            step_ratio: ray-march step length in units of the voxel size.
        """
        super(TensorBase_Init, self).__init__()
        self.density_n_comp = density_n_comp
        self.app_n_comp = appearance_n_comp
        self.app_dim = app_dim
        self.aabb = aabb
        self.alphaMask = alphaMask
        self.device=device
        self.density_shift = density_shift
        self.alphaMask_thres = alphaMask_thres
        self.distance_scale = distance_scale
        self.rayMarch_weight_thres = rayMarch_weight_thres
        self.fea2denseAct = fea2denseAct
        self.near_far = near_far
        self.step_ratio = step_ratio
        self.update_stepSize(gridSize)
        # Plane/line axis assignments for the VM factorization hooks.
        self.matMode = [[0,1], [0,2], [1,2]]
        self.vecMode = [2, 1, 0]
        self.comp_w = [1,1,1]
        self.init_svd_volume(gridSize[0], device)
        self.shadingMode, self.pos_pe, self.view_pe, self.fea_pe, self.featureC = shadingMode, pos_pe, view_pe, fea_pe, featureC
        self.init_render_func(shadingMode, pos_pe, view_pe, fea_pe, featureC, device)

    def init_render_func(self, shadingMode, pos_pe, view_pe, fea_pe, featureC, device):
        """Instantiate the shading head selected by `shadingMode`; exits on an unknown mode."""
        if shadingMode == 'MLP_PE':
            self.renderModule = MLPRender_PE(self.app_dim, view_pe, pos_pe, featureC).to(device)
        elif shadingMode == 'MLP_Fea':
            self.renderModule = MLPRender_Fea(self.app_dim, view_pe, fea_pe, featureC).to(device)
        elif shadingMode == 'MLP':
            self.renderModule = MLPRender(self.app_dim, view_pe, featureC).to(device)
        elif shadingMode == 'SH':
            self.renderModule = SHRender
        elif shadingMode == 'RGB':
            assert self.app_dim == 3
            self.renderModule = RGBRender
        else:
            print("Unrecognized shading module")
            exit()
        print("pos_pe", pos_pe, "view_pe", view_pe, "fea_pe", fea_pe)
        print(self.renderModule)

    def update_stepSize(self, gridSize):
        """Recompute voxel size, march step and sample count from the grid resolution."""
        print("aabb", self.aabb.view(-1))
        print("grid size", gridSize)
        self.aabbSize = self.aabb[1] - self.aabb[0]
        self.invaabbSize = 2.0/self.aabbSize
        self.gridSize= torch.LongTensor(gridSize).to(self.device)
        self.units=self.aabbSize / (self.gridSize-1)
        self.stepSize=torch.mean(self.units)*self.step_ratio
        # Diagonal of the box bounds the longest possible ray segment inside it.
        self.aabbDiag = torch.sqrt(torch.sum(torch.square(self.aabbSize)))
        self.nSamples=int((self.aabbDiag / self.stepSize).item()) + 1
        print("sampling step size: ", self.stepSize)
        print("sampling number: ", self.nSamples)

    def init_svd_volume(self, res, device):
        """Hook: subclasses create the factorized feature volume here."""
        pass

    def compute_features(self, xyz_sampled):
        """Hook: subclasses return per-point features."""
        pass

    def compute_densityfeature(self, xyz_sampled):
        """Hook: subclasses return the raw (pre-activation) density feature."""
        pass

    def compute_appfeature(self, xyz_sampled):
        """Hook: subclasses return the appearance feature fed to the shading head."""
        pass

    def normalize_coord(self, xyz_sampled):
        """Map world coordinates inside the aabb into the [-1, 1] grid space."""
        return (xyz_sampled-self.aabb[0]) * self.invaabbSize - 1

    def get_optparam_groups(self, lr_init_spatial = 0.02, lr_init_network = 0.001):
        """Hook: subclasses return optimizer parameter groups."""
        pass

    def get_kwargs(self):
        """Constructor kwargs needed to re-create this model (used by save/load)."""
        return {
            'aabb': self.aabb,
            'gridSize':self.gridSize.tolist(),
            'density_n_comp': self.density_n_comp,
            'appearance_n_comp': self.app_n_comp,
            'app_dim': self.app_dim,

            'density_shift': self.density_shift,
            'alphaMask_thres': self.alphaMask_thres,
            'distance_scale': self.distance_scale,
            'rayMarch_weight_thres': self.rayMarch_weight_thres,
            'fea2denseAct': self.fea2denseAct,

            'near_far': self.near_far,
            'step_ratio': self.step_ratio,

            'shadingMode': self.shadingMode,
            'pos_pe': self.pos_pe,
            'view_pe': self.view_pe,
            'fea_pe': self.fea_pe,
            'featureC': self.featureC
        }

    def save(self, path):
        """Serialize kwargs + state dict; the alpha mask is bit-packed to save space."""
        kwargs = self.get_kwargs()
        ckpt = {'kwargs': kwargs, 'state_dict': self.state_dict()}
        if self.alphaMask is not None:
            alpha_volume = self.alphaMask.alpha_volume.bool().cpu().numpy()
            ckpt.update({'alphaMask.shape':alpha_volume.shape})
            ckpt.update({'alphaMask.mask':np.packbits(alpha_volume.reshape(-1))})
            ckpt.update({'alphaMask.aabb': self.alphaMask.aabb.cpu()})
        torch.save(ckpt, path)

    def load(self, ckpt):
        """Restore state dict and, if present, unpack the bit-packed alpha mask."""
        if 'alphaMask.aabb' in ckpt.keys():
            length = np.prod(ckpt['alphaMask.shape'])
            alpha_volume = torch.from_numpy(np.unpackbits(ckpt['alphaMask.mask'])[:length].reshape(ckpt['alphaMask.shape']))
            self.alphaMask = AlphaGridMask(self.device, ckpt['alphaMask.aabb'].to(self.device), alpha_volume.float().to(self.device))
        self.load_state_dict(ckpt['state_dict'])

    def sample_ray_ndc(self, rays_o, rays_d, is_train=True, N_samples=-1):
        """Sample points uniformly in NDC depth between near and far.

        Returns (rays_pts, z_vals, in_bbox_mask); training adds per-sample jitter.
        """
        N_samples = N_samples if N_samples > 0 else self.nSamples
        near, far = self.near_far
        interpx = torch.linspace(near, far, N_samples).unsqueeze(0).to(rays_o)
        if is_train:
            interpx += torch.rand_like(interpx).to(rays_o) * ((far - near) / N_samples)

        rays_pts = rays_o[..., None, :] + rays_d[..., None, :] * interpx[..., None]
        mask_outbbox = ((self.aabb[0] > rays_pts) | (rays_pts > self.aabb[1])).any(dim=-1)
        return rays_pts, interpx, ~mask_outbbox

    def sample_ray(self, rays_o, rays_d, is_train=True, N_samples=-1):
        """March fixed-length steps starting where each ray first enters the aabb.

        Returns (rays_pts, z_vals, in_bbox_mask); training jitters the start offset.
        """
        N_samples = N_samples if N_samples>0 else self.nSamples
        stepsize = self.stepSize
        near, far = self.near_far
        # Avoid division by zero for axis-parallel rays.
        vec = torch.where(rays_d==0, torch.full_like(rays_d, 1e-6), rays_d)
        rate_a = (self.aabb[1] - rays_o) / vec
        rate_b = (self.aabb[0] - rays_o) / vec
        # Entry distance of each ray into the box (slab method).
        t_min = torch.minimum(rate_a, rate_b).amax(-1).clamp(min=near, max=far)

        rng = torch.arange(N_samples)[None].float()
        if is_train:
            rng = rng.repeat(rays_d.shape[-2],1)
            rng += torch.rand_like(rng[:,[0]])
        step = stepsize * rng.to(rays_o.device)
        interpx = (t_min[...,None] + step)

        rays_pts = rays_o[...,None,:] + rays_d[...,None,:] * interpx[...,None]
        mask_outbbox = ((self.aabb[0]>rays_pts) | (rays_pts>self.aabb[1])).any(dim=-1)

        return rays_pts, interpx, ~mask_outbbox

    def shrink(self, new_aabb, voxel_size):
        """Hook: subclasses crop the feature volume to a tighter aabb."""
        pass

    @torch.no_grad()
    def getDenseAlpha(self,gridSize=None):
        """Evaluate alpha on a dense grid over the aabb; returns (alpha, grid xyz)."""
        gridSize = self.gridSize if gridSize is None else gridSize

        samples = torch.stack(torch.meshgrid(
            torch.linspace(0, 1, gridSize[0]),
            torch.linspace(0, 1, gridSize[1]),
            torch.linspace(0, 1, gridSize[2]),
        ), -1).to(self.device)
        dense_xyz = self.aabb[0] * (1-samples) + self.aabb[1] * samples

        alpha = torch.zeros_like(dense_xyz[...,0])
        # Evaluate one x-slab at a time to bound peak memory.
        for i in range(gridSize[0]):
            alpha[i] = self.compute_alpha(dense_xyz[i].view(-1,3), self.stepSize).view((gridSize[1], gridSize[2]))
        return alpha, dense_xyz

    @torch.no_grad()
    def updateAlphaMask(self, gridSize=(200,200,200)):
        """Rebuild the binary alpha mask and return a tightened bounding box."""
        alpha, dense_xyz = self.getDenseAlpha(gridSize)
        dense_xyz = dense_xyz.transpose(0,2).contiguous()
        alpha = alpha.clamp(0,1).transpose(0,2).contiguous()[None,None]
        total_voxels = gridSize[0] * gridSize[1] * gridSize[2]

        # Dilate occupancy so thin surfaces are not culled, then binarize.
        ks = 3
        alpha = F.max_pool3d(alpha, kernel_size=ks, padding=ks // 2, stride=1).view(gridSize[::-1])
        alpha[alpha>=self.alphaMask_thres] = 1
        alpha[alpha<self.alphaMask_thres] = 0

        self.alphaMask = AlphaGridMask(self.device, self.aabb, alpha)

        valid_xyz = dense_xyz[alpha>0.5]

        xyz_min = valid_xyz.amin(0)
        xyz_max = valid_xyz.amax(0)

        new_aabb = torch.stack((xyz_min, xyz_max))

        total = torch.sum(alpha)
        print(f"bbox: {xyz_min, xyz_max} alpha rest %%%f"%(total/total_voxels*100))
        return new_aabb

    @torch.no_grad()
    def filtering_rays(self, all_rays, all_rgbs, N_samples=256, chunk=10240*5, bbox_only=False):
        """Drop rays that never touch the scene (aabb miss, or all-zero alpha).

        Returns the filtered (rays, rgbs) pair; processed in chunks on device.
        """
        print('========> filtering rays ...')
        tt = time.time()

        N = torch.tensor(all_rays.shape[:-1]).prod()

        mask_filtered = []
        idx_chunks = torch.split(torch.arange(N), chunk)
        for idx_chunk in idx_chunks:
            rays_chunk = all_rays[idx_chunk].to(self.device)

            rays_o, rays_d = rays_chunk[..., :3], rays_chunk[..., 3:6]
            if bbox_only:
                # Slab-method ray/aabb intersection: hit iff exit > entry.
                vec = torch.where(rays_d == 0, torch.full_like(rays_d, 1e-6), rays_d)
                rate_a = (self.aabb[1] - rays_o) / vec
                rate_b = (self.aabb[0] - rays_o) / vec
                t_min = torch.minimum(rate_a, rate_b).amax(-1)#.clamp(min=near, max=far)
                t_max = torch.maximum(rate_a, rate_b).amin(-1)#.clamp(min=near, max=far)
                mask_inbbox = t_max > t_min

            else:
                xyz_sampled, _,_ = self.sample_ray(rays_o, rays_d, N_samples=N_samples, is_train=False)
                # Keep the ray if any sample along it has non-zero masked alpha.
                mask_inbbox= (self.alphaMask.sample_alpha(xyz_sampled).view(xyz_sampled.shape[:-1]) > 0).any(-1)

            mask_filtered.append(mask_inbbox.cpu())

        mask_filtered = torch.cat(mask_filtered).view(all_rgbs.shape[:-1])

        print(f'Ray filtering done! takes {time.time()-tt} s. ray mask ratio: {torch.sum(mask_filtered) / N}')
        return all_rays[mask_filtered], all_rgbs[mask_filtered]

    def feature2density(self, density_features):
        """Map the raw density feature to a non-negative density."""
        if self.fea2denseAct == "softplus":
            # density_shift biases initial densities towards (near) zero.
            return F.softplus(density_features+self.density_shift)
        elif self.fea2denseAct == "relu":
            return F.relu(density_features)

    def compute_alpha(self, xyz_locs, length=1):
        """Alpha at arbitrary points for a segment of `length`; masked points get 0."""
        if self.alphaMask is not None:
            alphas = self.alphaMask.sample_alpha(xyz_locs)
            alpha_mask = alphas > 0
        else:
            alpha_mask = torch.ones_like(xyz_locs[:,0], dtype=bool)

        sigma = torch.zeros(xyz_locs.shape[:-1], device=xyz_locs.device)

        if alpha_mask.any():
            xyz_sampled = self.normalize_coord(xyz_locs[alpha_mask])
            sigma_feature = self.compute_densityfeature(xyz_sampled)
            validsigma = self.feature2density(sigma_feature)
            sigma[alpha_mask] = validsigma

        alpha = 1 - torch.exp(-sigma*length).view(xyz_locs.shape[:-1])

        return alpha

    def forward(self, rays_chunk, white_bg=True, is_train=False, ndc_ray=False, N_samples=-1):
        """Volume-render a chunk of rays.

        rays_chunk: [N, >=7] with origin (0:3), direction (3:6) and, in the
        last column, the depth used to fill in the background.
        Returns (rgb_map, depth_map).
        """
        # sample points
        viewdirs = rays_chunk[:, 3:6]
        if ndc_ray:
            xyz_sampled, z_vals, ray_valid = self.sample_ray_ndc(rays_chunk[:, :3], viewdirs, is_train=is_train,N_samples=N_samples)
            dists = torch.cat((z_vals[:, 1:] - z_vals[:, :-1], torch.zeros_like(z_vals[:, :1])), dim=-1)
            # In NDC, scale segment lengths by the unnormalized direction norm.
            rays_norm = torch.norm(viewdirs, dim=-1, keepdim=True)
            dists = dists * rays_norm
            viewdirs = viewdirs / rays_norm
        else:
            xyz_sampled, z_vals, ray_valid = self.sample_ray(rays_chunk[:, :3], viewdirs, is_train=is_train,N_samples=N_samples)
            dists = torch.cat((z_vals[:, 1:] - z_vals[:, :-1], torch.zeros_like(z_vals[:, :1])), dim=-1)
        viewdirs = viewdirs.view(-1, 1, 3).expand(xyz_sampled.shape)

        if self.alphaMask is not None:
            # Additionally invalidate samples the alpha mask marks as empty.
            alphas = self.alphaMask.sample_alpha(xyz_sampled[ray_valid])
            alpha_mask = alphas > 0
            ray_invalid = ~ray_valid
            ray_invalid[ray_valid] |= (~alpha_mask)
            ray_valid = ~ray_invalid


        sigma = torch.zeros(xyz_sampled.shape[:-1], device=xyz_sampled.device)
        rgb = torch.zeros((*xyz_sampled.shape[:2], 3), device=xyz_sampled.device)

        if ray_valid.any():
            xyz_sampled = self.normalize_coord(xyz_sampled)
            sigma_feature = self.compute_densityfeature(xyz_sampled[ray_valid])

            validsigma = self.feature2density(sigma_feature)
            sigma[ray_valid] = validsigma


        alpha, weight, bg_weight = raw2alpha(sigma, dists * self.distance_scale)

        # Only shade samples that contribute noticeably to the ray color.
        app_mask = weight > self.rayMarch_weight_thres

        if app_mask.any():
            app_features = self.compute_appfeature(xyz_sampled[app_mask])
            valid_rgbs = self.renderModule(xyz_sampled[app_mask], viewdirs[app_mask], app_features)
            rgb[app_mask] = valid_rgbs

        acc_map = torch.sum(weight, -1)
        rgb_map = torch.sum(weight[..., None] * rgb, -2)

        # White background always, or randomly half the time during training.
        if white_bg or (is_train and torch.rand((1,))<0.5):
            rgb_map = rgb_map + (1. - acc_map[..., None])


        rgb_map = rgb_map.clamp(0,1)

        with torch.no_grad():
            depth_map = torch.sum(weight * z_vals, -1)
            depth_map = depth_map + (1. - acc_map) * rays_chunk[..., -1]

        return rgb_map, depth_map # rgb, sigma, alpha, weight, bg_weight
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
models/tensorBase_rotated_lights.py | Python | import torch
import torch.nn as nn
import torch.nn.functional as F
from .sh import eval_sh_bases
import numpy as np
import time
from models.relight_utils import linear2srgb_torch
from dataLoader.ray_utils import safe_l2_normalize
# from torch_efficient_distloss import eff_distloss, eff_distloss_native, flatten_eff_distloss
def positional_encoding(positions, freqs):
    """Fourier positional encoding.

    Scales each coordinate by powers of two (2**0 .. 2**(freqs-1)) and returns
    the concatenated [sin, cos] features, shape (..., 2 * freqs * C).
    """
    bands = (2 ** torch.arange(freqs).float()).to(positions.device)  # (F,)
    scaled = (positions[..., None] * bands).reshape(
        positions.shape[:-1] + (freqs * positions.shape[-1],))  # (..., F*C)
    return torch.cat([torch.sin(scaled), torch.cos(scaled)], dim=-1)
def raw2alpha(sigma, dist):
    """Volume-rendering weights from raw densities.

    sigma, dist: [N_rays, N_samples]. Returns (alpha, weights, background
    transmittance T after the last sample).
    """
    alpha = 1. - torch.exp(-sigma * dist)
    leading_ones = torch.ones(alpha.shape[0], 1).to(alpha.device)
    # Cumulative transmittance; 1e-10 keeps the product from collapsing to zero.
    transmittance = torch.cumprod(
        torch.cat([leading_ones, 1. - alpha + 1e-10], -1), -1)
    weights = alpha * transmittance[:, :-1]  # [N_rays, N_samples]
    return alpha, weights, transmittance[:, -1:]
def SHRender(xyz_sampled, viewdirs, features):
    """Decode per-point features as degree-2 spherical-harmonic RGB for the given view dirs."""
    basis = eval_sh_bases(2, viewdirs)[:, None]
    coeffs = features.view(-1, 3, basis.shape[-1])
    return torch.relu(torch.sum(basis * coeffs, dim=-1) + 0.5)
def RGBRender(xyz_sampled, viewdirs, features):
    """Identity shading head: the stored features are already RGB."""
    return features
def compute_energy(lgtSGs):
    """Total emitted energy of each spherical Gaussian light.

    lgtSGs: [M, 7] = lobe (3) + lambda (1) + mu (3).  The closed-form integral
    of mu * exp(lambda * (cos - 1)) over the sphere is
    mu * 2*pi/lambda * (1 - exp(-2*lambda)).
    """
    lam = torch.abs(lgtSGs[:, 3:4])
    mu = torch.abs(lgtSGs[:, 4:])
    return mu * 2.0 * np.pi / lam * (1.0 - torch.exp(-2.0 * lam))
def fibonacci_sphere(samples=1):
    '''
    Uniformly distribute `samples` points on the unit sphere via the
    golden-angle (Fibonacci lattice) spiral.

    reference: https://github.com/Kai-46/PhySG/blob/master/code/model/sg_envmap_material.py

    Returns:
        (samples, 3) numpy array of unit vectors.
    '''
    # Fix: the original computed `i / float(samples - 1)`, which raises
    # ZeroDivisionError for the default samples=1.  A single sample is simply
    # placed at the pole (z = 1), matching the first point of the lattice.
    if samples <= 1:
        return np.array([[0.0, 0.0, 1.0]])
    points = []
    phi = np.pi * (3. - np.sqrt(5.))  # golden angle in radians
    for i in range(samples):
        z = 1 - (i / float(samples - 1)) * 2  # z goes from 1 to -1
        radius = np.sqrt(1 - z * z)           # circle radius at height z
        theta = phi * i                       # golden angle increment

        x = np.cos(theta) * radius
        y = np.sin(theta) * radius
        points.append([x, y, z])
    points = np.array(points)
    return points
def render_envmap_sg(lgtSGs, viewdirs):
    """Evaluate a bank of spherical Gaussians along view directions.

    lgtSGs: [M, 7] = lobe (3) + lambda (1) + mu (3); viewdirs: [..., 3].
    Returns [..., 3] RGB radiance.
    """
    viewdirs = viewdirs.to(lgtSGs.device)
    viewdirs = viewdirs.unsqueeze(-2)  # [..., 1, 3]

    # Broadcast the [M, 7] SG bank across the leading dims of viewdirs.
    lead_shape = list(viewdirs.shape[:-2])
    num_sgs = lgtSGs.shape[0]
    lgtSGs = lgtSGs.view([1, ] * len(lead_shape) + [num_sgs, 7]).expand(lead_shape + [num_sgs, 7])

    lobes = lgtSGs[..., :3] / (torch.norm(lgtSGs[..., :3], dim=-1, keepdim=True))
    lambdas = torch.abs(lgtSGs[..., 3:4])
    mus = torch.abs(lgtSGs[..., -3:])

    # Each SG contributes mu * exp(lambda * (dot(v, lobe) - 1)); sum over M.
    cosines = torch.sum(viewdirs * lobes, dim=-1, keepdim=True)
    per_sg = mus * torch.exp(lambdas * (cosines - 1.))  # [..., M, 3]
    return torch.sum(per_sg, dim=-2)  # [..., 3]
def compute_envmap(lgtSGs, H, W, tensorfactor):
    '''
    Render the H x W environment map from spherical-Gaussian light sources,
    evaluated along the model's fixed per-pixel view directions.
    '''
    rgb = render_envmap_sg(lgtSGs, tensorfactor.fixed_viewdirs)
    return rgb.reshape((H, W, 3))
class AlphaGridMask(torch.nn.Module):
    """Occupancy grid used to skip empty space when sampling rays."""

    def __init__(self, device, aabb, alpha_volume):
        super(AlphaGridMask, self).__init__()
        self.device = device
        self.aabb = aabb.to(self.device)
        self.aabbSize = self.aabb[1] - self.aabb[0]
        self.invgridSize = 1.0 / self.aabbSize * 2
        # Stored as (1, 1, D, H, W) so it can be fed straight into F.grid_sample.
        self.alpha_volume = alpha_volume.view(1, 1, *alpha_volume.shape[-3:])
        self.gridSize = torch.LongTensor(
            [alpha_volume.shape[-1], alpha_volume.shape[-2], alpha_volume.shape[-3]]
        ).to(self.device)

    def sample_alpha(self, xyz_sampled):
        """Trilinearly sample the alpha grid at world-space points; returns a flat tensor."""
        grid_coords = self.normalize_coord(xyz_sampled)
        return F.grid_sample(self.alpha_volume, grid_coords.view(1, -1, 1, 1, 3),
                             align_corners=True).view(-1)

    def normalize_coord(self, xyz_sampled):
        """Map world coordinates inside the aabb to the [-1, 1] range grid_sample expects."""
        return (xyz_sampled - self.aabb[0]) * self.invgridSize - 1
class MLPRender_Fea(torch.nn.Module):
    """RGB decoder conditioned on features and view direction, both optionally PE-encoded."""

    def __init__(self, inChanel, viewpe=6, feape=6, featureC=128):
        super(MLPRender_Fea, self).__init__()
        # Raw features + raw viewdirs plus their positional encodings.
        self.in_mlpC = 2 * viewpe * 3 + 2 * feape * inChanel + 3 + inChanel
        self.viewpe = viewpe
        self.feape = feape
        layer1 = torch.nn.Linear(self.in_mlpC, featureC)
        layer2 = torch.nn.Linear(featureC, featureC)
        layer3 = torch.nn.Linear(featureC, 3)
        self.mlp = torch.nn.Sequential(layer1, torch.nn.ReLU(inplace=True),
                                       layer2, torch.nn.ReLU(inplace=True), layer3)
        torch.nn.init.constant_(self.mlp[-1].bias, 0)

    def forward(self, pts, viewdirs, features):
        """Return sigmoid-squashed RGB in (0, 1); `pts` is unused here."""
        parts = [features, viewdirs]
        if self.feape > 0:
            parts.append(positional_encoding(features, self.feape))
        if self.viewpe > 0:
            parts.append(positional_encoding(viewdirs, self.viewpe))
        return torch.sigmoid(self.mlp(torch.cat(parts, dim=-1)))
class MLPBRDF_Fea(torch.nn.Module):
    """BRDF-property decoder from (features, viewdirs) with optional PE; output squashed by `act_net`."""

    def __init__(self, inChanel, viewpe=6, feape=6, featureC=128, outc=1, act_net=nn.Sigmoid()):
        super(MLPBRDF_Fea, self).__init__()
        self.in_mlpC = 2 * viewpe * 3 + 2 * feape * inChanel + 3 + inChanel
        self.viewpe = viewpe
        self.feape = feape
        self.outc = outc
        self.act_net = act_net
        layer1 = torch.nn.Linear(self.in_mlpC, featureC)
        layer2 = torch.nn.Linear(featureC, featureC)
        layer3 = torch.nn.Linear(featureC, outc)
        self.mlp = torch.nn.Sequential(layer1, torch.nn.ReLU(inplace=True),
                                       layer2, torch.nn.ReLU(inplace=True), layer3)
        torch.nn.init.constant_(self.mlp[-1].bias, 0)

    def forward(self, pts, viewdirs, features):
        """Return `outc` BRDF channels passed through `act_net`; `pts` is unused here."""
        parts = [features, viewdirs]
        if self.feape > 0:
            parts.append(positional_encoding(features, self.feape))
        if self.viewpe > 0:
            parts.append(positional_encoding(viewdirs, self.viewpe))
        return self.act_net(self.mlp(torch.cat(parts, dim=-1)))
class MLPBRDF_PEandFeature(torch.nn.Module):
    """BRDF-property decoder from (position, features), both optionally PE-encoded."""

    def __init__(self, inChanel, pospe=6, feape=6, featureC=128, outc=1, act_net=nn.Sigmoid()):
        super(MLPBRDF_PEandFeature, self).__init__()
        self.in_mlpC = 2 * pospe * 3 + 2 * feape * inChanel + 3 + inChanel
        self.pospe = pospe
        self.feape = feape
        self.outc = outc
        self.act_net = act_net
        layer1 = torch.nn.Linear(self.in_mlpC, featureC)
        layer2 = torch.nn.Linear(featureC, featureC)
        layer3 = torch.nn.Linear(featureC, outc)
        self.mlp = torch.nn.Sequential(layer1, torch.nn.ReLU(inplace=True),
                                       layer2, torch.nn.ReLU(inplace=True), layer3)
        torch.nn.init.constant_(self.mlp[-1].bias, 0)

    def forward(self, pts, features):
        """Return `outc` channels passed through `act_net`."""
        parts = [features, pts]
        if self.feape > 0:
            parts.append(positional_encoding(features, self.feape))
        if self.pospe > 0:
            parts.append(positional_encoding(pts, self.pospe))
        return self.act_net(self.mlp(torch.cat(parts, dim=-1)))
class MLPNormal_normal_and_xyz(torch.nn.Module):
    """Normal decoder conditioned on raw position, a base normal and (optionally PE-encoded) features."""

    def __init__(self, inChanel, feape=6, featureC=128, outc=1, act_net=nn.Sigmoid()):
        super(MLPNormal_normal_and_xyz, self).__init__()
        # features (+ PE) + xyz (3) + base normal (3).
        self.in_mlpC = 2 * feape * inChanel + inChanel + 3 + 3
        self.feape = feape
        self.outc = outc
        self.act_net = act_net
        layer1 = torch.nn.Linear(self.in_mlpC, featureC)
        layer2 = torch.nn.Linear(featureC, featureC)
        layer3 = torch.nn.Linear(featureC, outc)
        self.mlp = torch.nn.Sequential(layer1, torch.nn.ReLU(inplace=True),
                                       layer2, torch.nn.ReLU(inplace=True), layer3)
        torch.nn.init.constant_(self.mlp[-1].bias, 0)

    def forward(self, pts, normal, features):
        """Return `outc` channels passed through `act_net`."""
        parts = [pts, normal, features]
        if self.feape > 0:
            parts.append(positional_encoding(features, self.feape))
        return self.act_net(self.mlp(torch.cat(parts, dim=-1)))
class MLPNormal_normal_and_PExyz(torch.nn.Module):
    """Normal decoder conditioned on (optionally PE-encoded) position, a base normal and features."""

    def __init__(self, inChanel, pospe=6, feape=6, featureC=128, outc=1, act_net=nn.Sigmoid()):
        super(MLPNormal_normal_and_PExyz, self).__init__()
        # PE(xyz) + PE(features) + xyz (3) + features + base normal (3).
        self.in_mlpC = 2 * pospe * 3 + 2 * feape * inChanel + 3 + inChanel + 3
        self.feape = feape
        self.pospe = pospe
        self.outc = outc
        self.act_net = act_net
        layer1 = torch.nn.Linear(self.in_mlpC, featureC)
        layer2 = torch.nn.Linear(featureC, featureC)
        layer3 = torch.nn.Linear(featureC, outc)
        self.mlp = torch.nn.Sequential(layer1, torch.nn.ReLU(inplace=True),
                                       layer2, torch.nn.ReLU(inplace=True), layer3)
        torch.nn.init.constant_(self.mlp[-1].bias, 0)

    def forward(self, pts, normal, features):
        """Return `outc` channels passed through `act_net`."""
        parts = [pts, normal, features]
        if self.feape > 0:
            parts.append(positional_encoding(features, self.feape))
        if self.pospe > 0:
            parts.append(positional_encoding(pts, self.pospe))
        return self.act_net(self.mlp(torch.cat(parts, dim=-1)))
class MLPBRDF_onlyFeature(torch.nn.Module):
    """BRDF-property decoder that only consumes the (optionally PE-encoded) feature vector."""

    def __init__(self, inChanel, pospe=6, feape=6, featureC=128, outc=1, act_net=nn.Sigmoid()):
        super(MLPBRDF_onlyFeature, self).__init__()
        self.in_mlpC = 2 * feape * inChanel + inChanel
        self.pospe = pospe  # kept for interface parity; not used by this head
        self.feape = feape
        self.outc = outc
        self.act_net = act_net
        layer1 = torch.nn.Linear(self.in_mlpC, featureC)
        layer2 = torch.nn.Linear(featureC, featureC)
        layer3 = torch.nn.Linear(featureC, outc)
        self.mlp = torch.nn.Sequential(layer1, torch.nn.ReLU(inplace=True),
                                       layer2, torch.nn.ReLU(inplace=True), layer3)
        torch.nn.init.constant_(self.mlp[-1].bias, 0)

    def forward(self, pts, viewdirs, features):
        """Return `outc` channels passed through `act_net`; `pts` and `viewdirs` are unused."""
        parts = [features]
        if self.feape > 0:
            parts.append(positional_encoding(features, self.feape))
        return self.act_net(self.mlp(torch.cat(parts, dim=-1)))
class MLPRender_PE(torch.nn.Module):
def __init__(self, inChanel, viewpe=6, pospe=6, featureC=128):
super(MLPRender_PE, self).__init__()
self.in_mlpC = (3 + 2 * viewpe * 3) + (3 + 2 * pospe * 3) + inChanel #
self.viewpe = viewpe
self.pospe = pospe
layer1 = torch.nn.Linear(self.in_mlpC, featureC)
layer2 = torch.nn.Linear(featureC, featureC)
layer3 = torch.nn.Linear(featureC, 3)
self.mlp = torch.nn.Sequential(layer1, torch.nn.ReLU(inplace=True), layer2, torch.nn.ReLU(inplace=True), layer3)
torch.nn.init.constant_(self.mlp[-1].bias, 0)
def forward(self, pts, viewdirs, features):
indata = [features, viewdirs]
if self.pospe > 0:
indata += [positional_encoding(pts, self.pospe)]
if self.viewpe > 0:
indata += [positional_encoding(viewdirs, self.viewpe)]
mlp_in = torch.cat(indata, dim=-1)
rgb = self.mlp(mlp_in)
rgb = torch.sigmoid(rgb)
return rgb
class MLPRender(torch.nn.Module):
    """RGB decoder conditioned on features plus (optionally PE-encoded) view direction."""

    def __init__(self, inChanel, viewpe=6, featureC=128):
        super(MLPRender, self).__init__()
        self.in_mlpC = (3 + 2 * viewpe * 3) + inChanel
        self.viewpe = viewpe
        layer1 = torch.nn.Linear(self.in_mlpC, featureC)
        layer2 = torch.nn.Linear(featureC, featureC)
        layer3 = torch.nn.Linear(featureC, 3)
        self.mlp = torch.nn.Sequential(layer1, torch.nn.ReLU(inplace=True),
                                       layer2, torch.nn.ReLU(inplace=True), layer3)
        torch.nn.init.constant_(self.mlp[-1].bias, 0)

    def forward(self, pts, viewdirs, features):
        """Return sigmoid-squashed RGB in (0, 1); `pts` is unused here."""
        parts = [features, viewdirs]
        if self.viewpe > 0:
            parts.append(positional_encoding(viewdirs, self.viewpe))
        return torch.sigmoid(self.mlp(torch.cat(parts, dim=-1)))
class TensorBase(torch.nn.Module):
    def __init__(self, aabb, gridSize, device,
                 density_n_comp=8,
                 appearance_n_comp=24,
                 app_dim=27,
                 shadingMode='MLP_PE',
                 alphaMask=None,
                 near_far=[2.0, 6.0],
                 density_shift=-10,
                 alphaMask_thres=0.001,
                 distance_scale=25,
                 rayMarch_weight_thres=0.0001,
                 pos_pe=2, view_pe=2, fea_pe=2,
                 featureC=128,
                 step_ratio=2.0,
                 fea2denseAct='softplus',
                 normals_kind="purely_predicted",
                 light_rotation=['000', '120', '240'],
                 envmap_w=32,
                 envmap_h=16,
                 light_kind='pixel',
                 dataset= None,
                 numLgtSGs=128,
                 fixed_fresnel= 0.04,
                 **kwargs
                 ):
        """Store configuration, derive step sizes, and build shading/BRDF heads and lights.

        Beyond the TensoRF-style options, this variant adds:
            normals_kind: how surface normals are obtained (purely predicted,
                derived+predicted, or residue prediction) — see init_render_func.
            light_rotation: list of light azimuth rotations in degrees (strings),
                one learned rotation matrix per entry (see init_light).
            envmap_w / envmap_h: environment-map resolution.
            light_kind: 'pixel' (per-pixel learned envmap) or 'sg'
                (mixture of spherical Gaussians), set up in init_light.
            fixed_fresnel: constant Fresnel coefficient used by the renderer.
        """
        super(TensorBase, self).__init__()
        self.density_n_comp = density_n_comp
        self.app_n_comp = appearance_n_comp
        self.app_dim = app_dim
        self.aabb = aabb
        self.alphaMask = alphaMask
        self.device = device
        self.density_shift = density_shift
        self.alphaMask_thres = alphaMask_thres
        self.distance_scale = distance_scale
        self.rayMarch_weight_thres = rayMarch_weight_thres
        self.fea2denseAct = fea2denseAct
        self.near_far = near_far
        self.step_ratio = step_ratio
        self.shadingMode, self.normals_kind, self.pos_pe, self.view_pe, self.fea_pe, self.featureC = shadingMode, normals_kind, pos_pe, view_pe, fea_pe, featureC
        self.light_num = len(light_rotation)
        self.light_rotation = [int(rotation) for rotation in light_rotation]
        self.envmap_w = envmap_w
        self.envmap_h = envmap_h
        self.dataset = dataset
        self.light_kind = light_kind
        self.numLgtSGs = numLgtSGs
        self.fixed_fresnel = fixed_fresnel

        self.update_stepSize(gridSize)

        # Plane/line axis assignments for the VM factorization hooks.
        self.matMode = [[0, 1], [0, 2], [1, 2]]
        self.vecMode = [2, 1, 0]
        self.comp_w = [1, 1, 1]

        self.init_svd_volume(gridSize[0], device)
        self.init_render_func(shadingMode, pos_pe, view_pe, fea_pe, featureC, device)
        self.init_light()
    def init_render_func(self, shadingMode, pos_pe, view_pe, fea_pe, featureC, device):
        """Build the RGB shading head plus the normal and BRDF decoder heads.

        The normal head depends on `self.normals_kind` (tanh output, 3 channels);
        the BRDF head always predicts 4 sigmoid channels: albedo (3) + roughness (1).
        Exits on an unrecognized `shadingMode`.
        """
        if shadingMode == 'MLP_PE':
            self.renderModule = MLPRender_PE(self.app_dim, view_pe, pos_pe, featureC).to(device)
        elif shadingMode == 'MLP_Fea':
            self.renderModule = MLPRender_Fea(self.app_dim, view_pe, fea_pe, featureC).to(device)
        elif shadingMode == 'MLP':
            self.renderModule = MLPRender(self.app_dim, view_pe, featureC).to(device)
        elif shadingMode == 'SH':
            self.renderModule = SHRender
        elif shadingMode == 'RGB':
            assert self.app_dim == 3
            self.renderModule = RGBRender
        else:
            print("Unrecognized shading module")
            exit()
        print("pos_pe", pos_pe, "view_pe", view_pe, "fea_pe", fea_pe)

        if self.normals_kind == "purely_predicted" or self.normals_kind == "derived_plus_predicted":
            self.renderModule_normal = MLPBRDF_PEandFeature(self.app_dim, pos_pe, fea_pe, featureC, outc=3,
                                                            act_net=nn.Tanh()).to(device)
        elif self.normals_kind == "residue_prediction":
            # Residue head also sees the derived normal as input.
            self.renderModule_normal = MLPNormal_normal_and_PExyz(self.app_dim, pos_pe, fea_pe, featureC, outc=3,
                                                                  act_net=nn.Tanh()).to(device)

        # 4 = 3 + 1: albedo + roughness
        self.renderModule_brdf= MLPBRDF_PEandFeature(self.app_dim, pos_pe, fea_pe, featureC, outc=4,
                                                     act_net=nn.Sigmoid()).to(device)
        print("renderModule_brdf", self.renderModule_brdf)
    def generate_envir_map_dir(self, envmap_h, envmap_w, is_jittor=False):
        """Build per-pixel view directions and solid-angle weights for an equirect envmap.

        Pixel centers are placed on a lat/long grid; `is_jittor` adds per-pixel
        random jitter within each cell.  The area weights are proportional to
        sin of the colatitude and normalized so they sum to 4*pi.

        Returns:
            light_area_weight: [envmap_h * envmap_w] float32 solid-angle weights.
            view_dirs: [envmap_h * envmap_w, 3] unit direction per pixel.
        """
        lat_step_size = np.pi / envmap_h
        lng_step_size = 2 * np.pi / envmap_w
        phi, theta = torch.meshgrid([torch.linspace(np.pi / 2 - 0.5 * lat_step_size, -np.pi / 2 + 0.5 * lat_step_size, envmap_h),
                                    torch.linspace(np.pi - 0.5 * lng_step_size, -np.pi + 0.5 * lng_step_size, envmap_w)], indexing='ij')

        sin_phi = torch.sin(torch.pi / 2 - phi)  # [envH, envW]
        light_area_weight = 4 * torch.pi * sin_phi / torch.sum(sin_phi)  # [envH, envW]
        assert 0 not in light_area_weight, "There shouldn't be light pixel that doesn't contribute"
        light_area_weight = light_area_weight.to(torch.float32).reshape(-1) # [envH * envW, ]


        if is_jittor:
            phi_jittor, theta_jittor = lat_step_size * (torch.rand_like(phi) - 0.5), lng_step_size * (torch.rand_like(theta) - 0.5)
            phi, theta = phi + phi_jittor, theta + theta_jittor

        view_dirs = torch.stack([  torch.cos(theta) * torch.cos(phi),
                                torch.sin(theta) * torch.cos(phi),
                                torch.sin(phi)], dim=-1).view(-1, 3)    # [envH * envW, 3]

        return light_area_weight, view_dirs
    def init_light(self):
        """Initialize the learnable light representation and per-rotation matrices.

        'pixel' lights are a learnable RGB value per envmap pixel; 'sg' lights
        are `numLgtSGs` spherical Gaussians (lobe + lambda + mu) with energy
        normalized at init and lobes placed deterministically on a Fibonacci
        sphere (mirrored across the two halves of the bank).  Also precomputes
        one z-axis rotation matrix per configured light rotation angle.
        """
        self.light_area_weight, self.fixed_viewdirs = self.generate_envir_map_dir(self.envmap_h, self.envmap_w)

        nlights = self.envmap_w * self.envmap_h

        if self.light_kind == 'pixel':
            self._light_rgbs = torch.nn.Parameter(torch.FloatTensor(nlights, 3).uniform_(0, 3).to(torch.float32)) # [envH * envW, 3]
        elif self.light_kind == 'sg':
            self.lgtSGs = nn.Parameter(torch.randn(self.numLgtSGs, 7), requires_grad=True)   # [M, 7]; lobe + lambda + mu
            self.lgtSGs.data[:, -2:] = self.lgtSGs.data[:, -3:-2].expand((-1, 2))
            # make sure lambda is not too close to zero
            self.lgtSGs.data[:, 3:4] = 10. + torch.abs(self.lgtSGs.data[:, 3:4] * 20.)
            # init envmap energy
            energy = compute_energy(self.lgtSGs.data)
            self.lgtSGs.data[:, 4:] = torch.abs(self.lgtSGs.data[:, 4:]) / torch.sum(energy, dim=0, keepdim=True) * 2. * np.pi * 0.8
            energy = compute_energy(self.lgtSGs.data)
            print('init envmap energy: ', torch.sum(energy, dim=0).clone().cpu().numpy())

            # deterministicly initialize lobes
            lobes = fibonacci_sphere(self.numLgtSGs//2).astype(np.float32)
            self.lgtSGs.data[:self.numLgtSGs//2, :3] = torch.from_numpy(lobes)
            self.lgtSGs.data[self.numLgtSGs//2:, :3] = torch.from_numpy(lobes)

        # rotation matrixs for incident light
        self.light_rotation_matrix = []
        for i in range(self.light_num):
            horizontal_angle = torch.tensor(self.light_rotation[i] / 180 * torch.pi).to(torch.float32)
            # Rotation about the z axis by the configured azimuth angle.
            rotation_matrix = torch.tensor([[torch.cos(horizontal_angle), -torch.sin(horizontal_angle), 0],
                                            [torch.sin(horizontal_angle), torch.cos(horizontal_angle), 0],
                                            [0, 0, 1]]
                                           ).to(torch.float32)

            self.light_rotation_matrix.append(rotation_matrix)
        self.light_rotation_matrix = torch.stack(self.light_rotation_matrix, dim=0)  # [rotation_num, 3, 3]
    def gen_light_incident_dirs(self, sample_number=-1, method='fixed_envirmap', device='cuda'):
        '''Generate incident light directions for one iteration (used with the 'sg' light kind).

        - args:
            - sample_number: number of sampled incident directions; not used by every method
            - method:
                'fixed_envirmap': directions at the fixed center points of the environment map
                'uniform_sample': sample directions uniformly on the unit sphere (NOT implemented, see below)
                'stratified_sampling': one jittered direction inside each environment-map grid cell
                'stratifed_sample_equal_areas': stratified sampling over equal-solid-angle cells
                'importance_sample': sample proportionally to envmap energy; NOTE this branch
                  returns early with a (light_dir, light_rgb, light_pdf) triple instead
        - return:
            - light_incident_directions: [output_sample_number, 3]
        '''
        if method == 'fixed_envirmap':
            light_incident_directions = self.fixed_viewdirs
        elif method == 'uniform_sample':
            # uniform sampling 'sample_number' points on a unit sphere
            # NOTE(review): unimplemented — this branch leaves
            # light_incident_directions unbound, so the return below raises
            # UnboundLocalError if this method is ever selected.
            pass # TODO
        elif method == 'stratified_sampling':
            # One jittered sample per (latitude, longitude) cell of the envmap grid.
            lat_step_size = np.pi / self.envmap_h
            lng_step_size = 2 * np.pi / self.envmap_w
            phi_begin, theta_begin = torch.meshgrid([
                torch.linspace(np.pi / 2 - 0.5 * lat_step_size, -np.pi / 2 + 0.5 * lat_step_size, self.envmap_h),
                torch.linspace(np.pi - 0.5 * lng_step_size, -np.pi + 0.5 * lng_step_size, self.envmap_w)
            ],
                indexing='ij')
            phi_jittor, theta_jittor = lat_step_size * (torch.rand_like(phi_begin) - 0.5), lng_step_size * (torch.rand_like(theta_begin) - 0.5)
            phi, theta = phi_begin + phi_jittor, theta_begin + theta_jittor
            light_incident_directions = torch.stack([torch.cos(theta) * torch.cos(phi),
                                                     torch.sin(theta) * torch.cos(phi),
                                                     torch.sin(phi)], dim=-1)  # [H, W, 3]
        elif method == 'stratifed_sample_equal_areas':
            # Stratify over sin(phi) so every cell covers an equal solid angle.
            sin_phi_size = 2 / self.envmap_h
            lng_step_size = 2 * np.pi / self.envmap_w
            sin_phi_begin, theta_begin = torch.meshgrid([torch.linspace(1 - 0.5 * sin_phi_size, -1 + 0.5 * sin_phi_size, self.envmap_h),
                                                         torch.linspace(np.pi - 0.5 * lng_step_size, -np.pi + 0.5 * lng_step_size, self.envmap_w)], indexing='ij')
            sin_phi_jittor, theta_jittor = sin_phi_size * (torch.rand_like(sin_phi_begin) - 0.5), lng_step_size * (torch.rand_like(theta_begin) - 0.5)
            sin_phi, theta = sin_phi_begin + sin_phi_jittor, theta_begin + theta_jittor
            phi = torch.asin(sin_phi)
            light_incident_directions = torch.stack([torch.cos(theta) * torch.cos(phi),
                                                     torch.sin(theta) * torch.cos(phi),
                                                     torch.sin(phi)], dim=-1)  # [H, W, 3]
        elif method == 'importance_sample':
            # Importance-sample the current environment map at a fixed 128x256 resolution.
            _, view_dirs = self.generate_envir_map_dir(128, 256, is_jittor=True)
            envir_map = self.get_light_rgbs(view_dirs.reshape(-1, 3).to(device))[0]
            with torch.no_grad():
                envir_map = envir_map.reshape(128, 256, 3)
                # compute the pdf of importance sampling of the environment map
                light_intensity = torch.sum(envir_map, dim=2, keepdim=True)  # [H, W, 1]
                env_map_h, env_map_w, _ = light_intensity.shape
                h_interval = 1.0 / env_map_h
                # sin weighting compensates the equirectangular area distortion near the poles.
                sin_theta = torch.sin(torch.linspace(0 + 0.5 * h_interval, np.pi - 0.5 * h_interval, env_map_h)).to(device)  # [H, ]
                pdf = light_intensity * sin_theta.view(-1, 1, 1)  # [H, W, 1]
                pdf_to_sample = pdf / torch.sum(pdf)  # [H, W, 1]
                pdf_to_compute = pdf_to_sample * env_map_h * env_map_w / (2 * np.pi * np.pi * sin_theta.view(-1, 1, 1))
                light_dir_idx = torch.multinomial(pdf_to_sample.view(-1), sample_number, replacement=True)  # [sample_number, ]
                envir_map_dir = view_dirs.view(-1, 3).to(device)
                light_dir = envir_map_dir.gather(0, light_dir_idx.unsqueeze(-1).expand(-1, 3)).view(-1, 3)  # [num_samples, 3]
                # sample the light rgbs
                envir_map_rgb = envir_map.view(-1, 3)
                light_rgb = envir_map_rgb.gather(0, light_dir_idx.unsqueeze(-1).expand(-1, 3)).view(-1, 3)  # [num_samples, 3]
                envir_map_pdf = pdf_to_compute.view(-1, 1)
                light_pdf = envir_map_pdf.gather(0, light_dir_idx.unsqueeze(-1).expand(-1, 1)).view(-1, 1)  # [num_samples, 1]
            return light_dir, light_rgb, light_pdf
        return light_incident_directions.reshape(-1, 3)  # [output_sample_number, 3]
    def get_light_rgbs(self, incident_light_directions=None, device='cuda'):
        '''Query the environment light along the given directions for every light rotation.

        - args:
            - incident_light_directions: [sample_number, 3]
        - return:
            - light_rgbs: [rotation_num, sample_number, 3]
        '''
        init_light_directions = incident_light_directions.to(device).reshape(1, -1, 3) # [1, sample_number, 3]
        rotation_matrix = self.light_rotation_matrix.to(device) # [rotation_num, 3, 3]
        # Rotate the query directions into each light's frame (broadcast over rotations).
        remapped_light_directions = torch.matmul(init_light_directions, rotation_matrix).reshape(-1, 3) # [rotation_num * sample_number, 3]
        if self.light_kind == 'sg':
            light_rgbs = render_envmap_sg(self.lgtSGs.to(device), remapped_light_directions).reshape(self.light_num, -1, 3) # [rotation_num, sample_number, 3]
        else:
            if self.light_kind == 'pixel':
                # softplus keeps the learnable per-pixel radiance strictly positive.
                environment_map = torch.nn.functional.softplus(self._light_rgbs, beta=5).reshape(self.envmap_h, self.envmap_w, 3).to(device) # [H, W, 3]
            elif self.light_kind == 'gt':
                environment_map = self.dataset.lights_probes.requires_grad_(False).reshape(self.envmap_h, self.envmap_w, 3).to(device) # [H, W, 3]
            else:
                print("Illegal light kind: {}".format(self.light_kind))
                exit(1)
            environment_map = environment_map.permute(2, 0, 1).unsqueeze(0) # [1, 3, H, W]
            # Convert directions to equirectangular (phi, theta) coordinates;
            # the 1e-6 nudge keeps arccos(1) off the texture boundary.
            phi = torch.arccos(remapped_light_directions[:, 2]).reshape(-1) - 1e-6
            theta = torch.atan2(remapped_light_directions[:, 1], remapped_light_directions[:, 0]).reshape(-1)
            # normalize to [-1, 1]
            query_y = (phi / np.pi) * 2 - 1
            query_x = - theta / np.pi
            grid = torch.stack((query_x, query_y)).permute(1, 0).unsqueeze(0).unsqueeze(0)
            # Bilinear lookup into the envmap at the query coordinates.
            light_rgbs = F.grid_sample(environment_map, grid, align_corners=False).squeeze().permute(1, 0).reshape(self.light_num, -1, 3)
        return light_rgbs
def update_stepSize(self, gridSize):
print("aabb", self.aabb.view(-1))
print("grid size", gridSize)
self.aabbSize = self.aabb[1] - self.aabb[0]
self.invaabbSize = 2.0 / self.aabbSize
self.gridSize = torch.LongTensor(gridSize).to(self.device)
self.units = self.aabbSize / (self.gridSize - 1)
self.stepSize = torch.mean(self.units) * self.step_ratio
self.aabbDiag = torch.sqrt(torch.sum(torch.square(self.aabbSize)))
self.nSamples = int((self.aabbDiag / self.stepSize).item()) + 1
print("sampling step size: ", self.stepSize)
print("sampling number: ", self.nSamples)
    def init_svd_volume(self, res, device):
        """Allocate the factorized feature volumes; implemented by concrete subclasses."""
        pass
    def compute_densityfeature(self, xyz_sampled):
        """Return density features at normalized points; implemented by subclasses."""
        pass
    def compute_densityfeature_with_xyz_grad(self, xyz_sampled):
        """Like compute_densityfeature but keeps the graph w.r.t. xyz for normal derivation; implemented by subclasses."""
        pass
    def compute_bothfeature(self, xyz_sampled, light_idx_sampled):
        """Return (radiance-field, intrinsic) features per sample; implemented by subclasses."""
        pass
    def compute_intrinfeature(self, xyz_sampled):
        """Return intrinsic (BRDF) features per sample; implemented by subclasses."""
        pass
    def compute_appfeature(self, xyz_sampled, light_idx_sampled):
        """Return appearance features per sample; implemented by subclasses."""
        pass
def normalize_coord(self, xyz_sampled):
return (xyz_sampled - self.aabb[0]) * self.invaabbSize - 1
    def get_optparam_groups(self, lr_init_spatial=0.02, lr_init_network=0.001):
        """Return optimizer parameter groups; implemented by concrete subclasses."""
        pass
def get_kwargs(self):
return {
'aabb': self.aabb,
'gridSize': self.gridSize.tolist(),
'density_n_comp': self.density_n_comp,
'appearance_n_comp': self.app_n_comp,
'app_dim': self.app_dim,
'density_shift': self.density_shift,
'alphaMask_thres': self.alphaMask_thres,
'distance_scale': self.distance_scale,
'rayMarch_weight_thres': self.rayMarch_weight_thres,
'fea2denseAct': self.fea2denseAct,
'near_far': self.near_far,
'step_ratio': self.step_ratio,
'shadingMode': self.shadingMode,
'pos_pe': self.pos_pe,
'view_pe': self.view_pe,
'fea_pe': self.fea_pe,
'featureC': self.featureC,
'normals_kind': self.normals_kind,
'light_num': self.light_num,
'light_kind':self.light_kind,
'numLgtSGs':self.numLgtSGs,
'light_rotation':self.light_rotation
}
    def save(self, path):
        """Serialize re-creation kwargs, weights, and (if present) the packed alpha mask to `path`."""
        kwargs = self.get_kwargs()
        ckpt = {'kwargs': kwargs, 'state_dict': self.state_dict()}
        if self.alphaMask is not None:
            alpha_volume = self.alphaMask.alpha_volume.bool().cpu().numpy()
            ckpt.update({'alphaMask.shape': alpha_volume.shape})
            # Pack the boolean occupancy grid into bits to keep the checkpoint small.
            ckpt.update({'alphaMask.mask': np.packbits(alpha_volume.reshape(-1))})
            ckpt.update({'alphaMask.aabb': self.alphaMask.aabb.cpu()})
        torch.save(ckpt, path)
    def load(self, ckpt):
        """Restore weights from a checkpoint dict; rebuilds the alpha mask when present."""
        if 'alphaMask.aabb' in ckpt.keys():
            length = np.prod(ckpt['alphaMask.shape'])
            # Unpack the bit-packed mask back into a float occupancy volume
            # (unpackbits may pad to a byte boundary, hence the [:length] slice).
            alpha_volume = torch.from_numpy(
                np.unpackbits(ckpt['alphaMask.mask'])[:length].reshape(ckpt['alphaMask.shape']))
            self.alphaMask = AlphaGridMask(self.device, ckpt['alphaMask.aabb'].to(self.device),
                                       alpha_volume.float().to(self.device))
        self.load_state_dict(ckpt['state_dict'])
def sample_ray_ndc(self, rays_o, rays_d, is_train=True, N_samples=-1):
N_samples = N_samples if N_samples > 0 else self.nSamples
near, far = self.near_far
interpx = torch.linspace(near, far, N_samples).unsqueeze(0).to(rays_o)
if is_train:
interpx += torch.rand_like(interpx).to(rays_o) * ((far - near) / N_samples)
rays_pts = rays_o[..., None, :] + rays_d[..., None, :] * interpx[..., None]
mask_outbbox = ((self.aabb[0] > rays_pts) | (rays_pts > self.aabb[1])).any(dim=-1)
return rays_pts, interpx, ~mask_outbbox
def sample_ray(self, rays_o, rays_d, is_train=True, N_samples=-1):
N_samples = N_samples if N_samples > 0 else self.nSamples
stepsize = self.stepSize
near, far = self.near_far
vec = torch.where(rays_d == 0, torch.full_like(rays_d, 1e-6), rays_d)
rate_a = (self.aabb[1] - rays_o) / vec
rate_b = (self.aabb[0] - rays_o) / vec
t_min = torch.minimum(rate_a, rate_b).amax(-1).clamp(min=near, max=far)
rng = torch.arange(N_samples)[None].float()
if is_train:
rng = rng.repeat(rays_d.shape[-2], 1)
rng += torch.rand_like(rng[:, [0]])
step = stepsize * rng.to(rays_o.device)
interpx = (t_min[..., None] + step)
rays_pts = rays_o[..., None, :] + rays_d[..., None, :] * interpx[..., None]
mask_outbbox = ((self.aabb[0] > rays_pts) | (rays_pts > self.aabb[1])).any(dim=-1)
return rays_pts, interpx, ~mask_outbbox
def get_mid_and_interval(self, batch_size, N_samples=-1):
N_samples = N_samples if N_samples > 0 else self.nSamples
s = torch.linspace(0, 1, N_samples+1).cuda()
m = (s[1:] + s[:-1]) * 0.5
m = m[None].repeat(batch_size,1)
interval = 1 / N_samples
return m , interval
    def shrink(self, new_aabb, voxel_size):
        """Shrink the feature volumes to a tightened bounding box; implemented by subclasses."""
        pass
@torch.no_grad()
def getDenseAlpha(self, gridSize=None):
gridSize = self.gridSize if gridSize is None else gridSize
samples = torch.stack(torch.meshgrid(
torch.linspace(0, 1, gridSize[0]),
torch.linspace(0, 1, gridSize[1]),
torch.linspace(0, 1, gridSize[2]),
), -1).to(self.device)
dense_xyz = self.aabb[0] * (1 - samples) + self.aabb[1] * samples
# dense_xyz = dense_xyz
# print(self.stepSize, self.distance_scale*self.aabbDiag)
alpha = torch.zeros_like(dense_xyz[..., 0])
for i in range(gridSize[0]):
alpha[i] = self.compute_alpha(dense_xyz[i].view(-1, 3), self.stepSize).view((gridSize[1], gridSize[2]))
return alpha, dense_xyz
    @torch.no_grad()
    def updateAlphaMask(self, gridSize=(200, 200, 200)):
        """Rebuild the cached alpha occupancy mask and return a tightened aabb.

        Evaluates alpha on a dense grid, dilates and binarizes it, stores it as
        self.alphaMask, and returns the bounding box of the occupied voxels.
        """
        alpha, dense_xyz = self.getDenseAlpha(gridSize)
        dense_xyz = dense_xyz.transpose(0, 2).contiguous()
        alpha = alpha.clamp(0, 1).transpose(0, 2).contiguous()[None, None]
        total_voxels = gridSize[0] * gridSize[1] * gridSize[2]
        ks = 3
        # Dilate occupancy with max-pooling so thin structures are not culled.
        alpha = F.max_pool3d(alpha, kernel_size=ks, padding=ks // 2, stride=1).view(gridSize[::-1])
        alpha[alpha >= self.alphaMask_thres] = 1
        alpha[alpha < self.alphaMask_thres] = 0
        self.alphaMask = AlphaGridMask(self.device, self.aabb, alpha)
        # Tight bounding box around the occupied voxels.
        valid_xyz = dense_xyz[alpha > 0.5]
        xyz_min = valid_xyz.amin(0)
        xyz_max = valid_xyz.amax(0)
        new_aabb = torch.stack((xyz_min, xyz_max))
        total = torch.sum(alpha)
        # NOTE(review): mixes an f-string with %-formatting; "%%%f" prints a
        # literal '%' followed by the occupancy percentage.
        print(f"bbox: {xyz_min, xyz_max} alpha rest %%%f" % (total / total_voxels * 100))
        return new_aabb
    @torch.no_grad()
    def filtering_rays(self, all_rays, N_samples=256, chunk=10240 * 5, bbox_only=False):
        """Drop rays that never intersect the scene.

        - args:
            - all_rays: [..., 6] per-ray origin (first 3) + direction (next 3)
            - N_samples: samples per ray when testing against the alpha mask
            - chunk: rays processed per batch to bound memory
            - bbox_only: if True keep rays whose segment intersects the aabb;
              otherwise keep rays with at least one alpha-positive sample
        - return:
            (filtered_rays, boolean keep-mask over all_rays)
        """
        print('========> filtering rays ...')
        tt = time.time()
        N = torch.tensor(all_rays.shape[:-1]).prod()
        mask_filtered = []
        idx_chunks = torch.split(torch.arange(N), chunk)
        for idx_chunk in idx_chunks:
            rays_chunk = all_rays[idx_chunk].to(self.device)
            rays_o, rays_d = rays_chunk[..., :3], rays_chunk[..., 3:6]
            if bbox_only:
                # Slab test: a ray hits the box iff its exit depth exceeds its entry depth.
                vec = torch.where(rays_d == 0, torch.full_like(rays_d, 1e-6), rays_d)
                rate_a = (self.aabb[1] - rays_o) / vec
                rate_b = (self.aabb[0] - rays_o) / vec
                t_min = torch.minimum(rate_a, rate_b).amax(-1)#.clamp(min=near, max=far)
                t_max = torch.maximum(rate_a, rate_b).amin(-1)#.clamp(min=near, max=far)
                mask_inbbox = t_max > t_min
            else:
                # Keep a ray if any of its samples lands in occupied space.
                xyz_sampled, _,_ = self.sample_ray(rays_o, rays_d, N_samples=N_samples, is_train=False)
                mask_inbbox= (self.alphaMask.sample_alpha(xyz_sampled).view(xyz_sampled.shape[:-1]) > 0).any(-1)
            mask_filtered.append(mask_inbbox.cpu())
        mask_filtered = torch.cat(mask_filtered).view(all_rays.shape[:-1])
        print(f'Ray filtering done! takes {time.time()-tt} s. ray mask ratio: {torch.sum(mask_filtered) / N}')
        return all_rays[mask_filtered], mask_filtered
def feature2density(self, density_features):
if self.fea2denseAct == "softplus":
return F.softplus(density_features + self.density_shift)
elif self.fea2denseAct == "relu":
return F.relu(density_features)
def compute_alpha(self, xyz_locs, length=1):
if self.alphaMask is not None:
alphas = self.alphaMask.sample_alpha(xyz_locs)
alpha_mask = alphas > 0
else:
alpha_mask = torch.ones_like(xyz_locs[:, 0], dtype=bool)
sigma = torch.zeros(xyz_locs.shape[:-1], device=xyz_locs.device)
if alpha_mask.any():
xyz_sampled = self.normalize_coord(xyz_locs[alpha_mask])
sigma_feature = self.compute_densityfeature(xyz_sampled)
validsigma = self.feature2density(sigma_feature)
sigma[alpha_mask] = validsigma
alpha = 1 - torch.exp(-sigma * length).view(xyz_locs.shape[:-1])
return alpha
    @torch.enable_grad()
    def compute_derived_normals(self, xyz_locs):
        """Surface normals derived as the negated, normalized density gradient.

        The decorator re-enables autograd even when called under a no_grad
        context, since torch.autograd.grad needs a graph through sigma.
        - args:
            - xyz_locs: [N, 3] sample coordinates
        - return:
            - derived_normals: [N, 3] unit vectors
        """
        xyz_locs.requires_grad_(True)
        sigma_feature = self.compute_densityfeature_with_xyz_grad(xyz_locs)  # [..., 1]; the detach() is removed inside this function
        sigma = self.feature2density(sigma_feature)
        d_output = torch.ones_like(sigma, requires_grad=False, device=sigma.device)
        gradients = torch.autograd.grad(
            outputs=sigma,
            inputs=xyz_locs,
            grad_outputs=d_output,
            create_graph=True,
            retain_graph=True,
            only_inputs=True
        )[0]
        # Density grows toward the interior, so the outward normal is the
        # negated gradient direction.
        derived_normals = -safe_l2_normalize(gradients, dim=-1)
        derived_normals = derived_normals.view(-1, 3)
        return derived_normals
def compute_relative_smoothness_loss(self, values, values_jittor):
base = torch.maximum(values, values_jittor).clip(min=1e-6)
difference = torch.sum(((values - values_jittor) / base)**2, dim=-1, keepdim=True) # [..., 1]
return difference
    def forward(self, rays_chunk, light_idx, white_bg=True, is_train=False, ndc_ray=False, is_relight=True, N_samples=-1):
        '''Volume-render a chunk of rays; optionally also predict BRDF and normals.

        - args:
            - rays_chunk: (batch_N, 6), batch_N is the number of rays in a batch;
              first 3 channels are origins, next 3 are directions
            - light_idx: (batch_N, 1) the index of light in the scene
            - white_bg: composite a white background on low-opacity rays
            - is_train: enables stochastic sampling and the random background choice
            - ndc_ray: sample in NDC space instead of metric space
            - is_relight: additionally evaluate albedo/roughness/normal branches
            - N_samples: samples per ray; -1 uses self.nSamples
        - return: a 12-tuple (rgb_map, depth_map, normal_map, albedo_map,
          roughness_map, fresnel_map, acc_map, normals_diff_map,
          normals_orientation_loss_map, acc_mask, albedo_smoothness_loss,
          roughness_smoothness_loss); when is_relight is False the
          relighting slots are returned as None.
        '''
        viewdirs = rays_chunk[:, 3:6] # (batch_N, 3)
        if ndc_ray:
            xyz_sampled, z_vals, ray_valid = self.sample_ray_ndc(rays_chunk[:, :3], viewdirs, is_train=is_train,
                                                                 N_samples=N_samples)
            dists = torch.cat((z_vals[:, 1:] - z_vals[:, :-1], torch.zeros_like(z_vals[:, :1])),
                              dim=-1)  # dist between 2 consecutive points along a ray
            rays_norm = torch.norm(viewdirs, dim=-1, keepdim=True)
            dists = dists * rays_norm  # [1, n_sample]
            viewdirs = viewdirs / rays_norm
        else:
            xyz_sampled, z_vals, ray_valid = self.sample_ray(rays_chunk[:, :3], viewdirs, is_train=is_train,
                                                             N_samples=N_samples)
            dists = torch.cat((z_vals[:, 1:] - z_vals[:, :-1], torch.zeros_like(z_vals[:, :1])), dim=-1)
        viewdirs = viewdirs.view(-1, 1, 3).expand(xyz_sampled.shape)  # (batch_N, N_samples, 3)
        light_idx = light_idx.view(-1, 1, 1).expand((*xyz_sampled.shape[:-1], 1))  # (batch_N, n_sammple, 1)
        # Invalidate samples that the cached alpha mask marks as empty space.
        if self.alphaMask is not None:
            alphas = self.alphaMask.sample_alpha(xyz_sampled[ray_valid])
            alpha_mask = alphas > 0
            ray_invalid = ~ray_valid
            ray_invalid[ray_valid] |= (~alpha_mask)
            ray_valid = ~ray_invalid
        # Create empty tensor to store sigma and rgb
        sigma = torch.zeros(xyz_sampled.shape[:-1], device=xyz_sampled.device)
        rgb = torch.zeros((*xyz_sampled.shape[:2], 3), device=xyz_sampled.device)
        # Create empty tensor to store normal, roughness, fresnel
        normal = torch.zeros((*xyz_sampled.shape[:2], 3), device=xyz_sampled.device)
        albedo = torch.zeros((*xyz_sampled.shape[:2], 3), device=xyz_sampled.device)
        roughness = torch.zeros((*xyz_sampled.shape[:-1], 1), device=xyz_sampled.device)
        albedo_smoothness_cost = torch.zeros((*xyz_sampled.shape[:2], 1), device=xyz_sampled.device)
        roughness_smoothness_cost = torch.zeros((*xyz_sampled.shape[:-1], 1), device=xyz_sampled.device)
        normals_diff = torch.zeros((*xyz_sampled.shape[:2], 1), device=xyz_sampled.device)
        normals_orientation_loss = torch.zeros((*xyz_sampled.shape[:2], 1), device=xyz_sampled.device)
        # Density pass: evaluate the field only at samples still marked valid.
        if ray_valid.any():
            xyz_sampled = self.normalize_coord(xyz_sampled)
            sigma_feature = self.compute_densityfeature(xyz_sampled[ray_valid])
            validsigma = self.feature2density(sigma_feature)
            sigma[ray_valid] = validsigma
        alpha, weight, bg_weight = raw2alpha(sigma, dists * self.distance_scale)
        # Appearance pass: restrict to samples with non-negligible blending weight.
        app_mask = weight > self.rayMarch_weight_thres
        if app_mask.any():
            radiance_field_feat, intrinsic_feat = self.compute_bothfeature(xyz_sampled[app_mask], light_idx[app_mask])
            # RGB
            rgb[app_mask] = self.renderModule(xyz_sampled[app_mask], viewdirs[app_mask], radiance_field_feat)
            if is_relight:
                # BRDF
                valid_brdf = self.renderModule_brdf(xyz_sampled[app_mask], intrinsic_feat)
                # Roughness is remapped from [0, 1] to [0.09, 0.99].
                valid_albedo, valid_roughness = valid_brdf[..., :3], (valid_brdf[..., 3:4] * 0.9 + 0.09)
                albedo[app_mask] = valid_albedo  # [..., 3]
                roughness[app_mask] = valid_roughness  # [..., 1]
                # Smoothness regularization: compare the BRDF at a jittered copy of each point.
                xyz_sampled_jittor = xyz_sampled[app_mask] + torch.randn_like(xyz_sampled[app_mask]) * 0.01
                intrinsic_feat_jittor = self.compute_intrinfeature(xyz_sampled_jittor)
                valid_brdf_jittor = self.renderModule_brdf(xyz_sampled_jittor, intrinsic_feat_jittor)
                valid_albedo_jittor, valid_roughness_jittor = valid_brdf_jittor[..., :3], (valid_brdf_jittor[..., 3:4] * 0.9 + 0.09)
                albedo_smoothness_cost[app_mask] = self.compute_relative_smoothness_loss(valid_albedo, valid_albedo_jittor)  # [..., 1]
                roughness_smoothness_cost[app_mask] = self.compute_relative_smoothness_loss(valid_roughness, valid_roughness_jittor)  # [..., 1]
                # Normal
                if self.normals_kind == "purely_predicted":
                    valid_normals = self.renderModule_normal(xyz_sampled[app_mask], intrinsic_feat)
                elif self.normals_kind == "purely_derived":
                    valid_normals = self.compute_derived_normals(xyz_sampled[app_mask])
                elif self.normals_kind == "gt_normals":
                    valid_normals = torch.zeros_like(xyz_sampled[app_mask])  # useless
                elif self.normals_kind == "derived_plus_predicted":
                    # use the predicted normals and penalize the difference between the predicted normals and derived normals at the same time
                    derived_normals = self.compute_derived_normals(xyz_sampled[app_mask])
                    predicted_normals = self.renderModule_normal(xyz_sampled[app_mask], intrinsic_feat)
                    valid_normals = predicted_normals
                    normals_diff[app_mask] = torch.sum(torch.pow(predicted_normals - derived_normals, 2), dim=-1, keepdim=True)
                    # Penalize predicted normals facing away from the camera (Ref-NeRF's Ro loss).
                    normals_orientation_loss[app_mask] = torch.sum(viewdirs[app_mask] * predicted_normals, dim=-1, keepdim=True).clamp(min=0)
                elif self.normals_kind == "residue_prediction":
                    derived_normals = self.compute_derived_normals(xyz_sampled[app_mask])
                    predicted_normals = self.renderModule_normal(xyz_sampled[app_mask], derived_normals, intrinsic_feat)
                    valid_normals = predicted_normals
                    normals_diff[app_mask] = torch.sum(torch.pow(predicted_normals - derived_normals, 2), dim=-1, keepdim=True)
                    normals_orientation_loss[app_mask] = torch.sum(viewdirs[app_mask] * predicted_normals, dim=-1, keepdim=True).clamp(min=0)
                normal[app_mask] = valid_normals
        # alpha composition
        acc_map = torch.sum(weight, -1)
        depth_map = torch.sum(weight * z_vals, -1)
        rgb_map = torch.sum(weight[..., None] * rgb, -2)
        if not is_relight:
            if white_bg or (is_train and torch.rand((1,)) < 0.5):
                depth_map = depth_map + (1. - acc_map) * rays_chunk[..., -1]
                rgb_map = rgb_map + (1. - acc_map[..., None])
            return rgb_map, depth_map, None, \
                   None, None, None, \
                   acc_map, None, None, None, \
                   None, None
        else:
            normal_map = torch.sum(weight[..., None] * normal, -2)
            normals_diff_map = torch.sum(weight[..., None] * normals_diff, -2)
            normals_orientation_loss_map = torch.sum(weight[..., None] * normals_orientation_loss, -2)
            albedo_map = torch.sum(weight[..., None] * albedo, -2)  # [..., 3]
            roughness_map = torch.sum(weight[..., None] * roughness, -2)  # [..., ]
            fresnel_map = torch.zeros_like(albedo_map).fill_(self.fixed_fresnel)  # [..., 3]
            albedo_smoothness_cost_map = torch.sum(weight[..., None] * albedo_smoothness_cost, -2)  # [..., 1]
            roughness_smoothness_cost_map = torch.sum(weight[..., None] * roughness_smoothness_cost, -2)  # [..., 1]
            albedo_smoothness_loss = torch.mean(albedo_smoothness_cost_map)
            roughness_smoothness_loss = torch.mean(roughness_smoothness_cost_map)
            if white_bg or (is_train and torch.rand((1,)) < 0.5):
                depth_map = depth_map + (1. - acc_map) * rays_chunk[..., -1]
                rgb_map = rgb_map + (1. - acc_map[..., None])
                normal_map = normal_map + (1 - acc_map[..., None]) * torch.tensor([0.0, 0.0, 1.0],
                                                                                  device=normal_map.device)  # Background normal
                # normal_map = normal_map
                albedo_map = albedo_map + (1 - acc_map[..., None])  # Albedo background should be white
                roughness_map = roughness_map + (1 - acc_map[..., None])
                fresnel_map = fresnel_map + (1 - acc_map[..., None])
            # tone mapping & gamma correction
            rgb_map = rgb_map.clamp(0, 1)
            # Tone mapping to make sure the output of self.renderModule() is in linear space,
            # and the rgb_map output of this forward() is in sRGB space.
            # By doing this, we can use the output of self.renderModule() to better
            # represent the indirect illumination, which is implemented in another function.
            if rgb_map.shape[0] > 0:
                rgb_map = linear2srgb_torch(rgb_map)
            albedo_map = albedo_map.clamp(0, 1)
            fresnel_map = fresnel_map.clamp(0, 1)
            roughness_map = roughness_map.clamp(0, 1)
            normal_map = safe_l2_normalize(normal_map, dim=-1)
            acc_mask = acc_map > 0.5  # where there may be intersected surface points
            return rgb_map, depth_map, normal_map, \
                   albedo_map, roughness_map, fresnel_map, \
                   acc_map, normals_diff_map, normals_orientation_loss_map, acc_mask, \
                   albedo_smoothness_loss, roughness_smoothness_loss
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
opencv_tonemapping.py | Python | from pathlib import Path
import cv2
import numpy as np
import camtools as ct
def main():
    """Tonemap one HDR environment map to an LDR PNG (exposure + gamma)."""
    hdr_path = Path.home() / "research/object-relighting-dataset/dataset/antman/test/gt_env_512_rotated_0000.hdr"
    # Must read with cv2.IMREAD_ANYDEPTH to keep the float HDR range.
    # cv2.imread requires a string path; passing a pathlib.Path fails on
    # older OpenCV builds, so convert explicitly.
    hdr = cv2.imread(str(hdr_path), cv2.IMREAD_ANYDEPTH)
    # cv2.imread returns None on failure instead of raising; fail loudly
    # rather than crashing later with an opaque TypeError.
    if hdr is None:
        raise FileNotFoundError(f"Failed to read HDR image: {hdr_path}")
    # Apply -6 EV exposure compensation
    hdr = hdr / (2 ** 6.0)
    # Tonemap with an approximate sRGB gamma of 2.2
    ldr = hdr ** (1 / 2.2)
    # Clip to [0, 1], this can be done here or after exposure compensation
    ldr = np.clip(ldr, 0, 1)
    # BGR to RGB (OpenCV loads channels in BGR order)
    ldr = ldr[:, :, ::-1]
    # Save
    ct.io.imwrite("ldr.png", ldr)


if __name__ == "__main__":
    main()
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
opt.py | Python | import configargparse
def config_parser(cmd=None):
    """Build the TensoIR command-line/config parser and parse arguments.

    Args:
        cmd: optional list of argument strings; when None, sys.argv is parsed.
    Returns:
        The parsed argparse.Namespace.
    """
    parser = configargparse.ArgumentParser()
    parser.add_argument('--config', is_config_file=True,
                        help='config file path')
    parser.add_argument("--expname", type=str,
                        help='experiment name')
    parser.add_argument("--basedir", type=str, default='./log',
                        help='where to store ckpts and logs')
    parser.add_argument("--add_timestamp", type=int, default=0,
                        help='add timestamp to dir')
    parser.add_argument("--datadir", type=str, default='./data/llff/fern',
                        help='input data directory')
    parser.add_argument("--hdrdir", type=str, default='./data/llff/fern',
                        help='input HDR directory')
    parser.add_argument("--progress_refresh_rate", type=int, default=10,
                        help='how many iterations to show psnrs or iters')
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument('--with_depth', action='store_true')
    parser.add_argument('--downsample_train', type=float, default=1.0)
    parser.add_argument('--downsample_test', type=float, default=1.0)
    parser.add_argument('--model_name', type=str, default='TensorVMSplit',
                        choices=['TensorVMSplit', 'TensorCP', 'ShapeModel'])
    # loader options
    parser.add_argument("--batch_size", type=int, default=4096)
    parser.add_argument("--n_iters", type=int, default=30000)
    parser.add_argument("--save_iters", type=int, default=10000)
    parser.add_argument('--dataset_name', type=str, default='tensoIR_unknown_rotated_lights',
                        choices=['ord', 'blender', 'llff', 'nsvf', 'dtu','tankstemple', 'own_data',
                                 'tensorf_init', 'shapeBuffer', 'tensoIR_unknown_rotated_lights', 'tensoIR_unknown_general_multi_lights',
                                 'tensoIR_simple', 'tensoIR_relighting_test', 'tensoIR_material_editing_test', 'tensoIR_simple_dtu'])
    # training options
    # learning rate
    parser.add_argument("--lr_init", type=float, default=0.02,
                        help='learning rate')
    parser.add_argument("--lr_basis", type=float, default=1e-3,
                        help='learning rate')
    parser.add_argument("--lr_decay_iters", type=int, default=-1,
                        help = 'number of iterations the lr will decay to the target ratio; -1 will set it to n_iters')
    parser.add_argument("--lr_decay_target_ratio", type=float, default=0.1,
                        help='the target decay ratio; after decay_iters inital lr decays to lr*ratio')
    parser.add_argument("--lr_upsample_reset", type=int, default=1,
                        help='reset lr to inital after upsampling')
    # loss
    parser.add_argument("--L1_weight_inital", type=float, default=0.0,
                        help='loss weight')
    parser.add_argument("--L1_weight_rest", type=float, default=0,
                        help='loss weight')
    parser.add_argument("--Ortho_weight", type=float, default=0.0,
                        help='loss weight')
    parser.add_argument("--TV_weight_density", type=float, default=0.0,
                        help='loss weight')
    parser.add_argument("--TV_weight_app", type=float, default=0.0,
                        help='loss weight')
    # model
    # volume options
    parser.add_argument("--n_lamb_sigma", type=int, action="append")
    parser.add_argument("--n_lamb_sh", type=int, action="append")
    parser.add_argument("--data_dim_color", type=int, default=27)
    parser.add_argument("--rm_weight_mask_thre", type=float, default=0.0001,
                        help='mask points in ray marching')
    parser.add_argument("--alpha_mask_thre", type=float, default=0.0001,
                        help='threshold for creating alpha mask volume')
    parser.add_argument("--distance_scale", type=float, default=25,
                        help='scaling sampling distance for computation')
    parser.add_argument("--density_shift", type=float, default=-10,
                        help='shift density in softplus; making density = 0 when feature == 0')
    # network decoder
    parser.add_argument("--shadingMode", type=str, default="MLP_PE",
                        help='which shading mode to use')
    parser.add_argument("--pos_pe", type=int, default=2,
                        help='number of pe for pos')
    parser.add_argument("--view_pe", type=int, default=2,
                        help='number of pe for view')
    parser.add_argument("--fea_pe", type=int, default=2,
                        help='number of pe for features')
    parser.add_argument("--featureC", type=int, default=128,
                        help='hidden feature channel in MLP')
    parser.add_argument("--ckpt", type=str, default=None,
                        help='specific weights npy file to reload for coarse network')
    parser.add_argument("--render_only", type=int, default=0)
    parser.add_argument("--render_test", type=int, default=0)
    parser.add_argument("--test_number", type=int, default=200)
    parser.add_argument("--render_train", type=int, default=0)
    parser.add_argument("--render_path", type=int, default=0)
    parser.add_argument("--export_mesh", type=int, default=0)
    # rendering options
    parser.add_argument('--lindisp', default=False, action="store_true",
                        help='use disparity depth sampling')
    parser.add_argument("--perturb", type=float, default=1.,
                        help='set to 0. for no jitter, 1. for jitter')
    parser.add_argument("--accumulate_decay", type=float, default=0.998)
    parser.add_argument("--fea2denseAct", type=str, default='softplus')
    parser.add_argument('--ndc_ray', type=int, default=0)
    # Fix: with type=int a float default (1e6) would leak through unconverted
    # when the flag is not given; use an int literal so args.nSamples is
    # always an int.
    parser.add_argument('--nSamples', type=int, default=1000000,
                        help='sample point each ray, pass 1e6 if automatic adjust')
    parser.add_argument('--step_ratio',type=float,default=0.5)
    ## blender flags
    parser.add_argument("--white_bkgd", action='store_true',
                        help='set to render synthetic data on a white bkgd (always use for dvoxels)')
    parser.add_argument('--N_voxel_init',
                        type=int,
                        default=100**3)
    parser.add_argument('--N_voxel_final',
                        type=int,
                        default=300**3)
    parser.add_argument("--upsamp_list", type=int, action="append")
    parser.add_argument("--update_AlphaMask_list", type=int, action="append")
    parser.add_argument('--idx_view',
                        type=int,
                        default=0)
    # logging/saving options
    parser.add_argument("--N_vis", type=int, default=5,
                        help='N images to vis')
    parser.add_argument("--vis_every", type=int, default=10000,
                        help='frequency of visualize the image')
    parser.add_argument("--rgb_brdf_weight", type=float, default=0.1, help="weight for image loss of physically-based rendering")
    parser.add_argument("--scene_bbox", type=str, action="append")
    parser.add_argument("--second_near", type=float, default=0.05, help='starting point for secondary shading')
    parser.add_argument("--second_far", type=float, default=1.5, help='ending point for secondary shading')
    parser.add_argument("--second_nSample", type=int, default=96, help='sampling number along each incoming ray for secondary shading')
    parser.add_argument("--light_sample_train", type=str, default='stratified_sampling')
    parser.add_argument("--light_kind", type=str, default='sg', help='light kind, pixel or spherical gaussian')
    # Fix: default was the string '128' for an int-typed option; use the int
    # literal instead of relying on argparse re-parsing string defaults.
    parser.add_argument("--numLgtSGs", type=int, default=128, help='number of spherical gaussian lights')
    parser.add_argument("--light_name", type=str, default="sunset", help="name of the unknown rotated lighting scene")
    parser.add_argument("--light_name_list", type=str, action="append")
    parser.add_argument("--light_rotation", type=str, action="append")
    parser.add_argument("--acc_thre", type=float, default=0.5,
                        help="acc_map threshold, less than threshhold will be set to 0")
    parser.add_argument("--geo_buffer_train", action='store_true', default=0)
    parser.add_argument("--geo_buffer_test", action='store_true', default=0)
    parser.add_argument("--geo_buffer_path", type=str, default='.')
    parser.add_argument("--echo_every", type=int, default=10,
                        help="echo loss information every N iterations")
    parser.add_argument("--relight_chunk_size", type=int, default=160000, help="chunk size when accumulating the visibility and indirect light")
    parser.add_argument("--batch_size_test", type=int, default=4096, help="bath size for test")
    parser.add_argument("--normals_diff_weight", type=float, default=0.0002, help="weight for normals difference loss (control the difference between predicted normals and derived normals)")
    parser.add_argument("--normals_orientation_weight", type=float, default=0.001, help="weight for normals orientation loss, introduced in ref-nerf as Ro loss")
    parser.add_argument("--BRDF_loss_enhance_ratio", type=float, default=1, help="ratio between the final weight and the initial weight for normals diff loss and normals direction loss")
    parser.add_argument("--normals_loss_enhance_ratio", type=float, default=1, help="ratio between the final weight and the initial weight for normals diff loss and normals orientation loss")
    parser.add_argument("--albedo_smoothness_loss_weight", type=float, default=0.0002, help="weight for albedo smooothness loss")
    parser.add_argument("--roughness_smoothness_loss_weight", type=float, default=0.0002, help="weight for roughness smooothness loss")
    parser.add_argument("--normals_kind", type=str, default="derived_plus_predicted", help="ways to get normals",
                        choices=["purely_derived", "purely_predicted", "derived_plus_predicted", "gt_normals", "residue_prediction"])
    # # used to visibility network, deprecated now
    # parser.add_argument("--ckpt_visibility", type=str, help="path to save visibility network checkpoint")
    # parser.add_argument("--vis_model_name", type=str, default='ShapeModel', help="name of visibility network checkpoint")
    # parser.add_argument("--train_visibility", action='store_true', help="if train visibility network")
    # parser.add_argument("--visi_lr", default=0.001, type=float, help="learning rate for visibility network" )
    # parser.add_argument("--visibilty_diff_weight", type=float, default=0.0, help="weight for visibility difference loss")
    if cmd is not None:
        return parser.parse_args(cmd)
    else:
        return parser.parse_args()
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
renderer.py | Python | import numpy as np
import random
import os, imageio
from tqdm.auto import tqdm
from utils import *
from models.relight_utils import render_with_BRDF
import torch
import torchvision.utils as vutils
@torch.no_grad()
def compute_rescale_ratio_rgb(tensoIR, dataset, sampled_num=20):
    '''Roughly estimate albedo rescale ratios using rendered RGB (not albedo) by
    sampling some views; this is only a very rough estimation.

    Example values:
        single channel rescale ratio (rgb) :  0.4612
        three channels rescale ratio (rgb) :  [0.4612, 0.3330, 0.2048]
        single channel rescale ratio (albedo):  0.1594
        three channels rescale ratio (albedo):  [0.1594, 0.0485, 0.0070]
    - Args:
        tensoIR: model
        dataset: dataset containing the G.T albedo
        sampled_num: number of views to sample (clamped to the dataset size)
    - Returns:
        single_channel_ratio: median of the ratio of the first channel
        three_channel_ratio: median of the ratio of the three channels
    '''
    W, H = dataset.img_wh
    data_num = len(dataset)
    # Clamp so interval cannot become 0 (which would sample view 0 repeatedly
    # when the dataset has fewer items than sampled_num).
    sampled_num = min(sampled_num, data_num)
    interval = data_num // sampled_num
    idx_list = [i * interval for i in range(sampled_num)]
    gt_albedo_list = []
    reconstructed_albedo_list = []
    for idx in tqdm(idx_list, desc="compute rescale ratio (rgb)"):
        item = dataset[idx]
        frame_rays = item['rays'].squeeze(0).to(tensoIR.device)  # [H*W, 6]
        gt_mask = item['rgbs_mask'].squeeze(0).squeeze(-1).cpu()  # [H*W]
        gt_albedo = item['rgbs'].squeeze(0).to(tensoIR.device)  # [num_lights, H*W, 3]
        gt_albedo = gt_albedo.reshape((-1, H*W, 3))
        gt_albedo = gt_albedo.mean(dim=0)  # average over lights: [num_lights, H*W, 3] -> [H*W, 3]
        light_idx = torch.zeros((frame_rays.shape[0], 1), dtype=torch.int).to(tensoIR.device).fill_(0)
        albedo_map = list()
        # Render in chunks of 3000 rays to bound memory use.
        chunk_idxs = torch.split(torch.arange(frame_rays.shape[0]), 3000)
        for chunk_idx in chunk_idxs:
            # grad is re-enabled inside the no_grad scope; presumably the model
            # needs autograd internally (e.g. derived normals) — TODO confirm
            with torch.enable_grad():
                rgb_chunk, depth_chunk, normal_chunk, albedo_chunk, roughness_chunk, \
                    fresnel_chunk, acc_chunk, *temp \
                    = tensoIR(frame_rays[chunk_idx], light_idx[chunk_idx], is_train=False, white_bg=True, ndc_ray=False, N_samples=-1)
            albedo_map.append(rgb_chunk.detach())  # NOTE: RGB is used as an albedo proxy here
        albedo_map = torch.cat(albedo_map, dim=0).reshape(H, W, 3)
        gt_albedo = gt_albedo.reshape(H, W, 3)
        gt_mask = gt_mask.reshape(H, W)
        # Only foreground pixels (mask == True) contribute to the ratio.
        gt_albedo_list.append(gt_albedo[gt_mask])
        reconstructed_albedo_list.append(albedo_map[gt_mask])
    gt_albedo_all = torch.cat(gt_albedo_list, dim=0)
    albedo_map_all = torch.cat(reconstructed_albedo_list, dim=0)
    # Median of per-pixel ratios is robust to outliers; clamp avoids div-by-zero.
    single_channel_ratio = (gt_albedo_all / albedo_map_all.clamp(min=1e-6))[..., 0].median()
    three_channel_ratio, _ = (gt_albedo_all / albedo_map_all.clamp(min=1e-6)).median(dim=0)
    return single_channel_ratio, three_channel_ratio
@torch.no_grad()
def compute_rescale_ratio(tensoIR, dataset, sampled_num=20):
    '''Compute single- and three-channel rescale ratios for predicted albedo by
    sampling some views and comparing against the dataset's G.T. albedo.

    - Args:
        tensoIR: model
        dataset: dataset containing the G.T albedo
        sampled_num: number of views to sample (clamped to the dataset size)
    - Returns:
        single_channel_ratio: median of the ratio of the first channel
        three_channel_ratio: median of the ratio of the three channels
    '''
    W, H = dataset.img_wh
    data_num = len(dataset)
    # Clamp so interval cannot become 0 (which would sample view 0 repeatedly
    # when the dataset has fewer items than sampled_num).
    sampled_num = min(sampled_num, data_num)
    interval = data_num // sampled_num
    idx_list = [i * interval for i in range(sampled_num)]
    gt_albedo_list = []
    reconstructed_albedo_list = []
    for idx in tqdm(idx_list, desc="compute rescale ratio (albedo)"):
        item = dataset[idx]
        frame_rays = item['rays'].squeeze(0).to(tensoIR.device)  # [H*W, 6]
        gt_mask = item['rgbs_mask'].squeeze(0).squeeze(-1).cpu()  # [H*W]
        gt_albedo = item['albedo'].squeeze(0).to(tensoIR.device)  # [H*W, 3]
        light_idx = torch.zeros((frame_rays.shape[0], 1), dtype=torch.int).to(tensoIR.device).fill_(0)
        albedo_map = list()
        # Render in chunks of 3000 rays to bound memory use.
        chunk_idxs = torch.split(torch.arange(frame_rays.shape[0]), 3000)
        for chunk_idx in chunk_idxs:
            # grad is re-enabled inside the no_grad scope; presumably the model
            # needs autograd internally (e.g. derived normals) — TODO confirm
            with torch.enable_grad():
                rgb_chunk, depth_chunk, normal_chunk, albedo_chunk, roughness_chunk, \
                    fresnel_chunk, acc_chunk, *temp \
                    = tensoIR(frame_rays[chunk_idx], light_idx[chunk_idx], is_train=False, white_bg=True, ndc_ray=False, N_samples=-1)
            albedo_map.append(albedo_chunk.detach())
        albedo_map = torch.cat(albedo_map, dim=0).reshape(H, W, 3)
        gt_albedo = gt_albedo.reshape(H, W, 3)
        gt_mask = gt_mask.reshape(H, W)
        # Only foreground pixels (mask == True) contribute to the ratio.
        gt_albedo_list.append(gt_albedo[gt_mask])
        reconstructed_albedo_list.append(albedo_map[gt_mask])
    gt_albedo_all = torch.cat(gt_albedo_list, dim=0)
    albedo_map_all = torch.cat(reconstructed_albedo_list, dim=0)
    # Median of per-pixel ratios is robust to outliers; clamp avoids div-by-zero.
    single_channel_ratio = (gt_albedo_all / albedo_map_all.clamp(min=1e-6))[..., 0].median()
    three_channel_ratio, _ = (gt_albedo_all / albedo_map_all.clamp(min=1e-6)).median(dim=0)
    print("single channel rescale ratio (albedo): ", single_channel_ratio)
    print("three channels rescale ratio (albedo): ", three_channel_ratio)
    return single_channel_ratio, three_channel_ratio
def Renderer_TensoIR_train(
        rays=None,
        normal_gt=None,
        light_idx=None,
        tensoIR=None,
        N_samples=-1,
        ndc_ray=False,
        white_bg=True,
        is_train=False,
        is_relight=True,
        sample_method='fixed_envirmap',
        chunk_size=15000,
        device='cuda',
        args=None,
):
    """Render one training batch of rays with the TensoIR model.

    Queries the model for radiance-field outputs and (optionally) performs
    physically-based relighting on the foreground rays via ``render_with_BRDF``.
    Returns a dict of per-ray maps and the two smoothness losses.
    """
    rays = rays.to(device)
    light_idx = light_idx.to(device, torch.int32)

    # Single forward pass over the whole batch of rays.
    (rgb_map, depth_map, normal_map, albedo_map, roughness_map,
     fresnel_map, acc_map, normals_diff_map, normals_orientation_loss_map,
     acc_mask, albedo_smoothness_loss, roughness_smoothness_loss) = tensoIR(
        rays, light_idx, is_train=is_train, white_bg=white_bg,
        is_relight=is_relight, ndc_ray=ndc_ray, N_samples=N_samples)

    # Optionally substitute ground-truth normals for the predicted ones.
    if tensoIR.normals_kind == "gt_normals" and normal_gt is not None:
        normal_map = normal_gt.to(device)

    # Background defaults to white; only foreground (acc_mask) rays are relit.
    rgb_with_brdf = torch.ones_like(rgb_map)
    if is_relight:
        rgb_with_brdf[acc_mask] = render_with_BRDF(
            depth_map[acc_mask],
            normal_map[acc_mask],
            albedo_map[acc_mask],
            roughness_map[acc_mask].repeat(1, 3),
            fresnel_map[acc_mask],
            rays[acc_mask],
            tensoIR,
            light_idx[acc_mask],
            sample_method,
            chunk_size=chunk_size,
            device=device,
            args=args,
        )

    return {
        "rgb_map": rgb_map,
        "depth_map": depth_map,
        "normal_map": normal_map,
        "albedo_map": albedo_map,
        "acc_map": acc_map,
        "roughness_map": roughness_map,
        "fresnel_map": fresnel_map,
        'rgb_with_brdf_map': rgb_with_brdf,
        'normals_diff_map': normals_diff_map,
        'normals_orientation_loss_map': normals_orientation_loss_map,
        'albedo_smoothness_loss': albedo_smoothness_loss,
        'roughness_smoothness_loss': roughness_smoothness_loss,
    }
@torch.no_grad()
def evaluation_iter_TensoIR(
        test_dataset,
        tensoIR,
        args,
        renderer,
        savePath=None,
        prtx='',
        N_samples=-1,
        white_bg=False,
        ndc_ray=False,
        compute_extra_metrics=True,
        device='cuda',
        logger=None,
        step=None,
        test_all=False,
):
    """Evaluate a TensoIR model on a test dataset with G.T. normals and albedo.

    For each evaluated view: renders RGB / relit RGB / depth / normals / BRDF
    maps chunk-by-chunk through ``renderer``, aligns predicted albedo to the
    ground truth by a single-channel and a three-channel median ratio, computes
    PSNR (plus SSIM/LPIPS when ``compute_extra_metrics``), and writes PNGs
    under ``savePath``. Optionally logs one random view to TensorBoard
    (``logger``/``step``) and, when ``test_all``, writes videos of all views.

    Returns (psnr, psnr_rgb_brdf, MAE, PSNR_albedo_single, PSNR_albedo_three).
    NOTE(review): if the dataset is empty the metric locals are never set and
    the final return would raise NameError — callers appear to guarantee a
    non-empty dataset.
    """
    # Per-view accumulators for images and metrics.
    PSNRs_rgb, rgb_maps, depth_maps, gt_maps, gt_rgb_brdf_maps = [], [], [], [], []
    PSNRs_rgb_brdf = []
    rgb_with_brdf_maps, normal_rgb_maps, normal_rgb_vis_maps, normals_rgb_gt_maps = [], [], [], []
    albedo_maps, single_aligned_albedo_maps, three_aligned_albedo_maps, gt_albedo_maps, roughness_maps, fresnel_maps, normals_diff_maps, normals_orientation_loss_maps = [], [], [], [], [], [], [], []
    normal_raw_list = []
    normal_gt_list = []
    ssims, l_alex, l_vgg = [], [], []
    ssims_rgb_brdf, l_alex_rgb_brdf, l_vgg_rgb_brdf = [], [], []
    ssims_albedo_single, l_alex_albedo_single, l_vgg_albedo_single = [], [], []
    ssims_albedo_three, l_alex_albedo_three, l_vgg_albedo_three = [], [], []
    # Output directory layout, one subfolder per map type.
    os.makedirs(savePath, exist_ok=True)
    os.makedirs(savePath + "/nvs_with_radiance_field", exist_ok=True)
    os.makedirs(savePath + "/nvs_with_brdf", exist_ok=True)
    os.makedirs(savePath + "/normal", exist_ok=True)
    os.makedirs(savePath + "/normal_vis", exist_ok=True)
    os.makedirs(savePath + "/brdf", exist_ok=True)
    os.makedirs(savePath + "/envir_map/", exist_ok=True)
    os.makedirs(savePath + "/acc_map", exist_ok=True)
    # Clear any stale tqdm progress bars (best-effort).
    try:
        tqdm._instances.clear()
    except Exception:
        pass
    near_far = test_dataset.near_far
    W, H = test_dataset.img_wh
    # Evaluate every view when test_all, otherwise at most args.N_vis views.
    num_test = len(test_dataset) if test_all else min(args.N_vis, len(test_dataset))
    gt_envir_map = None
    if test_dataset.lights_probes is not None:
        gt_envir_map = test_dataset.lights_probes.reshape(test_dataset.envir_map_h, test_dataset.envir_map_w, 3).numpy()
        # Gamma-correct (1/2.2) and quantize to uint8 for visualization.
        gt_envir_map = np.uint8(np.clip(np.power(gt_envir_map, 1./2.2), 0., 1.) * 255.)
        # resize to 256 * 512
        gt_envir_map = cv2.resize(gt_envir_map, (512, 256), interpolation=cv2.INTER_CUBIC)
    # Render the learned environment map at 256x512 for visual comparison.
    _, view_dirs = tensoIR.generate_envir_map_dir(256, 512)
    predicted_envir_map = tensoIR.get_light_rgbs(view_dirs.reshape(-1, 3).to(device))[0]
    predicted_envir_map = predicted_envir_map.reshape(256, 512, 3).cpu().detach().numpy()
    predicted_envir_map = np.clip(predicted_envir_map, a_min=0, a_max=np.inf)
    predicted_envir_map = np.uint8(np.clip(np.power(predicted_envir_map, 1./2.2), 0., 1.) * 255.)
    if gt_envir_map is not None:
        # Side-by-side: ground truth on the left, prediction on the right.
        envirmap = np.concatenate((gt_envir_map, predicted_envir_map), axis=1)
    else:
        envirmap = predicted_envir_map
    # save predicted envir map
    imageio.imwrite(f'{savePath}/envir_map/{prtx}envirmap.png', envirmap)
    # Stride through the dataset so num_test views are evenly spaced.
    test_duration = int(len(test_dataset) / num_test)
    # compute global rescale ratio for predicted albedo
    if test_all:
        global_rescale_value_single, global_rescale_value_three = compute_rescale_ratio(tensoIR, test_dataset, sampled_num=20)
        global_rescale_value_single, global_rescale_value_three = global_rescale_value_single.cpu(), global_rescale_value_three.cpu()
    for idx in range(num_test):
        if test_all:
            print(f"test {idx} / {num_test}")
        item = test_dataset.__getitem__(idx * test_duration)
        rays = item['rays']                 # [H*W, 6]
        gt_rgb = item['rgbs'][0]            # [H*W, 3]
        light_idx = item['light_idx'][0]    # [H*W, 1]
        gt_normals = item['normals']        # [H*W, 3]
        gt_rgb_wirh_brdf = gt_rgb           # [H*W, 3]; relit G.T. equals the RGB G.T. here
        gt_mask = item['rgbs_mask']         # [H*W, 1]
        albedo_gt = item['albedo']          # [H*W, 3]
        # Per-view chunk accumulators.
        rgb_map, acc_map, depth_map, normal_map, albedo_map, roughness_map = [], [], [], [], [], []
        fresnel_map, rgb_with_brdf_map, normals_diff_map, normals_orientation_loss_map = [], [], [], []
        # Render the view in batches of args.batch_size_test rays.
        chunk_idxs = torch.split(torch.arange(rays.shape[0]), args.batch_size_test)
        for chunk_idx in chunk_idxs:
            ret_kw= renderer(
                rays[chunk_idx],
                None,  # not used
                light_idx[chunk_idx],
                tensoIR,
                N_samples=N_samples,
                ndc_ray=ndc_ray,
                white_bg=white_bg,
                sample_method='fixed_envirmap',
                chunk_size=args.relight_chunk_size,
                device=device,
                args=args
            )
            rgb_map.append(ret_kw['rgb_map'].detach().cpu())
            depth_map.append(ret_kw['depth_map'].detach().cpu())
            normal_map.append(ret_kw['normal_map'].detach().cpu())
            albedo_map.append(ret_kw['albedo_map'].detach().cpu())
            roughness_map.append(ret_kw['roughness_map'].detach().cpu())
            fresnel_map.append(ret_kw['fresnel_map'].detach().cpu())
            rgb_with_brdf_map.append(ret_kw['rgb_with_brdf_map'].detach().cpu())
            normals_diff_map.append(ret_kw['normals_diff_map'].detach().cpu())
            normals_orientation_loss_map.append(ret_kw['normals_orientation_loss_map'].detach().cpu())
            acc_map.append(ret_kw['acc_map'].detach().cpu())
        # Concatenate chunk results back into full-frame tensors.
        rgb_map = torch.cat(rgb_map)
        depth_map = torch.cat(depth_map)
        normal_map = torch.cat(normal_map)
        albedo_map = torch.cat(albedo_map)
        roughness_map = torch.cat(roughness_map)
        fresnel_map = torch.cat(fresnel_map)
        rgb_with_brdf_map = torch.cat(rgb_with_brdf_map)
        normals_diff_map = torch.cat(normals_diff_map)
        normals_orientation_loss_map = torch.cat(normals_orientation_loss_map)
        acc_map = torch.cat(acc_map)
        # normal_map_to_test = acc_map[..., None] * normal_map + (1 - acc_map[..., None]) * torch.tensor([0.0, 0.0, 1.0])
        rgb_map = rgb_map.clamp(0.0, 1.0)
        rgb_with_brdf_map = rgb_with_brdf_map.clamp(0.0, 1.0)
        # Reshape flat ray outputs into H x W images.
        acc_map = acc_map.reshape(H, W).detach().cpu()
        rgb_map, depth_map = rgb_map.reshape(H, W, 3).detach().cpu(), depth_map.reshape(H, W).detach().cpu()
        rgb_with_brdf_map = rgb_with_brdf_map.reshape(H, W, 3).detach().cpu()
        albedo_map = albedo_map.reshape(H, W, 3).detach().cpu()
        single_aligned_albedo_map = torch.ones_like(albedo_map)
        three_aligned_albedo_map = torch.ones_like(albedo_map)
        gt_albedo_reshaped = albedo_gt.reshape(H, W, 3).detach().cpu()
        gt_mask_reshaped = gt_mask.reshape(H, W).detach().cpu()
        # single channel alignment for albedo
        # (global ratio when evaluating all views; otherwise per-view median)
        if test_all:
            ratio_value = global_rescale_value_single
        else:
            ratio_value = (gt_albedo_reshaped[gt_mask_reshaped] / albedo_map[gt_mask_reshaped].clamp(min=1e-6))[..., 0].median()
        single_aligned_albedo_map[gt_mask_reshaped] = (ratio_value * albedo_map[gt_mask_reshaped]).clamp(min=0.0, max=1.0)
        # three channel alignment for albedo
        if test_all:
            ratio_value = global_rescale_value_three
        else:
            ratio_value, _ = (gt_albedo_reshaped[gt_mask_reshaped]/ albedo_map[gt_mask_reshaped].clamp(min=1e-6)).median(dim=0)
        three_aligned_albedo_map[gt_mask_reshaped] = (ratio_value * albedo_map[gt_mask_reshaped]).clamp(min=0.0, max=1.0)
        roughness_map = roughness_map.reshape(H, W, 1).repeat(1, 1, 3).detach().cpu()
        fresnel_map = fresnel_map.reshape(H, W, 3).detach().cpu()
        depth_map, _ = visualize_depth_numpy(depth_map.numpy(), near_far)
        # Store loss and images
        if test_dataset.__len__():
            gt_rgb = gt_rgb.view(H, W, 3)
            gt_rgb_wirh_brdf = gt_rgb_wirh_brdf.view(H, W, 3)
            loss_rgb = torch.mean((rgb_map - gt_rgb) ** 2)
            loss_rgb_brdf = torch.mean((rgb_with_brdf_map - gt_rgb_wirh_brdf) ** 2)
            # PSNR = -10 * log10(MSE), computed via natural logs.
            PSNRs_rgb.append(-10.0 * np.log(loss_rgb.item()) / np.log(10.0))
            PSNRs_rgb_brdf.append(-10.0 * np.log(loss_rgb_brdf.item()) / np.log(10.0))
            if compute_extra_metrics:
                ssim = rgb_ssim(rgb_map, gt_rgb, 1)
                l_a = rgb_lpips(gt_rgb.numpy(), rgb_map.numpy(), 'alex', tensoIR.device)
                l_v = rgb_lpips(gt_rgb.numpy(), rgb_map.numpy(), 'vgg', tensoIR.device)
                ssim_rgb_brdf = rgb_ssim(rgb_with_brdf_map, gt_rgb_wirh_brdf, 1)
                l_a_rgb_brdf = rgb_lpips(gt_rgb_wirh_brdf.numpy(), rgb_with_brdf_map.numpy(), 'alex', tensoIR.device)
                l_v_rgb_brdf = rgb_lpips(gt_rgb_wirh_brdf.numpy(), rgb_with_brdf_map.numpy(), 'vgg', tensoIR.device)
                # single channel aligned albedo
                ssim_albedo_single = rgb_ssim(single_aligned_albedo_map, gt_albedo_reshaped, 1)
                l_a_albedo_single = rgb_lpips(gt_albedo_reshaped.numpy(), single_aligned_albedo_map.numpy(), 'alex', tensoIR.device)
                l_v_albedo_single = rgb_lpips(gt_albedo_reshaped.numpy(), single_aligned_albedo_map.numpy(), 'vgg', tensoIR.device)
                # three channel aligned albedo
                ssim_albedo_three = rgb_ssim(three_aligned_albedo_map, gt_albedo_reshaped, 1)
                l_a_albedo_three = rgb_lpips(gt_albedo_reshaped.numpy(), three_aligned_albedo_map.numpy(), 'alex', tensoIR.device)
                l_v_albedo_three = rgb_lpips(gt_albedo_reshaped.numpy(), three_aligned_albedo_map.numpy(), 'vgg', tensoIR.device)
                ssims.append(ssim)
                l_alex.append(l_a)
                l_vgg.append(l_v)
                ssims_rgb_brdf.append(ssim_rgb_brdf)
                l_alex_rgb_brdf.append(l_a_rgb_brdf)
                l_vgg_rgb_brdf.append(l_v_rgb_brdf)
                ssims_albedo_single.append(ssim_albedo_single)
                l_alex_albedo_single.append(l_a_albedo_single)
                l_vgg_albedo_single.append(l_v_albedo_single)
                ssims_albedo_three.append(ssim_albedo_three)
                l_alex_albedo_three.append(l_a_albedo_three)
                l_vgg_albedo_three.append(l_v_albedo_three)
        # Quantize maps to uint8 for saving/visualization.
        rgb_map = (rgb_map.numpy() * 255).astype('uint8')
        rgb_with_brdf_map = (rgb_with_brdf_map.numpy() * 255).astype('uint8')
        gt_rgb = (gt_rgb.numpy() * 255).astype('uint8')
        gt_rgb_wirh_brdf = (gt_rgb_wirh_brdf.numpy() * 255).astype('uint8')
        albedo_map = (albedo_map.numpy() * 255).astype('uint8')
        roughness_map = (roughness_map.numpy() * 255).astype('uint8')
        fresnel_map = (fresnel_map.numpy() * 255).astype('uint8')
        acc_map = (acc_map.numpy() * 255).astype('uint8')
        # Visualize normal
        ## Prediction
        normal_map = F.normalize(normal_map, dim=-1)
        # normal_map_to_test = F.normalize(normal_map_to_test, dim=-1)
        # normal_raw_list.append(normal_map_to_test)
        # normal_rgb_map = normal_map_to_test * 0.5 + 0.5
        normal_raw_list.append(normal_map)
        normal_rgb_map = normal_map * 0.5 + 0.5  # map from [-1, 1] to [0, 1] to visualize
        normal_rgb_map = (normal_rgb_map.reshape(H, W, 3).cpu().numpy() * 255).astype('uint8')
        normal_rgb_vis_map = (normal_rgb_map * (acc_map[:, :, None] / 255.0) + (1 -(acc_map[:, :, None] / 255.0)) * 255).astype('uint8') # white background
        # GT normal
        gt_normals = F.normalize(gt_normals, dim=-1)
        normal_gt_list.append(gt_normals)
        gt_normals_rgb_map = gt_normals * 0.5 + 0.5
        gt_normals_rgb_map = (gt_normals_rgb_map.reshape(H, W, 3).numpy() * 255).astype('uint8')
        # difference between the predicted normals and derived normals
        normals_diff_map = (torch.clamp(normals_diff_map, 0.0, 1.0).reshape(H, W, 1).repeat(1, 1, 3).numpy() * 255).astype('uint8')
        # normals orientation loss map
        normals_orientation_loss_map = (torch.clamp(normals_orientation_loss_map , 0.0, 1.0).reshape(H, W, 1).repeat(1, 1, 3).numpy() * 255).astype('uint8')
        # Accumulate per-view images for logging / video export.
        rgb_maps.append(rgb_map)
        rgb_with_brdf_maps.append(rgb_with_brdf_map)
        depth_maps.append(depth_map)
        gt_maps.append(gt_rgb)
        gt_rgb_brdf_maps.append(gt_rgb_wirh_brdf)
        normal_rgb_maps.append(normal_rgb_map)
        normal_rgb_vis_maps.append(normal_rgb_vis_map)
        normals_rgb_gt_maps.append(gt_normals_rgb_map)
        if not test_all:
            normals_diff_maps.append(normals_diff_map)
            normals_orientation_loss_maps.append(normals_orientation_loss_map)
        albedo_maps.append(albedo_map)
        # Gamma-encode (1/2.2) the aligned/G.T. albedos before accumulation.
        single_aligned_albedo_maps.append((single_aligned_albedo_map.numpy())**(1/2.2))
        three_aligned_albedo_maps.append((three_aligned_albedo_map.numpy())**(1/2.2))
        gt_albedo_maps.append((gt_albedo_reshaped.numpy())**(1/2.2))
        roughness_maps.append(roughness_map)
        fresnel_maps.append(fresnel_map)
        if savePath is not None:
            # Concatenate related maps horizontally into comparison strips.
            rgb_map = np.concatenate((rgb_map, gt_rgb, depth_map), axis=1)
            rgb_with_brdf_map = np.concatenate((rgb_with_brdf_map, gt_rgb_wirh_brdf), axis=1)
            normal_map = np.concatenate((normal_rgb_map, gt_normals_rgb_map, normals_diff_map, normals_orientation_loss_map), axis=1)
            brdf_map = np.concatenate((albedo_map, roughness_map, fresnel_map), axis=1)
            single_aligned_albedo_gamma = ((single_aligned_albedo_map.numpy())**(1/2.2) * 255).astype('uint8')
            three_aligned_albedo_gamma = ((three_aligned_albedo_map.numpy())**(1/2.2) * 255).astype('uint8')
            gt_albedo_gamma = ((gt_albedo_reshaped.numpy())**(1/2.2) * 255).astype('uint8')
            albedo_map = np.concatenate((single_aligned_albedo_gamma, three_aligned_albedo_gamma, gt_albedo_gamma), axis=1)
            imageio.imwrite(f'{savePath}/nvs_with_radiance_field/{prtx}{idx:03d}.png', rgb_map)
            imageio.imwrite(f'{savePath}/nvs_with_brdf/{prtx}{idx:03d}.png', rgb_with_brdf_map)
            imageio.imwrite(f'{savePath}/normal/{prtx}{idx:03d}.png', normal_map)
            imageio.imwrite(f'{savePath}/normal_vis/{prtx}{idx:03d}.png', normal_rgb_vis_map)
            imageio.imwrite(f'{savePath}/brdf/{prtx}{idx:03d}.png', brdf_map)
            imageio.imwrite(f'{savePath}/brdf/{prtx}{idx:03d}_albedo.png', albedo_map)
            imageio.imwrite(f'{savePath}/brdf/{prtx}{idx:03d}_roughness.png', roughness_map)
            imageio.imwrite(f'{savePath}/acc_map/{prtx}{idx:03d}.png', acc_map)
    # Randomly select a prediction to visualize
    if logger and step and not test_all:
        vis_idx = random.choice(range(len(rgb_maps)))
        vis_rgb = torch.from_numpy(rgb_maps[vis_idx])
        vis_rgb_brdf_rgb = torch.from_numpy(rgb_with_brdf_maps[vis_idx])
        vis_depth = torch.from_numpy(depth_maps[vis_idx])
        vis_rgb_gt = torch.from_numpy(gt_maps[vis_idx])
        vis_normal_rgb = torch.from_numpy(normal_rgb_maps[vis_idx])
        vis_normal_gt_rgb = torch.from_numpy(normals_rgb_gt_maps[vis_idx])
        vis_normals_diff_rgb = torch.from_numpy(normals_diff_maps[vis_idx])
        vis_normals_orientation_loss_rgb = torch.from_numpy(normals_orientation_loss_maps[vis_idx])
        vis_albedo = torch.from_numpy(albedo_maps[vis_idx])
        vis_single_aligned_albedo_gamma = torch.from_numpy((single_aligned_albedo_maps[vis_idx]* 255).astype('uint8'))
        vis_three_aligned_albedo_gamma = torch.from_numpy((three_aligned_albedo_maps[vis_idx]* 255).astype('uint8'))
        vis_gt_albedo_gamma = torch.from_numpy((gt_albedo_maps[vis_idx]* 255).astype('uint8'))
        vis_roughness = torch.from_numpy(roughness_maps[vis_idx])
        vis_fresnel = torch.from_numpy(fresnel_maps[vis_idx])
        # Stack H,W,C images into N,C,H,W grids for TensorBoard.
        vis_rgb_grid = torch.stack([vis_rgb, vis_rgb_brdf_rgb, vis_rgb_gt, vis_depth]).permute(0, 3, 1, 2).to(float)
        vis_normal_grid = torch.stack([vis_normal_rgb, vis_normal_gt_rgb, vis_normals_diff_rgb, vis_normals_orientation_loss_rgb]).permute(0, 3, 1, 2).to(float)
        vis_brdf_grid = torch.stack([vis_albedo, vis_roughness, vis_fresnel]).permute(0, 3, 1, 2).to(float)
        vis_envir_map_grid = torch.from_numpy(envirmap).unsqueeze(0).permute(0, 3, 1, 2).to(float)
        vis_albedo_grid = torch.stack([vis_single_aligned_albedo_gamma, vis_three_aligned_albedo_gamma, vis_gt_albedo_gamma]).permute(0, 3, 1, 2).to(float)
        logger.add_image('test/rgb',
                         vutils.make_grid(vis_rgb_grid, padding=0, normalize=True, value_range=(0, 255)), step)
        logger.add_image('test/normal',
                         vutils.make_grid(vis_normal_grid, padding=0, normalize=True, value_range=(0, 255)), step)
        logger.add_image('test/brdf',
                         vutils.make_grid(vis_brdf_grid, padding=0, normalize=True, value_range=(0, 255)), step)
        logger.add_image('test/envir_map',
                         vutils.make_grid(vis_envir_map_grid, padding=0, normalize=True, value_range=(0, 255)), step)
        logger.add_image('test/albedo',
                         vutils.make_grid(vis_albedo_grid, padding=0, normalize=True, value_range=(0, 255)), step)
    # Compute metrics
    if PSNRs_rgb:
        psnr = np.mean(np.asarray(PSNRs_rgb))
        psnr_rgb_brdf = np.mean(np.asarray(PSNRs_rgb_brdf))
        gt_normal_stack = np.stack(normal_gt_list)
        render_normal_stack = np.stack(normal_raw_list)
        single_aligned_albedo_maps = np.stack(single_aligned_albedo_maps)
        three_aligned_albedo_maps = np.stack(three_aligned_albedo_maps)
        gt_albedo_maps = np.stack(gt_albedo_maps)
        # Albedo PSNRs are computed on gamma-encoded maps (see accumulation above).
        loss_albedo_single = np.mean((gt_albedo_maps - single_aligned_albedo_maps)**2)
        loss_albedo_three = np.mean((gt_albedo_maps - three_aligned_albedo_maps)**2)
        PSNR_albedo_single = -10.0 * np.log(loss_albedo_single) / np.log(10.0)
        PSNR_albedo_three = -10.0 * np.log(loss_albedo_three) / np.log(10.0)
        # compute mean angular error (degrees) between G.T. and rendered normals
        MAE = np.mean(np.arccos(np.clip(np.sum(gt_normal_stack * render_normal_stack, axis=-1), -1, 1)) * 180 / np.pi)
        if compute_extra_metrics:
            ssim = np.mean(np.asarray(ssims))
            l_a = np.mean(np.asarray(l_alex))
            l_v = np.mean(np.asarray(l_vgg))
            ssim_rgb_brdf = np.mean(np.asarray(ssims_rgb_brdf))
            l_a_rgb_brdf = np.mean(np.asarray(l_alex_rgb_brdf))
            l_v_rgb_brdf = np.mean(np.asarray(l_vgg_rgb_brdf))
            ssim_albedo_single = np.mean(np.asarray(ssims_albedo_single))
            l_a_albedo_single = np.mean(np.asarray(l_alex_albedo_single))
            l_v_albedo_single = np.mean(np.asarray(l_vgg_albedo_single))
            ssim_albedo_three = np.mean(np.asarray(ssims_albedo_three))
            l_a_albedo_three = np.mean(np.asarray(l_alex_albedo_three))
            l_v_albedo_three = np.mean(np.asarray(l_vgg_albedo_three))
            saved_message = f'Iteration:{prtx[:-1]}: \n' \
                            + f'\tPSNR_nvs: {psnr:.2f}, PSNR_nvs_brdf: {psnr_rgb_brdf:.2f}, PNSR_albedo_single_aligned: {PSNR_albedo_single:.2f}, PNSR_albedo_three_aligned: {PSNR_albedo_three:.2f}\n' \
                            + f'\tSSIM_rgb: {ssim:.4f}, L_Alex_rgb: {l_a:.4f}, L_VGG_rgb: {l_v:.4f}\n' \
                            + f'\tSSIM_rgb_brdf: {ssim_rgb_brdf:.4f}, L_Alex_rgb_brdf: {l_a_rgb_brdf:.4f}, L_VGG_rgb_brdf: {l_v_rgb_brdf:.4f}\n' \
                            + f'\tSSIM_albedo_single: {ssim_albedo_single:.4f}, L_Alex_albedo_single: {l_a_albedo_single:.4f}, L_VGG_albedo_single: {l_v_albedo_single:.4f}\n' \
                            + f'\tSSIM_albedo_three: {ssim_albedo_three:.4f}, L_Alex_albedo_three: {l_a_albedo_three:.4f}, L_VGG_albedo_three: {l_v_albedo_three:.4f}\n' \
                            + f'\tMAE: {MAE:.2f}\n'
        else:
            saved_message = f'Iteration:{prtx[:-1]}, PSNR_nvs: {psnr:.2f}, PSNR_nvs_brdf: {psnr_rgb_brdf:.2f}, MAE: {MAE:.2f}, PSNR_albedo_single_aligned: {PSNR_albedo_single:.2f}, PSNR_albedo_three_aligned: {PSNR_albedo_three:.2f}\n'
        # write the end of record file
        with open(f'{savePath}/metrics_record.txt', 'a') as f:
            f.write(saved_message)
    # save video results
    if test_all:
        os.makedirs(savePath + "/video", exist_ok=True)
        video_path = savePath + "/video"
        imageio.mimsave(os.path.join(video_path, 'rgb.mp4'), np.stack(rgb_maps), fps=24, quality=8)
        imageio.mimsave(os.path.join(video_path, 'rgb_brdf.mp4'), np.stack(rgb_with_brdf_maps), fps=24, quality=8)
        imageio.mimsave(os.path.join(video_path, 'gt_normal_video.mp4'), np.stack(normals_rgb_gt_maps), fps=24, quality=8)
        imageio.mimsave(os.path.join(video_path, 'render_normal_video.mp4'), np.stack(normal_rgb_maps), fps=24, quality=8)
        imageio.mimsave(os.path.join(video_path, 'render_normal_vis_video.mp4'), np.stack(normal_rgb_vis_maps), fps=24, quality=8)
        imageio.mimsave(os.path.join(video_path, 'single_aligned_albedo.mp4'), (single_aligned_albedo_maps * 255).astype('uint8'), fps=24, quality=8)
        imageio.mimsave(os.path.join(video_path, 'three_aligned_albedo.mp4'), (three_aligned_albedo_maps * 255).astype('uint8'), fps=24, quality=8)
        imageio.mimsave(os.path.join(video_path, 'roughness.mp4'), np.stack(roughness_maps), fps=24, quality=8)
    return psnr, psnr_rgb_brdf, MAE, PSNR_albedo_single, PSNR_albedo_three
@torch.no_grad()
def evaluation_iter_TensoIR_simple(
        test_dataset,
        tensoIR,
        args,
        renderer,
        savePath=None,
        prtx='',
        N_samples=-1,
        white_bg=False,
        ndc_ray=False,
        compute_extra_metrics=True,
        device='cuda',
        logger=None,
        step=None,
        test_all=False,
):
    """Evaluate a TensoIR model on a dataset WITHOUT G.T. normals/albedo/light probe.

    Simplified variant of ``evaluation_iter_TensoIR``: renders RGB / relit RGB /
    depth / normals / BRDF maps chunk-by-chunk through ``renderer``, computes
    RGB PSNR (plus SSIM/LPIPS when ``compute_extra_metrics``), writes PNGs under
    ``savePath``, and optionally logs one random view to TensorBoard.

    Returns (psnr, psnr_rgb_brdf).
    NOTE(review): if the dataset is empty the metric locals are never set and
    the final return would raise NameError — callers appear to guarantee a
    non-empty dataset.
    """
    # Per-view accumulators for images and metrics.
    PSNRs_rgb, rgb_maps, depth_maps, gt_maps, gt_rgb_brdf_maps = [], [], [], [], []
    PSNRs_rgb_brdf = []
    rgb_with_brdf_maps, normal_rgb_maps, normal_rgb_vis_maps = [], [], []
    albedo_maps, albedo_gamma_maps, roughness_maps, fresnel_maps, normals_diff_maps, normals_orientation_loss_maps = [], [], [], [], [], []
    ssims, l_alex, l_vgg = [], [], []
    ssims_rgb_brdf, l_alex_rgb_brdf, l_vgg_rgb_brdf = [], [], []
    # Output directory layout, one subfolder per map type.
    os.makedirs(savePath, exist_ok=True)
    os.makedirs(savePath + "/nvs_with_radiance_field", exist_ok=True)
    os.makedirs(savePath + "/nvs_with_brdf", exist_ok=True)
    os.makedirs(savePath + "/normal", exist_ok=True)
    os.makedirs(savePath + "/normal_vis", exist_ok=True)
    os.makedirs(savePath + "/brdf", exist_ok=True)
    os.makedirs(savePath + "/envir_map/", exist_ok=True)
    os.makedirs(savePath + "/acc_map", exist_ok=True)
    # Clear any stale tqdm progress bars (best-effort).
    try:
        tqdm._instances.clear()
    except Exception:
        pass
    near_far = test_dataset.near_far
    W, H = test_dataset.img_wh
    # Evaluate every view when test_all, otherwise at most args.N_vis views.
    num_test = len(test_dataset) if test_all else min(args.N_vis, len(test_dataset))
    # Render the learned environment map at 256x512 for visualization.
    _, view_dirs = tensoIR.generate_envir_map_dir(256, 512)
    predicted_envir_map = tensoIR.get_light_rgbs(view_dirs.reshape(-1, 3).to(device))[0]
    predicted_envir_map = predicted_envir_map.reshape(256, 512, 3).cpu().detach().numpy()
    predicted_envir_map = np.clip(predicted_envir_map, a_min=0, a_max=np.inf)
    # Gamma-correct (1/2.2) and quantize to uint8.
    predicted_envir_map = np.uint8(np.clip(np.power(predicted_envir_map, 1./2.2), 0., 1.) * 255.)
    envirmap = predicted_envir_map
    # save predicted envir map
    imageio.imwrite(f'{savePath}/envir_map/{prtx}envirmap.png', envirmap)
    # Stride through the dataset so num_test views are evenly spaced.
    test_duration = int(len(test_dataset) / num_test)
    for idx in range(num_test):
        item = test_dataset.__getitem__(idx * test_duration)
        rays = item['rays']                 # [H*W, 6]
        gt_rgb = item['rgbs'][0]            # [H*W, 3]
        light_idx = item['light_idx'][0]    # [H*W, 1]
        gt_rgb_wirh_brdf = gt_rgb           # [H*W, 3]; relit G.T. equals the RGB G.T. here
        gt_mask = item['rgbs_mask']         # [H*W, 1]
        # Per-view chunk accumulators.
        rgb_map, acc_map, depth_map, normal_map, albedo_map, roughness_map, albedo_gamma_map = [], [], [], [], [], [], []
        fresnel_map, rgb_with_brdf_map, normals_diff_map, normals_orientation_loss_map = [], [], [], []
        # Render the view in batches of args.batch_size_test rays.
        chunk_idxs = torch.split(torch.arange(rays.shape[0]), args.batch_size_test)
        for chunk_idx in chunk_idxs:
            ret_kw= renderer(
                rays[chunk_idx],
                None,  # not used
                light_idx[chunk_idx],
                tensoIR,
                N_samples=N_samples,
                ndc_ray=ndc_ray,
                white_bg=white_bg,
                sample_method='fixed_envirmap',
                chunk_size=args.relight_chunk_size,
                device=device,
                args=args
            )
            rgb_map.append(ret_kw['rgb_map'].detach().cpu())
            depth_map.append(ret_kw['depth_map'].detach().cpu())
            normal_map.append(ret_kw['normal_map'].detach().cpu())
            albedo_map.append(ret_kw['albedo_map'].detach().cpu())
            roughness_map.append(ret_kw['roughness_map'].detach().cpu())
            fresnel_map.append(ret_kw['fresnel_map'].detach().cpu())
            rgb_with_brdf_map.append(ret_kw['rgb_with_brdf_map'].detach().cpu())
            normals_diff_map.append(ret_kw['normals_diff_map'].detach().cpu())
            normals_orientation_loss_map.append(ret_kw['normals_orientation_loss_map'].detach().cpu())
            acc_map.append(ret_kw['acc_map'].detach().cpu())
        # Concatenate chunk results back into full-frame tensors.
        rgb_map = torch.cat(rgb_map)
        depth_map = torch.cat(depth_map)
        normal_map = torch.cat(normal_map)
        albedo_map = torch.cat(albedo_map)
        roughness_map = torch.cat(roughness_map)
        fresnel_map = torch.cat(fresnel_map)
        rgb_with_brdf_map = torch.cat(rgb_with_brdf_map)
        normals_diff_map = torch.cat(normals_diff_map)
        normals_orientation_loss_map = torch.cat(normals_orientation_loss_map)
        acc_map = torch.cat(acc_map)
        rgb_map = rgb_map.clamp(0.0, 1.0)
        rgb_with_brdf_map = rgb_with_brdf_map.clamp(0.0, 1.0)
        # Reshape flat ray outputs into H x W images.
        acc_map = acc_map.reshape(H, W).detach().cpu()
        rgb_map, depth_map = rgb_map.reshape(H, W, 3).detach().cpu(), depth_map.reshape(H, W).detach().cpu()
        rgb_with_brdf_map = rgb_with_brdf_map.reshape(H, W, 3).detach().cpu()
        albedo_map = albedo_map.reshape(H, W, 3).detach().cpu()
        # Gamma-encode (1/2.2) the predicted albedo for visualization.
        albedo_gamma_map = (albedo_map.clip(0, 1.)) ** (1.0 / 2.2)
        roughness_map = roughness_map.reshape(H, W, 1).repeat(1, 1, 3).detach().cpu()
        fresnel_map = fresnel_map.reshape(H, W, 3).detach().cpu()
        depth_map, _ = visualize_depth_numpy(depth_map.numpy(), near_far)
        # Store loss and images
        if test_dataset.__len__():
            gt_rgb = gt_rgb.view(H, W, 3)
            gt_rgb_wirh_brdf = gt_rgb_wirh_brdf.view(H, W, 3)
            loss_rgb = torch.mean((rgb_map - gt_rgb) ** 2)
            loss_rgb_brdf = torch.mean((rgb_with_brdf_map - gt_rgb_wirh_brdf) ** 2)
            # PSNR = -10 * log10(MSE), computed via natural logs.
            PSNRs_rgb.append(-10.0 * np.log(loss_rgb.item()) / np.log(10.0))
            PSNRs_rgb_brdf.append(-10.0 * np.log(loss_rgb_brdf.item()) / np.log(10.0))
            if compute_extra_metrics:
                ssim = rgb_ssim(rgb_map, gt_rgb, 1)
                l_a = rgb_lpips(gt_rgb.numpy(), rgb_map.numpy(), 'alex', tensoIR.device)
                l_v = rgb_lpips(gt_rgb.numpy(), rgb_map.numpy(), 'vgg', tensoIR.device)
                ssim_rgb_brdf = rgb_ssim(rgb_with_brdf_map, gt_rgb_wirh_brdf, 1)
                l_a_rgb_brdf = rgb_lpips(gt_rgb_wirh_brdf.numpy(), rgb_with_brdf_map.numpy(), 'alex', tensoIR.device)
                l_v_rgb_brdf = rgb_lpips(gt_rgb_wirh_brdf.numpy(), rgb_with_brdf_map.numpy(), 'vgg', tensoIR.device)
                ssims.append(ssim)
                l_alex.append(l_a)
                l_vgg.append(l_v)
                ssims_rgb_brdf.append(ssim_rgb_brdf)
                l_alex_rgb_brdf.append(l_a_rgb_brdf)
                l_vgg_rgb_brdf.append(l_v_rgb_brdf)
        # Quantize maps to uint8 for saving/visualization.
        rgb_map = (rgb_map.numpy() * 255).astype('uint8')
        rgb_with_brdf_map = (rgb_with_brdf_map.numpy() * 255).astype('uint8')
        gt_rgb = (gt_rgb.numpy() * 255).astype('uint8')
        gt_rgb_wirh_brdf = (gt_rgb_wirh_brdf.numpy() * 255).astype('uint8')
        albedo_map = (albedo_map.numpy() * 255).astype('uint8')
        albedo_gamma_map = (albedo_gamma_map.numpy() * 255).astype('uint8')
        roughness_map = (roughness_map.numpy() * 255).astype('uint8')
        fresnel_map = (fresnel_map.numpy() * 255).astype('uint8')
        acc_map = (acc_map.numpy() * 255).astype('uint8')
        # Visualize normal
        ## Prediction
        normal_map = F.normalize(normal_map, dim=-1)
        normal_rgb_map = normal_map * 0.5 + 0.5  # map from [-1, 1] to [0, 1] to visualize
        normal_rgb_map = (normal_rgb_map.reshape(H, W, 3).cpu().numpy() * 255).astype('uint8')
        normal_rgb_vis_map = (normal_rgb_map * (acc_map[:, :, None] / 255.0) + (1 -(acc_map[:, :, None] / 255.0)) * 255).astype('uint8') # white background
        # difference between the predicted normals and derived normals
        normals_diff_map = (torch.clamp(normals_diff_map, 0.0, 1.0).reshape(H, W, 1).repeat(1, 1, 3).numpy() * 255).astype('uint8')
        # normals orientation loss map
        normals_orientation_loss_map = (torch.clamp(normals_orientation_loss_map , 0.0, 1.0).reshape(H, W, 1).repeat(1, 1, 3).numpy() * 255).astype('uint8')
        # Accumulate per-view images for logging.
        rgb_maps.append(rgb_map)
        rgb_with_brdf_maps.append(rgb_with_brdf_map)
        depth_maps.append(depth_map)
        gt_maps.append(gt_rgb)
        gt_rgb_brdf_maps.append(gt_rgb_wirh_brdf)
        normal_rgb_maps.append(normal_rgb_map)
        normal_rgb_vis_maps.append(normal_rgb_vis_map)
        if not test_all:
            normals_diff_maps.append(normals_diff_map)
            normals_orientation_loss_maps.append(normals_orientation_loss_map)
        albedo_maps.append(albedo_map)
        albedo_gamma_maps.append(albedo_gamma_map)
        roughness_maps.append(roughness_map)
        fresnel_maps.append(fresnel_map)
        if savePath is not None:
            # Concatenate related maps horizontally into comparison strips.
            rgb_map = np.concatenate((rgb_map, gt_rgb, depth_map), axis=1)
            rgb_with_brdf_map = np.concatenate((rgb_with_brdf_map, gt_rgb_wirh_brdf), axis=1)
            normal_map = np.concatenate((normal_rgb_map, normals_diff_map, normals_orientation_loss_map), axis=1)
            brdf_map = np.concatenate((albedo_map, roughness_map, fresnel_map), axis=1)
            imageio.imwrite(f'{savePath}/nvs_with_radiance_field/{prtx}{idx:03d}.png', rgb_map)
            imageio.imwrite(f'{savePath}/nvs_with_brdf/{prtx}{idx:03d}.png', rgb_with_brdf_map)
            imageio.imwrite(f'{savePath}/normal/{prtx}{idx:03d}.png', normal_map)
            imageio.imwrite(f'{savePath}/normal_vis/{prtx}{idx:03d}.png', normal_rgb_vis_map)
            imageio.imwrite(f'{savePath}/brdf/{prtx}{idx:03d}.png', brdf_map)
            imageio.imwrite(f'{savePath}/brdf/{prtx}{idx:03d}_albedo.png', albedo_gamma_map)
            imageio.imwrite(f'{savePath}/brdf/{prtx}{idx:03d}_roughness.png', roughness_map)
            imageio.imwrite(f'{savePath}/acc_map/{prtx}{idx:03d}.png', acc_map)
    # Randomly select a prediction to visualize
    if logger and step and not test_all:
        vis_idx = random.choice(range(len(rgb_maps)))
        vis_rgb = torch.from_numpy(rgb_maps[vis_idx])
        vis_rgb_brdf_rgb = torch.from_numpy(rgb_with_brdf_maps[vis_idx])
        vis_depth = torch.from_numpy(depth_maps[vis_idx])
        vis_rgb_gt = torch.from_numpy(gt_maps[vis_idx])
        vis_normal_rgb = torch.from_numpy(normal_rgb_maps[vis_idx])
        vis_normals_diff_rgb = torch.from_numpy(normals_diff_maps[vis_idx])
        vis_normals_orientation_loss_rgb = torch.from_numpy(normals_orientation_loss_maps[vis_idx])
        vis_albedo = torch.from_numpy(albedo_maps[vis_idx])
        vis_albedo_gamma = torch.from_numpy(albedo_gamma_maps[vis_idx])
        vis_roughness = torch.from_numpy(roughness_maps[vis_idx])
        vis_fresnel = torch.from_numpy(fresnel_maps[vis_idx])
        # Stack H,W,C images into N,C,H,W grids for TensorBoard.
        vis_rgb_grid = torch.stack([vis_rgb, vis_rgb_brdf_rgb, vis_rgb_gt, vis_depth]).permute(0, 3, 1, 2).to(float)
        vis_normal_grid = torch.stack([vis_normal_rgb, vis_normals_diff_rgb, vis_normals_orientation_loss_rgb]).permute(0, 3, 1, 2).to(float)
        vis_brdf_grid = torch.stack([vis_albedo, vis_roughness, vis_fresnel]).permute(0, 3, 1, 2).to(float)
        vis_envir_map_grid = torch.from_numpy(envirmap).unsqueeze(0).permute(0, 3, 1, 2).to(float)
        vis_albedo_grid = torch.stack([vis_albedo, vis_albedo_gamma]).permute(0, 3, 1, 2).to(float)
        logger.add_image('test/rgb',
                         vutils.make_grid(vis_rgb_grid, padding=0, normalize=True, value_range=(0, 255)), step)
        logger.add_image('test/normal',
                         vutils.make_grid(vis_normal_grid, padding=0, normalize=True, value_range=(0, 255)), step)
        logger.add_image('test/brdf',
                         vutils.make_grid(vis_brdf_grid, padding=0, normalize=True, value_range=(0, 255)), step)
        logger.add_image('test/envir_map',
                         vutils.make_grid(vis_envir_map_grid, padding=0, normalize=True, value_range=(0, 255)), step)
        logger.add_image('test/albedo',
                         vutils.make_grid(vis_albedo_grid, padding=0, normalize=True, value_range=(0, 255)), step)
    # Compute metrics
    if PSNRs_rgb:
        psnr = np.mean(np.asarray(PSNRs_rgb))
        psnr_rgb_brdf = np.mean(np.asarray(PSNRs_rgb_brdf))
        if compute_extra_metrics:
            ssim = np.mean(np.asarray(ssims))
            l_a = np.mean(np.asarray(l_alex))
            l_v = np.mean(np.asarray(l_vgg))
            ssim_rgb_brdf = np.mean(np.asarray(ssims_rgb_brdf))
            l_a_rgb_brdf = np.mean(np.asarray(l_alex_rgb_brdf))
            l_v_rgb_brdf = np.mean(np.asarray(l_vgg_rgb_brdf))
            saved_message = f'Iteration:{prtx[:-1]}: \n' \
                            + f'\tPSNR_nvs: {psnr:.2f}, PSNR_nvs_brdf: {psnr_rgb_brdf:.2f}\n' \
                            + f'\tSSIM_rgb: {ssim:.4f}, L_Alex_rgb: {l_a:.4f}, L_VGG_rgb: {l_v:.4f}\n' \
                            + f'\tSSIM_rgb_brdf: {ssim_rgb_brdf:.4f}, L_Alex_rgb_brdf: {l_a_rgb_brdf:.4f}, L_VGG_rgb_brdf: {l_v_rgb_brdf:.4f}\n'
        else:
            saved_message = f'Iteration:{prtx[:-1]}, PSNR_nvs: {psnr:.2f}, PSNR_nvs_brdf: {psnr_rgb_brdf:.2f}\n'
        # write the end of record file
        with open(f'{savePath}/metrics_record.txt', 'a') as f:
            f.write(saved_message)
    return psnr, psnr_rgb_brdf
@torch.no_grad()
def evaluation_iter_TensoIR_general_multi_lights(
        test_dataset,
        tensoIR,
        args,
        renderer,
        savePath=None,
        prtx='',
        N_samples=-1,
        white_bg=False,
        ndc_ray=False,
        compute_extra_metrics=True,
        device='cuda',
        logger=None,
        step=None,
        test_all=False,
        light_idx_to_test=-1,
):
    """Evaluate a TensoIR model trained under the general multi-light setting.

    For each selected test view this renders the radiance-field RGB, the
    physically-based (BRDF) re-rendering, normals, albedo (with single- and
    three-channel GT rescaling), roughness and fresnel maps; saves the
    visualisations under ``savePath``; optionally logs a random sample to
    TensorBoard; and accumulates PSNR / SSIM / LPIPS metrics plus the mean
    angular error (MAE) of the normals.

    Args:
        test_dataset: dataset yielding per-item rays, per-light GT RGB,
            normals, an object mask and GT albedo.
        tensoIR: trained TensoIR model (provides the environment light too).
        args: parsed config; uses ``N_vis``, ``batch_size_test`` and
            ``relight_chunk_size``.
        renderer: chunked rendering callable returning a dict of maps.
        savePath: output directory; per-map sub-folders are created in it.
        prtx: filename prefix; ``prtx[:-1]`` is written as the iteration tag.
        N_samples: samples per ray (-1 = renderer default).
        white_bg, ndc_ray: forwarded to ``renderer``.
        compute_extra_metrics: also compute SSIM and LPIPS (alex / vgg).
        device: device used for environment-map light queries.
        logger, step: optional TensorBoard writer and global step.
        test_all: evaluate every frame, use a globally computed albedo
            rescale ratio, and export result videos.
        light_idx_to_test: fixed light index to evaluate; -1 picks one light
            at random per view.

    Returns:
        Tuple ``(psnr, psnr_rgb_brdf, MAE, PSNR_albedo_single,
        PSNR_albedo_three)``.

    NOTE(review): if ``PSNRs_rgb`` is empty (e.g. empty dataset) the final
    ``return`` and the metrics-file write reference undefined names — this
    function assumes at least one view is evaluated; confirm with callers.
    """
    # Per-view accumulators for images and metrics.
    PSNRs_rgb, rgb_maps, depth_maps, gt_maps, gt_rgb_brdf_maps = [], [], [], [], []
    PSNRs_rgb_brdf = []
    rgb_with_brdf_maps, normal_rgb_maps, normal_rgb_vis_maps, normals_rgb_gt_maps = [], [], [], []
    albedo_maps, single_aligned_albedo_maps, three_aligned_albedo_maps, gt_albedo_maps, roughness_maps, fresnel_maps, normals_diff_maps, normals_orientation_loss_maps = [], [], [], [], [], [], [], []
    normal_raw_list = []
    normal_gt_list = []
    ssims, l_alex, l_vgg = [], [], []
    ssims_rgb_brdf, l_alex_rgb_brdf, l_vgg_rgb_brdf = [], [], []
    ssims_albedo_single, l_alex_albedo_single, l_vgg_albedo_single = [], [], []
    ssims_albedo_three, l_alex_albedo_three, l_vgg_albedo_three = [], [], []
    # One sub-folder per visualisation type.
    os.makedirs(savePath, exist_ok=True)
    os.makedirs(savePath + "/nvs_with_radiance_field", exist_ok=True)
    os.makedirs(savePath + "/nvs_with_brdf", exist_ok=True)
    os.makedirs(savePath + "/normal", exist_ok=True)
    os.makedirs(savePath + "/normal_vis", exist_ok=True)
    os.makedirs(savePath + "/brdf", exist_ok=True)
    os.makedirs(savePath + "/envir_map/", exist_ok=True)
    os.makedirs(savePath + "/acc_map", exist_ok=True)
    # Clear any stale tqdm progress bars left by earlier loops.
    try:
        tqdm._instances.clear()
    except Exception:
        pass
    near_far = test_dataset.near_far
    W, H = test_dataset.img_wh
    num_test = len(test_dataset) if test_all else min(args.N_vis, len(test_dataset))
    gt_envir_map = None
    # if test_dataset.lights_probes is not None:
    #     gt_envir_map = test_dataset.lights_probes.reshape(test_dataset.envir_map_h, test_dataset.envir_map_w, 3).numpy()
    #     gt_envir_map = np.uint8(np.clip(np.power(gt_envir_map, 1./2.2), 0., 1.) * 255.)
    #     # resize to 256 * 512
    #     gt_envir_map = cv2.resize(gt_envir_map, (512, 256), interpolation=cv2.INTER_CUBIC)
    # Render the learnt environment map(s): one 256x512 panel per light,
    # stacked vertically, gamma-corrected to uint8 for saving.
    _, view_dirs = tensoIR.generate_envir_map_dir(256, 512)
    predicted_envir_map = tensoIR.get_light_rgbs(view_dirs.reshape(-1, 3).to(device))
    predicted_envir_map = predicted_envir_map.reshape(256 * tensoIR.light_num, 512, 3).cpu().detach().numpy()
    predicted_envir_map = np.clip(predicted_envir_map, a_min=0, a_max=np.inf)
    predicted_envir_map = np.uint8(np.clip(np.power(predicted_envir_map, 1./2.2), 0., 1.) * 255.)
    if gt_envir_map is not None:
        envirmap = np.concatenate((gt_envir_map, predicted_envir_map), axis=1)
    else:
        envirmap = predicted_envir_map
    # save predicted envir map
    imageio.imwrite(f'{savePath}/envir_map/{prtx}envirmap.png', envirmap)
    # Stride through the dataset so num_test views are evenly spaced.
    test_duration = int(len(test_dataset) / num_test)
    # compute global rescale ratio for predicted albedo
    if test_all:
        global_rescale_value_single, global_rescale_value_three = compute_rescale_ratio(tensoIR, test_dataset, sampled_num=20)
        global_rescale_value_single, global_rescale_value_three = global_rescale_value_single.cpu(), global_rescale_value_three.cpu()
    for idx in range(num_test):
        item = test_dataset.__getitem__(idx * test_duration)
        # generate a random number between [0, tensoIR.light_num)
        if light_idx_to_test >= 0:
            light_kind_idx = light_idx_to_test
        else:
            light_kind_idx = int(np.random.randint(tensoIR.light_num))
        rays = item['rays']                 # [H*W, 6]
        gt_rgb = item['rgbs'][light_kind_idx]  # [H*W, 3]
        light_idx = item['light_idx'][light_kind_idx]   # [H*W]
        gt_normals = item['normals']        # [H*W, 3]
        # NOTE(review): GT for the BRDF rendering is the same image as the
        # radiance-field GT under the chosen light ("wirh" looks like a typo
        # for "with" — name kept as-is).
        gt_rgb_wirh_brdf = gt_rgb           # [H*W, 3]
        gt_mask = item['rgbs_mask']         # [H*W, 1]
        albedo_gt = item['albedo']          # [H*W, 3]
        # Per-chunk output buffers; concatenated after the chunk loop.
        rgb_map, acc_map, depth_map, normal_map, albedo_map, roughness_map = [], [], [], [], [], []
        fresnel_map, rgb_with_brdf_map, normals_diff_map, normals_orientation_loss_map = [], [], [], []
        chunk_idxs = torch.split(torch.arange(rays.shape[0]), args.batch_size_test)
        for chunk_idx in chunk_idxs:
            ret_kw= renderer(
                rays[chunk_idx],
                None,  # not used
                light_idx[chunk_idx],
                tensoIR,
                N_samples=N_samples,
                ndc_ray=ndc_ray,
                white_bg=white_bg,
                sample_method='fixed_envirmap',
                chunk_size=args.relight_chunk_size,
                device=device,
                args=args
            )
            rgb_map.append(ret_kw['rgb_map'].detach().cpu())
            depth_map.append(ret_kw['depth_map'].detach().cpu())
            normal_map.append(ret_kw['normal_map'].detach().cpu())
            albedo_map.append(ret_kw['albedo_map'].detach().cpu())
            roughness_map.append(ret_kw['roughness_map'].detach().cpu())
            fresnel_map.append(ret_kw['fresnel_map'].detach().cpu())
            rgb_with_brdf_map.append(ret_kw['rgb_with_brdf_map'].detach().cpu())
            normals_diff_map.append(ret_kw['normals_diff_map'].detach().cpu())
            normals_orientation_loss_map.append(ret_kw['normals_orientation_loss_map'].detach().cpu())
            acc_map.append(ret_kw['acc_map'].detach().cpu())
        # Stitch the chunks back into full-frame tensors.
        rgb_map = torch.cat(rgb_map)
        depth_map = torch.cat(depth_map)
        normal_map = torch.cat(normal_map)
        albedo_map = torch.cat(albedo_map)
        roughness_map = torch.cat(roughness_map)
        fresnel_map = torch.cat(fresnel_map)
        rgb_with_brdf_map = torch.cat(rgb_with_brdf_map)
        normals_diff_map = torch.cat(normals_diff_map)
        normals_orientation_loss_map = torch.cat(normals_orientation_loss_map)
        acc_map = torch.cat(acc_map)
        # normal_map_to_test = acc_map[..., None] * normal_map + (1 - acc_map[..., None]) * torch.tensor([0.0, 0.0, 1.0])
        rgb_map = rgb_map.clamp(0.0, 1.0)
        rgb_with_brdf_map = rgb_with_brdf_map.clamp(0.0, 1.0)
        acc_map = acc_map.reshape(H, W).detach().cpu()
        rgb_map, depth_map = rgb_map.reshape(H, W, 3).detach().cpu(), depth_map.reshape(H, W).detach().cpu()
        rgb_with_brdf_map = rgb_with_brdf_map.reshape(H, W, 3).detach().cpu()
        albedo_map = albedo_map.reshape(H, W, 3).detach().cpu()
        # Start from all-ones (white) so unmasked background stays white.
        single_aligned_albedo_map = torch.ones_like(albedo_map)
        three_aligned_albedo_map = torch.ones_like(albedo_map)
        gt_albedo_reshaped = albedo_gt.reshape(H, W, 3).detach().cpu()
        gt_mask_reshaped = gt_mask.reshape(H, W).detach().cpu()
        # single channel alignment for albedo
        if test_all:
            ratio_value = global_rescale_value_single
        else:
            # Median of per-pixel GT/pred ratios (red channel only) inside the mask.
            ratio_value = (gt_albedo_reshaped[gt_mask_reshaped] / albedo_map[gt_mask_reshaped].clamp(min=1e-6))[..., 0].median()
        single_aligned_albedo_map[gt_mask_reshaped] = (ratio_value * albedo_map[gt_mask_reshaped]).clamp(min=0.0, max=1.0)
        # three channel alignment for albedo
        if test_all:
            ratio_value = global_rescale_value_three
        else:
            # Per-channel median ratio inside the mask.
            ratio_value, _ = (gt_albedo_reshaped[gt_mask_reshaped]/ albedo_map[gt_mask_reshaped].clamp(min=1e-6)).median(dim=0)
        three_aligned_albedo_map[gt_mask_reshaped] = (ratio_value * albedo_map[gt_mask_reshaped]).clamp(min=0.0, max=1.0)
        roughness_map = roughness_map.reshape(H, W, 1).repeat(1, 1, 3).detach().cpu()
        fresnel_map = fresnel_map.reshape(H, W, 3).detach().cpu()
        depth_map, _ = visualize_depth_numpy(depth_map.numpy(), near_far)
        # Store loss and images
        if test_dataset.__len__():
            gt_rgb = gt_rgb.view(H, W, 3)
            gt_rgb_wirh_brdf = gt_rgb_wirh_brdf.view(H, W, 3)
            loss_rgb = torch.mean((rgb_map - gt_rgb) ** 2)
            loss_rgb_brdf = torch.mean((rgb_with_brdf_map - gt_rgb_wirh_brdf) ** 2)
            # PSNR from MSE: -10 * log10(mse).
            PSNRs_rgb.append(-10.0 * np.log(loss_rgb.item()) / np.log(10.0))
            PSNRs_rgb_brdf.append(-10.0 * np.log(loss_rgb_brdf.item()) / np.log(10.0))
            if compute_extra_metrics:
                ssim = rgb_ssim(rgb_map, gt_rgb, 1)
                l_a = rgb_lpips(gt_rgb.numpy(), rgb_map.numpy(), 'alex', tensoIR.device)
                l_v = rgb_lpips(gt_rgb.numpy(), rgb_map.numpy(), 'vgg', tensoIR.device)
                ssim_rgb_brdf = rgb_ssim(rgb_with_brdf_map, gt_rgb_wirh_brdf, 1)
                l_a_rgb_brdf = rgb_lpips(gt_rgb_wirh_brdf.numpy(), rgb_with_brdf_map.numpy(), 'alex', tensoIR.device)
                l_v_rgb_brdf = rgb_lpips(gt_rgb_wirh_brdf.numpy(), rgb_with_brdf_map.numpy(), 'vgg', tensoIR.device)
                # single channel aligned albedo
                ssim_albedo_single = rgb_ssim(single_aligned_albedo_map, gt_albedo_reshaped, 1)
                l_a_albedo_single = rgb_lpips(gt_albedo_reshaped.numpy(), single_aligned_albedo_map.numpy(), 'alex', tensoIR.device)
                l_v_albedo_single = rgb_lpips(gt_albedo_reshaped.numpy(), single_aligned_albedo_map.numpy(), 'vgg', tensoIR.device)
                # three channel aligned albedo
                ssim_albedo_three = rgb_ssim(three_aligned_albedo_map, gt_albedo_reshaped, 1)
                l_a_albedo_three = rgb_lpips(gt_albedo_reshaped.numpy(), three_aligned_albedo_map.numpy(), 'alex', tensoIR.device)
                l_v_albedo_three = rgb_lpips(gt_albedo_reshaped.numpy(), three_aligned_albedo_map.numpy(), 'vgg', tensoIR.device)
                ssims.append(ssim)
                l_alex.append(l_a)
                l_vgg.append(l_v)
                ssims_rgb_brdf.append(ssim_rgb_brdf)
                l_alex_rgb_brdf.append(l_a_rgb_brdf)
                l_vgg_rgb_brdf.append(l_v_rgb_brdf)
                ssims_albedo_single.append(ssim_albedo_single)
                l_alex_albedo_single.append(l_a_albedo_single)
                l_vgg_albedo_single.append(l_v_albedo_single)
                ssims_albedo_three.append(ssim_albedo_three)
                l_alex_albedo_three.append(l_a_albedo_three)
                l_vgg_albedo_three.append(l_v_albedo_three)
        # Convert the float [0,1] maps to uint8 images for saving/logging.
        rgb_map = (rgb_map.numpy() * 255).astype('uint8')
        rgb_with_brdf_map = (rgb_with_brdf_map.numpy() * 255).astype('uint8')
        gt_rgb = (gt_rgb.numpy() * 255).astype('uint8')
        gt_rgb_wirh_brdf = (gt_rgb_wirh_brdf.numpy() * 255).astype('uint8')
        albedo_map = (albedo_map.numpy() * 255).astype('uint8')
        roughness_map = (roughness_map.numpy() * 255).astype('uint8')
        fresnel_map = (fresnel_map.numpy() * 255).astype('uint8')
        acc_map = (acc_map.numpy() * 255).astype('uint8')
        # Visualize normal
        ## Prediction
        normal_map = F.normalize(normal_map, dim=-1)
        normal_raw_list.append(normal_map)
        normal_rgb_map = normal_map * 0.5 + 0.5 # map from [-1, 1] to [0, 1] to visualize
        normal_rgb_map = (normal_rgb_map.reshape(H, W, 3).cpu().numpy() * 255).astype('uint8')
        normal_rgb_vis_map = (normal_rgb_map * (acc_map[:, :, None] / 255.0) + (1 -(acc_map[:, :, None] / 255.0)) * 255).astype('uint8') # white background
        # GT normal
        gt_normals = F.normalize(gt_normals, dim=-1)
        normal_gt_list.append(gt_normals)
        gt_normals_rgb_map = gt_normals * 0.5 + 0.5
        gt_normals_rgb_map = (gt_normals_rgb_map.reshape(H, W, 3).numpy() * 255).astype('uint8')
        # difference between the predicted normals and derived normals
        normals_diff_map = (torch.clamp(normals_diff_map, 0.0, 1.0).reshape(H, W, 1).repeat(1, 1, 3).numpy() * 255).astype('uint8')
        # normals orientation loss map
        normals_orientation_loss_map = (torch.clamp(normals_orientation_loss_map , 0.0, 1.0).reshape(H, W, 1).repeat(1, 1, 3).numpy() * 255).astype('uint8')
        rgb_maps.append(rgb_map)
        rgb_with_brdf_maps.append(rgb_with_brdf_map)
        depth_maps.append(depth_map)
        gt_maps.append(gt_rgb)
        gt_rgb_brdf_maps.append(gt_rgb_wirh_brdf)
        normal_rgb_maps.append(normal_rgb_map)
        normal_rgb_vis_maps.append(normal_rgb_vis_map)
        normals_rgb_gt_maps.append(gt_normals_rgb_map)
        # Diff/orientation maps are only kept for TensorBoard sampling,
        # which is skipped in test_all mode (see the logging guard below).
        if not test_all:
            normals_diff_maps.append(normals_diff_map)
            normals_orientation_loss_maps.append(normals_orientation_loss_map)
        # Albedo maps are stored gamma-corrected (1/2.2) as floats in [0,1].
        albedo_maps.append(albedo_map)
        single_aligned_albedo_maps.append((single_aligned_albedo_map.numpy())**(1/2.2))
        three_aligned_albedo_maps.append((three_aligned_albedo_map.numpy())**(1/2.2))
        gt_albedo_maps.append((gt_albedo_reshaped.numpy())**(1/2.2))
        roughness_maps.append(roughness_map)
        fresnel_maps.append(fresnel_map)
        if savePath is not None:
            # Side-by-side comparison panels, then write everything to disk.
            rgb_map = np.concatenate((rgb_map, gt_rgb, depth_map), axis=1)
            rgb_with_brdf_map = np.concatenate((rgb_with_brdf_map, gt_rgb_wirh_brdf), axis=1)
            normal_map = np.concatenate((normal_rgb_map, gt_normals_rgb_map, normals_diff_map, normals_orientation_loss_map), axis=1)
            brdf_map = np.concatenate((albedo_map, roughness_map, fresnel_map), axis=1)
            single_aligned_albedo_gamma = ((single_aligned_albedo_map.numpy())**(1/2.2) * 255).astype('uint8')
            three_aligned_albedo_gamma = ((three_aligned_albedo_map.numpy())**(1/2.2) * 255).astype('uint8')
            gt_albedo_gamma = ((gt_albedo_reshaped.numpy())**(1/2.2) * 255).astype('uint8')
            albedo_map = np.concatenate((single_aligned_albedo_gamma, three_aligned_albedo_gamma, gt_albedo_gamma), axis=1)
            imageio.imwrite(f'{savePath}/nvs_with_radiance_field/{prtx}{idx:03d}.png', rgb_map)
            imageio.imwrite(f'{savePath}/nvs_with_brdf/{prtx}{idx:03d}.png', rgb_with_brdf_map)
            imageio.imwrite(f'{savePath}/normal/{prtx}{idx:03d}.png', normal_map)
            imageio.imwrite(f'{savePath}/normal_vis/{prtx}{idx:03d}.png', normal_rgb_vis_map)
            imageio.imwrite(f'{savePath}/brdf/{prtx}{idx:03d}.png', brdf_map)
            imageio.imwrite(f'{savePath}/brdf/{prtx}{idx:03d}_albedo.png', albedo_map)
            imageio.imwrite(f'{savePath}/brdf/{prtx}{idx:03d}_roughness.png', roughness_map)
            imageio.imwrite(f'{savePath}/acc_map/{prtx}{idx:03d}.png', acc_map)
    # Randomly select a prediction to visualize
    # NOTE(review): a falsy `step` (0 or None) skips TensorBoard logging —
    # confirm step 0 is never expected to be logged.
    if logger and step and not test_all:
        vis_idx = random.choice(range(len(rgb_maps)))
        vis_rgb = torch.from_numpy(rgb_maps[vis_idx])
        vis_rgb_brdf_rgb = torch.from_numpy(rgb_with_brdf_maps[vis_idx])
        vis_depth = torch.from_numpy(depth_maps[vis_idx])
        vis_rgb_gt = torch.from_numpy(gt_maps[vis_idx])
        vis_normal_rgb = torch.from_numpy(normal_rgb_maps[vis_idx])
        vis_normal_gt_rgb = torch.from_numpy(normals_rgb_gt_maps[vis_idx])
        vis_normals_diff_rgb = torch.from_numpy(normals_diff_maps[vis_idx])
        vis_normals_orientation_loss_rgb = torch.from_numpy(normals_orientation_loss_maps[vis_idx])
        vis_albedo = torch.from_numpy(albedo_maps[vis_idx])
        vis_single_aligned_albedo_gamma = torch.from_numpy((single_aligned_albedo_maps[vis_idx]* 255).astype('uint8'))
        vis_three_aligned_albedo_gamma = torch.from_numpy((three_aligned_albedo_maps[vis_idx]* 255).astype('uint8'))
        vis_gt_albedo_gamma = torch.from_numpy((gt_albedo_maps[vis_idx]* 255).astype('uint8'))
        vis_roughness = torch.from_numpy(roughness_maps[vis_idx])
        vis_fresnel = torch.from_numpy(fresnel_maps[vis_idx])
        # HWC -> NCHW float grids for torchvision's make_grid.
        vis_rgb_grid = torch.stack([vis_rgb, vis_rgb_brdf_rgb, vis_rgb_gt, vis_depth]).permute(0, 3, 1, 2).to(float)
        vis_normal_grid = torch.stack([vis_normal_rgb, vis_normal_gt_rgb, vis_normals_diff_rgb, vis_normals_orientation_loss_rgb]).permute(0, 3, 1, 2).to(float)
        vis_brdf_grid = torch.stack([vis_albedo, vis_roughness, vis_fresnel]).permute(0, 3, 1, 2).to(float)
        vis_envir_map_grid = torch.from_numpy(envirmap).unsqueeze(0).permute(0, 3, 1, 2).to(float)
        vis_albedo_grid = torch.stack([vis_single_aligned_albedo_gamma, vis_three_aligned_albedo_gamma, vis_gt_albedo_gamma]).permute(0, 3, 1, 2).to(float)
        logger.add_image('test/rgb',
                        vutils.make_grid(vis_rgb_grid, padding=0, normalize=True, value_range=(0, 255)), step)
        logger.add_image('test/normal',
                        vutils.make_grid(vis_normal_grid, padding=0, normalize=True, value_range=(0, 255)), step)
        logger.add_image('test/brdf',
                        vutils.make_grid(vis_brdf_grid, padding=0, normalize=True, value_range=(0, 255)), step)
        logger.add_image('test/envir_map',
                        vutils.make_grid(vis_envir_map_grid, padding=0, normalize=True, value_range=(0, 255)), step)
        logger.add_image('test/albedo',
                        vutils.make_grid(vis_albedo_grid, padding=0, normalize=True, value_range=(0, 255)), step)
    # Compute metrics
    if PSNRs_rgb:
        psnr = np.mean(np.asarray(PSNRs_rgb))
        psnr_rgb_brdf = np.mean(np.asarray(PSNRs_rgb_brdf))
        gt_normal_stack = np.stack(normal_gt_list)
        render_normal_stack = np.stack(normal_raw_list)
        single_aligned_albedo_maps = np.stack(single_aligned_albedo_maps)
        three_aligned_albedo_maps = np.stack(three_aligned_albedo_maps)
        gt_albedo_maps = np.stack(gt_albedo_maps)
        loss_albedo_single = np.mean((gt_albedo_maps - single_aligned_albedo_maps)**2)
        loss_albedo_three = np.mean((gt_albedo_maps - three_aligned_albedo_maps)**2)
        PSNR_albedo_single = -10.0 * np.log(loss_albedo_single) / np.log(10.0)
        PSNR_albedo_three = -10.0 * np.log(loss_albedo_three) / np.log(10.0)
        # compute mean angular error
        MAE = np.mean(np.arccos(np.clip(np.sum(gt_normal_stack * render_normal_stack, axis=-1), -1, 1)) * 180 / np.pi)
        if compute_extra_metrics:
            ssim = np.mean(np.asarray(ssims))
            l_a = np.mean(np.asarray(l_alex))
            l_v = np.mean(np.asarray(l_vgg))
            ssim_rgb_brdf = np.mean(np.asarray(ssims_rgb_brdf))
            l_a_rgb_brdf = np.mean(np.asarray(l_alex_rgb_brdf))
            l_v_rgb_brdf = np.mean(np.asarray(l_vgg_rgb_brdf))
            ssim_albedo_single = np.mean(np.asarray(ssims_albedo_single))
            l_a_albedo_single = np.mean(np.asarray(l_alex_albedo_single))
            l_v_albedo_single = np.mean(np.asarray(l_vgg_albedo_single))
            ssim_albedo_three = np.mean(np.asarray(ssims_albedo_three))
            l_a_albedo_three = np.mean(np.asarray(l_alex_albedo_three))
            l_v_albedo_three = np.mean(np.asarray(l_vgg_albedo_three))
            saved_message = f'Iteration:{prtx[:-1]}: \n' \
                            + f'\tPSNR_nvs: {psnr:.2f}, PSNR_nvs_brdf: {psnr_rgb_brdf:.2f}, PNSR_albedo_single_aligned: {PSNR_albedo_single:.2f}, PNSR_albedo_three_aligned: {PSNR_albedo_three:.2f}\n' \
                            + f'\tSSIM_rgb: {ssim:.4f}, L_Alex_rgb: {l_a:.4f}, L_VGG_rgb: {l_v:.4f}\n' \
                            + f'\tSSIM_rgb_brdf: {ssim_rgb_brdf:.4f}, L_Alex_rgb_brdf: {l_a_rgb_brdf:.4f}, L_VGG_rgb_brdf: {l_v_rgb_brdf:.4f}\n' \
                            + f'\tSSIM_albedo_single: {ssim_albedo_single:.4f}, L_Alex_albedo_single: {l_a_albedo_single:.4f}, L_VGG_albedo_single: {l_v_albedo_single:.4f}\n' \
                            + f'\tSSIM_albedo_three: {ssim_albedo_three:.4f}, L_Alex_albedo_three: {l_a_albedo_three:.4f}, L_VGG_albedo_three: {l_v_albedo_three:.4f}\n' \
                            + f'\tMAE: {MAE:.2f}\n'
        else:
            saved_message = f'Iteration:{prtx[:-1]}, PSNR_nvs: {psnr:.2f}, PSNR_nvs_brdf: {psnr_rgb_brdf:.2f}, MAE: {MAE:.2f}, PSNR_albedo_single_aligned: {PSNR_albedo_single:.2f}, PSNR_albedo_three_aligned: {PSNR_albedo_three:.2f}\n'
    # write the end of record file
    with open(f'{savePath}/metrics_record.txt', 'a') as f:
        f.write(saved_message)
    # save video results
    if test_all:
        os.makedirs(savePath + "/video", exist_ok=True)
        video_path = savePath + "/video"
        imageio.mimsave(os.path.join(video_path, 'rgb.mp4'), np.stack(rgb_maps), fps=24, quality=8)
        imageio.mimsave(os.path.join(video_path, 'rgb_brdf.mp4'), np.stack(rgb_with_brdf_maps), fps=24, quality=8)
        imageio.mimsave(os.path.join(video_path, 'gt_normal_video.mp4'), np.stack(normals_rgb_gt_maps), fps=24, quality=8)
        imageio.mimsave(os.path.join(video_path, 'render_normal_video.mp4'), np.stack(normal_rgb_maps), fps=24, quality=8)
        imageio.mimsave(os.path.join(video_path, 'render_normal_vis_video.mp4'), np.stack(normal_rgb_vis_maps), fps=24, quality=8)
        imageio.mimsave(os.path.join(video_path, 'single_aligned_albedo.mp4'), (single_aligned_albedo_maps * 255).astype('uint8'), fps=24, quality=8)
        imageio.mimsave(os.path.join(video_path, 'three_aligned_albedo.mp4'), (three_aligned_albedo_maps * 255).astype('uint8'), fps=24, quality=8)
        imageio.mimsave(os.path.join(video_path, 'roughness.mp4'), np.stack(roughness_maps), fps=24, quality=8)
    return psnr, psnr_rgb_brdf, MAE, PSNR_albedo_single, PSNR_albedo_three
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
scripts/export_mesh.py | Python | import torch
from opt import config_parser
from renderer import *
from models.tensoRF_rotated_lights import raw2alpha, TensorVMSplit, AlphaGridMask
from utils import *
# Parse command-line options once at import time and choose the device:
# the requested local-rank GPU when CUDA is available, otherwise CPU.
args = config_parser()
print(args)
device = torch.device(f"cuda:{args.local_rank}" if torch.cuda.is_available() else "cpu")
@torch.no_grad()
def export_mesh(args):
    """Load a TensoRF-style checkpoint and export its density field as a PLY mesh.

    The model class is selected by name via ``args.model_name``; the mesh is
    written next to the checkpoint, replacing the ``.th`` suffix with ``.ply``.
    """
    checkpoint = torch.load(args.ckpt, map_location=device)
    model_kwargs = checkpoint['kwargs']
    model_kwargs['device'] = device
    model = eval(args.model_name)(**model_kwargs)
    model.load(checkpoint)
    # Dense alpha grid -> marching-cubes PLY at the given iso level.
    alpha, _ = model.getDenseAlpha()
    ply_path = f'{args.ckpt[:-3]}.ply'
    convert_sdf_samples_to_ply(alpha.cpu(), ply_path, bbox=model.aabb.cpu(), level=0.005)
if __name__ == '__main__':
    # Fix every RNG (torch CPU, all CUDA devices, numpy) for reproducible export.
    seed = 20211202
    torch.set_default_dtype(torch.float32)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    export_mesh(args)
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
scripts/relight_importance.py | Python |
import os
from tqdm import tqdm
import imageio
import numpy as np
from opt import config_parser
import torch
import torch.nn as nn
from utils import visualize_depth_numpy
# ----------------------------------------
# use this if loaded checkpoint is generate from single-light or rotated multi-light setting
from models.tensoRF_rotated_lights import raw2alpha, TensorVMSplit, AlphaGridMask
# # use this if loaded checkpoint is generate from general multi-light setting
# from models.tensoRF_general_multi_lights import TensorVMSplit, AlphaGridMask
# ----------------------------------------
from dataLoader.ray_utils import safe_l2_normalize
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
from dataLoader import dataset_dict
from models.relight_utils import *
brdf_specular = GGX_specular
from utils import rgb_ssim, rgb_lpips
from models.relight_utils import Environment_Light
from renderer import compute_rescale_ratio
@torch.no_grad()
def relight(dataset, args):
    """Relight a trained TensoIR model under new HDR environment maps.

    For every frame in ``dataset``: renders geometry buffers with the trained
    model, importance-samples each environment light in ``args.hdrdir``,
    estimates per-sample visibility by ray-marching transmittance, evaluates
    the BRDF, and composites relit images (with and without the environment
    background). Per-light PSNR/SSIM/LPIPS are accumulated and written to
    text files; optional albedo/roughness/normal images and videos are saved
    under ``args.geo_buffer_path``.

    NOTE(review): this function's body may continue past the visible chunk;
    documentation below covers only the visible portion.
    """
    if not os.path.exists(args.ckpt):
        print('the checkpoint path for tensoIR does not exists!!!')
        return
    ckpt = torch.load(args.ckpt, map_location=device)
    kwargs = ckpt['kwargs']
    kwargs.update({'device': device})
    tensoIR = eval(args.model_name)(**kwargs)
    tensoIR.load(ckpt)
    W, H = dataset.img_wh
    near_far = dataset.near_far
    rgb_frames_list = []
    optimized_normal_list = []
    aligned_albedo_list = []
    roughness_list = []
    envir_light = Environment_Light(args.hdrdir)
    ####
    light_rotation_idx = 0
    ####
    # Global albedo rescale ratios (GT/pred medians) computed once; the
    # three-channel ratio is used for relighting below.
    global_rescale_value_single, global_rescale_value_three = compute_rescale_ratio(tensoIR, dataset)
    rescale_value = global_rescale_value_three
    relight_psnr = dict()
    relight_l_alex, relight_l_vgg, relight_ssim = dict(), dict(), dict()
    for cur_light_name in dataset.light_names:
        relight_psnr[f'{cur_light_name}'] = []
        relight_l_alex[f'{cur_light_name}'] = []
        relight_l_vgg[f'{cur_light_name}'] = []
        relight_ssim[f'{cur_light_name}'] = []
    for idx in tqdm(range(len(dataset)), desc="Rendering relight images"):
        relight_pred_img_with_bg, relight_pred_img_without_bg, relight_gt_img = dict(), dict(), dict()
        for cur_light_name in dataset.light_names:
            relight_pred_img_with_bg[f'{cur_light_name}'] = []
            relight_pred_img_without_bg[f'{cur_light_name}'] = []
            relight_gt_img[f'{cur_light_name}'] = []
        cur_dir_path = os.path.join(args.geo_buffer_path, f'{dataset.split}_{idx:0>3d}')
        os.makedirs(cur_dir_path, exist_ok=True)
        item = dataset[idx]
        frame_rays = item['rays'].squeeze(0).to(device)  # [H*W, 6]
        # gt_normal = item['normals'].squeeze(0).cpu() # [H*W, 3]s
        gt_mask = item['rgbs_mask'].squeeze(0).squeeze(-1).cpu() # [H*W]
        # NOTE(review): `light_name_list` is not defined in this function —
        # presumably a module-level global; likely should be
        # `dataset.light_names`. Confirm at module scope.
        gt_rgb = item['rgbs'].squeeze(0).reshape(len(light_name_list), H, W, 3).cpu()  # [N, H, W, 3]
        gt_albedo = item['albedo'].squeeze(0).to(device)  # [H*W, 3]
        light_idx = torch.zeros((frame_rays.shape[0], 1), dtype=torch.int).to(device).fill_(light_rotation_idx)
        rgb_map, depth_map, normal_map, albedo_map, roughness_map, fresnel_map, normals_diff_map, normals_orientation_loss_map = [], [], [], [], [], [], [], []
        acc_map = []
        chunk_idxs = torch.split(torch.arange(frame_rays.shape[0]), args.batch_size) # choose the first light idx
        for chunk_idx in chunk_idxs:
            with torch.enable_grad():
                rgb_chunk, depth_chunk, normal_chunk, albedo_chunk, roughness_chunk, \
                    fresnel_chunk, acc_chunk, *temp \
                    = tensoIR(frame_rays[chunk_idx], light_idx[chunk_idx], is_train=False, white_bg=True, ndc_ray=False, N_samples=-1)
            relight_rgb_chunk = torch.ones_like(rgb_chunk)
            # gt_albedo_chunk = gt_albedo[chunk_idx] # use GT to debug
            # Only rays whose accumulated opacity passes the threshold are
            # treated as surface hits and relit.
            acc_chunk_mask = (acc_chunk > args.acc_mask_threshold)
            rays_o_chunk, rays_d_chunk = frame_rays[chunk_idx][:, :3], frame_rays[chunk_idx][:, 3:]
            surface_xyz_chunk = rays_o_chunk + depth_chunk.unsqueeze(-1) * rays_d_chunk  # [bs, 3]
            masked_surface_pts = surface_xyz_chunk[acc_chunk_mask]  # [surface_point_num, 3]
            masked_normal_chunk = normal_chunk[acc_chunk_mask]  # [surface_point_num, 3]
            masked_albedo_chunk = albedo_chunk[acc_chunk_mask]  # [surface_point_num, 3]
            masked_roughness_chunk = roughness_chunk[acc_chunk_mask]  # [surface_point_num, 1]
            masked_fresnel_chunk = fresnel_chunk[acc_chunk_mask]  # [surface_point_num, 1]
            masked_light_idx_chunk = light_idx[chunk_idx][acc_chunk_mask]  # [surface_point_num, 1]
            ## Get incident light directions
            # NOTE(review): this loop variable shadows the outer frame `idx`;
            # harmless here since `cur_dir_path` is computed before the chunk
            # loop, but worth renaming.
            for idx, cur_light_name in enumerate(dataset.light_names):
                # if os.path.exists(os.path.join(cur_dir_path, 'relighting', f'{cur_light_name}.png')):
                #     continue
                relight_rgb_chunk.fill_(1.0)
                masked_light_dir, masked_light_rgb, masked_light_pdf = envir_light.sample_light(cur_light_name, masked_normal_chunk.shape[0], 512)  # [bs, envW * envH, 3]
                surf2l = masked_light_dir  # [surface_point_num, envW * envH, 3]
                surf2c = -rays_d_chunk[acc_chunk_mask]  # [surface_point_num, 3]
                surf2c = safe_l2_normalize(surf2c, dim=-1)  # [surface_point_num, 3]
                cosine = torch.einsum("ijk,ik->ij", surf2l, masked_normal_chunk)  # surf2l:[surface_point_num, envW * envH, 3] * masked_normal_chunk:[surface_point_num, 3] -> cosine:[surface_point_num, envW * envH]
                cosine_mask = (cosine > 1e-6)  # [surface_point_num, envW * envH] mask half of the incident light that is behind the surface
                visibility = torch.zeros((*cosine_mask.shape, 1), device=device)    # [surface_point_num, envW * envH, 1]
                masked_surface_xyz = masked_surface_pts[:, None, :].expand((*cosine_mask.shape, 3))  # [surface_point_num, envW * envH, 3]
                cosine_masked_surface_pts = masked_surface_xyz[cosine_mask]  # [num_of_vis_to_get, 3]
                cosine_masked_surf2l = surf2l[cosine_mask]  # [num_of_vis_to_get, 3]
                cosine_masked_visibility = torch.zeros(cosine_masked_surf2l.shape[0], 1, device=device)  # [num_of_vis_to_get, 1]
                # Visibility via transmittance ray-marching, in sub-chunks
                # to bound memory.
                chunk_idxs_vis = torch.split(torch.arange(cosine_masked_surface_pts.shape[0]), 100000)
                for chunk_vis_idx in chunk_idxs_vis:
                    chunk_surface_pts = cosine_masked_surface_pts[chunk_vis_idx]  # [chunk_size, 3]
                    chunk_surf2light = cosine_masked_surf2l[chunk_vis_idx]  # [chunk_size, 3]
                    nerv_vis, nerfactor_vis = compute_transmittance(
                        tensoIR=tensoIR,
                        surf_pts=chunk_surface_pts,
                        light_in_dir=chunk_surf2light,
                        nSample=96,
                        vis_near=0.05,
                        vis_far=1.5
                    )  # [chunk_size, 1]
                    if args.vis_equation == 'nerfactor':
                        cosine_masked_visibility[chunk_vis_idx] = nerfactor_vis.unsqueeze(-1)
                    elif args.vis_equation == 'nerv':
                        cosine_masked_visibility[chunk_vis_idx] = nerv_vis.unsqueeze(-1)
                visibility[cosine_mask] = cosine_masked_visibility
                ## Get BRDF specs
                nlights = surf2l.shape[1]
                # relighting
                specular_relighting = brdf_specular(masked_normal_chunk, surf2c, surf2l, masked_roughness_chunk, masked_fresnel_chunk)  # [surface_point_num, envW * envH, 3]
                masked_albedo_chunk_rescaled = masked_albedo_chunk * rescale_value
                # Lambertian diffuse (albedo / pi) + GGX specular lobe.
                surface_brdf_relighting = masked_albedo_chunk_rescaled.unsqueeze(1).expand(-1, nlights, -1) / np.pi + specular_relighting # [surface_point_num, envW * envH, 3]
                direct_light = masked_light_rgb
                light_rgbs = visibility * direct_light  # [bs, envW * envH, 3]
                # Monte-Carlo estimate of the rendering equation: BRDF *
                # incoming radiance * cosine, divided by the sampling pdf.
                light_pix_contrib = surface_brdf_relighting * light_rgbs * cosine[:, :, None] / masked_light_pdf
                surface_relight_rgb_chunk = torch.mean(light_pix_contrib, dim=1)  # [bs, 3]
                ### Tonemapping
                surface_relight_rgb_chunk = torch.clamp(surface_relight_rgb_chunk, min=0.0, max=1.0)
                ### Colorspace transform
                if surface_relight_rgb_chunk.shape[0] > 0:
                    surface_relight_rgb_chunk = linear2srgb_torch(surface_relight_rgb_chunk)
                relight_rgb_chunk[acc_chunk_mask] = surface_relight_rgb_chunk
                # Composite onto the environment map background; opacity is
                # binarised at 0.9 before alpha blending.
                bg_color = envir_light.get_light(cur_light_name, rays_d_chunk)  # [bs, 3]
                bg_color = torch.clamp(bg_color, min=0.0, max=1.0)
                bg_color = linear2srgb_torch(bg_color)
                relight_without_bg = torch.ones_like(bg_color)
                relight_with_bg = torch.ones_like(bg_color)
                relight_without_bg[acc_chunk_mask] = relight_rgb_chunk[acc_chunk_mask]
                acc_temp = acc_chunk[..., None]
                acc_temp[acc_temp <= 0.9] = 0.0
                relight_with_bg = acc_temp * relight_without_bg + (1.0 - acc_temp) * bg_color
                relight_pred_img_with_bg[cur_light_name].append(relight_with_bg.detach().clone().cpu())
                relight_pred_img_without_bg[cur_light_name].append(relight_without_bg.detach().clone().cpu())
            rgb_map.append(rgb_chunk.cpu().detach())
            depth_map.append(depth_chunk.cpu().detach())
            acc_map.append(acc_chunk.cpu().detach())
            normal_map.append(normal_chunk.cpu().detach())
            albedo_map.append(albedo_chunk.cpu().detach())
            roughness_map.append(roughness_chunk.cpu().detach())
        # Stitch chunk outputs back into full frames.
        rgb_map = torch.cat(rgb_map, dim=0)
        depth_map = torch.cat(depth_map, dim=0)
        acc_map = torch.cat(acc_map, dim=0)
        normal_map = torch.cat(normal_map, dim=0)
        acc_map_mask = (acc_map > args.acc_mask_threshold)
        albedo_map = torch.cat(albedo_map, dim=0)
        roughness_map = torch.cat(roughness_map, dim=0)
        os.makedirs(os.path.join(cur_dir_path, 'relighting_with_bg'), exist_ok=True)
        os.makedirs(os.path.join(cur_dir_path, 'relighting_without_bg'), exist_ok=True)
        # Save relit images and compute per-light metrics for this frame.
        for light_name_idx, cur_light_name in enumerate(dataset.light_names):
            relight_map_with_bg = torch.cat(relight_pred_img_with_bg[cur_light_name], dim=0).reshape(H, W, 3).numpy()
            relight_map_without_bg = torch.cat(relight_pred_img_without_bg[cur_light_name], dim=0).reshape(H, W, 3).numpy()
            if args.if_save_relight_rgb:
                imageio.imwrite(os.path.join(cur_dir_path, 'relighting_with_bg', f'{cur_light_name}.png'), (relight_map_with_bg * 255).astype('uint8'))
                imageio.imwrite(os.path.join(cur_dir_path, 'relighting_without_bg', f'{cur_light_name}.png'), (relight_map_without_bg * 255).astype('uint8'))
            # change the background color to white before computing metrics
            acc_map_mask = acc_map_mask.reshape(H, W)
            gt_img_map = gt_rgb[light_name_idx].numpy()
            loss_relight = np.mean((relight_map_without_bg - gt_img_map) ** 2)
            # PSNR from MSE: -10 * log10(mse).
            cur_psnr = -10.0 * np.log(loss_relight) / np.log(10.0)
            ssim_relight = rgb_ssim(relight_map_without_bg, gt_img_map, 1)
            l_a_relight = rgb_lpips(gt_img_map, relight_map_without_bg, 'alex', tensoIR.device)
            l_v_relight = rgb_lpips(gt_img_map, relight_map_without_bg, 'vgg', tensoIR.device)
            relight_psnr[cur_light_name].append(cur_psnr)
            relight_ssim[cur_light_name].append(ssim_relight)
            relight_l_alex[cur_light_name].append(l_a_relight)
            relight_l_vgg[cur_light_name].append(l_v_relight)
        # write relight image psnr to a txt file
        with open(os.path.join(cur_dir_path, 'relighting_without_bg', 'relight_psnr.txt'), 'w') as f:
            for cur_light_name in dataset.light_names:
                f.write(f'{cur_light_name}: PNSR {relight_psnr[cur_light_name][-1]}; SSIM {relight_ssim[cur_light_name][-1]}; L_Alex {relight_l_alex[cur_light_name][-1]}; L_VGG {relight_l_vgg[cur_light_name][-1]}\n')
        rgb_map = (rgb_map.reshape(H, W, 3).numpy() * 255).astype('uint8')
        rgb_frames_list.append(rgb_map)
        depth_map, _ = visualize_depth_numpy(depth_map.reshape(H, W, 1).numpy(), near_far)
        acc_map = (acc_map.reshape(H, W, 1).numpy() * 255).astype('uint8')
        if args.if_save_rgb:
            imageio.imwrite(os.path.join(cur_dir_path, 'rgb.png'), rgb_map)
        if args.if_save_depth:
            imageio.imwrite(os.path.join(cur_dir_path, 'depth.png'), depth_map)
        if args.if_save_acc:
            imageio.imwrite(os.path.join(cur_dir_path, 'acc.png'), acc_map)
        if args.if_save_albedo:
            gt_albedo_reshaped = gt_albedo.reshape(H, W, 3).cpu()
            albedo_map = albedo_map.reshape(H, W, 3)
            # three channels rescale
            gt_albedo_mask = gt_mask.reshape(H, W)
            ratio_value, _ = (gt_albedo_reshaped[gt_albedo_mask]/ albedo_map[gt_albedo_mask].clamp(min=1e-6)).median(dim=0)
            # ratio_value = gt_albedo_reshaped[gt_albedo_mask].median(dim=0)[0] / albedo_map[gt_albedo_mask].median(dim=0)[0]
            albedo_map[gt_albedo_mask] = (ratio_value * albedo_map[gt_albedo_mask]).clamp(min=0.0, max=1.0)
            # RGBA output: alpha channel comes from the accumulated opacity.
            albedo_map_to_save = (albedo_map * 255).numpy().astype('uint8')
            albedo_map_to_save = np.concatenate([albedo_map_to_save, acc_map], axis=2).astype('uint8')
            imageio.imwrite(os.path.join(cur_dir_path, 'albedo.png'), albedo_map_to_save)
            if args.if_save_albedo_gamma_corrected:
                to_save_albedo = (albedo_map ** (1/2.2) * 255).numpy().astype('uint8')
                to_save_albedo = np.concatenate([to_save_albedo, acc_map], axis=2)
                # gamma cororection
                imageio.imwrite(os.path.join(cur_dir_path, 'albedo_gamma_corrected.png'), to_save_albedo)
                # save GT gamma corrected albedo
                gt_albedo_reshaped = (gt_albedo_reshaped ** (1/2.2) * 255).numpy().astype('uint8')
                gt_albedo_reshaped = np.concatenate([gt_albedo_reshaped, acc_map], axis=2)
                imageio.imwrite(os.path.join(cur_dir_path, 'gt_albedo_gamma_corrected.png'), gt_albedo_reshaped)
            aligned_albedo_list.append(((albedo_map ** (1.0/2.2)) * 255).numpy().astype('uint8'))
            roughness_map = roughness_map.reshape(H, W, 1)
            # expand to three channels
            roughness_map = (roughness_map.expand(-1, -1, 3) * 255)
            roughness_map = np.concatenate([roughness_map, acc_map], axis=2)
            imageio.imwrite(os.path.join(cur_dir_path, 'roughness.png'), (roughness_map).astype('uint8'))
            roughness_list.append((roughness_map).astype('uint8'))
        if args.if_render_normal:
            normal_map = F.normalize(normal_map, dim=-1)
            normal_rgb_map = normal_map * 0.5 + 0.5
            normal_rgb_map = (normal_rgb_map.reshape(H, W, 3).numpy() * 255).astype('uint8')
            normal_rgb_map = np.concatenate([normal_rgb_map, acc_map], axis=2)
            imageio.imwrite(os.path.join(cur_dir_path, 'normal.png'), normal_rgb_map)
    # write relight image psnr to a txt file
    with open(os.path.join(args.geo_buffer_path, 'relight_psnr.txt'), 'w') as f:
        for cur_light_name in dataset.light_names:
            f.write(f'{cur_light_name}: PSNR {np.mean(relight_psnr[cur_light_name])}; SSIM {np.mean(relight_ssim[cur_light_name])}; L_Alex {np.mean(relight_l_alex[cur_light_name])}; L_VGG {np.mean(relight_l_vgg[cur_light_name])}\n')
    if args.if_save_rgb_video:
        video_path = os.path.join(args.geo_buffer_path,'video')
        os.makedirs(video_path, exist_ok=True)
        imageio.mimsave(os.path.join(video_path, 'rgb_video.mp4'), np.stack(rgb_frames_list), fps=24, macro_block_size=1)
    if args.if_render_normal:
        video_path = os.path.join(args.geo_buffer_path,'video')
        os.makedirs(video_path, exist_ok=True)
        # Re-read the saved RGBA normal images and composite onto white.
        for render_idx in range(len(dataset)):
            cur_dir_path = os.path.join(args.geo_buffer_path, f'{dataset.split}_{render_idx:0>3d}')
            normal_map = imageio.v2.imread(os.path.join(cur_dir_path, 'normal.png'))
            normal_mask = (normal_map[..., -1] / 255) > args.acc_mask_threshold
            # NOTE(review): `normal_mask` is a 2-D boolean (H, W); indexing it
            # with [..., 3:4] yields an (H, 1) slice, not a per-pixel alpha —
            # this looks like it should be `normal_mask[..., None]`. Confirm.
            normal_map = normal_map[..., :3] * (normal_mask[..., 3:4] / 255.0) + 255 * (1 - normal_mask[..., 3:4] / 255.0)
            optimized_normal_list.append(normal_map)
        imageio.mimsave(os.path.join(video_path, 'render_normal_video.mp4'), np.stack(optimized_normal_list), fps=24, macro_block_size=1)
    if args.if_save_albedo:
        video_path = os.path.join(args.geo_buffer_path,'video')
        os.makedirs(video_path, exist_ok=True)
        imageio.mimsave(os.path.join(video_path, 'aligned_albedo_video.mp4'), np.stack(aligned_albedo_list), fps=24, macro_block_size=1)
        imageio.mimsave(os.path.join(video_path, 'roughness_video.mp4'), np.stack(roughness_list), fps=24, macro_block_size=1)
if args.render_video:
video_path = os.path.join(args.geo_buffer_path,'video_without_bg')
os.makedirs(video_path, exist_ok=True)
for cur_light_name in dataset.light_names:
frame_list = []
for render_idx in range(len(dataset)):
cur_dir_path = os.path.join(args.geo_buffer_path, f'{dataset.split}_{render_idx:0>3d}', 'relighting_without_bg')
frame_list.append(imageio.v2.imread(os.path.join(cur_dir_path, f'{cur_light_name}.png')))
imageio.mimsave(os.path.join(video_path, f'{cur_light_name}_video.mp4'), np.stack(frame_list), fps=24, macro_block_size=1)
video_path = os.path.join(args.geo_buffer_path,'video_with_bg')
os.makedirs(video_path, exist_ok=True)
for cur_light_name in dataset.light_names:
frame_list = []
for render_idx in range(len(dataset)):
cur_dir_path = os.path.join(args.geo_buffer_path, f'{dataset.split}_{render_idx:0>3d}', 'relighting_with_bg')
frame_list.append(imageio.v2.imread(os.path.join(cur_dir_path, f'{cur_light_name}.png')))
imageio.mimsave(os.path.join(video_path, f'{cur_light_name}_video.mp4'), np.stack(frame_list), fps=24, macro_block_size=1)
if __name__ == "__main__":
    args = config_parser()
    print(args)
    print("*" * 80)
    print('The result will be saved in {}'.format(os.path.abspath(args.geo_buffer_path)))

    # Deterministic runs: fix every RNG we use.
    torch.set_default_dtype(torch.float32)
    torch.manual_seed(20211202)
    torch.cuda.manual_seed_all(20211202)
    np.random.seed(20211202)

    # The following args are not defined in opt.py; attach them here.
    extra_options = {
        'if_save_rgb': False,
        'if_save_depth': False,
        'if_save_acc': True,
        'if_save_rgb_video': False,
        'if_save_relight_rgb': True,
        'if_save_albedo': True,
        'if_save_albedo_gamma_corrected': True,
        'acc_mask_threshold': 0.5,
        'if_render_normal': True,
        'vis_equation': 'nerv',
        'render_video': True,
    }
    for option_name, option_value in extra_options.items():
        setattr(args, option_name, option_value)

    dataset_cls = dataset_dict[args.dataset_name]
    # names of the environment maps used for relighting
    light_name_list = ['bridge', 'city', 'fireplace', 'forest', 'night']
    test_dataset = dataset_cls(
        args.datadir,
        args.hdrdir,
        split='test',
        random_test=False,
        downsample=args.downsample_test,
        light_names=light_name_list,
        light_rotation=args.light_rotation,
    )
    relight(test_dataset, args)
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
scripts/relight_ord.py | Python | import os
from tqdm import tqdm
import imageio
import numpy as np
from opt import config_parser
import torch
import torch.nn as nn
from utils import visualize_depth_numpy
# ----------------------------------------
# use this if loaded checkpoint is generate from single-light or rotated multi-light setting
from models.tensoRF_rotated_lights import raw2alpha, TensorVMSplit, AlphaGridMask
# # use this if loaded checkpoint is generate from general multi-light setting
# from models.tensoRF_general_multi_lights import TensorVMSplit, AlphaGridMask
# ----------------------------------------
from dataLoader.ray_utils import safe_l2_normalize
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
from dataLoader import dataset_dict
from models.relight_utils import *
brdf_specular = GGX_specular
# from utils import rgb_ssim, rgb_lpips
from models.relight_utils import Environment_Light
from renderer import compute_rescale_ratio_rgb
def tone_map(linear_rgbs):
    """Clamp linear RGB values to [0, 1] and convert them to sRGB.

    An empty input tensor is returned unchanged (still clamped), since the
    sRGB conversion is skipped when there are no rows.
    """
    clamped = torch.clamp(linear_rgbs, min=0.0, max=1.0)
    if clamped.shape[0] == 0:
        return clamped
    return linear2srgb_torch(clamped)
@torch.no_grad()
def relight(dataset, args):
    """Render every view of `dataset` relit under its environment light.

    For each view, writes sRGB PNGs with/without background plus the matching
    linear-RGB ``.npy`` buffers under
    ``args.geo_buffer_path/{split}_{idx:03d}/``.
    """
    if not os.path.exists(args.ckpt):
        print('the checkpoint path for tensoIR does not exists!!!')
        return

    # torch.load unpickles the checkpoint — only run on trusted files.
    ckpt = torch.load(args.ckpt, map_location=device)
    kwargs = ckpt['kwargs']
    kwargs.update({'device': device})
    # NOTE(review): eval() instantiates the class named by --model_name;
    # assumes a trusted command line.
    tensoIR = eval(args.model_name)(**kwargs)
    tensoIR.load(ckpt)

    W, H = dataset.img_wh
    light_rotation_idx = 0
    env_lights = Environment_Light(
        hdr_directory=args.hdrdir,
        light_names=sorted(list(set(dataset.light_names))),
    )

    # TODO: Fix me with proper rescale_value:
    # - This is the code to estimate rescale_value
    # ```
    # global_rescale_value_single, global_rescale_value_three = compute_rescale_ratio_rgb(tensoIR, dataset)
    # rescale_value = global_rescale_value_three
    # print(f"rescale_value computed with RGB (not accurate): {rescale_value}")
    # ```
    # - For armodillo, the rescale ratio is tensor([0.1594, 0.0485, 0.0070], device='cuda:0')
    # rescale_value = torch.tensor([0.1594, 0.0485, 0.0070], device='cuda:0')
    # - For mic, the rescale ratio computed with RGB is:
    # rescale_value = tensor([1.0013, 1.0013, 1.0013], device='cuda:0')
    # Therefore, we simply use [1, 1, 1] for datasets without gt albedo.
    # NOTE(review): device is hard-coded to 'cuda:0' here while the rest of the
    # module uses the `device` global — confirm single-GPU assumption.
    rescale_value = torch.tensor([1.0, 1.0, 1.0], device='cuda:0')

    for dataset_idx in tqdm(range(len(dataset)),
                            desc="Rendering relight images"):
        # Prepare paths.
        cur_dir_path = os.path.join(args.geo_buffer_path,
                                    f'{dataset.split}_{dataset_idx:0>3d}')
        os.makedirs(cur_dir_path, exist_ok=True)

        # Load datset.
        dataset_item = dataset[dataset_idx]
        frame_rays = dataset_item['rays'].squeeze(0).to(device)  # [H*W, 6]

        # Initialize collectors.
        # This was designed such that each image is rendered under multiple
        # lights, so it is a dictionary. We don't need that anymore.
        im_chunks_with_bg = dict()
        im_chunks_wout_bg = dict()
        linear_im_chunks_with_bg = dict()
        linear_im_chunks_wout_bg = dict()
        light_name = dataset_item['light_name']
        im_chunks_with_bg[light_name] = []
        im_chunks_wout_bg[light_name] = []
        linear_im_chunks_with_bg[light_name] = []
        linear_im_chunks_wout_bg[light_name] = []

        light_idx = torch.zeros(
            (frame_rays.shape[0], 1),
            dtype=torch.int).to(device).fill_(light_rotation_idx)
        chunk_idxs = torch.split(
            torch.arange(frame_rays.shape[0]),
            args.batch_size)  # choose the first light dataset_idx

        for chunk_idx in tqdm(chunk_idxs, desc="Rendering chunks"):
            # Re-enable autograd inside the outer @torch.no_grad() —
            # presumably the forward pass needs gradients internally
            # (e.g. derived normals); confirm against the model code.
            with torch.enable_grad():
                (fg_chunk, depth_chunk, normal_chunk, albedo_chunk,
                 roughness_chunk, fresnel_chunk, acc_chunk,
                 *temp) = tensoIR(frame_rays[chunk_idx],
                                  light_idx[chunk_idx],
                                  is_train=False,
                                  white_bg=True,
                                  ndc_ray=False,
                                  N_samples=-1)

            # Pixels whose accumulated opacity marks them as foreground.
            acc_chunk_mask = (acc_chunk > args.acc_mask_threshold)
            rays_o_chunk, rays_d_chunk = frame_rays[chunk_idx][:, :3], frame_rays[chunk_idx][:, 3:]
            # [bs, 3] — surface point along each ray at the rendered depth.
            surface_xyz_chunk = rays_o_chunk + depth_chunk.unsqueeze(-1) * rays_d_chunk
            # [surface_point_num, 3]
            masked_surface_pts = surface_xyz_chunk[acc_chunk_mask]
            # [surface_point_num, 3]
            masked_normal_chunk = normal_chunk[acc_chunk_mask]
            # [surface_point_num, 3]
            masked_albedo_chunk = albedo_chunk[acc_chunk_mask]
            # [surface_point_num, 1]
            masked_roughness_chunk = roughness_chunk[acc_chunk_mask]
            # [surface_point_num, 1]
            masked_fresnel_chunk = fresnel_chunk[acc_chunk_mask]
            # [surface_point_num, 1]
            masked_light_idx_chunk = light_idx[chunk_idx][acc_chunk_mask]

            ## Get incident light directions
            # [bs, envW * envH, 3]
            (masked_light_dir, masked_light_rgb,
             masked_light_pdf) = env_lights.sample_light(
                 light_name, masked_normal_chunk.shape[0], 512)
            # [surface_point_num, envW * envH, 3]
            surf2l = masked_light_dir
            # [surface_point_num, 3]
            surf2c = -rays_d_chunk[acc_chunk_mask]
            # [surface_point_num, 3]
            surf2c = safe_l2_normalize(surf2c, dim=-1)

            # surf2l:[surface_point_num, envW * envH, 3] *
            # masked_normal_chunk:[surface_point_num, 3]
            # -> cosine:[surface_point_num, envW * envH]
            cosine = torch.einsum("ijk,ik->ij", surf2l, masked_normal_chunk)
            # [surface_point_num, envW * envH] mask half of the incident light
            # that is behind the surface
            cosine_mask = (cosine > 1e-6)
            # [surface_point_num, envW * envH, 1]
            visibility = torch.zeros((*cosine_mask.shape, 1), device=device)
            # [surface_point_num, envW * envH, 3]
            masked_surface_xyz = masked_surface_pts[:, None, :].expand(
                (*cosine_mask.shape, 3))
            # [num_of_vis_to_get, 3]
            cosine_masked_surface_pts = masked_surface_xyz[cosine_mask]
            # [num_of_vis_to_get, 3]
            cosine_masked_surf2l = surf2l[cosine_mask]
            # [num_of_vis_to_get, 1]
            cosine_masked_visibility = torch.zeros(
                cosine_masked_surf2l.shape[0], 1, device=device)

            # Visibility is computed in sub-chunks of 100k samples to bound
            # peak GPU memory.
            chunk_idxs_vis = torch.split(
                torch.arange(cosine_masked_surface_pts.shape[0]), 100000)
            for chunk_vis_idx in chunk_idxs_vis:
                # [chunk_size, 3]
                chunk_surface_pts = cosine_masked_surface_pts[chunk_vis_idx]
                # [chunk_size, 3]
                chunk_surf2light = cosine_masked_surf2l[chunk_vis_idx]
                nerv_vis, nerfactor_vis = compute_transmittance(
                    tensoIR=tensoIR,
                    surf_pts=chunk_surface_pts,
                    light_in_dir=chunk_surf2light,
                    nSample=96,
                    vis_near=0.05,
                    vis_far=1.5)  # [chunk_size, 1]
                # Two alternative transmittance formulations; selected by CLI.
                if args.vis_equation == 'nerfactor':
                    cosine_masked_visibility[chunk_vis_idx] = nerfactor_vis.unsqueeze(-1)
                elif args.vis_equation == 'nerv':
                    cosine_masked_visibility[chunk_vis_idx] = nerv_vis.unsqueeze(-1)
            visibility[cosine_mask] = cosine_masked_visibility

            ## Get BRDF specs
            nlights = surf2l.shape[1]

            # relighting
            specular_relighting = brdf_specular(
                masked_normal_chunk, surf2c, surf2l, masked_roughness_chunk,
                masked_fresnel_chunk)  # [surface_point_num, envW * envH, 3]
            masked_albedo_chunk_rescaled = masked_albedo_chunk * rescale_value
            # Lambertian diffuse term (albedo / pi) plus specular lobe.
            surface_brdf_relighting = masked_albedo_chunk_rescaled.unsqueeze(
                1
            ).expand(
                -1, nlights, -1
            ) / np.pi + specular_relighting  # [surface_point_num, envW * envH, 3]
            direct_light = masked_light_rgb
            light_rgbs = visibility * direct_light  # [bs, envW * envH, 3]
            # Monte-Carlo estimate: BRDF * incoming light * cos / pdf.
            light_pix_contrib = surface_brdf_relighting * light_rgbs * cosine[:, :, None] / masked_light_pdf

            ################################################################
            # sRGB space.
            ################################################################
            # Foreground and background chunks in RGB.
            linear_fg_chunk = torch.mean(light_pix_contrib, dim=1)
            linear_bg_chunk = env_lights.get_light(light_name, rays_d_chunk)
            fg_chunk = tone_map(linear_fg_chunk)
            bg_chunk = tone_map(linear_bg_chunk)

            # Compute image chunk without background.
            im_chunk_wout_bg = torch.ones_like(bg_chunk)
            im_chunk_wout_bg[acc_chunk_mask] = fg_chunk

            # Compute image chunk with background.
            acc_temp = acc_chunk[..., None]
            # Hard-threshold the alpha before compositing with the env map.
            acc_temp[acc_temp <= 0.9] = 0.0
            im_chunk_with_bg = torch.ones_like(bg_chunk)
            im_chunk_with_bg = acc_temp * im_chunk_wout_bg + (
                1.0 - acc_temp) * bg_chunk

            # Transfer to CPU and collect.
            im_chunk_wout_bg = im_chunk_wout_bg.detach().clone().cpu()
            im_chunk_with_bg = im_chunk_with_bg.detach().clone().cpu()
            im_chunks_with_bg[light_name].append(im_chunk_with_bg)
            im_chunks_wout_bg[light_name].append(im_chunk_wout_bg)
            ################################################################

            ################################################################
            # linear RGB space.
            ################################################################
            # Foreground and background chunks in RGB.
            linear_fg_chunk = torch.mean(light_pix_contrib, dim=1)
            linear_bg_chunk = env_lights.get_light(light_name, rays_d_chunk)
            fg_chunk = linear_fg_chunk
            bg_chunk = linear_bg_chunk

            # Compute image chunk without background.
            im_chunk_wout_bg = torch.ones_like(bg_chunk)
            im_chunk_wout_bg[acc_chunk_mask] = fg_chunk

            # Compute image chunk with background.
            acc_temp = acc_chunk[..., None]
            acc_temp[acc_temp <= 0.9] = 0.0
            im_chunk_with_bg = torch.ones_like(bg_chunk)
            im_chunk_with_bg = acc_temp * im_chunk_wout_bg + (
                1.0 - acc_temp) * bg_chunk

            # Transfer to CPU and collect.
            im_chunk_wout_bg = im_chunk_wout_bg.detach().clone().cpu()
            im_chunk_with_bg = im_chunk_with_bg.detach().clone().cpu()
            linear_im_chunks_with_bg[light_name].append(im_chunk_with_bg)
            linear_im_chunks_wout_bg[light_name].append(im_chunk_wout_bg)
            ################################################################

        os.makedirs(os.path.join(cur_dir_path, 'with_bg'), exist_ok=True)
        os.makedirs(os.path.join(cur_dir_path, 'wout_bg'), exist_ok=True)

        # Stitch the per-chunk rows back into full H x W images.
        # yapf: disable
        im_with_bg = torch.cat(im_chunks_with_bg[light_name], dim=0)
        im_wout_bg = torch.cat(im_chunks_wout_bg[light_name], dim=0)
        im_with_bg = im_with_bg.reshape(H, W, 3).numpy()
        im_wout_bg = im_wout_bg.reshape(H, W, 3).numpy()
        linear_im_with_bg = torch.cat(linear_im_chunks_with_bg[light_name], dim=0)
        linear_im_wout_bg = torch.cat(linear_im_chunks_wout_bg[light_name], dim=0)
        linear_im_with_bg = linear_im_with_bg.reshape(H, W, 3).numpy()
        linear_im_wout_bg = linear_im_wout_bg.reshape(H, W, 3).numpy()
        # yapf: enable

        # Prepare paths.
        # NOTE(review): `Path` is not among this module's visible imports — it is
        # presumably re-exported by `from models.relight_utils import *`; confirm,
        # otherwise this line raises NameError.
        cur_dir_path = Path(cur_dir_path)
        with_bg_path = cur_dir_path / "with_bg" / f"{light_name}.png"
        wout_bg_path = cur_dir_path / "wout_bg" / f"{light_name}.png"
        linear_with_bg_path = cur_dir_path / "linear_with_bg" / f"{light_name}.npy"
        linear_wout_bg_path = cur_dir_path / "linear_wout_bg" / f"{light_name}.npy"
        with_bg_path.parent.mkdir(parents=True, exist_ok=True)
        wout_bg_path.parent.mkdir(parents=True, exist_ok=True)
        linear_with_bg_path.parent.mkdir(parents=True, exist_ok=True)
        linear_wout_bg_path.parent.mkdir(parents=True, exist_ok=True)

        # Save images.
        imageio.imwrite(
            os.path.join(cur_dir_path, 'with_bg', f'{light_name}.png'),
            (im_with_bg * 255).astype('uint8'))
        imageio.imwrite(
            os.path.join(cur_dir_path, 'wout_bg', f'{light_name}.png'),
            (im_wout_bg * 255).astype('uint8'))
        np.save(linear_with_bg_path, linear_im_with_bg)
        np.save(linear_wout_bg_path, linear_im_wout_bg)

        # Print.
        print(f"Saved {with_bg_path}")
        print(f"Saved {wout_bg_path}")
        print(f"Saved {linear_with_bg_path}")
        print(f"Saved {linear_wout_bg_path}")
def main():
    """Entry point: parse config, seed RNGs, build the test set, and relight it."""
    args = config_parser()
    print(args)
    print("*" * 80)
    print('The result will be saved in {}'.format(os.path.abspath(args.geo_buffer_path)))

    # Deterministic runs: fix every RNG we use.
    torch.set_default_dtype(torch.float32)
    torch.manual_seed(20211202)
    torch.cuda.manual_seed_all(20211202)
    np.random.seed(20211202)

    # The following args are not defined in opt.py; attach them here.
    extra_options = {
        'acc_mask_threshold': 0.5,
        'if_render_normal': False,
        'vis_equation': 'nerv',
    }
    for option_name, option_value in extra_options.items():
        setattr(args, option_name, option_value)

    dataset_cls = dataset_dict[args.dataset_name]
    test_dataset = dataset_cls(args.datadir,
                               split='test',
                               random_test=False,
                               downsample=args.downsample_test,
                               light_rotation=args.light_rotation)
    relight(test_dataset, args)
# Script entry point.
if __name__ == "__main__":
    main()
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
train_ord.py | Python |
import os
import sys
import torch
from tqdm.auto import tqdm
from opt import config_parser
import datetime
import torch.distributed as dist
from torch.utils.tensorboard import SummaryWriter
from renderer import *
from models.tensoRF_rotated_lights import raw2alpha, TensorVMSplit, AlphaGridMask
from utils import *
from dataLoader import dataset_dict
# Parse the experiment configuration once at import time; `args` is a module
# global consumed by every function below.
args = config_parser()
print(args)

# Setup multi-device training
# WORLD_SIZE is set by the torch distributed launcher; default to 1 process.
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
is_distributed = num_gpus > 1
if is_distributed:
    torch.cuda.set_device(args.local_rank)
    torch.distributed.init_process_group(backend="nccl", init_method="env://")
    # NOTE(review): synchronize() is not defined in this file — presumably
    # imported via `from utils import *`; confirm.
    synchronize()
device = torch.device("cuda:{}".format(args.local_rank) if torch.cuda.is_available() else "cpu")
print(f'Running with {num_gpus} GPU(s)...')

# Render function used by both training and evaluation below.
renderer = Renderer_TensoIR_train
class SimpleSampler:
    """Serves random mini-batches of indices over `total` items.

    The index permutation is reshuffled whenever the next window would run
    past the end, so every epoch sees a fresh random order.
    """

    def __init__(self, total, batch):
        self.total = total
        self.batch = batch
        # Start past the end so the first nextids() call triggers a shuffle.
        self.curr = total
        self.ids = None

    def nextids(self):
        """Return the next `batch` shuffled indices as a LongTensor."""
        start = self.curr + self.batch
        if start + self.batch > self.total:
            self.ids = torch.LongTensor(np.random.permutation(self.total))
            start = 0
        self.curr = start
        return self.ids[start:start + self.batch]
@torch.no_grad()
def export_mesh(args):
    """Load the TensoIR checkpoint at `args.ckpt` and export a .ply mesh.

    The mesh is written next to the checkpoint (same path, `.ply` suffix).
    """
    # torch.load unpickles the checkpoint — only run on trusted files.
    ckpt = torch.load(args.ckpt, map_location=device)
    kwargs = ckpt['kwargs']
    kwargs.update({'device': device})
    # NOTE(review): eval() instantiates the class named by --model_name;
    # assumes a trusted command line.
    tensoIR = eval(args.model_name)(**kwargs)
    tensoIR.load(ckpt)

    # Dense opacity grid; presumably iso-surfaced at level=0.005 inside
    # convert_sdf_samples_to_ply — confirm in utils.
    alpha, _ = tensoIR.getDenseAlpha()
    convert_sdf_samples_to_ply(alpha.cpu(), f'{args.ckpt[:-3]}.ply', bbox=tensoIR.aabb.cpu(), level=0.005)
def render_test(args):
    """Evaluate a trained TensoIR checkpoint on the test split and print PSNRs.

    Outputs are written under `{args.basedir}/test_{args.expname}`.
    """
    # init dataset
    dataset = dataset_dict[args.dataset_name]
    # NOTE(review): the test split is built with args.downsample_train rather
    # than args.downsample_test — confirm this is intentional.
    test_dataset = dataset(args.datadir,
                           split='test',
                           downsample=args.downsample_train,
                           is_stack=True)
    white_bg = test_dataset.white_bg
    ndc_ray = args.ndc_ray

    if not os.path.exists(args.ckpt):
        print('the ckpt path does not exists!!')
        return

    # torch.load unpickles the checkpoint — only run on trusted files.
    ckpt = torch.load(args.ckpt, map_location=device)
    kwargs = ckpt['kwargs']
    kwargs.update({'device': device})
    # NOTE(review): eval() on --model_name; trusted command line assumed.
    tensoIR = eval(args.model_name)(**kwargs)
    tensoIR.load(ckpt)

    logfolder = f'{args.basedir}/test_{args.expname}'
    if args.render_test:
        PSNRs_test, PSNRs_rgb_brdf_test = evaluation_iter_TensoIR_simple(
            test_dataset,
            tensoIR,
            args,
            renderer,
            f'{logfolder}/',
            N_samples=-1,
            white_bg=white_bg,
            ndc_ray=ndc_ray,
            device=device,
            test_all=True
        )
        print(f'PSNRs_test: {PSNRs_test}')
        print(f'PSNRs_rgb_brdf_test: {PSNRs_rgb_brdf_test}')

    # if args.render_path:
    #     c2ws = test_dataset.render_path
    #     os.makedirs(f'{logfolder}/{args.expname}/imgs_path_all', exist_ok=True)
    #     evaluation_path(test_dataset, tensoIR, c2ws, renderer, f'{logfolder}/{args.expname}/imgs_path_all/',
    #                     N_vis=-1, N_samples=-1, white_bg=white_bg, ndc_ray=ndc_ray, device=device)
def reconstruction(args):
    """Train a TensoIR model on `args`' dataset.

    Phase 1 fits radiance only; after the first alpha-mask update the
    `relight_flag` turns on BRDF/normal/relighting losses. Periodically
    evaluates on the test split, logs to TensorBoard, and checkpoints.
    """
    # init dataset
    dataset = dataset_dict[args.dataset_name]
    train_dataset = dataset(args.datadir,
                            split='train',
                            downsample=args.downsample_train,
                            light_name=args.light_name,
                            light_rotation=args.light_rotation,
                            scene_bbox=args.scene_bbox,
                            )
    test_dataset = dataset(
        args.datadir,
        split='test',
        downsample=args.downsample_test,
        light_name=args.light_name,
        light_rotation=args.light_rotation,
        scene_bbox=args.scene_bbox,
        is_stack=True,
    )
    # if is_distributed:
    #     train_sampler = torch.utils.data.DistributedSampler(train_dataset, num_replicas=dist.get_world_size(),
    #                                                         rank=dist.get_rank())
    #     train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=100, sampler=train_sampler,
    #                                                    num_workers=16, drop_last=True, pin_memory=True)
    # else:
    #     train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=100, num_workers=16,
    #                                                    drop_last=True, shuffle=True)
    print(f'Finish reading dataset')

    white_bg = train_dataset.white_bg
    near_far = train_dataset.near_far
    ndc_ray = args.ndc_ray

    # init resolution
    upsamp_list = args.upsamp_list
    update_AlphaMask_list = args.update_AlphaMask_list
    n_lamb_sigma = args.n_lamb_sigma
    n_lamb_sh = args.n_lamb_sh

    if args.add_timestamp:
        logfolder = f'{args.basedir}/{args.expname}{datetime.datetime.now().strftime("-%Y%m%d-%H%M%S")}'
    else:
        logfolder = f'{args.basedir}/{args.expname}'

    # init log file
    os.makedirs(logfolder, exist_ok=True)
    os.makedirs(f'{logfolder}/imgs_vis', exist_ok=True)
    os.makedirs(f'{logfolder}/checkpoints', exist_ok=True)
    summary_writer = SummaryWriter(logfolder)
    # copy the config file into the log folder
    # NOTE(review): shell interpolation of user-supplied paths; fine for a
    # research script with trusted CLI input.
    os.system(f'cp {args.config} {logfolder}')

    # init parameters
    aabb = train_dataset.scene_bbox.to(device)
    reso_cur = N_to_reso(args.N_voxel_init, aabb)  # number of voxels in each direction
    nSamples = min(args.nSamples, cal_n_samples(reso_cur, args.step_ratio))

    if args.ckpt is not None:
        # Resume from a checkpoint (torch.load unpickles — trusted files only).
        ckpt = torch.load(args.ckpt, map_location=device)
        kwargs = ckpt['kwargs']
        kwargs.update({'device': device})
        tensoIR = eval(args.model_name)(**kwargs)
        tensoIR.load(ckpt)
    else:
        # Fresh model; eval() instantiates the class named by --model_name.
        tensoIR = eval(args.model_name)(
            aabb,
            reso_cur,
            device,
            density_n_comp=n_lamb_sigma,
            appearance_n_comp=n_lamb_sh,
            app_dim=args.data_dim_color,
            near_far=near_far,
            shadingMode=args.shadingMode,
            alphaMask_thres=args.alpha_mask_thre,
            density_shift=args.density_shift,
            distance_scale=args.distance_scale,
            pos_pe=args.pos_pe,
            view_pe=args.view_pe,
            fea_pe=args.fea_pe,
            featureC=args.featureC,
            step_ratio=args.step_ratio,
            fea2denseAct=args.fea2denseAct,
            normals_kind = args.normals_kind,
            light_rotation=args.light_rotation,
            light_kind=args.light_kind,
            dataset=train_dataset,
            numLgtSGs=args.numLgtSGs,
        )

    grad_vars = tensoIR.get_optparam_groups(args.lr_init, args.lr_basis)
    # Per-iteration exponential LR decay factor reaching
    # lr_decay_target_ratio after lr_decay_iters steps.
    if args.lr_decay_iters > 0:
        lr_factor = args.lr_decay_target_ratio ** (1 / (args.lr_decay_iters))
    else:
        args.lr_decay_iters = args.n_iters
        lr_factor = args.lr_decay_target_ratio ** (1 / (args.lr_decay_iters))
    print("lr decay", args.lr_decay_target_ratio, args.lr_decay_iters)
    optimizer = torch.optim.Adam(grad_vars, betas=(0.9, 0.99))

    # linear in logrithmic space — voxel counts for each upsampling step.
    N_voxel_list = (torch.round(torch.exp(
        torch.linspace(np.log(args.N_voxel_init), np.log(args.N_voxel_final), len(upsamp_list) + 1))).long()).tolist()[1:]

    torch.cuda.empty_cache()
    PSNRs_test, PSNRs_rgb_brdf_test = [0], [0]
    PSNRs_rgb, PSNRs_rgb_brdf = [], []

    # Regularizer weights (some decay or are zeroed later in training).
    Ortho_reg_weight = args.Ortho_weight
    print("initial Ortho_reg_weight", Ortho_reg_weight)
    L1_reg_weight = args.L1_weight_inital
    print("initial L1_reg_weight", L1_reg_weight)
    TV_weight_density, TV_weight_app = args.TV_weight_density, args.TV_weight_app
    tvreg = TVLoss()
    print(f"initial TV_weight density: {TV_weight_density} appearance: {TV_weight_app}")

    all_rays, all_rgbs, all_light_idx = train_dataset.all_rays, train_dataset.all_rgbs, train_dataset.all_light_idx
    # Filter rays outside the bbox
    rays_filtered, filter_mask = tensoIR.filtering_rays(all_rays, bbox_only=True)
    rgbs_filtered = all_rgbs[filter_mask, :]  # [filtered(N*H*W), 3]
    light_idx_filtered = all_light_idx[filter_mask, :]  # [filtered(N*H*W), 1]
    trainingSampler = SimpleSampler(rays_filtered.shape[0], args.batch_size)

    # Only rank 0 gets a live progress bar; other ranks iterate a plain range.
    pbar = tqdm(range(args.n_iters), miniters=args.progress_refresh_rate, file=sys.stdout) if (
        (not is_distributed) or (dist.get_rank() == 0)) else range(args.n_iters)
    relight_flag = False
    for iteration in pbar:
        # Sample batch_size chunk from all rays
        rays_idx = trainingSampler.nextids()
        rays_train = rays_filtered[rays_idx]
        rgb_train = rgbs_filtered[rays_idx].to(device)
        light_idx_train = light_idx_filtered[rays_idx].to(device)
        rgb_with_brdf_train = rgb_train

        ret_kw = renderer(
            rays=rays_train,  # [batch_size, 6]
            normal_gt=None,  # [batch_size, 3]
            light_idx=light_idx_train,  # [batch_size, 1]
            tensoIR=tensoIR,  # nn.Module
            N_samples=nSamples,  # int
            white_bg=white_bg,  # bool
            ndc_ray=ndc_ray,
            device=device,
            sample_method=args.light_sample_train,
            chunk_size=args.relight_chunk_size,
            is_train=True,
            is_relight=relight_flag,
            args=args
        )

        total_loss = 0
        # Placeholder so the logging below is well-defined before relighting
        # starts.
        loss_rgb_brdf = torch.tensor(1e-6).to(device)
        loss_rgb = torch.mean((ret_kw['rgb_map'] - rgb_train) ** 2)
        total_loss += loss_rgb

        if Ortho_reg_weight > 0:
            loss_reg = tensoIR.vector_comp_diffs()
            total_loss += Ortho_reg_weight * loss_reg
            summary_writer.add_scalar('train/reg', loss_reg.detach().item(), global_step=iteration)
        if L1_reg_weight > 0:
            loss_reg_L1 = tensoIR.density_L1()
            total_loss += L1_reg_weight * loss_reg_L1
            summary_writer.add_scalar('train/reg_l1', loss_reg_L1.detach().item(), global_step=iteration)
        if TV_weight_density > 0:
            # TV weights decay together with the learning rate.
            TV_weight_density *= lr_factor
            loss_tv = tensoIR.TV_loss_density(tvreg) * TV_weight_density
            total_loss = total_loss + loss_tv
            summary_writer.add_scalar('train/reg_tv_density', loss_tv.detach().item(), global_step=iteration)
        if TV_weight_app > 0:
            TV_weight_app *= lr_factor
            loss_tv = tensoIR.TV_loss_app(tvreg)*TV_weight_app
            total_loss = total_loss + loss_tv
            summary_writer.add_scalar('train/reg_tv_app', loss_tv.detach().item(), global_step=iteration)

        if relight_flag:
            loss_rgb_brdf = torch.mean((ret_kw['rgb_with_brdf_map'] - rgb_with_brdf_train) ** 2)
            total_loss += loss_rgb_brdf * args.rgb_brdf_weight

            # exponential growth of the normal/BRDF loss weights from the
            # first alpha-mask update to the end of training.
            normal_weight_factor = args.normals_loss_enhance_ratio ** ((iteration- update_AlphaMask_list[0])/ (args.n_iters - update_AlphaMask_list[0]))
            BRDF_weight_factor = args.BRDF_loss_enhance_ratio ** ((iteration- update_AlphaMask_list[0])/ (args.n_iters - update_AlphaMask_list[0]))

            if args.normals_diff_weight > 0:
                loss_normals_diff = normal_weight_factor * args.normals_diff_weight * ret_kw['normals_diff_map'].mean()
                total_loss += loss_normals_diff
                summary_writer.add_scalar('train/normals_diff_loss', loss_normals_diff.detach().item(), iteration)
            if args.normals_orientation_weight > 0:
                loss_normals_orientation = normal_weight_factor * args.normals_orientation_weight * ret_kw['normals_orientation_loss_map'].mean()
                total_loss += loss_normals_orientation
                summary_writer.add_scalar('train/normals_orientation_loss', loss_normals_orientation.detach().item(), iteration)
            if args.roughness_smoothness_loss_weight > 0:
                roughness_smoothness_loss = BRDF_weight_factor * args.roughness_smoothness_loss_weight * ret_kw['roughness_smoothness_loss']
                total_loss += roughness_smoothness_loss
                summary_writer.add_scalar('train/roughness_smoothness_loss', roughness_smoothness_loss.detach().item(), iteration)
            if args.albedo_smoothness_loss_weight > 0:
                albedo_smoothness_loss = BRDF_weight_factor * args.albedo_smoothness_loss_weight * ret_kw['albedo_smoothness_loss']
                total_loss += albedo_smoothness_loss
                summary_writer.add_scalar('train/albedo_smoothness_loss', albedo_smoothness_loss.detach().item(), iteration)

        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()

        # Detach to plain floats for logging.
        total_loss = total_loss.detach().item()
        loss_rgb = loss_rgb.detach().item()
        loss_rgb_brdf = loss_rgb_brdf.detach().item()

        # PSNR = -10 * log10(MSE).
        PSNRs_rgb.append(-10.0 * np.log(loss_rgb) / np.log(10.0))
        if relight_flag:
            PSNRs_rgb_brdf.append(-10.0 * np.log(loss_rgb_brdf) / np.log(10.0))
        else:
            PSNRs_rgb_brdf.append(0.0)

        if (not is_distributed) or (dist.get_rank() == 0):
            summary_writer.add_scalar('train/mse', total_loss, global_step=iteration)
            summary_writer.add_scalar('train/PSNRs_rgb', PSNRs_rgb[-1], global_step=iteration)
            summary_writer.add_scalar('train/mse_rgb', loss_rgb, global_step=iteration)
            if relight_flag:
                summary_writer.add_scalar('train/PSNRs_rgb_brdf', PSNRs_rgb_brdf[-1], global_step=iteration)
                summary_writer.add_scalar('train/mse_rgb_brdf', loss_rgb_brdf, global_step=iteration)

            # Print the current values of the losses.
            if iteration % args.progress_refresh_rate == 0:
                pbar.set_description(
                    f'Iteration {iteration:05d} PSNR:'
                    + f' train_rgb = {float(np.mean(PSNRs_rgb)):.2f}'
                    + f' train_rgb_brdf = {float(np.mean(PSNRs_rgb_brdf)):.2f}'
                    + f' test_rgb = {float(np.mean(PSNRs_test)):.2f}'
                    + f' test_rgb_brdf = {float(np.mean(PSNRs_rgb_brdf_test)):.2f}'
                    + f' mse = {float(total_loss):.6f}'
                )
                PSNRs_rgb = []
                PSNRs_rgb_brdf = []

        # Evaluate on testing dataset
        if iteration % args.vis_every == args.vis_every - 1 and args.N_vis != 0 and relight_flag:
            PSNRs_test, PSNRs_rgb_brdf_test = evaluation_iter_TensoIR_simple(
                test_dataset,
                tensoIR,
                args,
                renderer,
                f'{logfolder}/imgs_vis/',
                prtx=f'{iteration:06d}_',
                N_samples=nSamples,
                white_bg=white_bg,
                ndc_ray=ndc_ray,
                compute_extra_metrics=False,
                logger=summary_writer,
                step=iteration,
                device=device,
            )
            summary_writer.add_scalar('test/psnr_rgb', np.mean(PSNRs_test), global_step=iteration)
            summary_writer.add_scalar('test/psnr_rgb_brdf', np.mean(PSNRs_rgb_brdf_test), global_step=iteration)

        # Save iteration models
        if iteration % args.save_iters == 0:
            tensoIR.save(f'{logfolder}/checkpoints/{args.expname}_{iteration}.th')

        # Update learning rate
        for param_group in optimizer.param_groups:
            param_group['lr'] = param_group['lr'] * lr_factor

        if iteration in update_AlphaMask_list:
            # NOTE(review): reso_mask is only assigned when the current grid is
            # below 256^3; if the first alpha-mask update happens at a larger
            # grid this raises UnboundLocalError — confirm the schedule
            # guarantees the small-grid case first.
            if reso_cur[0] * reso_cur[1] * reso_cur[2] < 256 ** 3:  # update volume resolution
                reso_mask = reso_cur
            new_aabb = tensoIR.updateAlphaMask(tuple(reso_mask))
            if iteration == update_AlphaMask_list[0]:
                tensoIR.shrink(new_aabb)
                # tensorVM.alphaMask = None
                L1_reg_weight = args.L1_weight_rest
                print("continuing L1_reg_weight", L1_reg_weight)
                # The GPU demands will decrease significantly after AlphaMask is generated, so we can begin relighting training
                relight_flag = True
                torch.cuda.empty_cache()
                TV_weight_density = 0
                TV_weight_app = 0
            if not args.ndc_ray and iteration == update_AlphaMask_list[1]:
                # Filter rays outside the bbox
                rays_filtered, filter_mask = tensoIR.filtering_rays(all_rays, bbox_only=True)
                rgbs_filtered = all_rgbs[filter_mask, :]  # [filtered(N*H*W), 3]
                light_idx_filtered = all_light_idx[filter_mask, :]  # [filtered(N*H*W), 1]
                trainingSampler = SimpleSampler(rays_filtered.shape[0], args.batch_size)

        if iteration in upsamp_list:
            # Grow the voxel grid to the next scheduled resolution.
            n_voxels = N_voxel_list.pop(0)
            reso_cur = N_to_reso(n_voxels, tensoIR.aabb)
            nSamples = min(args.nSamples, cal_n_samples(reso_cur, args.step_ratio))
            tensoIR.upsample_volume_grid(reso_cur)

            if args.lr_upsample_reset:
                print("reset lr to initial")
                lr_scale = 1  # 0.1 ** (iteration / args.n_iters)
            else:
                lr_scale = args.lr_decay_target_ratio ** (iteration / args.n_iters)
            grad_vars = tensoIR.get_optparam_groups(args.lr_init * lr_scale, args.lr_basis * lr_scale)
            optimizer = torch.optim.Adam(grad_vars, betas=(0.9, 0.99))

    tensoIR.save(f'{logfolder}/{args.expname}.th')

    # if args.render_train:
    #     os.makedirs(f'{logfolder}/imgs_train_all', exist_ok=True)
    #     train_dataset = dataset(args.datadir, split='train', downsample=args.downsample_train, is_stack=True)
    #     PSNRs_test = evaluation(train_dataset, tensoIR, args, renderer, visibility_net, f'{logfolder}/imgs_train_all/',
    #                             N_vis=-1, N_samples=-1, white_bg=white_bg, ndc_ray=ndc_ray, device=device)
    #     print(f'======> {args.expname} test all psnr: {np.mean(PSNRs_test)} <========================')

    if args.render_test:
        os.makedirs(f'{logfolder}/imgs_test_all', exist_ok=True)
        PSNRs_test, PSNRs_rgb_brdf_test = evaluation_iter_TensoIR_simple(
            test_dataset,
            tensoIR,
            args,
            renderer,
            f'{logfolder}/imgs_test_all/',
            N_samples=-1,
            white_bg=white_bg,
            ndc_ray=ndc_ray,
            device=device,
            test_all=True
        )
        summary_writer.add_scalar('test/psnr_rgb_all', np.mean(PSNRs_test), global_step=iteration)
        summary_writer.add_scalar('test/psnr_rgb_brdf_all', np.mean(PSNRs_rgb_brdf_test), global_step=iteration)
        print(f'======> {args.expname} test all: nvs psnr: {np.mean(PSNRs_test)}, nvs with brdf psnr: {np.mean(PSNRs_rgb_brdf_test)} <========================')

    # if args.render_path:
    #     c2ws = test_dataset.render_path
    #     # c2ws = test_dataset.poses
    #     print('========>', c2ws.shape)
    #     os.makedirs(f'{logfolder}/imgs_path_all', exist_ok=True)
    #     evaluation_path(test_dataset, tensoIR, c2ws, renderer, visibility_net, f'{logfolder}/imgs_path_all/',
    #                     N_vis=-1, N_samples=-1, white_bg=white_bg, ndc_ray=ndc_ray, device=device)
if __name__ == '__main__':
    # Fix every RNG we use for reproducible runs.
    torch.set_default_dtype(torch.float32)
    torch.manual_seed(20211202)
    torch.cuda.manual_seed_all(20211202)
    np.random.seed(20211202)
    random.seed(20211202)
    os.environ['PYTHONHASHSEED'] = str(20211202)

    if args.export_mesh:
        # NOTE(review): execution falls through to rendering/training below
        # even when a mesh export was requested — confirm this is intended.
        export_mesh(args)

    if args.render_only and (args.render_test or args.render_path):
        render_test(args)
    else:
        reconstruction(args)
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
train_tensoIR.py | Python | import os
import sys
import torch
from tqdm.auto import tqdm
from opt import config_parser
import datetime
import torch.distributed as dist
from torch.utils.tensorboard import SummaryWriter
from renderer import *
from models.tensoRF_rotated_lights import raw2alpha, TensorVMSplit, AlphaGridMask
from utils import *
from dataLoader import dataset_dict
# Parse CLI/config options once at import time; `args` is shared module-wide.
args = config_parser()
print(args)
# Setup multi-device training
# torchrun/launch sets WORLD_SIZE; default to a single process when absent.
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
is_distributed = num_gpus > 1
if is_distributed:
    # Bind this process to its GPU before initializing the NCCL process group.
    torch.cuda.set_device(args.local_rank)
    torch.distributed.init_process_group(backend="nccl", init_method="env://")
    # NOTE(review): synchronize() presumably barriers all ranks here -- defined in utils; confirm.
    synchronize()
device = torch.device("cuda:{}".format(args.local_rank) if torch.cuda.is_available() else "cpu")
print(f'Running with {num_gpus} GPU(s)...')
# Training-time renderer entry point (comes in via the star import from renderer).
renderer = Renderer_TensoIR_train
class SimpleSampler:
    """Serve successive random mini-batches of indices over [0, total).

    The index space is reshuffled whenever the next batch would run past the
    end, so each "epoch" sees a fresh permutation. The final `total % batch`
    indices of a permutation are skipped, matching the original behavior.
    """

    def __init__(self, total, batch):
        self.total = total
        self.batch = batch
        # Start past the end so the first nextids() call triggers a shuffle.
        self.curr = total
        self.ids = None

    def nextids(self):
        """Return a LongTensor of `batch` indices, reshuffling when exhausted."""
        self.curr += self.batch
        if self.curr + self.batch > self.total:
            self.ids = torch.LongTensor(np.random.permutation(self.total))
            self.curr = 0
        start = self.curr
        return self.ids[start:start + self.batch]
@torch.no_grad()
def export_mesh(args):
    """Load the checkpoint at args.ckpt and export its density field as a PLY surface (iso-level 0.005)."""
    ckpt = torch.load(args.ckpt, map_location=device)
    # The checkpoint stores the model constructor kwargs alongside the weights.
    kwargs = ckpt['kwargs']
    kwargs.update({'device': device})
    # NOTE(review): eval() on a config-supplied model name is fine for trusted
    # configs but unsafe if args.model_name ever comes from untrusted input.
    tensoIR = eval(args.model_name)(**kwargs)
    tensoIR.load(ckpt)
    alpha, _ = tensoIR.getDenseAlpha()
    # Strip the ".th" suffix to place the mesh next to the checkpoint.
    convert_sdf_samples_to_ply(alpha.cpu(), f'{args.ckpt[:-3]}.ply', bbox=tensoIR.aabb.cpu(), level=0.005)
def render_test(args):
    """Evaluate a trained checkpoint on the test split and print NVS/BRDF/albedo metrics."""
    # init dataset
    dataset = dataset_dict[args.dataset_name]
    test_dataset = dataset(args.datadir, args.hdrdir, light_rotation=args.light_rotation, split='test', downsample=args.downsample_train, is_stack=False,
                           sub=args.test_number)
    white_bg = test_dataset.white_bg
    ndc_ray = args.ndc_ray
    # Bail out early on a missing checkpoint instead of crashing in torch.load.
    if not os.path.exists(args.ckpt):
        print('the ckpt path does not exists!!')
        return
    ckpt = torch.load(args.ckpt, map_location=device)
    # Rebuild the model from the kwargs stored inside the checkpoint.
    kwargs = ckpt['kwargs']
    kwargs.update({'device': device})
    tensoIR = eval(args.model_name)(**kwargs)
    tensoIR.load(ckpt)
    logfolder = f'{args.basedir}/test_{args.expname}{datetime.datetime.now().strftime("-%Y%m%d-%H%M%S")}'
    if args.render_test:
        # NOTE(review): unlike the sibling scripts, imgs_test_all is never
        # created here -- presumably the evaluation helper creates it; confirm.
        PSNRs_test, PSNRs_rgb_brdf_test, MAE_test,\
        PSNR_albedo_single, PSNR_albedo_three = evaluation_iter_TensoIR(
            test_dataset,
            tensoIR,
            args,
            renderer,
            f'{logfolder}/imgs_test_all/',
            N_samples=-1,
            white_bg=white_bg,
            ndc_ray=ndc_ray,
            device=device,
            test_all=True
        )
        print(f'PSNRs_test: {PSNRs_test}')
        print(f'PSNRs_rgb_brdf_test: {PSNRs_rgb_brdf_test}')
        print(f'MAE_test: {MAE_test}')
        print(f'PSNR_albedo_single: {PSNR_albedo_single}')
        print(f'PSNR_albedo_three: {PSNR_albedo_three}')
def reconstruction(args):
    """Train a TensoIR model end-to-end, with periodic evaluation and checkpoints.

    Builds (or restores from args.ckpt) the model named by ``args.model_name``,
    then optimizes photometric + regularization losses for ``args.n_iters``
    steps, updating the alpha mask and upsampling the voxel grid on schedule.
    Relighting/BRDF losses only start after the first alpha-mask update, when
    GPU memory pressure drops. Outputs land under ``args.basedir/args.expname``.

    BUGFIX: guarded ``pbar.set_description`` by rank -- on non-zero distributed
    ranks ``pbar`` is a plain ``range()`` and the call raised AttributeError.
    """
    # init dataset
    dataset = dataset_dict[args.dataset_name]
    train_dataset = dataset(
        args.datadir,
        args.hdrdir,
        split='train',
        downsample=args.downsample_train,
        light_name=args.light_name,
        light_rotation=args.light_rotation
    )
    test_dataset = dataset(
        args.datadir,
        args.hdrdir,
        split='test',
        downsample=args.downsample_test,
        light_name=args.light_name,
        light_rotation=args.light_rotation
    )
    print(f'Finish reading dataset')
    white_bg = train_dataset.white_bg
    near_far = train_dataset.near_far
    ndc_ray = args.ndc_ray
    # init resolution / schedules
    upsamp_list = args.upsamp_list
    update_AlphaMask_list = args.update_AlphaMask_list
    n_lamb_sigma = args.n_lamb_sigma
    n_lamb_sh = args.n_lamb_sh
    if args.add_timestamp:
        logfolder = f'{args.basedir}/{args.expname}{datetime.datetime.now().strftime("-%Y%m%d-%H%M%S")}'
    else:
        logfolder = f'{args.basedir}/{args.expname}'
    # init log folders and tensorboard writer
    os.makedirs(logfolder, exist_ok=True)
    os.makedirs(f'{logfolder}/imgs_vis', exist_ok=True)
    os.makedirs(f'{logfolder}/checkpoints', exist_ok=True)
    summary_writer = SummaryWriter(logfolder)
    # copy the config file into the log folder for provenance
    os.system(f'cp {args.config} {logfolder}')
    # init parameters
    aabb = train_dataset.scene_bbox.to(device)
    reso_cur = N_to_reso(args.N_voxel_init, aabb)  # number of voxels in each direction
    nSamples = min(args.nSamples, cal_n_samples(reso_cur, args.step_ratio))
    if args.ckpt is not None:
        # Resume: the checkpoint carries the model constructor kwargs.
        ckpt = torch.load(args.ckpt, map_location=device)
        kwargs = ckpt['kwargs']
        kwargs.update({'device': device})
        tensoIR = eval(args.model_name)(**kwargs)
        tensoIR.load(ckpt)
    else:
        tensoIR = eval(args.model_name)(
            aabb,
            reso_cur,
            device,
            density_n_comp=n_lamb_sigma,
            appearance_n_comp=n_lamb_sh,
            app_dim=args.data_dim_color,
            near_far=near_far,
            shadingMode=args.shadingMode,
            alphaMask_thres=args.alpha_mask_thre,
            density_shift=args.density_shift,
            distance_scale=args.distance_scale,
            pos_pe=args.pos_pe,
            view_pe=args.view_pe,
            fea_pe=args.fea_pe,
            featureC=args.featureC,
            step_ratio=args.step_ratio,
            fea2denseAct=args.fea2denseAct,
            normals_kind=args.normals_kind,
            light_rotation=args.light_rotation,
            light_kind=args.light_kind,
            dataset=train_dataset,
            numLgtSGs=args.numLgtSGs,
        )
    grad_vars = tensoIR.get_optparam_groups(args.lr_init, args.lr_basis)
    if args.lr_decay_iters > 0:
        lr_factor = args.lr_decay_target_ratio ** (1 / (args.lr_decay_iters))
    else:
        args.lr_decay_iters = args.n_iters
        lr_factor = args.lr_decay_target_ratio ** (1 / (args.lr_decay_iters))
    print("lr decay", args.lr_decay_target_ratio, args.lr_decay_iters)
    optimizer = torch.optim.Adam(grad_vars, betas=(0.9, 0.99))
    # Upsampling targets: linear in logarithmic (voxel-count) space.
    N_voxel_list = (torch.round(torch.exp(
        torch.linspace(np.log(args.N_voxel_init), np.log(args.N_voxel_final), len(upsamp_list) + 1))).long()).tolist()[1:]
    torch.cuda.empty_cache()
    PSNRs_test, PSNRs_rgb_brdf_test = [0], [0]
    PSNRs_rgb, PSNRs_rgb_brdf = [], []
    Ortho_reg_weight = args.Ortho_weight
    print("initial Ortho_reg_weight", Ortho_reg_weight)
    L1_reg_weight = args.L1_weight_inital
    print("initial L1_reg_weight", L1_reg_weight)
    TV_weight_density, TV_weight_app = args.TV_weight_density, args.TV_weight_app
    tvreg = TVLoss()
    print(f"initial TV_weight density: {TV_weight_density} appearance: {TV_weight_app}")
    all_rays, all_rgbs, all_masks, all_light_idx = train_dataset.all_rays, train_dataset.all_rgbs, train_dataset.all_masks, train_dataset.all_light_idx
    # Filter rays outside the bbox
    rays_filtered, filter_mask = tensoIR.filtering_rays(all_rays, bbox_only=True)
    rgbs_filtered = all_rgbs[filter_mask, :]  # [filtered(N*H*W), 3]
    light_idx_filtered = all_light_idx[filter_mask, :]  # [filtered(N*H*W), 1]
    trainingSampler = SimpleSampler(rays_filtered.shape[0], args.batch_size)
    # Only rank 0 owns a tqdm bar; every other rank iterates a plain range().
    pbar = tqdm(range(args.n_iters), miniters=args.progress_refresh_rate, file=sys.stdout) if (
        (not is_distributed) or (dist.get_rank() == 0)) else range(args.n_iters)
    relight_flag = False  # relighting losses start after the first alpha-mask update
    for iteration in pbar:
        # Sample a batch_size chunk from the filtered rays.
        rays_idx = trainingSampler.nextids()
        rays_train = rays_filtered[rays_idx]
        rgb_train = rgbs_filtered[rays_idx].to(device)
        light_idx_train = light_idx_filtered[rays_idx].to(device)
        rgb_with_brdf_train = rgb_train
        ret_kw = renderer(
            rays=rays_train,  # [batch_size, 6]
            normal_gt=None,  # [batch_size, 3]
            light_idx=light_idx_train,  # [batch_size, 1]
            tensoIR=tensoIR,  # nn.Module
            N_samples=nSamples,  # int
            white_bg=white_bg,  # bool
            ndc_ray=ndc_ray,
            device=device,
            sample_method=args.light_sample_train,
            chunk_size=args.relight_chunk_size,
            is_train=True,
            is_relight=relight_flag,
            args=args
        )
        total_loss = 0
        # Placeholder so the later .detach().item() is valid before relighting starts.
        loss_rgb_brdf = torch.tensor(1e-6).to(device)
        loss_rgb = torch.mean((ret_kw['rgb_map'] - rgb_train) ** 2)
        total_loss += loss_rgb
        if Ortho_reg_weight > 0:
            loss_reg = tensoIR.vector_comp_diffs()
            total_loss += Ortho_reg_weight * loss_reg
            summary_writer.add_scalar('train/reg', loss_reg.detach().item(), global_step=iteration)
        if L1_reg_weight > 0:
            loss_reg_L1 = tensoIR.density_L1()
            total_loss += L1_reg_weight * loss_reg_L1
            summary_writer.add_scalar('train/reg_l1', loss_reg_L1.detach().item(), global_step=iteration)
        if TV_weight_density > 0:
            TV_weight_density *= lr_factor  # TV weight decays alongside the lr
            loss_tv = tensoIR.TV_loss_density(tvreg) * TV_weight_density
            total_loss = total_loss + loss_tv
            summary_writer.add_scalar('train/reg_tv_density', loss_tv.detach().item(), global_step=iteration)
        if TV_weight_app > 0:
            TV_weight_app *= lr_factor
            loss_tv = tensoIR.TV_loss_app(tvreg) * TV_weight_app
            total_loss = total_loss + loss_tv
            summary_writer.add_scalar('train/reg_tv_app', loss_tv.detach().item(), global_step=iteration)
        if relight_flag:
            loss_rgb_brdf = torch.mean((ret_kw['rgb_with_brdf_map'] - rgb_with_brdf_train) ** 2)
            total_loss += loss_rgb_brdf * args.rgb_brdf_weight
            # exponential growth if ratio set > 1
            normal_weight_factor = args.normals_loss_enhance_ratio ** ((iteration - update_AlphaMask_list[0]) / (args.n_iters - update_AlphaMask_list[0]))
            BRDF_weight_factor = args.BRDF_loss_enhance_ratio ** ((iteration - update_AlphaMask_list[0]) / (args.n_iters - update_AlphaMask_list[0]))
            if args.normals_diff_weight > 0:
                loss_normals_diff = normal_weight_factor * args.normals_diff_weight * ret_kw['normals_diff_map'].mean()
                total_loss += loss_normals_diff
                summary_writer.add_scalar('train/normals_diff_loss', loss_normals_diff.detach().item(), iteration)
            if args.normals_orientation_weight > 0:
                loss_normals_orientation = normal_weight_factor * args.normals_orientation_weight * ret_kw['normals_orientation_loss_map'].mean()
                total_loss += loss_normals_orientation
                summary_writer.add_scalar('train/normals_orientation_loss', loss_normals_orientation.detach().item(), iteration)
            if args.roughness_smoothness_loss_weight > 0:
                roughness_smoothness_loss = BRDF_weight_factor * args.roughness_smoothness_loss_weight * ret_kw['roughness_smoothness_loss']
                total_loss += roughness_smoothness_loss
                summary_writer.add_scalar('train/roughness_smoothness_loss', roughness_smoothness_loss.detach().item(), iteration)
            if args.albedo_smoothness_loss_weight > 0:
                albedo_smoothness_loss = BRDF_weight_factor * args.albedo_smoothness_loss_weight * ret_kw['albedo_smoothness_loss']
                total_loss += albedo_smoothness_loss
                summary_writer.add_scalar('train/albedo_smoothness_loss', albedo_smoothness_loss.detach().item(), iteration)
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()
        total_loss = total_loss.detach().item()
        loss_rgb = loss_rgb.detach().item()
        loss_rgb_brdf = loss_rgb_brdf.detach().item()
        PSNRs_rgb.append(-10.0 * np.log(loss_rgb) / np.log(10.0))
        if relight_flag:
            PSNRs_rgb_brdf.append(-10.0 * np.log(loss_rgb_brdf) / np.log(10.0))
        else:
            PSNRs_rgb_brdf.append(0.0)
        if (not is_distributed) or (dist.get_rank() == 0):
            summary_writer.add_scalar('train/mse', total_loss, global_step=iteration)
            summary_writer.add_scalar('train/PSNRs_rgb', PSNRs_rgb[-1], global_step=iteration)
            summary_writer.add_scalar('train/mse_rgb', loss_rgb, global_step=iteration)
            if relight_flag:
                summary_writer.add_scalar('train/PSNRs_rgb_brdf', PSNRs_rgb_brdf[-1], global_step=iteration)
                summary_writer.add_scalar('train/mse_rgb_brdf', loss_rgb_brdf, global_step=iteration)
        # Print the current values of the losses.
        if iteration % args.progress_refresh_rate == 0:
            # BUGFIX: only rank 0 has a tqdm bar; other ranks hold a plain
            # range() which has no set_description() and would crash here.
            if (not is_distributed) or (dist.get_rank() == 0):
                pbar.set_description(
                    f'Iteration {iteration:05d} PSNR:'
                    + f' train_rgb = {float(np.mean(PSNRs_rgb)):.2f}'
                    + f' train_rgb_brdf = {float(np.mean(PSNRs_rgb_brdf)):.2f}'
                    + f' test_rgb = {float(np.mean(PSNRs_test)):.2f}'
                    + f' test_rgb_brdf = {float(np.mean(PSNRs_rgb_brdf_test)):.2f}'
                    + f' mse = {float(total_loss):.6f}'
                )
            PSNRs_rgb = []
            PSNRs_rgb_brdf = []
        # Evaluate on testing dataset
        if iteration % args.vis_every == args.vis_every - 1 and args.N_vis != 0 and relight_flag:
            PSNRs_test, PSNRs_rgb_brdf_test, MAE_test, \
            PSNR_albedo_single, PSNR_albedo_three = evaluation_iter_TensoIR(
                test_dataset,
                tensoIR,
                args,
                renderer,
                f'{logfolder}/imgs_vis/',
                prtx=f'{iteration:06d}_',
                N_samples=nSamples,
                white_bg=white_bg,
                ndc_ray=ndc_ray,
                compute_extra_metrics=False,
                logger=summary_writer,
                step=iteration,
                device=device,
            )
            summary_writer.add_scalar('test/psnr_rgb', np.mean(PSNRs_test), global_step=iteration)
            summary_writer.add_scalar('test/psnr_rgb_brdf', np.mean(PSNRs_rgb_brdf_test), global_step=iteration)
            summary_writer.add_scalar('test/mae', MAE_test, global_step=iteration)
            summary_writer.add_scalar('test/psnr_albedo_single', PSNR_albedo_single, global_step=iteration)
            summary_writer.add_scalar('test/psnr_albedo_three', PSNR_albedo_three, global_step=iteration)
        # Save iteration models
        if iteration % args.save_iters == 0:
            tensoIR.save(f'{logfolder}/checkpoints/{args.expname}_{iteration}.th')
        # Update learning rate
        for param_group in optimizer.param_groups:
            param_group['lr'] = param_group['lr'] * lr_factor
        if iteration in update_AlphaMask_list:
            # NOTE(review): reso_mask is unbound if the grid already exceeds
            # 256^3 at the first mask update -- schedules in practice avoid this.
            if reso_cur[0] * reso_cur[1] * reso_cur[2] < 256 ** 3:  # update volume resolution
                reso_mask = reso_cur
            new_aabb = tensoIR.updateAlphaMask(tuple(reso_mask))
            if iteration == update_AlphaMask_list[0]:
                tensoIR.shrink(new_aabb)
                # tensorVM.alphaMask = None
                L1_reg_weight = args.L1_weight_rest
                print("continuing L1_reg_weight", L1_reg_weight)
                # The GPU demands will decrease significantly after AlphaMask is generated, so we can begin relighting training
                relight_flag = True
                torch.cuda.empty_cache()
                TV_weight_density = 0
                TV_weight_app = 0
            if not args.ndc_ray and iteration == update_AlphaMask_list[1]:
                # Filter rays outside the (shrunk) bbox and rebuild the sampler.
                rays_filtered, filter_mask = tensoIR.filtering_rays(all_rays, bbox_only=True)
                rgbs_filtered = all_rgbs[filter_mask, :]  # [filtered(N*H*W), 3]
                light_idx_filtered = all_light_idx[filter_mask, :]  # [filtered(N*H*W), 1]
                trainingSampler = SimpleSampler(rays_filtered.shape[0], args.batch_size)
        if iteration in upsamp_list:
            n_voxels = N_voxel_list.pop(0)
            reso_cur = N_to_reso(n_voxels, tensoIR.aabb)
            nSamples = min(args.nSamples, cal_n_samples(reso_cur, args.step_ratio))
            tensoIR.upsample_volume_grid(reso_cur)
            if args.lr_upsample_reset:
                print("reset lr to initial")
                lr_scale = 1  # 0.1 ** (iteration / args.n_iters)
            else:
                lr_scale = args.lr_decay_target_ratio ** (iteration / args.n_iters)
            # Rebuild the optimizer because upsampling replaced the parameters.
            grad_vars = tensoIR.get_optparam_groups(args.lr_init * lr_scale, args.lr_basis * lr_scale)
            optimizer = torch.optim.Adam(grad_vars, betas=(0.9, 0.99))
    tensoIR.save(f'{logfolder}/{args.expname}.th')
    # if args.render_train:
    #     os.makedirs(f'{logfolder}/imgs_train_all', exist_ok=True)
    #     train_dataset = dataset(args.datadir, split='train', downsample=args.downsample_train, is_stack=True)
    #     PSNRs_test = evaluation(train_dataset, tensoIR, args, renderer, visibility_net, f'{logfolder}/imgs_train_all/',
    #                             N_vis=-1, N_samples=-1, white_bg=white_bg, ndc_ray=ndc_ray, device=device)
    #     print(f'======> {args.expname} test all psnr: {np.mean(PSNRs_test)} <========================')
    if args.render_test:
        os.makedirs(f'{logfolder}/imgs_test_all', exist_ok=True)
        PSNRs_test, PSNRs_rgb_brdf_test, MAE_test,\
        PSNR_albedo_single, PSNR_albedo_three = evaluation_iter_TensoIR(
            test_dataset,
            tensoIR,
            args,
            renderer,
            f'{logfolder}/imgs_test_all/',
            N_samples=-1,
            white_bg=white_bg,
            ndc_ray=ndc_ray,
            device=device,
            test_all=True
        )
        summary_writer.add_scalar('test/psnr_rgb_all', np.mean(PSNRs_test), global_step=iteration)
        summary_writer.add_scalar('test/psnr_rgb_brdf_all', np.mean(PSNRs_rgb_brdf_test), global_step=iteration)
        summary_writer.add_scalar('test/mae_all', MAE_test, global_step=iteration)
        summary_writer.add_scalar('test/psnr_albedo_single_all', PSNR_albedo_single, global_step=iteration)
        summary_writer.add_scalar('test/psnr_albedo_three_all', PSNR_albedo_three, global_step=iteration)
        print(f'======> {args.expname} test all: nvs psnr: {np.mean(PSNRs_test)}, nvs with brdf psnr: {np.mean(PSNRs_rgb_brdf_test)}, MAE: {MAE_test} <========================')
    # if args.render_path:
    #     c2ws = test_dataset.render_path
    #     # c2ws = test_dataset.poses
    #     print('========>', c2ws.shape)
    #     os.makedirs(f'{logfolder}/imgs_path_all', exist_ok=True)
    #     evaluation_path(test_dataset, tensoIR, c2ws, renderer, visibility_net, f'{logfolder}/imgs_path_all/',
    #                     N_vis=-1, N_samples=-1, white_bg=white_bg, ndc_ray=ndc_ray, device=device)
if __name__ == '__main__':
    # Pin every source of randomness so training runs are reproducible.
    _SEED = 20211202
    torch.set_default_dtype(torch.float32)
    torch.manual_seed(_SEED)
    torch.cuda.manual_seed_all(_SEED)
    np.random.seed(_SEED)
    random.seed(_SEED)
    os.environ['PYTHONHASHSEED'] = str(_SEED)
    # Mesh export may run alongside rendering/training.
    if args.export_mesh:
        export_mesh(args)
    if args.render_only and (args.render_test or args.render_path):
        render_test(args)
    else:
        reconstruction(args)
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
train_tensoIR_general_multi_lights.py | Python | """
Author: Haian Jin 8/03/22
Feature:
"""
import os
import sys
import torch
from tqdm.auto import tqdm
from opt import config_parser
import datetime
import torch.distributed as dist
from torch.utils.tensorboard import SummaryWriter
from renderer import *
from models.tensoRF_general_multi_lights import raw2alpha, TensorVMSplit, AlphaGridMask
from utils import *
from dataLoader import dataset_dict
# Parse CLI/config options once at import time; `args` is shared module-wide.
args = config_parser()
print(args)
# Setup multi-device training
# torchrun/launch sets WORLD_SIZE; default to a single process when absent.
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
is_distributed = num_gpus > 1
if is_distributed:
    # Bind this process to its GPU before initializing the NCCL process group.
    torch.cuda.set_device(args.local_rank)
    torch.distributed.init_process_group(backend="nccl", init_method="env://")
    # NOTE(review): synchronize() presumably barriers all ranks here -- defined in utils; confirm.
    synchronize()
device = torch.device("cuda:{}".format(args.local_rank) if torch.cuda.is_available() else "cpu")
print(f'Running with {num_gpus} GPU(s)...')
# Training-time renderer entry point (comes in via the star import from renderer).
renderer = Renderer_TensoIR_train
class SimpleSampler:
    """Serve successive random mini-batches of indices over [0, total).

    The index space is reshuffled whenever the next batch would run past the
    end, so each "epoch" sees a fresh permutation. The final `total % batch`
    indices of a permutation are skipped, matching the original behavior.
    """

    def __init__(self, total, batch):
        self.total = total
        self.batch = batch
        # Start past the end so the first nextids() call triggers a shuffle.
        self.curr = total
        self.ids = None

    def nextids(self):
        """Return a LongTensor of `batch` indices, reshuffling when exhausted."""
        self.curr += self.batch
        if self.curr + self.batch > self.total:
            self.ids = torch.LongTensor(np.random.permutation(self.total))
            self.curr = 0
        start = self.curr
        return self.ids[start:start + self.batch]
@torch.no_grad()
def export_mesh(args):
    """Load the checkpoint at args.ckpt and export its density field as a PLY surface (iso-level 0.005)."""
    ckpt = torch.load(args.ckpt, map_location=device)
    # The checkpoint stores the model constructor kwargs alongside the weights.
    kwargs = ckpt['kwargs']
    kwargs.update({'device': device})
    # NOTE(review): eval() on a config-supplied model name is fine for trusted
    # configs but unsafe if args.model_name ever comes from untrusted input.
    tensoIR = eval(args.model_name)(**kwargs)
    tensoIR.load(ckpt)
    alpha, _ = tensoIR.getDenseAlpha()
    # Strip the ".th" suffix to place the mesh next to the checkpoint.
    convert_sdf_samples_to_ply(alpha.cpu(), f'{args.ckpt[:-3]}.ply', bbox=tensoIR.aabb.cpu(), level=0.005)
def render_test(args):
    """Evaluate a trained checkpoint on the test split under every learned light, printing averaged metrics."""
    # init dataset
    dataset = dataset_dict[args.dataset_name]
    test_dataset = dataset(args.datadir, args.hdrdir, split='test', downsample=args.downsample_train, is_stack=False,
                           sub=args.test_number)
    white_bg = test_dataset.white_bg
    ndc_ray = args.ndc_ray
    # Bail out early on a missing checkpoint instead of crashing in torch.load.
    if not os.path.exists(args.ckpt):
        print('the ckpt path does not exists!!')
        return
    ckpt = torch.load(args.ckpt, map_location=device)
    # Rebuild the model from the kwargs stored inside the checkpoint.
    kwargs = ckpt['kwargs']
    kwargs.update({'device': device})
    tensoIR = eval(args.model_name)(**kwargs)
    tensoIR.load(ckpt)
    logfolder = f'{args.basedir}/test_{args.expname}{datetime.datetime.now().strftime("-%Y%m%d-%H%M%S")}'
    # if args.render_train:
    #     os.makedirs(f'{logfolder}/imgs_train_all', exist_ok=True)
    #     train_dataset = dataset(args.datadir, args.hdrdir, split='train', downsample=args.downsample_train,
    #                             is_stack=False)
    #     evaluation_all(train_dataset, tensoIR, args, renderer,visibility_net, f'{logfolder}/imgs_test_all/',
    #                    N_samples=-1, white_bg=white_bg, ndc_ray=ndc_ray, device=device, test_all=True)
    if args.render_test:
        os.makedirs(f'{logfolder}/imgs_test_all', exist_ok=True)
        PSNRs_test_list, PSNRs_rgb_brdf_test_list = [], []
        # Evaluate once per learned light; each light writes to its own subfolder.
        for light_idx_to_test in range(tensoIR.light_num):
            cur_light_name = tensoIR.light_name_list[light_idx_to_test]
            os.makedirs(f'{logfolder}/imgs_test_all/{cur_light_name}', exist_ok=True)
            PSNRs_test, PSNRs_rgb_brdf_test, MAE_test,\
            PSNR_albedo_single, PSNR_albedo_three = evaluation_iter_TensoIR_general_multi_lights(
                test_dataset,
                tensoIR,
                args,
                renderer,
                f'{logfolder}/imgs_test_all/{cur_light_name}/',
                N_samples=-1,
                white_bg=white_bg,
                ndc_ray=ndc_ray,
                device=device,
                test_all=True,
                light_idx_to_test=light_idx_to_test,
            )
            PSNRs_test_list.append(np.mean(PSNRs_test))
            PSNRs_rgb_brdf_test_list.append(np.mean(PSNRs_rgb_brdf_test))
        # NOTE(review): MAE/albedo prints below report only the last light's values.
        print(f'PSNRs_test: {np.mean(PSNRs_test_list)}')
        print(f'PSNRs_rgb_brdf_test: {np.mean(PSNRs_rgb_brdf_test_list)}')
        print(f'MAE_test: {MAE_test}')
        print(f'PSNR_albedo_single: {PSNR_albedo_single}')
        print(f'PSNR_albedo_three: {PSNR_albedo_three}')
def reconstruction(args):
    """Train a multi-light TensoIR model end-to-end, with periodic evaluation and checkpoints.

    Same schedule as the single-light trainer: photometric + regularization
    losses, alpha-mask updates, voxel-grid upsampling, and relighting/BRDF
    losses once the alpha mask exists. Final test rendering loops over every
    learned light. Outputs land under ``args.basedir/args.expname``.

    BUGFIX: guarded ``pbar.set_description`` by rank -- on non-zero distributed
    ranks ``pbar`` is a plain ``range()`` and the call raised AttributeError.
    """
    # init dataset
    dataset = dataset_dict[args.dataset_name]
    train_dataset = dataset(
        args.datadir,
        args.hdrdir,
        split='train',
        downsample=args.downsample_train,
        light_name=args.light_name,
        light_name_list=args.light_name_list,
        light_rotation=args.light_rotation
    )
    test_dataset = dataset(
        args.datadir,
        args.hdrdir,
        split='test',
        downsample=args.downsample_test,
        light_name=args.light_name,
        light_name_list=args.light_name_list,
        light_rotation=args.light_rotation
    )
    # if is_distributed:
    #     train_sampler = torch.utils.data.DistributedSampler(train_dataset, num_replicas=dist.get_world_size(),
    #                                                         rank=dist.get_rank())
    #     train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=100, sampler=train_sampler,
    #                                                    num_workers=16, drop_last=True, pin_memory=True)
    # else:
    #     train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=100, num_workers=16,
    #                                                    drop_last=True, shuffle=True)
    print(f'Finish reading dataset')
    white_bg = train_dataset.white_bg
    near_far = train_dataset.near_far
    ndc_ray = args.ndc_ray
    # init resolution / schedules
    upsamp_list = args.upsamp_list
    update_AlphaMask_list = args.update_AlphaMask_list
    n_lamb_sigma = args.n_lamb_sigma
    n_lamb_sh = args.n_lamb_sh
    if args.add_timestamp:
        logfolder = f'{args.basedir}/{args.expname}{datetime.datetime.now().strftime("-%Y%m%d-%H%M%S")}'
    else:
        logfolder = f'{args.basedir}/{args.expname}'
    # init log folders and tensorboard writer
    os.makedirs(logfolder, exist_ok=True)
    os.makedirs(f'{logfolder}/imgs_vis', exist_ok=True)
    os.makedirs(f'{logfolder}/checkpoints', exist_ok=True)
    summary_writer = SummaryWriter(logfolder)
    # copy the config file into the log folder for provenance
    os.system(f'cp {args.config} {logfolder}')
    # init parameters
    aabb = train_dataset.scene_bbox.to(device)
    reso_cur = N_to_reso(args.N_voxel_init, aabb)  # number of voxels in each direction
    nSamples = min(args.nSamples, cal_n_samples(reso_cur, args.step_ratio))
    if args.ckpt is not None:
        # Resume: the checkpoint carries the model constructor kwargs.
        ckpt = torch.load(args.ckpt, map_location=device)
        kwargs = ckpt['kwargs']
        kwargs.update({'device': device})
        tensoIR = eval(args.model_name)(**kwargs)
        tensoIR.load(ckpt)
    else:
        tensoIR = eval(args.model_name)(
            aabb,
            reso_cur,
            device,
            density_n_comp=n_lamb_sigma,
            appearance_n_comp=n_lamb_sh,
            app_dim=args.data_dim_color,
            near_far=near_far,
            shadingMode=args.shadingMode,
            alphaMask_thres=args.alpha_mask_thre,
            density_shift=args.density_shift,
            distance_scale=args.distance_scale,
            pos_pe=args.pos_pe,
            view_pe=args.view_pe,
            fea_pe=args.fea_pe,
            featureC=args.featureC,
            step_ratio=args.step_ratio,
            fea2denseAct=args.fea2denseAct,
            normals_kind=args.normals_kind,
            light_rotation=args.light_rotation,
            light_name_list=args.light_name_list,
            light_kind=args.light_kind,
            dataset=train_dataset,
            numLgtSGs=args.numLgtSGs,
        )
    grad_vars = tensoIR.get_optparam_groups(args.lr_init, args.lr_basis)
    if args.lr_decay_iters > 0:
        lr_factor = args.lr_decay_target_ratio ** (1 / (args.lr_decay_iters))
    else:
        args.lr_decay_iters = args.n_iters
        lr_factor = args.lr_decay_target_ratio ** (1 / (args.lr_decay_iters))
    print("lr decay", args.lr_decay_target_ratio, args.lr_decay_iters)
    optimizer = torch.optim.Adam(grad_vars, betas=(0.9, 0.99))
    # Upsampling targets: linear in logarithmic (voxel-count) space.
    N_voxel_list = (torch.round(torch.exp(
        torch.linspace(np.log(args.N_voxel_init), np.log(args.N_voxel_final), len(upsamp_list) + 1))).long()).tolist()[1:]
    torch.cuda.empty_cache()
    PSNRs_test, PSNRs_rgb_brdf_test = [0], [0]
    PSNRs_rgb, PSNRs_rgb_brdf = [], []
    Ortho_reg_weight = args.Ortho_weight
    print("initial Ortho_reg_weight", Ortho_reg_weight)
    L1_reg_weight = args.L1_weight_inital
    print("initial L1_reg_weight", L1_reg_weight)
    TV_weight_density, TV_weight_app = args.TV_weight_density, args.TV_weight_app
    tvreg = TVLoss()
    print(f"initial TV_weight density: {TV_weight_density} appearance: {TV_weight_app}")
    all_rays, all_rgbs, all_masks, all_light_idx = train_dataset.all_rays, train_dataset.all_rgbs, train_dataset.all_masks, train_dataset.all_light_idx
    # Filter rays outside the bbox
    rays_filtered, filter_mask = tensoIR.filtering_rays(all_rays, bbox_only=True)
    rgbs_filtered = all_rgbs[filter_mask, :]  # [filtered(N*H*W), 3]
    light_idx_filtered = all_light_idx[filter_mask, :]  # [filtered(N*H*W), 1]
    trainingSampler = SimpleSampler(rays_filtered.shape[0], args.batch_size)
    # Only rank 0 owns a tqdm bar; every other rank iterates a plain range().
    pbar = tqdm(range(args.n_iters), miniters=args.progress_refresh_rate, file=sys.stdout) if (
        (not is_distributed) or (dist.get_rank() == 0)) else range(args.n_iters)
    relight_flag = False  # relighting losses start after the first alpha-mask update
    for iteration in pbar:
        # Sample a batch_size chunk from the filtered rays.
        rays_idx = trainingSampler.nextids()
        rays_train = rays_filtered[rays_idx]
        rgb_train = rgbs_filtered[rays_idx].to(device)
        light_idx_train = light_idx_filtered[rays_idx].to(device)
        rgb_with_brdf_train = rgb_train
        ret_kw = renderer(
            rays=rays_train,  # [batch_size, 6]
            normal_gt=None,  # [batch_size, 3]
            light_idx=light_idx_train,  # [batch_size, 1]
            tensoIR=tensoIR,  # nn.Module
            N_samples=nSamples,  # int
            white_bg=white_bg,  # bool
            ndc_ray=ndc_ray,
            device=device,
            sample_method=args.light_sample_train,
            chunk_size=args.relight_chunk_size,
            is_train=True,
            is_relight=relight_flag,
            args=args
        )
        total_loss = 0
        # Placeholder so the later .detach().item() is valid before relighting starts.
        loss_rgb_brdf = torch.tensor(1e-6).to(device)
        loss_rgb = torch.mean((ret_kw['rgb_map'] - rgb_train) ** 2)
        total_loss += loss_rgb
        if Ortho_reg_weight > 0:
            loss_reg = tensoIR.vector_comp_diffs()
            total_loss += Ortho_reg_weight * loss_reg
            summary_writer.add_scalar('train/reg', loss_reg.detach().item(), global_step=iteration)
        if L1_reg_weight > 0:
            loss_reg_L1 = tensoIR.density_L1()
            total_loss += L1_reg_weight * loss_reg_L1
            summary_writer.add_scalar('train/reg_l1', loss_reg_L1.detach().item(), global_step=iteration)
        if TV_weight_density > 0:
            TV_weight_density *= lr_factor  # TV weight decays alongside the lr
            loss_tv = tensoIR.TV_loss_density(tvreg) * TV_weight_density
            total_loss = total_loss + loss_tv
            summary_writer.add_scalar('train/reg_tv_density', loss_tv.detach().item(), global_step=iteration)
        if TV_weight_app > 0:
            TV_weight_app *= lr_factor
            loss_tv = tensoIR.TV_loss_app(tvreg) * TV_weight_app
            total_loss = total_loss + loss_tv
            summary_writer.add_scalar('train/reg_tv_app', loss_tv.detach().item(), global_step=iteration)
        if relight_flag:
            loss_rgb_brdf = torch.mean((ret_kw['rgb_with_brdf_map'] - rgb_with_brdf_train) ** 2)
            total_loss += loss_rgb_brdf * args.rgb_brdf_weight
            # exponential growth
            normal_weight_factor = args.normals_loss_enhance_ratio ** ((iteration - update_AlphaMask_list[0]) / (args.n_iters - update_AlphaMask_list[0]))
            BRDF_weight_factor = args.BRDF_loss_enhance_ratio ** ((iteration - update_AlphaMask_list[0]) / (args.n_iters - update_AlphaMask_list[0]))
            if args.normals_diff_weight > 0:
                loss_normals_diff = normal_weight_factor * args.normals_diff_weight * ret_kw['normals_diff_map'].mean()
                total_loss += loss_normals_diff
                summary_writer.add_scalar('train/normals_diff_loss', loss_normals_diff.detach().item(), iteration)
            if args.normals_orientation_weight > 0:
                loss_normals_orientation = normal_weight_factor * args.normals_orientation_weight * ret_kw['normals_orientation_loss_map'].mean()
                total_loss += loss_normals_orientation
                summary_writer.add_scalar('train/normals_orientation_loss', loss_normals_orientation.detach().item(), iteration)
            if args.roughness_smoothness_loss_weight > 0:
                roughness_smoothness_loss = BRDF_weight_factor * args.roughness_smoothness_loss_weight * ret_kw['roughness_smoothness_loss']
                total_loss += roughness_smoothness_loss
                summary_writer.add_scalar('train/roughness_smoothness_loss', roughness_smoothness_loss.detach().item(), iteration)
            if args.albedo_smoothness_loss_weight > 0:
                albedo_smoothness_loss = BRDF_weight_factor * args.albedo_smoothness_loss_weight * ret_kw['albedo_smoothness_loss']
                total_loss += albedo_smoothness_loss
                summary_writer.add_scalar('train/albedo_smoothness_loss', albedo_smoothness_loss.detach().item(), iteration)
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()
        total_loss = total_loss.detach().item()
        loss_rgb = loss_rgb.detach().item()
        loss_rgb_brdf = loss_rgb_brdf.detach().item()
        PSNRs_rgb.append(-10.0 * np.log(loss_rgb) / np.log(10.0))
        if relight_flag:
            PSNRs_rgb_brdf.append(-10.0 * np.log(loss_rgb_brdf) / np.log(10.0))
        else:
            PSNRs_rgb_brdf.append(0.0)
        if (not is_distributed) or (dist.get_rank() == 0):
            summary_writer.add_scalar('train/mse', total_loss, global_step=iteration)
            summary_writer.add_scalar('train/PSNRs_rgb', PSNRs_rgb[-1], global_step=iteration)
            summary_writer.add_scalar('train/mse_rgb', loss_rgb, global_step=iteration)
            if relight_flag:
                summary_writer.add_scalar('train/PSNRs_rgb_brdf', PSNRs_rgb_brdf[-1], global_step=iteration)
                summary_writer.add_scalar('train/mse_rgb_brdf', loss_rgb_brdf, global_step=iteration)
        # Print the current values of the losses.
        if iteration % args.progress_refresh_rate == 0:
            # BUGFIX: only rank 0 has a tqdm bar; other ranks hold a plain
            # range() which has no set_description() and would crash here.
            if (not is_distributed) or (dist.get_rank() == 0):
                pbar.set_description(
                    f'Iteration {iteration:05d} PSNR:'
                    + f' train_rgb = {float(np.mean(PSNRs_rgb)):.2f}'
                    + f' train_rgb_brdf = {float(np.mean(PSNRs_rgb_brdf)):.2f}'
                    + f' test_rgb = {float(np.mean(PSNRs_test)):.2f}'
                    + f' test_rgb_brdf = {float(np.mean(PSNRs_rgb_brdf_test)):.2f}'
                    + f' mse = {float(total_loss):.6f}'
                )
            PSNRs_rgb = []
            PSNRs_rgb_brdf = []
        # Evaluate on testing dataset
        if iteration % args.vis_every == args.vis_every - 1 and args.N_vis != 0 and relight_flag:
            PSNRs_test, PSNRs_rgb_brdf_test, MAE_test, \
            PSNR_albedo_single, PSNR_albedo_three = evaluation_iter_TensoIR_general_multi_lights(
                test_dataset,
                tensoIR,
                args,
                renderer,
                f'{logfolder}/imgs_vis/',
                prtx=f'{iteration:06d}_',
                N_samples=nSamples,
                white_bg=white_bg,
                ndc_ray=ndc_ray,
                compute_extra_metrics=False,
                logger=summary_writer,
                step=iteration,
                device=device,
            )
            summary_writer.add_scalar('test/psnr_rgb', np.mean(PSNRs_test), global_step=iteration)
            summary_writer.add_scalar('test/psnr_rgb_brdf', np.mean(PSNRs_rgb_brdf_test), global_step=iteration)
            summary_writer.add_scalar('test/mae', MAE_test, global_step=iteration)
            summary_writer.add_scalar('test/psnr_albedo_single', PSNR_albedo_single, global_step=iteration)
            summary_writer.add_scalar('test/psnr_albedo_three', PSNR_albedo_three, global_step=iteration)
        # Save iteration models
        if iteration % args.save_iters == 0:
            tensoIR.save(f'{logfolder}/checkpoints/{args.expname}_{iteration}.th')
        # Update learning rate
        for param_group in optimizer.param_groups:
            param_group['lr'] = param_group['lr'] * lr_factor
        if iteration in update_AlphaMask_list:
            # NOTE(review): reso_mask is unbound if the grid already exceeds
            # 256^3 at the first mask update -- schedules in practice avoid this.
            if reso_cur[0] * reso_cur[1] * reso_cur[2] < 256 ** 3:  # update volume resolution
                reso_mask = reso_cur
            new_aabb = tensoIR.updateAlphaMask(tuple(reso_mask))
            if iteration == update_AlphaMask_list[0]:
                tensoIR.shrink(new_aabb)
                # tensorVM.alphaMask = None
                L1_reg_weight = args.L1_weight_rest
                print("continuing L1_reg_weight", L1_reg_weight)
                # The GPU demands will decrease significantly after AlphaMask is generated, so we can begin relighting training
                relight_flag = True
                torch.cuda.empty_cache()
                TV_weight_density = 0
                TV_weight_app = 0
            if not args.ndc_ray and iteration == update_AlphaMask_list[1]:
                # Filter rays outside the (shrunk) bbox and rebuild the sampler.
                rays_filtered, filter_mask = tensoIR.filtering_rays(all_rays, bbox_only=True)
                rgbs_filtered = all_rgbs[filter_mask, :]  # [filtered(N*H*W), 3]
                light_idx_filtered = all_light_idx[filter_mask, :]  # [filtered(N*H*W), 1]
                trainingSampler = SimpleSampler(rays_filtered.shape[0], args.batch_size)
        if iteration in upsamp_list:
            n_voxels = N_voxel_list.pop(0)
            reso_cur = N_to_reso(n_voxels, tensoIR.aabb)
            nSamples = min(args.nSamples, cal_n_samples(reso_cur, args.step_ratio))
            tensoIR.upsample_volume_grid(reso_cur)
            if args.lr_upsample_reset:
                print("reset lr to initial")
                lr_scale = 1  # 0.1 ** (iteration / args.n_iters)
            else:
                lr_scale = args.lr_decay_target_ratio ** (iteration / args.n_iters)
            # Rebuild the optimizer because upsampling replaced the parameters.
            grad_vars = tensoIR.get_optparam_groups(args.lr_init * lr_scale, args.lr_basis * lr_scale)
            optimizer = torch.optim.Adam(grad_vars, betas=(0.9, 0.99))
    tensoIR.save(f'{logfolder}/{args.expname}.th')
    # if args.render_train:
    #     os.makedirs(f'{logfolder}/imgs_train_all', exist_ok=True)
    #     train_dataset = dataset(args.datadir, split='train', downsample=args.downsample_train, is_stack=True)
    #     PSNRs_test = evaluation(train_dataset, tensoIR, args, renderer, visibility_net, f'{logfolder}/imgs_train_all/',
    #                             N_vis=-1, N_samples=-1, white_bg=white_bg, ndc_ray=ndc_ray, device=device)
    #     print(f'======> {args.expname} test all psnr: {np.mean(PSNRs_test)} <========================')
    if args.render_test:
        os.makedirs(f'{logfolder}/imgs_test_all', exist_ok=True)
        PSNRs_test_list, PSNRs_rgb_brdf_test_list = [], []
        # Final evaluation once per learned light; each light gets a subfolder.
        for light_idx_to_test in range(tensoIR.light_num):
            cur_light_name = tensoIR.light_name_list[light_idx_to_test]
            os.makedirs(f'{logfolder}/imgs_test_all/{cur_light_name}', exist_ok=True)
            PSNRs_test, PSNRs_rgb_brdf_test, MAE_test,\
            PSNR_albedo_single, PSNR_albedo_three = evaluation_iter_TensoIR_general_multi_lights(
                test_dataset,
                tensoIR,
                args,
                renderer,
                f'{logfolder}/imgs_test_all/{cur_light_name}/',
                N_samples=-1,
                white_bg=white_bg,
                ndc_ray=ndc_ray,
                device=device,
                test_all=True,
                light_idx_to_test=light_idx_to_test,
            )
            PSNRs_test_list.append(np.mean(PSNRs_test))
            PSNRs_rgb_brdf_test_list.append(np.mean(PSNRs_rgb_brdf_test))
        summary_writer.add_scalar('test/psnr_rgb_all', np.mean(PSNRs_test_list), global_step=iteration)
        summary_writer.add_scalar('test/psnr_rgb_brdf_all', np.mean(PSNRs_rgb_brdf_test_list), global_step=iteration)
        summary_writer.add_scalar('test/mae_all', MAE_test, global_step=iteration)
        summary_writer.add_scalar('test/psnr_albedo_single_all', PSNR_albedo_single, global_step=iteration)
        summary_writer.add_scalar('test/psnr_albedo_three_all', PSNR_albedo_three, global_step=iteration)
        print(f'======> {args.expname} test all: nvs psnr: {np.mean(PSNRs_test_list)}, nvs with brdf psnr: {np.mean(PSNRs_rgb_brdf_test_list)}, MAE: {MAE_test} <========================')
    # if args.render_path:
    #     c2ws = test_dataset.render_path
    #     # c2ws = test_dataset.poses
    #     print('========>', c2ws.shape)
    #     os.makedirs(f'{logfolder}/imgs_path_all', exist_ok=True)
    #     evaluation_path(test_dataset, tensoIR, c2ws, renderer, visibility_net, f'{logfolder}/imgs_path_all/',
    #                     N_vis=-1, N_samples=-1, white_bg=white_bg, ndc_ray=ndc_ray, device=device)
if __name__ == '__main__':
    # Pin every source of randomness so training runs are reproducible.
    _SEED = 20211202
    torch.set_default_dtype(torch.float32)
    torch.manual_seed(_SEED)
    torch.cuda.manual_seed_all(_SEED)
    np.random.seed(_SEED)
    random.seed(_SEED)
    os.environ['PYTHONHASHSEED'] = str(_SEED)
    # Mesh export may run alongside rendering/training.
    if args.export_mesh:
        export_mesh(args)
    if args.render_only and (args.render_test or args.render_path):
        render_test(args)
    else:
        reconstruction(args)
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
train_tensoIR_rotated_multi_lights.py | Python | import os
import sys
import torch
from tqdm.auto import tqdm
from opt import config_parser
import datetime
import torch.distributed as dist
from torch.utils.tensorboard import SummaryWriter
from renderer import *
from models.tensoRF_rotated_lights import raw2alpha, TensorVMSplit, AlphaGridMask
from utils import *
from dataLoader import dataset_dict
# Parse options once at import time; `args`, `is_distributed`, `device` and
# `renderer` are read as module-level globals by the functions below.
args = config_parser()
print(args)
# Setup multi-device training
# torchrun/launch exports WORLD_SIZE; default to a single process otherwise.
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
is_distributed = num_gpus > 1
if is_distributed:
    # Bind this process to its GPU *before* creating the NCCL process group.
    torch.cuda.set_device(args.local_rank)
    torch.distributed.init_process_group(backend="nccl", init_method="env://")
    synchronize()
device = torch.device("cuda:{}".format(args.local_rank) if torch.cuda.is_available() else "cpu")
print(f'Running with {num_gpus} GPU(s)...')
renderer = Renderer_TensoIR_train
class SimpleSampler:
    """Serves random index batches over ``total`` items.

    A fresh random permutation is drawn whenever fewer than ``batch``
    unused indices remain, so every returned chunk has exactly ``batch``
    elements (a short tail of each permutation may be skipped).
    """

    def __init__(self, total, batch):
        self.total = total
        self.batch = batch
        # Start past the end so the first nextids() call triggers a shuffle.
        self.curr = total
        self.ids = None

    def nextids(self):
        """Return a LongTensor of ``batch`` indices from the current permutation."""
        self.curr = self.curr + self.batch
        needs_reshuffle = self.curr + self.batch > self.total
        if needs_reshuffle:
            perm = np.random.permutation(self.total)
            self.ids = torch.LongTensor(perm)
            self.curr = 0
        start, stop = self.curr, self.curr + self.batch
        return self.ids[start:stop]
@torch.no_grad()
def export_mesh(args):
    """Load a trained checkpoint and export its density field as a .ply mesh.

    The output path is the checkpoint path with ``.th`` swapped for ``.ply``.
    Reads the module-level global ``device``.
    """
    checkpoint = torch.load(args.ckpt, map_location=device)
    model_kwargs = checkpoint['kwargs']
    model_kwargs['device'] = device
    # NOTE(review): eval() on a config-supplied class name executes arbitrary
    # code if the config is untrusted.
    tensoIR = eval(args.model_name)(**model_kwargs)
    tensoIR.load(checkpoint)
    alpha, _ = tensoIR.getDenseAlpha()
    convert_sdf_samples_to_ply(alpha.cpu(), f'{args.ckpt[:-3]}.ply', bbox=tensoIR.aabb.cpu(), level=0.005)
def render_test(args):
    """Evaluate a trained checkpoint on the test split and print NVS/BRDF
    PSNR, normal MAE and albedo PSNR metrics.

    Reads the module-level globals ``device`` and ``renderer``.
    """
    # init dataset
    dataset = dataset_dict[args.dataset_name]
    test_dataset = dataset(args.datadir, args.hdrdir, light_rotation=args.light_rotation, split='test', downsample=args.downsample_train, is_stack=False,
                           sub=args.test_number)
    white_bg = test_dataset.white_bg
    ndc_ray = args.ndc_ray
    if not os.path.exists(args.ckpt):
        print('the ckpt path does not exists!!')
        return
    # Rebuild the model from the constructor kwargs stored in the checkpoint.
    # NOTE(review): eval() on a config-supplied class name executes arbitrary
    # code if the config is untrusted.
    ckpt = torch.load(args.ckpt, map_location=device)
    kwargs = ckpt['kwargs']
    kwargs.update({'device': device})
    tensoIR = eval(args.model_name)(**kwargs)
    tensoIR.load(ckpt)
    # Time-stamped output folder so repeated evaluations never collide.
    logfolder = f'{args.basedir}/test_{args.expname}{datetime.datetime.now().strftime("-%Y%m%d-%H%M%S")}'
    # if args.render_train:
    #     os.makedirs(f'{logfolder}/imgs_train_all', exist_ok=True)
    #     train_dataset = dataset(args.datadir, args.hdrdir, split='train', downsample=args.downsample_train,
    #                             is_stack=False)
    #     evaluation_all(train_dataset, tensoIR, args, renderer,visibility_net, f'{logfolder}/imgs_test_all/',
    #                    N_samples=-1, white_bg=white_bg, ndc_ray=ndc_ray, device=device, test_all=True)
    if args.render_test:
        # N_samples=-1 lets the evaluator choose its own sample count.
        PSNRs_test, PSNRs_rgb_brdf_test, MAE_test,\
        PSNR_albedo_single, PSNR_albedo_three = evaluation_iter_TensoIR(
            test_dataset,
            tensoIR,
            args,
            renderer,
            f'{logfolder}/imgs_test_all/',
            N_samples=-1,
            white_bg=white_bg,
            ndc_ray=ndc_ray,
            device=device,
            test_all=True
        )
        print(f'PSNRs_test: {PSNRs_test}')
        print(f'PSNRs_rgb_brdf_test: {PSNRs_rgb_brdf_test}')
        print(f'MAE_test: {MAE_test}')
        print(f'PSNR_albedo_single: {PSNR_albedo_single}')
        print(f'PSNR_albedo_three: {PSNR_albedo_three}')
    # if args.render_path:
    #     c2ws = test_dataset.render_path
    #     os.makedirs(f'{logfolder}/{args.expname}/imgs_path_all', exist_ok=True)
    #     evaluation_path(test_dataset, tensoIR, c2ws, renderer, f'{logfolder}/{args.expname}/imgs_path_all/',
    #                     N_vis=-1, N_samples=-1, white_bg=white_bg, ndc_ray=ndc_ray, device=device)
def reconstruction(args):
    """Train TensoIR on the rotated multi-light dataset.

    Two stages driven by ``relight_flag``: plain radiance-field training
    first; after the first AlphaMask update the BRDF/relighting losses are
    switched on.  Reads the module-level globals ``device``, ``renderer``
    and ``is_distributed``.
    """
    # init dataset
    dataset = dataset_dict[args.dataset_name]
    train_dataset = dataset(
        args.datadir,
        args.hdrdir,
        split='train',
        downsample=args.downsample_train,
        light_name=args.light_name,
        light_rotation=args.light_rotation
    )
    test_dataset = dataset(
        args.datadir,
        args.hdrdir,
        split='test',
        downsample=args.downsample_test,
        light_name=args.light_name,
        light_rotation=args.light_rotation
    )
    # if is_distributed:
    #     train_sampler = torch.utils.data.DistributedSampler(train_dataset, num_replicas=dist.get_world_size(),
    #                                                         rank=dist.get_rank())
    #     train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=100, sampler=train_sampler,
    #                                                    num_workers=16, drop_last=True, pin_memory=True)
    # else:
    #     train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=100, num_workers=16,
    #                                                    drop_last=True, shuffle=True)
    print(f'Finish reading dataset')
    white_bg = train_dataset.white_bg
    near_far = train_dataset.near_far
    ndc_ray = args.ndc_ray
    # init resolution
    upsamp_list = args.upsamp_list
    update_AlphaMask_list = args.update_AlphaMask_list
    n_lamb_sigma = args.n_lamb_sigma
    n_lamb_sh = args.n_lamb_sh
    if args.add_timestamp:
        logfolder = f'{args.basedir}/{args.expname}{datetime.datetime.now().strftime("-%Y%m%d-%H%M%S")}'
    else:
        logfolder = f'{args.basedir}/{args.expname}'
    # init log file
    os.makedirs(logfolder, exist_ok=True)
    os.makedirs(f'{logfolder}/imgs_vis', exist_ok=True)
    os.makedirs(f'{logfolder}/checkpoints', exist_ok=True)
    summary_writer = SummaryWriter(logfolder)
    # copy the config file into the log folder
    os.system(f'cp {args.config} {logfolder}')
    # init parameters
    aabb = train_dataset.scene_bbox.to(device)
    reso_cur = N_to_reso(args.N_voxel_init, aabb)  # number of voxels in each direction
    nSamples = min(args.nSamples, cal_n_samples(reso_cur, args.step_ratio))
    if args.ckpt is not None:
        # Resume: rebuild the model from constructor kwargs saved in the ckpt.
        # NOTE(review): eval() on a config-supplied class name executes
        # arbitrary code if the config is untrusted.
        ckpt = torch.load(args.ckpt, map_location=device)
        kwargs = ckpt['kwargs']
        kwargs.update({'device': device})
        tensoIR = eval(args.model_name)(**kwargs)
        tensoIR.load(ckpt)
    else:
        tensoIR = eval(args.model_name)(aabb,
                                        reso_cur,
                                        device,
                                        density_n_comp=n_lamb_sigma,
                                        appearance_n_comp=n_lamb_sh,
                                        app_dim=args.data_dim_color,
                                        near_far=near_far,
                                        shadingMode=args.shadingMode,
                                        alphaMask_thres=args.alpha_mask_thre,
                                        density_shift=args.density_shift,
                                        distance_scale=args.distance_scale,
                                        pos_pe=args.pos_pe,
                                        view_pe=args.view_pe,
                                        fea_pe=args.fea_pe,
                                        featureC=args.featureC,
                                        step_ratio=args.step_ratio,
                                        fea2denseAct=args.fea2denseAct,
                                        normals_kind = args.normals_kind,
                                        light_rotation=args.light_rotation,
                                        light_kind=args.light_kind,
                                        dataset=train_dataset,
                                        numLgtSGs = args.numLgtSGs,
                                        )
    grad_vars = tensoIR.get_optparam_groups(args.lr_init, args.lr_basis)
    if args.lr_decay_iters > 0:
        lr_factor = args.lr_decay_target_ratio ** (1 / (args.lr_decay_iters))
    else:
        args.lr_decay_iters = args.n_iters
        lr_factor = args.lr_decay_target_ratio ** (1 / (args.lr_decay_iters))
    print("lr decay", args.lr_decay_target_ratio, args.lr_decay_iters)
    optimizer = torch.optim.Adam(grad_vars, betas=(0.9, 0.99))
    # linear in logrithmic space
    # Voxel counts per upsampling step; drop the first entry (initial reso).
    N_voxel_list = (torch.round(torch.exp(
        torch.linspace(np.log(args.N_voxel_init), np.log(args.N_voxel_final), len(upsamp_list) + 1))).long()).tolist()[1:]
    torch.cuda.empty_cache()
    PSNRs_test, PSNRs_rgb_brdf_test = [0], [0]
    PSNRs_rgb, PSNRs_rgb_brdf = [], []
    Ortho_reg_weight = args.Ortho_weight
    print("initial Ortho_reg_weight", Ortho_reg_weight)
    L1_reg_weight = args.L1_weight_inital
    print("initial L1_reg_weight", L1_reg_weight)
    TV_weight_density, TV_weight_app = args.TV_weight_density, args.TV_weight_app
    tvreg = TVLoss()
    print(f"initial TV_weight density: {TV_weight_density} appearance: {TV_weight_app}")
    all_rays, all_rgbs, all_masks, all_light_idx = train_dataset.all_rays, train_dataset.all_rgbs, train_dataset.all_masks, train_dataset.all_light_idx
    # Filter rays outside the bbox
    rays_filtered, filter_mask = tensoIR.filtering_rays(all_rays, bbox_only=True)
    rgbs_filtered = all_rgbs[filter_mask, :]  # [filtered(N*H*W), 3]
    light_idx_filtered = all_light_idx[filter_mask, :]  # [filtered(N*H*W), 1]
    trainingSampler = SimpleSampler(rays_filtered.shape[0], args.batch_size)
    # Only rank 0 draws the progress bar; other ranks iterate a plain range.
    pbar = tqdm(range(args.n_iters), miniters=args.progress_refresh_rate, file=sys.stdout) if (
        (not is_distributed) or (dist.get_rank() == 0)) else range(args.n_iters)
    relight_flag = False
    for iteration in pbar:
        # Sample batch_size chunk from all rays
        rays_idx = trainingSampler.nextids()
        rays_train = rays_filtered[rays_idx]
        rgb_train = rgbs_filtered[rays_idx].to(device)
        light_idx_train = light_idx_filtered[rays_idx].to(device)
        # The BRDF branch is supervised with the same captured pixels.
        rgb_with_brdf_train = rgb_train
        ret_kw = renderer(
            rays=rays_train,  # [batch_size, 6]
            normal_gt=None,  # [batch_size, 3]
            light_idx=light_idx_train,  # [batch_size, 1]
            tensoIR=tensoIR,  # nn.Module
            N_samples=nSamples,  # int
            white_bg=white_bg,  # bool
            ndc_ray=ndc_ray,
            device=device,
            sample_method=args.light_sample_train,
            chunk_size=args.relight_chunk_size,
            is_train=True,
            is_relight=relight_flag,
            args=args
        )
        total_loss = 0
        # Placeholder so logging below works before relighting starts.
        loss_rgb_brdf = torch.tensor(1e-6).to(device)
        loss_rgb = torch.mean((ret_kw['rgb_map'] - rgb_train) ** 2)
        total_loss += loss_rgb
        if Ortho_reg_weight > 0:
            loss_reg = tensoIR.vector_comp_diffs()
            total_loss += Ortho_reg_weight * loss_reg
            summary_writer.add_scalar('train/reg', loss_reg.detach().item(), global_step=iteration)
        if L1_reg_weight > 0:
            loss_reg_L1 = tensoIR.density_L1()
            total_loss += L1_reg_weight * loss_reg_L1
            summary_writer.add_scalar('train/reg_l1', loss_reg_L1.detach().item(), global_step=iteration)
        if TV_weight_density > 0:
            # TV weights decay together with the learning rate.
            TV_weight_density *= lr_factor
            loss_tv = tensoIR.TV_loss_density(tvreg) * TV_weight_density
            total_loss = total_loss + loss_tv
            summary_writer.add_scalar('train/reg_tv_density', loss_tv.detach().item(), global_step=iteration)
        if TV_weight_app > 0:
            TV_weight_app *= lr_factor
            loss_tv = tensoIR.TV_loss_app(tvreg)*TV_weight_app
            total_loss = total_loss + loss_tv
            summary_writer.add_scalar('train/reg_tv_app', loss_tv.detach().item(), global_step=iteration)
        if relight_flag:
            loss_rgb_brdf = torch.mean((ret_kw['rgb_with_brdf_map'] - rgb_with_brdf_train) ** 2)
            total_loss += loss_rgb_brdf * args.rgb_brdf_weight
            # exponential growth
            # Ramp the normal/BRDF loss weights from the start of relighting
            # (update_AlphaMask_list[0]) to the end of training.
            normal_weight_factor = args.normals_loss_enhance_ratio ** ((iteration- update_AlphaMask_list[0])/ (args.n_iters - update_AlphaMask_list[0]))
            BRDF_weight_factor = args.BRDF_loss_enhance_ratio ** ((iteration- update_AlphaMask_list[0])/ (args.n_iters - update_AlphaMask_list[0]))
            if args.normals_diff_weight > 0:
                loss_normals_diff = normal_weight_factor * args.normals_diff_weight * ret_kw['normals_diff_map'].mean()
                total_loss += loss_normals_diff
                summary_writer.add_scalar('train/normals_diff_loss', loss_normals_diff.detach().item(), iteration)
            if args.normals_orientation_weight > 0:
                loss_normals_orientation = normal_weight_factor * args.normals_orientation_weight * ret_kw['normals_orientation_loss_map'].mean()
                total_loss += loss_normals_orientation
                summary_writer.add_scalar('train/normals_orientation_loss', loss_normals_orientation.detach().item(), iteration)
            if args.roughness_smoothness_loss_weight > 0:
                roughness_smoothness_loss = BRDF_weight_factor * args.roughness_smoothness_loss_weight * ret_kw['roughness_smoothness_loss']
                total_loss += roughness_smoothness_loss
                summary_writer.add_scalar('train/roughness_smoothness_loss', roughness_smoothness_loss.detach().item(), iteration)
            if args.albedo_smoothness_loss_weight > 0:
                albedo_smoothness_loss = BRDF_weight_factor * args.albedo_smoothness_loss_weight * ret_kw['albedo_smoothness_loss']
                total_loss += albedo_smoothness_loss
                summary_writer.add_scalar('train/albedo_smoothness_loss', albedo_smoothness_loss.detach().item(), iteration)
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()
        # Detach to plain floats for logging.
        total_loss = total_loss.detach().item()
        loss_rgb = loss_rgb.detach().item()
        loss_rgb_brdf = loss_rgb_brdf.detach().item()
        PSNRs_rgb.append(-10.0 * np.log(loss_rgb) / np.log(10.0))
        if relight_flag:
            PSNRs_rgb_brdf.append(-10.0 * np.log(loss_rgb_brdf) / np.log(10.0))
        else:
            PSNRs_rgb_brdf.append(0.0)
        # Only rank 0 logs and drives the progress bar.
        if (not is_distributed) or (dist.get_rank() == 0):
            summary_writer.add_scalar('train/mse', total_loss, global_step=iteration)
            summary_writer.add_scalar('train/PSNRs_rgb', PSNRs_rgb[-1], global_step=iteration)
            summary_writer.add_scalar('train/mse_rgb', loss_rgb, global_step=iteration)
            if relight_flag:
                summary_writer.add_scalar('train/PSNRs_rgb_brdf', PSNRs_rgb_brdf[-1], global_step=iteration)
                summary_writer.add_scalar('train/mse_rgb_brdf', loss_rgb_brdf, global_step=iteration)
            # Print the current values of the losses.
            if iteration % args.progress_refresh_rate == 0:
                pbar.set_description(
                    f'Iteration {iteration:05d} PSNR:'
                    + f' train_rgb = {float(np.mean(PSNRs_rgb)):.2f}'
                    + f' train_rgb_brdf = {float(np.mean(PSNRs_rgb_brdf)):.2f}'
                    + f' test_rgb = {float(np.mean(PSNRs_test)):.2f}'
                    + f' test_rgb_brdf = {float(np.mean(PSNRs_rgb_brdf_test)):.2f}'
                    + f' mse = {float(total_loss):.6f}'
                )
                PSNRs_rgb = []
                PSNRs_rgb_brdf = []
        # Evaluate on testing dataset
        if iteration % args.vis_every == args.vis_every - 1 and args.N_vis != 0 and relight_flag:
            PSNRs_test, PSNRs_rgb_brdf_test, MAE_test, \
            PSNR_albedo_single, PSNR_albedo_three \
                = evaluation_iter_TensoIR(
                    test_dataset,
                    tensoIR,
                    args,
                    renderer,
                    f'{logfolder}/imgs_vis/',
                    prtx=f'{iteration:06d}_',
                    N_samples=nSamples,
                    white_bg=white_bg,
                    ndc_ray=ndc_ray,
                    compute_extra_metrics=False,
                    logger=summary_writer,
                    step=iteration,
                    device=device,
                )
            summary_writer.add_scalar('test/psnr_rgb', np.mean(PSNRs_test), global_step=iteration)
            summary_writer.add_scalar('test/psnr_rgb_brdf', np.mean(PSNRs_rgb_brdf_test), global_step=iteration)
            summary_writer.add_scalar('test/mae', MAE_test, global_step=iteration)
            summary_writer.add_scalar('test/psnr_albedo_single', PSNR_albedo_single, global_step=iteration)
            summary_writer.add_scalar('test/psnr_albedo_three', PSNR_albedo_three, global_step=iteration)
        # Save iteration models
        if iteration % args.save_iters == 0:
            tensoIR.save(f'{logfolder}/checkpoints/{args.expname}_{iteration}.th')
        # Update learning rate
        for param_group in optimizer.param_groups:
            param_group['lr'] = param_group['lr'] * lr_factor
        if iteration in update_AlphaMask_list:
            if reso_cur[0] * reso_cur[1] * reso_cur[2] < 256 ** 3:  # update volume resolution
                reso_mask = reso_cur
            new_aabb = tensoIR.updateAlphaMask(tuple(reso_mask))
            if iteration == update_AlphaMask_list[0]:
                # Shrink the volume to the occupied region on the first update.
                tensoIR.shrink(new_aabb)
                # tensorVM.alphaMask = None
                L1_reg_weight = args.L1_weight_rest
                print("continuing L1_reg_weight", L1_reg_weight)
                # The GPU demands will decrease significantly after AlphaMask is generated, so we can begin relighting training
                relight_flag = True
                torch.cuda.empty_cache()
                TV_weight_density = 0
                TV_weight_app = 0
            if not args.ndc_ray and iteration == update_AlphaMask_list[1]:
                # Filter rays outside the bbox
                rays_filtered, filter_mask = tensoIR.filtering_rays(all_rays, bbox_only=True)
                rgbs_filtered = all_rgbs[filter_mask, :]  # [filtered(N*H*W), 3]
                light_idx_filtered = all_light_idx[filter_mask, :]  # [filtered(N*H*W), 1]
                trainingSampler = SimpleSampler(rays_filtered.shape[0], args.batch_size)
        if iteration in upsamp_list:
            n_voxels = N_voxel_list.pop(0)
            reso_cur = N_to_reso(n_voxels, tensoIR.aabb)
            nSamples = min(args.nSamples, cal_n_samples(reso_cur, args.step_ratio))
            tensoIR.upsample_volume_grid(reso_cur)
            if args.lr_upsample_reset:
                print("reset lr to initial")
                lr_scale = 1  # 0.1 ** (iteration / args.n_iters)
            else:
                lr_scale = args.lr_decay_target_ratio ** (iteration / args.n_iters)
            # Parameters changed shape after upsampling: rebuild the optimizer.
            grad_vars = tensoIR.get_optparam_groups(args.lr_init * lr_scale, args.lr_basis * lr_scale)
            optimizer = torch.optim.Adam(grad_vars, betas=(0.9, 0.99))
    tensoIR.save(f'{logfolder}/{args.expname}.th')
    # if args.render_train:
    #     os.makedirs(f'{logfolder}/imgs_train_all', exist_ok=True)
    #     train_dataset = dataset(args.datadir, split='train', downsample=args.downsample_train, is_stack=True)
    #     PSNRs_test = evaluation(train_dataset, tensoIR, args, renderer, visibility_net, f'{logfolder}/imgs_train_all/',
    #                             N_vis=-1, N_samples=-1, white_bg=white_bg, ndc_ray=ndc_ray, device=device)
    #     print(f'======> {args.expname} test all psnr: {np.mean(PSNRs_test)} <========================')
    if args.render_test:
        os.makedirs(f'{logfolder}/imgs_test_all', exist_ok=True)
        PSNRs_test, PSNRs_rgb_brdf_test, MAE_test,\
        PSNR_albedo_single, PSNR_albedo_three = evaluation_iter_TensoIR(
            test_dataset,
            tensoIR,
            args,
            renderer,
            f'{logfolder}/imgs_test_all/',
            N_samples=-1,
            white_bg=white_bg,
            ndc_ray=ndc_ray,
            device=device,
            test_all=True
        )
        summary_writer.add_scalar('test/psnr_rgb_all', np.mean(PSNRs_test), global_step=iteration)
        summary_writer.add_scalar('test/psnr_rgb_brdf_all', np.mean(PSNRs_rgb_brdf_test), global_step=iteration)
        summary_writer.add_scalar('test/mae_all', MAE_test, global_step=iteration)
        summary_writer.add_scalar('test/psnr_albedo_single_all', PSNR_albedo_single, global_step=iteration)
        summary_writer.add_scalar('test/psnr_albedo_three_all', PSNR_albedo_three, global_step=iteration)
        print(f'======> {args.expname} test all: nvs psnr: {np.mean(PSNRs_test)}, nvs with brdf psnr: {np.mean(PSNRs_rgb_brdf_test)}, MAE: {MAE_test} <========================')
    # if args.render_path:
    #     c2ws = test_dataset.render_path
    #     # c2ws = test_dataset.poses
    #     print('========>', c2ws.shape)
    #     os.makedirs(f'{logfolder}/imgs_path_all', exist_ok=True)
    #     evaluation_path(test_dataset, tensoIR, c2ws, renderer, visibility_net, f'{logfolder}/imgs_path_all/',
    #                     N_vis=-1, N_samples=-1, white_bg=white_bg, ndc_ray=ndc_ray, device=device)
if __name__ == '__main__':
    # `random` is used below but is not imported by this file's own import
    # block (it may only be reachable through a star import); import it
    # explicitly so the seeding cannot fail with a NameError.
    import random

    # Seed every RNG source (torch CPU/CUDA, numpy, python `random`, hash
    # seed) so training runs are reproducible.
    torch.set_default_dtype(torch.float32)
    torch.manual_seed(20211202)
    torch.cuda.manual_seed_all(20211202)
    np.random.seed(20211202)
    random.seed(20211202)
    os.environ['PYTHONHASHSEED'] = str(20211202)
    # Mesh export is not exclusive: rendering/training still runs below.
    if args.export_mesh:
        export_mesh(args)
    # --render_only skips training and only evaluates an existing checkpoint.
    if args.render_only and (args.render_test or args.render_path):
        render_test(args)
    else:
        reconstruction(args)
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
utils.py | Python | import cv2, torch
import numpy as np
from PIL import Image
import torchvision.transforms as T
import torch.nn.functional as F
import scipy.signal
mse2psnr = lambda x : -10. * torch.log(x) / torch.log(torch.Tensor([10.]))
def visualize_depth_numpy(depth, minmax=None, cmap=cv2.COLORMAP_JET, mask=None):
    """
    Color-map a depth image for visualization.

    Args:
        depth: (H, W) array; non-positive values are treated as background.
        minmax: optional (min, max) normalization range; computed from the
            positive depths when None.
        cmap: OpenCV colormap id.
        mask: optional boolean mask; masked pixels are pushed to 1.0 (white
            end of the colormap) before coloring.
    Returns:
        (colored_bgr_uint8, [mi, ma])
    """
    x = np.nan_to_num(depth)  # change nan to 0
    if minmax is None:
        positive = x[x > 0]
        if positive.size == 0:
            # All-background frame: np.min on an empty array would raise a
            # ValueError; fall back to a harmless default range.
            mi, ma = 0.0, 1.0
        else:
            mi = np.min(positive)  # get minimum positive depth (ignore background)
            ma = np.max(x)
    else:
        mi, ma = minmax
    x = (x - mi) / (ma - mi + 1e-8)  # normalize to 0~1
    # TODO change mask to white
    if mask is not None:
        x[mask] = 1.0
    x = (255 * x).astype(np.uint8)
    x_ = cv2.applyColorMap(x, cmap)
    return x_, [mi, ma]
def init_log(log, keys):
    """Insert a fresh zero scalar tensor for every name in *keys* and
    return the (mutated) *log* dict."""
    zero_entries = {name: torch.tensor([0.0], dtype=float) for name in keys}
    log.update(zero_entries)
    return log
def visualize_depth(depth, minmax=None, cmap=cv2.COLORMAP_JET):
    """
    Color-map a depth image and return it as a torch tensor.

    Args:
        depth: (H, W) numpy array or torch tensor; non-positive values are
            treated as background.
        minmax: optional (min, max) normalization range; computed from the
            positive depths when None.
        cmap: OpenCV colormap id.
    Returns:
        (tensor of shape (3, H, W), [mi, ma])
    """
    if not isinstance(depth, np.ndarray):  # accept torch tensors too
        depth = depth.cpu().numpy()
    x = np.nan_to_num(depth)  # change nan to 0
    if minmax is None:
        positive = x[x > 0]
        if positive.size == 0:
            # All-background frame: np.min on an empty array would raise a
            # ValueError; fall back to a harmless default range.
            mi, ma = 0.0, 1.0
        else:
            mi = np.min(positive)  # get minimum positive depth (ignore background)
            ma = np.max(x)
    else:
        mi, ma = minmax
    x = (x - mi) / (ma - mi + 1e-8)  # normalize to 0~1
    x = (255 * x).astype(np.uint8)
    x_ = Image.fromarray(cv2.applyColorMap(x, cmap))
    x_ = T.ToTensor()(x_)  # (3, H, W)
    return x_, [mi, ma]
def N_to_reso(n_voxels, bbox):
    """Split *bbox* into roughly *n_voxels* cubes and return the resulting
    per-axis grid resolution as a list of ints."""
    xyz_min, xyz_max = bbox
    extent = xyz_max - xyz_min
    # Edge length of a cube such that the box holds ~n_voxels of them.
    voxel_size = (extent.prod() / n_voxels).pow(1 / 3)
    return (extent / voxel_size).long().tolist()
def cal_n_samples(reso, step_ratio=0.5):
    """Number of ray samples: grid diagonal length divided by the step ratio."""
    diagonal = np.linalg.norm(reso)
    return int(diagonal / step_ratio)
# Module-level cache of instantiated LPIPS networks, keyed by backbone name.
__LPIPS__ = {}
def init_lpips(net_name, device):
    """Build a frozen LPIPS model ('alex' or 'vgg') on *device*."""
    assert net_name in ['alex', 'vgg']
    import lpips
    print(f'init_lpips: lpips_{net_name}')
    model = lpips.LPIPS(net=net_name, version='0.1')
    return model.eval().to(device)
def rgb_lpips(np_gt, np_im, net_name, device):
    """LPIPS distance between two HWC numpy images with values in [0, 1]."""
    if net_name not in __LPIPS__:
        # Lazily build and cache one LPIPS network per backbone.
        __LPIPS__[net_name] = init_lpips(net_name, device)

    def _to_chw(img):
        return torch.from_numpy(img).permute([2, 0, 1]).contiguous().to(device)

    gt = _to_chw(np_gt)
    im = _to_chw(np_im)
    return __LPIPS__[net_name](gt, im, normalize=True).item()
def findItem(items, target):
    """Return the first string in *items* that starts with *target*, or None.

    Uses str.startswith instead of the slice comparison
    ``one[:len(target)] == target`` -- same semantics, clearer intent.
    """
    for item in items:
        if item.startswith(target):
            return item
    return None
''' Evaluation metrics (ssim, lpips)
'''
def rgb_ssim(img0, img1, max_val,
             filter_size=11,
             filter_sigma=1.5,
             k1=0.01,
             k2=0.03,
             return_map=False):
    """SSIM between two (H, W, 3) images.

    Args:
        img0, img1: (H, W, 3) numpy arrays with values in [0, max_val].
        max_val: dynamic range of the inputs (e.g. 1.0).
        filter_size, filter_sigma: Gaussian window size/sigma.
        k1, k2: standard SSIM stabilisation constants.
        return_map: if True, return the per-pixel SSIM map instead of its mean.

    Numerically sensitive port -- kept byte-identical to the upstream
    implementation.
    """
    # Modified from https://github.com/google/mipnerf/blob/16e73dfdb52044dcceb47cda5243a686391a6e0f/internal/math.py#L58
    assert len(img0.shape) == 3
    assert img0.shape[-1] == 3
    assert img0.shape == img1.shape
    # Construct a 1D Gaussian blur filter.
    hw = filter_size // 2
    shift = (2 * hw - filter_size + 1) / 2
    f_i = ((np.arange(filter_size) - hw + shift) / filter_sigma)**2
    filt = np.exp(-0.5 * f_i)
    filt /= np.sum(filt)
    # Blur in x and y (faster than the 2D convolution).
    def convolve2d(z, f):
        return scipy.signal.convolve2d(z, f, mode='valid')
    filt_fn = lambda z: np.stack([
        convolve2d(convolve2d(z[...,i], filt[:, None]), filt[None, :])
        for i in range(z.shape[-1])], -1)
    mu0 = filt_fn(img0)
    mu1 = filt_fn(img1)
    mu00 = mu0 * mu0
    mu11 = mu1 * mu1
    mu01 = mu0 * mu1
    sigma00 = filt_fn(img0**2) - mu00
    sigma11 = filt_fn(img1**2) - mu11
    sigma01 = filt_fn(img0 * img1) - mu01
    # Clip the variances and covariances to valid values.
    # Variance must be non-negative:
    sigma00 = np.maximum(0., sigma00)
    sigma11 = np.maximum(0., sigma11)
    sigma01 = np.sign(sigma01) * np.minimum(
        np.sqrt(sigma00 * sigma11), np.abs(sigma01))
    c1 = (k1 * max_val)**2
    c2 = (k2 * max_val)**2
    numer = (2 * mu01 + c1) * (2 * sigma01 + c2)
    denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2)
    ssim_map = numer / denom
    ssim = np.mean(ssim_map)
    return ssim_map if return_map else ssim
import torch.nn as nn
class TVLoss(nn.Module):
    """Total-variation regularizer for a (B, C, H, W) tensor.

    Penalises squared differences between neighbouring entries along the
    two trailing spatial axes, each normalised by its element count.
    """

    def __init__(self, TVLoss_weight=1):
        super(TVLoss, self).__init__()
        self.TVLoss_weight = TVLoss_weight

    def forward(self, x):
        """Return the weighted TV loss of *x* (shape (B, C, H, W))."""
        batch_size = x.size()[0]
        h_x = x.size()[2]
        w_x = x.size()[3]
        # Guard against H == 1 or W == 1: the difference tensor is then
        # empty and its element count 0, which previously divided by zero
        # (the old commented-out `max(...)` line hinted at this). The sum
        # over an empty tensor is 0, so clamping the count to 1 yields 0.
        count_h = max(self._tensor_size(x[:, :, 1:, :]), 1)
        count_w = max(self._tensor_size(x[:, :, :, 1:]), 1)
        h_tv = torch.pow((x[:, :, 1:, :] - x[:, :, :h_x - 1, :]), 2).sum()
        w_tv = torch.pow((x[:, :, :, 1:] - x[:, :, :, :w_x - 1]), 2).sum()
        return self.TVLoss_weight * 2 * (h_tv / count_h + w_tv / count_w) / batch_size

    def _tensor_size(self, t):
        # Elements per batch item: C * H * W.
        return t.size()[1] * t.size()[2] * t.size()[3]
import plyfile
import skimage.measure
def convert_sdf_samples_to_ply(
    pytorch_3d_sdf_tensor,
    ply_filename_out,
    bbox,
    level=0.5,
    offset=None,
    scale=None,
):
    """
    Convert sdf samples to .ply via marching cubes.

    :param pytorch_3d_sdf_tensor: a torch.FloatTensor of shape (n,n,n)
    :param ply_filename_out: string, path of the filename to save to
    :param bbox: (2, 3) tensor, [min_corner, max_corner] of the volume
    :param level: iso-surface level passed to marching_cubes
    :param offset: optional translation subtracted after scaling
    :param scale: optional divisor applied to the vertices
    This function adapted from: https://github.com/RobotLocomotion/spartan
    """
    numpy_3d_sdf_tensor = pytorch_3d_sdf_tensor.numpy()
    # Physical edge length of one voxel along each axis.
    voxel_size = list((bbox[1]-bbox[0]) / np.array(pytorch_3d_sdf_tensor.shape))
    verts, faces, normals, values = skimage.measure.marching_cubes(
        numpy_3d_sdf_tensor, level=level, spacing=voxel_size
    )
    faces = faces[...,::-1] # inverse face orientation
    # transform from voxel coordinates to camera coordinates
    # note x and y are flipped in the output of marching_cubes
    mesh_points = np.zeros_like(verts)
    mesh_points[:, 0] = bbox[0,0] + verts[:, 0]
    mesh_points[:, 1] = bbox[0,1] + verts[:, 1]
    mesh_points[:, 2] = bbox[0,2] + verts[:, 2]
    # apply additional offset and scale
    if scale is not None:
        mesh_points = mesh_points / scale
    if offset is not None:
        mesh_points = mesh_points - offset
    # try writing to the ply file
    num_verts = verts.shape[0]
    num_faces = faces.shape[0]
    verts_tuple = np.zeros((num_verts,), dtype=[("x", "f4"), ("y", "f4"), ("z", "f4")])
    for i in range(0, num_verts):
        verts_tuple[i] = tuple(mesh_points[i, :])
    faces_building = []
    for i in range(0, num_faces):
        faces_building.append(((faces[i, :].tolist(),)))
    faces_tuple = np.array(faces_building, dtype=[("vertex_indices", "i4", (3,))])
    el_verts = plyfile.PlyElement.describe(verts_tuple, "vertex")
    el_faces = plyfile.PlyElement.describe(faces_tuple, "face")
    ply_data = plyfile.PlyData([el_verts, el_faces])
    print("saving mesh to %s" % (ply_filename_out))
    ply_data.write(ply_filename_out)
# Multi-GPU training
import torch.distributed as dist
def synchronize():
    """
    Barrier across all workers; a no-op when distributed training is not
    available/initialized or only one process is running.
    """
    if not dist.is_available() or not dist.is_initialized():
        return
    if dist.get_world_size() > 1:
        dist.barrier()
| yxlao/TensoIR | 3 | TensoIR Experiments for "Objects with Lighting: A Real-World Dataset for Evaluating Reconstruction and Rendering for Object Relighting" | Python | yxlao | Yixing Lao | HKU-CS |
src/corres/corres_map.py | Python | import itertools
import json
import time
from pathlib import Path
import camtools as ct
import cv2
import igraph as ig
import numpy as np
import open3d as o3d
from tqdm import tqdm
from typing import List, Tuple
import json
class CorresMap:
"""
1. stores correspondences
2. map query and random sample
3. corres post-processing, e.g. filter by ray reprojection distance
4. load and save
"""
@classmethod
def from_npz(cls, corres_path):
"""
Args:
corres_path: Path to the correspondence npz file.
"""
corres_path = Path(corres_path)
if not corres_path.is_file():
raise FileNotFoundError(f"{corres_path} not found.")
if not corres_path.suffix == ".npz":
raise ValueError(f"{corres_path} is not a npz file.")
corres_list = np.load(corres_path)["corres_list"]
return cls(corres_list)
@classmethod
def from_corres_list(cls, corres_list, verbose=True):
"""
Args:
corres_list: List of corres, (n, 7) float32 tensor:
[
[src_index, src_x, src_y, dst_index, dst_x, dst_y, confidence],
[src_index, src_x, src_y, dst_index, dst_x, dst_y, confidence],
...
]
"""
return cls(corres_list, verbose=verbose)
@staticmethod
def sanitize_corres_list(corres_list):
"""
Remove duplications and crete bidirectional corres_list.
"""
return CorresMap.from_corres_list(corres_list, verbose=False).corres_list
def save_npz(self, corres_path):
"""
Args:
corres_path: Path to the correspondence npz file.
"""
corres_path = Path(corres_path)
corres_path.parent.mkdir(parents=True, exist_ok=True)
np.savez_compressed(corres_path, corres_list=self.corres_list)
def __init__(self, corres_list, verbose=True):
"""
Use use the following factory function instead:
- CorresMap.from_npz()
- CorresMap.from_corres_list()
Args:
corres_list: List of corres, (n, 7) float32 np array.
verbose: If True, print warnings if corres_list is not unique or not
bidirectional.
"""
# For compatibility, corres_list may or may not have confidence value
# at the last column. If it does not have confidence value, we will
# append 1.0 to the last column.
if corres_list.shape[1] == 6:
corres_list = np.concatenate(
[corres_list, np.ones((len(corres_list), 1))], axis=1
)
corres_list = corres_list.astype(np.float32)
CorresMap.check_corres_list(corres_list)
self.update_internal_states(corres_list, verbose=verbose)
    def __len__(self):
        # Number of directed correspondences currently stored.
        return len(self.corres_list)
    def update_internal_states(
        self, corres_list, force_bidirectional=True, verbose=True
    ):
        """
        Rebuild every derived lookup structure from *corres_list*.

        States:
            - self.corres_list:
                (n, 7) float32 np array.
            - self.corres_map:
                Key:
                    (src_index, src_x, src_y)
                Value:
                    [
                        [dst_index, dst_x, dst_y, confidence],
                        [dst_index, dst_x, dst_y, confidence],
                        ...
                    ]
            - self.image_pair_to_corres: maps (src_index, dst_index) an array:
                Key:
                    (src_index, dst_index)
                Value:
                    [
                        [src_index, src_x, src_y, dst_index, dst_x, dst_y, confidence],
                        [src_index, src_x, src_y, dst_index, dst_x, dst_y, confidence],
                        ...
                    ]
        """
        CorresMap.check_corres_list(corres_list)
        # List to map.
        # NOTE(review): presumably build_corres_map also deduplicates and
        # mirrors matches (the warning below implies it) -- confirm against
        # its definition, which is outside this chunk.
        self.corres_map = CorresMap.build_corres_map(
            corres_list,
            force_bidirectional=force_bidirectional,
        )
        # Map to list. Keys are sorted so the rebuilt list is deterministic.
        keys = sorted(list(self.corres_map.keys()))
        self.corres_list = []
        for key in tqdm(keys, leave=False, desc="corres_map to corres_list"):
            for value in self.corres_map[key]:
                src_index, src_x, src_y = key
                dst_index, dst_x, dst_y, confidence = value
                corres = [src_index, src_x, src_y, dst_index, dst_x, dst_y, confidence]
                self.corres_list.append(corres)
        self.corres_list = (
            np.array(self.corres_list).astype(np.float32).reshape((-1, 7))
        )
        # Sanity check for the input corres_list.
        if verbose and len(corres_list) != len(self.corres_list):
            print(
                f"Warning: corres_list is not unique or not bidirectional. "
                f"Length {len(corres_list)} -> {len(self.corres_list)}."
            )
        # This accelerates querying image pairs.
        self.image_pair_to_corres = CorresMap.build_image_pair_to_corres(
            self.corres_list
        )
    def update_corres_points_map(self, normalized_Ks, normalized_Ts):
        """
        Updates self.corres_points_map by triangulating every correspondence
        as the closest points between its two viewing rays. This function
        expects all existing states to be up-to-date.

        Args:
            normalized_Ks: List of normalized Ks (3x3 intrinsics).
            normalized_Ts: List of normalized Ts (4x4 extrinsics).

        self.corres_points_map:
            Key (tuple of length 6), float32:
                (src_index, src_x, src_y, dst_index, dst_x, dst_y)
            Value (tuple of length 11), float32:
                (src_px, src_py, src_pz,
                 dst_px, dst_py, dst_pz,
                 mid_px, mid_py, mid_pz,
                 src_depth, dst_depth)
            The map is bidirectional.
        """
        Ks = normalized_Ks
        Ts = normalized_Ts
        Cs = [ct.convert.T_to_C(T) for T in Ts]
        # Only triangulate each unordered pair once (src < dst); the mirror
        # entry is inserted explicitly below.
        src_dst_indices = self.get_all_image_pair_indices()
        src_dst_indices = [
            src_dst_index
            for src_dst_index in src_dst_indices
            if src_dst_index[0] < src_dst_index[1]
        ]
        self.corres_points_map = dict()
        for src_index, dst_index in tqdm(
            src_dst_indices, desc="update_corres_points_map", leave=False
        ):
            # Compute intersection points.
            pair_corres_list = self.query_image_pair(src_index, dst_index)
            src_pixels = pair_corres_list[:, 1:3]
            dst_pixels = pair_corres_list[:, 4:6]
            src_K = Ks[src_index]
            src_T = Ts[src_index]
            dst_K = Ks[dst_index]
            dst_T = Ts[dst_index]
            src_C = Cs[src_index]
            dst_C = Cs[dst_index]
            src_os, src_ds = ct.raycast.gen_rays(src_K, src_T, src_pixels)
            dst_os, dst_ds = ct.raycast.gen_rays(dst_K, dst_T, dst_pixels)
            src_ps, dst_ps = ct.solver.closest_points_of_line_pairs(
                src_os, src_ds, dst_os, dst_ds
            )
            mid_ps = (src_ps + dst_ps) / 2
            # src_ps, dst_ps, mid_ps are already normalized.
            assert len(src_ps) == len(dst_ps) == len(mid_ps)
            assert len(src_ps) == len(src_pixels) == len(dst_pixels)
            for src_pixel, dst_pixel, src_p, dst_p, mid_p in zip(
                src_pixels, dst_pixels, src_ps, dst_ps, mid_ps
            ):
                src_x, src_y = src_pixel
                dst_x, dst_y = dst_pixel
                # Compute depths (use src_p and dst_p, not mid_p)
                src_depth = np.linalg.norm(src_C - src_p)
                dst_depth = np.linalg.norm(dst_C - dst_p)
                # src->dst match.
                key = (src_index, src_x, src_y, dst_index, dst_x, dst_y)
                value = (
                    src_p[0],
                    src_p[1],
                    src_p[2],
                    dst_p[0],
                    dst_p[1],
                    dst_p[2],
                    mid_p[0],
                    mid_p[1],
                    mid_p[2],
                    src_depth,
                    dst_depth,
                )
                self.corres_points_map[key] = value
                # dst->src match (swapped endpoints and depths).
                key = (dst_index, dst_x, dst_y, src_index, src_x, src_y)
                value = (
                    dst_p[0],
                    dst_p[1],
                    dst_p[2],
                    src_p[0],
                    src_p[1],
                    src_p[2],
                    mid_p[0],
                    mid_p[1],
                    mid_p[2],
                    dst_depth,
                    src_depth,
                )
                self.corres_points_map[key] = value
def replace_image_indices(self, index_map):
"""
Replace images indices, by mapping src_index to dst_index.
"""
corres_list = []
for corres in tqdm(self.corres_list, desc="replace_image_indices", leave=False):
src_index, src_x, src_y, dst_index, dst_x, dst_y, confi = corres
src_index = index_map[src_index]
dst_index = index_map[dst_index]
corres = src_index, src_x, src_y, dst_index, dst_x, dst_y, confi
corres_list.append(corres)
corres_list = np.array(corres_list).astype(np.float32).reshape((-1, 7))
prev_num_corres = self.get_num_corres()
prev_num_image_pairs = self.get_num_image_pairs()
self.update_internal_states(corres_list)
num_corres = self.get_num_corres()
num_image_pairs = self.get_num_image_pairs()
print(
f"[CorresMap] replace_image_indices"
f"({index_map}):\n"
f" {prev_num_corres} -> {num_corres} corres\n"
f" {prev_num_image_pairs} -> {num_image_pairs} image pairs"
)
def propagate_corres(self, max_dist):
"""
Propagate correspondences. This should only be called once.
"""
corres_list = CorresMap.propagate_corres_list(
self.corres_list, max_dist=max_dist
)
prev_num_corres = self.get_num_corres()
prev_num_image_pairs = self.get_num_image_pairs()
self.update_internal_states(corres_list)
num_corres = self.get_num_corres()
num_image_pairs = self.get_num_image_pairs()
print(
f"[CorresMap] propagate_corres"
f"({max_dist}):\n"
f" {prev_num_corres} -> {num_corres} corres\n"
f" {prev_num_image_pairs} -> {num_image_pairs} image pairs"
)
def filter_by_restricted_image_indices(self, restricted_image_indices):
"""
Filter by a list of restricted image indices. This is used for sparse
view training.
Args:
restricted_image_indices: list of image indices to be sampled. Both
the src_index and dst_index must be in the list.
"""
corres_list = []
for src_index, dst_index in itertools.combinations(restricted_image_indices, 2):
corres_list.extend(self.query_image_pair(src_index, dst_index))
corres_list.extend(self.query_image_pair(dst_index, src_index))
corres_list = np.array(corres_list).astype(np.float32).reshape((-1, 7))
prev_num_corres = self.get_num_corres()
prev_num_image_pairs = self.get_num_image_pairs()
self.update_internal_states(corres_list)
num_corres = self.get_num_corres()
num_image_pairs = self.get_num_image_pairs()
print(
f"[CorresMap] filter_by_restricted_image_indices"
f"({restricted_image_indices}):\n"
f" {prev_num_corres} -> {num_corres} corres\n"
f" {prev_num_image_pairs} -> {num_image_pairs} image pairs"
)
def filter_by_ransac(
self, Ks, Ts, ransac_min_inliers, ransac_threshold, ransac_prob
):
"""
Args:
Ks: list of camera intrinsics, (3, 3) array.
Ts: list of camera extrinsics, (4, 4) array.
ransac_min_inliers: int, minimum number of inliers for ransac. If
the number of inliers is less than this, the whole image pair is
discarded.
ransac_threshold: float, threshold in pixels for ransac.
ransac_prob: float, confidence probability for ransac.
"""
for K in Ks:
ct.sanity.assert_K(K)
for T in Ts:
ct.sanity.assert_T(T)
fitter = RobustFitter(ransac_threshold, ransac_prob)
# Only do src_index < dst_index, and duplicate that after filtering.
src_dst_indices = self.get_all_image_pair_indices()
src_dst_indices = [
src_dst_index
for src_dst_index in src_dst_indices
if src_dst_index[0] < src_dst_index[1]
]
corres_list = []
for src_index, dst_index in tqdm(
src_dst_indices, desc="filter_by_ransac", leave=False
):
pair_corres_list = self.query_image_pair(src_index, dst_index)
src_pixels = pair_corres_list[:, 1:3]
dst_pixels = pair_corres_list[:, 4:6]
confidences = pair_corres_list[:, 6]
src_K = Ks[src_index]
src_T = Ts[src_index]
dst_K = Ks[dst_index]
dst_T = Ts[dst_index]
# Fit T with K.
R, t, mask = fitter.fit(src_pixels, dst_pixels, src_K, dst_K)
# Filter my mask.
src_pixels = src_pixels[mask]
dst_pixels = dst_pixels[mask]
confidences = confidences[mask]
if mask.sum() >= ransac_min_inliers:
for src_pixel, dst_pixel, confidence in zip(
src_pixels,
dst_pixels,
confidences,
):
src_x, src_y = src_pixel
dst_x, dst_y = dst_pixel
corres_list.append(
[
src_index,
src_x,
src_y,
dst_index,
dst_x,
dst_y,
confidence,
]
)
corres_list = np.array(corres_list).astype(np.float32).reshape((-1, 7))
prev_num_corres = self.get_num_corres()
prev_num_image_pairs = self.get_num_image_pairs()
self.update_internal_states(corres_list)
num_corres = self.get_num_corres()
num_image_pairs = self.get_num_image_pairs()
print(
f"[CorresMap] filter_by_ransac"
f"({ransac_min_inliers}, {ransac_threshold}, {ransac_prob}):\n"
f" {prev_num_corres} -> {num_corres} corres\n"
f" {prev_num_image_pairs} -> {num_image_pairs} image pairs"
)
def filter_by_min_num_corres(self, min_num_corres):
"""
Args:
min_num_corres: int, minimum number of corres. The image pair
with less than this number of corres will be removed.
"""
src_dst_indices = self.get_all_image_pair_indices()
corres_list = []
for src_index, dst_index in src_dst_indices:
pair_corres_list = self.query_image_pair(src_index, dst_index)
if len(pair_corres_list) >= min_num_corres:
corres_list.extend(pair_corres_list)
corres_list = np.array(corres_list).astype(np.float32).reshape((-1, 7))
prev_num_corres = self.get_num_corres()
prev_num_image_pairs = self.get_num_image_pairs()
self.update_internal_states(corres_list)
num_corres = self.get_num_corres()
num_image_pairs = self.get_num_image_pairs()
print(
f"[CorresMap] filter_by_min_num_corres"
f"({min_num_corres}):\n"
f" {prev_num_corres} -> {num_corres} corres\n"
f" {prev_num_image_pairs} -> {num_image_pairs} image pairs"
)
    def filter_by_ray_reproject_dist(self, Ks, Ts, min_median_dist, min_dist):
        """
        Filter corres by ray-reprojection pixel distance.

        For each image pair (src_index < dst_index), each corres is
        back-projected into two rays, the closest points between the rays are
        found, both points are reprojected into the src and dst images, and
        the two pixel distances are averaged.

        Behavior (NOTE: the parameter names read like minimums, but they act
        as UPPER thresholds in the code below):
            - If the pair's median distance is >= min_median_dist, ALL corres
              of that image pair are removed.
            - Otherwise, individual corres with distance >= min_dist are
              removed.
        Surviving corres are appended in both directions (src->dst and
        dst->src).

        Args:
            Ks: list of camera intrinsics, (3, 3) array.
            Ts: list of camera extrinsics, (4, 4) array.
            min_median_dist: per-image-pair threshold (pixels) on the median
                reprojection distance.
            min_dist: per-corres threshold (pixels), applied only after the
                median check passes.
        """
        for K in Ks:
            ct.sanity.assert_K(K)
        for T in Ts:
            ct.sanity.assert_T(T)
        assert len(Ks) == len(Ts)
        # Only do src_index < dst_index, and duplicate that after filtering.
        src_dst_indices = self.get_all_image_pair_indices()
        src_dst_indices = [
            src_dst_index
            for src_dst_index in src_dst_indices
            if src_dst_index[0] < src_dst_index[1]
        ]
        corres_list = []
        for src_index, dst_index in tqdm(
            src_dst_indices, desc="filter_by_ray_reproject_dist", leave=False
        ):
            pair_corres_list = self.query_image_pair(src_index, dst_index)
            src_pixels = pair_corres_list[:, 1:3]
            dst_pixels = pair_corres_list[:, 4:6]
            confidences = pair_corres_list[:, 6]
            src_K = Ks[src_index]
            src_T = Ts[src_index]
            dst_K = Ks[dst_index]
            dst_T = Ts[dst_index]
            src_os, src_ds = ct.raycast.gen_rays(src_K, src_T, src_pixels)
            dst_os, dst_ds = ct.raycast.gen_rays(dst_K, dst_T, dst_pixels)
            # Solve for closest points in two rays.
            src_ps, dst_ps = ct.solver.closest_points_of_line_pairs(
                src_os, src_ds, dst_os, dst_ds
            )
            # Project both points to src and dst images.
            src_pixels_in_src_image = ct.project.point_cloud_to_pixel(
                src_ps, src_K, src_T
            )
            dst_pixels_in_src_image = ct.project.point_cloud_to_pixel(
                dst_ps, src_K, src_T
            )
            src_pixels_in_dst_image = ct.project.point_cloud_to_pixel(
                src_ps, dst_K, dst_T
            )
            dst_pixels_in_dst_image = ct.project.point_cloud_to_pixel(
                dst_ps, dst_K, dst_T
            )
            # Compute pixel reprojection distances, averaged over both images.
            dists_in_src_image = np.linalg.norm(
                src_pixels_in_src_image - dst_pixels_in_src_image, axis=1
            )
            dists_in_dst_image = np.linalg.norm(
                src_pixels_in_dst_image - dst_pixels_in_dst_image, axis=1
            )
            dists = (dists_in_src_image + dists_in_dst_image) / 2
            # Filter: keep the pair only when its median distance is below the
            # threshold; within a kept pair, keep corres below min_dist.
            median_dist = np.median(dists)
            if median_dist < min_median_dist:
                valid_dist_mask = dists < min_dist
                src_pixels = src_pixels[valid_dist_mask]
                dst_pixels = dst_pixels[valid_dist_mask]
                confidences = confidences[valid_dist_mask]
                for src_pixel, dst_pixel, confidence in zip(
                    src_pixels, dst_pixels, confidences
                ):
                    src_x, src_y = src_pixel
                    dst_x, dst_y = dst_pixel
                    # Append both directions so the result stays symmetric.
                    corres_list.append(
                        [src_index, src_x, src_y, dst_index, dst_x, dst_y, confidence]
                    )
                    corres_list.append(
                        [dst_index, dst_x, dst_y, src_index, src_x, src_y, confidence]
                    )
        corres_list = np.array(corres_list).astype(np.float32).reshape((-1, 7))
        prev_num_corres = self.get_num_corres()
        prev_num_image_pairs = self.get_num_image_pairs()
        self.update_internal_states(corres_list)
        num_corres = self.get_num_corres()
        num_image_pairs = self.get_num_image_pairs()
        print(
            f"[CorresMap] filter_by_ray_reproject_dist"
            f"({min_median_dist}, {min_dist}):\n"
            f"  {prev_num_corres} -> {num_corres} corres\n"
            f"  {prev_num_image_pairs} -> {num_image_pairs} image pairs"
        )
def filter_by_random_keep_ratio(self, random_keep_ratio):
"""
Args:
random_keep_ratio: float, random keep ratio.
"""
corres_list = np.copy(self.corres_list)
keep_indices = np.random.choice(
len(corres_list),
int(len(corres_list) * random_keep_ratio),
replace=False,
)
corres_list = corres_list[keep_indices]
prev_num_corres = self.get_num_corres()
prev_num_image_pairs = self.get_num_image_pairs()
self.update_internal_states(corres_list)
num_corres = self.get_num_corres()
num_image_pairs = self.get_num_image_pairs()
print(
f"[CorresMap] filter_by_random_keep_ratio"
f"({random_keep_ratio}):\n"
f" {prev_num_corres} -> {num_corres} corres\n"
f" {prev_num_image_pairs} -> {num_image_pairs} image pairs"
)
def add_corres_noise(self, noise_std, image_hw):
"""
Add noise to corres in both x and y pixel coordinates.
Args:
noise_std: float, standard deviation of the noise.
image_hw: tuple of two ints, (im_height, im_width).
"""
im_height, im_width = image_hw
corres_list = np.copy(self.corres_list)
# corres_list.shape: (N, 7)
# src_index, src_x, src_y, dst_index, dst_x, dst_y, confidence
# 0 1 2 3 4 5 6
# Only keep corres where src_index < dst_index.
corres_list = corres_list[corres_list[:, 0] < corres_list[:, 3]]
# Add noise to (src_x, src_y) and (dst_x, dst_y).
src_noise = np.random.normal(0, noise_std, size=(len(corres_list), 2))
dst_noise = np.random.normal(0, noise_std, size=(len(corres_list), 2))
corres_list[:, 1:3] += src_noise
corres_list[:, 4:6] += dst_noise
# Round (src_x, src_y) and (dst_x, dst_y) to ints (stored as floats).
corres_list[:, 1:3] = np.round(corres_list[:, 1:3])
corres_list[:, 4:6] = np.round(corres_list[:, 4:6])
# Make sure it is still within the image.
corres_list[:, 1] = np.clip(corres_list[:, 1], 0, im_width - 1)
corres_list[:, 2] = np.clip(corres_list[:, 2], 0, im_height - 1)
corres_list[:, 4] = np.clip(corres_list[:, 4], 0, im_width - 1)
corres_list[:, 5] = np.clip(corres_list[:, 5], 0, im_height - 1)
prev_num_corres = self.get_num_corres()
prev_num_image_pairs = self.get_num_image_pairs()
self.update_internal_states(corres_list)
num_corres = self.get_num_corres()
num_image_pairs = self.get_num_image_pairs()
print(
f"[CorresMap] add_corres_noise"
f"({noise_std}):\n"
f" {prev_num_corres} -> {num_corres} corres\n"
f" {prev_num_image_pairs} -> {num_image_pairs} image pairs"
)
def filter_by_statistical_outliers(
self, Ks, Ts, nb_neighbors, std_ratio, debug_save_path=None
):
"""
Args:
Ks: a list of camera intrinsics, (3, 3) array.
Ts: a list of camera extrinsics, (4, 4) array.
nb_neighbors: number of neighbors to use for computing the mean
and standard deviation of distances for the current point.
std_ratio: if the distance to the nb_neighbors is larger than
std_ratio * std, then the point is considered an outlier. The
lower the more aggressive.
debug_save_path: if not None, save debug pcd files.
"""
for K in Ks:
ct.sanity.assert_K(K)
for T in Ts:
ct.sanity.assert_T(T)
assert len(Ks) == len(Ts)
# Only do src_index < dst_index, and duplicate that after filtering.
src_dst_indices = self.get_all_image_pair_indices()
src_dst_indices = [
src_dst_index
for src_dst_index in src_dst_indices
if src_dst_index[0] < src_dst_index[1]
]
# Collect all corres and points.
corres_list = []
points = []
for src_index, dst_index in tqdm(
src_dst_indices, desc="filter_by_ray_reproject_dist", leave=False
):
pair_corres_list = self.query_image_pair(src_index, dst_index)
src_pixels = pair_corres_list[:, 1:3]
dst_pixels = pair_corres_list[:, 4:6]
confidences = pair_corres_list[:, 6]
src_K = Ks[src_index]
src_T = Ts[src_index]
dst_K = Ks[dst_index]
dst_T = Ts[dst_index]
src_os, src_ds = ct.raycast.gen_rays(src_K, src_T, src_pixels)
dst_os, dst_ds = ct.raycast.gen_rays(dst_K, dst_T, dst_pixels)
# Solve for closest points in two rays.
src_ps, dst_ps = ct.solver.closest_points_of_line_pairs(
src_os, src_ds, dst_os, dst_ds
)
mid_ps = (src_ps + dst_ps) / 2
# Collect to list.
corres_list.append(pair_corres_list)
points.append(mid_ps)
# (n, 7)
corres_list = np.concatenate(corres_list, axis=0).reshape((-1, 7))
points = np.concatenate(points, axis=0).reshape((-1, 3))
assert len(corres_list) == len(points)
# Filter points: get inliner indices.
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(points)
_, inlier_indices = pcd.remove_statistical_outlier(
nb_neighbors=nb_neighbors,
std_ratio=std_ratio,
)
inlier_indices = np.array(inlier_indices)
# Visualize for debugging.
if debug_save_path is not None:
# Color inlier_indices with gray, outlier indices with red.
colors = np.array([[1, 0, 0] for i in range(len(points))], dtype=np.float64)
colors[inlier_indices] = [0.2, 0.2, 0.2]
pcd.colors = o3d.utility.Vector3dVector(colors)
debug_save_path = Path(debug_save_path)
Path(debug_save_path).parent.mkdir(parents=True, exist_ok=True)
o3d.io.write_point_cloud(str(debug_save_path), pcd)
# Filter corres_list.
corres_list = corres_list[inlier_indices]
prev_num_corres = self.get_num_corres()
prev_num_image_pairs = self.get_num_image_pairs()
self.update_internal_states(corres_list)
num_corres = self.get_num_corres()
num_image_pairs = self.get_num_image_pairs()
print(
f"[CorresMap] filter_by_statistical_outliers"
f"({nb_neighbors}, {std_ratio}):\n"
f" {prev_num_corres} -> {num_corres} corres\n"
f" {prev_num_image_pairs} -> {num_image_pairs} image pairs"
)
def filter_by_fuse_corres_unidirectional(self):
"""
Fuse corres if with the same
[src_index, src_x , src_y ] can map to:
- [dst_index, dst_x0, dst_y0],
- [dst_index, dst_x1, dst_y1],
- [dst_index, dst_x2, dst_y2],
- ...
This function fuses dst_x0, dst_y0, dst_x1, dst_y1, dst_x2, dst_y2, ...
to dst_x, dst_y. The confidence is the average of the confidences of
the fused corres.
Note:
- This function does not handle inverse mapping. The resulting
corres map will not be symmetric.
- Dst corres coordinates will be rounded to as floating point ints.
"""
# Current corres_list shall be 7 columns
# src_index, src_x, src_y, dst_index, dst_x, dst_y, confidence
assert self.corres_list.shape[1] == 7
# config: src_index, src_x, src_y, dst_index
keys = np.unique(self.corres_list[:, :4], axis=0)
keys = [tuple(k.tolist()) for k in keys]
# key_to_values
# For each key, store a list of dst_x, dst_y, confidence.
key_to_values = {k: [] for k in keys}
for i in range(len(self.corres_list)):
key = tuple(self.corres_list[i, :4].tolist())
value = self.corres_list[i, 4:].tolist()
key_to_values[key].append(value)
# Compute average dst_x, dst_y, confidence for each key.
key_to_unique_value = {}
for key, values in key_to_values.items():
values = np.array(values)
dst_x = np.round(np.mean(values[:, 0]))
dst_y = np.round(np.mean(values[:, 1]))
confidence = np.mean(values[:, 2])
key_to_unique_value[key] = [dst_x, dst_y, confidence]
# Convert key_to_unique_value to corres_list.
corres_list = []
for key, value in key_to_unique_value.items():
corres_list.append(list(key) + value)
corres_list = np.array(corres_list).astype(np.float32).reshape((-1, 7))
prev_num_corres = self.get_num_corres()
prev_num_image_pairs = self.get_num_image_pairs()
self.update_internal_states(corres_list, force_bidirectional=True)
num_corres = self.get_num_corres()
num_image_pairs = self.get_num_image_pairs()
print(
f"[CorresMap] filter_by_fuse_corres_unidirectional:\n"
f" {prev_num_corres} -> {num_corres} corres\n"
f" {prev_num_image_pairs} -> {num_image_pairs} image pairs"
)
    def filter_by_gt_mesh(self, gt_mesh_path, Ks, Ts, dist_threshold=0.02):
        """
        Filter by the ground truth mesh. Correspondences whose 3d point location
        is not within the dist_threshold to the nearest point on the mesh will
        be removed.

        gt_mesh will be loaded, and normalized to fit inside a unit sphere.
        The dist_threshold is defined in the unit sphere space.

        This function should only be used for debugging, not in the final code.

        Args:
            gt_mesh_path: Path to the ground truth mesh.
            Ks: list of camera intrinsics, (3, 3) array.
            Ts: list of camera extrinsics, (4, 4) array.
            dist_threshold: Distance threshold to the nearest point on the mesh.
                Defined in the unified unit sphere space.
        """
        # NOTE: everything below this raise is dead code, kept as a reference
        # implementation until the function is finished.
        raise NotImplementedError("This function is not implemented yet.")
        for K in Ks:
            ct.sanity.assert_K(K)
        for T in Ts:
            ct.sanity.assert_T(T)
        assert len(Ks) == len(Ts)
        if not gt_mesh_path:
            print(f"gt_mesh_path is empty, skip filtering.")
            return
        gt_mesh_path = Path(gt_mesh_path)
        if not gt_mesh_path.is_file():
            raise ValueError(f"{gt_mesh_path} is not a file.")
        # Load and normalize gt_mesh to unit sphere, and build KDTree.
        gt_mesh = o3d.io.read_triangle_mesh(str(gt_mesh_path))
        normalize_mat = ct.normalize.compute_normalize_mat(np.asarray(gt_mesh.vertices))
        gt_mesh.vertices = o3d.utility.Vector3dVector(
            ct.transform.transform_points(np.asarray(gt_mesh.vertices), normalize_mat)
        )
        gt_pcd = o3d.geometry.PointCloud()
        gt_pcd.points = gt_mesh.vertices
        gt_kdtree = o3d.geometry.KDTreeFlann(gt_pcd)
        # This stores the filtered corres_list.
        corres_list = []
        # Reconstruct points from all corres.
        # Each unordered image pair is processed once (src_index < dst_index).
        src_dst_indices = self.get_all_image_pair_indices()
        src_dst_indices = [
            src_dst_index
            for src_dst_index in src_dst_indices
            if src_dst_index[0] < src_dst_index[1]
        ]
        for src_index, dst_index in tqdm(
            src_dst_indices, desc="compute intersections", leave=False
        ):
            # Compute intersection points: triangulate each corres as the mid
            # point of the closest points between its two back-projected rays.
            pair_corres_list = self.query_image_pair(src_index, dst_index)
            src_pixels = pair_corres_list[:, 1:3]
            dst_pixels = pair_corres_list[:, 4:6]
            src_K = Ks[src_index]
            src_T = Ts[src_index]
            dst_K = Ks[dst_index]
            dst_T = Ts[dst_index]
            src_os, src_ds = ct.raycast.gen_rays(src_K, src_T, src_pixels)
            dst_os, dst_ds = ct.raycast.gen_rays(dst_K, dst_T, dst_pixels)
            src_ps, dst_ps = ct.solver.closest_points_of_line_pairs(
                src_os, src_ds, dst_os, dst_ds
            )
            pair_corres_points = (src_ps + dst_ps) / 2
            # Normalize corres_points into the same unit-sphere space as the
            # mesh so dist_threshold is comparable.
            pair_corres_points = ct.transform.transform_points(
                pair_corres_points, normalize_mat
            )
            # Query distances to the gt_mesh (1-NN; dist2 is squared).
            dists = []
            for corres_point in pair_corres_points:
                k, idx, dist2 = gt_kdtree.search_knn_vector_3d(corres_point, 1)
                dists.append(float(np.sqrt(dist2[0])))
            dists = np.array(dists)
            # Filter by dist_threshold.
            pair_corres_list = pair_corres_list[dists < dist_threshold]
            corres_list.extend(pair_corres_list)
        corres_list = np.array(corres_list).astype(np.float32).reshape((-1, 7))
        prev_num_corres = self.get_num_corres()
        prev_num_image_pairs = self.get_num_image_pairs()
        self.update_internal_states(corres_list)
        num_corres = self.get_num_corres()
        num_image_pairs = self.get_num_image_pairs()
        print(
            f"[CorresMap] filter_by_gt_mesh"
            f"({gt_mesh_path.name}, {dist_threshold}):\n"
            f"  {prev_num_corres} -> {num_corres} corres\n"
            f"  {prev_num_image_pairs} -> {num_image_pairs} image pairs"
        )
def compute_corres_points(self, Ks, Ts):
"""
Returns (N, 3) array of points computed by all correspondences.
These are the "mid points".
"""
for K in Ks:
ct.sanity.assert_K(K)
for T in Ts:
ct.sanity.assert_T(T)
assert len(Ks) == len(Ts)
# Reconstruct points from all corres.
src_dst_indices = self.get_all_image_pair_indices()
src_dst_indices = [
src_dst_index
for src_dst_index in src_dst_indices
if src_dst_index[0] < src_dst_index[1]
]
all_pair_corres_points = np.empty((0, 3))
for src_index, dst_index in tqdm(
src_dst_indices, desc="compute intersections", leave=False
):
# Compute intersection points.
pair_corres_list = self.query_image_pair(src_index, dst_index)
src_pixels = pair_corres_list[:, 1:3]
dst_pixels = pair_corres_list[:, 4:6]
src_K = Ks[src_index]
src_T = Ts[src_index]
dst_K = Ks[dst_index]
dst_T = Ts[dst_index]
src_os, src_ds = ct.raycast.gen_rays(src_K, src_T, src_pixels)
dst_os, dst_ds = ct.raycast.gen_rays(dst_K, dst_T, dst_pixels)
src_ps, dst_ps = ct.solver.closest_points_of_line_pairs(
src_os, src_ds, dst_os, dst_ds
)
pair_corres_points = (src_ps + dst_ps) / 2
all_pair_corres_points = np.concatenate(
(all_pair_corres_points, pair_corres_points), axis=0
)
return all_pair_corres_points
    def rescale_corres(self, src_wh, dst_wh):
        """
        Downscale the corres_map's pixel coords along with the image resize.

        We assume that the src image (src_wh) is resized to dst image (dst_wh).
        For example, if the downscale factor is 2, an src image of shape
        (100, 100) is resized to (50, 50). If we have a correspondence in
        location (20, 20) in the src image, this function will convert this
        correspondence to (10, 10) in the dst image.

        Args:
            src_wh: (width, height) of the src image.
            dst_wh: (width, height) of the dst image.
        """
        # This stores the filtered corres_list.
        # [src_index, src_x, src_y, dst_index, dst_x, dst_y, confidence]
        corres_list = np.copy(self.corres_list).astype(np.float32)
        # This function is defined in the inverse way: we assume the image is
        # resized from dst_wh to src_wh, so src/dst arguments are swapped.
        corres_list[:, 1:3] = ct.image.recover_resized_pixels(
            dst_pixels=corres_list[:, 1:3],
            src_wh=dst_wh,
            dst_wh=src_wh,
            keep_aspect_ratio=False,
        )
        corres_list[:, 4:6] = ct.image.recover_resized_pixels(
            dst_pixels=corres_list[:, 4:6],
            src_wh=dst_wh,
            dst_wh=src_wh,
            keep_aspect_ratio=False,
        )
        # Only keep src_index < dst_index. Bidirectional corres_map will be
        # built later.
        corres_list = corres_list[corres_list[:, 0] < corres_list[:, 3]]
        # key = [src_index, src_x, src_y, dst_index].round()
        # val = [dst_x, dst_y, confidence]
        # A key may correspond to multiple values. The values will be first
        # sorted in a list, then the list will be averaged. Rounding the key
        # means several rescaled src pixels can collapse into one key; their
        # dst values are averaged below.
        corres_dict = {}
        for corres in tqdm(corres_list, leave=False, desc="rescale corres"):
            key = tuple(corres[:4].round().astype(np.float32))
            val = corres[4:].tolist()
            if key in corres_dict:
                corres_dict[key].append(val)
            else:
                corres_dict[key] = [val]
        # Average the values and round to integer (stored as float32).
        corres_list = []
        for key, vals in corres_dict.items():
            vals = np.array(vals)
            dst_xs, dst_ys, dst_confidences = vals[:, 0], vals[:, 1], vals[:, 2]
            dst_x = np.mean(dst_xs, axis=0).round().astype(np.float32)
            dst_y = np.mean(dst_ys, axis=0).round().astype(np.float32)
            confidence = np.mean(dst_confidences, axis=0).astype(np.float32)
            val = (dst_x, dst_y, confidence)
            corres = np.array(key + val).astype(np.float32)
            corres_list.append(corres)
        corres_list = np.array(corres_list).astype(np.float32).reshape((-1, 7))
        # Make sure the scaled corres is within the dst image.
        #            0          1      2      3          4      5      6
        # corres = [src_index, src_x, src_y, dst_index, dst_x, dst_y, confidence])
        w, h = dst_wh  # Make sure the scaled corres is within the dst image.
        corres_list[:, 1] = np.clip(corres_list[:, 1], 0, w - 1)
        corres_list[:, 2] = np.clip(corres_list[:, 2], 0, h - 1)
        corres_list[:, 4] = np.clip(corres_list[:, 4], 0, w - 1)
        corres_list[:, 5] = np.clip(corres_list[:, 5], 0, h - 1)
        # Update internal states.
        prev_num_corres = self.get_num_corres()
        prev_num_image_pairs = self.get_num_image_pairs()
        self.update_internal_states(corres_list)
        num_corres = self.get_num_corres()
        num_image_pairs = self.get_num_image_pairs()
        print(
            f"[CorresMap] rescale_corres(src_wh={src_wh}, dst_wh={dst_wh}):\n"
            f"  {prev_num_corres} -> {num_corres} corres\n"
            f"  {prev_num_image_pairs} -> {num_image_pairs} image pairs"
        )
@staticmethod
def check_corres_list(corres_list):
assert isinstance(corres_list, np.ndarray)
assert corres_list.ndim == 2
assert corres_list.shape[1] == 7
assert corres_list.dtype == np.float32
@staticmethod
def build_corres_map(corres_list, force_bidirectional=True):
"""
Build bidirectional corres_map from corres_list. One input ray can have
multiple correspondences. The corres_map will also remove remove
duplications in the corres_list.
Args:
corres_list: np.ndarray of shape (N, 7), where N is the number of
correspondences. The 7 columns are:
[src_index, src_x, src_y, dst_index, dst_x, dst_y, confidence]
force_bidirectional: If True, the corres_map will be bidirectional.
If False, the corres_map simply be built with the current
corres_list.
Returns:
corres_map mapping keys to values, where:
- key: (src_index, src_x, src_y)
- val: [
(dst_index, dst_x, dst_y, confidence),
(dst_index, dst_x, dst_y, confidence),
...
]
Both key and val must be in np.float32.
"""
# Sanity check input.
CorresMap.check_corres_list(corres_list)
if force_bidirectional:
# Build bidirectional corres_list.
# The confidence is stored at the [6]-th column and will be preserved.
corres_list_inv = np.copy(corres_list)
corres_list_inv[:, 0:3] = corres_list[:, 3:6]
corres_list_inv[:, 3:6] = corres_list[:, 0:3]
corres_list = np.concatenate([corres_list, corres_list_inv], axis=0)
# Unique corres_list.
src_ixy_dst_ixy = (corres_list[:, :6]).astype(np.float32)
_, unique_ixs = np.unique(src_ixy_dst_ixy, axis=0, return_index=True)
corres_list = corres_list[unique_ixs]
# Build map. One key can have multiple correspondences. We treat each
# value as a set for deduplication. The map is bidirectional.
corres_map = dict()
for corres in tqdm(corres_list, leave=False, desc="Build corres_map"):
key = tuple(corres[:3])
val = tuple(corres[3:7])
if key in corres_map:
corres_map[key].append(val)
else:
corres_map[key] = [val]
return corres_map
@staticmethod
def build_image_pair_to_corres(corres_list):
"""
Args:
corres_list: (n, 7) float32 tensor:
[
[src_index, src_x, src_y, dst_index, dst_x, dst_y, confidence],
[src_index, src_x, src_y, dst_index, dst_x, dst_y, confidence],
...
]
Returns:
Map from (src_index, dst_index) to corres. Everything is in float32.
{
(src_index, dst_index): [
[src_index, src_x, src_y, dst_index, dst_x, dst_y, confidence],
[src_index, src_x, src_y, dst_index, dst_x, dst_y, confidence],
...
],
(src_index, dst_index): [
[src_index, src_x, src_y, dst_index, dst_x, dst_y, confidence],
[src_index, src_x, src_y, dst_index, dst_x, dst_y, confidence],
...
],
...
}
"""
CorresMap.check_corres_list(corres_list)
image_pair_to_corres = dict()
for corres in tqdm(corres_list, leave=False, desc="Build image_pair_to_corres"):
src_index, _, _, dst_index, _, _, _ = corres
key = (src_index, dst_index)
if key in image_pair_to_corres:
image_pair_to_corres[key].append(corres)
else:
image_pair_to_corres[key] = [corres]
for key in image_pair_to_corres:
image_pair_to_corres[key] = np.array(image_pair_to_corres[key]).astype(
np.float32
)
return image_pair_to_corres
def sample(self, sample_num):
"""
Args:
sample_num: number of samples, int.
Returns:
A list of [src_index, src_x, src_y, dst_index, dst_x, dst_y, confidence]
in (n, 7) float32 tensor.
"""
indices = np.random.randint(len(self.corres_list), size=sample_num)
samples = self.corres_list[indices]
return samples
def query(self, src_indices, src_xs, src_ys):
"""
Args:
src_indices: image indices, float32, (n, ).
src_xs: cols, float32, (n, ).
src_ys: rows, float32, (n, ).
Returns:
Correspondences in float32, (m, 7) tensor. One query
ray can have 0, 1, or multiple correspondences. The query_index
is the index of the input query ray.
[
[src_index, src_x, src_y, dst_index, dst_x, dst_y, confidence, query_index],
[src_index, src_x, src_y, dst_index, dst_x, dst_y, confidence, query_index],
...
]
"""
assert isinstance(src_indices, np.ndarray)
assert isinstance(src_xs, np.ndarray)
assert isinstance(src_ys, np.ndarray)
assert len(src_indices) == len(src_xs) == len(src_ys)
num_queries = len(src_indices)
result_corres_list = []
for query_index in range(num_queries):
src_index = src_indices[query_index]
src_x = src_xs[query_index]
src_y = src_ys[query_index]
key = (src_index, src_x, src_y)
vals = self.corres_map.get(key, [])
for val in vals:
dst_index, dst_x, dst_y, confidence = val
corres = [
src_index,
src_x,
src_y,
dst_index,
dst_x,
dst_y,
confidence,
query_index,
]
result_corres_list.append(corres)
return np.array(result_corres_list).astype(np.float32).reshape((-1, 8))
def query_image_pair(self, src_index, dst_index):
"""
Args:
src_index: src image index.
dst_index: dst image index.
Returns:
float32, (n, 7) array. List of corres between the src and dst images.
[
[src_index, src_x, src_y, dst_index, dst_x, dst_y, confidence],
[src_index, src_x, src_y, dst_index, dst_x, dst_y, confidence],
...
]
"""
key = (src_index, dst_index)
if key in self.image_pair_to_corres:
return self.image_pair_to_corres[key]
else:
return np.empty((0, 7), dtype=np.float32)
def get_all_image_pair_indices(self):
src_dst_indices = sorted(list(self.image_pair_to_corres.keys()))
src_dst_indices = np.array(src_dst_indices).astype(np.int64)
return src_dst_indices
    def get_num_corres(self):
        """Return the total number of corres (delegates to len(self))."""
        return len(self)
def get_num_image_pairs(self):
return len(self.image_pair_to_corres.keys())
def __str__(self):
s = ""
s += f"CorresMap of {self.get_num_corres()} corres and "
s += f"{self.get_num_image_pairs()} unidirectional image pairs."
return s
def __repr__(self):
return self.__str__()
    @staticmethod
    def propagate_corres_list(corres_list, max_dist):
        """
        Propagate the correspondence list to graph vertices with distance <= 2.

        Deprecated: Propagate the correspondence list to all reachable connected
        components.

        Each (im_idx, x, y) ray is a graph vertex; each corres is an edge
        weighted by its confidence. The output contains corres between
        vertices at graph distance 1 (the original corres, re-emitted) and at
        distance 2 via one intermediate vertex in a third image.

        Args:
            corres_list: (n, 7) float32 tensor:
                [
                    [src_index, src_x, src_y, dst_index, dst_x, dst_y, confidence],
                    [src_index, src_x, src_y, dst_index, dst_x, dst_y, confidence],
                    ...
                ]
            max_dist: int, maximum distance of graph vertices. Only 2 is
                supported.

        Returns:
            corres_list: (n, 7) float32 tensor in the same layout.
        """
        if max_dist != 2:
            raise NotImplementedError("Only max_dist == 2 is supported.")
        # (im_idx, x, y) is a key; both the src and dst triplets of every row
        # become vertices.
        all_keys = np.unique(corres_list[:, :6].reshape((-1, 3)), axis=0)
        all_keys = sorted([tuple(key) for key in all_keys])
        # Map key <-> vertex id.
        map_vid_to_key = {i: key for i, key in enumerate(all_keys)}
        map_key_to_vid = {key: i for i, key in map_vid_to_key.items()}
        num_vertices = len(all_keys)
        # Compute all edges, with confidence as edge weight.
        map_src_dst_vids_to_confidence = dict()
        edges = []
        for corres in corres_list:
            src_key = tuple(corres[0:3])
            dst_key = tuple(corres[3:6])
            confidence = corres[6]
            src_vid = map_key_to_vid[src_key]
            dst_vid = map_key_to_vid[dst_key]
            # Add edge (each undirected edge only once, via the vid ordering).
            if src_vid < dst_vid:
                edges.append((src_vid, dst_vid, confidence))
            # Add to map (both directions for O(1) lookup).
            map_src_dst_vids_to_confidence[(src_vid, dst_vid)] = confidence
            map_src_dst_vids_to_confidence[(dst_vid, src_vid)] = confidence
        # Build graph.
        g = ig.Graph()
        g.add_vertices(num_vertices)
        g.add_edges([e[0:2] for e in edges])
        assert len(g.es) == len(edges)
        g.es["confidence"] = [e[2] for e in edges]
        # Gather all vertices with distance <= 2.
        corres_list_new = []
        for src_vid in tqdm(
            range(num_vertices),
            total=num_vertices,
            desc="Propagate corres",
            leave=False,
        ):
            src_v = g.vs[src_vid]
            # Vertices with distance 0.
            dst_0_vs = [src_v]
            # Vertices with distance 1: re-emit the original corres.
            dst_1_vs = src_v.neighbors()
            for dst_v in dst_1_vs:
                src_vid = src_v.index
                dst_vid = dst_v.index
                src_key = map_vid_to_key[src_vid]
                dst_key = map_vid_to_key[dst_vid]
                src_im_idx = src_key[0]
                dst_im_idx = dst_key[0]
                # Skip if the corres is between the same image.
                if src_im_idx == dst_im_idx:
                    continue
                # Add to corres_list_new.
                confidence = map_src_dst_vids_to_confidence[(src_vid, dst_vid)]
                corres = list(src_key) + list(dst_key) + [confidence]
                corres_list_new.append(corres)
            # Vertices with distance 2: new corres via one intermediate vertex.
            for dst_1_v in dst_1_vs:
                dst_2_vs = dst_1_v.neighbors()
                dst_2_vs = [
                    v for v in dst_2_vs if v not in dst_0_vs and v not in dst_1_vs
                ]
                for dst_2_v in dst_2_vs:
                    src_vid = src_v.index
                    mid_vid = dst_1_v.index
                    dst_vid = dst_2_v.index
                    src_key = map_vid_to_key[src_vid]
                    mid_key = map_vid_to_key[mid_vid]
                    dst_key = map_vid_to_key[dst_vid]
                    src_im_idx = src_key[0]
                    mid_im_idx = mid_key[0]
                    dst_im_idx = dst_key[0]
                    # Skip if the corres is between the same image
                    # (all three images must be distinct).
                    if len(set([src_im_idx, mid_im_idx, dst_im_idx])) != 3:
                        continue
                    # Add to corres_list_new, combining the two confidences.
                    src_mid_confidence = map_src_dst_vids_to_confidence[
                        (src_vid, mid_vid)
                    ]
                    mid_dst_confidence = map_src_dst_vids_to_confidence[
                        (mid_vid, dst_vid)
                    ]
                    # Option 1: product
                    confidence = src_mid_confidence * mid_dst_confidence
                    # Option 2: average
                    # confidence = (src_mid_confidence + mid_dst_confidence) / 2
                    corres = list(src_key) + list(dst_key) + [confidence]
                    corres_list_new.append(corres)
        corres_list_new = np.array(corres_list_new).astype(np.float32).reshape((-1, 7))
        return corres_list_new
class RobustFitter:
    """RANSAC-based relative pose fitter using the essential matrix."""

    def __init__(self, threshold=1.0, prob=0.99999):
        """
        Args:
            threshold: float, inlier threshold in pixels for ransac.
            prob: float, confidence probability for ransac.
        """
        self.threshold = threshold
        self.prob = prob

    def fit(self, src_pixels, dst_pixels, src_K, dst_K):
        """
        Estimate pose and return the mask for inliers.

        Args:
            src_pixels: (N, 2) array, float32, order: (col, row).
            dst_pixels: (N, 2) array, float32, order: (col, row).
            src_K: (3, 3) array, float32. Camera intrinsics of the first image.
            dst_K: (3, 3) array, float32. Camera intrinsics of the second image.

        Returns:
            R: (3, 3) array, float32. Rotation matrix.
            t: (3,) array, float32. Translation vector.
            mask: (N,) array, bool. True for inliers.
        """
        # Identity pose with an all-outlier mask, returned on any failure.
        empty_ret = (
            np.eye(3, dtype=np.float32),
            np.zeros((3,), dtype=np.float32),
            np.zeros((len(src_pixels),), dtype=bool),
        )
        # The 5-point algorithm needs at least 5 correspondences.
        if len(src_pixels) < 5:
            return empty_ret
        # Normalize key points: (pixel - principal_point) / focal.
        src_pixels = (src_pixels - src_K[[0, 1], [2, 2]][None]) / src_K[[0, 1], [0, 1]][
            None
        ]
        dst_pixels = (dst_pixels - dst_K[[0, 1], [2, 2]][None]) / dst_K[[0, 1], [0, 1]][
            None
        ]
        # Normalize ransac threshold by the mean focal length.
        # NOTE(review): the mean uses src fx and dst fy twice (this matches
        # SuperGlue's estimate_pose); confirm this is intended rather than the
        # mean of all four focal lengths.
        ransac_thr = self.threshold / np.mean(
            [src_K[0, 0], dst_K[1, 1], src_K[0, 0], dst_K[1, 1]]
        )
        # Compute pose with cv2.
        E, mask = cv2.findEssentialMat(
            src_pixels,
            dst_pixels,
            np.eye(3),
            threshold=ransac_thr,
            prob=self.prob,
            method=cv2.USAC_MAGSAC,
        )
        if E is None:
            print(
                f"Warning: E is None while trying to recover pose. "
                f"(# src_pixels: {len(src_pixels)})"
            )
            return empty_ret
        # Recover pose from E. cv2 may return several stacked (3, 3)
        # candidates; keep the pose with the most inliers.
        best_num_inliers = 0
        ret = empty_ret
        # Integer division: E has shape (3k, 3); the float form relied on
        # numpy internally truncating the section count.
        for _E in np.split(E, len(E) // 3):
            n, R, t, _ = cv2.recoverPose(
                _E, src_pixels, dst_pixels, np.eye(3), 1e9, mask=mask
            )
            if n > best_num_inliers:
                ret = (R, t[:, 0], mask.ravel() > 0)
                best_num_inliers = n
        return ret
def read_corres(corres_dir, num_views, matcher_name, config_name):
    """
    Read correspondences.

    Example usage:
    ```
    corres_dir = "nerf/data/nerf_llff/fern/corres"
    num_views = 3
    matcher_name = "dkm_loftr"
    config_name = "default"
    train_ids, test_ids, corres_map = read_corres(corres_dir=corres_dir,
                                                  num_views=num_views,
                                                  matcher_name=matcher_name,
                                                  config_name=config_name)
    ```

    Args:
        corres_dir: Path to correspondences directory. For example
            ```
            nerf/data/nerf_llff/fern/corres
            ├── dkm_loftr_7_9_13
            │   ├── 007_009_connections.jpg
            │   ├── 007_009_points.jpg
            │   ├── 007_013_connections.jpg
            │   ├── 007_013_points.jpg
            │   ├── 009_013_connections.jpg
            │   ├── 009_013_points.jpg
            │   ├── corres.npz
            │   └── corres_raw.npz
            └── selected_cameras.json
            ```
        num_views: Number of views (cameras) to load. This will query the
            selected_cameras.json to determine which views to load.
        matcher_name: Name of matcher. For example, "dkm_loftr".
        config_name: Name of the matcher configuration, e.g. "default". Used
            together with matcher_name and num_views to locate the match
            directory "{matcher_name}_{config_name}_{num_views}".

    Return:
        A tuple of (train_ids, test_ids, corres_map).

    Raises:
        ValueError: if corres_dir is not a directory, or if neither
            corres.npz nor corres_raw.npz exists in the match directory.
    """
    corres_dir = Path(corres_dir)
    if not corres_dir.is_dir():
        raise ValueError(f"Invalid corres_dir: {corres_dir}")

    # Load selected cameras.
    train_ids, test_ids = read_selected_cameras(corres_dir, num_views)

    # Load correspondence map. Prefer the filtered corres.npz; fall back to
    # the unfiltered corres_raw.npz when the filtered file is absent.
    match_dir_name = f"{matcher_name}_{config_name}_{num_views}"
    corres_map_path = corres_dir / match_dir_name / "corres.npz"
    corres_raw_map_path = corres_dir / match_dir_name / "corres_raw.npz"
    if not corres_map_path.is_file():
        if corres_raw_map_path.is_file():
            corres_map_path = corres_raw_map_path
        else:
            raise ValueError(f"Invalid corres.npz path: {corres_map_path}")
    corres_map = CorresMap.from_npz(corres_path=corres_map_path)

    return train_ids, test_ids, corres_map
def read_selected_cameras(corres_dir, num_views):
    """
    Read train_ids and test_ids from {corres_dir}/selected_cameras.json.

    Example usage:
    ```python
    corres_dir = "nerf/data/nerf_llff/fern/corres"
    num_views = 3
    train_ids, test_ids = read_selected_cameras(corres_dir, num_views)
    ```

    Example selected_cameras.json (keyed by the number of views as a string):
    {
        "3": {"train_ids": [1, 10, 19], "test_ids": [0, 8, 16]},
        "6": {"train_ids": [1, 4, 7, 12, 15, 19], "test_ids": [0, 8, 16]},
        "9": {"train_ids": [1, 3, 5, 7, 10, 12, 14, 17, 19],
              "test_ids": [0, 8, 16]}
    }

    Args:
        corres_dir: Path to the correspondences directory containing
            selected_cameras.json.
        num_views: Number of views (cameras) to load; selects the matching
            entry of selected_cameras.json.

    Return:
        A list of train_ids, and a list of test_ids.

    Raises:
        ValueError: if the selected entry's train_ids length disagrees with
            num_views.
    """
    json_path = corres_dir / "selected_cameras.json"
    with open(json_path, "r") as f:
        selected = json.load(f)

    # JSON object keys are strings, so look up by str(num_views).
    entry = selected[str(num_views)]
    train_ids = entry["train_ids"]
    test_ids = entry["test_ids"]

    # Sanity check: the split must contain exactly num_views training views.
    if len(train_ids) != num_views:
        raise ValueError(f"Invalid train_ids: {train_ids}")

    return train_ids, test_ids
def load_camera_split(
    camera_split_path: Path, scene_name: str, num_views: int
) -> Tuple[List[int], List[int]]:
    """
    Read train_ids and test_ids from a camera-split .json file.

    The file is keyed first by scene name, then by the number of views as a
    string, each entry holding "train_ids" and "test_ids" lists.
    """
    with open(camera_split_path, "r") as f:
        split = json.load(f)
    entry = split[scene_name][str(num_views)]
    return entry["train_ids"], entry["test_ids"]
| yxlao/corres-nerf | 49 | (NeurIPS 2023) CorresNeRF: Image Correspondence Priors for Neural Radiance Fields | yxlao | Yixing Lao | HKU-CS | |
src/corres/dataloader.py | Python | from pathlib import Path
import camtools as ct
import numpy as np
from ..nerf.load_llff import load_llff_data
from ..neus.dataset import unpack_neus_camera_npz
from tqdm import tqdm
def load_llff_cameras(scene_dir, factor):
    """
    Load per-view camera intrinsics and extrinsics for an LLFF scene.

    Args:
        scene_dir: Path to the scene directory.
        factor: Factor to downsample images by. This affects the camera
            intrinsics as well.

    Return:
        Ks, Ts
    """
    assert factor in [1, 2, 4, 8]

    # Use the official LLFF loader; it also loads the images, but only the
    # poses are used here.
    images, poses, bds, render_poses, i_test = load_llff_data(
        scene_dir,
        factor=factor,
        recenter=True,
        bd_factor=0.75,
        spherify=False,
    )
    hwf = poses[0, :3, -1]

    # Strip the hwf column, keeping (N, 3, 4) pose matrices, then pad each
    # with a homogeneous bottom row to a (4, 4) matrix.
    poses = poses[:, :3, :4]
    bottom_row = np.array([0, 0, 0, 1]).reshape(1, 4)
    poses = [np.concatenate([pose, bottom_row], axis=0) for pose in poses]

    print("Loaded llff", images.shape, render_poses.shape, hwf, scene_dir)
    if not isinstance(i_test, list):
        i_test = [i_test]

    # All views share a single intrinsic matrix: one focal length for both
    # axes, principal point at the image center.
    height, width, focal = hwf
    shared_K = np.array(
        [
            [focal, 0, width / 2],
            [0, focal, height / 2],
            [0, 0, 1],
        ]
    )

    Ks = []
    Ts = []
    for pose in poses:
        Ks.append(shared_K.copy())
        # Convert the OpenGL-style (Blender) pose into an OpenCV extrinsic.
        Ts.append(ct.convert.T_opengl_to_opencv(ct.convert.pose_to_T(pose)))

    return Ks, Ts
def load_llff(scene_dir, factor):
    """
    Load images and cameras of an LLFF scene.

    Args:
        scene_dir: Path to the scene directory.
        factor: Factor to downsample images by. This affects the camera
            intrinsics as well.

    Return:
        images: list of HxWx3 float32 images, from 0-1.
        Ks: list of 3x3 float32 camera intrinsics.
        Ts: list of 4x4 float32 camera extrinsics.

    This is primarily used by our own code, e.g. the matcher.
    """
    assert factor in [1, 2, 4, 8]

    # Load images and cameras, using the official loader.
    images, poses, bds, render_poses, i_test = load_llff_data(
        scene_dir, factor=factor, recenter=True, bd_factor=0.75, spherify=False
    )
    hwf = poses[0, :3, -1]
    poses = poses[:, :3, :4]
    print("Loaded llff", images.shape, render_poses.shape, hwf, scene_dir)

    # Unstack (N, H, W, 3) into a list of N (H, W, 3) images, e.g.
    # factor == 1: 20 x (3024, 4032, 3); factor == 8: 20 x (378, 504, 3).
    images = list(images)

    # Load cameras: Ks is a list of (3, 3) matrices, Ts of (4, 4) matrices.
    Ks, Ts = load_llff_cameras(scene_dir, factor=factor)

    assert len(images) == len(Ks) == len(Ts)
    return images, Ks, Ts
def load_dtu(scene_dir):
    """
    Load images and cameras of a DTU scene.

    Return:
        images: list of HxWx3 float32 images, from 0-1.
        Ks: list of 3x3 float32 camera intrinsics.
        Ts: list of 4x4 float32 camera extrinsics.
    """
    scene_dir = Path(scene_dir)
    if not scene_dir.exists():
        raise ValueError(f"scene_dir does not exist: {scene_dir}")

    # All *.jpg and *.png files will be searched. Do not put other files in
    # the image directory. Image files are sorted by name for later indexing.
    im_dir = scene_dir / "image"
    im_paths = sorted(list(im_dir.glob("*.jpg")) + list(im_dir.glob("*.png")))
    if len(im_paths) == 0:
        raise ValueError(f"No images found in {im_dir}.")
    ims = [
        ct.io.imread(im_path)
        for im_path in tqdm(im_paths, desc=f"Read images from {im_dir}", leave=False)
    ]

    # Read cameras: decompose each NeuS world_mat projection P into
    # intrinsics K and extrinsics T.
    camera_path = scene_dir / "cameras_sphere.npz"
    neus_cameras = unpack_neus_camera_npz(camera_path)
    Ks = []
    Ts = []
    for neus_camera in neus_cameras:
        P = neus_camera["world_mat"][:3, :4]
        K, R, t = ct.convert.P_to_K_R_t(P)
        Ks.append(K)
        Ts.append(ct.convert.R_t_to_T(R, t))

    assert len(ims) == len(Ks) == len(Ts)
    return ims, Ks, Ts
| yxlao/corres-nerf | 49 | (NeurIPS 2023) CorresNeRF: Image Correspondence Priors for Neural Radiance Fields | yxlao | Yixing Lao | HKU-CS | |
src/corres/fused_matcher.py | Python | import numpy as np
from .matcher import Matcher
class FusedMatcher(Matcher):
    # Runs several child matchers on the same image pair and merges their
    # correspondences into a single deduplicated set.

    def __init__(self, matchers):
        super().__init__()
        assert isinstance(matchers, list)
        assert len(matchers) > 0
        for matcher in matchers:
            assert isinstance(matcher, Matcher)
        self.matchers = matchers

    def match_image_pair(self, im_src, im_dst):
        """
        Match two images with every child matcher and merge the results.

        Args:
            im_src: shape (h, w, 3), float32, range [0-1]
            im_dst: shape (h, w, 3), float32, range [0-1]

        Returns:
            (src_pixels, dst_pixels, confidences):
                - src_pixels: shape (N, 2), float32, order: (col, row)
                - dst_pixels: shape (N, 2), float32, order: (col, row)
                - confidences: shape (N,), float32, range [0-1]
        """
        # Collect the outputs of every child matcher; the leading empty
        # arrays pin the float32 output dtype.
        src_chunks = [np.empty((0, 2), dtype=np.float32)]
        dst_chunks = [np.empty((0, 2), dtype=np.float32)]
        conf_chunks = [np.empty((0,), dtype=np.float32)]
        for matcher in self.matchers:
            src_pixels, dst_pixels, confidences = matcher.match_image_pair(
                im_src, im_dst
            )
            src_chunks.append(src_pixels)
            dst_chunks.append(dst_pixels)
            conf_chunks.append(confidences)
        merged_src = np.concatenate(src_chunks, axis=0)
        merged_dst = np.concatenate(dst_chunks, axis=0)
        merged_conf = np.concatenate(conf_chunks, axis=0)

        # Deduplicate identical src<->dst pairs, keeping the first
        # confidence seen for each unique pair.
        return Matcher._unique_src_dst_confidences(
            src_pixels=merged_src,
            dst_pixels=merged_dst,
            confidences=merged_conf,
        )
| yxlao/corres-nerf | 49 | (NeurIPS 2023) CorresNeRF: Image Correspondence Priors for Neural Radiance Fields | yxlao | Yixing Lao | HKU-CS | |
src/corres/matcher.py | Python | import abc
import itertools
import camtools as ct
import numpy as np
import torch
from matplotlib import pyplot as plt
from tqdm import tqdm
from .corres_map import CorresMap
class Matcher(abc.ABC):
    """
    Abstract base class for image matchers.

    A subclass implements `match_image_pair`; `match_image_pairs` then runs
    it over every unordered pair of images in a list.

    Bug fix: the class previously declared `__metaclass__ = abc.ABCMeta`,
    which is Python 2 syntax and a no-op in Python 3, so
    `@abc.abstractmethod` was never enforced. Deriving from `abc.ABC`
    restores the enforcement.
    """

    def __init__(self):
        pass

    @staticmethod
    def assert_im_dtype_range_ndim(im, dtype=np.float32, ndim=2):
        """Assert im is a numpy array of the given dtype and ndim (3 channels if ndim == 3)."""
        assert isinstance(im, np.ndarray)
        assert im.dtype == dtype, f"im.dtype={im.dtype}, expected dtype={dtype}"
        assert im.ndim == ndim, f"im.ndim={im.ndim}, expected ndim={ndim}"
        if ndim == 3:
            assert im.shape[2] == 3, f"im.shape[2]={im.shape[2]}, expected 3"

    @staticmethod
    def _unique_src_dst_confidences(src_pixels, dst_pixels, confidences):
        """
        Unique src<->dst pairs, and keep the first confidence.

        Args:
            src_pixels: (N, 2)
            dst_pixels: (N, 2)
            confidences: (N, )

        Returns:
            (src_pixels, dst_pixels, confidences):
                - src_pixels: (N', 2)
                - dst_pixels: (N', 2)
                - confidences: (N', )
        """
        # (N, 4): each row is one concatenated src<->dst pair.
        src_dst_pixels = np.concatenate((src_pixels, dst_pixels), axis=1)
        # (N', ): index of the first occurrence of each unique row.
        unique_indices = np.unique(src_dst_pixels, axis=0, return_index=True)[1]
        # (N', 2)
        src_pixels = src_pixels[unique_indices]
        # (N', 2)
        dst_pixels = dst_pixels[unique_indices]
        # (N', )
        confidences = confidences[unique_indices]
        return src_pixels, dst_pixels, confidences

    @staticmethod
    def _matcher_aug_swap(im_src, im_dst, matcher):
        """
        Match two images by swapping the source and destination images.

        Args:
            im_src: shape (h, w, 3), float32, range [0-1]
            im_dst: shape (h, w, 3), float32, range [0-1]
            matcher: Base matcher function,
                (im_src, im_dst) -> (src_pixels, dst_pixels, confidences)

        Returns:
            (src_pixels, dst_pixels):
                - src_pixels: shape (N, 2), float32, order: (col, row)
                - dst_pixels: shape (N, 2), float32, order: (col, row)
                - confidences: shape (N, )
        """
        # Match in both directions; the swapped call's outputs are unpacked
        # in reverse so both result sets are in src->dst order.
        # (N1, 2), (N1, 2)
        src_pixels_0, dst_pixels_0, confidences_0 = matcher(im_src, im_dst)
        # (N2, 2), (N2, 2)
        dst_pixels_1, src_pixels_1, confidences_1 = matcher(im_dst, im_src)
        # (N1 + N2, 2)
        src_pixels = np.concatenate((src_pixels_0, src_pixels_1), axis=0)
        # (N1 + N2, 2)
        dst_pixels = np.concatenate((dst_pixels_0, dst_pixels_1), axis=0)
        # (N1 + N2, )
        confidences = np.concatenate((confidences_0, confidences_1), axis=0)

        # Unique.
        (
            src_pixels,
            dst_pixels,
            confidences,
        ) = Matcher._unique_src_dst_confidences(
            src_pixels=src_pixels,
            dst_pixels=dst_pixels,
            confidences=confidences,
        )

        return src_pixels, dst_pixels, confidences

    @staticmethod
    def _matcher_aug_rotate(im_src, im_dst, matcher):
        """
        Match two images with rotation augmentation. Images will be rotated 0,
        90, 180, and 270 degrees counter-clockwise, resulting in 16 matches.

        Args:
            im_src: shape (h, w, 3), float32, range [0-1]
            im_dst: shape (h, w, 3), float32, range [0-1]
            matcher: Base matcher function,
                (im_src, im_dst) -> (src_pixels, dst_pixels, confidences)

        Returns:
            (src_pixels, dst_pixels):
                - src_pixels: shape (N, 2), float32, order: (col, row)
                - dst_pixels: shape (N, 2), float32, order: (col, row)
                - confidences: shape (N, )
        """
        src_wh = im_src.shape[1], im_src.shape[0]

        all_src_pixels = np.empty((0, 2), dtype=np.float32)
        all_dst_pixels = np.empty((0, 2), dtype=np.float32)
        all_confidences = np.empty((0,), dtype=np.float32)
        for src_rotation in [0, 90, 180, 270]:
            for dst_rotation in [0, 90, 180, 270]:
                im_src_rotated = ct.image.rotate(im_src, ccw_degrees=src_rotation)
                im_dst_rotated = ct.image.rotate(im_dst, ccw_degrees=dst_rotation)
                # Skip rotation combinations whose shapes disagree (e.g. a
                # 90-degree rotation of a non-square image).
                if im_src_rotated.shape != im_dst_rotated.shape:
                    continue
                src_pixels, dst_pixels, confidences = matcher(
                    im_src=im_src_rotated,
                    im_dst=im_dst_rotated,
                )
                # Map pixel coordinates back to the unrotated images.
                src_pixels = ct.image.recover_rotated_pixels(
                    src_pixels, src_wh, src_rotation
                )
                dst_pixels = ct.image.recover_rotated_pixels(
                    dst_pixels, src_wh, dst_rotation
                )
                src_pixels = np.round(src_pixels).astype(np.float32).reshape((-1, 2))
                dst_pixels = np.round(dst_pixels).astype(np.float32).reshape((-1, 2))
                all_src_pixels = np.concatenate((all_src_pixels, src_pixels))
                all_dst_pixels = np.concatenate((all_dst_pixels, dst_pixels))
                all_confidences = np.concatenate((all_confidences, confidences))

        # Unique.
        (
            all_src_pixels,
            all_dst_pixels,
            all_confidences,
        ) = Matcher._unique_src_dst_confidences(
            src_pixels=all_src_pixels,
            dst_pixels=all_dst_pixels,
            confidences=all_confidences,
        )

        return all_src_pixels, all_dst_pixels, all_confidences

    @abc.abstractmethod
    def match_image_pair(self, im_src, im_dst):
        """
        Match a pair of images.

        Bug fix: the abstract signature was `(self, input)`, disagreeing with
        this docstring and with every subclass, which all implement
        `(self, im_src, im_dst)`.

        Args:
            im_src: shape (h, w) or (h, w, 3), must be float32, range [0-1]
            im_dst: shape (h, w) or (h, w, 3), must be float32, range [0-1]

        Returns:
            (src_pixels, dst_pixels):
                - src_pixels: shape (N, 2), float32, order: (col, row)
                - dst_pixels: shape (N, 2), float32, order: (col, row)
        """
        raise NotImplementedError("Please implement this method.")

    def match_image_pairs(self, ims):
        """
        Calls match_image_pair() for each pair of images. Returns a list of
        correspondences.

        Args:
            ims: List of images. Each of them must be of shape (h, w, 3),
                float32, range [0-1].

        Returns:
            A list of correspondences:
            ([
                [src_index, src_x, src_y, dst_index, dst_x, dst_y, confidence],
                ...
            ], dtype=np.float32)
            - The coordinates are rounded to the nearest integer. This is useful
              for NeRF to generate rays at integer coordinates.
            - Matching is performed once per unordered pair (src_index <
              dst_index). NOTE(review): the result was documented as
              "bidirectional" (each pair repeated in both directions), which
              this loop does not do — presumably
              CorresMap.sanitize_corres_list handles the mirroring; verify.
        """
        # Sanity checks. Must be (h, w, 3), float32, range [0-1].
        assert len(ims) > 0
        for im in ims:
            Matcher.assert_im_dtype_range_ndim(im, dtype=np.float32, ndim=3)
            assert im.shape == ims[0].shape

        # Match all image pairs.
        all_corres_list = np.zeros((0, 7), dtype=np.float32)
        all_pair_indices = list(itertools.combinations(range(len(ims)), 2))
        for src_index, dst_index in tqdm(
            all_pair_indices, desc="Match images", leave=False
        ):
            # Get image pair.
            im_src = ims[src_index]
            im_dst = ims[dst_index]

            # Match image pair. Augmentation is handled internally.
            src_pixels, dst_pixels, confidences = self.match_image_pair(im_src, im_dst)

            # Build per-pair correspondence rows:
            # [src_index, src_x, src_y, dst_index, dst_x, dst_y, confidence].
            pair_corres_list = np.empty((len(src_pixels), 7), dtype=np.float32)
            pair_corres_list[:, 0] = src_index
            pair_corres_list[:, 1:3] = src_pixels
            pair_corres_list[:, 3] = dst_index
            pair_corres_list[:, 4:6] = dst_pixels
            pair_corres_list[:, 6] = confidences

            # Add to all_corres_list.
            all_corres_list = np.concatenate(
                (
                    all_corres_list,
                    pair_corres_list.reshape((-1, 7)),
                )
            )

        # Remove duplicate correspondences.
        print(f"Begin sanitize all_corres_list, len: {len(all_corres_list)}")
        all_corres_list = CorresMap.sanitize_corres_list(all_corres_list)
        print(f"After sanitize all_corres_list, len: {len(all_corres_list)}")

        return all_corres_list
| yxlao/corres-nerf | 49 | (NeurIPS 2023) CorresNeRF: Image Correspondence Priors for Neural Radiance Fields | yxlao | Yixing Lao | HKU-CS | |
src/corres/matcher_dkm.py | Python | import functools
from pathlib import Path
import camtools as ct
import numpy as np
from dkm.models.model_zoo import DKMv3_indoor, DKMv3_outdoor
from matplotlib import pyplot as plt
from PIL import Image
from .matcher import Matcher
def numpy_to_pil(im_numpy):
    """
    Convert a numpy array to an uint8 PIL image, with no surprises.

    Args:
        im_numpy: (H, W, C) numpy array. Must not contain alpha channel. C must
            be 3. The channel order is assumed to be RGB. The dtype must be
            uint8, float32, or float64. The range of values will be checked.

    Returns:
        A uint8 PIL image.
    """
    ct.sanity.assert_shape(im_numpy, (None, None, 3), "im_numpy")
    dtype = im_numpy.dtype
    if dtype == np.uint8:
        # uint8 images pass through unchanged (range-checked only).
        assert np.min(im_numpy) >= 0 and np.max(im_numpy) <= 255
        return Image.fromarray(im_numpy)
    if dtype == np.float32 or dtype == np.float64:
        # Float images must be in [0, 1]; scale to [0, 255] and round.
        assert np.min(im_numpy) >= 0 and np.max(im_numpy) <= 1
        return Image.fromarray(np.round(im_numpy * 255).astype(np.uint8))
    raise ValueError(f"Unsupported dtype {dtype}")
def pil_to_numpy(im_pil):
    """
    Convert a (uint8) PIL image to a numpy array, with no surprises.

    Args:
        im_pil: An RGB PIL image. Must not contain alpha channel. The channel
            order is assumed to be RGB.

    Returns:
        A float32 numpy array of shape (H, W, C) where C == 3.
    """
    assert im_pil.mode == "RGB"
    # Scale uint8 [0, 255] values down to float32 [0, 1].
    return np.array(im_pil, dtype=np.float32) / 255.0
class MatcherDKM(Matcher):
    # Wrapper around the DKMv3 dense matcher, with optional swap/rotate
    # augmentations applied around the base matcher.
    def __init__(self, model_type, augmentations=("swap", "rotate")):
        """
        DKMv3 matcher.

        Args:
            model_type: "outdoor" or "indoor".
            augmentations: List of augmentations to apply to the images.
                The following augmentations are supported:
                - "swap": Swap the images.
                - "rotate": Rotate the images by 90, 180, or 270 degrees.
        """
        super().__init__()

        # https://github.com/Parskatt/DKM/blob/main/dkm/models/model_zoo/__init__.py
        # Each model type uses a fixed working resolution (w, h); the
        # portrait variant is chosen for images taller than wide.
        if model_type == "outdoor":
            self.dkm_model = DKMv3_outdoor()
            self.dkm_wh_landscape = (720, 540)
            self.dkm_wh_portrait = (540, 720)
        elif model_type == "indoor":
            self.dkm_model = DKMv3_indoor()
            self.dkm_wh_landscape = (640, 480)
            self.dkm_wh_portrait = (480, 640)
        else:
            raise ValueError(f"Unknown model_type {model_type}")

        # Initialize augmentations.
        for aug in augmentations:
            if aug not in ("swap", "rotate"):
                raise ValueError(f"Unknown augmentation {aug}")
        self.augmentations = augmentations

    def match_image_pair(self, im_src, im_dst):
        """
        Match two images.

        Args:
            im_src: shape (h, w, 3), float32, range [0-1]
            im_dst: shape (h, w, 3), float32, range [0-1]

        Returns:
            (src_pixels, dst_pixels):
                - src_pixels: shape (N, 2), float32, order: (col, row)
                - dst_pixels: shape (N, 2), float32, order: (col, row)
        """
        # Each matcher already removes duplicates before returning.
        # Wrap the base matcher with the enabled augmentations: "rotate" is
        # the inner wrapper, "swap" the outer one.
        matcher = self._matcher_base
        if "rotate" in self.augmentations:
            matcher = functools.partial(
                Matcher._matcher_aug_rotate,
                matcher=matcher,
            )
        if "swap" in self.augmentations:
            matcher = functools.partial(
                Matcher._matcher_aug_swap,
                matcher=matcher,
            )
        src_pixels, dst_pixels, confidences = matcher(im_src, im_dst)

        return src_pixels, dst_pixels, confidences

    def _matcher_base(self, im_src, im_dst):
        """
        Match two images.

        Args:
            im_src: shape (h, w, 3), float32, range [0-1]
            im_dst: shape (h, w, 3), float32, range [0-1]

        Returns:
            (src_pixels, dst_pixels):
                - src_pixels: shape (N, 2), float32, order: (col, row)
                - dst_pixels: shape (N, 2), float32, order: (col, row)
                - confidences: shape (N, ), float32, range [0, 1], actually [0.5, 1]

        Note: the returned pixel arrays are float32 (rounded to integer
        values); an earlier docstring said int64, contradicting the
        astype(np.float32) below.
        """
        # Sanity checks.
        Matcher.assert_im_dtype_range_ndim(im_src, dtype=np.float32, ndim=3)
        Matcher.assert_im_dtype_range_ndim(im_dst, dtype=np.float32, ndim=3)
        src_original_wh = (im_src.shape[1], im_src.shape[0])
        dst_original_wh = (im_dst.shape[1], im_dst.shape[0])
        src_original_w, src_original_h = src_original_wh
        dst_original_w, dst_original_h = dst_original_wh

        # Both src and dst will be reshaped to dkm_wh, regardless of their
        # original aspect ratio.
        if src_original_w > src_original_h:
            dkm_wh = self.dkm_wh_landscape
        else:
            dkm_wh = self.dkm_wh_portrait

        # Resize images.
        im_src = ct.image.resize(im_src, shape_wh=dkm_wh, aspect_ratio_fill=[0, 0, 0])
        im_dst = ct.image.resize(im_dst, shape_wh=dkm_wh, aspect_ratio_fill=[0, 0, 0])
        im_src = numpy_to_pil(im_src)
        im_dst = numpy_to_pil(im_dst)

        # Run DKM.
        # flow.shape: (h, 2 * w, 4), (480, 1280, 4)
        # confidence: (h, 2 * w) , (480, 1280)
        flow, confidences = self.dkm_model.match(im_src, im_dst)

        # Keep only matches the model is confident about (> 0.5).
        # valid_mask: (h, 2 * w) , (480, 1280)
        valid_mask = (confidences > 0.5).cpu().numpy()
        src_ndc_coords = flow[..., :2].cpu().numpy()[valid_mask]  # (n, 2)
        dst_ndc_coords = flow[..., 2:].cpu().numpy()[valid_mask]  # (n, 2)
        # (h, 2 * w) -> (N,)
        confidences = confidences.cpu().numpy()[valid_mask]

        # Convert NDC to pixel coordinates.
        src_pixels = ct.image.ndc_coords_to_pixels(src_ndc_coords, dkm_wh)
        dst_pixels = ct.image.ndc_coords_to_pixels(dst_ndc_coords, dkm_wh)

        # Convert to the original image pixel coordinates.
        src_pixels = ct.image.recover_resized_pixels(
            src_pixels, src_original_wh, dkm_wh, keep_aspect_ratio=True
        )
        dst_pixels = ct.image.recover_resized_pixels(
            dst_pixels, dst_original_wh, dkm_wh, keep_aspect_ratio=True
        )
        src_pixels = np.round(src_pixels).astype(np.float32)
        dst_pixels = np.round(dst_pixels).astype(np.float32)

        # Rounding might cause out-of-bound. Drop any correspondence whose
        # rounded coordinates fall outside either original image.
        mask = np.ones((src_pixels.shape[0],), dtype=bool)
        mask = np.logical_and(mask, src_pixels[:, 0] >= 0)
        mask = np.logical_and(mask, src_pixels[:, 0] < src_original_w)
        mask = np.logical_and(mask, src_pixels[:, 1] >= 0)
        mask = np.logical_and(mask, src_pixels[:, 1] < src_original_h)
        mask = np.logical_and(mask, dst_pixels[:, 0] >= 0)
        mask = np.logical_and(mask, dst_pixels[:, 0] < dst_original_w)
        mask = np.logical_and(mask, dst_pixels[:, 1] >= 0)
        mask = np.logical_and(mask, dst_pixels[:, 1] < dst_original_h)
        src_pixels = src_pixels[mask, :]
        dst_pixels = dst_pixels[mask, :]
        confidences = confidences[mask]

        # Unique.
        (
            src_pixels,
            dst_pixels,
            confidences,
        ) = Matcher._unique_src_dst_confidences(
            src_pixels=src_pixels,
            dst_pixels=dst_pixels,
            confidences=confidences,
        )

        return src_pixels, dst_pixels, confidences
| yxlao/corres-nerf | 49 | (NeurIPS 2023) CorresNeRF: Image Correspondence Priors for Neural Radiance Fields | yxlao | Yixing Lao | HKU-CS | |
src/nerf/load_llff.py | Python | import os
import imageio
import numpy as np
########## Slightly modified version of LLFF data loading code
########## see https://github.com/Fyusion/LLFF for original
def _minify(basedir, factors=[], resolutions=[]):
    """
    Create downsampled copies of `{basedir}/images` with ImageMagick.

    For each integer factor f, writes `images_{f}`; for each
    (height, width) resolution, writes `images_{width}x{height}`.
    Directories that already exist are left untouched. Requires the
    `mogrify`, `cp`, and `rm` commands on the PATH.

    NOTE: factors/resolutions use mutable defaults; they are only read here,
    never mutated, so the mutable-default pitfall does not apply (kept for
    compatibility with the upstream LLFF loader).
    """
    # Skip all work if every requested output directory already exists.
    needtoload = False
    for r in factors:
        imgdir = os.path.join(basedir, "images_{}".format(r))
        if not os.path.exists(imgdir):
            needtoload = True
    for r in resolutions:
        imgdir = os.path.join(basedir, "images_{}x{}".format(r[1], r[0]))
        if not os.path.exists(imgdir):
            needtoload = True
    if not needtoload:
        return

    # Fix: removed unused `from shutil import copy`; copying is done via
    # the `cp` shell command below.
    from subprocess import check_output

    imgdir = os.path.join(basedir, "images")
    imgs = [os.path.join(imgdir, f) for f in sorted(os.listdir(imgdir))]
    imgs = [
        f
        for f in imgs
        if any([f.endswith(ex) for ex in ["JPG", "jpg", "png", "jpeg", "PNG"]])
    ]
    imgdir_orig = imgdir

    wd = os.getcwd()
    for r in factors + resolutions:
        # Integer entries are percentage factors; pairs are exact sizes.
        if isinstance(r, int):
            name = "images_{}".format(r)
            resizearg = "{}%".format(100.0 / r)
        else:
            name = "images_{}x{}".format(r[1], r[0])
            resizearg = "{}x{}".format(r[1], r[0])
        imgdir = os.path.join(basedir, name)
        if os.path.exists(imgdir):
            continue

        print("Minifying", r, basedir)

        # Copy the originals, then resize them in place with mogrify.
        # WARNING: shell=True with interpolated paths — only acceptable
        # because basedir comes from trusted local configuration.
        os.makedirs(imgdir)
        check_output("cp {}/* {}".format(imgdir_orig, imgdir), shell=True)

        ext = imgs[0].split(".")[-1]
        args = " ".join(
            ["mogrify", "-resize", resizearg, "-format", "png", "*.{}".format(ext)]
        )
        print(args)
        os.chdir(imgdir)
        check_output(args, shell=True)
        os.chdir(wd)

        if ext != "png":
            # mogrify wrote .png copies; delete the original-format files.
            check_output("rm {}/*.{}".format(imgdir, ext), shell=True)
            print("Removed duplicates")
        print("Done")
def _load_data(basedir, factor=None, width=None, height=None, load_imgs=True):
    """
    Load poses, depth bounds, and (optionally) images for an LLFF scene.

    Args:
        basedir: scene directory containing `images/` and `poses_bounds.npy`.
        factor: integer downsample factor; `images_{factor}/` is created via
            `_minify` if missing.
        width/height: alternatively, a target width or height; the other side
            is derived from the original aspect ratio.
        load_imgs: when False, return only (poses, bds).

    Returns:
        (poses, bds) if load_imgs is False, else (poses, bds, imgs), where
        poses is (3, 5, N) [R|t|hwf] matrices, bds is (2, N) near/far bounds,
        and imgs is (H, W, 3, N) in [0, 1]. Returns None when the image
        directory is missing or image/pose counts disagree.
    """
    poses_arr = np.load(os.path.join(basedir, "poses_bounds.npy"))
    # (N, 15) -> (3, 5, N): per-image 3x5 [R|t|hwf] matrices.
    poses = poses_arr[:, :-2].reshape([-1, 3, 5]).transpose([1, 2, 0])
    # (N, 2) -> (2, N): near/far depth bounds.
    bds = poses_arr[:, -2:].transpose([1, 0])

    # Original resolution, read from the first image on disk.
    img0 = [
        os.path.join(basedir, "images", f)
        for f in sorted(os.listdir(os.path.join(basedir, "images")))
        if f.endswith("JPG") or f.endswith("jpg") or f.endswith("png")
    ][0]
    sh = imageio.imread(img0).shape

    # Determine the downsampled image directory suffix and ensure the
    # downsampled images exist (fix: removed the no-op `factor = factor`).
    sfx = ""
    if factor is not None:
        sfx = "_{}".format(factor)
        _minify(basedir, factors=[factor])
    elif height is not None:
        factor = sh[0] / float(height)
        width = int(sh[1] / factor)
        _minify(basedir, resolutions=[[height, width]])
        sfx = "_{}x{}".format(width, height)
    elif width is not None:
        factor = sh[1] / float(width)
        height = int(sh[0] / factor)
        _minify(basedir, resolutions=[[height, width]])
        sfx = "_{}x{}".format(width, height)
    else:
        factor = 1

    imgdir = os.path.join(basedir, "images" + sfx)
    if not os.path.exists(imgdir):
        print(imgdir, "does not exist, returning")
        return

    imgfiles = [
        os.path.join(imgdir, f)
        for f in sorted(os.listdir(imgdir))
        if f.endswith("JPG") or f.endswith("jpg") or f.endswith("png")
    ]
    if poses.shape[-1] != len(imgfiles):
        print(
            "Mismatch between imgs {} and poses {} !!!!".format(
                len(imgfiles), poses.shape[-1]
            )
        )
        return

    # Update hwf to the (possibly downsampled) resolution: height/width are
    # replaced, the focal length is scaled by 1/factor.
    sh = imageio.imread(imgfiles[0]).shape
    poses[:2, 4, :] = np.array(sh[:2]).reshape([2, 1])
    poses[2, 4, :] = poses[2, 4, :] * 1.0 / factor

    if not load_imgs:
        return poses, bds

    def imread(f):
        # PNG: disable gamma handling so values match the JPG path.
        if f.endswith("png"):
            return imageio.imread(f, format="PNG-PIL", ignoregamma=True)
        else:
            return imageio.imread(f)

    # Drop any alpha channel, scale to [0, 1], and stack to (H, W, 3, N)
    # (fix: removed the duplicated `imgs = imgs = [...]` assignment).
    imgs = [imread(f)[..., :3] / 255.0 for f in imgfiles]
    imgs = np.stack(imgs, -1)

    print("Loaded image data", imgs.shape, poses[:, -1, 0])
    return poses, bds, imgs
def normalize(x):
    """Return x scaled to unit Euclidean length."""
    length = np.linalg.norm(x)
    return x / length
def viewmatrix(z, up, pos):
    """
    Build a 3x4 camera-to-world matrix from a viewing direction `z`, an up
    hint `up`, and a camera position `pos`. Columns are
    (right, true-up, forward, position).
    """
    unit = lambda v: v / np.linalg.norm(v)
    forward = unit(z)
    # Re-orthogonalize: right = up x forward, true-up = forward x right.
    right = unit(np.cross(up, forward))
    true_up = unit(np.cross(forward, right))
    return np.stack([right, true_up, forward, pos], 1)
def ptstocam(pts, c2w):
    """Transform world-space points into the camera frame of pose c2w."""
    offsets = (pts - c2w[:3, 3])[..., np.newaxis]
    return np.matmul(c2w[:3, :3].T, offsets)[..., 0]
def poses_avg(poses):
    """
    Average a set of (N, 3, 5) poses into one 3x5 "central" pose: position is
    the mean camera center, orientation comes from the summed forward and up
    vectors, and the hwf column is copied from the first pose.
    """
    hwf = poses[0, :3, -1:]
    center = poses[:, :3, 3].mean(0)
    forward = normalize(poses[:, :3, 2].sum(0))
    up = poses[:, :3, 1].sum(0)
    return np.concatenate([viewmatrix(forward, up, center), hwf], 1)
def render_path_spiral(c2w, up, rads, focal, zdelta, zrate, rots, N):
    """
    Generate N camera poses along a spiral around the central pose c2w.

    Args:
        c2w: 3x5 central pose (with hwf column).
        up: up vector shared by all generated poses.
        rads: per-axis radii of the spiral.
        focal: focus depth; every pose looks at the point `focal` in front of
            the central camera.
        zdelta: unused in this function; kept for call-site compatibility.
        zrate: rate of the z oscillation relative to the spiral rotation.
        rots: number of full rotations.
        N: number of poses to generate.

    Returns:
        List of N 3x5 poses (hwf column copied from c2w).
    """
    render_poses = []
    rads = np.array(list(rads) + [1.0])
    hwf = c2w[:, 4:5]

    for theta in np.linspace(0.0, 2.0 * np.pi * rots, N + 1)[:-1]:
        # Camera center on the spiral, expressed in world coordinates.
        local_offset = (
            np.array([np.cos(theta), -np.sin(theta), -np.sin(theta * zrate), 1.0])
            * rads
        )
        c = np.dot(c2w[:3, :4], local_offset)
        # Look towards the focus point in front of the central camera.
        z = normalize(c - np.dot(c2w[:3, :4], np.array([0, 0, -focal, 1.0])))
        render_poses.append(np.concatenate([viewmatrix(z, up, c), hwf], 1))
    return render_poses
def recenter_poses(poses):
    """
    Re-express all poses relative to their average pose, so the average pose
    becomes the identity. The hwf column is preserved.
    """
    recentered = poses + 0  # copy, keeping the extra hwf column intact
    bottom = np.reshape([0, 0, 0, 1.0], [1, 4])

    # Homogeneous 4x4 version of the average pose.
    avg_c2w = np.concatenate([poses_avg(poses)[:3, :4], bottom], -2)

    # Homogeneous 4x4 versions of every pose, then left-multiply by the
    # inverse average pose to recenter.
    bottoms = np.tile(np.reshape(bottom, [1, 1, 4]), [poses.shape[0], 1, 1])
    homog = np.concatenate([poses[:, :3, :4], bottoms], -2)
    recentered[:, :3, :4] = (np.linalg.inv(avg_c2w) @ homog)[:, :3, :4]
    return recentered
#####################
def spherify_poses(poses, bds):
    """
    Re-orient and rescale poses for a spherically captured ("360") scene and
    generate a circular render path.

    Args:
        poses: (N, 3, 5) poses with an hwf column last.
        bds: (N, 2) near/far depth bounds. NOTE: scaled in place AND returned.

    Returns:
        (poses_reset, new_poses, bds): the recentered/rescaled input poses, a
        circle of 120 render poses, and the rescaled bounds.
    """
    # Append a homogeneous bottom row to each 3x4 pose -> (N, 4, 4).
    p34_to_44 = lambda p: np.concatenate(
        [p, np.tile(np.reshape(np.eye(4)[-1, :], [1, 1, 4]), [p.shape[0], 1, 1])], 1
    )

    # Per-camera viewing direction and origin.
    rays_d = poses[:, :3, 2:3]
    rays_o = poses[:, :3, 3:4]

    def min_line_dist(rays_o, rays_d):
        # Closed-form least-squares point minimizing distance to all camera
        # rays (I - dd^T projects onto each ray's normal space).
        A_i = np.eye(3) - rays_d * np.transpose(rays_d, [0, 2, 1])
        b_i = -A_i @ rays_o
        pt_mindist = np.squeeze(
            -np.linalg.inv((np.transpose(A_i, [0, 2, 1]) @ A_i).mean(0)) @ (b_i).mean(0)
        )
        return pt_mindist

    pt_mindist = min_line_dist(rays_o, rays_d)

    # Build a new world frame centered at that point, with "up" pointing
    # from the center towards the mean camera position.
    center = pt_mindist
    up = (poses[:, :3, 3] - center).mean(0)

    vec0 = normalize(up)
    # Arbitrary vector [0.1, 0.2, 0.3] used only to seed an orthonormal basis.
    vec1 = normalize(np.cross([0.1, 0.2, 0.3], vec0))
    vec2 = normalize(np.cross(vec0, vec1))
    pos = center
    c2w = np.stack([vec1, vec2, vec0, pos], 1)

    # Transform every pose into the new frame.
    poses_reset = np.linalg.inv(p34_to_44(c2w[None])) @ p34_to_44(poses[:, :3, :4])

    # Rescale so the RMS camera distance from the origin is 1.
    rad = np.sqrt(np.mean(np.sum(np.square(poses_reset[:, :3, 3]), -1)))

    sc = 1.0 / rad
    poses_reset[:, :3, 3] *= sc
    bds *= sc  # in-place mutation of the caller's array
    rad *= sc

    # Render path: a horizontal circle at the mean camera height, with a
    # radius chosen so the render cameras stay on the unit sphere.
    centroid = np.mean(poses_reset[:, :3, 3], 0)
    zh = centroid[2]
    radcircle = np.sqrt(rad**2 - zh**2)
    new_poses = []

    for th in np.linspace(0.0, 2.0 * np.pi, 120):
        camorigin = np.array([radcircle * np.cos(th), radcircle * np.sin(th), zh])
        up = np.array([0, 0, -1.0])

        # Look towards the origin from each point on the circle.
        vec2 = normalize(camorigin)
        vec0 = normalize(np.cross(vec2, up))
        vec1 = normalize(np.cross(vec2, vec0))
        pos = camorigin
        p = np.stack([vec0, vec1, vec2, pos], 1)

        new_poses.append(p)

    new_poses = np.stack(new_poses, 0)

    # Re-attach the hwf column (copied from the first input pose).
    new_poses = np.concatenate(
        [new_poses, np.broadcast_to(poses[0, :3, -1:], new_poses[:, :3, -1:].shape)], -1
    )
    poses_reset = np.concatenate(
        [
            poses_reset[:, :3, :4],
            np.broadcast_to(poses[0, :3, -1:], poses_reset[:, :3, -1:].shape),
        ],
        -1,
    )

    return poses_reset, new_poses, bds
def load_llff_data(
    basedir, factor=8, recenter=True, bd_factor=0.75, spherify=False, path_zflat=False
):
    """
    Load an LLFF scene: images, poses, bounds, and a render path.

    Args:
        basedir: scene dir, containing images and annotations
        factor: downsampling factor for images
        recenter: recenter camera centers
        bd_factor: rescale scene to fit in bd_factor * unit ball
        spherify: spherically captured 360 scene
        path_zflat: render a flat (constant-z) half-spiral instead of the
            full spiral; by default this is not used

    Returns:
        (images, poses, bds, render_poses, i_test) where images is
        (N, H, W, 3) float32 in [0, 1], poses is (N, 3, 5), bds is (N, 2),
        render_poses is (M, 3, 5) float32, and i_test is the index of the
        held-out view.
    """
    # factor=8 downsamples original imgs by 8x
    poses, bds, imgs = _load_data(basedir, factor=factor)
    print("Loaded", basedir, bds.min(), bds.max())

    # Correct rotation matrix ordering and move variable dim to axis 0
    poses = np.concatenate([poses[:, 1:2, :], -poses[:, 0:1, :], poses[:, 2:, :]], 1)
    poses = np.moveaxis(poses, -1, 0).astype(np.float32)
    imgs = np.moveaxis(imgs, -1, 0).astype(np.float32)
    images = imgs
    bds = np.moveaxis(bds, -1, 0).astype(np.float32)

    # Rescale if bd_factor is provided
    sc = 1.0 if bd_factor is None else 1.0 / (bds.min() * bd_factor)
    poses[:, :3, 3] *= sc
    bds *= sc

    if recenter:
        poses = recenter_poses(poses)

    if spherify:
        poses, render_poses, bds = spherify_poses(poses, bds)
    else:
        c2w = poses_avg(poses)
        print("recentered", c2w.shape)
        print(c2w[:3, :4])

        ## Get spiral
        # Get average pose
        up = normalize(poses[:, :3, 1].sum(0))

        # Find a reasonable "focus depth" for this dataset
        close_depth, inf_depth = bds.min() * 0.9, bds.max() * 5.0
        dt = 0.75
        mean_dz = 1.0 / (((1.0 - dt) / close_depth + dt / inf_depth))
        focal = mean_dz

        # Get radii for spiral path (fix: removed unused `shrink_factor`)
        zdelta = close_depth * 0.2
        tt = poses[:, :3, 3]  # ptstocam(poses[:3,3,:].T, c2w).T
        rads = np.percentile(np.abs(tt), 90, 0)
        c2w_path = c2w
        N_views = 120
        N_rots = 2
        if path_zflat:
            # zloc = np.percentile(tt, 10, 0)[2]
            zloc = -close_depth * 0.1
            c2w_path[:3, 3] = c2w_path[:3, 3] + zloc * c2w_path[:3, 2]
            rads[2] = 0.0
            N_rots = 1
            # Bug fix: `N_views /= 2` produced a float, which later breaks
            # np.linspace's integer sample count (N + 1) inside
            # render_path_spiral; use floor division to keep it an int.
            N_views //= 2

        # Generate poses for spiral path
        render_poses = render_path_spiral(
            c2w_path, up, rads, focal, zdelta, zrate=0.5, rots=N_rots, N=N_views
        )

    render_poses = np.array(render_poses).astype(np.float32)

    c2w = poses_avg(poses)
    print("Data:")
    print(poses.shape, images.shape, bds.shape)

    # The held-out test view is the one whose camera center is closest to
    # the average pose.
    dists = np.sum(np.square(c2w[:3, 3] - poses[:, :3, 3]), -1)
    i_test = np.argmin(dists)
    print("HOLDOUT view is", i_test)

    images = images.astype(np.float32)
    poses = poses.astype(np.float32)

    return images, poses, bds, render_poses, i_test
| yxlao/corres-nerf | 49 | (NeurIPS 2023) CorresNeRF: Image Correspondence Priors for Neural Radiance Fields | yxlao | Yixing Lao | HKU-CS | |
src/nerf/run_nerf_helpers.py | Python | import numpy as np
import torch
# torch.autograd.set_detect_anomaly(True)
import torch.nn as nn
import torch.nn.functional as F
# Misc small helpers (named defs instead of lambda assignments, per PEP 8 E731;
# the module-level names and call signatures are unchanged).
def img2mse(x, y):
    """Mean squared error between prediction ``x`` and target ``y``."""
    return torch.mean((x - y) ** 2)


def mse2psnr(x):
    """Convert an MSE tensor to PSNR in dB; returns a 1-element tensor."""
    return -10.0 * torch.log(x) / torch.log(torch.Tensor([10.0]))


def to8b(x):
    """Map a float image in [0, 1] to uint8 in [0, 255] (clipping first)."""
    return (255 * np.clip(x, 0, 1)).astype(np.uint8)
# Positional encoding (section 5.1)
class Embedder:
    """NeRF positional encoding: optionally the raw input, plus each periodic
    function applied to the input scaled by a bank of frequencies."""

    def __init__(self, **kwargs):
        self.kwargs = kwargs
        self.create_embedding_fn()

    def create_embedding_fn(self):
        """Populate ``self.embed_fns`` and ``self.out_dim`` from the kwargs."""
        cfg = self.kwargs
        dim_in = cfg["input_dims"]
        fns = []
        total_dim = 0

        if cfg["include_input"]:
            fns.append(lambda x: x)
            total_dim += dim_in

        max_freq = cfg["max_freq_log2"]
        n_freqs = cfg["num_freqs"]
        if cfg["log_sampling"]:
            freqs = 2.0 ** torch.linspace(0.0, max_freq, steps=n_freqs)
        else:
            freqs = torch.linspace(2.0**0.0, 2.0**max_freq, steps=n_freqs)

        for freq in freqs:
            for periodic in cfg["periodic_fns"]:
                # Bind via defaults to avoid the late-binding closure pitfall.
                fns.append(lambda x, fn=periodic, f=freq: fn(x * f))
                total_dim += dim_in

        self.embed_fns = fns
        self.out_dim = total_dim

    def embed(self, inputs):
        """Apply every embedding function and concatenate on the last dim."""
        return torch.cat([fn(inputs) for fn in self.embed_fns], -1)


def get_embedder(multires, i=0):
    """Return ``(embed_fn, out_dim)`` for ``multires`` frequency octaves.

    ``i == -1`` disables the encoding and returns an identity map of dim 3.
    """
    if i == -1:
        return nn.Identity(), 3

    embedder = Embedder(
        include_input=True,
        input_dims=3,
        max_freq_log2=multires - 1,
        num_freqs=multires,
        log_sampling=True,
        periodic_fns=[torch.sin, torch.cos],
    )
    return embedder.embed, embedder.out_dim
# Model
class NeRF(nn.Module):
    """Standard NeRF MLP: a position branch producing density (alpha) plus a
    feature vector, and an optional view-direction branch producing RGB when
    ``use_viewdirs`` is True."""

    def __init__(
        self,
        D=8,
        W=256,
        input_ch=3,
        input_ch_views=3,
        output_ch=4,
        skips=[4],
        use_viewdirs=False,
    ):
        """Build the MLP.

        Args:
            D: number of layers in the position branch.
            W: hidden layer width.
            input_ch: channels of the (already embedded) position input.
            input_ch_views: channels of the (embedded) view-direction input.
            output_ch: output channels when ``use_viewdirs`` is False.
            skips: layer indices after which the position input is
                re-concatenated.
            use_viewdirs: if True, alpha comes from positions only and RGB
                from positions + view directions.

        NOTE(review): ``skips`` is a mutable default list; it is never
        mutated here, so sharing it across instances is harmless.
        """
        super(NeRF, self).__init__()
        self.D = D
        self.W = W
        self.input_ch = input_ch
        self.input_ch_views = input_ch_views
        self.skips = skips
        self.use_viewdirs = use_viewdirs
        # Layers at a skip index take W + input_ch features (the skip concat).
        self.pts_linears = nn.ModuleList(
            [nn.Linear(input_ch, W)]
            + [
                nn.Linear(W, W) if i not in self.skips else nn.Linear(W + input_ch, W)
                for i in range(D - 1)
            ]
        )
        # Implementation according to the official code release
        # https://github.com/bmild/nerf/blob/master/run_nerf_helpers.py#L104-L105
        self.views_linears = nn.ModuleList([nn.Linear(input_ch_views + W, W // 2)])
        # Implementation according to the paper
        # self.views_linears = nn.ModuleList(
        #     [nn.Linear(input_ch_views + W, W//2)] + [nn.Linear(W//2, W//2) for i in range(D//2)])
        if use_viewdirs:
            self.feature_linear = nn.Linear(W, W)
            self.alpha_linear = nn.Linear(W, 1)
            self.rgb_linear = nn.Linear(W // 2, 3)
        else:
            self.output_linear = nn.Linear(W, output_ch)

    def forward(self, x):
        """Run the MLP on ``x`` = concat(embedded points, embedded views).

        Returns concat(rgb, alpha) when ``use_viewdirs`` is True, else the
        raw ``output_ch``-dim output.
        """
        input_pts, input_views = torch.split(
            x, [self.input_ch, self.input_ch_views], dim=-1
        )
        h = input_pts
        for i, l in enumerate(self.pts_linears):
            h = self.pts_linears[i](h)
            h = F.relu(h)
            if i in self.skips:
                # Skip connection: re-inject the raw embedded positions.
                h = torch.cat([input_pts, h], -1)

        if self.use_viewdirs:
            alpha = self.alpha_linear(h)
            feature = self.feature_linear(h)
            h = torch.cat([feature, input_views], -1)

            for i, l in enumerate(self.views_linears):
                h = self.views_linears[i](h)
                h = F.relu(h)

            rgb = self.rgb_linear(h)
            outputs = torch.cat([rgb, alpha], -1)
        else:
            outputs = self.output_linear(h)

        return outputs

    def load_weights_from_keras(self, weights):
        """Copy weights exported from the TF/Keras reference implementation.

        ``weights`` is a flat list alternating (kernel, bias) per layer in the
        official export order: D position layers first, then feature, views,
        rgb, and alpha layers — hence the 2*i / 2*D(+2/+4/+6) index math.
        Kernels are transposed because Keras stores (in, out) vs torch's
        (out, in).
        """
        assert self.use_viewdirs, "Not implemented if use_viewdirs=False"

        # Load pts_linears
        for i in range(self.D):
            idx_pts_linears = 2 * i
            self.pts_linears[i].weight.data = torch.from_numpy(
                np.transpose(weights[idx_pts_linears])
            )
            self.pts_linears[i].bias.data = torch.from_numpy(
                np.transpose(weights[idx_pts_linears + 1])
            )

        # Load feature_linear
        idx_feature_linear = 2 * self.D
        self.feature_linear.weight.data = torch.from_numpy(
            np.transpose(weights[idx_feature_linear])
        )
        self.feature_linear.bias.data = torch.from_numpy(
            np.transpose(weights[idx_feature_linear + 1])
        )

        # Load views_linears
        idx_views_linears = 2 * self.D + 2
        self.views_linears[0].weight.data = torch.from_numpy(
            np.transpose(weights[idx_views_linears])
        )
        self.views_linears[0].bias.data = torch.from_numpy(
            np.transpose(weights[idx_views_linears + 1])
        )

        # Load rgb_linear
        idx_rbg_linear = 2 * self.D + 4
        self.rgb_linear.weight.data = torch.from_numpy(
            np.transpose(weights[idx_rbg_linear])
        )
        self.rgb_linear.bias.data = torch.from_numpy(
            np.transpose(weights[idx_rbg_linear + 1])
        )

        # Load alpha_linear
        idx_alpha_linear = 2 * self.D + 6
        self.alpha_linear.weight.data = torch.from_numpy(
            np.transpose(weights[idx_alpha_linear])
        )
        self.alpha_linear.bias.data = torch.from_numpy(
            np.transpose(weights[idx_alpha_linear + 1])
        )
# Ray helpers
def get_rays(H, W, K, c2w):
    """Per-pixel ray origins and directions in world space (torch version).

    Args:
        H, W: image height and width in pixels.
        K: indexable 3x3 camera intrinsics.
        c2w: camera-to-world transform, at least 3x4.

    Returns:
        (rays_o, rays_d), each of shape (H, W, 3).
    """
    # meshgrid defaults to 'ij' indexing; transpose so axis 0 is the row (y).
    grid_x, grid_y = torch.meshgrid(
        torch.linspace(0, W - 1, W), torch.linspace(0, H - 1, H)
    )
    grid_x, grid_y = grid_x.t(), grid_y.t()
    fx, fy = K[0][0], K[1][1]
    cx, cy = K[0][2], K[1][2]
    # Camera-frame direction per pixel; camera looks down -z, y flipped.
    dirs = torch.stack(
        [(grid_x - cx) / fx, -(grid_y - cy) / fy, -torch.ones_like(grid_x)], -1
    )
    # Rotate into the world frame: c2w[:3, :3] @ dir for every pixel.
    rays_d = torch.sum(dirs[..., np.newaxis, :] * c2w[:3, :3], -1)
    # Every ray starts at the camera center (translation column of c2w).
    rays_o = c2w[:3, -1].expand(rays_d.shape)
    return rays_o, rays_d
def get_rays_np(H, W, K, c2w):
    """NumPy twin of ``get_rays``: per-pixel world-space ray origins/directions.

    Returns (rays_o, rays_d), each (H, W, 3). ``rays_o`` is a read-only
    broadcast view of the camera center.
    """
    # xs/ys: (H, W) pixel coordinates, xs along width, ys along height.
    xs, ys = np.meshgrid(
        np.arange(W, dtype=np.float32), np.arange(H, dtype=np.float32), indexing="xy"
    )
    fx, fy = K[0][0], K[1][1]
    cx, cy = K[0][2], K[1][2]
    dirs = np.stack([(xs - cx) / fx, -(ys - cy) / fy, -np.ones_like(xs)], -1)
    # Rotate camera-frame directions into the world frame (per-pixel matvec).
    rays_d = np.sum(dirs[..., np.newaxis, :] * c2w[:3, :3], -1)
    # Origin = camera center, broadcast to one entry per pixel.
    rays_o = np.broadcast_to(c2w[:3, -1], np.shape(rays_d))
    return rays_o, rays_d
def get_rays_np_with_coords(H, W, K, c2w):
    """Like ``get_rays_np`` but also returns the pixel coordinate grids.

    Returns (rays_o, rays_d, i, j) where i holds x (column) coordinates and
    j holds y (row) coordinates, both (H, W) float32.
    """
    cols, rows = np.meshgrid(
        np.arange(W, dtype=np.float32), np.arange(H, dtype=np.float32), indexing="xy"
    )
    fx, fy = K[0][0], K[1][1]
    cx, cy = K[0][2], K[1][2]
    dirs = np.stack([(cols - cx) / fx, -(rows - cy) / fy, -np.ones_like(cols)], -1)
    # Per-pixel rotation into the world frame.
    rays_d = np.sum(dirs[..., np.newaxis, :] * c2w[:3, :3], -1)
    # All rays share the camera center as origin (broadcast, no copies).
    rays_o = np.broadcast_to(c2w[:3, -1], np.shape(rays_d))
    return rays_o, rays_d, cols, rows
def ndc_rays(H, W, focal, near, rays_o, rays_d):
    """Re-express rays in normalized device coordinates (NeRF, appendix C).

    Origins are first pushed along each ray onto the z = -near plane, then
    both origins and directions are projected into NDC.
    """
    # Move each origin onto the near plane.
    t = -(near + rays_o[..., 2]) / rays_d[..., 2]
    rays_o = rays_o + t[..., None] * rays_d

    ox, oy, oz = rays_o[..., 0], rays_o[..., 1], rays_o[..., 2]
    dx, dy, dz = rays_d[..., 0], rays_d[..., 1], rays_d[..., 2]
    # Common projection scales for x and y.
    sx = -1.0 / (W / (2.0 * focal))
    sy = -1.0 / (H / (2.0 * focal))

    o0 = sx * ox / oz
    o1 = sy * oy / oz
    o2 = 1.0 + 2.0 * near / oz
    d0 = sx * (dx / dz - ox / oz)
    d1 = sy * (dy / dz - oy / oz)
    d2 = -2.0 * near / oz

    return torch.stack([o0, o1, o2], -1), torch.stack([d0, d1, d2], -1)
# Hierarchical sampling (section 5.2)
def sample_pdf(bins, weights, N_samples, det=False, pytest=False):
    """Inverse-transform sample N_samples positions per row from the
    piecewise-constant PDF defined by ``weights`` over ``bins``.

    Args:
        bins: (batch, n_bins) bin edge positions.
        weights: (batch, n_bins - 1) unnormalized bin weights.
        N_samples: samples per batch row.
        det: if True, use evenly spaced u values instead of random ones.
        pytest: if True, replace u with numpy's seeded randoms (test mode).
    """
    # Build a CDF over the bins; the epsilon keeps normalization finite.
    weights = weights + 1e-5  # prevent nans
    pdf = weights / torch.sum(weights, -1, keepdim=True)
    cdf = torch.cumsum(pdf, -1)
    cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], -1)  # (batch, len(bins))

    sample_shape = list(cdf.shape[:-1]) + [N_samples]
    if det:
        u = torch.linspace(0.0, 1.0, steps=N_samples).expand(sample_shape)
    else:
        u = torch.rand(sample_shape)

    if pytest:
        # Overwrite u with numpy's fixed random numbers for reproducible tests.
        np.random.seed(0)
        if det:
            u = np.broadcast_to(np.linspace(0.0, 1.0, N_samples), sample_shape)
        else:
            u = np.random.rand(*sample_shape)
        u = torch.Tensor(u)

    # Invert the CDF: locate the interval bracketing each u.
    u = u.contiguous()
    idx = torch.searchsorted(cdf, u, right=True)
    lo = torch.max(torch.zeros_like(idx - 1), idx - 1)
    hi = torch.min((cdf.shape[-1] - 1) * torch.ones_like(idx), idx)
    bracket = torch.stack([lo, hi], -1)  # (batch, N_samples, 2)

    expand_shape = [bracket.shape[0], bracket.shape[1], cdf.shape[-1]]
    cdf_pair = torch.gather(cdf.unsqueeze(1).expand(expand_shape), 2, bracket)
    bins_pair = torch.gather(bins.unsqueeze(1).expand(expand_shape), 2, bracket)

    # Linear interpolation within the bracketing bin; guard tiny spans.
    span = cdf_pair[..., 1] - cdf_pair[..., 0]
    span = torch.where(span < 1e-5, torch.ones_like(span), span)
    frac = (u - cdf_pair[..., 0]) / span
    samples = bins_pair[..., 0] + frac * (bins_pair[..., 1] - bins_pair[..., 0])

    return samples
| yxlao/corres-nerf | 49 | (NeurIPS 2023) CorresNeRF: Image Correspondence Priors for Neural Radiance Fields | yxlao | Yixing Lao | HKU-CS | |
src/neus/dataset.py | Python | import os
from glob import glob
from pathlib import Path
import cv2 as cv
import numpy as np
import torch
from scipy.spatial.transform import Rotation as Rot
from scipy.spatial.transform import Slerp
from ..corres.corres_map import CorresMap, load_camera_split
# This function is borrowed from IDR: https://github.com/lioryariv/idr
def load_K_Rt_from_P(filename, P=None):
    """Decompose a 3x4 projection matrix into intrinsics and a c2w pose.

    Args:
        filename: text file holding the projection matrix; only read when
            ``P`` is None. A 4-line file is assumed to start with a header
            line, which is skipped.
        P: optional (3, 4) projection matrix; skips the file read if given.

    Returns:
        (intrinsics, pose): 4x4 intrinsics (K normalized so K[2, 2] == 1)
        and the 4x4 camera-to-world pose.
    """
    if P is None:
        # Fix: use a context manager so the file handle is closed promptly
        # (the original relied on the GC to close it).
        with open(filename) as f:
            lines = f.read().splitlines()
        if len(lines) == 4:
            lines = lines[1:]
        # Keep only the first 4 space-separated tokens of each line.
        lines = [[x[0], x[1], x[2], x[3]] for x in (x.split(" ") for x in lines)]
        P = np.asarray(lines).astype(np.float32).squeeze()

    # OpenCV returns (K, R, t_homogeneous, ...).
    out = cv.decomposeProjectionMatrix(P)
    K = out[0]
    R = out[1]
    t = out[2]

    K = K / K[2, 2]
    intrinsics = np.eye(4)
    intrinsics[:3, :3] = K

    # R is world-to-camera; transpose it and de-homogenize the camera center
    # to obtain a camera-to-world pose.
    pose = np.eye(4, dtype=np.float32)
    pose[:3, :3] = R.transpose()
    pose[:3, 3] = (t[:3] / t[3])[:, 0]

    return intrinsics, pose
def unpack_neus_camera_npz(camera_path) -> list:
    """Read per-camera matrices out of a NeuS-style camera npz file.

    The archive stores, for each camera index ``i`` (the cameras normalize
    the scene so the observed object's visual hull is roughly inside the
    unit sphere):

    - ``scale_mat_{i}``: (4, 4) normalization matrix, last row [0, 0, 0, 1].
    - ``world_mat_{i}``: (4, 4) world-to-pixel projection, last row
      [0, 0, 0, 1].

    Cameras are read starting at index 0 until either key is missing.
    Typical use of one entry: ``P = (world_mat @ scale_mat)[:3, :4]``.

    Returns:
        List of dicts with keys "scale_mat" and "world_mat", one per camera.
        (Fix: the original annotation said ``dict`` but a list is returned.)

    Raises:
        AssertionError: if a stored matrix has the wrong shape or last row.
    """
    npz = np.load(str(camera_path))

    cameras = []
    cam_id = 0
    while True:
        scale_mat = npz.get(f"scale_mat_{cam_id}", default=None)
        world_mat = npz.get(f"world_mat_{cam_id}", default=None)
        # Renamed from the misleading "all_are_none": we stop as soon as
        # ANY required key is missing, i.e. all cameras have been read.
        if scale_mat is None or world_mat is None:
            break

        assert scale_mat.shape == (4, 4)
        assert world_mat.shape == (4, 4)
        np.testing.assert_allclose(scale_mat[3, :], np.array([0, 0, 0, 1]))
        np.testing.assert_allclose(world_mat[3, :], np.array([0, 0, 0, 1]))

        cameras.append({"scale_mat": scale_mat, "world_mat": world_mat})
        cam_id += 1

    return cameras
class Dataset:
    """NeuS-style multi-view dataset: images, masks, per-view cameras, and
    (optionally) correspondence priors for one scene.

    Images/masks stay on CPU; camera tensors are moved to CUDA. Cameras are
    normalized by scale_mat so the scene of interest lies in the unit sphere.
    """

    def __init__(self, conf, scene_name, corres_enabled=False):
        """Load the scene ``scene_name`` as configured by ``conf``.

        Args:
            conf: config object (pyhocon-style; presumably — TODO confirm)
                with data_root, camera file names, and optional corres keys.
            scene_name: scene subdirectory name under data_root.
            corres_enabled: if True, also load the correspondence map.
        """
        super(Dataset, self).__init__()
        print("Load data: Begin")
        self.device = torch.device("cuda")
        self.conf = conf

        # Data
        self.data_root = Path(conf.get_string("data_root"))
        self.data_dir = self.data_root / scene_name

        # Corres data
        corres_root = conf.get_string("corres_root", None)
        if corres_root is not None:
            self.corres_root = Path(corres_root)
            self.corres_dir = self.corres_root / scene_name
        else:
            self.corres_root = None
            self.corres_dir = None

        self.render_cameras_name = conf.get_string("render_cameras_name")
        self.object_cameras_name = conf.get_string("object_cameras_name")

        camera_dict = np.load(self.data_dir / self.render_cameras_name)
        self.camera_dict = camera_dict
        self.images_list = sorted(list((self.data_dir / "image").glob("*.png")))
        self.images_list = [str(e) for e in self.images_list]
        self.n_images = len(self.images_list)
        # cv.imread yields BGR uint8; dividing by 256 maps to [0, ~0.996].
        self.images_np = (
            np.stack([cv.imread(im_name) for im_name in self.images_list]) / 256.0
        )
        self.masks_list = sorted(glob(os.path.join(self.data_dir, "mask/*.png")))
        self.masks_np = (
            np.stack([cv.imread(im_name) for im_name in self.masks_list]) / 256.0
        )

        # world_mat is a projection matrix from world to image
        self.world_mats_np = [
            camera_dict["world_mat_%d" % idx].astype(np.float32)
            for idx in range(self.n_images)
        ]

        self.scale_mats_np = []

        # scale_mat: used for coordinate normalization, we assume the scene to
        # render is inside a unit sphere at origin.
        # NOTE(review): the empty-list assignment above is immediately
        # overwritten here and is redundant.
        self.scale_mats_np = [
            camera_dict["scale_mat_%d" % idx].astype(np.float32)
            for idx in range(self.n_images)
        ]

        self.intrinsics_all = []
        self.pose_all = []
        self.Ks = []
        self.Ts = []

        # Decompose each normalized projection P = world_mat @ scale_mat into
        # intrinsics (K) and camera-to-world pose; Ts are world-to-camera.
        for scale_mat, world_mat in zip(self.scale_mats_np, self.world_mats_np):
            P = world_mat @ scale_mat
            P = P[:3, :4]
            intrinsics, pose = load_K_Rt_from_P(None, P)
            self.intrinsics_all.append(torch.from_numpy(intrinsics).float())
            self.pose_all.append(torch.from_numpy(pose).float())
            self.Ks.append(torch.from_numpy(intrinsics[:3, :3]).float())
            self.Ts.append(torch.from_numpy(np.linalg.inv(pose)).float())

        # [n_images, H, W, 3]
        self.images = torch.from_numpy(self.images_np.astype(np.float32)).cpu()
        # [n_images, H, W, 3]
        self.masks = torch.from_numpy(self.masks_np.astype(np.float32)).cpu()
        # [n_images, 4, 4]
        self.intrinsics_all = torch.stack(self.intrinsics_all).to(self.device)
        # [n_images, 4, 4]
        self.intrinsics_all_inv = torch.inverse(self.intrinsics_all)
        self.focal = self.intrinsics_all[0][0, 0]
        # [n_images, 4, 4]
        self.pose_all = torch.stack(self.pose_all).to(self.device)
        # [n_images, 3, 3]
        self.Ks = torch.stack(self.Ks).to(self.device)
        # [n_images, 4, 4]
        self.Ts = torch.stack(self.Ts).to(self.device)

        self.H, self.W = self.images.shape[1], self.images.shape[2]
        self.image_pixels = self.H * self.W

        # Load sparse views
        if "num_sparse_views" in conf:
            # Retrieve the sparse views from the config json file.
            assert "corres_dir" not in conf
            self.num_sparse_views = conf.get_int("num_sparse_views")
            camera_split_path = conf.get_string("camera_split_path")
            self.sparse_views, _ = load_camera_split(
                camera_split_path=camera_split_path,
                scene_name=scene_name,
                num_views=self.num_sparse_views,
            )
        else:
            # No sparse-view split configured: train on all views.
            self.num_sparse_views = self.n_images
            self.sparse_views = list(range(self.n_images))

        # Load corres dictionary
        if corres_enabled:
            corres_matcher = conf.get_string("corres_matcher", "")
            corres_config = conf.get_string("corres_config", "")
            corres_dir_name = "_".join(
                [
                    corres_matcher,
                    corres_config,
                    str(self.num_sparse_views),
                ]
            )
            corres_path = Path(self.corres_dir) / corres_dir_name / "corres.npz"
            self.corres_map = CorresMap.from_npz(corres_path=corres_path)
            num_corres = self.corres_map.get_num_corres()
            num_image_pairs = self.corres_map.get_num_image_pairs()
            self.corres_map.update_corres_points_map(
                normalized_Ks=self.Ks.cpu().numpy(),
                normalized_Ts=self.Ts.cpu().numpy(),
            )

            num_total_pixels = self.H * self.W * self.n_images
            corres_percentage = num_corres / num_total_pixels * 100.0
            print(
                f"Dataset stats:\n"
                f"- corres_path: {corres_path} {'does not exist' if not corres_path.is_file() else ''}\n"
                f"- {num_total_pixels} pixels, {num_corres} corres ({corres_percentage:.2f}%)\n"
                f"- {len(self.images)} images, {num_image_pairs} image pairs"
            )
        else:
            self.corres_map = None
            num_corres = 0
            num_image_pairs = 0
            print("Corres is not enabled.")

        object_bbox_min = np.array([-1.01, -1.01, -1.01, 1.0])
        object_bbox_max = np.array([1.01, 1.01, 1.01, 1.0])
        # Object scale mat: region of interest to **extract mesh**
        object_scale_mat = np.load(
            os.path.join(self.data_dir, self.object_cameras_name)
        )["scale_mat_0"]
        # Map the canonical bbox corners into the normalized scene frame.
        object_bbox_min = (
            np.linalg.inv(self.scale_mats_np[0])
            @ object_scale_mat
            @ object_bbox_min[:, None]
        )
        object_bbox_max = (
            np.linalg.inv(self.scale_mats_np[0])
            @ object_scale_mat
            @ object_bbox_max[:, None]
        )
        self.object_bbox_min = object_bbox_min[:3, 0]
        self.object_bbox_max = object_bbox_max[:3, 0]

        print("Load data: End")

    def gen_rays_at(self, img_idx, resolution_level=1):
        """
        Generate rays at world space from one camera.

        Returns (rays_o, rays_v) transposed to (H//l, W//l, 3).
        """
        l = resolution_level
        tx = torch.linspace(0, self.W - 1, self.W // l)
        ty = torch.linspace(0, self.H - 1, self.H // l)
        pixels_x, pixels_y = torch.meshgrid(tx, ty, indexing="ij")
        p = torch.stack(
            [pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1
        )  # W, H, 3
        # Unproject pixels to camera-frame directions via K^-1.
        p = torch.matmul(
            self.intrinsics_all_inv[img_idx, None, None, :3, :3], p[:, :, :, None]
        ).squeeze()  # W, H, 3
        rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True)  # W, H, 3
        # Rotate to world frame and take the camera center as origin.
        rays_v = torch.matmul(
            self.pose_all[img_idx, None, None, :3, :3], rays_v[:, :, :, None]
        ).squeeze()  # W, H, 3
        rays_o = self.pose_all[img_idx, None, None, :3, 3].expand(
            rays_v.shape
        )  # W, H, 3
        return rays_o.transpose(0, 1), rays_v.transpose(0, 1)

    def gen_rays_at_custom_camera(self, K, T, resolution_level=1):
        """
        Generate rays at world space from one camera.

        ``K`` is a 3x3 intrinsics matrix and ``T`` a 4x4 world-to-camera
        transform, both numpy arrays.
        """
        l = resolution_level
        tx = torch.linspace(0, self.W - 1, self.W // l)
        ty = torch.linspace(0, self.H - 1, self.H // l)
        pixels_x, pixels_y = torch.meshgrid(tx, ty, indexing="ij")
        p = torch.stack(
            [pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1
        )  # W, H, 3
        # K^-1 @ pixel -> camera-frame direction.
        p = torch.matmul(
            torch.from_numpy(np.linalg.inv(K)[None, None, :].astype(np.float32)).to(
                self.device
            ),
            p[:, :, :, None],
        ).squeeze()  # W, H, 3
        rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True)  # W, H, 3
        # T^-1 rotation maps camera frame -> world frame.
        rays_v = torch.matmul(
            torch.from_numpy(
                np.linalg.inv(T)[:3, :3][None, None, :].astype(np.float32)
            ).to(self.device),
            rays_v[:, :, :, None],
        ).squeeze()  # W, H, 3
        # W, H, 3
        rays_o = (
            torch.from_numpy(np.linalg.inv(T)[:3, 3][None, None, :].astype(np.float32))
            .to(self.device)
            .expand(rays_v.shape)
        )
        return rays_o.transpose(0, 1), rays_v.transpose(0, 1)

    def gen_random_rays_at_pixels(self, img_indices, pixels_x, pixels_y):
        """
        Generate rays at world space, possibly from multiple cameras.

        Args:
            img_indices: list of camera indices
            pixels_x: cols
            pixels_y: rows

        Returns,
            (rays_o, rays_d, color, mask) in cuda tensors.
            rays_o: (512, 3), ray origins.
            rays_d: (512, 3), normalized to unit length, ray directions.
            color: (512, 3), float32 gt color
            mask: (512, 1), float32, 0 for background, 0.9961 for foreground.
        """
        assert len(img_indices) == len(pixels_x) == len(pixels_y)

        # color: (batch_size, 3), float32.
        color = self.images[img_indices.cpu(), pixels_y.cpu(), pixels_x.cpu()]
        color = color.cuda()

        # mask: (batch_size, 1), float32.
        mask = self.masks[img_indices.cpu(), pixels_y.cpu(), pixels_x.cpu()]
        mask = mask.cuda()

        # p: (batch_size, 3), float32.
        # initialize to [[x, y, 1],
        #                [x, y, 1],
        #                [...     ]]
        pixels = torch.stack(
            [pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1
        ).float()
        pixels = pixels.cuda()

        # self.intrinsics_all_inv                      : (num_images, 4, 4).
        # self.intrinsics_all_inv[img_indices, :3, :3] : (batch_size, 3, 3).
        # p[:, :, None])                               : (batch_size, 3, 1).
        # p (result from batched matmul)               : (batch_size, 3, 1).
        # p squeezed                                   : (batch_size, 3).
        pixels_in_camera = torch.matmul(
            self.intrinsics_all_inv[img_indices, :3, :3], pixels[:, :, None]
        ).squeeze(dim=-1)

        # batch_size, 3
        rays_v = pixels_in_camera / torch.linalg.norm(
            pixels_in_camera, ord=2, dim=-1, keepdim=True
        )

        # batch_size, 3
        rays_v = torch.matmul(
            self.pose_all[img_indices, :3, :3], rays_v[:, :, None]
        ).squeeze(dim=-1)

        # Camera center
        # 3d coordinates, not homogeneous.
        # batch_size, 3
        rays_o = self.pose_all[img_indices, :3, 3]
        # .expand(rays_v.shape)

        # rays_o: (512, 3)
        # rays_v: (512, 3)
        # color : (512, 3)
        # mask  : (512, 1)
        #
        # output: (512, 10)
        ret = rays_o, rays_v, color, mask[:, :1]

        return ret

    def gen_random_rays_at(self, img_idx, batch_size):
        """
        Generate random rays at world space from one camera.
        """
        # Random x (col) and y (row) indices.
        pixels_x = torch.randint(low=0, high=self.W, size=[batch_size])
        pixels_y = torch.randint(low=0, high=self.H, size=[batch_size])

        # color: (batch_size, 3), float32.
        # max color is 0.9961 (255 / 256).
        # min color is 0.
        color = self.images[img_idx][(pixels_y, pixels_x)]

        # mask: (batch_size, 3), float32.
        # value either 0.9961 or 0.
        mask = self.masks[img_idx][(pixels_y, pixels_x)]

        # p: (batch_size, 3), float32.
        # initialize to [[x, y, 1],
        #                [x, y, 1],
        #                [...     ]]
        p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1).float()

        # self.intrinsics_all_inv                       : (num_images, 4, 4).
        # self.intrinsics_all_inv[img_idx, None, :3, :3]: (1         , 3, 3).
        # p[:, :, None])                                : (batch_size, 3, 1).
        # p (result from batched matmul)                : (batch_size, 3, 1).
        # p squeezed                                    : (batch_size, 3).
        p = torch.matmul(
            self.intrinsics_all_inv[img_idx, None, :3, :3], p[:, :, None]
        ).squeeze()

        # batch_size, 3
        rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True)

        # batch_size, 3
        rays_v = torch.matmul(
            self.pose_all[img_idx, None, :3, :3], rays_v[:, :, None]
        ).squeeze()

        # Camera center
        # 3d coordinates, not homogeneous.
        # batch_size, 3
        rays_o = self.pose_all[img_idx, None, :3, 3].expand(rays_v.shape)

        # rays_o: (512, 3)
        # rays_v: (512, 3)
        # color : (512, 3)
        # mask  : (512, 1)
        #
        # output: (512, 10)
        ret = rays_o, rays_v, color.cuda(), mask[:, :1].cuda()

        return ret

    def gen_rays_between(self, idx_0, idx_1, ratio, resolution_level=1):
        """
        Interpolate pose between two cameras.

        Translation is lerped and rotation slerped at ``ratio`` in [0, 1];
        returns (rays_o, rays_v) shaped (H//l, W//l, 3).
        """
        l = resolution_level
        tx = torch.linspace(0, self.W - 1, self.W // l)
        ty = torch.linspace(0, self.H - 1, self.H // l)
        pixels_x, pixels_y = torch.meshgrid(tx, ty, indexing="ij")
        p = torch.stack(
            [pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1
        )  # W, H, 3
        p = torch.matmul(
            self.intrinsics_all_inv[0, None, None, :3, :3], p[:, :, :, None]
        ).squeeze()  # W, H, 3
        rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True)  # W, H, 3
        trans = (
            self.pose_all[idx_0, :3, 3] * (1.0 - ratio)
            + self.pose_all[idx_1, :3, 3] * ratio
        )
        pose_0 = self.pose_all[idx_0].detach().cpu().numpy()
        pose_1 = self.pose_all[idx_1].detach().cpu().numpy()
        # Interpolate in the world-to-camera frame, then invert back.
        pose_0 = np.linalg.inv(pose_0)
        pose_1 = np.linalg.inv(pose_1)
        rot_0 = pose_0[:3, :3]
        rot_1 = pose_1[:3, :3]
        rots = Rot.from_matrix(np.stack([rot_0, rot_1]))
        key_times = [0, 1]
        slerp = Slerp(key_times, rots)
        rot = slerp(ratio)
        pose = np.diag([1.0, 1.0, 1.0, 1.0])
        pose = pose.astype(np.float32)
        pose[:3, :3] = rot.as_matrix()
        pose[:3, 3] = ((1.0 - ratio) * pose_0 + ratio * pose_1)[:3, 3]
        pose = np.linalg.inv(pose)
        rot = torch.from_numpy(pose[:3, :3]).cuda()
        trans = torch.from_numpy(pose[:3, 3]).cuda()
        rays_v = torch.matmul(
            rot[None, None, :3, :3], rays_v[:, :, :, None]
        ).squeeze()  # W, H, 3
        rays_o = trans[None, None, :3].expand(rays_v.shape)  # W, H, 3
        return rays_o.transpose(0, 1), rays_v.transpose(0, 1)

    def image_at(self, idx, resolution_level):
        """Return image ``idx`` as uint8 BGR, downscaled by resolution_level."""
        img_float = self.images[idx].numpy()  # float, BGR
        img = (img_float * 255.0).astype(np.uint8)
        return (
            cv.resize(img, (self.W // resolution_level, self.H // resolution_level))
        ).clip(0, 255)
| yxlao/corres-nerf | 49 | (NeurIPS 2023) CorresNeRF: Image Correspondence Priors for Neural Radiance Fields | yxlao | Yixing Lao | HKU-CS | |
src/neus/embedder.py | Python | import torch
# Positional encoding embedding. Code was taken from https://github.com/bmild/nerf.
class Embedder:
    """Positional encoding: optionally the raw input, plus each periodic
    function applied to the input scaled by a bank of frequencies."""

    def __init__(self, **kwargs):
        self.kwargs = kwargs
        self.create_embedding_fn()

    def create_embedding_fn(self):
        """Populate ``self.embed_fns`` and ``self.out_dim`` from the kwargs."""
        cfg = self.kwargs
        dim_in = cfg["input_dims"]
        fns = []
        total_dim = 0

        if cfg["include_input"]:
            fns.append(lambda x: x)
            total_dim += dim_in

        max_freq = cfg["max_freq_log2"]
        n_freqs = cfg["num_freqs"]
        if cfg["log_sampling"]:
            freqs = 2.0 ** torch.linspace(0.0, max_freq, n_freqs)
        else:
            freqs = torch.linspace(2.0**0.0, 2.0**max_freq, n_freqs)

        for freq in freqs:
            for periodic in cfg["periodic_fns"]:
                # Bind via defaults to avoid the late-binding closure pitfall.
                fns.append(lambda x, fn=periodic, f=freq: fn(x * f))
                total_dim += dim_in

        self.embed_fns = fns
        self.out_dim = total_dim

    def embed(self, inputs):
        """Apply every embedding function and concatenate on the last dim."""
        return torch.cat([fn(inputs) for fn in self.embed_fns], -1)


def get_embedder(multires, input_dims=3):
    """Return ``(embed_fn, out_dim)`` for a ``multires``-octave encoding of
    ``input_dims``-dimensional inputs."""
    embedder_obj = Embedder(
        include_input=True,
        input_dims=input_dims,
        max_freq_log2=multires - 1,
        num_freqs=multires,
        log_sampling=True,
        periodic_fns=[torch.sin, torch.cos],
    )

    def embed(x, eo=embedder_obj):
        return eo.embed(x)

    return embed, embedder_obj.out_dim
| yxlao/corres-nerf | 49 | (NeurIPS 2023) CorresNeRF: Image Correspondence Priors for Neural Radiance Fields | yxlao | Yixing Lao | HKU-CS | |
src/neus/fields.py | Python | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .embedder import get_embedder
# This implementation is borrowed from IDR: https://github.com/lioryariv/idr
class SDFNetwork(nn.Module):
    """MLP mapping 3D points to (sdf, feature...) with optional positional
    encoding, skip connections, geometric initialization, and weight norm."""

    def __init__(
        self,
        d_in,
        d_out,
        d_hidden,
        n_layers,
        skip_in=(4,),
        multires=0,
        bias=0.5,
        scale=1,
        geometric_init=True,
        weight_norm=True,
        inside_outside=False,
    ):
        """Build the SDF MLP.

        Args:
            d_in: raw input dimension (3 for points).
            d_out: output dimension (first channel is the SDF value).
            d_hidden: hidden layer width.
            n_layers: number of hidden layers.
            skip_in: layer indices receiving a skip concat of the input.
            multires: positional-encoding octaves (0 disables it).
            bias: magnitude of the last layer's bias under geometric init,
                so the initial SDF approximates a sphere of that radius.
            scale: input is multiplied (and the SDF output divided) by this.
            geometric_init: use the IDR geometric initialization scheme.
            weight_norm: wrap each linear layer in weight normalization.
            inside_outside: flip the init sign convention (object "inside").
        """
        super(SDFNetwork, self).__init__()

        dims = [d_in] + [d_hidden for _ in range(n_layers)] + [d_out]

        self.embed_fn_fine = None

        if multires > 0:
            embed_fn, input_ch = get_embedder(multires, input_dims=d_in)
            self.embed_fn_fine = embed_fn
            dims[0] = input_ch

        self.num_layers = len(dims)
        self.skip_in = skip_in
        self.scale = scale

        for l in range(0, self.num_layers - 1):
            if l + 1 in self.skip_in:
                # Leave room for the skip concat at the next layer.
                out_dim = dims[l + 1] - dims[0]
            else:
                out_dim = dims[l + 1]

            lin = nn.Linear(dims[l], out_dim)

            if geometric_init:
                if l == self.num_layers - 2:
                    # Last layer: initialize so the SDF starts near a sphere.
                    if not inside_outside:
                        torch.nn.init.normal_(
                            lin.weight,
                            mean=np.sqrt(np.pi) / np.sqrt(dims[l]),
                            std=0.0001,
                        )
                        torch.nn.init.constant_(lin.bias, -bias)
                    else:
                        torch.nn.init.normal_(
                            lin.weight,
                            mean=-np.sqrt(np.pi) / np.sqrt(dims[l]),
                            std=0.0001,
                        )
                        torch.nn.init.constant_(lin.bias, bias)
                elif multires > 0 and l == 0:
                    # First layer: zero the weights feeding from the
                    # positional-encoding channels, keep only raw xyz.
                    torch.nn.init.constant_(lin.bias, 0.0)
                    torch.nn.init.constant_(lin.weight[:, 3:], 0.0)
                    torch.nn.init.normal_(
                        lin.weight[:, :3], 0.0, np.sqrt(2) / np.sqrt(out_dim)
                    )
                elif multires > 0 and l in self.skip_in:
                    # Skip layers: zero the weights on the encoded part of
                    # the concatenated input.
                    torch.nn.init.constant_(lin.bias, 0.0)
                    torch.nn.init.normal_(
                        lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim)
                    )
                    torch.nn.init.constant_(lin.weight[:, -(dims[0] - 3) :], 0.0)
                else:
                    torch.nn.init.constant_(lin.bias, 0.0)
                    torch.nn.init.normal_(
                        lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim)
                    )

            if weight_norm:
                lin = nn.utils.weight_norm(lin)

            setattr(self, "lin" + str(l), lin)

        self.activation = nn.Softplus(beta=100)

    def forward(self, inputs):
        """Return (sdf, features): SDF in channel 0 (rescaled by 1/scale),
        remaining channels unchanged."""
        inputs = inputs * self.scale
        if self.embed_fn_fine is not None:
            inputs = self.embed_fn_fine(inputs)

        x = inputs
        for l in range(0, self.num_layers - 1):
            lin = getattr(self, "lin" + str(l))

            if l in self.skip_in:
                # sqrt(2) keeps the activation variance stable post-concat.
                x = torch.cat([x, inputs], 1) / np.sqrt(2)

            x = lin(x)

            if l < self.num_layers - 2:
                x = self.activation(x)
        return torch.cat([x[:, :1] / self.scale, x[:, 1:]], dim=-1)

    def sdf(self, x):
        """SDF value only, shape (N, 1)."""
        return self.forward(x)[:, :1]

    def sdf_hidden_appearance(self, x):
        """Full network output (sdf + appearance features)."""
        return self.forward(x)

    def gradient(self, x):
        """Autograd gradient of the SDF w.r.t. ``x``; shape (N, 1, d_in)."""
        x.requires_grad_(True)
        y = self.sdf(x)
        d_output = torch.ones_like(y, requires_grad=False, device=y.device)
        gradients = torch.autograd.grad(
            outputs=y,
            inputs=x,
            grad_outputs=d_output,
            create_graph=True,
            retain_graph=True,
            only_inputs=True,
        )[0]
        return gradients.unsqueeze(1)
# This implementation is borrowed from IDR: https://github.com/lioryariv/idr
class RenderingNetwork(nn.Module):
    """Color MLP: maps (points, normals, view dirs, geometry features) to
    RGB; the input combination is selected by ``mode``."""

    def __init__(
        self,
        d_feature,
        mode,
        d_in,
        d_out,
        d_hidden,
        n_layers,
        weight_norm=True,
        multires_view=0,
        squeeze_out=True,
    ):
        """Build the rendering MLP.

        Args:
            d_feature: size of the geometry feature vector input.
            mode: "idr" (points + views + normals), "no_view_dir", or
                "no_normal" — selects which inputs are concatenated.
            d_in: combined dimension of the non-feature inputs.
            d_out: output channels (3 for RGB).
            d_hidden: hidden width.
            n_layers: number of hidden layers.
            weight_norm: wrap linear layers in weight normalization.
            multires_view: positional-encoding octaves for view directions.
            squeeze_out: apply a final sigmoid to map output into (0, 1).
        """
        super().__init__()

        self.mode = mode
        self.squeeze_out = squeeze_out
        dims = [d_in + d_feature] + [d_hidden for _ in range(n_layers)] + [d_out]

        self.embedview_fn = None
        if multires_view > 0:
            embedview_fn, input_ch = get_embedder(multires_view)
            self.embedview_fn = embedview_fn
            # Encoded views replace the raw 3-dim view input.
            dims[0] += input_ch - 3

        self.num_layers = len(dims)

        for l in range(0, self.num_layers - 1):
            out_dim = dims[l + 1]
            lin = nn.Linear(dims[l], out_dim)

            if weight_norm:
                lin = nn.utils.weight_norm(lin)

            setattr(self, "lin" + str(l), lin)

        self.relu = nn.ReLU()

    def forward(self, points, normals, view_dirs, feature_vectors):
        """Return per-point color; inputs used depend on ``self.mode``."""
        if self.embedview_fn is not None:
            view_dirs = self.embedview_fn(view_dirs)

        rendering_input = None

        if self.mode == "idr":
            rendering_input = torch.cat(
                [points, view_dirs, normals, feature_vectors], dim=-1
            )
        elif self.mode == "no_view_dir":
            rendering_input = torch.cat([points, normals, feature_vectors], dim=-1)
        elif self.mode == "no_normal":
            rendering_input = torch.cat([points, view_dirs, feature_vectors], dim=-1)

        x = rendering_input

        for l in range(0, self.num_layers - 1):
            lin = getattr(self, "lin" + str(l))

            x = lin(x)

            if l < self.num_layers - 2:
                x = self.relu(x)

        if self.squeeze_out:
            x = torch.sigmoid(x)
        return x
# This implementation is borrowed from nerf-pytorch:
# https://github.com/yenchenlin/nerf-pytorch
class NeRF(nn.Module):
    """Background NeRF used by NeuS: embeds points/views internally and
    returns (alpha, rgb). Only the ``use_viewdirs=True`` path is supported
    in ``forward``."""

    def __init__(
        self,
        D=8,
        W=256,
        d_in=3,
        d_in_view=3,
        multires=0,
        multires_view=0,
        output_ch=4,
        skips=[4],
        use_viewdirs=False,
    ):
        """Build the MLP.

        Args:
            D: number of layers in the position branch.
            W: hidden width.
            d_in: raw point input dimension.
            d_in_view: raw view-direction input dimension.
            multires: positional-encoding octaves for points (0 = raw).
            multires_view: positional-encoding octaves for view directions.
            output_ch: output channels when ``use_viewdirs`` is False
                (that path asserts in ``forward``).
            skips: layer indices that re-concatenate the embedded points.
            use_viewdirs: must be True for ``forward`` to work.

        NOTE(review): ``skips`` is a mutable default list; it is never
        mutated here, so sharing it across instances is harmless.
        """
        super(NeRF, self).__init__()
        self.D = D
        self.W = W
        self.d_in = d_in
        self.d_in_view = d_in_view
        self.input_ch = 3
        self.input_ch_view = 3
        self.embed_fn = None
        self.embed_fn_view = None

        if multires > 0:
            embed_fn, input_ch = get_embedder(multires, input_dims=d_in)
            self.embed_fn = embed_fn
            self.input_ch = input_ch

        if multires_view > 0:
            embed_fn_view, input_ch_view = get_embedder(
                multires_view, input_dims=d_in_view
            )
            self.embed_fn_view = embed_fn_view
            self.input_ch_view = input_ch_view

        self.skips = skips
        self.use_viewdirs = use_viewdirs

        # Layers at a skip index take W + input_ch features (the skip concat).
        self.pts_linears = nn.ModuleList(
            [nn.Linear(self.input_ch, W)]
            + [
                (
                    nn.Linear(W, W)
                    if i not in self.skips
                    else nn.Linear(W + self.input_ch, W)
                )
                for i in range(D - 1)
            ]
        )

        ### Implementation according to the official code release
        ### (https://github.com/bmild/nerf/blob/master/run_nerf_helpers.py#L104-L105)
        self.views_linears = nn.ModuleList([nn.Linear(self.input_ch_view + W, W // 2)])

        ### Implementation according to the paper
        # self.views_linears = nn.ModuleList(
        #     [nn.Linear(input_ch_views + W, W//2)] + [nn.Linear(W//2, W//2) for i in range(D//2)])

        if use_viewdirs:
            self.feature_linear = nn.Linear(W, W)
            self.alpha_linear = nn.Linear(W, 1)
            self.rgb_linear = nn.Linear(W // 2, 3)
        else:
            self.output_linear = nn.Linear(W, output_ch)

    def forward(self, input_pts, input_views):
        """Embed inputs (if configured) and return (alpha, rgb).

        Raises:
            AssertionError: if ``use_viewdirs`` is False.
        """
        if self.embed_fn is not None:
            input_pts = self.embed_fn(input_pts)
        if self.embed_fn_view is not None:
            input_views = self.embed_fn_view(input_views)

        h = input_pts
        for i, l in enumerate(self.pts_linears):
            h = self.pts_linears[i](h)
            h = F.relu(h)
            if i in self.skips:
                # Skip connection: re-inject the embedded points.
                h = torch.cat([input_pts, h], -1)

        if self.use_viewdirs:
            alpha = self.alpha_linear(h)
            feature = self.feature_linear(h)
            h = torch.cat([feature, input_views], -1)

            for i, l in enumerate(self.views_linears):
                h = self.views_linears[i](h)
                h = F.relu(h)

            rgb = self.rgb_linear(h)
            return alpha, rgb
        else:
            assert False
class SingleVarianceNetwork(nn.Module):
    """NeuS's single learnable variance parameter.

    ``forward`` ignores the values of ``x`` — only its length matters — and
    returns exp(10 * variance) replicated to shape (len(x), 1).
    """

    def __init__(self, init_val):
        super(SingleVarianceNetwork, self).__init__()
        self.register_parameter("variance", nn.Parameter(torch.tensor(init_val)))

    def forward(self, x):
        # One shared scalar, broadcast to one row per input element.
        scale = torch.exp(self.variance * 10.0)
        return torch.ones([len(x), 1]) * scale
| yxlao/corres-nerf | 49 | (NeurIPS 2023) CorresNeRF: Image Correspondence Priors for Neural Radiance Fields | yxlao | Yixing Lao | HKU-CS | |
src/neus/renderer.py | Python | import mcubes
import numpy as np
import torch
import torch.nn.functional as F
def extract_fields(bound_min, bound_max, resolution, query_func):
    """Evaluate ``query_func`` on a dense resolution^3 grid inside the AABB
    [bound_min, bound_max], chunked N=64 per axis to bound memory.

    Returns a float32 numpy array of shape (resolution, resolution, resolution).
    """
    N = 64
    xs_chunks = torch.linspace(bound_min[0], bound_max[0], resolution).split(N)
    ys_chunks = torch.linspace(bound_min[1], bound_max[1], resolution).split(N)
    zs_chunks = torch.linspace(bound_min[2], bound_max[2], resolution).split(N)

    u = np.zeros([resolution, resolution, resolution], dtype=np.float32)
    with torch.no_grad():
        for xi, xs in enumerate(xs_chunks):
            for yi, ys in enumerate(ys_chunks):
                for zi, zs in enumerate(zs_chunks):
                    xx, yy, zz = torch.meshgrid(xs, ys, zs, indexing="ij")
                    # (len(xs)*len(ys)*len(zs), 3) query points.
                    pts = torch.cat(
                        [xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)],
                        dim=-1,
                    )
                    block = query_func(pts).reshape(len(xs), len(ys), len(zs))
                    # Write the chunk back into its slot of the full grid.
                    u[
                        xi * N : xi * N + len(xs),
                        yi * N : yi * N + len(ys),
                        zi * N : zi * N + len(zs),
                    ] = block.detach().cpu().numpy()
    return u
def extract_geometry(bound_min, bound_max, resolution, threshold, query_func):
    """Extract a triangle mesh from the sampled field via marching cubes.

    Vertices come back in grid-index coordinates and are mapped into the
    world-space box [bound_min, bound_max].
    """
    print("threshold: {}".format(threshold))
    field = extract_fields(bound_min, bound_max, resolution, query_func)
    vertices, triangles = mcubes.marching_cubes(field, threshold)
    lo = bound_min.detach().cpu().numpy()
    hi = bound_max.detach().cpu().numpy()
    # Grid index -> world coordinate: normalize by (resolution-1), scale, offset.
    vertices = (
        vertices / (resolution - 1.0) * (hi - lo)[None, :] + lo[None, :]
    )
    return vertices, triangles
def sample_pdf(bins, weights, n_samples, det=False):
    """Draw ``n_samples`` depths per ray by inverting the weights' CDF.

    Implementation follows NeRF's hierarchical sampling. ``bins`` has one more
    entry per ray than ``weights``; ``det=True`` uses stratified midpoints
    instead of random uniforms.
    """
    # Build a per-ray CDF; the epsilon keeps normalization finite for
    # all-zero weights.
    weights = weights + 1e-5
    pdf = weights / torch.sum(weights, -1, keepdim=True)
    cdf = torch.cumsum(pdf, -1)
    cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], -1)
    # Uniform samples in [0, 1): deterministic bin midpoints or random draws.
    if det:
        u = torch.linspace(0.5 / n_samples, 1.0 - 0.5 / n_samples, steps=n_samples)
        u = u.expand(list(cdf.shape[:-1]) + [n_samples])
    else:
        u = torch.rand(list(cdf.shape[:-1]) + [n_samples])
    u = u.contiguous()
    # Locate each sample's CDF interval and clamp to valid indices.
    idx = torch.searchsorted(cdf, u, right=True)
    lo = torch.max(torch.zeros_like(idx - 1), idx - 1)
    hi = torch.min((cdf.shape[-1] - 1) * torch.ones_like(idx), idx)
    bounds = torch.stack([lo, hi], -1)  # (batch, n_samples, 2)
    shape = [bounds.shape[0], bounds.shape[1], cdf.shape[-1]]
    cdf_pair = torch.gather(cdf.unsqueeze(1).expand(shape), 2, bounds)
    bin_pair = torch.gather(bins.unsqueeze(1).expand(shape), 2, bounds)
    # Linear interpolation inside the interval; degenerate spans fall back to
    # the left edge (span forced to 1 so frac stays finite).
    span = cdf_pair[..., 1] - cdf_pair[..., 0]
    span = torch.where(span < 1e-5, torch.ones_like(span), span)
    frac = (u - cdf_pair[..., 0]) / span
    return bin_pair[..., 0] + frac * (bin_pair[..., 1] - bin_pair[..., 0])
class NeuSRenderer:
    """Volume renderer implementing NeuS-style SDF rendering.

    Combines an SDF network (geometry), a color network (appearance), a
    deviation network (the single NeuS ``s`` parameter) and an optional NeRF
    that models the background outside the unit sphere. Rays are sampled
    coarse-to-fine via SDF-guided importance sampling (``up_sample``).
    """
    def __init__(
        self,
        nerf,
        sdf_network,
        deviation_network,
        color_network,
        n_samples,
        n_importance,
        n_outside,
        up_sample_steps,
        perturb,
    ):
        # Sub-networks.
        self.nerf = nerf
        self.sdf_network = sdf_network
        self.deviation_network = deviation_network
        self.color_network = color_network
        # Sampling configuration: coarse samples, importance samples added by
        # up-sampling, background samples outside the sphere, number of
        # up-sampling rounds, and whether to jitter sample depths.
        self.n_samples = n_samples
        self.n_importance = n_importance
        self.n_outside = n_outside
        self.up_sample_steps = up_sample_steps
        self.perturb = perturb
    @staticmethod
    def near_far_from_sphere(rays_o, rays_d):
        """Per-ray near/far depths bracketing the unit sphere.

        Uses the depth of the point on each ray closest to the origin
        (mid = -b / 2a) and takes +/- 1 around it.
        """
        a = torch.sum(rays_d**2, dim=-1, keepdim=True)
        b = 2.0 * torch.sum(rays_o * rays_d, dim=-1, keepdim=True)
        mid = 0.5 * (-b) / a
        near = mid - 1.0
        far = mid + 1.0
        return near, far
    @staticmethod
    def render_core(
        rays_o,
        rays_d,
        z_vals,
        sample_dist,
        sdf_network,
        deviation_network,
        color_network,
        background_alpha=None,
        background_sampled_color=None,
        background_rgb=None,
        cos_anneal_ratio=0.0,
    ):
        """Core NeuS compositing for one batch of rays.

        Converts SDF values at section midpoints into per-sample alphas
        (NeuS eq. for opaque density), optionally merges a pre-rendered
        background (``background_alpha``/``background_sampled_color``),
        alpha-composites color, and computes the eikonal regularizer and the
        expected surface point per ray (for correspondence supervision).
        """
        batch_size, n_samples = z_vals.shape
        # Section length
        dists = z_vals[..., 1:] - z_vals[..., :-1]
        dists = torch.cat(
            [dists, torch.Tensor([sample_dist]).expand(dists[..., :1].shape)], -1
        )
        mid_z_vals = z_vals + dists * 0.5
        # Section midpoints
        # n_rays, n_samples, 3
        pts = rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None]
        dirs = rays_d[:, None, :].expand(pts.shape)
        pts = pts.reshape(-1, 3)
        dirs = dirs.reshape(-1, 3)
        # SDF network returns [sdf | feature_vector] concatenated per point.
        sdf_nn_output = sdf_network(pts)
        sdf = sdf_nn_output[:, :1]
        feature_vector = sdf_nn_output[:, 1:]
        gradients = sdf_network.gradient(pts).squeeze()
        sampled_color = color_network(pts, gradients, dirs, feature_vector).reshape(
            batch_size, n_samples, 3
        )
        inv_s = deviation_network(torch.zeros([1, 3]))[:, :1].clip(
            1e-6, 1e6
        ) # Single parameter
        inv_s = inv_s.expand(batch_size * n_samples, 1)
        # Cosine between view direction and SDF gradient (surface normal).
        true_cos = (dirs * gradients).sum(-1, keepdim=True)
        # "cos_anneal_ratio" grows from 0 to 1 in the beginning training iterations. The anneal strategy below makes
        # the cos value "not dead" at the beginning training iterations, for better convergence.
        iter_cos = -(
            F.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio)
            + F.relu(-true_cos) * cos_anneal_ratio
        ) # always non-positive
        # Estimate signed distances at section points
        estimated_next_sdf = sdf + iter_cos * dists.reshape(-1, 1) * 0.5
        estimated_prev_sdf = sdf - iter_cos * dists.reshape(-1, 1) * 0.5
        # NeuS alpha: relative decrease of the sigmoid-CDF across a section.
        prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_s)
        next_cdf = torch.sigmoid(estimated_next_sdf * inv_s)
        p = prev_cdf - next_cdf
        c = prev_cdf
        alpha = ((p + 1e-5) / (c + 1e-5)).reshape(batch_size, n_samples).clip(0.0, 1.0)
        pts_norm = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=True).reshape(
            batch_size, n_samples
        )
        # Hard mask for the unit sphere; the relaxed (1.2) mask widens the
        # region where the eikonal loss is applied.
        inside_sphere = (pts_norm < 1.0).float().detach()
        relax_inside_sphere = (pts_norm < 1.2).float().detach()
        # Render with background
        if background_alpha is not None:
            # Blend foreground/background alphas inside the sphere, then
            # append the pure-background samples beyond the last z value.
            alpha = alpha * inside_sphere + background_alpha[:, :n_samples] * (
                1.0 - inside_sphere
            )
            alpha = torch.cat([alpha, background_alpha[:, n_samples:]], dim=-1)
            sampled_color = (
                sampled_color * inside_sphere[:, :, None]
                + background_sampled_color[:, :n_samples]
                * (1.0 - inside_sphere)[:, :, None]
            )
            sampled_color = torch.cat(
                [sampled_color, background_sampled_color[:, n_samples:]], dim=1
            )
        # Standard alpha compositing: w_i = a_i * prod_{j<i} (1 - a_j).
        weights = (
            alpha
            * torch.cumprod(
                torch.cat([torch.ones([batch_size, 1]), 1.0 - alpha + 1e-7], -1), -1
            )[:, :-1]
        )
        weights_sum = weights.sum(dim=-1, keepdim=True)
        # Normalize weights to sum up to 1, only for the wmask case.
        if background_alpha is not None:
            # NOTE(review): weights_normalized is computed but never used —
            # apparent dead code; confirm before removing.
            weights_normalized = weights / weights.sum(axis=1, keepdim=True)
        color = (sampled_color * weights[:, :, None]).sum(dim=1)
        if background_rgb is not None: # Fixed background, usually black
            color = color + background_rgb * (1.0 - weights_sum)
        # Eikonal loss, only for inside.
        gradient_error = (
            torch.linalg.norm(
                gradients.reshape(batch_size, n_samples, 3), ord=2, dim=-1
            )
            - 1.0
        ) ** 2
        gradient_error = (relax_inside_sphere * gradient_error).sum() / (
            relax_inside_sphere.sum() + 1e-5
        )
        # Return surface points for corres supervision.
        # Expected surface point = weight-normalized average of sample points.
        normalized_weights = weights / weights_sum
        surface_points = (
            pts.reshape((batch_size, -1, 3)) * normalized_weights[:, :, None]
        )
        # NOTE(review): axis=-1 has size 3 here, so this squeeze is a no-op
        # and the result keeps shape (batch, 1, 3); callers reshape((-1, 3)).
        surface_points = surface_points.sum(axis=1, keepdims=True).squeeze(axis=-1)
        return {
            "surface_points": surface_points,
            "color": color,
            "sdf": sdf,
            "dists": dists,
            "gradients": gradients.reshape(batch_size, n_samples, 3),
            "s_val": 1.0 / inv_s,
            "mid_z_vals": mid_z_vals,
            "weights": weights,
            "cdf": c.reshape(batch_size, n_samples),
            "gradient_error": gradient_error,
            "inside_sphere": inside_sphere,
        }
    def render_core_outside(
        self, rays_o, rays_d, z_vals, sample_dist, nerf, background_rgb=None
    ):
        """
        Render the background (outside the unit sphere) with NeRF++-style
        inverted-sphere parameterization: points are represented as the
        4-vector (x/r, y/r, z/r, 1/r) before being fed to ``nerf``.
        """
        batch_size, n_samples = z_vals.shape
        # Section length
        dists = z_vals[..., 1:] - z_vals[..., :-1]
        dists = torch.cat(
            [dists, torch.Tensor([sample_dist]).expand(dists[..., :1].shape)], -1
        )
        mid_z_vals = z_vals + dists * 0.5
        # Section midpoints
        pts = (
            rays_o[:, None, :] + rays_d[:, None, :] * mid_z_vals[..., :, None]
        ) # batch_size, n_samples, 3
        dis_to_center = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=True).clip(
            1.0, 1e10
        )
        pts = torch.cat(
            [pts / dis_to_center, 1.0 / dis_to_center], dim=-1
        ) # batch_size, n_samples, 4
        dirs = rays_d[:, None, :].expand(batch_size, n_samples, 3)
        pts = pts.reshape(-1, 3 + int(self.n_outside > 0))
        dirs = dirs.reshape(-1, 3)
        density, sampled_color = nerf(pts, dirs)
        sampled_color = torch.sigmoid(sampled_color)
        # Classic NeRF alpha from (softplus-activated) density.
        alpha = 1.0 - torch.exp(
            -F.softplus(density.reshape(batch_size, n_samples)) * dists
        )
        alpha = alpha.reshape(batch_size, n_samples)
        weights = (
            alpha
            * torch.cumprod(
                torch.cat([torch.ones([batch_size, 1]), 1.0 - alpha + 1e-7], -1), -1
            )[:, :-1]
        )
        sampled_color = sampled_color.reshape(batch_size, n_samples, 3)
        color = (weights[:, :, None] * sampled_color).sum(dim=1)
        if background_rgb is not None:
            color = color + background_rgb * (1.0 - weights.sum(dim=-1, keepdim=True))
        return {
            "color": color,
            "sampled_color": sampled_color,
            "alpha": alpha,
            "weights": weights,
        }
    def up_sample(self, rays_o, rays_d, z_vals, sdf, n_importance, inv_s):
        """
        Importance-sample ``n_importance`` new depths per ray, given a fixed
        inv_s: converts section SDF values into alphas, composites them into
        weights, and inverts the resulting CDF with ``sample_pdf``.
        """
        batch_size, n_samples = z_vals.shape
        # n_rays, n_samples, 3
        pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None]
        radius = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=False)
        # A section counts as inside if either endpoint is inside the sphere.
        inside_sphere = (radius[:, :-1] < 1.0) | (radius[:, 1:] < 1.0)
        sdf = sdf.reshape(batch_size, n_samples)
        prev_sdf, next_sdf = sdf[:, :-1], sdf[:, 1:]
        prev_z_vals, next_z_vals = z_vals[:, :-1], z_vals[:, 1:]
        mid_sdf = (prev_sdf + next_sdf) * 0.5
        # Finite-difference directional derivative of the SDF along the ray.
        cos_val = (next_sdf - prev_sdf) / (next_z_vals - prev_z_vals + 1e-5)
        # ----------------------------------------------------------------------
        # Use min value of [ cos, prev_cos ]
        # Though it makes the sampling (not rendering) a little bit biased,
        # this strategy can make the sampling more
        # robust when meeting situations like below:
        #
        # SDF
        # ^
        # |\          -----x----...
        # | \        /
        # |  x      x
        # |---\----/-------------> 0 level
        # |    \  /
        # |     \/
        # |
        # ----------------------------------------------------------------------
        prev_cos_val = torch.cat(
            [torch.zeros([batch_size, 1]), cos_val[:, :-1]], dim=-1
        )
        cos_val = torch.stack([prev_cos_val, cos_val], dim=-1)
        cos_val, _ = torch.min(cos_val, dim=-1, keepdim=False)
        cos_val = cos_val.clip(-1e3, 0.0) * inside_sphere
        dist = next_z_vals - prev_z_vals
        prev_esti_sdf = mid_sdf - cos_val * dist * 0.5
        next_esti_sdf = mid_sdf + cos_val * dist * 0.5
        prev_cdf = torch.sigmoid(prev_esti_sdf * inv_s)
        next_cdf = torch.sigmoid(next_esti_sdf * inv_s)
        alpha = (prev_cdf - next_cdf + 1e-5) / (prev_cdf + 1e-5)
        weights = (
            alpha
            * torch.cumprod(
                torch.cat([torch.ones([batch_size, 1]), 1.0 - alpha + 1e-7], -1), -1
            )[:, :-1]
        )
        z_samples = sample_pdf(z_vals, weights, n_importance, det=True).detach()
        return z_samples
    def cat_z_vals(self, rays_o, rays_d, z_vals, new_z_vals, sdf, last=False):
        """Merge new importance samples into ``z_vals`` (sorted per ray).

        Unless ``last`` is set, also evaluates the SDF at the new depths and
        reorders the concatenated SDF values to match the sorted z order.
        """
        batch_size, n_samples = z_vals.shape
        _, n_importance = new_z_vals.shape
        pts = rays_o[:, None, :] + rays_d[:, None, :] * new_z_vals[..., :, None]
        z_vals = torch.cat([z_vals, new_z_vals], dim=-1)
        z_vals, index = torch.sort(z_vals, dim=-1)
        if not last:
            new_sdf = self.sdf_network.sdf(pts.reshape(-1, 3)).reshape(
                batch_size, n_importance
            )
            sdf = torch.cat([sdf, new_sdf], dim=-1)
            # Gather the sdf values into sorted order via (row, sorted-index)
            # fancy indexing.
            xx = (
                torch.arange(batch_size)[:, None]
                .expand(batch_size, n_samples + n_importance)
                .reshape(-1)
            )
            index = index.reshape(-1)
            sdf = sdf[(xx, index)].reshape(batch_size, n_samples + n_importance)
        return z_vals, sdf
    def render(
        self,
        rays_o,
        rays_d,
        perturb_overwrite=-1,
        background_rgb=None,
        cos_anneal_ratio=0.0,
    ):
        """Render a batch of rays end to end.

        Pipeline: coarse uniform depths in [near, far] -> optional jitter ->
        SDF-guided importance up-sampling -> optional outside-sphere
        background pass -> ``render_core``. Returns a dict of per-ray outputs
        (color, weights, s_val, surface points, eikonal error, ...).
        """
        near, far = NeuSRenderer.near_far_from_sphere(rays_o, rays_d)
        batch_size = len(rays_o)
        sample_dist = (
            2.0 / self.n_samples
        ) # Assuming the region of interest is a unit sphere
        z_vals = torch.linspace(0.0, 1.0, self.n_samples)
        z_vals = near + (far - near) * z_vals[None, :]
        z_vals_outside = None
        if self.n_outside > 0:
            z_vals_outside = torch.linspace(
                1e-3, 1.0 - 1.0 / (self.n_outside + 1.0), self.n_outside
            )
        n_samples = self.n_samples
        perturb = self.perturb
        if perturb_overwrite >= 0:
            perturb = perturb_overwrite
        if perturb > 0:
            # Jitter all depths of a ray by one shared random offset.
            t_rand = torch.rand([batch_size, 1]) - 0.5
            z_vals = z_vals + t_rand * 2.0 / self.n_samples
            if self.n_outside > 0:
                # Stratified jitter of the background depths within their bins.
                mids = 0.5 * (z_vals_outside[..., 1:] + z_vals_outside[..., :-1])
                upper = torch.cat([mids, z_vals_outside[..., -1:]], -1)
                lower = torch.cat([z_vals_outside[..., :1], mids], -1)
                t_rand = torch.rand([batch_size, z_vals_outside.shape[-1]])
                z_vals_outside = lower[None, :] + (upper - lower)[None, :] * t_rand
        if self.n_outside > 0:
            # Map normalized background depths to actual distances beyond far.
            z_vals_outside = (
                far / torch.flip(z_vals_outside, dims=[-1]) + 1.0 / self.n_samples
            )
        background_alpha = None
        background_sampled_color = None
        # Coarse to fine sampling:
        # - n_importance = 64
        # - up_sample_steps = 4
        #
        # - z_vals (512, 64) : init coarse
        # for i in range(self.up_sample_steps):
        # - z_vals (512, 80) : 1st fine
        # - z_vals (512, 96) : 2nd fine
        # - z_vals (512, 112): 3rd fine
        # - z_vals (512, 128): 4th fine
        if self.n_importance > 0:
            with torch.no_grad():
                pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None]
                sdf = self.sdf_network.sdf(pts.reshape(-1, 3)).reshape(
                    batch_size, self.n_samples
                )
                for i in range(self.up_sample_steps):
                    # inv_s is doubled each round: 64, 128, 256, ...
                    new_z_vals = self.up_sample(
                        rays_o,
                        rays_d,
                        z_vals,
                        sdf,
                        self.n_importance // self.up_sample_steps,
                        64 * 2**i,
                    )
                    z_vals, sdf = self.cat_z_vals(
                        rays_o,
                        rays_d,
                        z_vals,
                        new_z_vals,
                        sdf,
                        last=(i + 1 == self.up_sample_steps),
                    )
            n_samples = self.n_samples + self.n_importance
        # Background model
        if self.n_outside > 0:
            z_vals_feed = torch.cat([z_vals, z_vals_outside], dim=-1)
            z_vals_feed, _ = torch.sort(z_vals_feed, dim=-1)
            ret_outside = self.render_core_outside(
                rays_o, rays_d, z_vals_feed, sample_dist, self.nerf
            )
            background_sampled_color = ret_outside["sampled_color"]
            background_alpha = ret_outside["alpha"]
        # Render core
        ret_fine = NeuSRenderer.render_core(
            rays_o,
            rays_d,
            z_vals,
            sample_dist,
            self.sdf_network,
            self.deviation_network,
            self.color_network,
            background_rgb=background_rgb,
            background_alpha=background_alpha,
            background_sampled_color=background_sampled_color,
            cos_anneal_ratio=cos_anneal_ratio,
        )
        color_fine = ret_fine["color"]
        weights = ret_fine["weights"]
        weights_sum = weights.sum(dim=-1, keepdim=True)
        gradients = ret_fine["gradients"]
        s_val = (
            ret_fine["s_val"].reshape(batch_size, n_samples).mean(dim=-1, keepdim=True)
        )
        return {
            "surface_points": ret_fine["surface_points"],
            "color_fine": color_fine,
            "s_val": s_val,
            "cdf_fine": ret_fine["cdf"],
            "weight_sum": weights_sum,
            "weight_max": torch.max(weights, dim=-1, keepdim=True)[0],
            "gradients": gradients,
            "weights": weights,
            "gradient_error": ret_fine["gradient_error"],
            "inside_sphere": ret_fine["inside_sphere"],
        }
    def extract_geometry(self, bound_min, bound_max, resolution, threshold=0.0):
        """Extract a mesh of the zero level set (negated SDF vs threshold)."""
        return extract_geometry(
            bound_min,
            bound_max,
            resolution=resolution,
            threshold=threshold,
            query_func=lambda pts: -self.sdf_network.sdf(pts),
        )
| yxlao/corres-nerf | 49 | (NeurIPS 2023) CorresNeRF: Image Correspondence Priors for Neural Radiance Fields | yxlao | Yixing Lao | HKU-CS | |
src/neus/runner.py | Python | import logging
import math
import os
from pathlib import Path
from shutil import copyfile
import cv2 as cv
import numpy as np
import torch
import torch.nn.functional as F
import trimesh
from matplotlib import pyplot as plt
from pyhocon import ConfigFactory
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from .dataset import Dataset
from .fields import NeRF, RenderingNetwork, SDFNetwork, SingleVarianceNetwork
from .renderer import NeuSRenderer
class Runner:
    def __init__(
        self,
        conf_path,
        scene_name=None,
        is_continue=False,
        from_checkpoint=None,
        backup_code=False,
        inference_only=False,
    ):
        """Build networks, optimizer and renderer from a HOCON config.

        Args:
            conf_path: path to the .conf file (parsed with pyhocon).
            scene_name: required scene identifier; becomes part of exp_dir.
            is_continue: resume from the latest checkpoint in exp_dir.
            from_checkpoint: explicit checkpoint filename to load instead.
            backup_code: copy source files into exp_dir/recording.
            inference_only: disables correspondence loading in the Dataset.
        """
        self.device = torch.device("cuda")
        assert scene_name is not None
        # Configuration
        self.conf_path = Path(conf_path)
        conf_name = self.conf_path.stem
        with open(self.conf_path) as f:
            conf_text = f.read()
        self.conf = ConfigFactory.parse_string(conf_text)
        self.exp_root = Path(self.conf["general.exp_root"])
        # Keep the historical directory name for the "womask" config.
        if conf_name == "womask":
            conf_name_dir_name = "womask_sphere"
        else:
            conf_name_dir_name = conf_name
        self.exp_dir = self.exp_root / conf_name_dir_name / scene_name
        os.makedirs(self.exp_dir, exist_ok=True)
        # Correspondences are only loaded for training runs.
        self.dataset = Dataset(
            conf=self.conf["dataset"],
            scene_name=scene_name,
            corres_enabled=self.conf.get_bool(
                "train.corres_enabled",
                default=False,
            )
            and not inference_only,
        )
        self.iter_step = 0
        # Training parameters
        self.end_iter = self.conf.get_int("train.end_iter")
        self.save_freq = self.conf.get_int("train.save_freq")
        self.report_freq = self.conf.get_int("train.report_freq")
        self.val_freq = self.conf.get_int("train.val_freq")
        self.val_mesh_freq = self.conf.get_int("train.val_mesh_freq")
        self.batch_size = self.conf.get_int("train.batch_size")
        self.validate_resolution_level = self.conf.get_int(
            "train.validate_resolution_level"
        )
        self.learning_rate = self.conf.get_float("train.learning_rate")
        self.learning_rate_alpha = self.conf.get_float("train.learning_rate_alpha")
        self.use_white_bkgd = self.conf.get_bool("train.use_white_bkgd")
        self.warm_up_end = self.conf.get_float("train.warm_up_end", default=0.0)
        self.anneal_end = self.conf.get_float("train.anneal_end", default=0.0)
        self.finetune_start_iter = self.conf.get_int(
            "train.finetune_start_iter", default=0
        )
        # Corres parameters
        self.corres_enabled = self.conf.get_bool(
            "train.corres_enabled",
            default=False,
        )
        self.corres_confidence_enabled = self.conf.get_bool(
            "train.corres_confidence_enabled",
            default=True,
        )
        self.corres_confidence_normalized = self.conf.get_bool(
            "train.corres_confidence_normalized",
            default=False,
        )
        self.corres_dist_enabled = self.conf.get_bool(
            "train.corres_dist_enabled",
            default=False,
        )
        self.corres_dist_tdist = self.conf.get_float(
            "train.corres_dist_tdist",
            default=0.0,
        )
        self.corres_dist_weight = self.conf.get_float(
            "train.corres_dist_weight",
            default=1.0, # 10.0 may be a reasonable value.
        )
        self.corres_dist_robust_ratio = self.conf.get_float(
            "train.corres_dist_robust_ratio",
            default=1.0,
        )
        self.corres_ds_enabled = self.conf.get_bool(
            "train.corres_ds_enabled",
            default=False,
        )
        # NOTE(review): get_float with a bool default, and the value is used
        # as a boolean in train() — get_bool looks intended; confirm.
        self.corres_ds_squared = self.conf.get_float(
            "train.corres_ds_squared",
            default=False,
        )
        self.corres_ds_weight = self.conf.get_float(
            "train.corres_ds_weight",
            default=1.0, # 10.0 may be a reasonable value.
        )
        self.corres_ds_robust_ratio = self.conf.get_float(
            "train.corres_ds_robust_ratio",
            default=1.0,
        )
        # Weights
        self.igr_weight = self.conf.get_float("train.igr_weight")
        self.mask_weight = self.conf.get_float("train.mask_weight")
        self.is_continue = is_continue
        self.backup_code = backup_code
        self.model_list = []
        self.writer = None
        # Networks
        params_to_train = []
        self.nerf_outside = NeRF(**self.conf["model.nerf"]).to(self.device)
        self.sdf_network = SDFNetwork(**self.conf["model.sdf_network"]).to(self.device)
        self.deviation_network = SingleVarianceNetwork(
            **self.conf["model.variance_network"]
        ).to(self.device)
        self.color_network = RenderingNetwork(
            **self.conf["model.rendering_network"]
        ).to(self.device)
        params_to_train += list(self.nerf_outside.parameters())
        params_to_train += list(self.sdf_network.parameters())
        params_to_train += list(self.deviation_network.parameters())
        params_to_train += list(self.color_network.parameters())
        self.optimizer = torch.optim.Adam(params_to_train, lr=self.learning_rate)
        self.renderer = NeuSRenderer(
            self.nerf_outside,
            self.sdf_network,
            self.deviation_network,
            self.color_network,
            **self.conf["model.neus_renderer"],
        )
        # Load checkpoint
        latest_model_name = None
        if from_checkpoint:
            checkpoint_dir = Path(self.exp_dir) / "checkpoints"
            checkpoint_path = checkpoint_dir / from_checkpoint
            if not checkpoint_path.is_file():
                raise ValueError(f"Checkpoint {checkpoint_path} does not exist.")
            else:
                latest_model_name = from_checkpoint
        elif self.is_continue:
            checkpoint_dir = Path(self.exp_dir) / "checkpoints"
            if checkpoint_dir.is_dir():
                model_list_raw = os.listdir(os.path.join(self.exp_dir, "checkpoints"))
                model_list = []
                for model_name in model_list_raw:
                    # Checkpoints are named ckpt_XXXXXX.pth; keep those within
                    # the configured training horizon.
                    if (
                        model_name[-3:] == "pth"
                        and int(model_name[5:-4]) <= self.end_iter
                    ):
                        model_list.append(model_name)
                model_list.sort()
                if len(model_list) == 0:
                    latest_model_name = None
                else:
                    latest_model_name = model_list[-1]
            else:
                latest_model_name = None
        if latest_model_name is not None:
            logging.info("Found checkpoint: {}".format(latest_model_name))
            self.load_checkpoint(latest_model_name)
        # Backup codes and configs for debug
        if self.backup_code:
            self.file_backup()
def train(self):
print(f"exp_dir: {self.exp_dir}")
# Sanity check: fintuning config should not be used to train from scratch.
if self.iter_step < self.finetune_start_iter:
raise ValueError(
f"Fintuing failed, iter_step < finetune_start_iter: "
f"{self.iter_step} < {self.finetune_start_iter}"
)
self.writer = SummaryWriter(log_dir=os.path.join(self.exp_dir, "logs"))
self.update_learning_rate()
res_step = self.end_iter - self.iter_step
# Handle sparse view.
image_perm = self.get_image_perm()
if self.corres_enabled:
self.dataset.corres_map.filter_by_restricted_image_indices(
image_perm.detach().cpu().numpy()
)
print(f"Enabled views: {image_perm}")
# Print corres loss.
print(f"train.corres_enabled: {self.corres_enabled}")
for iter_i in tqdm(range(res_step)):
if self.corres_enabled:
# Pick one camera and generate random rays from it.
# data.shape: (batch_size, 10)
# batch_size: 512 by default
im_index = image_perm[self.iter_step % len(image_perm)]
im_indices = im_index.repeat(self.batch_size)
pixel_xs = torch.randint(
low=0, high=self.dataset.W, size=[self.batch_size]
)
pixel_ys = torch.randint(
low=0, high=self.dataset.H, size=[self.batch_size]
)
else:
im_index = image_perm[self.iter_step % len(image_perm)]
im_indices = im_index.repeat(self.batch_size)
pixel_xs = torch.randint(
low=0, high=self.dataset.W, size=[self.batch_size]
)
pixel_ys = torch.randint(
low=0, high=self.dataset.H, size=[self.batch_size]
)
rays_o, rays_d, true_rgb, mask = self.dataset.gen_random_rays_at_pixels(
im_indices, pixel_xs, pixel_ys
)
background_rgb = None
if self.use_white_bkgd:
background_rgb = torch.ones([1, 3])
if self.mask_weight > 0.0:
mask = (mask > 0.5).float()
else:
mask = torch.ones_like(mask)
mask_sum = mask.sum() + 1e-5
render_out = self.renderer.render(
rays_o,
rays_d,
background_rgb=background_rgb,
cos_anneal_ratio=self.get_cos_anneal_ratio(),
)
surface_points = render_out["surface_points"]
color_fine = render_out["color_fine"]
s_val = render_out["s_val"]
cdf_fine = render_out["cdf_fine"]
gradient_error = render_out["gradient_error"]
weight_max = render_out["weight_max"]
weight_sum = render_out["weight_sum"]
# Default losses for corres. If corres is enabled, the default loss
# values will be overwritten.
corres_dist_loss = torch.tensor(0.0).to(self.device)
corres_ds_loss = torch.tensor(0.0).to(self.device)
if self.corres_enabled:
# corres_result:
# 0 1 2 3 4 5 6 7
# [src_i, src_x, src_y, dst_i, dst_x, dst_y, confidence, mask]
corres_result = self.dataset.corres_map.query(
im_indices.cpu().numpy(),
pixel_xs.cpu().numpy(),
pixel_ys.cpu().numpy(),
)
if len(corres_result) > 0:
############################################################
# Compute rendered src_points and dst_points
############################################################
# Src surface points.
corres_query_mask = corres_result[:, 7].astype(np.int64)
foreground_mask = mask[corres_query_mask].bool().squeeze(axis=-1)
src_points = surface_points[corres_query_mask].reshape((-1, 3))
# Dst surface points.
dst_indices, dst_xs, dst_ys = (
corres_result[:, 3].astype(np.int64),
corres_result[:, 4].astype(np.int64),
corres_result[:, 5].astype(np.int64),
)
(
dst_rays_o,
dst_rays_d,
dst_true_rgb,
dst_mask,
) = self.dataset.gen_random_rays_at_pixels(
torch.from_numpy(dst_indices),
torch.from_numpy(dst_xs),
torch.from_numpy(dst_ys),
)
dst_background_rgb = None
if self.use_white_bkgd:
dst_background_rgb = torch.ones([1, 3])
if self.mask_weight > 0.0:
dst_mask = (dst_mask > 0.5).float()
else:
dst_mask = torch.ones_like(dst_mask)
dst_render_out = self.renderer.render(
dst_rays_o,
dst_rays_d,
background_rgb=dst_background_rgb,
cos_anneal_ratio=self.get_cos_anneal_ratio(),
)
dst_points = dst_render_out["surface_points"].reshape((-1, 3))
# Process confidences.
confidences = torch.tensor(corres_result[:, 6])
if self.corres_confidence_enabled:
if self.corres_confidence_normalized:
# Normalize [0.5, 1] to [0, 1].
confidences = (confidences - 0.5) * 2.0
confidences = confidences / confidences.mean()
else:
# Set confidences to 1 if corres is not enabled.
confidences = torch.ones_like(confidences)
# print(f"foreground_mask percentage true:"
# f"{foreground_mask.sum() / len(foreground_mask)}")
# Apply foreground mask.
src_points = src_points[foreground_mask]
dst_points = dst_points[foreground_mask]
confidences = confidences[foreground_mask]
assert src_points.shape == dst_points.shape
assert len(src_points) == len(dst_points) == len(confidences)
if len(src_points) > 0:
if self.corres_dist_enabled:
dists_squared = torch.sum(
(src_points - dst_points) ** 2, axis=1
)
tdist_squared = self.corres_dist_tdist**2
dists_squared = torch.clamp(
dists_squared - tdist_squared, min=0.0
)
# Robust term
num_robust = int(
math.floor(
len(src_points) * self.corres_dist_robust_ratio
)
)
robust_indices = dists_squared.topk(
num_robust, largest=False, sorted=False
).indices
corres_dist_losses = (
dists_squared[robust_indices]
* confidences[robust_indices]
)
if len(corres_dist_losses) > 0:
corres_dist_loss = torch.mean(corres_dist_losses)
else:
corres_dist_loss = torch.tensor(0.0).to(self.device)
if self.corres_ds_enabled:
np_foreground_mask = foreground_mask.cpu().numpy()
# - Key (tuple of length 6):
# (src_i, src_x, src_y, dst_i, dst_x, dst_y)
keys = corres_result[np_foreground_mask][:, :6]
# - Value (tuple of length 11):
# (src_px, src_py, src_pz, # 0, 1, 2
# dst_px, dst_py, dst_pz, # 3, 4, 5
# mid_px, mid_py, mid_pz, # 6, 7, 8
# src_depth, dst_depth) # 9, 10
vals = np.array(
[
self.dataset.corres_map.corres_points_map[
tuple(key)
]
for key in keys
]
)
src_gt_depths = vals[:, 9].astype(np.float32)
dst_gt_depths = vals[:, 10].astype(np.float32)
src_gt_depths = torch.tensor(src_gt_depths).to(self.device)
dst_gt_depths = torch.tensor(dst_gt_depths).to(self.device)
# Compute src and dst depths.
src_os = rays_o[corres_query_mask][foreground_mask]
dst_os = dst_rays_o[foreground_mask]
src_depths = torch.norm(src_os - src_points, dim=1)
dst_depths = torch.norm(dst_os - dst_points, dim=1)
# Compute corres_ds_loss, either L1 or squared.
if self.corres_ds_squared:
src_depth_losses = (
(src_depths - src_gt_depths) / src_gt_depths
) ** 2
dst_depth_losses = (
(dst_depths - dst_gt_depths) / dst_gt_depths
) ** 2
else:
src_depth_losses = torch.abs(src_depths - src_gt_depths)
dst_depth_losses = torch.abs(dst_depths - dst_gt_depths)
depth_losses = 0.5 * (src_depth_losses + dst_depth_losses)
# Remove non-robust correspondences.
num_robust = int(
math.floor(
len(depth_losses) * self.corres_ds_robust_ratio
)
)
robust_indices = depth_losses.topk(
num_robust, largest=False, sorted=False
).indices
depth_losses = (
depth_losses[robust_indices]
* confidences[robust_indices]
)
# Mean.
corres_ds_loss = torch.mean(depth_losses)
# Loss
color_error = (color_fine - true_rgb) * mask
color_fine_loss = (
F.l1_loss(color_error, torch.zeros_like(color_error), reduction="sum")
/ mask_sum
)
# color_fine: (512, 3)
# true_rgb : (512, 3)
# mask : (512, 1)
psnr = 20.0 * torch.log10(
1.0
/ (
((color_fine - true_rgb) ** 2 * mask).sum() / (mask_sum * 3.0)
).sqrt()
)
eikonal_loss = gradient_error
mask_loss = F.binary_cross_entropy(weight_sum.clip(1e-3, 1.0 - 1e-3), mask)
loss = (
corres_dist_loss * self.corres_dist_weight
+ corres_ds_loss * self.corres_ds_weight
+ color_fine_loss
+ eikonal_loss * self.igr_weight
+ mask_loss * self.mask_weight
)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.iter_step += 1
self.log_interval = 10 # hard-coded for now
if self.iter_step % self.log_interval == 0:
self.writer.add_scalar("Loss/loss", loss, self.iter_step)
self.writer.add_scalar(
"Loss/corres_dist_loss", corres_dist_loss, self.iter_step
)
self.writer.add_scalar(
"Loss/corres_ds_loss", corres_ds_loss, self.iter_step
)
self.writer.add_scalar(
"Loss/color_loss", color_fine_loss, self.iter_step
)
self.writer.add_scalar(
"Loss/eikonal_loss", eikonal_loss, self.iter_step
)
self.writer.add_scalar("Statistics/s_val", s_val.mean(), self.iter_step)
self.writer.add_scalar(
"Statistics/cdf",
(cdf_fine[:, :1] * mask).sum() / mask_sum,
self.iter_step,
)
self.writer.add_scalar(
"Statistics/weight_max",
(weight_max * mask).sum() / mask_sum,
self.iter_step,
)
self.writer.add_scalar("Statistics/psnr", psnr, self.iter_step)
if self.iter_step % self.report_freq == 0:
print(
f"iter:{self.iter_step:8>d}, "
f"loss = {loss:.4f}, "
f"corres_dist_loss={corres_dist_loss:.4E}, "
f"corres_ds_loss={corres_ds_loss:.4E}, "
f"lr={self.optimizer.param_groups[0]['lr']:.4E}"
)
if self.iter_step % self.save_freq == 0:
self.save_checkpoint()
if self.iter_step % self.val_freq == 0:
validate_indices = []
validate_indices.append(0)
validate_indices.append(np.random.randint(self.dataset.n_images))
if self.dataset.n_images > 26:
validate_indices.append(26)
for i in validate_indices:
self.validate_image(i)
if self.iter_step % self.val_mesh_freq == 0:
self.validate_mesh()
self.update_learning_rate()
if self.iter_step % len(image_perm) == 0:
image_perm = self.get_image_perm()
    def eval_corres(self):
        """
        Evaluate correspondence quality on a converged model.
        Computes the distances of all corres pairs.

        Renders the surface point for both sides of every correspondence,
        discards pairs where either pixel is background, prints distance
        statistics, and shows a histogram.
        """
        def batchify(iterable, batch_size=1):
            # Yield consecutive slices of at most batch_size rows.
            length = len(iterable)
            for index in range(0, length, batch_size):
                yield iterable[index : min(index + batch_size, length)]
        # map: (im_index, x, y) -> (im_index, x, y); x: col, y row.
        corres_map = self.dataset.corres_map.corres_map
        print(f"Batch size: {self.batch_size}")
        print(f"Number of corres pairs: {len(corres_map)}")
        # Each pair renders two rays, so halve the batch size.
        half_batch_size = int(self.batch_size // 2)
        corres_list = [
            [k[0], k[1], k[2], v[0], v[1], v[2]] for k, v in corres_map.items()
        ]
        corres_list = np.array(corres_list)
        # Call inference on batches
        batches = list(batchify(corres_list, half_batch_size))
        all_distances = []
        for batch_data in tqdm(batches):
            batch_data = torch.from_numpy(batch_data).to(self.device)
            src_indices, src_xs, src_ys, dst_indices, dst_xs, dst_ys = (
                batch_data[:, 0],
                batch_data[:, 1],
                batch_data[:, 2],
                batch_data[:, 3],
                batch_data[:, 4],
                batch_data[:, 5],
            )
            # Generate rays
            (
                src_rays_o,
                src_rays_d,
                src_true_rgb,
                src_mask,
            ) = self.dataset.gen_random_rays_at_pixels(src_indices, src_xs, src_ys)
            (
                dst_rays_o,
                dst_rays_d,
                dst_true_rgb,
                dst_mask,
            ) = self.dataset.gen_random_rays_at_pixels(dst_indices, dst_xs, dst_ys)
            # Render
            src_render_out = self.renderer.render(
                src_rays_o,
                src_rays_d,
                background_rgb=None,
                cos_anneal_ratio=self.get_cos_anneal_ratio(),
            )
            dst_render_out = self.renderer.render(
                dst_rays_o,
                dst_rays_d,
                background_rgb=None,
                cos_anneal_ratio=self.get_cos_anneal_ratio(),
            )
            # Compute surface points
            src_points = src_render_out["surface_points"].reshape((-1, 3))
            dst_points = dst_render_out["surface_points"].reshape((-1, 3))
            # Apply mask
            if self.mask_weight > 0.0:
                src_mask = src_mask > 0.5
                dst_mask = dst_mask > 0.5
            else:
                src_mask = torch.ones_like(src_mask).bool()
                dst_mask = torch.ones_like(dst_mask).bool()
            # foreground_mask is true when both the src and dst are foreground
            foreground_mask = torch.logical_and(src_mask, dst_mask).flatten()
            src_points = src_points[foreground_mask]
            dst_points = dst_points[foreground_mask]
            # Compute distances
            distances = torch.norm(src_points - dst_points, dim=1)
            distances = distances.detach().cpu().numpy().flatten()
            all_distances.append(distances)
        all_distances = np.concatenate(all_distances)
        print(f"# distances : {len(all_distances)}")
        print(f"Avg distance : {np.mean(all_distances)}")
        print(f"Median distance: {np.median(all_distances)}")
        print(f"Max distance : {np.max(all_distances)}")
        print(f"Min distance : {np.min(all_distances)}")
        print(f"Std distance : {np.std(all_distances)}")
        # Plot histogram of distances
        plt.hist(all_distances, bins=200)
        plt.title("Histogram of distances")
        plt.xlabel("Distance")
        plt.show()
def get_image_perm(self):
all_views = np.array(self.dataset.sparse_views)
all_views = torch.from_numpy(all_views).to(self.device)
perm_all_views = all_views[torch.randperm(len(all_views))]
return perm_all_views
def get_cos_anneal_ratio(self):
if self.anneal_end == 0.0:
return 1.0
else:
return np.min([1.0, self.iter_step / self.anneal_end])
def update_learning_rate(self):
if self.iter_step < self.warm_up_end:
learning_factor = self.iter_step / self.warm_up_end
else:
alpha = self.learning_rate_alpha
progress_start = max(self.warm_up_end, self.finetune_start_iter)
progress = (self.iter_step - progress_start) / (
self.end_iter - progress_start
)
learning_factor = (np.cos(np.pi * progress) + 1.0) * 0.5 * (
1 - alpha
) + alpha
for g in self.optimizer.param_groups:
g["lr"] = self.learning_rate * learning_factor
    def file_backup(self):
        """Copy the configured source directories and the config into exp_dir.

        For every directory listed under ``general.recording``, copies its
        top-level ``*.py`` files into exp_dir/recording/<dir>, then copies the
        active config file as recording/config.conf.
        """
        dir_lis = self.conf["general.recording"]
        os.makedirs(os.path.join(self.exp_dir, "recording"), exist_ok=True)
        for dir_name in dir_lis:
            cur_dir = os.path.join(self.exp_dir, "recording", dir_name)
            os.makedirs(cur_dir, exist_ok=True)
            files = os.listdir(dir_name)
            for f_name in files:
                # Only back up Python sources (non-recursive).
                if f_name[-3:] == ".py":
                    copyfile(
                        os.path.join(dir_name, f_name), os.path.join(cur_dir, f_name)
                    )
        copyfile(self.conf_path, os.path.join(self.exp_dir, "recording", "config.conf"))
def load_checkpoint(self, checkpoint_name):
    """Restore all networks, the optimizer state, and the iteration counter
    from a checkpoint file under <exp_dir>/checkpoints.

    Args:
        checkpoint_name: File name of the checkpoint (e.g. "ckpt_000100.pth").
    """
    checkpoint = torch.load(
        os.path.join(self.exp_dir, "checkpoints", checkpoint_name),
        map_location=self.device,
    )
    # Keys mirror those written by save_checkpoint().
    self.nerf_outside.load_state_dict(checkpoint["nerf"])
    self.sdf_network.load_state_dict(checkpoint["sdf_network_fine"])
    self.deviation_network.load_state_dict(checkpoint["variance_network_fine"])
    self.color_network.load_state_dict(checkpoint["color_network_fine"])
    self.optimizer.load_state_dict(checkpoint["optimizer"])
    # Resume training from the saved iteration.
    self.iter_step = checkpoint["iter_step"]
    logging.info("End")
def save_checkpoint(self):
    """Write the current networks/optimizer state and iteration counter to
    <exp_dir>/checkpoints/ckpt_<iter>.pth."""
    ckpt_dir = os.path.join(self.exp_dir, "checkpoints")
    os.makedirs(ckpt_dir, exist_ok=True)
    state = {
        "nerf": self.nerf_outside.state_dict(),
        "sdf_network_fine": self.sdf_network.state_dict(),
        "variance_network_fine": self.deviation_network.state_dict(),
        "color_network_fine": self.color_network.state_dict(),
        "optimizer": self.optimizer.state_dict(),
        "iter_step": self.iter_step,
    }
    torch.save(
        state,
        os.path.join(ckpt_dir, "ckpt_{:0>6d}.pth".format(self.iter_step)),
    )
def render_image(self, camera_indices, resolution_level):
    """Render full images for the given cameras and save them as PNGs under
    <exp_dir>/renders.

    Args:
        camera_indices: Iterable of dataset camera indices to render.
        resolution_level: Downsampling factor passed to the ray generator.

    Raises:
        ValueError: If the renderer did not produce "color_fine".
    """
    print(
        f"render_image(camera_indices={camera_indices}, \n"
        f"             resolution_level={resolution_level})"
    )
    for idx in tqdm(camera_indices, leave=False, desc="Rendering images"):
        # One ray per output pixel for camera `idx`.
        rays_o, rays_d = self.dataset.gen_rays_at(
            idx, resolution_level=resolution_level
        )
        H, W, _ = rays_o.shape
        # Flatten to (H*W, 3) and split into batches to bound memory use.
        rays_o = rays_o.reshape(-1, 3).split(self.batch_size)
        rays_d = rays_d.reshape(-1, 3).split(self.batch_size)
        out_rgb_fine = []
        for rays_o_batch, rays_d_batch in zip(rays_o, rays_d):
            background_rgb = torch.ones([1, 3]) if self.use_white_bkgd else None
            render_out = self.renderer.render(
                rays_o_batch,
                rays_d_batch,
                cos_anneal_ratio=self.get_cos_anneal_ratio(),
                background_rgb=background_rgb,
            )

            def feasible(key):
                # A render output is usable iff present and not None.
                return (key in render_out) and (render_out[key] is not None)

            if feasible("color_fine"):
                out_rgb_fine.append(render_out["color_fine"].detach().cpu().numpy())
            else:
                raise ValueError("Internal error: color_fine not feasible")
            del render_out
        # Reassemble the per-batch rows into an HxWx3 image scaled to [0, 255].
        img_fine = (
            np.concatenate(out_rgb_fine, axis=0).reshape([H, W, 3]) * 256
        ).clip(0, 255)
        img_dir = self.exp_dir / "renders"
        img_dir.mkdir(exist_ok=True, parents=True)
        img_name = f"{self.iter_step:0>8d}_{idx:0>3d}_rl{resolution_level}.png"
        img_path = img_dir / img_name
        cv.imwrite(str(img_path), img_fine)
def validate_image(self, idx=-1, resolution_level=-1):
    """Render one validation camera and save the RGB rendering (stacked with
    the ground-truth image) and a normal map under <exp_dir>/validations_fine
    and <exp_dir>/normals.

    Args:
        idx: Camera index; a random camera is picked when negative.
        resolution_level: Downsampling factor; falls back to
            self.validate_resolution_level when negative.
    """
    if idx < 0:
        idx = np.random.randint(self.dataset.n_images)
    print("Validate: iter: {}, camera: {}".format(self.iter_step, idx))
    if resolution_level < 0:
        resolution_level = self.validate_resolution_level
    rays_o, rays_d = self.dataset.gen_rays_at(
        idx, resolution_level=resolution_level
    )
    H, W, _ = rays_o.shape
    # Flatten rays and render in batches to bound memory use.
    rays_o = rays_o.reshape(-1, 3).split(self.batch_size)
    rays_d = rays_d.reshape(-1, 3).split(self.batch_size)
    out_rgb_fine = []
    out_normal_fine = []
    for rays_o_batch, rays_d_batch in zip(rays_o, rays_d):
        background_rgb = torch.ones([1, 3]) if self.use_white_bkgd else None
        render_out = self.renderer.render(
            rays_o_batch,
            rays_d_batch,
            cos_anneal_ratio=self.get_cos_anneal_ratio(),
            background_rgb=background_rgb,
        )

        def feasible(key):
            # A render output is usable iff present and not None.
            return (key in render_out) and (render_out[key] is not None)

        if feasible("color_fine"):
            out_rgb_fine.append(render_out["color_fine"].detach().cpu().numpy())
        if feasible("gradients") and feasible("weights"):
            # Weight the per-sample gradients into one normal per ray.
            n_samples = self.renderer.n_samples + self.renderer.n_importance
            normals = (
                render_out["gradients"] * render_out["weights"][:, :n_samples, None]
            )
            if feasible("inside_sphere"):
                normals = normals * render_out["inside_sphere"][..., None]
            normals = normals.sum(dim=1).detach().cpu().numpy()
            out_normal_fine.append(normals)
        del render_out
    img_fine = None
    if len(out_rgb_fine) > 0:
        # [H, W, 3, -1] image scaled to [0, 255].
        img_fine = (
            np.concatenate(out_rgb_fine, axis=0).reshape([H, W, 3, -1]) * 256
        ).clip(0, 255)
    normal_img = None
    if len(out_normal_fine) > 0:
        normal_img = np.concatenate(out_normal_fine, axis=0)
        # Rotate normals from world space into the camera frame.
        rot = np.linalg.inv(
            self.dataset.pose_all[idx, :3, :3].detach().cpu().numpy()
        )
        normal_img = (
            np.matmul(rot[None, :, :], normal_img[:, :, None]).reshape(
                [H, W, 3, -1]
            )
            * 128
            + 128
        ).clip(0, 255)
    os.makedirs(os.path.join(self.exp_dir, "validations_fine"), exist_ok=True)
    os.makedirs(os.path.join(self.exp_dir, "normals"), exist_ok=True)
    # NOTE(review): this loop assumes img_fine is not None, i.e. that the
    # renderer produced "color_fine" for at least one batch — confirm upstream.
    for i in range(img_fine.shape[-1]):
        if len(out_rgb_fine) > 0:
            cv.imwrite(
                os.path.join(
                    self.exp_dir,
                    "validations_fine",
                    "{:0>8d}_{}_{}.png".format(self.iter_step, i, idx),
                ),
                np.concatenate(
                    [
                        img_fine[..., i],
                        self.dataset.image_at(
                            idx, resolution_level=resolution_level
                        ),
                    ]
                ),
            )
        if len(out_normal_fine) > 0:
            cv.imwrite(
                os.path.join(
                    self.exp_dir,
                    "normals",
                    "{:0>8d}_{}_{}.png".format(self.iter_step, i, idx),
                ),
                normal_img[..., i],
            )
def render_novel_view(self, K, T, resolution_level=1):
    """
    Render novel view with given K, T

    Args:
        K: Camera intrinsics, passed to the custom-camera ray generator.
        T: Camera pose matrix; T[:3, :3] is also used to rotate normals.
        resolution_level: Downsampling factor; falls back to
            self.validate_resolution_level when negative.

    Returns:
        (img_fine, normal_img): RGB image and normal map, each reshaped to
        [H, W, 3, -1] in [0, 255], or None when the renderer produced no
        corresponding output.
    """
    if resolution_level < 0:
        resolution_level = self.validate_resolution_level
    rays_o, rays_d = self.dataset.gen_rays_at_custom_camera(
        K, T, resolution_level=resolution_level
    )
    H, W, _ = rays_o.shape
    # Flatten rays and render in batches to bound memory use.
    rays_o = rays_o.reshape(-1, 3).split(self.batch_size)
    rays_d = rays_d.reshape(-1, 3).split(self.batch_size)
    out_rgb_fine = []
    out_normal_fine = []
    for rays_o_batch, rays_d_batch in zip(rays_o, rays_d):
        background_rgb = torch.ones([1, 3]) if self.use_white_bkgd else None
        render_out = self.renderer.render(
            rays_o_batch,
            rays_d_batch,
            cos_anneal_ratio=self.get_cos_anneal_ratio(),
            background_rgb=background_rgb,
        )

        def feasible(key):
            # A render output is usable iff present and not None.
            return (key in render_out) and (render_out[key] is not None)

        if feasible("color_fine"):
            out_rgb_fine.append(render_out["color_fine"].detach().cpu().numpy())
        if feasible("gradients") and feasible("weights"):
            # Weight the per-sample gradients into one normal per ray.
            n_samples = self.renderer.n_samples + self.renderer.n_importance
            normals = (
                render_out["gradients"] * render_out["weights"][:, :n_samples, None]
            )
            if feasible("inside_sphere"):
                normals = normals * render_out["inside_sphere"][..., None]
            normals = normals.sum(dim=1).detach().cpu().numpy()
            out_normal_fine.append(normals)
        del render_out
    img_fine = None
    if len(out_rgb_fine) > 0:
        img_fine = (
            np.concatenate(out_rgb_fine, axis=0).reshape([H, W, 3, -1]) * 256
        ).clip(0, 255)
    normal_img = None
    if len(out_normal_fine) > 0:
        normal_img = np.concatenate(out_normal_fine, axis=0)
        # Rotate world-space normals by the given camera rotation.
        rot = T[:3, :3]
        normal_img = (
            np.matmul(rot[None, :, :], normal_img[:, :, None]).reshape(
                [H, W, 3, -1]
            )
            * 128
            + 128
        ).clip(0, 255)
    return img_fine, normal_img
def render_novel_image(self, idx_0, idx_1, ratio, resolution_level):
    """
    Interpolate view between two cameras.

    Args:
        idx_0, idx_1: Indices of the two cameras to interpolate between.
        ratio: Interpolation ratio between the two camera poses.
        resolution_level: Downsampling factor for the rendered image.

    Returns:
        uint8 RGB image of shape [H, W, 3].
    """
    rays_o, rays_d = self.dataset.gen_rays_between(
        idx_0, idx_1, ratio, resolution_level=resolution_level
    )
    H, W, _ = rays_o.shape
    # Flatten rays and render in batches to bound memory use.
    rays_o = rays_o.reshape(-1, 3).split(self.batch_size)
    rays_d = rays_d.reshape(-1, 3).split(self.batch_size)
    out_rgb_fine = []
    for rays_o_batch, rays_d_batch in zip(rays_o, rays_d):
        background_rgb = torch.ones([1, 3]) if self.use_white_bkgd else None
        render_out = self.renderer.render(
            rays_o_batch,
            rays_d_batch,
            cos_anneal_ratio=self.get_cos_anneal_ratio(),
            background_rgb=background_rgb,
        )
        out_rgb_fine.append(render_out["color_fine"].detach().cpu().numpy())
        del render_out
    # Reassemble the batches into one HxWx3 uint8 image.
    img_fine = (
        (np.concatenate(out_rgb_fine, axis=0).reshape([H, W, 3]) * 256)
        .clip(0, 255)
        .astype(np.uint8)
    )
    return img_fine
def validate_mesh(self, world_space=False, resolution=64, threshold=0.0):
    """Extract a surface mesh from the current model within the dataset's
    object bounding box and export it as PLY under <exp_dir>/meshes.

    Args:
        world_space: If True, scale/translate vertices into world coordinates.
        resolution: Grid resolution for geometry extraction.
        threshold: Iso-surface threshold for geometry extraction.
    """
    lo = torch.tensor(self.dataset.object_bbox_min, dtype=torch.float32)
    hi = torch.tensor(self.dataset.object_bbox_max, dtype=torch.float32)
    vertices, triangles = self.renderer.extract_geometry(
        lo, hi, resolution=resolution, threshold=threshold
    )
    mesh_dir = os.path.join(self.exp_dir, "meshes")
    os.makedirs(mesh_dir, exist_ok=True)

    if world_space:
        # Undo the dataset normalization: uniform scale plus translation.
        scale_mat = self.dataset.scale_mats_np[0]
        vertices = vertices * scale_mat[0, 0] + scale_mat[:3, 3][None]
        print("[Validate Mesh] Using world space")
    else:
        print("[Validate Mesh] Using unit space")

    suffix = "world" if world_space else "unit"
    trimesh.Trimesh(vertices, triangles).export(
        os.path.join(mesh_dir, f"{self.iter_step:0>8d}_{suffix}.ply")
    )
    logging.info("End")
def interpolate_view(self, img_idx_0, img_idx_1):
    """Render a ping-pong interpolation video between two cameras and save it
    as an MP4 under <exp_dir>/render.

    Args:
        img_idx_0, img_idx_1: Indices of the two cameras to interpolate.
    """
    images = []
    n_frames = 60
    for i in range(n_frames):
        print(i)
        images.append(
            self.render_novel_image(
                img_idx_0,
                img_idx_1,
                # Sine easing in [0, 1]: slow near the endpoints, fast mid-way.
                np.sin(((i / n_frames) - 0.5) * np.pi) * 0.5 + 0.5,
                resolution_level=4,
            )
        )
    # Append the reversed sequence for a seamless back-and-forth loop.
    for i in range(n_frames):
        images.append(images[n_frames - i - 1])
    fourcc = cv.VideoWriter_fourcc(*"mp4v")
    video_dir = os.path.join(self.exp_dir, "render")
    print(f"video_dir: {video_dir}")
    os.makedirs(video_dir, exist_ok=True)
    h, w, _ = images[0].shape
    writer = cv.VideoWriter(
        os.path.join(
            video_dir,
            "{:0>8d}_{}_{}.mp4".format(self.iter_step, img_idx_0, img_idx_1),
        ),
        fourcc,
        30,  # frames per second
        (w, h),
    )
    for image in images:
        writer.write(image)
    writer.release()
| yxlao/corres-nerf | 49 | (NeurIPS 2023) CorresNeRF: Image Correspondence Priors for Neural Radiance Fields | yxlao | Yixing Lao | HKU-CS | |
src/tools/run_matcher.py | Python | import argparse
import itertools
import json
from pathlib import Path
import camtools as ct
import numpy as np
from tqdm import tqdm
from ..corres.corres_map import CorresMap, load_camera_split
from ..corres.dataloader import load_llff, load_dtu
from ..corres.matcher_dkm import MatcherDKM
from ..corres.fused_matcher import FusedMatcher
_matchers = dict()
def match_scene(
    matcher_name,
    ims,
    sparse_views,
    match_dir,
    corres_name,
):
    """
    Run a matcher on a given scene.

    Args:
        matcher_name: Name of the matcher.
        ims: List of ALL images.
        sparse_views: A list of indices of views to use.
        match_dir: Directory to save matches to.
        corres_name: Name of the corres file, including extension.
    """
    # Validate and select the sparse-view images.
    for sparse_view in sparse_views:
        assert 0 <= sparse_view < len(ims)
    selected_ims = [ims[i] for i in sparse_views]

    # Run pairwise matching over the selected images.
    corres_list = _matchers[matcher_name].match_image_pairs(selected_ims)

    # The matcher indexes into the sparse list; map back to full indices.
    index_map = {i: full for i, full in enumerate(sparse_views)}
    remapped = [
        [index_map[src], sx, sy, index_map[dst], dx, dy, conf]
        for src, sx, sy, dst, dx, dy, conf in corres_list
    ]
    corres_arr = np.array(remapped, dtype=np.float32).reshape((-1, 7))

    # Save (in full index).
    corres_path = match_dir / corres_name
    print(f"Saving correspondence: {corres_path}")
    np.savez_compressed(corres_path, corres_list=corres_arr)
def filter_scene(
    Ks,
    Ts,
    match_dir,
    input_corres_name,
    output_corres_name,
    ray_reproject_min_dist_divider,
    enable_propagate,
    enable_fusion,
):
    """
    Filter correspondences of a scene with geometry-based checks.

    Args:
        Ks: Per-camera intrinsics; Ks[0] is used to infer image size.
        Ts: Per-camera extrinsics.
        match_dir: Directory containing the input/output corres files.
        input_corres_name: Name of the input corres file, including extension.
        output_corres_name: Name of the output corres file, including extension.
        ray_reproject_min_dist_divider:
            ray_reproject_min_dist = min(H, W) / ray_reproject_min_dist_divider
        enable_propagate: Whether to propagate correspondences.
        enable_fusion: Run filter_by_fuse_corres_unidirectional.
    """
    # Build CorresMap.
    input_corres_path = match_dir / input_corres_name
    output_corres_path = match_dir / output_corres_name
    # Get image dimensions from K (assumes the principal point is centered,
    # i.e. W = 2*cx and H = 2*cy).
    cx, cy = Ks[0][0, 2], Ks[0][1, 2]
    W = int(2 * cx)
    H = int(2 * cy)
    ray_reproject_min_median_dist = min(H, W) / 250
    ray_reproject_min_dist = min(H, W) / ray_reproject_min_dist_divider
    print(f"ray_reproject_min_dist_divider: {ray_reproject_min_dist_divider}")
    print(f"min(H, W): {min(H, W)}")
    print(f"ray_reproject_min_dist: {ray_reproject_min_dist}")
    cm = CorresMap.from_npz(input_corres_path)
    print(f"len(corres_list) before: {len(cm.corres_list)}")
    # cm.filter_by_min_num_corres(min_num_corres=20)
    # First pass: drop correspondences with large ray-reprojection error,
    # then RANSAC filtering.
    cm.filter_by_ray_reproject_dist(
        Ks=Ks,
        Ts=Ts,
        min_median_dist=ray_reproject_min_median_dist,
        min_dist=ray_reproject_min_dist,
    )
    cm.filter_by_ransac(
        Ks=Ks,
        Ts=Ts,
        ransac_min_inliers=50,
        ransac_threshold=2.0,
        ransac_prob=0.99999,
    )
    if enable_propagate:
        print("Propagating correspondences (propagate_corres).")
        cm.propagate_corres(max_dist=2)
        # Re-run the same filters after propagation added new candidates.
        cm.filter_by_ray_reproject_dist(
            Ks=Ks,
            Ts=Ts,
            min_median_dist=ray_reproject_min_median_dist,
            min_dist=ray_reproject_min_dist,
        )
        cm.filter_by_ransac(
            Ks=Ks,
            Ts=Ts,
            ransac_min_inliers=50,
            ransac_threshold=2.0,
            ransac_prob=0.99999,
        )
    if enable_fusion:
        print("Fusing correspondences (filter_by_fuse_corres_unidirectional).")
        cm.filter_by_fuse_corres_unidirectional()
    cm.save_npz(output_corres_path)
    print(f"len(corres_list) after: {len(cm.corres_list)}")
def visualize_matches(
    matcher_name,
    ims,
    sparse_views,
    match_dir,
    corres_name,
    no_text,
):
    """
    Visualize and save matches images.

    For every pair of sparse views, saves two JPGs into match_dir: one with
    connection lines and one with endpoint dots only.

    Args:
        matcher_name: Name of the matcher (used only in the text overlay).
        ims: List of ALL images, indexed by full-view indices.
        sparse_views: A list of indices of views to visualize.
        match_dir: Directory holding the corres file and receiving the JPGs.
        corres_name: Name of the corres file, including extension.
        no_text: If True, draw no text overlay on the images.
    """
    cm = CorresMap.from_npz(match_dir / corres_name)
    # All unordered pairs of the selected views.
    src_dst_indices = list(itertools.combinations(sparse_views, 2))
    print(f"Visualizing image pairs: {src_dst_indices}")
    for src_index, dst_index in tqdm(src_dst_indices, leave=False, desc="Visualize"):
        im_src = ims[src_index]
        im_dst = ims[dst_index]
        # Rows: (src_idx, src_x, src_y, dst_idx, dst_x, dst_y, confidence),
        # matching the layout written by match_scene().
        result = cm.query_image_pair(src_index, dst_index)
        src_pixels = result[:, 1:3]
        dst_pixels = result[:, 4:6]
        confidences = result[:, 6]
        # Count unique source pixels to measure match coverage.
        num_corres = len(np.unique(src_pixels, axis=0))
        num_pixels = im_src.shape[0] * im_src.shape[1]
        corres_ratio = num_corres / num_pixels
        if no_text:
            texts = []
        else:
            texts = [
                f"{matcher_name}",
                f"({src_index}, {dst_index})",
                f"{num_corres:,}",
                f"{corres_ratio * 100:.2f}%",
            ]
        # Connection lines (point_color=None disables the endpoint dots).
        im_lines = ct.image.make_corres_image(
            im_src=im_src,
            im_dst=im_dst,
            src_pixels=src_pixels.astype(np.int64),
            dst_pixels=dst_pixels.astype(np.int64),
            confidences=confidences,
            texts=texts,
            point_color=None,
            text_color=(0, 0, 0),
        )
        save_path = match_dir / f"{src_index:03d}_{dst_index:03d}_connections.jpg"
        ct.io.imwrite(save_path, im_lines)
        # Endpoint dots only (line_color=None disables the lines).
        im_points = ct.image.make_corres_image(
            im_src=im_src,
            im_dst=im_dst,
            src_pixels=src_pixels.astype(np.int64),
            dst_pixels=dst_pixels.astype(np.int64),
            confidences=confidences,
            texts=texts,
            line_color=None,
            text_color=(0, 0, 0),
        )
        save_path = match_dir / f"{src_index:03d}_{dst_index:03d}_points.jpg"
        ct.io.imwrite(save_path, im_points)
def main():
    """CLI entry point: run dense matching and/or geometric filtering on one
    scene, then visualize the resulting correspondences.

    Stages (controlled by flags):
      1. --enable_matcher: run the selected matcher over the sparse views.
      2. --enable_filter : filter raw matches with reprojection/RANSAC checks.
      3. Visualization   : always runs on the filtered (or raw) matches.
    """
    np.random.seed(0)
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--enable_matcher",
        action="store_true",
        default=False,
        help="Enable running matcher.",
    )
    parser.add_argument(
        "--enable_filter",
        action="store_true",
        default=False,
        help="Enable running filter.",
    )
    parser.add_argument(
        "--enable_fusion",
        action="store_true",
        default=False,
        help="Enable running filter_by_fuse_corres_unidirectional.",
    )
    parser.add_argument(
        "--disable_augmentation",
        action="store_true",
        default=False,
        help="Disable all augmentation. This does not affect propagation.",
    )
    parser.add_argument(
        "--no_text",
        action="store_true",
        help="Do not show text on images.",
    )
    parser.add_argument(
        "--ray_reproject_min_dist_divider",
        type=float,
        default=1000,
        help="The minimum distance divider for ray reprojection. ",
    )
    parser.add_argument(
        "--enable_propagate",
        action="store_true",
        default=False,
        help="Enable propagating correspondences.",
    )
    parser.add_argument(
        "--matcher_name",
        type=str,
        dest="matcher_name",
        help="The matcher to run",
        choices=sorted(
            [
                "dkm_indoor",
                "dkm_outdoor",
                "dkm",
            ]
        ),
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        dest="config_name",
        help="The config name. This is used to identify different configs "
        "for the same matcher. The match_dir will be named as "
        "match_dir = {matcher_name}_{config_name}_{num_views}. For example, "
        "dkm_default_3",
        required=True,
    )
    parser.add_argument(
        "--num_views",
        type=str,
        help="Number of sparse views to use. "
        "This will read corres/selected_cameras.json",
        required=True,
    )
    parser.add_argument(
        "--dataset",
        type=str,
        help="Dataset category, e.g. dtu.",
        choices=["dtu", "llff", "blender"],
        required=True,
    )
    parser.add_argument(
        "--scene_dir",
        type=str,
        help="Scene directory. Depending on the dataset, different data loader"
        "will be used.",
        required=True,
    )
    parser.add_argument(
        "--corres_dir",
        type=str,
        help="Correspondence dir, which is also the output directory. Typically, "
        "this follows ${scene_dir}/${corres_dir}/${match_dir}. There must be "
        "${corres_dir}/selected_cameras.json inside corres_dir.",
        required=True,
    )
    parser.add_argument(
        "--camera_split_path",
        type=str,
        help="Path to the camera split JSON file.",
        default=None,
    )
    args = parser.parse_args()

    # Initialize matchers. "dkm" fuses the indoor and outdoor DKM models.
    if args.disable_augmentation:
        _matchers["dkm_indoor"] = MatcherDKM(model_type="indoor", augmentations=())
        _matchers["dkm_outdoor"] = MatcherDKM(model_type="outdoor", augmentations=())
        _matchers["dkm"] = FusedMatcher(
            [
                _matchers["dkm_indoor"],
                _matchers["dkm_outdoor"],
            ]
        )
    else:
        _matchers["dkm_indoor"] = MatcherDKM(model_type="indoor")
        _matchers["dkm_outdoor"] = MatcherDKM(model_type="outdoor")
        _matchers["dkm"] = FusedMatcher(
            [
                _matchers["dkm_indoor"],
                _matchers["dkm_outdoor"],
            ]
        )
    if not args.enable_matcher and not args.enable_filter:
        raise ValueError("Either --enable_matcher or --enable_filter must be set.")
    print(f"args: {args}")

    # Directory structure.
    # scene                                  # scene_dir
    # ├── corres                             # corres_dir
    # │   ├── selected_cameras.json
    # │   ├── dkm_loftr_22_25_28             # match_dir
    # │   │   ├── corres.npz
    # │   │   ├── 000_001_connections.jpg
    # │   │   ├── 000_001_points.jpg
    # │   │   ├── ...
    # │   ├── dkm_loftr_6_9_30_33_36
    # │   ├── ...

    # Check corres_dir.
    print(f"Using corres_dir: {args.corres_dir}")
    corres_dir = Path(args.corres_dir)
    corres_dir.mkdir(exist_ok=True, parents=True)

    # Read selected cameras (default split files per dataset).
    if args.camera_split_path is None:
        if args.dataset == "llff":
            args.camera_split_path = Path("configs") / "nerf_llff_camera_split.json"
        elif args.dataset == "dtu":
            args.camera_split_path = Path("configs") / "neus_dtu_camera_split.json"
        else:
            raise ValueError(f"Unknown dataset: {args.dataset}")
    print(f"Using camera_split_path: {args.camera_split_path}")
    train_ids, _ = load_camera_split(
        camera_split_path=args.camera_split_path,
        scene_name=Path(args.scene_dir).name,
        num_views=int(args.num_views),
    )
    print(f"To match sparse views: {train_ids}")

    # Read dataset.
    if args.dataset == "llff":
        # NeRF on LLFF, factor = 8.
        ims, Ks, Ts = load_llff(args.scene_dir, factor=8)
    elif args.dataset == "dtu":
        # NeuS on DTU, factor = 4.
        # NeuS does not downsample images during training. It only downsamples
        # images during rendering as an option.
        ims, Ks, Ts = load_dtu(args.scene_dir)
    else:
        raise ValueError(f"Unknown dataset: {args.dataset}")

    # Create match output dir.
    match_dir = corres_dir / f"{args.matcher_name}_{args.config_name}_{args.num_views}"
    match_dir.mkdir(exist_ok=True, parents=True)
    print(f"Using match_dir: {match_dir}")

    # Match.
    raw_corres_name = "corres_raw.npz"
    filtered_corres_name = "corres.npz"
    if args.enable_matcher:
        match_scene(
            matcher_name=args.matcher_name,
            ims=ims,
            sparse_views=train_ids,
            match_dir=match_dir,
            corres_name=raw_corres_name,
        )

    # Filter.
    if args.enable_filter:
        filter_scene(
            Ks=Ks,
            Ts=Ts,
            match_dir=match_dir,
            input_corres_name=raw_corres_name,
            output_corres_name=filtered_corres_name,
            ray_reproject_min_dist_divider=args.ray_reproject_min_dist_divider,
            enable_propagate=args.enable_propagate,
            enable_fusion=args.enable_fusion,
        )

    # Visualize whichever correspondences were produced last.
    if args.enable_filter:
        final_corres_name = filtered_corres_name
    else:
        final_corres_name = raw_corres_name
    visualize_matches(
        matcher_name=args.matcher_name,
        ims=ims,
        sparse_views=train_ids,
        match_dir=match_dir,
        corres_name=final_corres_name,
        no_text=args.no_text,
    )


if __name__ == "__main__":
    main()
| yxlao/corres-nerf | 49 | (NeurIPS 2023) CorresNeRF: Image Correspondence Priors for Neural Radiance Fields | yxlao | Yixing Lao | HKU-CS | |
src/tools/run_nerf.py | Python | import math
import os
import sys
import time
from pathlib import Path
import camtools as ct
import imageio
import numpy as np
import torch
import torch.nn.functional as F
from ..nerf.load_llff import load_llff_data
from ..nerf.run_nerf_helpers import (
img2mse,
mse2psnr,
to8b,
Embedder,
get_embedder,
NeRF,
get_rays,
get_rays_np,
get_rays_np_with_coords,
ndc_rays,
sample_pdf,
)
from tqdm import tqdm
import open3d as o3d
from torch.utils.tensorboard import SummaryWriter
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
np.random.seed(0)
DEBUG = False
from ..corres.corres_map import CorresMap, load_camera_split
from ..corres.dataloader import load_llff_cameras
def points_to_pixels_individual_cameras(points, Ps, cam_indices):
    """
    Project points to pixels, where each point belongs to an independent camera.

    Args:
        points: [N, 3], torch tensor.
        Ps: [num_cameras, 3, 4], world-to-pixel projection matrix.
            P = K @ [R | t] = K @ T[:3, :].
        cam_indices: [N], torch tensor int64, with min = 0, max = num_cameras - 1.

    Returns:
        [N, 2] torch tensor of pixel coordinates after perspective divide.
    """
    assert isinstance(points, torch.Tensor)
    assert isinstance(Ps, torch.Tensor)
    assert isinstance(cam_indices, torch.Tensor)
    assert points.ndim == 2 and points.shape[1] == 3
    assert Ps.ndim == 3
    n_pts = len(points)
    n_cams = len(Ps)
    assert Ps.shape == (n_cams, 3, 4)
    assert cam_indices.shape == (n_pts,)

    # Gather the projection matrix for each point's camera: (n_pts, 3, 4).
    per_point_P = Ps[cam_indices]

    # Homogeneous coordinates as column vectors: (n_pts, 4, 1).
    ones = torch.ones(n_pts, 1, device=points.device)
    homo = torch.cat([points, ones], dim=1)[..., None]

    # Batched projection: (n_pts, 3).
    proj = torch.bmm(per_point_P, homo).squeeze(-1)
    assert proj.shape == (n_pts, 3)

    # Perspective divide to get 2D pixel coordinates.
    return proj[:, :2] / proj[:, 2:]
def batchify(fn, chunk):
    """Constructs a version of 'fn' that applies to smaller batches."""
    if chunk is None:
        return fn

    def batched(inputs):
        # Apply fn chunk-by-chunk along dim 0, then stitch the results back.
        pieces = [
            fn(inputs[start : start + chunk])
            for start in range(0, inputs.shape[0], chunk)
        ]
        return torch.cat(pieces, 0)

    return batched
def run_network(inputs, viewdirs, fn, embed_fn, embeddirs_fn, netchunk=1024 * 64):
    """Prepares inputs and applies network 'fn'."""
    # Flatten (..., C) sample points to (N, C) and positionally embed them.
    flat_pts = torch.reshape(inputs, [-1, inputs.shape[-1]])
    embedded = embed_fn(flat_pts)
    if viewdirs is not None:
        # Broadcast each ray's view direction to all of its samples.
        dirs = viewdirs[:, None].expand(inputs.shape)
        flat_dirs = torch.reshape(dirs, [-1, dirs.shape[-1]])
        embedded = torch.cat([embedded, embeddirs_fn(flat_dirs)], -1)
    # Evaluate the network in memory-bounded chunks.
    flat_out = batchify(fn, netchunk)(embedded)
    # Restore the original leading dimensions.
    return torch.reshape(flat_out, list(inputs.shape[:-1]) + [flat_out.shape[-1]])
def batchify_rays(rays_flat, chunk=1024 * 32, **kwargs):
    """Render rays in smaller minibatches to avoid OOM."""
    collected = {}
    for start in range(0, rays_flat.shape[0], chunk):
        ret = render_rays(rays_flat[start : start + chunk], **kwargs)
        # Accumulate each output key across chunks.
        for key, val in ret.items():
            collected.setdefault(key, []).append(val)
    # Concatenate the per-chunk pieces along the ray dimension.
    return {key: torch.cat(vals, 0) for key, vals in collected.items()}
def render(
    H,
    W,
    K,
    chunk=1024 * 32,
    rays=None,
    c2w=None,
    ndc=True,
    near=0.0,
    far=1.0,
    use_viewdirs=False,
    c2w_staticcam=None,
    **kwargs,
):
    """Render rays
    Args:
      H: int. Height of image in pixels.
      W: int. Width of image in pixels.
      K: array of shape [3, 3]. Camera intrinsics; K[0][0] (focal length)
        is used for the NDC conversion.
      chunk: int. Maximum number of rays to process simultaneously. Used to
        control maximum memory usage. Does not affect final results.
      rays: array of shape [2, batch_size, 3]. Ray origin and direction for
        each example in batch.
      c2w: array of shape [3, 4]. Camera-to-world transformation matrix.
      ndc: bool. If True, represent ray origin, direction in NDC coordinates.
      near: float or array of shape [batch_size]. Nearest distance for a ray.
      far: float or array of shape [batch_size]. Farthest distance for a ray.
      use_viewdirs: bool. If True, use viewing direction of a point in space in model.
      c2w_staticcam: array of shape [3, 4]. If not None, use this transformation matrix for
       camera while using other c2w argument for viewing directions.
    Returns:
      rgb_map: [batch_size, 3]. Predicted RGB values for rays.
      disp_map: [batch_size]. Disparity map. Inverse of depth.
      acc_map: [batch_size]. Accumulated opacity (alpha) along a ray.
      extras: dict with everything returned by render_rays().
    """
    if c2w is not None:
        # special case to render full image
        rays_o, rays_d = get_rays(H, W, K, c2w)
    else:
        # use provided ray batch
        # c2w is none for LLFF
        rays_o, rays_d = rays
    if use_viewdirs:
        # provide ray directions as input
        viewdirs = rays_d
        # c2w_staticcam is none for LLFF
        if c2w_staticcam is not None:
            # special case to visualize effect of viewdirs
            rays_o, rays_d = get_rays(H, W, K, c2w_staticcam)
        # Normalize to unit directions before feeding to the network.
        viewdirs = viewdirs / torch.norm(viewdirs, dim=-1, keepdim=True)
        viewdirs = torch.reshape(viewdirs, [-1, 3]).float()
    sh = rays_d.shape  # [..., 3]
    if ndc:
        # for forward facing scenes
        rays_o, rays_d = ndc_rays(H, W, K[0][0], 1.0, rays_o, rays_d)
    # Create ray batch: flatten everything to [N, 3].
    rays_o = torch.reshape(rays_o, [-1, 3]).float()
    rays_d = torch.reshape(rays_d, [-1, 3]).float()
    near, far = near * torch.ones_like(rays_d[..., :1]), far * torch.ones_like(
        rays_d[..., :1]
    )
    # Pack per-ray data into a single [N, 8] (or [N, 11] with viewdirs) tensor.
    rays = torch.cat([rays_o, rays_d, near, far], -1)
    if use_viewdirs:
        rays = torch.cat([rays, viewdirs], -1)
    # Render and reshape
    all_ret = batchify_rays(rays, chunk, **kwargs)
    for k in all_ret:
        # Restore the original (e.g. image-shaped) leading dimensions.
        k_sh = list(sh[:-1]) + list(all_ret[k].shape[1:])
        all_ret[k] = torch.reshape(all_ret[k], k_sh)
    k_extract = ["rgb_map", "disp_map", "acc_map", "depth_map"]
    ret_list = [all_ret[k] for k in k_extract]
    ret_dict = {k: all_ret[k] for k in all_ret if k not in k_extract}
    return ret_list + [ret_dict]
def render_path(
    render_poses,
    hwf,
    K,
    chunk,
    render_kwargs,
    gt_imgs=None,
    savedir=None,
    render_factor=0,
):
    """Render a sequence of camera poses.

    Args:
        render_poses: Iterable of camera-to-world matrices to render.
        hwf: (height, width, focal) of the full-resolution camera.
        K: Camera intrinsics matrix passed to render().
        chunk: Max number of rays processed at once (memory bound).
        render_kwargs: Extra keyword arguments forwarded to render().
        gt_imgs: Optional ground-truth images (currently unused; see the
            commented-out PSNR snippet below).
        savedir: If given, each rendered frame is saved there as a PNG.
        render_factor: If non-zero, downsample H/W/focal by this factor for
            faster preview rendering.

    Returns:
        (rgbs, disps): stacked RGB frames and disparity maps.
    """
    H, W, focal = hwf
    if render_factor != 0:
        # Render downsampled for speed
        # NOTE(review): K is not rescaled when render_factor != 0 — confirm
        # this is intended for the c2w rendering path.
        H = H // render_factor
        W = W // render_factor
        focal = focal / render_factor
    rgbs = []
    disps = []
    t = time.time()
    for i, c2w in enumerate(tqdm(render_poses)):
        print(i, time.time() - t)
        t = time.time()
        rgb, disp, acc, depth, _ = render(
            H, W, K, chunk=chunk, c2w=c2w[:3, :4], **render_kwargs
        )
        rgbs.append(rgb.cpu().numpy())
        disps.append(disp.cpu().numpy())
        if i == 0:
            print(rgb.shape, disp.shape)
        """
        if gt_imgs is not None and render_factor==0:
            p = -10. * np.log10(np.mean(np.square(rgb.cpu().numpy() - gt_imgs[i])))
            print(p)
        """
        if savedir is not None:
            rgb8 = to8b(rgbs[-1])
            filename = os.path.join(savedir, "{:03d}.png".format(i))
            imageio.imwrite(filename, rgb8)
    rgbs = np.stack(rgbs, 0)
    disps = np.stack(disps, 0)
    return rgbs, disps
def create_nerf(args):
    """Instantiate NeRF's MLP model.

    Builds the coarse (and, when args.N_importance > 0, fine) NeRF networks,
    the positional embedders, and the Adam optimizer, then reloads a
    checkpoint from args.expdir unless args.no_reload is set.

    Args:
        args: Parsed config namespace (multires, netdepth, lrate, expdir, ...).

    Returns:
        (render_kwargs_train, render_kwargs_test, start, grad_vars, optimizer)
        where `start` is the global step restored from the checkpoint (0 if
        none was loaded).

    Raises:
        ValueError: If args.checkpoint_iter and args.no_reload are both set,
            or the requested checkpoint iteration cannot be found.
    """
    embed_fn, input_ch = get_embedder(args.multires, args.i_embed)
    input_ch_views = 0
    embeddirs_fn = None
    if args.use_viewdirs:
        embeddirs_fn, input_ch_views = get_embedder(args.multires_views, args.i_embed)
    # 5 output channels when hierarchical sampling is on (extra raw sigma), else 4.
    output_ch = 5 if args.N_importance > 0 else 4
    skips = [4]
    model = NeRF(
        D=args.netdepth,
        W=args.netwidth,
        input_ch=input_ch,
        output_ch=output_ch,
        skips=skips,
        input_ch_views=input_ch_views,
        use_viewdirs=args.use_viewdirs,
    ).to(device)
    grad_vars = list(model.parameters())

    model_fine = None
    if args.N_importance > 0:
        model_fine = NeRF(
            D=args.netdepth_fine,
            W=args.netwidth_fine,
            input_ch=input_ch,
            output_ch=output_ch,
            skips=skips,
            input_ch_views=input_ch_views,
            use_viewdirs=args.use_viewdirs,
        ).to(device)
        grad_vars += list(model_fine.parameters())

    def network_query_fn(inputs, viewdirs, network_fn):
        # Embed inputs (and view dirs) and evaluate the given network in chunks.
        return run_network(
            inputs,
            viewdirs,
            network_fn,
            embed_fn=embed_fn,
            embeddirs_fn=embeddirs_fn,
            netchunk=args.netchunk,
        )

    # Create optimizer
    optimizer = torch.optim.Adam(params=grad_vars, lr=args.lrate, betas=(0.9, 0.999))

    start = 0

    ##########################
    # Load checkpoints
    if args.ft_path is not None and args.ft_path != "None":
        ckpts = [args.ft_path]
    else:
        ckpts = [
            Path(args.expdir) / f
            for f in sorted(os.listdir(Path(args.expdir)))
            if "tar" in f
        ]
    print(f"Found ckpts: {ckpts}")

    # Check for non-compatible options.
    if args.checkpoint_iter is not None and args.no_reload:
        # Fixed typo: message previously said "ags.checkpoint_iter".
        raise ValueError("args.checkpoint_iter and args.no_reload both specified.")

    if len(ckpts) > 0 and not args.no_reload:
        if args.checkpoint_iter is not None:
            # Find the specific checkpoint requested by iteration number.
            ckpt_path = None
            for ckpt in ckpts:
                if Path(ckpt).name == f"{args.checkpoint_iter:06d}.tar":
                    ckpt_path = ckpt
                    break
            if ckpt_path is None:
                raise ValueError(
                    f"Could not find checkpoint {args.checkpoint_iter:06d}.tar"
                )
        else:
            # Default: resume from the most recent checkpoint.
            ckpt_path = ckpts[-1]
        print("Reloading from", ckpt_path)
        ckpt = torch.load(ckpt_path)
        start = ckpt["global_step"]
        optimizer.load_state_dict(ckpt["optimizer_state_dict"])
        # Load model
        model.load_state_dict(ckpt["network_fn_state_dict"])
        if model_fine is not None:
            model_fine.load_state_dict(ckpt["network_fine_state_dict"])
    ##########################

    render_kwargs_train = {
        "network_query_fn": network_query_fn,
        "perturb": args.perturb,
        "N_importance": args.N_importance,
        "network_fine": model_fine,
        "N_samples": args.N_samples,
        "network_fn": model,
        "use_viewdirs": args.use_viewdirs,
        "white_bkgd": args.white_bkgd,
        "raw_noise_std": args.raw_noise_std,
    }

    # NDC only good for LLFF-style forward facing data
    if args.dataset_type != "llff" or args.no_ndc:
        print("Not ndc!")
        render_kwargs_train["ndc"] = False
        render_kwargs_train["lindisp"] = args.lindisp

    # Test-time kwargs: same as training but deterministic (no perturbation,
    # no density noise).
    render_kwargs_test = {k: render_kwargs_train[k] for k in render_kwargs_train}
    render_kwargs_test["perturb"] = False
    render_kwargs_test["raw_noise_std"] = 0.0

    return render_kwargs_train, render_kwargs_test, start, grad_vars, optimizer
def raw2outputs(raw, z_vals, rays_d, raw_noise_std=0, white_bkgd=False, pytest=False):
    """Transforms model's predictions to semantically meaningful values.
    Args:
        raw: [num_rays, num_samples along ray, 4]. Prediction from model.
        z_vals: [num_rays, num_samples along ray]. Integration time.
        rays_d: [num_rays, 3]. Direction of each ray.
        raw_noise_std: Std of Gaussian noise added to raw densities (regularizer).
        white_bkgd: If True, composite the result over a white background.
        pytest: If True, use deterministic NumPy noise for regression tests.
    Returns:
        rgb_map: [num_rays, 3]. Estimated RGB color of a ray.
        disp_map: [num_rays]. Disparity map. Inverse of depth map.
        acc_map: [num_rays]. Sum of weights along each ray.
        weights: [num_rays, num_samples]. Weights assigned to each sampled color.
        depth_map: [num_rays]. Estimated distance to object.
    """
    # Distances between adjacent samples; the final interval is effectively
    # infinite so the last sample absorbs remaining transmittance.
    dists = z_vals[..., 1:] - z_vals[..., :-1]
    dists = torch.cat(
        [dists, torch.Tensor([1e10]).expand(dists[..., :1].shape)], -1
    )  # [N_rays, N_samples]
    # Scale by the ray direction norm to get metric distances.
    dists = dists * torch.norm(rays_d[..., None, :], dim=-1)

    rgb = torch.sigmoid(raw[..., :3])  # [N_rays, N_samples, 3]

    noise = 0.0
    if raw_noise_std > 0.0:
        noise = torch.randn(raw[..., 3].shape) * raw_noise_std
        # Overwrite randomly sampled data if pytest
        if pytest:
            np.random.seed(0)
            noise = torch.Tensor(
                np.random.rand(*list(raw[..., 3].shape)) * raw_noise_std
            )

    # alpha_i = 1 - exp(-relu(sigma_i) * delta_i)
    alpha = 1.0 - torch.exp(-F.relu(raw[..., 3] + noise) * dists)  # [N_rays, N_samples]

    # Exclusive cumulative product gives per-sample transmittance; combined
    # with alpha this is standard alpha compositing along the ray.
    transmittance = torch.cumprod(
        torch.cat([torch.ones((alpha.shape[0], 1)), 1.0 - alpha + 1e-10], -1), -1
    )[:, :-1]
    weights = alpha * transmittance

    rgb_map = torch.sum(weights[..., None] * rgb, -2)  # [N_rays, 3]
    depth_map = torch.sum(weights * z_vals, -1)
    disp_map = 1.0 / torch.max(
        1e-10 * torch.ones_like(depth_map), depth_map / torch.sum(weights, -1)
    )
    acc_map = torch.sum(weights, -1)

    if white_bkgd:
        # Fill the unaccumulated alpha with white.
        rgb_map = rgb_map + (1.0 - acc_map[..., None])

    return rgb_map, disp_map, acc_map, weights, depth_map
def render_rays(
    ray_batch,
    network_fn,
    network_query_fn,
    N_samples,
    retraw=False,
    lindisp=False,
    perturb=0.0,
    N_importance=0,
    network_fine=None,
    white_bkgd=False,
    raw_noise_std=0.0,
    verbose=False,
    pytest=False,
):
    """Volumetric rendering.
    Args:
      ray_batch: array of shape [batch_size, ...]. All information necessary
        for sampling along a ray, including: ray origin, ray direction, min
        dist, max dist, and unit-magnitude viewing direction.
      network_fn: function. Model for predicting RGB and density at each point
        in space.
      network_query_fn: function used for passing queries to network_fn.
      N_samples: int. Number of different times to sample along each ray.
      retraw: bool. If True, include model's raw, unprocessed predictions.
      lindisp: bool. If True, sample linearly in inverse depth rather than in depth.
      perturb: float, 0 or 1. If non-zero, each ray is sampled at stratified
        random points in time.
      N_importance: int. Number of additional times to sample along each ray.
        These samples are only passed to network_fine.
      network_fine: "fine" network with same spec as network_fn.
      white_bkgd: bool. If True, assume a white background.
      raw_noise_std: std of noise added to raw density predictions.
      verbose: bool. If True, print more debugging info.
      pytest: bool. If True, overwrite random draws with numpy's seeded RNG
        for reproducibility in tests.
    Returns:
      rgb_map: [num_rays, 3]. Estimated RGB color of a ray. Comes from fine model.
      disp_map: [num_rays]. Disparity map. 1 / depth.
      acc_map: [num_rays]. Accumulated opacity along each ray. Comes from fine model.
      raw: [num_rays, num_samples, 4]. Raw predictions from model.
      rgb0: See rgb_map. Output for coarse model.
      disp0: See disp_map. Output for coarse model.
      acc0: See acc_map. Output for coarse model.
      z_std: [num_rays]. Standard deviation of distances along ray for each
        sample.
    """
    N_rays = ray_batch.shape[0]
    # Unpack the flat per-ray layout: [origin(3), dir(3), near, far, viewdir(3)?].
    rays_o, rays_d = ray_batch[:, 0:3], ray_batch[:, 3:6]  # [N_rays, 3] each
    # Viewing directions are only present when the batch is wider than 8 cols.
    viewdirs = ray_batch[:, -3:] if ray_batch.shape[-1] > 8 else None
    bounds = torch.reshape(ray_batch[..., 6:8], [-1, 1, 2])
    near, far = bounds[..., 0], bounds[..., 1]  # [-1,1]
    # Coarse sampling: N_samples points between near and far, either linear
    # in depth or linear in disparity (inverse depth).
    t_vals = torch.linspace(0.0, 1.0, steps=N_samples)
    if not lindisp:
        z_vals = near * (1.0 - t_vals) + far * (t_vals)
    else:
        z_vals = 1.0 / (1.0 / near * (1.0 - t_vals) + 1.0 / far * (t_vals))
    z_vals = z_vals.expand([N_rays, N_samples])
    if perturb > 0.0:
        # get intervals between samples
        mids = 0.5 * (z_vals[..., 1:] + z_vals[..., :-1])
        upper = torch.cat([mids, z_vals[..., -1:]], -1)
        lower = torch.cat([z_vals[..., :1], mids], -1)
        # stratified samples in those intervals
        t_rand = torch.rand(z_vals.shape)
        # Pytest, overwrite u with numpy's fixed random numbers
        if pytest:
            np.random.seed(0)
            t_rand = np.random.rand(*list(z_vals.shape))
            t_rand = torch.Tensor(t_rand)
        z_vals = lower + (upper - lower) * t_rand
    # [N_rays, N_samples, 3]
    pts = rays_o[..., None, :] + rays_d[..., None, :] * z_vals[..., :, None]
    # Debug only, save rendered points
    # global all_pts
    # global all_rays_os
    # all_pts = np.concatenate(
    #     (all_pts, pts.reshape((-1, 3)).cpu().numpy()),
    #     axis=0,
    # )
    # all_rays_os = np.concatenate(
    #     (all_rays_os, rays_o.reshape((-1, 3)).cpu().numpy()),
    #     axis=0,
    # )
    # np.save('all_pts.npy', all_pts)
    # np.save('all_rays_os.npy', all_rays_os)
    # exit(0)
    # raw = run_network(pts)
    # Coarse pass: query the network and composite along each ray.
    raw = network_query_fn(pts, viewdirs, network_fn)
    rgb_map, disp_map, acc_map, weights, depth_map = raw2outputs(
        raw, z_vals, rays_d, raw_noise_std, white_bkgd, pytest=pytest
    )
    if N_importance > 0:
        # Keep coarse outputs so they can be returned as rgb0/disp0/acc0.
        rgb_map_0, disp_map_0, acc_map_0 = rgb_map, disp_map, acc_map
        # Hierarchical sampling: draw N_importance extra samples from the
        # PDF implied by the coarse weights (endpoints excluded).
        z_vals_mid = 0.5 * (z_vals[..., 1:] + z_vals[..., :-1])
        z_samples = sample_pdf(
            z_vals_mid,
            weights[..., 1:-1],
            N_importance,
            det=(perturb == 0.0),
            pytest=pytest,
        )
        # Stop gradients from flowing through the sample positions.
        z_samples = z_samples.detach()
        # Merge coarse and fine samples in depth order before re-rendering.
        z_vals, _ = torch.sort(torch.cat([z_vals, z_samples], -1), -1)
        # [N_rays, N_samples + N_importance, 3]
        pts = rays_o[..., None, :] + rays_d[..., None, :] * z_vals[..., :, None]
        # Use the fine network if available, else reuse the coarse one.
        run_fn = network_fn if network_fine is None else network_fine
        # raw = run_network(pts, fn=run_fn)
        raw = network_query_fn(pts, viewdirs, run_fn)
        rgb_map, disp_map, acc_map, weights, depth_map = raw2outputs(
            raw, z_vals, rays_d, raw_noise_std, white_bkgd, pytest=pytest
        )
    ret = {
        "rgb_map": rgb_map,
        "disp_map": disp_map,
        "acc_map": acc_map,
        "depth_map": depth_map,
    }
    if retraw:
        ret["raw"] = raw
    if N_importance > 0:
        ret["rgb0"] = rgb_map_0
        ret["disp0"] = disp_map_0
        ret["acc0"] = acc_map_0
        ret["z_std"] = torch.std(z_samples, dim=-1, unbiased=False)  # [N_rays]
    # Sanity check: warn (under the module-level DEBUG flag) if any output
    # contains NaN or Inf.
    for k in ret:
        if (torch.isnan(ret[k]).any() or torch.isinf(ret[k]).any()) and DEBUG:
            print(f"! [Numerical Error] {k} contains nan or inf.")
    return ret
def parse_args():
    """Parse CLI / config-file options and derive experiment paths.

    Returns:
        A namespace with all options plus derived attributes set for the
        "llff" dataset type: expname, expdir, datadir, corres_dir, and
        camera_split_path (always a pathlib.Path or None).

    Raises:
        ValueError: if the dataset type is unknown or the data dir is missing.
    """
    import configargparse

    parser = configargparse.ArgumentParser()
    parser.add_argument("--config", is_config_file=True, help="config file path")
    parser.add_argument("--basedir", type=str, default="./logs/", help="where to store ckpts and logs")
    parser.add_argument("--scene", type=str, required=True)
    parser.add_argument("--n_iters", type=int, default=200000, help="total number of iterations")

    # training options
    parser.add_argument("--netdepth", type=int, default=8, help="layers in network")
    parser.add_argument("--netwidth", type=int, default=256, help="channels per layer")
    parser.add_argument("--netdepth_fine", type=int, default=8, help="layers in fine network")
    parser.add_argument("--netwidth_fine", type=int, default=256, help="channels per layer in fine network")
    parser.add_argument("--N_rand", type=int, default=32 * 32 * 4, help="batch size (number of random rays per gradient step)")
    parser.add_argument("--lrate", type=float, default=5e-4, help="learning rate")
    parser.add_argument("--lrate_decay", type=int, default=250, help="exponential learning rate decay (in 1000 steps)")
    parser.add_argument("--chunk", type=int, default=1024 * 32, help="number of rays processed in parallel, decrease if running out of memory")
    parser.add_argument("--netchunk", type=int, default=1024 * 64, help="number of pts sent through network in parallel, decrease if running out of memory")
    parser.add_argument("--no_batching", action="store_true", help="only take random rays from 1 image at a time")
    parser.add_argument("--no_reload", action="store_true", help="do not reload weights from saved ckpt")
    parser.add_argument("--ft_path", type=str, default=None, help="specific weights npy file to reload for coarse network")
    parser.add_argument("--checkpoint_iter", type=int, default=None, help="Specific checkpoint iter to load. Error if not found.")

    # rendering options
    parser.add_argument("--N_samples", type=int, default=64, help="number of coarse samples per ray")
    parser.add_argument("--N_importance", type=int, default=0, help="number of additional fine samples per ray")
    parser.add_argument("--perturb", type=float, default=1.0, help="set to 0. for no jitter, 1. for jitter")
    parser.add_argument("--use_viewdirs", action="store_true", help="use full 5D input instead of 3D")
    parser.add_argument("--i_embed", type=int, default=0, help="set 0 for default positional encoding, -1 for none")
    parser.add_argument("--multires", type=int, default=10, help="log2 of max freq for positional encoding (3D location)")
    parser.add_argument("--multires_views", type=int, default=4, help="log2 of max freq for positional encoding (2D direction)")
    parser.add_argument("--raw_noise_std", type=float, default=0.0, help="std dev of noise added to regularize sigma_a output, 1e0 recommended")
    parser.add_argument("--render_all", action="store_true", help="render all views (train and test) in depth and rgb")
    parser.add_argument("--render_only", action="store_true", help="do not optimize, reload weights and render out render_poses path")
    parser.add_argument("--render_test", action="store_true", help="render the test set instead of render_poses path")
    parser.add_argument(
        "--depth_only",
        action="store_true",
        help="do not optimize, reload weights and render out depths, "
        "by default, this renders all views",
    )
    parser.add_argument("--render_factor", type=int, default=0, help="downsampling factor to speed up rendering, set 4 or 8 for fast preview")

    # training options
    parser.add_argument("--precrop_iters", type=int, default=0, help="number of steps to train on central crops")
    parser.add_argument("--precrop_frac", type=float, default=0.5, help="fraction of img taken for central crops")

    # dataset options
    parser.add_argument("--dataset_type", type=str, default="llff", help="options: llff / blender / deepvoxels")
    parser.add_argument("--testskip", type=int, default=8, help="will load 1/N images from test/val sets, useful for large datasets like deepvoxels")

    ## deepvoxels flags
    parser.add_argument("--shape", type=str, default="greek", help="options : armchair / cube / greek / vase")

    ## blender flags
    parser.add_argument("--white_bkgd", action="store_true", help="set to render synthetic data on a white bkgd (always use for dvoxels)")
    parser.add_argument("--half_res", action="store_true", help="load blender synthetic data at 400x400 instead of 800x800")

    ## llff flags
    parser.add_argument("--factor", type=int, default=8, help="downsample factor for LLFF images")
    parser.add_argument("--no_ndc", action="store_true", help="do not use normalized device coordinates (set for non-forward facing scenes)")
    parser.add_argument("--lindisp", action="store_true", help="sampling linearly in disparity rather than depth")
    parser.add_argument("--spherify", action="store_true", help="set for spherical 360 scenes")
    parser.add_argument("--llffhold", type=int, default=8, help="will take every 1/N images as LLFF test set, paper uses 8")

    # logging/saving options
    parser.add_argument("--i_print", type=int, default=100, help="frequency of console printout and metric logging")
    parser.add_argument("--i_img", type=int, default=2000, help="frequency of tensorboard image logging")
    parser.add_argument("--i_weights", type=int, default=2000, help="frequency of weight ckpt saving")
    parser.add_argument("--i_testset", type=int, default=10000, help="frequency of testset saving")
    parser.add_argument("--i_video", type=int, default=100000, help="frequency of render_poses video saving")

    # sparse views and correspondence
    parser.add_argument("--num_sparse_views", type=int, default=0, help="# of sparse views to use. 0 for all views.")
    parser.add_argument("--corres_enabled", action="store_true", help="enable correspondence (main control)")
    parser.add_argument("--corres_confidence_enabled", action="store_true", help="enable confidence weighting")
    parser.add_argument("--corres_confidence_normalized", action="store_true", help="normalize confidence to [0, 1]")
    parser.add_argument(
        "--corres_matcher",
        type=str,
        help="Corres matcher name (for directory name). "
        "This is typically the actual matcher name with the "
        "config name, e.g. dkm_default",
    )
    parser.add_argument("--corres_config", type=str, help="Corres matcher config name.", default="default")
    parser.add_argument("--corres_robust_ratio", type=float, default=1.0, help="(deprecated) ratio of inliers for robust loss")
    parser.add_argument("--corres_keep_outlier", action="store_true", help="If true, do not remove statistical outlier.")
    parser.add_argument("--corres_random_keep_ratio", type=float, default=1.0, help="ratio of corres to randomly keep")
    parser.add_argument("--corres_corres_noise_std", type=float, default=0.0, help="std of noise added to corres in both x and y pixels")
    parser.add_argument("--corres_dist_enabled", action="store_true", help="enable correspondence distance loss")
    parser.add_argument("--corres_dist_weight", type=float, default=0.1, help="correspondence loss weight")
    parser.add_argument("--corres_dist_tdist", type=float, default=0.0, help="distance threshold, in world space")
    parser.add_argument("--corres_dist_robust_ratio", type=float, default=None, help="dist robust ratio, None -> corres_robust_ratio")
    parser.add_argument("--corres_pixel_enabled", action="store_true", help="enable correspondence pixel loss")
    parser.add_argument("--corres_pixel_weight", type=float, default=0.1, help="correspondence pixel loss weight")
    parser.add_argument("--corres_pixel_tdist_max", type=float, default=0.0, help="pixel distance threshold, in image space")
    parser.add_argument("--corres_pixel_tdist_min", type=float, default=0.0, help="pixel distance threshold, in image space")
    parser.add_argument("--corres_pixel_robust_ratio", type=float, default=None, help="pixel robust ratio, None -> corres_robust_ratio")
    parser.add_argument("--corres_ds_enabled", action="store_true", help="enable correspondence depth-supervised loss")
    parser.add_argument("--corres_ds_squared", action="store_true", help="use L2 loss, otherwise use L1 loss")
    parser.add_argument("--corres_ds_weight", type=float, default=0.1, help="correspondence depth-supervised loss weight")
    parser.add_argument("--corres_ds_robust_ratio", type=float, default=None, help="ds robust ratio, None -> corres_robust_ratio")
    parser.add_argument("--camera_split_path", type=str, default=None, help="Path to a json file specifying the train and test views.")
    args = parser.parse_args()

    # Bug fix: --camera_split_path is parsed as str, but downstream code
    # calls .is_file() on it (only the None default used to be converted
    # to Path). Normalize a user-supplied value to Path here.
    if args.camera_split_path is not None:
        args.camera_split_path = Path(args.camera_split_path)

    if args.dataset_type == "llff":
        # Determine expname
        args.expname = f"{Path(args.config).stem}_{args.scene}"
        # Determine output dir
        args.expdir = Path(args.basedir) / Path(args.config).stem / args.scene
        # Set data dir from args.scene
        args.datadir = Path("data") / "nerf_llff" / args.scene
        if not Path(args.datadir).is_dir():
            raise ValueError(f"Data dir {args.datadir} not found!")
        # Set corres_dir from args.scene
        args.corres_dir = Path("data_corres") / "nerf_llff" / args.scene
        # Derive camera_split_path from args.config
        if args.camera_split_path is None:
            config_dir = Path(args.config).parent
            args.camera_split_path = config_dir / "nerf_llff_camera_split.json"
        # Print paths
        print("Paths:")
        print(f"- expname          : {args.expname}")
        print(f"- expdir           : {args.expdir}")
        print(f"- datadir          : {args.datadir}")
        print(f"- corres_dir       : {args.corres_dir}")
        print(f"- camera_split_path: {args.camera_split_path}")
    else:
        raise ValueError("Unknown dataset type: {}".format(args.dataset_type))
    print(f"Setting args.expname: {args.expname}")
    print(f"Setting args.datadir: {args.datadir}")
    return args
def train():
args = parse_args()
args.expdir.mkdir(parents=True, exist_ok=True)
# - For some datasets, K is known, write to K and K will be used.
# Example: DTU
# - For other datasets, K is unknown, K will be calculate from H, W, focal.
# Example: LLFF
K = None
# Load data
if args.dataset_type == "llff":
# images : (20, 378, 504, 3)
# poses : (20, 3, 4)
# bds : (20, 2)
# render_poses: (120, 3, 5)
images, poses, bds, render_poses, i_test = load_llff_data(
args.datadir,
args.factor,
recenter=True,
bd_factor=0.75,
spherify=args.spherify,
)
hwf = poses[0, :3, -1]
poses = poses[:, :3, :4]
print("Loaded llff", images.shape, render_poses.shape, hwf, args.datadir)
num_images = int(images.shape[0])
# Cameras using the standard pinhole model
Ks, Ts = load_llff_cameras(scene_dir=args.datadir, factor=args.factor)
Cs = [ct.convert.T_to_C(T) for T in Ts]
Ps = [ct.convert.K_T_to_P(K, T) for K, T in zip(Ks, Ts)]
Ks = torch.tensor(np.array(Ks)).float().to(device)
Ts = torch.tensor(np.array(Ts)).float().to(device)
Cs = torch.tensor(np.array(Cs)).float().to(device)
Ps = torch.tensor(np.array(Ps)).float().to(device)
num_cameras = int(Ks.shape[0])
assert Ks.shape == (num_cameras, 3, 3)
assert Ts.shape == (num_cameras, 4, 4)
assert Cs.shape == (num_cameras, 3)
assert Ps.shape == (num_cameras, 3, 4)
# Default split
if not isinstance(i_test, list):
i_test = [i_test]
i_val = i_test
i_train = np.array(
[i for i in range(num_images) if (i not in i_test and i not in i_val)]
)
# Overwrite, if args.llffhold is > 0
if args.llffhold > 0:
print("Auto LLFF holdout,", args.llffhold)
i_test = np.arange(num_images)[:: args.llffhold]
i_val = i_test
i_train = np.array(
[i for i in range(num_images) if (i not in i_test and i not in i_val)]
)
# Overwrite, if args.num_sparse_views is not zero.
if args.num_sparse_views == -1:
# Use all views for training/testing/validation.
i_train = np.arange(num_images)
i_val = i_train
i_test = i_train
elif args.num_sparse_views == 0:
# Do not overwrite. This will use the default train/test split.
print(f"num_sparse_views == 0, use the default train/test split.")
elif args.num_sparse_views > 0:
if not args.camera_split_path.is_file():
raise ValueError(
f"Camera split file {args.camera_split_path} not found!"
)
print(f"Using camera split: {args.camera_split_path}")
i_train, i_test = load_camera_split(
camera_split_path=args.camera_split_path,
scene_name=args.scene,
num_views=args.num_sparse_views,
)
i_val = i_test
else:
print(f"Invild num_sparse_views: {args.num_sparse_views}.")
print("DEFINING BOUNDS")
if args.no_ndc:
near = np.ndarray.min(bds) * 0.9
far = np.ndarray.max(bds) * 1.0
else:
near = 0.0
far = 1.0
print("NEAR FAR", near, far)
if args.corres_enabled and not (
args.render_only or args.render_test or args.render_all or args.depth_only
):
corres_name = (
f"{args.corres_matcher}_{args.corres_config}_{args.num_sparse_views}"
)
h, w = int(hwf[0]), int(hwf[1])
corres_path = Path(args.corres_dir) / corres_name / "corres.npz"
corres_map = CorresMap.from_npz(corres_path=corres_path)
print(f"Using corres: {corres_path}")
# Try to get the corres_res_h, fullres_w of the image dimension
# when corres is generated.
# TODO: remove this in the future.
vis_jpg_paths = list(corres_path.parent.glob("*connections.jpg"))
if not vis_jpg_paths:
raise FileNotFoundError(
f"No visualization file found in {corres_path.parent}!"
)
vis_jpg_path = vis_jpg_paths[0]
vis_im = ct.io.imread(vis_jpg_path)
corres_res_h = int(vis_im.shape[0])
fullres_w = int(vis_im.shape[1] / 2)
# The scaling shall be exact.
scale_h = corres_res_h / h
scale_w = fullres_w / w
if not np.isclose(scale_h, scale_w):
raise ValueError(f"scale_h {scale_h} != scale_w {scale_w}!")
else:
print(f"Images are scaled by {scale_h}.")
if scale_h != 1.0 or scale_w != 1.0:
corres_map.rescale_corres(
src_wh=(fullres_w, corres_res_h),
dst_wh=(w, h),
)
if args.corres_random_keep_ratio < 1.0:
corres_map.filter_by_random_keep_ratio(
random_keep_ratio=args.corres_random_keep_ratio
)
if args.corres_corres_noise_std > 0.0:
corres_map.add_corres_noise(
noise_std=args.corres_corres_noise_std,
image_hw=(h, w),
)
Ks_numpy = Ks.cpu().numpy()
Ts_numpy = Ts.cpu().numpy()
ray_reproject_min_median_dist = 100.0 # Effectively ignored
ray_reproject_min_dist = 1.0 # Main filter
corres_map.filter_by_ray_reproject_dist(
Ks=Ks_numpy,
Ts=Ts_numpy,
min_median_dist=ray_reproject_min_median_dist,
min_dist=ray_reproject_min_dist,
)
# Filter corres_map by removing statistical outliers
debug_save_path = (
Path(args.basedir) / args.expname / "corres_statistical_outliers.pcd"
)
corres_map.filter_by_statistical_outliers(
Ks.cpu().numpy(),
Ts.cpu().numpy(),
nb_neighbors=20,
std_ratio=0.2,
debug_save_path=debug_save_path,
)
# Compute corres points with Ks, Ts.
corres_map.update_corres_points_map(
normalized_Ks=Ks.cpu().numpy(), normalized_Ts=Ts.cpu().numpy()
)
else:
print("Unknown dataset type", args.dataset_type, "exiting")
return
# Cast intrinsics to right types
H, W, focal = hwf
H, W = int(H), int(W)
hwf = [H, W, focal]
if K is None:
K = np.array([[focal, 0, 0.5 * W], [0, focal, 0.5 * H], [0, 0, 1]])
if args.render_test:
render_poses = np.array(poses[i_test])
# Create log dir and copy the config file
basedir = args.basedir
with open(args.expdir / "args.txt", "w") as file:
for arg in sorted(vars(args)):
attr = getattr(args, arg)
file.write("{} = {}\n".format(arg, attr))
if args.config is not None:
with open(args.expdir / "config.txt", "w") as file:
file.write(open(args.config, "r").read())
# Create nerf model
render_kwargs_train, render_kwargs_test, start, grad_vars, optimizer = create_nerf(
args
)
global_step = start
bds_dict = {
"near": near,
"far": far,
}
render_kwargs_train.update(bds_dict)
render_kwargs_test.update(bds_dict)
# Move testing data to GPU
render_poses = torch.Tensor(render_poses).to(device)
def render_all(iter_index):
"""
iter_index: only affects `save_dir` name.
"""
with torch.no_grad():
# Make dirs
save_dir = args.expdir / f"renderall_{iter_index:06d}"
save_dir.mkdir(parents=True, exist_ok=True)
rgb_dir = save_dir / "rgb"
depth_dir = save_dir / "depth"
rgb_dir.mkdir(parents=True, exist_ok=True)
depth_dir.mkdir(parents=True, exist_ok=True)
# Check images and poses.
assert len(images) == len(poses)
print(f"Render poses shape: {poses.shape}")
# Render rgb and depths.
print("Rendering all images and depths...")
im_rgbs, im_disps = render_path(
torch.Tensor(poses).to(device),
hwf,
K,
args.chunk,
render_kwargs_test,
gt_imgs=images,
savedir=save_dir,
render_factor=args.render_factor,
)
im_depths = 1.0 / im_disps
# Write RGB and depths.
for i, (im_rgb, im_depth) in enumerate(zip(im_rgbs, im_depths)):
# im_rgb: float32, 0-1, np
# im_depth: float32, np, typically > 0, unscaled
im_rgb_path = rgb_dir / f"{i:06d}.png"
im_depth_path = depth_dir / f"{i:06d}.png"
# im_rgb might be slightly oob
im_rgb[im_rgb > 1.0] = 1.0
im_rgb[im_rgb < 0.0] = 0.0
ct.io.imwrite(im_rgb_path, im_rgb)
ct.io.imwrite_depth(im_depth_path, im_depth)
print(f"im_rgb written to {im_rgb_path}")
print(f"im_depth written to {im_depth_path}")
# Render all rgbs and all depths
if args.render_all:
print("RENDER ALL")
# Dirs.
basedir = Path(basedir)
save_dir = args.expdir / f"renderall_{start+1:06d}"
rgb_dir = save_dir / "rgb"
depth_dir = save_dir / "depth"
# Check if file exists.
all_rendered = True
all_paths = []
for i in range(len(images)):
im_rgb_path = rgb_dir / f"{i:06d}.png"
im_depth_path = depth_dir / f"{i:06d}.png"
all_paths.append(im_rgb_path)
all_paths.append(im_depth_path)
if not im_rgb_path.is_file() or not im_depth_path.is_file():
all_rendered = False
break
if all_rendered:
print("All rendered, skip.")
print("All paths:")
print(all_paths)
else:
render_all(iter_index=start + 1)
# End shortcut.
return
# Short circuit if only rendering out from trained model
if args.render_only:
print("RENDER ONLY")
with torch.no_grad():
if args.render_test:
# render_test switches to test poses
images = images[i_test]
else:
# Default is smoother render_poses path
images = None
subset = "test" if args.render_test else "path"
testsavedir = args.expdir / f"renderonly_{subset}_{start+1:06d}"
os.makedirs(testsavedir, exist_ok=True)
print("test poses shape", render_poses.shape)
rgbs, _ = render_path(
render_poses,
hwf,
K,
args.chunk,
render_kwargs_test,
gt_imgs=images,
savedir=testsavedir,
render_factor=args.render_factor,
)
print("Done rendering", testsavedir)
imageio.mimwrite(
os.path.join(testsavedir, "video.mp4"), to8b(rgbs), fps=30, quality=8
)
return
# Short circuit if only extracting depths
if args.depth_only:
print("DEPTH ONLY")
with torch.no_grad():
if args.render_test:
# render_test switches to test poses
images = images[i_test]
else:
# Default is smoother render_poses path
images = None
assert len(images) == len(render_poses)
testsavedir = args.expdir / "depthonly_{}_{:06d}".format(
"test" if args.render_test else "path", start
)
os.makedirs(testsavedir, exist_ok=True)
print("test poses shape", render_poses.shape)
_, im_disps = render_path(
render_poses,
hwf,
K,
args.chunk,
render_kwargs_test,
gt_imgs=images,
savedir=testsavedir,
render_factor=args.render_factor,
)
im_depths = 1.0 / im_disps
print("Done rendering", testsavedir)
for test_idx, im_depth in zip(i_test, im_depths):
ct.io.imwrite_depth(
Path(testsavedir) / f"{test_idx:03d}_depth.png", im_depth
)
return
# Prepare raybatch tensor if batching random rays
N_rand = args.N_rand
use_batching = not args.no_batching
if use_batching:
# [N, ro+rd, H, W, 3], N=num_cameras
official_rays = np.stack([get_rays_np(H, W, K, p) for p in poses[:, :3, :4]], 0)
rays_cxy_to_rays_od = dict() # Map from (c, x, y) to (ray_o, ray_d)
rays_os = []
rays_ds = []
rays_cs = [] # Ray's camera index
rays_xs = [] # W
rays_ys = [] # H
for c in tqdm(range(len(poses[:, :3, :4])), desc="get rays", leave=False):
# Get p.
p = poses[:, :3, :4][c]
# Fill rays containers.
rays_o, rays_d, xs, ys = get_rays_np_with_coords(H, W, K, p)
cs = np.ones_like(xs) * c
rays_os.append(rays_o)
rays_ds.append(rays_d)
rays_cs.append(cs)
rays_xs.append(xs)
rays_ys.append(ys)
# Fill query map.
for x, y, ray_o, ray_d in zip(
xs.flatten(),
ys.flatten(),
rays_o.reshape((-1, 3)),
rays_d.reshape((-1, 3)),
):
c = int(c)
x = int(x)
y = int(y)
cxy = (c, x, y)
ray_od = np.stack([ray_o, ray_d], axis=0)
rays_cxy_to_rays_od[cxy] = ray_od
rays_os = np.asarray(rays_os) # (N, H, W, 3)
rays_ds = np.asarray(rays_ds) # (N, H, W, 3)
rays_cs = np.asarray(rays_cs) # (N, H, W)
rays_xs = np.asarray(rays_xs) # (N, H, W)
rays_ys = np.asarray(rays_ys) # (N, H, W)
rays_cxy = np.stack([rays_cs, rays_xs, rays_ys], -1) # (N, H, W, 3)
rays = np.stack([rays_os, rays_ds], 1)
np.testing.assert_allclose(rays, official_rays)
print("done, concats")
# [N, ro+rd+rgb, H, W, 3]
rays_rgb = np.concatenate([rays, images[:, None]], 1)
# [N, H, W, ro+rd+rgb, 3]
rays_rgb = np.transpose(rays_rgb, [0, 2, 3, 1, 4])
# train images only
rays_rgb = np.stack([rays_rgb[i] for i in i_train], 0)
# [NumTrain*H*W, ro+rd+rgb, 3]
rays_rgb = np.reshape(rays_rgb, [-1, 3, 3])
rays_rgb = rays_rgb.astype(np.float32)
# Only pick train images for rays_cxy
# (NumTrain, H, W, 3)
rays_cxy = np.stack([rays_cxy[i] for i in i_train], 0)
# (NumTrain*H*W, 3)
rays_cxy = rays_cxy.reshape((-1, 3)).astype(np.int64)
# Clean up rays_cxy_to_rays_od.
for cxy in list(rays_cxy_to_rays_od.keys()):
if cxy[0] not in i_train:
del rays_cxy_to_rays_od[cxy]
assert len(rays_cxy_to_rays_od) == len(rays_rgb)
print("shuffle rays")
assert len(rays_rgb) == len(rays_cxy)
permute = np.random.permutation(len(rays_rgb))
rays_rgb = rays_rgb[permute]
rays_cxy = rays_cxy[permute]
print("done")
i_batch = 0
# Move training data to GPU
if use_batching:
images = torch.Tensor(images).to(device)
rays_rgb = torch.Tensor(rays_rgb).to(device)
rays_cxy = torch.Tensor(rays_cxy).to(device)
poses = torch.Tensor(poses).to(device)
print("Begin")
print("TRAIN views are:", i_train)
print("VAL views are :", i_val)
print("TEST views are :", i_test)
# Summary writers
logdir = args.expdir / "summary"
writer = SummaryWriter(log_dir=logdir)
start = start + 1
for i in tqdm(range(start, args.n_iters + 1), initial=start):
# Sample random ray batch
if use_batching:
# Random over all images
batch = rays_rgb[i_batch : i_batch + N_rand] # [B, 2+1, 3*?]
batch = torch.transpose(batch, 0, 1)
batch_rays, target_s = batch[:2], batch[2]
batch_cxys = rays_cxy[i_batch : i_batch + N_rand]
i_batch += N_rand
if i_batch >= rays_rgb.shape[0]:
print("Shuffle data after an epoch!")
rand_idx = torch.randperm(rays_rgb.shape[0])
rays_rgb = rays_rgb[rand_idx]
rays_cxy = rays_cxy[rand_idx]
i_batch = 0
else:
# Random from one image
img_i = np.random.choice(i_train)
target = images[img_i]
target = torch.Tensor(target).to(device)
pose = poses[img_i, :3, :4]
if N_rand is not None:
rays_o, rays_d = get_rays(
H, W, K, torch.Tensor(pose)
) # (H, W, 3), (H, W, 3)
if i < args.precrop_iters:
dH = int(H // 2 * args.precrop_frac)
dW = int(W // 2 * args.precrop_frac)
coords = torch.stack(
torch.meshgrid(
torch.linspace(H // 2 - dH, H // 2 + dH - 1, 2 * dH),
torch.linspace(W // 2 - dW, W // 2 + dW - 1, 2 * dW),
),
-1,
)
if i == start:
print(
f"[Config] Center cropping of size {2*dH} x {2*dW} is enabled until iter {args.precrop_iters}"
)
else:
coords = torch.stack(
torch.meshgrid(
torch.linspace(0, H - 1, H), torch.linspace(0, W - 1, W)
),
-1,
) # (H, W, 2)
coords = torch.reshape(coords, [-1, 2]) # (H * W, 2)
select_inds = np.random.choice(
coords.shape[0], size=[N_rand], replace=False
) # (N_rand,)
select_coords = coords[select_inds].long() # (N_rand, 2)
rays_o = rays_o[select_coords[:, 0], select_coords[:, 1]] # (N_rand, 3)
rays_d = rays_d[select_coords[:, 0], select_coords[:, 1]] # (N_rand, 3)
batch_rays = torch.stack([rays_o, rays_d], 0)
target_s = target[
select_coords[:, 0], select_coords[:, 1]
] # (N_rand, 3)
##### Core optimization loop #####
rgb, src_disp, acc, src_depths, extras = render(
H,
W,
K,
chunk=args.chunk,
rays=batch_rays,
verbose=i < 10,
retraw=True,
**render_kwargs_train,
)
# Default losses for corres. If corres is enabled, the default loss
# values will be overwritten.
corres_dist_loss = torch.tensor(0.0).to(device)
corres_pixel_loss = torch.tensor(0.0).to(device)
corres_ds_loss = torch.tensor(0.0).to(device)
if args.corres_enabled:
# Query correspondence.
batch_cxys = batch_cxys.cpu().numpy().astype(np.int64)
im_indices = batch_cxys[:, 0]
pixel_xs = batch_cxys[:, 1]
pixel_ys = batch_cxys[:, 2]
corres_result = corres_map.query(im_indices, pixel_xs, pixel_ys)
corres_mask = corres_result[:, 7].astype(np.int64)
num_corres = len(corres_mask)
corres_mask = torch.Tensor(corres_mask).to(device).long()
if num_corres > 0:
###############################################################
# Compute rendered src_points and dst_points
###############################################################
# ray_o: camera centers, this will match our Cs
# ray_d: ray directions
# batch_rays: (2, 1024, 3)
# src_batch_rays: (2, num_corres, 3)
src_depths = src_depths[corres_mask]
src_rays = batch_rays[:, corres_mask, :]
# Compute dst_rays: (2, num_corres, 3)
# rays_cxy_to_rays_od can be directly used here.
# dst_rays will be used for rendering.
rays_od = [] # (num_corres, 2, 3)
for ray_cxy in corres_result[:, 3:6]:
ray_od = rays_cxy_to_rays_od[tuple(ray_cxy)]
rays_od.append(ray_od)
dst_rays = np.stack(rays_od, axis=1) # (2, num_corres, 3)
dst_rays = torch.Tensor(dst_rays).to(device)
# Render dst points.
_, _, _, dst_depths, _ = render(
H,
W,
K,
chunk=args.chunk,
rays=dst_rays,
verbose=i < 10,
retraw=True,
**render_kwargs_train,
)
# Depths.
assert src_depths.shape == dst_depths.shape
# Get camera directions.
src_rays_o = src_rays[0, :, :]
src_rays_d = src_rays[1, :, :]
src_rays_d = src_rays_d / torch.norm(src_rays_d, dim=1, keepdim=True)
dst_rays_o = dst_rays[0, :, :]
dst_rays_d = dst_rays[1, :, :]
dst_rays_d = dst_rays_d / torch.norm(dst_rays_d, dim=1, keepdim=True)
# Compute rendered points.
src_points = src_rays_o + src_depths[:, None] * src_rays_d
dst_points = dst_rays_o + dst_depths[:, None] * dst_rays_d
# Normalize confidences
# 0 1 2 3 4 5 6 7
# [src_i, src_x, src_y, dst_i, dst_x, dst_y, confidence, mask]
confidences = torch.tensor(corres_result[:, 6]).to(device)
if args.corres_confidence_normalized:
# Normalize [0.5, 1] to [0, 1].
confidences = (confidences - 0.5) * 2.0
confidences = confidences / confidences.mean()
# Set confidences to 1 if corres is not enabled.
if not args.corres_confidence_enabled:
confidences = torch.ones_like(confidences)
if "corres_dist_enabled" in args and args.corres_dist_enabled:
# 0 1 2 3 4 5 6 7
# [src_i, src_x, src_y, dst_i, dst_x, dst_y, confidence, mask]
# Compute corres_dist_loss.
dists_squared = torch.sum((src_points - dst_points) ** 2, axis=1)
tdist_squared = args.corres_dist_tdist**2
dists_squared = torch.clamp(dists_squared - tdist_squared, min=0.0)
# Robust term
if args.corres_dist_robust_ratio is None:
corres_dist_robust_ratio = args.corres_robust_ratio
else:
corres_dist_robust_ratio = args.corres_dist_robust_ratio
num_robust = int(math.floor(num_corres * corres_dist_robust_ratio))
robust_indices = dists_squared.topk(
num_robust, largest=False, sorted=False
).indices
corres_dist_losses = (
dists_squared[robust_indices] * confidences[robust_indices]
)
if len(corres_dist_losses) > 0:
corres_dist_loss = torch.mean(corres_dist_losses)
else:
corres_dist_loss = torch.tensor(0.0).to(device)
# Sometimes, src_disp or dst_disp is nan. In this case,
# corres_dist_loss will be nan. We need to set it to 0.
if torch.isnan(corres_dist_loss):
print("Warning: setting corres_dist_loss nan loss to 0.")
corres_dist_loss = torch.tensor(0.0).to(device)
if "corres_pixel_enabled" in args and args.corres_pixel_enabled:
# Compute corres_pixel_loss.
# corres_result is arranged in:
# 0 1 2 3 4 5 6 7
# [src_i, src_x, src_y, dst_i, dst_x, dst_y, confidence, mask]
src_is = torch.tensor(corres_result[:, 0]).to(device).long()
dst_is = torch.tensor(corres_result[:, 3]).to(device).long()
src_pixels = torch.tensor(corres_result[:, 1:3]).to(device)
dst_pixels = torch.tensor(corres_result[:, 4:6]).to(device)
# Project src_points to dst image and dst_points to src image.
src_points_proj_on_dst = points_to_pixels_individual_cameras(
src_points, Ps, dst_is
)
dst_points_proj_on_src = points_to_pixels_individual_cameras(
dst_points, Ps, src_is
)
# Clamp the projected pixels by tdist_squared.
src_dists_squared = torch.sum(
(dst_points_proj_on_src - src_pixels) ** 2, axis=1
)
dst_dists_squared = torch.sum(
(src_points_proj_on_dst - dst_pixels) ** 2, axis=1
)
dists_squared = 0.5 * (src_dists_squared + dst_dists_squared)
# If correspondences that are too far away, set dists to 0.
tdist_max_squared = args.corres_pixel_tdist_max**2
dists_squared[dists_squared > tdist_max_squared] = 0.0
# *CLAMP* correspondences that are too far near, such that
# the loss is smooth at corres_pixel_tdist_min.
# That is, if a corres dist squared is exaxtly
# corres_pixel_tdist_min, the loss should be zero.
tdist_min_squared = args.corres_pixel_tdist_min**2
dists_squared = torch.clamp(
dists_squared - tdist_min_squared, min=0.0
)
# Remove non-robust correspondences.
if args.corres_pixel_robust_ratio is None:
corres_pixel_robust_ratio = args.corres_robust_ratio
else:
corres_pixel_robust_ratio = args.corres_pixel_robust_ratio
num_robust = int(
math.floor(len(dists_squared) * corres_pixel_robust_ratio)
)
robust_indices = dists_squared.topk(
num_robust, largest=False, sorted=False
).indices
corres_pixel_losses = (
dists_squared[robust_indices] * confidences[robust_indices]
)
if len(corres_pixel_losses) > 0:
corres_pixel_loss = torch.mean(corres_pixel_losses)
else:
corres_pixel_loss = torch.tensor(0.0).to(device)
if torch.isnan(corres_pixel_loss):
print("Warning: setting corres_pixel_loss nan loss to 0.")
corres_pixel_loss = torch.tensor(0.0).to(device)
if "corres_ds_enabled" in args and args.corres_ds_enabled:
# corres_result is arranged in:
# 0 1 2 3 4 5 6 7
# [src_i, src_x, src_y, dst_i, dst_x, dst_y, confidence, mask]
#
# corres_map.corres_points_map:
# - Key (tuple of length 6):
# (src_i, src_x, src_y, dst_i, dst_x, dst_y)
# - Value (tuple of length 11):
# (src_px, src_py, src_pz, # 0, 1, 2
# dst_px, dst_py, dst_pz, # 3, 4, 5
# mid_px, mid_py, mid_pz, # 6, 7, 8
# src_depth, dst_depth) # 9, 10
keys = corres_result[:, :6]
vals = np.array(
[corres_map.corres_points_map[tuple(key)] for key in keys]
)
src_gt_depths = vals[:, 9].astype(np.float32)
dst_gt_depths = vals[:, 10].astype(np.float32)
src_gt_depths = torch.tensor(src_gt_depths).to(device)
dst_gt_depths = torch.tensor(dst_gt_depths).to(device)
# Compute corres_ds_loss, either L1 or squared.
if args.corres_ds_squared:
src_depth_losses = (
(src_depths - src_gt_depths) / src_gt_depths
) ** 2
dst_depth_losses = (
(dst_depths - dst_gt_depths) / dst_gt_depths
) ** 2
else:
src_depth_losses = torch.abs(src_depths - src_gt_depths)
dst_depth_losses = torch.abs(dst_depths - dst_gt_depths)
depth_losses = 0.5 * (src_depth_losses + dst_depth_losses)
# Remove non-robust correspondences.
if args.corres_ds_robust_ratio is None:
corres_ds_robust_ratio = args.corres_robust_ratio
else:
corres_ds_robust_ratio = args.corres_ds_robust_ratio
num_robust = int(
math.floor(len(depth_losses) * corres_ds_robust_ratio)
)
robust_indices = depth_losses.topk(
num_robust, largest=False, sorted=False
).indices
depth_losses = (
depth_losses[robust_indices] * confidences[robust_indices]
)
# Mean.
corres_ds_loss = torch.mean(depth_losses)
if torch.isnan(corres_ds_loss):
print("Warning: setting corres_ds_loss nan loss to 0.")
corres_ds_loss = torch.tensor(0.0).to(device)
###############################################################
# Clear.
optimizer.zero_grad()
# All losses.
img_loss = img2mse(rgb, target_s)
loss = img_loss
if "corres_dist_enabled" in args and args.corres_dist_enabled:
loss += args.corres_dist_weight * corres_dist_loss
if "corres_pixel_enabled" in args and args.corres_pixel_enabled:
loss += args.corres_pixel_weight * corres_pixel_loss
if "corres_ds_enabled" in args and args.corres_ds_enabled:
loss += args.corres_ds_weight * corres_ds_loss
# Eval PSNR.
# trans = extras['raw'][..., -1]
psnr = mse2psnr(img_loss)
img_loss0 = 0
if "rgb0" in extras:
img_loss0 = img2mse(extras["rgb0"], target_s)
loss = loss + img_loss0
psnr0 = mse2psnr(img_loss0)
if torch.isnan(loss):
print("NaN loss detected.")
print(f"loss : {loss}")
print(f"img_loss : {img_loss}")
print(f"img_loss0 : {img_loss0}")
print(f"corres_dist_loss: {corres_dist_loss}")
print(f"corres_pixel_loss: {corres_pixel_loss}")
print(f"corres_ds_loss : {corres_ds_loss}")
ipdb.set_trace()
pass
# Update.
loss.backward()
optimizer.step()
# NOTE: IMPORTANT!
### update learning rate ###
decay_rate = 0.1
decay_steps = args.lrate_decay * 1000
new_lrate = args.lrate * (decay_rate ** (global_step / decay_steps))
for param_group in optimizer.param_groups:
param_group["lr"] = new_lrate
# Rest is logging
if i % args.i_weights == 0:
path = args.expdir / "{:06d}.tar".format(i)
torch.save(
{
"global_step": global_step,
"network_fn_state_dict": render_kwargs_train[
"network_fn"
].state_dict(),
"network_fine_state_dict": render_kwargs_train[
"network_fine"
].state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
},
path,
)
print("Saved checkpoints at", path)
if i % args.i_video == 0 and i > 0:
pass
if i % args.i_testset == 0 and i > 0:
render_all(i)
print("Saved test set")
if i % args.i_print == 0:
print(
f"Step: {i}, "
f"loss: {loss:.6f}, "
f"img_loss: {img_loss:.6f}, "
f"img_loss0: {img_loss0:.6f}, "
f"corres_dist_loss: {corres_dist_loss:.6f}, "
f"corres_pixel_loss: {corres_pixel_loss:.6f}, "
f"corres_ds_loss: {corres_ds_loss:.6f}"
)
tqdm.write(f"[TRAIN] Iter: {i} Loss: {loss.item()} PSNR: {psnr.item()}")
# Tensorboard logging
writer.add_scalar("lr", new_lrate, i)
writer.add_scalar("loss", loss, i)
writer.add_scalar("img_loss", img_loss, i)
writer.add_scalar("img_loss0", img_loss0, i)
writer.add_scalar("corres_dist_loss", corres_dist_loss, i)
writer.add_scalar("corres_pixel_loss", corres_pixel_loss, i)
writer.add_scalar("corres_ds_loss", corres_ds_loss, i)
if i % args.i_img == 0:
# Render the first image in the test set i_test.
selected_idx = i_test[0]
pose = poses[selected_idx, :3, :4]
with torch.no_grad():
rgb, disp, acc, depth, _ = render(
H, W, K, chunk=args.chunk, c2w=pose, **render_kwargs_test
)
psnr = mse2psnr(img2mse(rgb, images[selected_idx]))
writer.add_scalar("psnr_test_first", psnr, i)
writer.add_image("rgb_test_first", rgb, i, dataformats="HWC")
# Rander a random image in the test set.
selected_idx = np.random.choice(i_test)
pose = poses[selected_idx, :3, :4]
with torch.no_grad():
rgb, disp, acc, depth, _ = render(
H, W, K, chunk=args.chunk, c2w=pose, **render_kwargs_test
)
psnr = mse2psnr(img2mse(rgb, images[selected_idx]))
writer.add_scalar("psnr_test_random", psnr, i)
writer.add_image("rgb_test_random", rgb, i, dataformats="HWC")
global_step += 1
# Script entry point.
if __name__ == "__main__":
    # Make every newly created tensor a CUDA float tensor by default;
    # `train()` is presumably defined earlier in this file — the whole
    # training loop above runs inside it.
    torch.set_default_tensor_type("torch.cuda.FloatTensor")
    train()
| yxlao/corres-nerf | 49 | (NeurIPS 2023) CorresNeRF: Image Correspondence Priors for Neural Radiance Fields | yxlao | Yixing Lao | HKU-CS | |
src/tools/run_neus.py | Python | import logging
import argparse
import numpy as np
from ..neus.runner import Runner
import torch
def func_train(args):
    """Train a NeuS model with the settings parsed from the CLI."""
    Runner(
        args.conf,
        scene_name=args.case,
        is_continue=args.is_continue,
        from_checkpoint=args.from_checkpoint,
        backup_code=True,
    ).train()
def func_render(args):
    """Render images from a trained model.

    Renders either a single camera (``--index``) or every camera in the
    dataset. With ``--compare``, validation images (prediction next to
    ground truth) are produced instead of plain renders.
    """
    runner = Runner(
        args.conf,
        scene_name=args.case,
        is_continue=args.is_continue,
        from_checkpoint=args.from_checkpoint,
        inference_only=True,
    )
    if args.index is not None:
        camera_indices = [args.index]
    else:
        camera_indices = list(range(runner.dataset.n_images))
    if not args.compare:
        runner.render_image(
            camera_indices=camera_indices,
            resolution_level=args.resolution_level,
        )
    else:
        for idx in camera_indices:
            runner.validate_image(
                idx=idx,
                resolution_level=args.resolution_level,
            )
def func_extract_mesh(args):
    """Extract a triangle mesh from the learned SDF via marching cubes."""
    runner = Runner(
        args.conf,
        scene_name=args.case,
        is_continue=args.is_continue,
        from_checkpoint=args.from_checkpoint,
        inference_only=True,
    )
    # --unit selects unit-sphere space; otherwise export in world space.
    use_world_space = not args.unit
    runner.validate_mesh(
        world_space=use_world_space,
        resolution=args.resolution,
    )
def func_interpolate(args):
    """Render an interpolated camera path between two training views."""
    Runner(
        args.conf,
        scene_name=args.case,
        is_continue=args.is_continue,
        from_checkpoint=args.from_checkpoint,
    ).interpolate_view(img_idx_0=args.index_0, img_idx_1=args.index_1)
def func_eval_corres(args):
    """Run correspondence evaluation for the current checkpoint."""
    Runner(
        args.conf,
        scene_name=args.case,
        is_continue=args.is_continue,
        from_checkpoint=args.from_checkpoint,
    ).eval_corres()
def main():
    """CLI dispatcher for NeuS: train / render / extract_mesh / interpolate / eval_corres.

    Global flags (``--conf``, ``--case``, ``--is_continue``,
    ``--from_checkpoint``, ``--gpu``) apply to every subcommand.
    """
    # Basic setups.
    torch.set_default_tensor_type("torch.cuda.FloatTensor")
    FORMAT = "[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s"
    logging.basicConfig(level=logging.DEBUG, format=FORMAT)
    # Main parser.
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(help="mode")
    # Global arguments: config and scene.
    parser.add_argument("--conf", type=str, default="./confs/base.conf")
    parser.add_argument("--case", type=str, default="")
    # Global arguments: weight loading and GPU selection.
    # NOTE(review): with default=True a store_true flag can never be turned
    # off from the CLI — confirm whether default=False was intended.
    parser.add_argument("--is_continue", default=True, action="store_true")
    parser.add_argument("--from_checkpoint", type=str, default=None)
    parser.add_argument("--gpu", type=int, default=0)
    # Subparser: train.
    parser_train = subparsers.add_parser(
        "train",
        help="Train model.",
    )
    parser_train.set_defaults(func=func_train)
    # Subparser: render.
    parser_render = subparsers.add_parser(
        "render",
        help="Render image.",
    )
    parser_render.add_argument(
        "--index",
        type=int,
        default=None,
        help="Index of the camera. If none, all cameras will be rendered.",
    )
    parser_render.add_argument(
        "--compare",
        action="store_true",
        default=False,
        help="If True, GT image will be generated below, a.k.a. validate.",
    )
    parser_render.add_argument(
        "--resolution_level",
        type=int,
        default=4,
        help="1 for original resolution, 2 for half, etc.",
    )
    parser_render.set_defaults(func=func_render)
    # Subparser: extract_mesh.
    parser_extract_mesh = subparsers.add_parser(
        "extract_mesh",
        help="Extract mesh with marching cubes.",
    )
    parser_extract_mesh.add_argument(
        "--unit",
        action="store_true",
        default=False,
        help="True to use unit sphere space. World-space is used by default.",
    )
    parser_extract_mesh.add_argument(
        "--resolution",
        type=int,
        default=512,
        help="Resolution of the marching cubes.",
    )
    parser_extract_mesh.set_defaults(func=func_extract_mesh)
    # Subparser: interpolate
    parser_interpolate = subparsers.add_parser(
        "interpolate",
        help="Interpolate between two images.",
    )
    parser_interpolate.add_argument(
        "--index_0",
        type=int,
        default=None,
        help="Index of the first camera.",
    )
    parser_interpolate.add_argument(
        "--index_1",
        type=int,
        default=None,
        help="Index of the second camera.",
    )
    parser_interpolate.set_defaults(func=func_interpolate)
    # Subparser: eval_corres
    parser_eval_corres = subparsers.add_parser(
        "eval_corres",
        help="Eval corres.",
    )
    parser_eval_corres.set_defaults(func=func_eval_corres)
    # Call subparsers.
    args = parser.parse_args()
    # Bug fix: invoking the script without a subcommand used to crash with
    # AttributeError on `args.func`; print the help text instead.
    if not hasattr(args, "func"):
        parser.print_help()
        parser.exit(2)
    torch.cuda.set_device(args.gpu)
    args.func(args)
# Script entry point.
if __name__ == "__main__":
    main()
| yxlao/corres-nerf | 49 | (NeurIPS 2023) CorresNeRF: Image Correspondence Priors for Neural Radiance Fields | yxlao | Yixing Lao | HKU-CS | |
src/utils/eval_nerf_llff.py | Python | from pathlib import Path
import numpy as np
from tqdm import tqdm
import csv
from ..corres.corres_map import read_selected_cameras
from ..utils.eval_utils import (
eval_image_metrics_by_paths,
eval_depth_metrics_by_paths,
eval_dsnerf_depth_metrics_by_paths,
post_process_results,
)
# Global paths.
# NOTE(review): `_project_root` is the directory containing this file, not
# necessarily the repository root — verify against the intended layout.
_project_root = Path(__file__).parent.resolve()
_nerf_root = _project_root / "nerf"
_eval_root = _project_root / "eval"
def save_results_to_csv(results, csv_path):
    """Write a list of result dicts to ``csv_path`` as a CSV table.

    The rows are first sorted (by metric, config, scene) and their float
    values formatted via ``post_process_results``. Column names come from
    the first result dict.

    Raises:
        ValueError: If ``results`` is empty.
    """
    if not results:
        raise ValueError("No results to save.")
    results = post_process_results(results)
    fieldnames = results[0].keys()
    with open(csv_path, "w") as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames, lineterminator="\n")
        writer.writeheader()
        writer.writerows(results)
    print(f"Saved results to {csv_path}.")
def eval_and_save_ours(scenes, config, iteration, num_views, force_eval=False):
    """
    Evaluate our method for PSNR, SSIM, LPIPS, depth MSE, and depth MAE.
    Args:
        scenes: List of scene names.
        config: Config name.
        iteration: Iteration number.
        num_views: Number of training views.
        force_eval: If True, force evaluation even if results exist.
    Save results to csv file at:
        eval_root / "nerf_llff" / f"nerf_llff_{num_views}view_{iteration:06d}_{config}.csv"
    """
    csv_path = (
        _eval_root
        / "nerf_llff"
        / f"nerf_llff_{num_views}view_{iteration:06d}_{config}.csv"
    )
    # Skip if the CSV was already produced, unless forced.
    if csv_path.exists() and not force_eval:
        print(f"Skip evaluation for {config} {iteration} {num_views}view.")
        return
    else:
        print(f"Evaluating {config} {num_views} views at {iteration} iteration.")
        print(f"Going to save results to {csv_path}.")
        csv_path.parent.mkdir(parents=True, exist_ok=True)
    results = []
    for scene in scenes:
        # Get test view indices (train ids are discarded).
        gt_root = _nerf_root / "data" / "nerf_llff_data" / scene
        _, test_ids = read_selected_cameras(
            gt_root / "corres",
            num_views=num_views,
        )
        # Image prediction. All views were rendered; keep only test views.
        # The ipdb breakpoints below are deliberate debug hooks that fire
        # when a render directory is missing expected files.
        pd_root = _nerf_root / "logs" / f"{scene}_{config}"
        pd_rgb_dir = pd_root / f"renderall_{iteration:06d}" / "rgb"
        pd_rgb_paths = sorted(pd_rgb_dir.glob("*.png"))
        if np.max(test_ids) >= len(pd_rgb_paths):
            import ipdb

            ipdb.set_trace()
        pd_rgb_paths = [pd_rgb_paths[i] for i in test_ids]
        # Depth prediction. All views were rendered; keep only test views.
        pd_depth_dir = pd_root / f"renderall_{iteration:06d}" / "depth"
        pd_depth_paths = sorted(pd_depth_dir.glob("*.png"))
        if np.max(test_ids) >= len(pd_depth_paths):
            import ipdb

            ipdb.set_trace()
        pd_depth_paths = [pd_depth_paths[i] for i in test_ids]
        # Image ground-truth.
        gt_rgb_dir = gt_root / "images_8"
        gt_rgb_paths = sorted(gt_rgb_dir.glob("*.png"))
        if np.max(test_ids) >= len(gt_rgb_paths):
            import ipdb

            ipdb.set_trace()
        gt_rgb_paths = [gt_rgb_paths[i] for i in test_ids]
        # Depth ground-truth: rendered by an all-views model at 200k iters
        # (used as pseudo ground truth — LLFF has no captured depth).
        gt_depth_dir = (
            _nerf_root
            / "logs"
            / f"{scene}_llff_00_all_nondc"
            / "renderall_200000"
            / "depth"
        )
        gt_depth_paths = sorted(gt_depth_dir.glob("*.png"))
        if np.max(test_ids) >= len(gt_depth_paths):
            import ipdb

            ipdb.set_trace()
        gt_depth_paths = [gt_depth_paths[i] for i in test_ids]
        # All paths shall have num_test_views of length.
        num_test_views = len(test_ids)
        if (
            len(pd_rgb_paths) != num_test_views
            or len(pd_depth_paths) != num_test_views
            or len(gt_rgb_paths) != num_test_views
            or len(gt_depth_paths) != num_test_views
        ):
            print(f"Warning: {config} {scene} scene not complete!")
            import ipdb

            ipdb.set_trace()
            pass
        # Eval image metrics; incomplete scenes get sentinel scores
        # (0 for higher-is-better, 100 for lower-is-better).
        if len(pd_rgb_paths) == len(gt_rgb_paths):
            scene_rgb_results = eval_image_metrics_by_paths(
                pd_rgb_paths,
                gt_rgb_paths,
            )
        else:
            print(f"Warning: {config} {scene} RGB scene not complete!")
            scene_rgb_results = {
                "psnr": 0,
                "ssim": 0,
                "lpips": 100,
            }
        # Eval depth metrics, same sentinel convention.
        if len(pd_depth_paths) == len(gt_depth_paths):
            scene_depth_results = eval_depth_metrics_by_paths(
                pd_depth_paths,
                gt_depth_paths,
            )
        else:
            print(f"Warning: {config} {scene} depth scene not complete!")
            scene_depth_results = {
                "mse": 100,
                "mae": 100,
            }
        # Flatten into one row per (metric, config, scene).
        results.extend(
            [
                {
                    "metric": "psnr",
                    "config": config,
                    "scene": scene,
                    "value": scene_rgb_results["psnr"],
                },
                {
                    "metric": "ssim",
                    "config": config,
                    "scene": scene,
                    "value": scene_rgb_results["ssim"],
                },
                {
                    "metric": "lpips",
                    "config": config,
                    "scene": scene,
                    "value": scene_rgb_results["lpips"],
                },
                {
                    "metric": "mse",
                    "config": config,
                    "scene": scene,
                    "value": scene_depth_results["mse"],
                },
                {
                    "metric": "mae",
                    "config": config,
                    "scene": scene,
                    "value": scene_depth_results["mae"],
                },
            ]
        )
    save_results_to_csv(results, csv_path)
def eval_and_save_dsnerf(scenes, iteration, num_views, force_eval=False):
    """
    Evaluate DSNeRF for PSNR, SSIM, LPIPS, depth MSE, and depth MAE.
    Args:
        scenes: List of scene names.
        iteration: Iteration number.
        num_views: Number of training views.
        force_eval: If True, force evaluation even if results exist.
    Save results to csv file at:
        eval_root / "nerf_llff" / f"nerf_llff_{num_views}view_{iteration:06d}_dsnerf.csv"
    """
    csv_path = (
        _eval_root
        / "nerf_llff"
        / f"nerf_llff_{num_views}view_{iteration:06d}_dsnerf.csv"
    )
    # DSNeRF checkout is expected as a sibling directory of the project.
    dsnerf_root = (_project_root.parent / "DSNeRF").resolve()
    if csv_path.exists() and not force_eval:
        print(f"DSNeRF {iteration} already evaluated.")
        return
    else:
        print(f"Evaluating DSNeRF {num_views} views at {iteration} iteration.")
        print(f"Going to save results to {csv_path}.")
        csv_path.parent.mkdir(parents=True, exist_ok=True)
    results = []
    for scene in tqdm(scenes, desc="Evaluate DSNeRF"):
        # NOTE(review): "_3v" is hard-coded, so this only matches 3-view
        # DSNeRF runs regardless of num_views — confirm intended.
        pd_dir = dsnerf_root / "logs" / f"{scene}_3v" / f"testset_{iteration:06d}"
        gt_root = _nerf_root / "data" / "nerf_llff_data" / scene
        gt_rgb_dir = gt_root / "images_8"
        # Read sparse views.
        train_ids, test_ids = read_selected_cameras(
            gt_root / "corres",
            num_views=num_views,
        )
        # Predicted RGB (png without "depth" in name) and depth (npz).
        pd_paths = sorted(pd_dir.glob("*.png"))
        pd_rgb_paths = [p for p in pd_paths if "depth" not in p.name]
        pd_npz_paths = sorted(pd_dir.glob("*.npz"))
        assert len(pd_rgb_paths) == len(pd_npz_paths) == len(test_ids)
        # Ground-truth RGB.
        gt_rgb_paths = sorted(gt_rgb_dir.glob("*.png"))
        gt_rgb_paths = [gt_rgb_paths[i] for i in test_ids]
        # Ground-truth Depth: pseudo GT rendered by an all-views model.
        gt_depth_dir = (
            _nerf_root
            / "logs"
            / f"{scene}_llff_00_all_nondc"
            / "renderall_200000"
            / "depth"
        )
        gt_depth_paths = sorted(gt_depth_dir.glob("*.png"))
        gt_depth_paths = [gt_depth_paths[i] for i in test_ids]
        # Eval.
        scene_rgb_results = eval_image_metrics_by_paths(
            pd_rgb_paths,
            gt_rgb_paths,
        )
        scene_depth_results = eval_dsnerf_depth_metrics_by_paths(
            pd_npz_paths,
            gt_depth_paths,
        )
        # One row per (metric, config, scene).
        results.extend(
            [
                {
                    "metric": "psnr",
                    "config": "dsnerf",
                    "scene": scene,
                    "value": scene_rgb_results["psnr"],
                },
                {
                    "metric": "ssim",
                    "config": "dsnerf",
                    "scene": scene,
                    "value": scene_rgb_results["ssim"],
                },
                {
                    "metric": "lpips",
                    "config": "dsnerf",
                    "scene": scene,
                    "value": scene_rgb_results["lpips"],
                },
                {
                    "metric": "mse",
                    "config": "dsnerf",
                    "scene": scene,
                    "value": scene_depth_results["mse"],
                },
                {
                    "metric": "mae",
                    "config": "dsnerf",
                    "scene": scene,
                    "value": scene_depth_results["mae"],
                },
            ]
        )
    save_results_to_csv(results, csv_path)
def eval_and_save_regnerf(scenes, num_views, iteration, force_eval=False):
    """
    Evaluate RegNeRF for PSNR, SSIM, LPIPS, depth MSE, and depth MAE.
    Args:
        scenes: List of scene names.
        num_views: Number of training views. Must be 3, 6, or 9.
        iteration: Iteration number (only used in the output filename).
        force_eval: If True, force evaluation even if csv file exists.
    Save results to csv file at:
        eval_root / "nerf_llff" / f"nerf_llff_{num_views}view_{iteration:06d}_regnerf.csv"
    """
    csv_path = (
        _eval_root
        / "nerf_llff"
        / f"nerf_llff_{num_views}view_{iteration:06d}_regnerf.csv"
    )
    if csv_path.exists() and not force_eval:
        print(f"RegNeRF {iteration} already evaluated.")
        return
    else:
        print(f"Evaluating RegNeRF {num_views} views at {iteration} iteration.")
        print(f"Going to save results to {csv_path}.")
        csv_path.parent.mkdir(parents=True, exist_ok=True)
    assert num_views in [3, 6, 9]
    # RegNeRF checkout is expected as a sibling directory of the project.
    regnerf_root = (_project_root.parent / "regnerf").resolve()
    scene_root = regnerf_root / "out" / f"llff{num_views}"
    results = []
    for scene in tqdm(scenes, desc="Evaluate RegNeRF"):
        scene_dir = scene_root / scene
        pd_dir = scene_dir / "test_preds"
        gt_root = _nerf_root / "data" / "nerf_llff_data" / scene
        gt_rgb_dir = gt_root / "images_8"
        # Read sparse views.
        train_ids, test_ids = read_selected_cameras(
            gt_root / "corres",
            num_views=num_views,
        )
        # Predicted RGB ("color" pngs) and depth ("depth" pngs).
        pd_paths = sorted(pd_dir.glob("*.png"))
        pd_rgb_paths = [p for p in pd_paths if "color" in p.name]
        pd_depth_paths = [p for p in pd_paths if "depth" in p.name]
        assert len(pd_rgb_paths) == len(pd_depth_paths) == len(test_ids)
        # Ground-truth RGB.
        gt_rgb_paths = sorted(gt_rgb_dir.glob("*.png"))
        gt_rgb_paths = [gt_rgb_paths[i] for i in test_ids]
        # Ground-truth Depth: pseudo GT rendered by an all-views model.
        gt_depth_dir = (
            _nerf_root
            / "logs"
            / f"{scene}_llff_00_all_nondc"
            / "renderall_200000"
            / "depth"
        )
        gt_depth_paths = sorted(gt_depth_dir.glob("*.png"))
        gt_depth_paths = [gt_depth_paths[i] for i in test_ids]
        # Eval.
        scene_rgb_results = eval_image_metrics_by_paths(
            pd_rgb_paths,
            gt_rgb_paths,
        )
        scene_depth_results = eval_depth_metrics_by_paths(
            pd_depth_paths,
            gt_depth_paths,
        )
        # One row per (metric, config, scene).
        results.extend(
            [
                {
                    "metric": "psnr",
                    "config": "regnerf",
                    "scene": scene,
                    "value": scene_rgb_results["psnr"],
                },
                {
                    "metric": "ssim",
                    "config": "regnerf",
                    "scene": scene,
                    "value": scene_rgb_results["ssim"],
                },
                {
                    "metric": "lpips",
                    "config": "regnerf",
                    "scene": scene,
                    "value": scene_rgb_results["lpips"],
                },
                {
                    "metric": "mse",
                    "config": "regnerf",
                    "scene": scene,
                    "value": scene_depth_results["mse"],
                },
                {
                    "metric": "mae",
                    "config": "regnerf",
                    "scene": scene,
                    "value": scene_depth_results["mae"],
                },
            ]
        )
    save_results_to_csv(results, csv_path)
def main():
    """Evaluate the configured methods on the NeRF-LLFF benchmark scenes.

    Each ``eval_and_save_*`` call writes its own CSV file under the eval
    root and skips itself if that file already exists.
    """
    # The 8 standard LLFF scenes.
    scenes = [
        "fern",
        "flower",
        "fortress",
        "horns",
        "leaves",
        "orchids",
        "room",
        "trex",
    ]
    # (config_name, num_views) pairs of our method to evaluate.
    configs_num_views = [
        ("config_name", 3),
    ]
    iteration = 50000
    # Baselines (uncomment to evaluate):
    # eval_and_save_dsnerf(scenes=scenes, iteration=iteration, num_views=3)
    # eval_and_save_regnerf(scenes=scenes, iteration=iteration, num_views=3)
    # Eval ours.
    for config, num_views in configs_num_views:
        eval_and_save_ours(
            scenes=scenes, config=config, iteration=iteration, num_views=num_views
        )
# Script entry point.
if __name__ == "__main__":
    main()
| yxlao/corres-nerf | 49 | (NeurIPS 2023) CorresNeRF: Image Correspondence Priors for Neural Radiance Fields | yxlao | Yixing Lao | HKU-CS | |
src/utils/eval_neus_dtu.py | Python | from pathlib import Path
from pyhocon import ConfigFactory
import json
from ..utils.eval_utils import (
eval_image_metrics_by_paths,
)
from ..corres.corres_map import read_selected_cameras
from ..utils.pydtu import dtu_chamfer_l1
from pprint import pprint
from tqdm import tqdm
import argparse
# Global paths, resolved relative to this file's location
# (two levels up is treated as the NeuS project root).
_pwd = Path(__file__).parent.resolve()
_neus_root = _pwd.parent.parent
_eval_root = _neus_root / "eval"
_exp_root = _neus_root / "exp"
_data_root = _neus_root / "data"
_config_root = _neus_root / "confs"
_eval_dir = _eval_root / "sdf_dtu"  # {task}_{dataset}
def get_eval_path(method, config, scene, iteration):
    """Return the JSON path where eval results for this run are stored."""
    filename = f"{method}_{config}_{scene}_{iteration}.json"
    return _eval_dir / filename
def is_neus_data_ready(config, scene, iteration, verbose=False):
    """
    Check if all the eval files are ready for NeuS.

    Ready means: rendered images for this iteration exist and match the
    count of GT images and masks, the config file exists, and the
    world-space mesh for this iteration exists.

    Args:
        config: Config name (confs/{config}.conf).
        scene: Scene name (data/{scene}).
        iteration: Checkpoint iteration to look for.
        verbose: If True, print which paths are missing.

    Returns:
        bool: True if everything needed by eval_neus() is present.
    """
    # Folders.
    exp_dir = _exp_root / scene / config
    data_dir = _data_root / scene
    pd_rgb_dir = exp_dir / "renders"
    gt_rgb_dir = data_dir / "image"
    gt_mask_dir = data_dir / "mask"
    # Image paths. Renders from multiple iterations may coexist, so filter
    # by the zero-padded iteration substring.
    iter_pattern = f"{iteration:0>8d}"
    pd_rgb_paths = sorted(list(pd_rgb_dir.glob("*.png")))
    pd_rgb_paths = [p for p in pd_rgb_paths if iter_pattern in str(p)]
    gt_rgb_paths = sorted(list(gt_rgb_dir.glob("*.png")))
    gt_mask_paths = sorted(list(gt_mask_dir.glob("*.png")))
    # Other paths.
    config_path = Path(_config_root / f"{config}.conf")
    mesh_path = exp_dir / "meshes" / f"{iteration:08}_world.ply"
    is_all_found = True
    if not len(pd_rgb_paths) == len(gt_rgb_paths) == len(gt_mask_paths):
        is_all_found = False
    if len(pd_rgb_paths) == 0:
        is_all_found = False
    if not config_path.is_file():
        is_all_found = False
    if not mesh_path.is_file():
        is_all_found = False

    # Diagnostic dump of every probed path; only used in verbose mode.
    def _print_paths():
        print(f"exp_dir : {exp_dir}")
        print(f"data_dir : {data_dir}")
        print(
            f"pd_rgb_dir : {pd_rgb_dir}\n"
            f" found {len(pd_rgb_paths)} images"
        )
        print(
            f"gt_rgb_dir : {gt_rgb_dir}\n"
            f" found {len(gt_rgb_paths)} images"
        )
        print(
            f"gt_mask_dir: {gt_mask_dir}\n"
            f" found {len(gt_mask_paths)} images"
        )
        print(
            f"config_path: {config_path}\n"
            f" found: {config_path.is_file()}"
        )
        print(
            f"mesh_path : {mesh_path}\n" f" found: {mesh_path.is_file()}"
        )

    if not is_all_found and verbose:
        print(f"Some files are missing for {config}-{scene}-{iteration}")
        _print_paths()
    return is_all_found
def eval_neus(config, scene, iteration):
    """
    Eval image and geometric metrics, and store the results.
    - Filename: {method}_{config}_{scene}_{iteration}.json,
      e.g.: _eval_dir / "neus_dtu_50_s3_init_dtu_scan24.json"
    - Content: A dict of metrics, e.g.:
      {
          "method": xxx,
          "config": xxx,
          "iteration": xxx,
          "psnr": xxx,
          "ssim": xxx,
          "lpips": xxx,
          "chamfer": xxx,
      }
    Image metrics are computed on test views only, both with and without
    the object mask; chamfer distance is computed on the extracted mesh.
    Raises ValueError if required files are missing (see
    is_neus_data_ready for a non-raising check).
    """
    print(f"[Eval] {config}, {scene}, {iteration}")
    # Paths.
    exp_dir = _exp_root / scene / config
    data_dir = _data_root / scene
    pd_rgb_dir = exp_dir / "renders"
    gt_rgb_dir = data_dir / "image"
    gt_mask_dir = data_dir / "mask"
    config_path = Path(_config_root / f"{config}.conf")
    mesh_path = exp_dir / "meshes" / f"{iteration:08}_world.ply"
    # Load image paths.
    # Sometimes for one model, it may be rendered at different iterations.
    iter_pattern = f"{iteration:0>8d}"
    pd_rgb_paths = sorted(list(pd_rgb_dir.glob("*.png")))
    pd_rgb_paths = [p for p in pd_rgb_paths if iter_pattern in str(p)]
    gt_rgb_paths = sorted(list(gt_rgb_dir.glob("*.png")))
    gt_mask_paths = sorted(list(gt_mask_dir.glob("*.png")))

    # Sanity checks. Diagnostic dump printed before raising.
    def _print_paths():
        print(f"exp_dir : {exp_dir}")
        print(f"data_dir : {data_dir}")
        print(
            f"pd_rgb_dir : {pd_rgb_dir}"
            f" found {len(pd_rgb_paths)} images"
        )
        print(
            f"gt_rgb_dir : {gt_rgb_dir}"
            f" found {len(gt_rgb_paths)} images"
        )
        print(
            f"gt_mask_dir: {gt_mask_dir}"
            f" found {len(gt_mask_paths)} images"
        )
        print(
            f"config_path: {config_path}" f" found: {config_path.is_file()}"
        )
        print(f"mesh_path : {mesh_path}" f" found: {mesh_path.is_file()}")

    if not len(pd_rgb_paths) == len(gt_rgb_paths) == len(gt_mask_paths):
        _print_paths()
        raise ValueError("Error image paths.")
    if len(pd_rgb_paths) == 0:
        _print_paths()
        raise ValueError("Error image paths.")
    if not config_path.is_file():
        _print_paths()
        raise ValueError("Error config paths.")
    if not mesh_path.is_file():
        _print_paths()
        raise ValueError("Error mesh paths.")
    # Read config to get the sparse views, then keep test views only.
    conf = ConfigFactory.parse_file(config_path)["dataset"]
    num_sparse_views = conf.get_int("num_sparse_views")
    train_ids, test_ids = read_selected_cameras(
        Path(data_dir) / "corres", num_sparse_views
    )
    print(f"train_ids: {train_ids}")
    print(f"test_ids : {test_ids}")
    pd_rgb_paths = [pd_rgb_paths[i] for i in test_ids]
    gt_rgb_paths = [gt_rgb_paths[i] for i in test_ids]
    gt_mask_paths = [gt_mask_paths[i] for i in test_ids]
    # Eval image metrics, masked and unmasked.
    rgb_result = eval_image_metrics_by_paths(
        pd_rgb_paths,
        gt_rgb_paths,
        gt_mask_paths,
        verbose=True,
    )
    rgb_nomask_result = eval_image_metrics_by_paths(
        pd_rgb_paths,
        gt_rgb_paths,
        gt_mask_paths=None,
        verbose=True,
    )
    # Eval chamfer-l1 metric against the DTU reference for this scan.
    scene_index = int(scene.split("dtu_scan")[1])
    chamfer = dtu_chamfer_l1(
        in_path=mesh_path,
        scene=scene_index,
        mode="mesh",
        verbose=False,
        parallel=False,
    )
    # Write to file.
    result = {
        "method": "neus",
        "config": config,
        "scene": scene,
        "iteration": iteration,
        "psnr": rgb_result["psnr"],
        "ssim": rgb_result["ssim"],
        "lpips": rgb_result["lpips"],
        "psnr_nomask": rgb_nomask_result["psnr"],
        "ssim_nomask": rgb_nomask_result["ssim"],
        "lpips_nomask": rgb_nomask_result["lpips"],
        "chamfer": chamfer,
    }
    pprint(result)
    eval_path = get_eval_path("neus", config, scene, iteration)
    eval_path.parent.mkdir(parents=True, exist_ok=True)
    with eval_path.open("w") as f:
        json.dump(result, f, indent=4)
    print(f"Results saved to {eval_path}")
def main():
    """Evaluate every (config, scene) pair that is ready and not yet evaluated.

    Scenes are discovered as sub-directories of the data root. ``--reverse``
    flips the config order so two processes can work the list from both ends.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--reverse",
        action="store_true",
        help="Reverse the order of configs for parallelization.",
    )
    args = parser.parse_args()
    configs = [
        "config_name",
    ]
    configs = configs[::-1] if args.reverse else configs
    # Every sub-directory of the data root is treated as a scene.
    scenes = [p.name for p in _data_root.iterdir() if p.is_dir()]
    iteration = 200000
    for config in configs:
        for scene in tqdm(scenes, desc=f"Eval {config}"):
            eval_path = get_eval_path("neus", config, scene, iteration)
            # Check for an existing result first so we only probe data
            # readiness (several filesystem globs) when actually needed.
            if eval_path.is_file():
                print(f"Skip {eval_path} because it already exists.")
            elif is_neus_data_ready(config, scene, iteration):
                eval_neus(config=config, scene=scene, iteration=iteration)
            else:
                print(f"Skip {eval_path} because data is not ready.")
# Script entry point.
if __name__ == "__main__":
    main()
| yxlao/corres-nerf | 49 | (NeurIPS 2023) CorresNeRF: Image Correspondence Priors for Neural Radiance Fields | yxlao | Yixing Lao | HKU-CS | |
src/utils/eval_utils.py | Python | from pathlib import Path
import camtools as ct
import numpy as np
from tqdm import tqdm
def post_process_results(
    results, sort_keys=("metric", "config", "scene"), decimal_points=3
):
    """
    Sort result dicts and format their float values as strings.

    Args:
        results (list[dict]): List of result dicts.
        sort_keys (tuple[str]): Keys to sort by; every result dict must
            contain all of them.
        decimal_points (int): Number of decimal points to round to.

    Returns:
        A new sorted list. Float values are replaced in place inside the
        dicts by fixed-point strings, so the input dicts are mutated.
    """

    def _sort_key(entry):
        return tuple(entry[k] for k in sort_keys)

    ordered = sorted(results, key=_sort_key)
    for entry in ordered:
        for name in entry:
            value = entry[name]
            if isinstance(value, (np.floating, float)):
                entry[name] = f"{value:.{decimal_points}f}"
    return ordered
def eval_image_metrics_by_paths(pd_paths, gt_paths, gt_mask_paths=None, verbose=False):
    """
    Compute average PSNR, SSIM, and LPIPS over paired image lists.

    Args:
        pd_paths: Paths to predicted images.
        gt_paths: Paths to ground truth images.
        gt_mask_paths: Paths to masks. If None, then no mask is used.
        verbose: Set to True to enable tqdm.

    Returns:
        A dict of metrics, e.g.:
            {"psnr": float, "ssim": float, "lpips": float}

    Raises:
        ValueError: On length mismatch or a missing file.
    """
    pd_paths = [Path(p) for p in pd_paths]
    gt_paths = [Path(p) for p in gt_paths]
    if len(pd_paths) != len(gt_paths):
        raise ValueError("Lengths of pd_paths and gt_paths must be equal.")
    for pd_path, gt_path in zip(pd_paths, gt_paths):
        if not pd_path.is_file():
            raise ValueError(f"{pd_path} not found.")
        if not gt_path.is_file():
            raise ValueError(f"{gt_path} not found.")
    if gt_mask_paths is None:
        gt_mask_paths = [None] * len(pd_paths)
    else:
        gt_mask_paths = [Path(p) for p in gt_mask_paths]
        if len(gt_mask_paths) != len(pd_paths):
            raise ValueError("Lengths of gt_mask_paths and pd_paths must be equal.")
        for gt_mask_path in gt_mask_paths:
            if not gt_mask_path.is_file():
                raise ValueError(f"{gt_mask_path} not found.")
    psnrs, ssims, lpipses = [], [], []
    triplets = list(zip(pd_paths, gt_paths, gt_mask_paths))
    for pd_path, gt_path, gt_mask_path in tqdm(
        triplets,
        desc="eval_image_metrics_by_paths",
        leave=False,
        disable=not verbose,
    ):
        im_pd, im_gt, im_mask = ct.metric.load_im_pd_im_gt_im_mask_for_eval(
            im_pd_path=pd_path,
            im_gt_path=gt_path,
            im_mask_path=gt_mask_path,
            alpha_mode="white",
        )
        psnrs.append(ct.metric.image_psnr(im_pd, im_gt, im_mask))
        ssims.append(ct.metric.image_ssim(im_pd, im_gt, im_mask))
        lpipses.append(ct.metric.image_lpips(im_pd, im_gt, im_mask))
    return {
        "psnr": np.mean(np.array(psnrs)),
        "ssim": np.mean(np.array(ssims)),
        "lpips": np.mean(np.array(lpipses)),
    }
def eval_depth_metrics_by_paths(pd_paths, gt_paths, gt_mask_paths=None):
    """
    Evaluate the avg MSE and avg MAE given two lists of paths.

    Args:
        pd_paths: Paths to predicted depth maps.
        gt_paths: Paths to ground truth depth maps.
        gt_mask_paths: Paths to masks. If None, then no mask is used.

    Returns:
        A dict of metrics:
            {"mse": float, "mae": float}

    Raises:
        ValueError: On length mismatch or a missing file.
    """
    pd_paths = [Path(pd_path) for pd_path in pd_paths]
    gt_paths = [Path(gt_path) for gt_path in gt_paths]
    if len(pd_paths) != len(gt_paths):
        raise ValueError("Lengths of pd_paths and gt_paths must be equal.")
    for pd_path, gt_path in zip(pd_paths, gt_paths):
        if not pd_path.is_file():
            raise ValueError(f"{pd_path} not found.")
        if not gt_path.is_file():
            raise ValueError(f"{gt_path} not found.")
    if gt_mask_paths is not None:
        gt_mask_paths = [Path(gt_mask_path) for gt_mask_path in gt_mask_paths]
        if len(gt_mask_paths) != len(pd_paths):
            raise ValueError("Lengths of gt_mask_paths and pd_paths must be equal.")
        for gt_mask_path in gt_mask_paths:
            if not gt_mask_path.is_file():
                raise ValueError(f"{gt_mask_path} not found.")
    else:
        gt_mask_paths = [None] * len(pd_paths)
    mses = []
    maes = []
    for pd_path, gt_path, gt_mask_path in zip(pd_paths, gt_paths, gt_mask_paths):
        im_depth_pd = ct.io.imread_depth(pd_path)
        im_depth_gt = ct.io.imread_depth(gt_path)
        if gt_mask_path is not None:
            # Resize the mask to the GT resolution, binarize, and keep
            # only the masked pixels of both depth maps.
            shape_wh = (im_depth_gt.shape[1], im_depth_gt.shape[0])
            im_mask = ct.io.imread(gt_mask_path, alpha_mode="ignore")
            im_mask = ct.image.resize(im_mask, shape_wh=shape_wh)
            if im_mask.ndim == 3:
                im_mask = im_mask[:, :, 0]
            im_mask = (im_mask > 0.5).astype(bool)
            im_depth_pd = im_depth_pd[im_mask]
            im_depth_gt = im_depth_gt[im_mask]
        # NOTE(review): an all-zero mask leaves no pixels and .mean()
        # yields nan — confirm masks are never empty.
        diff = im_depth_pd - im_depth_gt
        mses.append((diff**2).mean())
        maes.append(np.abs(diff).mean())
    return {
        "mse": np.mean(np.array(mses)),
        "mae": np.mean(np.array(maes)),
    }
def eval_dsnerf_depth_metrics_by_paths(pd_npz_paths, gt_paths):
    """
    Evaluate the avg MSE and avg MAE given two lists of paths.

    Args:
        pd_npz_paths: Paths to predicted npz files. Each npz file must contain
            a key "depth" which is a numpy array of shape (H, W).
        gt_paths: Paths to ground truth depth maps. This depth map is loaded
            with ct.io.imread_depth()

    Returns:
        A dict of metrics:
        {"mse": float, "mae": float}
    """
    pd_npz_paths = [Path(p) for p in pd_npz_paths]
    gt_paths = [Path(p) for p in gt_paths]
    if len(pd_npz_paths) != len(gt_paths):
        raise ValueError("Lengths of pd_npz_paths and gt_paths must be equal.")
    for npz_path, gt_path in zip(pd_npz_paths, gt_paths):
        if not npz_path.is_file():
            raise ValueError(f"{npz_path} not found.")
        if not gt_path.is_file():
            raise ValueError(f"{gt_path} not found.")

    mses = []
    maes = []
    for npz_path, gt_path in zip(pd_npz_paths, gt_paths):
        im_depth_pd = np.load(npz_path)["depth"]
        im_depth_gt = ct.io.imread_depth(gt_path)
        diff = im_depth_pd - im_depth_gt
        mses.append((diff**2).mean())
        maes.append(np.abs(diff).mean())

    return {
        "mse": np.mean(np.array(mses)),
        "mae": np.mean(np.array(maes)),
    }
| yxlao/corres-nerf | 49 | (NeurIPS 2023) CorresNeRF: Image Correspondence Priors for Neural Radiance Fields | yxlao | Yixing Lao | HKU-CS | |
src/utils/pydtu.py | Python | import numpy as np
import open3d as o3d
import sklearn.neighbors as skln
from scipy.io import loadmat
import multiprocessing as mp
import argparse
from pathlib import Path
from tqdm import tqdm
def sample_single_tri(input_):
    """Uniformly sample points inside one triangle.

    Args:
        input_: Tuple ``(n1, n2, v1, v2, tri_vert)``. ``n1``/``n2`` are the
            number of grid subdivisions along the two edge directions;
            ``v1``/``v2`` are (1, 3) edge vectors from the base vertex;
            ``tri_vert`` is the (1, 3) base vertex.

    Returns:
        (m, 3) array of sampled points inside the triangle (m may be 0).
    """
    n1, n2, v1, v2, tri_vert = input_
    # Regular grid of edge-parameter coordinates over the parallelogram
    # spanned by v1 and v2, shifted to cell centers.
    grid = np.mgrid[:n1 + 1, :n2 + 1]
    grid += 0.5
    grid[0] /= max(n1, 1e-7)
    grid[1] /= max(n2, 1e-7)
    grid = np.transpose(grid, (1, 2, 0))
    # Keep only the half of the parallelogram inside the triangle (u+v < 1).
    bary = grid[grid.sum(axis=-1) < 1]  # (m, 2)
    return v1 * bary[:, :1] + v2 * bary[:, 1:] + tri_vert
def write_vis_pcd(file, points, colors):
    """Write a colored point cloud to `file` for visualization/debugging."""
    cloud = o3d.geometry.PointCloud()
    cloud.points = o3d.utility.Vector3dVector(points)
    cloud.colors = o3d.utility.Vector3dVector(colors)
    o3d.io.write_point_cloud(file, cloud)
def sample_mesh_to_points(vertices, triangles, sample_dist, parallel=True):
    """
    Convert mesh to point cloud with sampling.

    Args:
        vertices: (n, 3) array of vertices.
        triangles: (m, 3) array of triangle indices.
        sample_dist: Target spacing between sampled points.
        parallel: Whether to use parallel sampling.

    Returns:
        points: (n', 3) array: the original vertices concatenated with the
        newly sampled per-triangle points.
    """
    if not isinstance(vertices, np.ndarray):
        raise TypeError("vertices must be numpy array")
    if not isinstance(triangles, np.ndarray):
        raise TypeError("triangles must be numpy array")
    if vertices.ndim != 2 or vertices.shape[1] != 3:
        raise ValueError("vertices must be (n, 3) array")
    if triangles.ndim != 2 or triangles.shape[1] != 3:
        raise ValueError("triangles must be (m, 3) array")

    # Per-triangle vertex triplets and the two edge vectors from vertex 0.
    tri_vert = vertices[triangles]
    v1 = tri_vert[:, 1] - tri_vert[:, 0]
    v2 = tri_vert[:, 2] - tri_vert[:, 0]
    l1 = np.linalg.norm(v1, axis=-1, keepdims=True)
    l2 = np.linalg.norm(v2, axis=-1, keepdims=True)
    area2 = np.linalg.norm(np.cross(v1, v2), axis=-1, keepdims=True)

    # Drop degenerate (zero-area) triangles.
    keep = (area2 > 0)[:, 0]
    l1, l2, area2, v1, v2, tri_vert = (
        arr[keep] for arr in (l1, l2, area2, v1, v2, tri_vert)
    )

    # Subdivision counts per edge so the grid spacing is ~sample_dist.
    thr = sample_dist * np.sqrt(l1 * l2 / area2)
    n1 = np.floor(l1 / thr)
    n2 = np.floor(l2 / thr)

    args_iter = (
        (
            n1[i, 0],
            n2[i, 0],
            v1[i:i + 1],
            v2[i:i + 1],
            tri_vert[i:i + 1, 0],
        )
        for i in range(len(n1))
    )
    if parallel:
        with mp.Pool() as pool:
            new_pts = pool.map(sample_single_tri, args_iter, chunksize=1024)
    else:
        new_pts = [
            sample_single_tri(args)
            for args in tqdm(
                args_iter,
                total=len(n1),
                desc="sample_mesh_to_points",
                leave=False,
            )
        ]
    new_pts = np.concatenate(new_pts, axis=0)
    return np.concatenate([vertices, new_pts], axis=0)
def dtu_chamfer_l1(in_path, scene, mode, verbose=False, parallel=True):
    """
    Compute DTU Chamfer L1 metric.

    Args:
        in_path (str): Path to the input pointcloud or mesh. Must be in `.ply`.
        scene (int): Index of DTU scene. Must be 1-128.
        mode (str): Evaluation mode, {"point", "mesh"}.
        verbose (bool): Whether to print verbose information.
        parallel (bool): Whether mesh sampling runs in parallel
            (only used when mode == "mesh").

    Returns:
        chamfer (float): DTU Chamfer L1 metric.

    This is simply the average of the "accuracy" and "completeness" scores
    of the DTU paper. See the supp material for occupancy network for formal
    definitions. IDR, VolSDF, UNISURF, and NeuS all use this metric for DTU.
    In the original DTU Matlab implementation, Chamfer-L1 can be computed as:
    (BaseStat.MeanStl + BaseStat.MeanData) / 2.

    Raises:
        ValueError: on invalid arguments or missing dataset/input files.
    """
    # Sanity checks.
    if not isinstance(scene, int):
        raise ValueError(f"scene must be int, but got {scene}.")
    if not 1 <= scene <= 128:
        raise ValueError(f"scene must be in [1, 128], but got {scene}.")
    if not isinstance(mode, str):
        raise ValueError(f"mode must be str, but got {mode}.")
    if mode not in ["point", "mesh"]:
        raise ValueError(f"mode must be in ['point', 'mesh'], but got {mode}.")

    # These are constant numbers from DTU. Do not modify.
    sample_dist = 0.2
    patch_size = 60
    max_dist = 20

    # Data dirs: the first existing candidate is used.
    dtu_dirs = [
        Path("/mnt/data/datasets/dtu/extract"),
        Path("/export/share/datasets/DTU/extract"),
        Path("/export/work/ylao/data/datasets/dtu/extract"),
        Path.home() / "data/dtu/extract",
        Path.home() / "data/dataset/dtu/extract",
    ]
    for dtu_dir in dtu_dirs:
        if dtu_dir.is_dir():
            print(f"Using DTU dir: {dtu_dir}")
            break
    if not dtu_dir.is_dir():
        raise ValueError(f"Could not find dtu_dir in {dtu_dirs}.")

    # Paths.
    in_path = Path(in_path)
    gt_path = dtu_dir / "Points" / "stl" / f"stl{scene:03}_total.ply"
    mask_path = dtu_dir / "ObsMask" / f"ObsMask{scene}_10.mat"
    plane_path = dtu_dir / "ObsMask" / f"Plane{scene}.mat"
    if not in_path.is_file():
        raise ValueError(f"in_path does not exist: {in_path}.")
    if not in_path.suffix == ".ply":
        raise ValueError(f"in_path must be .ply, but got {in_path}.")
    if not gt_path.is_file():
        raise ValueError(f"gt_path does not exist: {gt_path}.")
    if not mask_path.is_file():
        raise ValueError(f"mask_path does not exist: {mask_path}.")
    if not plane_path.is_file():
        raise ValueError(f"plane_path does not exist: {plane_path}.")
    if verbose:
        print(f"in_path : {in_path}")
        print(f"gt_path : {gt_path}")
        print(f"mask_path : {mask_path}")
        print(f"plane_path: {plane_path}")

    # Load input point cloud or mesh (sample to points).
    if mode == "mesh":
        o3d_mesh = o3d.io.read_triangle_mesh(str(in_path))
        in_points = sample_mesh_to_points(
            np.asarray(o3d_mesh.vertices),
            np.asarray(o3d_mesh.triangles),
            sample_dist,
            parallel=parallel,
        )
    elif mode == "point":
        o3d_pcd = o3d.io.read_point_cloud(str(in_path))
        in_points = np.asarray(o3d_pcd.points)

    # Random shuffle pcd index; the greedy downsampling below depends on
    # visit order, so shuffling avoids directional bias.
    shuffle_rng = np.random.default_rng()
    shuffle_rng.shuffle(in_points, axis=0)

    # Down sample pcd: greedily keep one point per sample_dist-radius ball.
    nn = skln.NearestNeighbors(n_neighbors=1,
                               radius=sample_dist,
                               algorithm="kd_tree",
                               n_jobs=-1)
    nn.fit(in_points)
    rnn_idxs = nn.radius_neighbors(in_points,
                                   radius=sample_dist,
                                   return_distance=False)
    mask = np.ones(in_points.shape[0], dtype=np.bool_)
    for curr, idxs in enumerate(rnn_idxs):
        if mask[curr]:
            mask[idxs] = 0
            mask[curr] = 1
    data_down = in_points[mask]

    # Mask data pcd by the DTU observability volume (ObsMask).
    obs_mask_file = loadmat(str(mask_path))
    ObsMask, BB, Res = [
        obs_mask_file[attr] for attr in ["ObsMask", "BB", "Res"]
    ]
    BB = BB.astype(np.float32)
    # Keep points inside the (padded) bounding box of the observed volume.
    inbound = ((data_down >= BB[:1] - patch_size) &
               (data_down < BB[1:] + patch_size * 2)).sum(axis=-1) == 3
    data_in = data_down[inbound]
    # Quantize to voxel indices, then keep only voxels marked observed.
    data_grid = np.around((data_in - BB[:1]) / Res).astype(np.int32)
    grid_inbound = ((data_grid >= 0) &
                    (data_grid < np.expand_dims(ObsMask.shape, 0))).sum(
                        axis=-1) == 3
    data_grid_in = data_grid[grid_inbound]
    in_obs = ObsMask[data_grid_in[:, 0], data_grid_in[:, 1],
                     data_grid_in[:, 2]].astype(np.bool_)
    data_in_obs = data_in[grid_inbound][in_obs]

    # Read stl (ground-truth) pcd.
    stl_pcd = o3d.io.read_point_cloud(str(gt_path))
    stl = np.asarray(stl_pcd.points)

    # Accuracy: data -> stl nearest distances, outliers beyond max_dist
    # excluded from the mean.
    nn.fit(stl)
    dist_d2s, _ = nn.kneighbors(data_in_obs,
                                n_neighbors=1,
                                return_distance=True)
    mean_d2s = dist_d2s[dist_d2s < max_dist].mean()

    # Completeness: stl -> data nearest distances, restricted to GT points
    # above the scene's ground plane.
    ground_plane = loadmat(str(plane_path))["P"]
    stl_hom = np.concatenate([stl, np.ones_like(stl[:, :1])], -1)
    above = (ground_plane.reshape((1, 4)) * stl_hom).sum(-1) > 0
    stl_above = stl[above]
    nn.fit(data_in)
    dist_s2d, _ = nn.kneighbors(stl_above,
                                n_neighbors=1,
                                return_distance=True)
    mean_s2d = dist_s2d[dist_s2d < max_dist].mean()

    # NOTE: error-heatmap visualizations (d2s/s2d colored point clouds) can
    # be written with write_vis_pcd(); the snippet was removed as dead code.

    chamfer = (mean_d2s + mean_s2d) / 2
    if verbose:
        print(f"{in_path},{chamfer:.4f}")
    return chamfer
def main():
    """CLI wrapper: parse arguments and run the DTU Chamfer-L1 evaluation."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--data", type=str, default="data_in.ply")
    parser.add_argument("--scene", type=int, default=1)
    parser.add_argument(
        "--mode",
        type=str,
        default="mesh",
        choices=["mesh", "point"],
    )
    parser.add_argument("--verbose", action="store_true", default=False)
    args = parser.parse_args()
    dtu_chamfer_l1(args.data, args.scene, args.mode, args.verbose)
# Entry point: run the DTU Chamfer-L1 CLI when executed as a script.
if __name__ == "__main__":
    main()
| yxlao/corres-nerf | 49 | (NeurIPS 2023) CorresNeRF: Image Correspondence Priors for Neural Radiance Fields | yxlao | Yixing Lao | HKU-CS | |
euler_angle_normalization.py | Python | from pathlib import Path
import open3d as o3d
from scipy.spatial.transform import Rotation
import numpy as np
import json
import open3d as o3d
import numpy as np
from scipy.spatial.transform import Rotation
from typing import Tuple, List
import copy
def main():
    """Draw a translated wireframe cube together with coordinate axes.

    NOTE(review): this ``main`` is shadowed by a second ``def main`` defined
    later in this module, so it is dead code as written — confirm whether it
    should be renamed (e.g. ``demo_cube``) or removed.
    """
    # rotmat = Rotation.from_matrix(R_bbox2cam)
    # euler_angle = rotmat.as_euler('xyz', degrees=True).tolist()
    axes = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1, origin=[0, 0, 0])
    # Cube
    cube = o3d.geometry.TriangleMesh.create_box(width=1, height=2, depth=3)
    cube = o3d.geometry.LineSet.create_from_triangle_mesh(cube)
    cube.paint_uniform_color([1, 0, 0])
    # Apply translation, z axis, 5 units
    mat_translation = np.eye(4)
    mat_translation[:3, 3] = [0, 0, 5]
    cube.transform(mat_translation)
    o3d.visualization.draw_geometries([cube, axes])
def normalize_euler_angles(a, b, c) -> Tuple[float, float, float]:
    """
    Returns normalized euler angles such that -90 < a <= 90, -90 < b <= 90,
    -90 < c <= 90.

    Return the normalized euler angles such that rectangular box after the
    new rotation can overlap with the original box. Note that they are not
    equivalent, only the bbox is overlapping.

    This is due to the fact that for rectangles, if we rotate around one
    axis for 180 degrees, the rectangle still overlaps with the original one,
    although the euler angles are different.

    Note that naively plus minus 180 will not work, as the euler angles are
    tightly coupled. We need to be very careful for each case.

    Args:
        a, b, c: Intrinsic "XYZ" euler angles in degrees.

    Returns:
        Tuple of three angles, each in (-90, 90].

    Raises:
        ValueError: if the resulting angles fall outside (-90, 90].
    """
    # Create a rotation matrix from the input Euler angles
    rotation = Rotation.from_euler("XYZ", [a, b, c], degrees=True)

    # Generate all possible equivalent rotations by applying 180° rotations
    # around principal axes — a rectangular box is invariant under each.
    rot_original = rotation.as_matrix()

    # Define 180° rotation matrices around each axis
    rot_x_180 = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])
    rot_y_180 = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]])
    rot_z_180 = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]])

    # Generate all possible equivalent rotation matrices
    equivalent_matrices = [
        rot_original,
        rot_original @ rot_x_180,
        rot_original @ rot_y_180,
        rot_original @ rot_z_180,
        rot_original @ rot_x_180 @ rot_y_180,
        rot_original @ rot_x_180 @ rot_z_180,
        rot_original @ rot_y_180 @ rot_z_180,
        rot_original @ rot_x_180 @ rot_y_180 @ rot_z_180
    ]

    # Try different rotation sequences to find one that gives angles in the
    # desired range.
    rotation_sequences = ["XYZ", "XZY", "YXZ", "YZX", "ZXY", "ZYX"]
    best_angles = None

    # First try all combinations of matrices and sequences
    for matrix in equivalent_matrices:
        for seq in rotation_sequences:
            try:
                rot = Rotation.from_matrix(matrix)
                angles = rot.as_euler(seq, degrees=True)
                # Check if all angles are already in the target range
                if all(-90 < angle <= 90 for angle in angles):
                    # Convert back to XYZ for consistency
                    if seq != "XYZ":
                        rot_consistent = Rotation.from_euler(seq, angles, degrees=True)
                        angles = rot_consistent.as_euler("XYZ", degrees=True)
                    # Verify all angles are still in the target range
                    if all(-90 < angle <= 90 for angle in angles):
                        best_angles = tuple(angles)
                        break
            except Exception:
                # Fix: was a bare `except:` — only swallow ordinary errors
                # (e.g. conversion failures), not SystemExit/KeyboardInterrupt.
                continue
        if best_angles is not None:
            break

    # If we found a solution, return it
    if best_angles is not None:
        return best_angles

    # If we couldn't find a solution with the direct approach, try manually
    # normalizing each equivalent rotation by ±180° per-angle adjustments.
    for matrix in equivalent_matrices:
        try:
            rot = Rotation.from_matrix(matrix)
            a_eq, b_eq, c_eq = rot.as_euler("XYZ", degrees=True)
            # Normalize each angle into (-90, 90] range
            adjustments = []
            # Try different adjustments
            for a_adj in [0, 180, -180]:
                for b_adj in [0, 180, -180]:
                    for c_adj in [0, 180, -180]:
                        new_a = a_eq + a_adj
                        new_b = b_eq + b_adj
                        new_c = c_eq + c_adj
                        # Ensure angles are in the primary range
                        while new_a <= -180: new_a += 360
                        while new_a > 180: new_a -= 360
                        while new_b <= -180: new_b += 360
                        while new_b > 180: new_b -= 360
                        while new_c <= -180: new_c += 360
                        while new_c > 180: new_c -= 360
                        # Check if in target range
                        if -90 < new_a <= 90 and -90 < new_b <= 90 and -90 < new_c <= 90:
                            # Verify this is an equivalent rotation
                            test_rot = Rotation.from_euler("XYZ", [new_a, new_b, new_c], degrees=True)
                            orig_points = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 1]])
                            rotated_1 = rot.apply(orig_points)
                            rotated_2 = test_rot.apply(orig_points)
                            # Check if the rotations are approximately equivalent for box corners
                            if np.allclose(np.abs(rotated_1), np.abs(rotated_2), atol=1e-6):
                                adjustments.append((new_a, new_b, new_c))
            if adjustments:
                # Use the first valid adjustment
                best_angles = adjustments[0]
                break
        except Exception:
            # Fix: was a bare `except:` (see note above).
            continue

    # If still no solution found, try using the original angles but with
    # manual normalization (last resort; may not preserve box overlap).
    if best_angles is None:
        # Start with original angles
        a_norm, b_norm, c_norm = a, b, c
        # Normalize to primary range first
        while a_norm <= -180: a_norm += 360
        while a_norm > 180: a_norm -= 360
        while b_norm <= -180: b_norm += 360
        while b_norm > 180: b_norm -= 360
        while c_norm <= -180: c_norm += 360
        while c_norm > 180: c_norm -= 360
        # Try to get all angles in (-90, 90] range by applying 180° rotations
        if a_norm > 90:
            a_norm -= 180
            b_norm = -b_norm
            c_norm = c_norm + 180 if c_norm <= 0 else c_norm - 180
        elif a_norm <= -90:
            a_norm += 180
            b_norm = -b_norm
            c_norm = c_norm + 180 if c_norm <= 0 else c_norm - 180
        if b_norm > 90:
            b_norm -= 180
            a_norm = -a_norm
            c_norm = c_norm + 180 if c_norm <= 0 else c_norm - 180
        elif b_norm <= -90:
            b_norm += 180
            a_norm = -a_norm
            c_norm = c_norm + 180 if c_norm <= 0 else c_norm - 180
        if c_norm > 90:
            c_norm -= 180
            a_norm = -a_norm
            b_norm = -b_norm
        elif c_norm <= -90:
            c_norm += 180
            a_norm = -a_norm
            b_norm = -b_norm
        # Ensure angles are in primary range again
        while a_norm <= -180: a_norm += 360
        while a_norm > 180: a_norm -= 360
        while b_norm <= -180: b_norm += 360
        while b_norm > 180: b_norm -= 360
        while c_norm <= -180: c_norm += 360
        while c_norm > 180: c_norm -= 360
        best_angles = (a_norm, b_norm, c_norm)

    # Validate angles
    a_result, b_result, c_result = best_angles
    # Ensure all angles are in the desired range
    if not -90 < a_result <= 90:
        raise ValueError(f"a_result is not in the range (-90, 90]: {a_result}")
    if not -90 < b_result <= 90:
        raise ValueError(f"b_result is not in the range (-90, 90]: {b_result}")
    if not -90 < c_result <= 90:
        raise ValueError(f"c_result is not in the range (-90, 90]: {c_result}")
    return best_angles
def normalize_bbox(bbox: np.ndarray) -> np.ndarray:
    """
    Normalize the euler-angle element of the bbox.

    bbox: [x, y, z, w, h, l, euler_a, euler_b, euler_c]
    """
    assert bbox.ndim == 1 and len(bbox) == 9
    result = bbox.copy()
    result[-3:] = normalize_euler_angles(*bbox[-3:])
    return result
def test_normalize_bbox(bbox_a: np.ndarray, visualize=False):
    """Check that normalize_bbox() yields a geometrically identical box.

    Euler convention reminder:
      - rotate around x: rotation from y to z
      - rotate around y: rotation from z to x
      - rotate around z: rotation from x to y
    """
    bbox_b = normalize_bbox(bbox_a)
    print(f"bbox_a: {bbox_a}")
    print(f"bbox_b: {bbox_b}")

    def _bbox_to_lines(bbox, color):
        # Build a colored wireframe for a 9D bbox [xyz, whl, euler_abc].
        center = bbox[:3]
        extent = bbox[3:6]
        rot = Rotation.from_euler("XYZ", bbox[6:], degrees=True).as_matrix()
        obb = o3d.geometry.OrientedBoundingBox(
            center=center, R=np.array(rot), extent=extent
        )
        lines = o3d.geometry.LineSet.create_from_oriented_bounding_box(obb)
        lines.paint_uniform_color(color)
        return lines

    box_lines_a = _bbox_to_lines(bbox_a, [0.5, 0, 0])
    box_lines_b = _bbox_to_lines(bbox_b, [0, 0.5, 0])

    # The two corner sets must match (up to ordering) for the boxes to
    # coincide geometrically.
    corners_a = np.array(sorted(np.asarray(box_lines_a.points).tolist()))
    corners_b = np.array(sorted(np.asarray(box_lines_b.points).tolist()))
    if not np.allclose(corners_a, corners_b, atol=1e-3, rtol=1e-3):
        raise ValueError("bbox_a's points do not overlap with bbox_b's points")
    else:
        print("bbox_a's points overlap with bbox_b's points")

    # Side-by-side wireframe comparison, on request only.
    if visualize:
        axes = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0)
        o3d.visualization.draw_geometries([axes, box_lines_a, box_lines_b])
def generate_test_cases() -> List[np.ndarray]:
    """
    Generate test cases for euler angle normalization.

    For each angle a, b, c one representative value is taken from each of
    three regimes:
        - below -90
        - within [-90, 90]
        - above 90

    Returns:
        List of 27 test cases, each case is a 9D array:
        [x, y, z, w, h, l, euler_a, euler_b, euler_c]
    """
    # Base bbox parameters (arbitrary but reasonable values)
    base = (1.0, 1.0, 1.0, 0.5, 0.3, 0.7)

    # Representative angle per regime, enumerated in a fixed order.
    regime_order = ('below_90', 'within', 'above_90')
    regime_to_angle = {
        'below_90': -100,  # below -90
        'within': 20,      # within [-90, 90]
        'above_90': 115,   # above 90
    }

    return [
        np.array(
            base
            + (
                regime_to_angle[ra],
                regime_to_angle[rb],
                regime_to_angle[rc],
            )
        )
        for ra in regime_order
        for rb in regime_order
        for rc in regime_order
    ]
def main():
    """Run hand-picked and exhaustive euler-normalization checks."""
    # Original test cases
    print("Running original test cases:")
    print("-" * 50)
    handpicked = [
        np.array([2.0, 0.0, 0.0, 0.2, 0.4, 0.8, 95, 100, 105]),
        np.array([0.94, 0.59, 1.78, 0.88, 0.5, 0.88, -179.94, -68.27, 179.94]),
        np.array([5.0, 0.0, 0.0, 0.2, 0.4, 0.8, -179.94, -68.27, 179.94]),
    ]
    for bbox in handpicked:
        test_normalize_bbox(bbox_a=bbox, visualize=False)
        print()

    # Comprehensive test cases
    print("\nRunning comprehensive test cases:")
    print("-" * 50)
    for i, test_case in enumerate(generate_test_cases(), 1):
        print(f"\nTest case {i}/27:")
        print(f"Input angles (a, b, c): {test_case[6:]}")
        test_normalize_bbox(test_case, visualize=False)
        print("-" * 30)
# Entry point: run the euler-normalization test suite when executed directly.
if __name__ == "__main__":
    main()
| yxlao/exchange | 0 | Sharing code snippets | Python | yxlao | Yixing Lao | HKU-CS |
o3d_texture/draw_texture_cube.py | Python | from pathlib import Path
import numpy as np
import open3d as o3d
from pathlib import Path
def main():
    """Load cube.obj, rebuild an equivalent textured mesh element by element,
    and display it (sanity check for Open3D texture plumbing)."""
    # Create source mesh
    script_dir = Path(__file__).parent
    mesh_src = o3d.io.read_triangle_mesh(str(script_dir / "cube.obj"),
                                         enable_post_processing=True)

    # Pull every element of the source mesh out as numpy arrays.
    vertices = np.asarray(mesh_src.vertices)
    triangles = np.asarray(mesh_src.triangles)
    textures = np.asarray(mesh_src.textures[0])
    triangle_uvs = np.asarray(mesh_src.triangle_uvs)
    material_ids = np.zeros((len(triangles), ), dtype=np.int32)

    # Reassemble the elements into a brand-new mesh.
    mesh_dst = o3d.geometry.TriangleMesh()
    mesh_dst.vertices = o3d.utility.Vector3dVector(vertices)
    mesh_dst.triangles = o3d.utility.Vector3iVector(triangles)
    mesh_dst.textures = [o3d.geometry.Image(textures)]
    mesh_dst.triangle_uvs = o3d.utility.Vector2dVector(triangle_uvs)
    mesh_dst.triangle_material_ids = o3d.utility.IntVector(material_ids)

    # Visualize
    mesh_dst.compute_vertex_normals()
    o3d.visualization.draw_geometries([mesh_dst])
# Entry point: show the reconstructed textured cube when run as a script.
if __name__ == '__main__':
    main()
| yxlao/exchange | 0 | Sharing code snippets | Python | yxlao | Yixing Lao | HKU-CS |
open3d_ply_dtypes_0.15.py | Python | import open3d as o3d
import numpy as np
def main():
    """Demonstrate how legacy vs tensor point-cloud IO handles color dtypes
    (Open3D 0.15 API: tensor colors accessed via pcd.point["colors"])."""
    dataset = o3d.data.PLYPointCloud()
    print(f"PLY path: {dataset.path}")

    # Legacy API: colors always come back as float64 in [0, 1].
    legacy_pcd = o3d.io.read_point_cloud(dataset.path)
    legacy_colors = np.array(legacy_pcd.colors)
    print("Read using legacy point cloud")
    print(f"dtype: {legacy_colors.dtype}")  # float64
    print(f"min, max: {legacy_colors.min()}, {legacy_colors.max()}")  # 0.0, 1.0

    # Legacy API always writes colors back as uint8.
    # Run `head -n 15 pcd_legacy.ply` to see the header
    print("Write using legacy point cloud")
    o3d.io.write_point_cloud("pcd_legacy.ply", legacy_pcd)  # saved as uint8

    # Tensor API preserves the on-disk uint8 color dtype on read.
    tensor_pcd = o3d.t.io.read_point_cloud(dataset.path)
    tensor_colors = tensor_pcd.point["colors"].numpy()
    print("Read using tensor point cloud")
    print(f"dtype: {tensor_colors.dtype}")  # uint8
    print(f"min, max: {tensor_colors.min()}, {tensor_colors.max()}")  # 0, 255

    # Round-trip uint8 colors through the tensor writer.
    # Run `head -n 15 pcd_tensor_uint8.ply` to see the header
    o3d.t.io.write_point_cloud("pcd_tensor_uint8.ply", tensor_pcd)  # saved as uint8
    reread_pcd = o3d.t.io.read_point_cloud("pcd_tensor_uint8.ply")
    reread_colors = reread_pcd.point["colors"].numpy()
    print("Read using tensor point cloud uint8")
    print(f"dtype: {reread_colors.dtype}")  # uint8
    print(f"min, max: {reread_colors.min()}, {reread_colors.max()}")  # 0, 255

    # The tensor writer keeps float32 colors as float32 on disk.
    # Run `head -n 15 pcd_tensor_float32.ply` to see the header
    tensor_pcd.point.colors = reread_colors.astype(np.float32) / 255.0
    o3d.t.io.write_point_cloud("pcd_tensor_float32.ply", tensor_pcd)  # float32
    float_pcd = o3d.t.io.read_point_cloud("pcd_tensor_float32.ply")
    float_colors = float_pcd.point["colors"].numpy()
    print("Read using tensor point cloud float32")
    print(f"dtype: {float_colors.dtype}")  # float32
    print(f"min, max: {float_colors.min()}, {float_colors.max()}")  # 0.0, 1.0
# Entry point: run the PLY color-dtype demo when executed directly.
if __name__ == "__main__":
    main()
| yxlao/exchange | 0 | Sharing code snippets | Python | yxlao | Yixing Lao | HKU-CS |
open3d_ply_dtypes_master.py | Python | import open3d as o3d
import numpy as np
def main():
    """Demonstrate how legacy vs tensor point-cloud IO handles color dtypes
    (Open3D master API: tensor colors accessed via pcd.point.colors)."""
    dataset = o3d.data.PLYPointCloud()
    print(f"PLY path: {dataset.path}")

    # Legacy API: colors always come back as float64 in [0, 1].
    legacy_pcd = o3d.io.read_point_cloud(dataset.path)
    legacy_colors = np.array(legacy_pcd.colors)
    print("Read using legacy point cloud")
    print(f"dtype: {legacy_colors.dtype}")  # float64
    print(f"min, max: {legacy_colors.min()}, {legacy_colors.max()}")  # 0.0, 1.0

    # Legacy API always writes colors back as uint8.
    # Run `head -n 15 pcd_legacy.ply` to see the header
    print("Write using legacy point cloud")
    o3d.io.write_point_cloud("pcd_legacy.ply", legacy_pcd)  # saved as uint8

    # Tensor API preserves the on-disk uint8 color dtype on read.
    tensor_pcd = o3d.t.io.read_point_cloud(dataset.path)
    tensor_colors = tensor_pcd.point.colors.numpy()
    print("Read using tensor point cloud")
    print(f"dtype: {tensor_colors.dtype}")  # uint8
    print(f"min, max: {tensor_colors.min()}, {tensor_colors.max()}")  # 0, 255

    # Round-trip uint8 colors through the tensor writer.
    # Run `head -n 15 pcd_tensor_uint8.ply` to see the header
    o3d.t.io.write_point_cloud("pcd_tensor_uint8.ply", tensor_pcd)  # saved as uint8
    reread_pcd = o3d.t.io.read_point_cloud("pcd_tensor_uint8.ply")
    reread_colors = reread_pcd.point.colors.numpy()
    print("Read using tensor point cloud uint8")
    print(f"dtype: {reread_colors.dtype}")  # uint8
    print(f"min, max: {reread_colors.min()}, {reread_colors.max()}")  # 0, 255

    # The tensor writer keeps float32 colors as float32 on disk.
    # Run `head -n 15 pcd_tensor_float32.ply` to see the header
    tensor_pcd.point.colors = reread_colors.astype(np.float32) / 255.0
    o3d.t.io.write_point_cloud("pcd_tensor_float32.ply", tensor_pcd)  # float32
    float_pcd = o3d.t.io.read_point_cloud("pcd_tensor_float32.ply")
    float_colors = float_pcd.point.colors.numpy()
    print("Read using tensor point cloud float32")
    print(f"dtype: {float_colors.dtype}")  # float32
    print(f"min, max: {float_colors.min()}, {float_colors.max()}")  # 0.0, 1.0
# Entry point: run the PLY color-dtype demo when executed directly.
if __name__ == "__main__":
    main()
| yxlao/exchange | 0 | Sharing code snippets | Python | yxlao | Yixing Lao | HKU-CS |
lit/__init__.py | Python | import pkg_resources
__version__ = pkg_resources.get_distribution("lit").version
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/bg_reconstructor.py | Python | import nksr
import open3d as o3d
import torch
class BGReconstructor:
    """
    Background reconstructor with NKSR.
    """

    def __init__(self, voxel_size=None, chunked=True) -> None:
        """
        Args:
            voxel_size: Voxel size for reconstruction. None means 0.1.
            chunked: Whether to use chunked reconstruction.
        """
        self.device = torch.device("cuda:0")
        self.chunk_tmp_device = torch.device("cpu")
        self.reconstructor = nksr.Reconstructor(self.device)
        # NKSR only honors a custom voxel size in non-chunked mode, so the
        # two options are mutually exclusive.
        if chunked and voxel_size is not None:
            raise ValueError(
                "Cannot use chunked reconstruction with custom voxel size."
            )
        self.voxel_size = voxel_size
        # -1 disables chunking in NKSR.
        self.chunk_size = 51.2 if chunked else -1

    def recon(self, points, lidar_centers):
        """
        Reconstruct background points.

        Args:
            points: (N, 3) points.
            lidar_centers: (N, 3) per-point lidar centers.
        """
        xyz = torch.from_numpy(points).float().to(self.device)
        sensors = torch.from_numpy(lidar_centers).float().to(self.device)
        field = self.reconstructor.reconstruct(
            xyz=xyz,
            sensor=sensors,
            detail_level=None,
            voxel_size=self.voxel_size,  # If chunk is used, voxel_size is ignored.
            # Minor configs for better efficiency (not necessary)
            approx_kernel_grad=True,
            solver_tol=1e-4,
            fused_mode=True,
            # Chunked reconstruction (if OOM)
            chunk_size=self.chunk_size,
            preprocess_fn=nksr.get_estimate_normal_preprocess_fn(64, 85.0),
        )
        dual_mesh = field.extract_dual_mesh(mise_iter=1)
        out_mesh = o3d.geometry.TriangleMesh()
        out_mesh.vertices = o3d.utility.Vector3dVector(dual_mesh.v.cpu().numpy())
        out_mesh.triangles = o3d.utility.Vector3iVector(dual_mesh.f.cpu().numpy())
        out_mesh.compute_vertex_normals()
        return out_mesh
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/containers/base_container.py | Python | import pickle
from abc import ABC
from collections.abc import Iterable
from pathlib import Path
from typing import Any, Iterable
import numpy as np
class BaseContainer(ABC):
    """
    Abstract base class that implements the save() and load() methods by calling
    to_dict() and from_dict(). Child classes must implement to_dict() and
    from_dict(). All np.float32 checks are enforced before saving.
    """

    def __post_init__(self):
        # Dataclass subclasses call this automatically after __init__;
        # rejects any floating-point ndarray field that is not np.float32.
        BaseContainer._assert_float32(self)

    def to_dict(self):
        """Serialize the container to a plain dict (subclass responsibility)."""
        # Fix: previously referenced the undefined name `cls` (NameError at
        # call time) and reported "from_dict" in the error message.
        raise NotImplementedError(
            f"{type(self).__name__} must implement to_dict method"
        )

    @classmethod
    def from_dict(cls, dict_data: dict):
        """Rebuild a container from a dict (subclass responsibility)."""
        raise NotImplementedError(f"{cls.__name__} must implement from_dict method")

    def save(self, path: Path, verbose=False):
        """Pickle to_dict() output to `path` after validating float32 dtypes."""
        data = self.to_dict()
        BaseContainer._assert_float32(data)
        with open(path, "wb") as file:
            pickle.dump(data, file)
        if verbose:
            print(f"Saved {self.__class__.__name__} to {path}")

    @classmethod
    def load(cls, path: Path):
        """Unpickle `path` and rebuild the container via from_dict().

        NOTE: uses pickle — only load files from trusted sources.
        """
        with open(path, "rb") as file:
            data = pickle.load(file)
        return cls.from_dict(data)

    @staticmethod
    def _assert_float32(value: Any, name: str = None):
        """Recursively assert every floating-point ndarray in `value` is float32.

        Args:
            value: Arbitrary value; ndarrays, containers, dicts and other
                iterables (except str/bytes) are checked recursively.
            name: Optional label (e.g. the dict key) included in the error.

        Raises:
            ValueError: if a floating ndarray with dtype != np.float32 is found.
        """
        if isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.floating):
            if value.dtype != np.float32:
                if name is None:
                    raise ValueError(f"Array must be np.float32, got {value.dtype}.")
                raise ValueError(f"{name} must be np.float32, got {value.dtype}.")
        elif isinstance(value, BaseContainer):
            BaseContainer._assert_float32(value.to_dict())
        elif isinstance(value, dict):
            for k, v in value.items():
                BaseContainer._assert_float32(k)
                BaseContainer._assert_float32(v, name=str(k))
        elif isinstance(value, Iterable) and not isinstance(value, (str, bytes)):
            for item in value:
                BaseContainer._assert_float32(item)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/containers/fg_box.py | Python | from dataclasses import dataclass
from typing import List
import camtools as ct
import numpy as np
import open3d as o3d
from lit.containers.base_container import BaseContainer
from lit.recon_utils import bbox_to_lineset, scale_points_with_bbox
@dataclass
class FGBox(BaseContainer):
"""
Foreground box containing points, bbox, etc.
"""
scene_name: str = None # Scene name.
frame_index: int = None # Index of the frame.
frame_pose: np.ndarray = None # (4, 4) pose of the frame (vehicle).
object_id: str = None # Globally unique object id of the box.
local_points: np.ndarray = None # (N, 3), local-coord points.
local_bbox: np.ndarray = None # (8,) box: (x, y, z, dx, dy, dz, heading, class).
def __post_init__(self):
super().__post_init__()
if self.object_id is None:
raise ValueError("object_id must be provided.")
def __str__(self):
return f"FGBox(frame_index={self.frame_index}, object_id={self.object_id})"
def __repr__(self):
return self.__str__()
def to_dict(self):
return {
"scene_name": self.scene_name,
"frame_index": self.frame_index,
"frame_pose": self.frame_pose,
"object_id": self.object_id,
"local_points": self.local_points,
"local_bbox": self.local_bbox,
}
@classmethod
def from_dict(cls, dict_data: dict):
return cls(
scene_name=dict_data["scene_name"],
frame_index=dict_data["frame_index"],
frame_pose=dict_data["frame_pose"],
object_id=dict_data["object_id"],
local_points=dict_data["local_points"],
local_bbox=dict_data["local_bbox"],
)
def scale_by(self, src_to_dst_scales: List[float]):
"""
Scale the FGBox by src_to_dst_scales.
"""
new_local_points, new_local_bbox = scale_points_with_bbox(
points=self.local_points,
bbox=self.local_bbox,
src_to_dst_scales=src_to_dst_scales,
)
return FGBox(
scene_name=self.scene_name,
frame_index=self.frame_index,
frame_pose=self.frame_pose,
object_id=self.object_id,
local_points=new_local_points,
local_bbox=new_local_bbox,
)
    def compute_local_pseudo_pose(self):
        """
        Compute the pseudo pose of the box: the pose that transforms an
        axis-aligned box centered at (0, 0, 0) to the current (local) box.

        Usage:
            ```python
            # Center points to the origin for reconstruction.
            pseudo_pose = self.compute_local_pseudo_pose()
            pseudo_T = ct.convert.pose_to_T(pseudo_pose)
            centered_points = ct.transform.transform_points(
                self.local_points, pseudo_T
            )
            # Recon.
            centered_pcd = o3d.geometry.PointCloud()
            centered_pcd.points = o3d.utility.Vector3dVector(centered_points)
            centered_mesh = (
                o3d.geometry.TriangleMesh.create_from_point_cloud_alpha_shape(
                    centered_pcd, alpha=0.15
                )
            )
            # Transform back to the world coordinate.
            # mesh_vertices = ct.transform.transform_points(
            #     np.asarray(centered_mesh.vertices), self.frame_pose @ pseudo_pose
            # )
            # mesh_triangles = np.asarray(centered_mesh.triangles)
            # world_mesh = o3d.geometry.TriangleMesh()
            # world_mesh.vertices = o3d.utility.Vector3dVector(mesh_vertices)
            # world_mesh.triangles = o3d.utility.Vector3iVector(mesh_triangles)
            world_mesh = centered_mesh.transform(self.frame_pose @ pseudo_pose)
            world_mesh.compute_vertex_normals()
            # Our usual way to get world lineset.
            world_ls = bbox_to_lineset(self.local_bbox, frame_pose=self.frame_pose)
            # Visualize.
            o3d.visualization.draw_geometries([world_ls, world_mesh])
            ```

        Notes:
            Current bbox:
                (x, y, z, dx, dy, dz, heading)
            Canonical bbox:
                Axis aligned bbox of the same size centered at the origin
                (0, 0, 0, dx, dy, dz, 0)

        Returns:
            (4, 4) np.ndarray pose [R|t] with R = Rz(heading) and t = the bbox
            center, i.e. the canonical -> local transform.
        """
        # heading angle of the local bbox (index 6 of (x, y, z, dx, dy, dz,
        # heading, ...)).
        theta = self.local_bbox[6]
        # This matrix is Rz(-theta); the transpose below turns it into
        # Rz(+theta), the rotation carrying the canonical axis-aligned box
        # onto the heading of the local box.
        R = np.array(
            [
                [np.cos(theta), np.sin(theta), 0],
                [-np.sin(theta), np.cos(theta), 0],
                [0, 0, 1],
            ]
        )
        R = R.T
        # Translation: the bbox center (x, y, z) in local coordinates.
        t = self.local_bbox[:3]
        pseudo_pose = np.eye(4)
        pseudo_pose[:3, :3] = R
        pseudo_pose[:3, 3] = t

        # Debug-only visualizations; hard-coded off. Flip to True locally when
        # inspecting the pose. None of this affects the returned value.
        visualize = False
        if visualize:

            def visualization_01():
                """
                Transform a canonical bbox to the local bbox.
                """
                # Visualization 1: transform a canonical bbox to local bbox.
                # They shall overlap.
                local_ls = bbox_to_lineset(self.local_bbox)
                local_ls.paint_uniform_color([0, 0, 1])
                x, y, z, dx, dy, dz, heading = self.local_bbox[:7]
                canonical_bbox = np.array([0, 0, 0, dx, dy, dz, 0])
                canonical_ls = bbox_to_lineset(canonical_bbox, pseudo_pose)
                canonical_ls.paint_uniform_color([1, 0, 0])
                o3d.visualization.draw_geometries([local_ls, canonical_ls])

            def visualization_02():
                """
                Center the local points to origin for reconstruction.
                """
                centered_ls = bbox_to_lineset(self.local_bbox)
                centered_ls.paint_uniform_color([0, 0, 1])
                # Move the lineset with the inverse pose: local -> canonical.
                centered_ls.points = o3d.utility.Vector3dVector(
                    ct.transform.transform_points(
                        np.asarray(centered_ls.points), np.linalg.inv(pseudo_pose)
                    )
                )
                centered_points = np.copy(self.local_points)
                centered_points = ct.transform.transform_points(
                    centered_points, np.linalg.inv(pseudo_pose)
                )
                centered_pcd = o3d.geometry.PointCloud()
                centered_pcd.points = o3d.utility.Vector3dVector(centered_points)
                axes = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1)
                o3d.visualization.draw_geometries([axes, centered_pcd, centered_ls])

            def visualization_03():
                """
                A full recon pipeline:
                1. Transform bbox and points to centered canonical bbox.
                2. Recon mesh.
                3. Transform mesh back to the world coordinate.
                """
                # Center points to the origin for reconstruction.
                # pseudo_pose = self.compute_local_pseudo_pose()  # Avoid recursion.
                # NOTE(review): pose_to_T presumably returns the inverse of the
                # pose (local -> canonical) — confirm against camtools.
                pseudo_T = ct.convert.pose_to_T(pseudo_pose)
                centered_points = ct.transform.transform_points(
                    self.local_points, pseudo_T
                )
                # Recon.
                centered_pcd = o3d.geometry.PointCloud()
                centered_pcd.points = o3d.utility.Vector3dVector(centered_points)
                centered_mesh = (
                    o3d.geometry.TriangleMesh.create_from_point_cloud_alpha_shape(
                        centered_pcd, alpha=0.15
                    )
                )
                # Transform back to the world coordinate.
                # mesh_vertices = ct.transform.transform_points(
                #     np.asarray(centered_mesh.vertices), self.frame_pose @ pseudo_pose
                # )
                # mesh_triangles = np.asarray(centered_mesh.triangles)
                # world_mesh = o3d.geometry.TriangleMesh()
                # world_mesh.vertices = o3d.utility.Vector3dVector(mesh_vertices)
                # world_mesh.triangles = o3d.utility.Vector3iVector(mesh_triangles)
                # world_mesh.compute_vertex_normals()
                world_mesh = centered_mesh.transform(self.frame_pose @ pseudo_pose)
                # Our usual way to get world lineset.
                world_ls = bbox_to_lineset(self.local_bbox, frame_pose=self.frame_pose)
                # Visualize.
                o3d.visualization.draw_geometries([world_ls, world_mesh])

            visualization_01()
            visualization_02()
            # visualization_03()

        return pseudo_pose
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/containers/fg_multiview.py | Python | from dataclasses import asdict, dataclass, field
from pathlib import Path
from typing import List
import camtools as ct
import numpy as np
import open3d as o3d
from pycg import vis
@dataclass
class FGMultiview:
    """
    Container for multi-view foreground data. An instance of this class contains
    multi-view scans of one synthetic vehicle in a real-world trajectory with
    reconstructed background mesh.

    Conventions used throughout:
    - "canonical": waymo-style axes, bbox scale.
    - "DeepSDF": ShapeNet axes, normalized with the (offset, scale) computed
      from the fused canonical points.
    """

    # ------------------------------------------------------------------
    # Section 1: Basic GT info
    # ------------------------------------------------------------------
    # ShapeNet object_id, while the "synset_id" (category) is always "02958343".
    shapenet_id: str = None
    # Ground-truth ShapeNet mesh, stored in canonical axes.
    gt_mesh: o3d.geometry.TriangleMesh = None
    # Ground-truth latent code, shape (1, 256).
    # The latent code can be used to produce a mesh in ShapeNet axes convention.
    # The neural network shall learn to reconstruct this latent code.
    gt_latent: np.ndarray = None

    # ------------------------------------------------------------------
    # Section 2: Points in canonical space (canonical axes, canonical scale)
    # ------------------------------------------------------------------
    # Canonical points of one foreground object.
    # - Scale: canonical scale (same as bbox)
    # - Axes : canonical axes (same as waymo)
    # - Shape: (N, 3)
    fused_canonical_points: np.ndarray = None
    # List of multi-view canonical points: B arrays of shape (x, 3), where B is
    # the number of scans. Same scale/axes as fused_canonical_points.
    mv_canonical_points: List[np.ndarray] = field(default_factory=list)

    # ------------------------------------------------------------------
    # Section 3: Points/latents in DeepSDF space (ShapeNet axes, DeepSDF scale)
    # ------------------------------------------------------------------
    # Fused DeepSDF points (normalized with deepsdf normalization, ShapeNet axes).
    fused_deepsdf_points: np.ndarray = None
    # Fused DeepSDF latent code (256,), computed from fused_deepsdf_points.
    fused_deepsdf_latent: np.ndarray = None
    # Multi-view DeepSDF points (normalized, ShapeNet axes).
    mv_deepsdf_points: List[np.ndarray] = field(default_factory=list)
    # FPS-sampled multi-view DeepSDF points, sampled to at most 256 points each.
    mv_fps_deepsdf_points: List[np.ndarray] = field(default_factory=list)
    # Enforced FPS-sampled multi-view DeepSDF points
    # (sampled with allow_fewer_points=False).
    mv_enforced_fps_deepsdf_points: List[np.ndarray] = field(default_factory=list)
    # Multi-view latent codes from mv_deepsdf_points (non-sampled), B x (256,).
    mv_deepsdf_latents: List[np.ndarray] = field(default_factory=list)

    # ------------------------------------------------------------------
    # Section 4: Additional states
    # ------------------------------------------------------------------
    # Cached (offset, scale) normalization, computed lazily from
    # fused_canonical_points. An empty tuple means "not computed yet".
    _deepsdf_normalization_offset_scale: tuple[float] = field(default_factory=tuple)

    def __post_init__(self):
        # FGMultiview is a plain dataclass with no BaseContainer parent, so
        # there is no parent __post_init__ to chain to. The original
        # super().__post_init__() call raised AttributeError (on `object`)
        # at construction time.
        pass

    def visualize(self):
        """Show GT mesh, fused canonical points, and fused DeepSDF points."""
        import copy  # local import: `copy` is not imported at module level

        axes = o3d.geometry.TriangleMesh.create_coordinate_frame(size=2)
        # Canonical axes objects.
        gt_mesh = copy.deepcopy(self.gt_mesh)
        gt_mesh.compute_vertex_normals()
        fused_canonical_pcd = o3d.geometry.PointCloud()
        fused_canonical_pcd.points = o3d.utility.Vector3dVector(
            self.fused_canonical_points
        )
        # DeepSDF axes objects.
        fused_deepsdf_pcd = o3d.geometry.PointCloud()
        fused_deepsdf_pcd.points = o3d.utility.Vector3dVector(self.fused_deepsdf_points)
        _ = vis.show_3d(
            [axes, gt_mesh],
            [axes, fused_canonical_pcd],
            [axes, fused_deepsdf_pcd],
            use_new_api=False,
        )

    @staticmethod
    def _compute_deepsdf_normalization(fused_points, buffer=1.03):
        """
        Compute (offset, scale) to normalize points for DeepSDF models, s.t.:

        1. The center (avg of min max bounds, not centroid) of the object is at
           the origin.
        2. Max distance of a point from the origin is (1 * buffer).

        Normalization does not change axes convention. Both ShapeNet axes and
        Canonical axes points can be normalized with the same normalization.
        DeepSDF uses normalized points in ShapeNet axes convention.

        Args:
            fused_points: (N, 3) np.ndarray of points.
            buffer: multiplicative margin applied to the max radius.

        Returns:
            (offset, scale): apply as (points + offset) * scale.

        Raises:
            ValueError: on wrong type, wrong shape, or empty input.
        """
        if not isinstance(fused_points, np.ndarray):
            raise ValueError(f"points must be np.ndarray, got {type(fused_points)}")
        if fused_points.ndim != 2 or fused_points.shape[1] != 3:
            raise ValueError(f"points must be (N, 3), got {fused_points.shape}")
        if fused_points.size == 0:
            raise ValueError("Points array is empty.")

        min_vals = np.min(fused_points, axis=0)
        max_vals = np.max(fused_points, axis=0)
        center = (min_vals + max_vals) / 2.0
        offset = -center
        centered_points = fused_points - center
        max_distance = np.max(np.linalg.norm(centered_points, axis=1))
        max_distance *= buffer
        scale = 1.0 / max_distance
        return offset, scale

    def _ensure_fused_normalization(self):
        """
        Lazily compute and cache (offset, scale) from fused_canonical_points.

        The cache field defaults to an empty tuple, so we test for emptiness
        (falsy) rather than `is None` — the original `is None` check never
        triggered, leaving the cache empty and the subsequent unpack failing.
        """
        if not self._deepsdf_normalization_offset_scale:
            if self.fused_canonical_points is None:
                raise ValueError(
                    "self.fused_canonical_points is None, "
                    "cannot compute fused normalization"
                )
            # The original referenced the undefined name `ForegroundMultiView`;
            # this class is named FGMultiview.
            self._deepsdf_normalization_offset_scale = (
                FGMultiview._compute_deepsdf_normalization(
                    fused_points=self.fused_canonical_points
                )
            )
        return self._deepsdf_normalization_offset_scale

    def normalize_by_fused_normalization(self, points):
        """Normalize points with the (offset, scale) of the fused points."""
        offset, scale = self._ensure_fused_normalization()
        return (points + offset) * scale

    def denormalize_by_fused_normalization(self, points):
        """Inverse of normalize_by_fused_normalization."""
        offset, scale = self._ensure_fused_normalization()
        return points / scale - offset

    @staticmethod
    def rotate_axes_canonical_to_shapenet(points):
        """Rotate (N, 3) points from canonical (waymo) axes to ShapeNet axes."""
        rotate_c2s = np.array(
            [
                [0, -1, 0, 0],
                [0, 0, 1, 0],
                [-1, 0, 0, 0],
                [0, 0, 0, 1],
            ],
            dtype=np.float32,
        )
        return ct.transform.transform_points(points, rotate_c2s)

    @staticmethod
    def rotate_axes_shapenet_to_canonical(points):
        """Rotate (N, 3) points from ShapeNet axes to canonical (waymo) axes."""
        rotate_s2c = np.array(
            [
                [0, 0, -1, 0],
                [-1, 0, 0, 0],
                [0, 1, 0, 0],
                [0, 0, 0, 1],
            ],
            dtype=np.float32,
        )
        return ct.transform.transform_points(points, rotate_s2c)

    def one_step_shapenet_to_canonical(self, points_or_mesh):
        """
        Denormalizes and rotates points or mesh from ShapeNet axes to canonical
        axes in one step, without modifying the original points or mesh.

        Args:
            points_or_mesh: np.ndarray of points (N, 3) or
                open3d.geometry.TriangleMesh.

        Returns:
            Transformed np.ndarray of points (N, 3) or
            a new open3d.geometry.TriangleMesh in canonical axes.

        Raises:
            ValueError: if a points array has the wrong shape.
            TypeError: if the input is neither an array nor a TriangleMesh.
        """
        import copy  # local import: `copy` is not imported at module level

        if isinstance(points_or_mesh, np.ndarray):
            points = points_or_mesh
            if points.ndim != 2 or points.shape[1] != 3:
                raise ValueError(f"points must be (N, 3), got shape {points.shape}")
            # Rotate first, then denormalize (inverse order of the forward op).
            points = self.rotate_axes_shapenet_to_canonical(points)
            points = self.denormalize_by_fused_normalization(points)
            return points
        elif isinstance(points_or_mesh, o3d.geometry.TriangleMesh):
            mesh = copy.deepcopy(points_or_mesh)
            vertices = np.asarray(mesh.vertices)
            vertices = self.rotate_axes_shapenet_to_canonical(vertices)
            vertices = self.denormalize_by_fused_normalization(vertices)
            mesh.vertices = o3d.utility.Vector3dVector(vertices)
            if mesh.has_vertex_normals():
                mesh.compute_vertex_normals()
            return mesh
        else:
            raise TypeError(
                "Input must be either an np.ndarray of points or an TriangleMesh."
            )

    def one_step_canonical_to_shapenet(self, points_or_mesh):
        """
        Normalizes and rotates points or mesh from canonical axes to ShapeNet
        axes in one step, without modifying the original points or mesh.

        Args:
            points_or_mesh: np.ndarray of points (N, 3) or
                open3d.geometry.TriangleMesh.

        Returns:
            Transformed np.ndarray of points (N, 3) or
            a new open3d.geometry.TriangleMesh in ShapeNet axes.

        Raises:
            ValueError: if a points array has the wrong shape.
            TypeError: if the input is neither an array nor a TriangleMesh.
        """
        import copy  # local import: `copy` is not imported at module level

        if isinstance(points_or_mesh, np.ndarray):
            points = points_or_mesh
            if points.ndim != 2 or points.shape[1] != 3:
                raise ValueError(f"points must be (N, 3), got shape {points.shape}")
            # Normalize first, then rotate (mirror of the inverse op above).
            normalized_points = self.normalize_by_fused_normalization(points)
            points = self.rotate_axes_canonical_to_shapenet(normalized_points)
            return points
        elif isinstance(points_or_mesh, o3d.geometry.TriangleMesh):
            mesh = copy.deepcopy(points_or_mesh)
            vertices = np.asarray(mesh.vertices)
            normalized_vertices = self.normalize_by_fused_normalization(vertices)
            vertices = self.rotate_axes_canonical_to_shapenet(normalized_vertices)
            mesh.vertices = o3d.utility.Vector3dVector(vertices)
            if mesh.has_vertex_normals():
                mesh.compute_vertex_normals()
            return mesh
        else:
            raise TypeError(
                "Input must be either an np.ndarray of points or an TriangleMesh."
            )

    def save(self, path: Path):
        """
        Save this container to a .pkl file.

        The o3d mesh is stored as plain vertex/triangle arrays, and point/latent
        arrays are cast to float32 to reduce file size.

        Raises:
            ValueError: if path does not end with .pkl.
        """
        import pickle  # local import: `pickle` is not imported at module level

        path = Path(path)
        if path.suffix != ".pkl":
            raise ValueError(f"path must end with .pkl, got {path}")
        # Convert the dataclass to a dictionary.
        data = asdict(self)
        # Process the ground truth mesh for saving (o3d objects do not pickle
        # reliably; store raw arrays instead).
        if self.gt_mesh is not None:
            data["gt_mesh"] = {
                "vertices": np.asarray(self.gt_mesh.vertices, dtype=np.float32),
                "triangles": np.asarray(self.gt_mesh.triangles, dtype=np.int32),
            }
        # Convert numpy arrays to float32 for points and latent codes.
        # Skip fields that are still None (all of them default to None);
        # np.array(None, dtype=np.float32) would raise.
        for key in [
            "fused_canonical_points",
            "mv_canonical_points",
            "fused_deepsdf_points",
            "fused_deepsdf_latent",
            "mv_deepsdf_points",
            "mv_fps_deepsdf_points",
            "mv_enforced_fps_deepsdf_points",
            "mv_deepsdf_latents",
        ]:
            if key in data and data[key] is not None:
                if isinstance(data[key], list):
                    data[key] = [np.array(item, dtype=np.float32) for item in data[key]]
                else:  # For single arrays
                    data[key] = np.array(data[key], dtype=np.float32)
        with open(path, "wb") as f:
            pickle.dump(data, f)

    @classmethod
    def load(cls, path: Path):
        """
        Load a container previously written by save().

        Raises:
            ValueError: if path does not end with .pkl.
        """
        import pickle  # local import: `pickle` is not imported at module level

        path = Path(path)
        if path.suffix != ".pkl":
            raise ValueError(f"path must end with .pkl, got {path}")
        with open(path, "rb") as f:
            data = pickle.load(f)
        # Reconstruct the ground truth mesh. asdict() always stores the key,
        # so check for a non-None value — the original `"gt_mesh" in data`
        # test crashed on containers saved without a mesh.
        if data.get("gt_mesh") is not None:
            mesh_data = data.pop("gt_mesh")
            gt_mesh = o3d.geometry.TriangleMesh(
                vertices=o3d.utility.Vector3dVector(mesh_data["vertices"]),
                triangles=o3d.utility.Vector3iVector(mesh_data["triangles"]),
            )
            data["gt_mesh"] = gt_mesh
        return cls(**data)
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/containers/fg_object.py | Python | from dataclasses import dataclass, field
from typing import List
import numpy as np
import open3d as o3d
from lit.containers.base_container import BaseContainer
from lit.containers.fg_box import FGBox
from lit.recon_utils import bbox_to_lineset
@dataclass
class FGObject(BaseContainer):
    """
    A group of FGBoxes that are of the same object.

    A FGObject contains FGBoxes from different frames, but they must have the
    same object_id. The FGObject also contains the reconstructed mesh of
    (mesh_vertices, mesh_triangles).
    """

    # Main data.
    object_id: str = None  # Object id of the group, all fg_boxes must have the same id
    fg_boxes: List[FGBox] = field(default_factory=list)  # len == # frame with object_id

    # Derived data.
    # For each foreground group, we have one mesh centered at the canonical
    # position. This mesh needs to be transformed to the correct position
    # according to each fg_box.
    mesh_vertices: np.ndarray = None  # (N, 3), vertices of the reconstructed mesh
    mesh_triangles: np.ndarray = None  # (M, 3), triangles of the reconstructed mesh

    def __post_init__(self):
        super().__post_init__()
        # Guard against an explicit fg_boxes=None argument.
        if self.fg_boxes is None:
            self.fg_boxes = []

    def __str__(self):
        return f"FGObject({len(self.fg_boxes)} fg_boxes)"

    def __repr__(self):
        return self.__str__()

    def __getitem__(self, idx):
        return self.fg_boxes[idx]

    def __len__(self):
        return len(self.fg_boxes)

    def to_dict(self):
        """Serialize to a plain dict (inverse of from_dict)."""
        return {
            "object_id": self.object_id,
            "fg_boxes": [fg_box.to_dict() for fg_box in self.fg_boxes],
            "mesh_vertices": self.mesh_vertices,
            "mesh_triangles": self.mesh_triangles,
        }

    @classmethod
    def from_dict(cls, dict_data: dict):
        """Deserialize from a dict produced by to_dict."""
        return cls(
            object_id=dict_data["object_id"],
            fg_boxes=[FGBox.from_dict(fg_box) for fg_box in dict_data["fg_boxes"]],
            mesh_vertices=dict_data["mesh_vertices"],
            mesh_triangles=dict_data["mesh_triangles"],
        )

    def render_debug(self):
        """
        Render the reconstructed mesh and the per-frame bbox line sets.

        Returns:
            The reconstructed Open3D mesh (in canonical/centered coordinates).
        """
        mesh = o3d.geometry.TriangleMesh()
        mesh.vertices = o3d.utility.Vector3dVector(self.mesh_vertices)
        mesh.triangles = o3d.utility.Vector3iVector(self.mesh_triangles)
        mesh.compute_vertex_normals()

        # Only draw every `skip_every`-th frame's bbox to reduce clutter.
        skip_every = 4
        random_color = np.random.rand(3)
        fg_object_ls = o3d.geometry.LineSet()
        for fg_box in self.fg_boxes:
            if fg_box.frame_index % skip_every != 0:
                continue
            frame_pose = fg_box.frame_pose
            frame_ls = bbox_to_lineset(fg_box.local_bbox, frame_pose)
            frame_ls.paint_uniform_color(random_color)
            fg_object_ls += frame_ls
        o3d.visualization.draw_geometries([mesh])
        o3d.visualization.draw_geometries([fg_object_ls])
        return mesh

    def insert(self, fg_box: FGBox):
        """
        Insert a fg_box to group. The inserted fg_box must have the same object_id.

        Args:
            fg_box: FGBox, the fg_box to insert.

        Raises:
            ValueError: if fg_box is not an FGBox or has a mismatched object_id.
        """
        if not isinstance(fg_box, FGBox):
            raise ValueError(f"fg_box must be FGBox, got {type(fg_box)}")
        # The first inserted box determines the group's object_id.
        if self.object_id is None:
            self.object_id = fg_box.object_id
        elif self.object_id != fg_box.object_id:
            raise ValueError(
                f"Cannot insert fg_box with object_id {fg_box.object_id} to group "
                f"with object_id {self.object_id}."
            )
        self.fg_boxes.append(fg_box)

    def scale_by(self, src_to_dst_scales: List[float]):
        """
        Scale the FGObject by src_to_dst_scales.

        Args:
            src_to_dst_scales: List of 3 floats, the scale ratio from src to dst.

        Returns:
            A new, scaled FGObject; self is left unchanged.
        """
        new_object_id = self.object_id
        new_fg_boxes = [fg_box.scale_by(src_to_dst_scales) for fg_box in self.fg_boxes]
        # As the mesh is centered at the canonical position, we scale the
        # vertices directly.
        new_mesh_vertices = self.mesh_vertices * src_to_dst_scales
        new_mesh_triangles = self.mesh_triangles.copy()
        return FGObject(
            object_id=new_object_id,
            fg_boxes=new_fg_boxes,
            mesh_vertices=new_mesh_vertices,
            mesh_triangles=new_mesh_triangles,
        )

    def get_fg_mesh(self, frame_index):
        """
        Get the foreground mesh in world coord at a specific frame index.

        In a FGObject, there could be 0 or 1 FGBox with the specified
        frame_index:
        - 0 FGBox with the specified frame_index: return None.
        - 1 FGBox with the specified frame_index: return the corresponding
          mesh in world coord.
        - More than 1 FGBox with the specified frame_index: raise ValueError.

        Returns the mesh in **world** coord.
        """
        fg_mesh = None
        num_fg_boxes_with_frame_index = 0
        for fg_box in self.fg_boxes:
            if fg_box.frame_index == frame_index:
                # The centered, canonical mesh.
                fg_mesh = o3d.geometry.TriangleMesh()
                fg_mesh.vertices = o3d.utility.Vector3dVector(self.mesh_vertices)
                fg_mesh.triangles = o3d.utility.Vector3iVector(self.mesh_triangles)
                # Convert to world coord: first canonical -> local (pseudo
                # pose), then local -> world (frame pose).
                frame_pose = fg_box.frame_pose
                pseudo_pose = fg_box.compute_local_pseudo_pose()
                fg_mesh = fg_mesh.transform(frame_pose @ pseudo_pose)
                # Count.
                num_fg_boxes_with_frame_index += 1
        # `x not in (...)` instead of the original `not x in [...]` (PEP 8).
        if num_fg_boxes_with_frame_index not in (0, 1):
            raise ValueError(
                f"For each FGObject, there could only be 0 or 1 FGBox "
                f"in a particular frame. But got {num_fg_boxes_with_frame_index}."
            )
        return fg_mesh
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/containers/fg_scene.py | Python | from dataclasses import dataclass, field
from typing import List
import numpy as np
import open3d as o3d
from lit.containers.base_container import BaseContainer
from lit.containers.fg_box import FGBox
from lit.containers.fg_object import FGObject
from lit.recon_utils import bbox_to_lineset
@dataclass
class FGScene(BaseContainer):
    """
    An FGScene contains a list of FGObjects. These FGObjects are from different
    frames of the same scene.

    With FGScene, one can extract the foreground mesh at a specific frame index.
    """

    fg_objects: List[FGObject] = field(default_factory=list)

    def __post_init__(self):
        super().__post_init__()
        # Guard against an explicit fg_objects=None argument.
        if self.fg_objects is None:
            self.fg_objects = []

    def __str__(self):
        return f"FGScene(with {len(self.fg_objects)} fg_objects)"

    def __repr__(self):
        return self.__str__()

    def __len__(self):
        return len(self.fg_objects)

    def __getitem__(self, idx):
        return self.fg_objects[idx]

    def append(self, fg_object: FGObject):
        """Append an FGObject, rejecting any other type."""
        if not isinstance(fg_object, FGObject):
            raise ValueError(f"Type must be FGObject, but got {type(fg_object)}")
        self.fg_objects.append(fg_object)

    def to_dict(self):
        """Serialize to a plain dict (inverse of from_dict)."""
        return {
            "fg_objects": [fg_object.to_dict() for fg_object in self.fg_objects],
        }

    @classmethod
    def from_dict(cls, dict_data: dict):
        """Deserialize from a dict produced by to_dict."""
        return cls(
            fg_objects=[
                FGObject.from_dict(fg_object) for fg_object in dict_data["fg_objects"]
            ],
        )

    @classmethod
    def from_fg_boxes(cls, fg_boxes: List[FGBox]):
        """
        Group multiple foreground boxes into multiple foreground groups.

        Boxes sharing an object_id end up in the same FGObject; group order
        follows first appearance in fg_boxes.

        Args:
            fg_boxes: List of FGBox, the foreground boxes.
        """
        # Dict lookup replaces the original O(n^2) linear scan per box;
        # dict insertion order preserves the original group ordering.
        fg_object_by_id = {}
        fg_objects = []
        for fg_box in fg_boxes:
            fg_object = fg_object_by_id.get(fg_box.object_id)
            if fg_object is None:
                fg_object = FGObject()
                fg_object_by_id[fg_box.object_id] = fg_object
                fg_objects.append(fg_object)
            fg_object.insert(fg_box)
        fg_scene = cls(fg_objects=fg_objects)
        return fg_scene

    def get_frame_mesh(
        self,
        frame_index: int,
        src_to_dst_scales: List[float] = None,
    ):
        """
        Get the fused foreground mesh in **world coord** at a specific frame.

        Args:
            frame_index: int, the frame index.
            src_to_dst_scales: List of 3 floats, the scale ratio from src to dst.

        Return:
            Open3D mesh, the fused foreground mesh in **world coord**.
        """
        all_group_fg_mesh = o3d.geometry.TriangleMesh()
        for fg_object in self.fg_objects:
            # Scale fg_object if needed (scale_by returns a copy).
            if src_to_dst_scales is not None:
                fg_object = fg_object.scale_by(src_to_dst_scales)
            # Append mesh; get_fg_mesh returns None when the object is absent
            # from this frame.
            fg_mesh = fg_object.get_fg_mesh(frame_index)
            if fg_mesh is not None:
                all_group_fg_mesh += fg_mesh
        return all_group_fg_mesh

    def get_frame_ls(
        self,
        frame_index: int,
        src_to_dst_scales: List[float] = None,
    ):
        """
        Get the fused world-coord foreground line set at a specific frame index.

        Args:
            frame_index: int, the frame index.
            src_to_dst_scales: List of 3 floats, the scale ratio from src to dst.

        Return:
            Open3D line set, the fused foreground line set in **world coord**.
        """
        fg_ls = o3d.geometry.LineSet()
        for fg_object in self.fg_objects:
            # Scale fg_object if needed (scale_by returns a copy).
            if src_to_dst_scales is not None:
                fg_object = fg_object.scale_by(src_to_dst_scales)
            # Append ls.
            for fg_box in fg_object.fg_boxes:
                if fg_box.frame_index == frame_index:
                    fg_box_ls = bbox_to_lineset(
                        fg_box.local_bbox,
                        frame_pose=fg_box.frame_pose,
                    )
                    fg_ls += fg_box_ls
        return fg_ls

    def get_frame_local_bboxes(
        self,
        frame_index: int,
        src_to_dst_scales: List[float] = None,
    ):
        """
        Get the fused foreground local bboxes at a specific frame index.

        Args:
            frame_index: int, the frame index.
            src_to_dst_scales: List of 3 floats, the scale ratio from src to dst.

        Return:
            bboxes, shape (N, 8). BBoxes are in **local coordinates**.
            Each bbox is: (x, y, z, dx, dy, dz, heading, class)
        """
        local_bboxes = []
        for fg_object in self.fg_objects:
            # Scale fg_object if needed (scale_by returns a copy).
            if src_to_dst_scales is not None:
                fg_object = fg_object.scale_by(src_to_dst_scales)
            # Append bboxes.
            for fg_box in fg_object.fg_boxes:
                if fg_box.frame_index == frame_index:
                    assert fg_box.local_bbox.shape == (8,)
                    local_bboxes.append(fg_box.local_bbox)
        local_bboxes = np.asarray(local_bboxes).reshape((-1, 8)).astype(np.float32)
        return local_bboxes

    def get_frame_indices(self):
        """
        Get all frame indices of the fg_objects.

        Raises:
            ValueError: if the indices are not a contiguous 0..K-1 range.
        """
        frame_indices = []
        for fg_object in self.fg_objects:
            for fg_box in fg_object.fg_boxes:
                frame_indices.append(fg_box.frame_index)
        frame_indices = sorted(list(set(frame_indices)))
        natural_indices = list(range(len(frame_indices)))
        if not np.all(np.array(frame_indices) == np.array(natural_indices)):
            raise ValueError("Sample indices are not continuous.")
        return frame_indices

    def discard_large_fg_objects(self, discard_ratio: float) -> None:
        """
        Discard large fg_objects by their volume of the reconstructed mesh's
        axis-aligned bounding box in canonical coordinates.

        Note: remaining fg_objects are reordered by ascending volume.

        Args:
            discard_ratio: float, the ratio of the largest fg_objects to be discarded.
        """
        num_discard = int(len(self.fg_objects) * discard_ratio)
        print(f"To discard {num_discard} out of {len(self.fg_objects)} fg_scene.")
        # BUG FIX: with num_discard == 0 the original slice
        # np.argsort(volumes)[:-0] is EMPTY and wrongly discarded everything.
        if num_discard <= 0:
            return
        # Compute the volume of the axis-aligned bounding box of each fg_object.
        volumes = []
        for fg_object in self.fg_objects:
            min_bound = np.min(fg_object.mesh_vertices, axis=0)
            max_bound = np.max(fg_object.mesh_vertices, axis=0)
            volume = np.prod(max_bound - min_bound)
            volumes.append(volume)
        # Keep the smallest (len - num_discard) objects, ascending by volume.
        keep_indices = np.argsort(volumes)[:-num_discard]
        self.fg_objects = [self.fg_objects[i] for i in keep_indices]
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
lit/containers/frame.py | Python | import copy
from dataclasses import dataclass
from typing import List
import camtools as ct
import numpy as np
from lit.containers.base_container import BaseContainer
@dataclass
class Frame(BaseContainer):
    """
    One captured frame of a scene: vehicle pose, per-lidar extrinsics, the
    merged local point cloud, and the annotated local boxes.

    TODO: rename points and bbox to local_points, etc.
    """

    scene_name: str  # Scene name
    frame_index: int  # Frame index in the scene
    frame_pose: np.ndarray  # (4, 4) pose of the frame (vehicle)
    lidar_to_vehicle_poses: List[np.ndarray]  # List of (4, 4) poses of the lidars
    num_points_of_each_lidar: List[int]  # After NLZ_flag filtering
    local_points: np.ndarray  # (N, 5) point cloud: (x, y, z, i, e)
    local_bboxes: np.ndarray  # (M, 8) boxes: (x, y, z, dx, dy, dz, heading, class)
    object_ids: List[str]  # (M,) object ids of the boxes

    def __post_init__(self):
        """
        Sanity-check consistency of the frame data: per-lidar counts must
        match the lidar poses, their sum must match the point array, and all
        poses must be valid.
        """
        super().__post_init__()
        num_lidars = len(self.lidar_to_vehicle_poses)
        num_counts = len(self.num_points_of_each_lidar)
        if num_counts != num_lidars:
            raise ValueError(
                f"len(num_points_of_each_lidar) != len(lidar_to_vehicle_poses): "
                f"{num_counts} != "
                f"{num_lidars}"
            )
        total_points = np.sum(self.num_points_of_each_lidar)
        if len(self.local_points) != total_points:
            raise ValueError(
                f"len(points) != np.sum(num_points_of_each_lidar): "
                f"{len(self.local_points)} != {total_points}"
            )
        ct.sanity.assert_pose(self.frame_pose)
        for lidar_pose in self.lidar_to_vehicle_poses:
            ct.sanity.assert_pose(lidar_pose)

    def __str__(self):
        return f"Frame(scene_name={self.scene_name}, frame_index={self.frame_index})"

    def __repr__(self):
        return str(self)

    def to_dict(self):
        """Serialize all frame fields into a plain dict."""
        keys = (
            "scene_name",
            "frame_index",
            "frame_pose",
            "lidar_to_vehicle_poses",
            "num_points_of_each_lidar",
            "local_points",
            "local_bboxes",
            "object_ids",
        )
        return {key: getattr(self, key) for key in keys}

    @classmethod
    def from_dict(cls, dict_data: dict):
        """Build a Frame from a dict produced by to_dict."""
        keys = (
            "scene_name",
            "frame_index",
            "frame_pose",
            "lidar_to_vehicle_poses",
            "num_points_of_each_lidar",
            "local_points",
            "local_bboxes",
            "object_ids",
        )
        return cls(**{key: dict_data[key] for key in keys})

    def clone(self):
        """
        Clone the Frame.
        """
        return copy.deepcopy(self)

    def visualize(self, **kwargs):
        """
        Visualize the frame.
        """
        raise NotImplementedError
| yxlao/lit | 24 | (NeurIPS 2024) LiT: Unifying LiDAR "Languages" with LiDAR Translator | Python | yxlao | Yixing Lao | HKU-CS |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.