id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
162,224 | from __future__ import absolute_import
import json
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
allowed_to_sync_role,
cache_all_scps,
cache_org_structure,
get_aws_principal_owner,
get_enabled_regions_for_account,
remove_temp_policies,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
generate_and_store_reverse_authorization_mapping,
)
from consoleme.lib.cloudtrail import CloudTrail
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.event_bridge.access_denies import (
detect_cloudtrail_denies_and_update_cache,
)
from consoleme.lib.event_bridge.role_updates import detect_role_changes_and_update_cache
from consoleme.lib.generic import un_wrap_json_and_dump_values
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
from consoleme.lib.v2.notifications import cache_notifications_to_redis_s3
log = config.get_logger()
async def cache_resource_templates() -> TemplatedFileModelArray:
    """Aggregate templated resources from every configured git repository.

    Each repository of type "git" is cached individually and the results are
    merged into one model array, which is stored in Redis and S3 and returned.
    """
    aggregated = TemplatedFileModelArray(templated_resources=[])
    for repo in config.get("cache_resource_templates.repositories", []):
        if repo.get("type") != "git":
            continue
        repo_result = await cache_resource_templates_for_repository(repo)
        aggregated.templated_resources.extend(repo_result.templated_resources)
    redis_key = config.get(
        "cache_resource_templates.redis.key", "cache_templated_resources_v1"
    )
    s3_bucket = config.get("cache_resource_templates.s3.bucket")
    s3_key = config.get(
        "cache_resource_templates.s3.file",
        "cache_templated_resources/cache_templated_resources_v1.json.gz",
    )
    await store_json_results_in_redis_and_s3(
        aggregated.dict(),
        redis_key=redis_key,
        s3_bucket=s3_bucket,
        s3_key=s3_key,
    )
    return aggregated
def cache_resource_templates_task() -> Dict:
    """Celery entry point: synchronously refresh the resource-template cache.

    Returns the log payload describing how many templated files were cached.
    """
    task_name = f"{__name__}.{sys._getframe().f_code.co_name}"
    cached = async_to_sync(cache_resource_templates)()
    log_data = dict(
        function=task_name,
        message="Successfully cached resource templates",
        num_templated_files=len(cached.templated_resources),
    )
    log.debug(log_data)
    return log_data
162,225 | from __future__ import absolute_import
import json
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
allowed_to_sync_role,
cache_all_scps,
cache_org_structure,
get_aws_principal_owner,
get_enabled_regions_for_account,
remove_temp_policies,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
generate_and_store_reverse_authorization_mapping,
)
from consoleme.lib.cloudtrail import CloudTrail
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.event_bridge.access_denies import (
detect_cloudtrail_denies_and_update_cache,
)
from consoleme.lib.event_bridge.role_updates import detect_role_changes_and_update_cache
from consoleme.lib.generic import un_wrap_json_and_dump_values
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
from consoleme.lib.v2.notifications import cache_notifications_to_redis_s3
log = config.get_logger()
async def cache_self_service_typeahead() -> SelfServiceTypeaheadModelArray:
def cache_self_service_typeahead_task() -> Dict:
    """Celery entry point: refresh the self-service typeahead cache.

    Returns the log payload with the number of typeahead entries cached.
    """
    task_name = f"{__name__}.{sys._getframe().f_code.co_name}"
    typeahead = async_to_sync(cache_self_service_typeahead)()
    log_data = dict(
        function=task_name,
        message="Successfully cached IAM principals and templates for self service typeahead",
        num_typeahead_entries=len(typeahead.typeahead_entries),
    )
    log.debug(log_data)
    return log_data
162,226 | from __future__ import absolute_import
import json
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
allowed_to_sync_role,
cache_all_scps,
cache_org_structure,
get_aws_principal_owner,
get_enabled_regions_for_account,
remove_temp_policies,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
generate_and_store_reverse_authorization_mapping,
)
from consoleme.lib.cloudtrail import CloudTrail
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.event_bridge.access_denies import (
detect_cloudtrail_denies_and_update_cache,
)
from consoleme.lib.event_bridge.role_updates import detect_role_changes_and_update_cache
from consoleme.lib.generic import un_wrap_json_and_dump_values
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
from consoleme.lib.v2.notifications import cache_notifications_to_redis_s3
# Celery application. `Celery` is not imported by name in this module (only
# `import celery` and `from celery import group` are visible), so reference
# the class through the module to avoid a NameError at import time.
app = celery.Celery(
    "tasks",
    broker=config.get(
        f"celery.broker.{config.region}",
        config.get("celery.broker.global", "redis://127.0.0.1:6379/1"),
    ),
    backend=config.get(
        f"celery.backend.{config.region}",
        config.get("celery.backend.global"),
    ),
)
# How long task results are kept in the result backend, in seconds.
app.conf.result_expires = config.get("celery.result_expires", 60)
# Number of messages each worker process prefetches from the broker.
app.conf.worker_prefetch_multiplier = config.get("celery.worker_prefetch_multiplier", 4)
# Acknowledge messages only after the task completes, so work is re-delivered
# if a worker dies mid-task.
app.conf.task_acks_late = config.get("celery.task_acks_late", True)
if config.get("celery.purge"):
    # Useful to clear celery queue in development
    with Timeout(seconds=5, error_message="Timeout: Are you sure Redis is running?"):
        app.control.purge()
log = config.get_logger()
def cache_credential_authorization_mapping() -> Dict:
    """Generate and store the credential authorization mapping and its reverse.

    Skips execution when an identical task is already running, so mapping
    refreshes never run concurrently. Returns the log payload.
    """
    task_name = f"{__name__}.{sys._getframe().f_code.co_name}"
    log_data = {"function": task_name}
    if is_task_already_running(task_name, []):
        log_data["message"] = "Skipping task: An identical task is currently running"
        log.debug(log_data)
        return log_data
    forward_mapping = async_to_sync(
        generate_and_store_credential_authorization_mapping
    )()
    reverse_mapping = async_to_sync(generate_and_store_reverse_authorization_mapping)(
        forward_mapping
    )
    log_data["num_group_authorizations"] = len(forward_mapping)
    log_data["num_identities"] = len(reverse_mapping)
    success_log = dict(log_data)
    success_log["message"] = "Successfully cached cloud credential authorization mapping"
    log.debug(success_log)
    return log_data
# Celery Beat schedule assembly.
# NOTE(review): `schedule`, `schedule_minute`, and the non-dev `schedule_*`
# cadence variables are not defined in this chunk — confirm they are
# initialized earlier in the file.
if config.get("development", False):
    # If debug mode, we will set up the schedule to run the next minute after the job starts
    time_to_start = datetime.utcnow() + timedelta(minutes=1)
    dev_schedule = crontab(hour=time_to_start.hour, minute=time_to_start.minute)
    # Collapse every periodic cadence onto the same one-shot crontab so all
    # tasks fire shortly after startup in development.
    schedule_30_minute = dev_schedule
    schedule_45_minute = dev_schedule
    schedule_1_hour = dev_schedule
    schedule_6_hours = dev_schedule
    schedule_5_minutes = dev_schedule
# Opt-in: refresh credential mappings when Event Bridge reports role changes.
if config.get("celery.trigger_credential_mapping_refresh_from_role_changes.enabled"):
    schedule["trigger_credential_mapping_refresh_from_role_changes"] = {
        "task": "consoleme.celery_tasks.celery_tasks.trigger_credential_mapping_refresh_from_role_changes",
        "options": {"expires": 300},
        "schedule": schedule_minute,
    }
# Opt-in: cache Cloudtrail access denies from the Event Bridge queue.
if config.get("celery.cache_cloudtrail_denies.enabled"):
    schedule["cache_cloudtrail_denies"] = {
        "task": "consoleme.celery_tasks.celery_tasks.cache_cloudtrail_denies",
        "options": {"expires": 300},
        "schedule": schedule_minute,
    }
# Always scheduled: hourly rollup of Cloudtrail errors keyed by role ARN.
schedule["cache_cloudtrail_errors_by_arn"] = {
    "task": "consoleme.celery_tasks.celery_tasks.cache_cloudtrail_errors_by_arn",
    "options": {"expires": 300},
    "schedule": schedule_1_hour,
}
# Development escape hatch: wipe the whole beat schedule.
if config.get("celery.clear_tasks_for_development", False):
    schedule = {}
app.conf.beat_schedule = schedule
app.conf.timezone = "UTC"
def detect_role_changes_and_update_cache(celery_app):
    """
    This function detects role changes through event bridge rules, and forces a refresh of the roles.

    Drains the configured SQS queue of Cloudtrail-derived role-mutation
    events, sends a `refresh_iam_role` Celery task for each unique role ARN
    observed, deletes processed messages, and returns the set of role ARNs
    queued for refresh.

    NOTE(review): `MissingConfigurationValue` and `DataNotRetrievable` are not
    imported in this chunk — confirm they are imported elsewhere in the file.
    """
    log_data = {"function": f"{__name__}.{sys._getframe().f_code.co_name}"}
    queue_arn = config.get(
        "event_bridge.detect_role_changes_and_update_cache.queue_arn", ""
    ).format(region=config.region)
    if not queue_arn:
        raise MissingConfigurationValue(
            "Unable to find required configuration value: "
            "`event_bridge.detect_role_changes_and_update_cache.queue_arn`"
        )
    # SQS ARN layout: arn:aws:sqs:<region>:<account>:<queue-name>
    queue_name = queue_arn.split(":")[-1]
    queue_account_number = queue_arn.split(":")[4]
    queue_region = queue_arn.split(":")[3]
    # Optionally assume a role before receiving messages from the queue
    queue_assume_role = config.get(
        "event_bridge.detect_role_changes_and_update_cache.assume_role"
    )
    sqs_client = boto3_cached_conn(
        "sqs",
        service_type="client",
        region=queue_region,
        retry_max_attempts=2,
        account_number=queue_account_number,
        assume_role=queue_assume_role,
        client_kwargs=config.get("boto3.client_kwargs", {}),
    )
    queue_url_res = sqs_client.get_queue_url(QueueName=queue_name)
    queue_url = queue_url_res.get("QueueUrl")
    if not queue_url:
        raise DataNotRetrievable(f"Unable to retrieve Queue URL for {queue_arn}")
    roles_to_update = set()
    messages = sqs_client.receive_message(
        QueueUrl=queue_url, MaxNumberOfMessages=10
    ).get("Messages", [])
    # Keep draining the queue in batches of up to 10 until it is empty.
    while messages:
        processed_messages = []
        for message in messages:
            try:
                message_body = json.loads(message["Body"])
                try:
                    # SNS-wrapped messages carry the event under "Message";
                    # raw Event Bridge messages carry it directly in "detail".
                    if "Message" in message_body:
                        decoded_message = json.loads(message_body["Message"])["detail"]
                    else:
                        decoded_message = message_body["detail"]
                except Exception as e:
                    log.error(
                        {
                            **log_data,
                            "message": "Unable to process Cloudtrail message",
                            "message_body": message_body,
                            "error": str(e),
                        }
                    )
                    sentry_sdk.capture_exception()
                    continue
                role_name = decoded_message["requestParameters"]["roleName"]
                role_account_id = decoded_message.get(
                    "account", decoded_message.get("recipientAccountId")
                )
                role_arn = f"arn:aws:iam::{role_account_id}:role/{role_name}"
                # Only queue one refresh per role per run.
                if role_arn not in roles_to_update:
                    celery_app.send_task(
                        "consoleme.celery_tasks.celery_tasks.refresh_iam_role",
                        args=[role_arn],
                    )
                roles_to_update.add(role_arn)
            except Exception as e:
                log.error(
                    {**log_data, "error": str(e), "raw_message": message}, exc_info=True
                )
                sentry_sdk.capture_exception()
            # Messages are acknowledged (deleted) even when processing failed,
            # so a malformed message is logged once and not retried.
            processed_messages.append(
                {
                    "Id": message["MessageId"],
                    "ReceiptHandle": message["ReceiptHandle"],
                }
            )
        sqs_client.delete_message_batch(QueueUrl=queue_url, Entries=processed_messages)
        messages = sqs_client.receive_message(
            QueueUrl=queue_url, MaxNumberOfMessages=10
        ).get("Messages", [])
    log.debug(
        {
            **log_data,
            "num_roles": len(roles_to_update),
            "message": "Triggered role cache update for roles that were created or changed",
        }
    )
    return roles_to_update
The provided code snippet includes necessary dependencies for implementing the `trigger_credential_mapping_refresh_from_role_changes` function. Write a Python function `def trigger_credential_mapping_refresh_from_role_changes()` to solve the following problem:
This task triggers a role cache refresh for any role that a change was detected for. This feature requires an Event Bridge rule monitoring Cloudtrail for your accounts for IAM role mutation. This task will trigger a credential authorization refresh if any changes were detected. This task should run in all regions to force IAM roles to be refreshed in each region's cache on change. :return:
Here is the function:
def trigger_credential_mapping_refresh_from_role_changes():
    """
    Trigger a role cache refresh for every role with a detected change.

    Requires an Event Bridge rule monitoring Cloudtrail for IAM role mutation.
    Schedules a credential authorization mapping refresh if any changes were
    detected. Should run in all regions so each region's IAM role cache is
    refreshed on change.
    :return:
    """
    task_name = f"{__name__}.{sys._getframe().f_code.co_name}"
    enabled = config.get(
        "celery.trigger_credential_mapping_refresh_from_role_changes.enabled"
    )
    if not enabled:
        return {
            "function": task_name,
            "message": "Not running Celery task because it is not enabled.",
        }
    changed_roles = detect_role_changes_and_update_cache(app)
    log_data = {
        "function": task_name,
        "message": "Successfully checked role changes",
        "num_roles_changed": len(changed_roles),
    }
    if changed_roles:
        # Credential authorization mapping refreshes must never run in
        # parallel; cache_credential_authorization_mapping guards against
        # that itself, so scheduling it here with a delay is safe.
        cache_credential_authorization_mapping.apply_async(countdown=30)
    log.debug(log_data)
    return log_data
162,227 | from __future__ import absolute_import
import json
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
allowed_to_sync_role,
cache_all_scps,
cache_org_structure,
get_aws_principal_owner,
get_enabled_regions_for_account,
remove_temp_policies,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
generate_and_store_reverse_authorization_mapping,
)
from consoleme.lib.cloudtrail import CloudTrail
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.event_bridge.access_denies import (
detect_cloudtrail_denies_and_update_cache,
)
from consoleme.lib.event_bridge.role_updates import detect_role_changes_and_update_cache
from consoleme.lib.generic import un_wrap_json_and_dump_values
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
from consoleme.lib.v2.notifications import cache_notifications_to_redis_s3
# Celery application. `Celery` is not imported by name in this module (only
# `import celery` and `from celery import group` are visible), so reference
# the class through the module to avoid a NameError at import time.
app = celery.Celery(
    "tasks",
    broker=config.get(
        f"celery.broker.{config.region}",
        config.get("celery.broker.global", "redis://127.0.0.1:6379/1"),
    ),
    backend=config.get(
        f"celery.backend.{config.region}",
        config.get("celery.backend.global"),
    ),
)
# How long task results are kept in the result backend, in seconds.
app.conf.result_expires = config.get("celery.result_expires", 60)
# Number of messages each worker process prefetches from the broker.
app.conf.worker_prefetch_multiplier = config.get("celery.worker_prefetch_multiplier", 4)
# Acknowledge messages only after the task completes, so work is re-delivered
# if a worker dies mid-task.
app.conf.task_acks_late = config.get("celery.task_acks_late", True)
if config.get("celery.purge"):
    # Useful to clear celery queue in development
    with Timeout(seconds=5, error_message="Timeout: Are you sure Redis is running?"):
        app.control.purge()
log = config.get_logger()
def cache_cloudtrail_errors_by_arn() -> Dict:
    """Cache Cloudtrail error counts per role ARN in Redis.

    Skips execution when an identical task is already running. The per-ARN
    error counts are stored under the configured Redis key with a one-day
    TTL. If the error processing reported new or changed notifications, a
    notification cache refresh task is spawned.

    :return: log payload including the number of roles with errors and the
        total error count.
    """
    function: str = f"{__name__}.{sys._getframe().f_code.co_name}"
    log_data: Dict = {"function": function}
    if is_task_already_running(function, []):
        log_data["message"] = "Skipping task: An identical task is currently running"
        log.debug(log_data)
        return log_data
    ct = CloudTrail()
    process_cloudtrail_errors_res: Dict = async_to_sync(ct.process_cloudtrail_errors)(
        aws
    )
    cloudtrail_errors = process_cloudtrail_errors_res["error_count_by_role"]
    # Cache the per-ARN error counts for one day (86400 seconds).
    red.setex(
        config.get(
            "celery.cache_cloudtrail_errors_by_arn.redis_key",
            "CLOUDTRAIL_ERRORS_BY_ARN",
        ),
        86400,
        json.dumps(cloudtrail_errors),
    )
    if process_cloudtrail_errors_res["num_new_or_changed_notifications"] > 0:
        cache_notifications.delay()
    # BUG FIX: these two statements previously used `:` (annotation syntax)
    # instead of `=`, so the keys were never actually written into log_data.
    log_data["number_of_roles_with_errors"] = len(cloudtrail_errors.keys())
    log_data["number_errors"] = sum(cloudtrail_errors.values())
    log.debug(log_data)
    return log_data
# Celery Beat schedule assembly.
# NOTE(review): `schedule`, `schedule_minute`, and the non-dev `schedule_*`
# cadence variables are not defined in this chunk — confirm they are
# initialized earlier in the file.
if config.get("development", False):
    # If debug mode, we will set up the schedule to run the next minute after the job starts
    time_to_start = datetime.utcnow() + timedelta(minutes=1)
    dev_schedule = crontab(hour=time_to_start.hour, minute=time_to_start.minute)
    # Collapse every periodic cadence onto the same one-shot crontab so all
    # tasks fire shortly after startup in development.
    schedule_30_minute = dev_schedule
    schedule_45_minute = dev_schedule
    schedule_1_hour = dev_schedule
    schedule_6_hours = dev_schedule
    schedule_5_minutes = dev_schedule
# Opt-in: refresh credential mappings when Event Bridge reports role changes.
if config.get("celery.trigger_credential_mapping_refresh_from_role_changes.enabled"):
    schedule["trigger_credential_mapping_refresh_from_role_changes"] = {
        "task": "consoleme.celery_tasks.celery_tasks.trigger_credential_mapping_refresh_from_role_changes",
        "options": {"expires": 300},
        "schedule": schedule_minute,
    }
# Opt-in: cache Cloudtrail access denies from the Event Bridge queue.
if config.get("celery.cache_cloudtrail_denies.enabled"):
    schedule["cache_cloudtrail_denies"] = {
        "task": "consoleme.celery_tasks.celery_tasks.cache_cloudtrail_denies",
        "options": {"expires": 300},
        "schedule": schedule_minute,
    }
# Always scheduled: hourly rollup of Cloudtrail errors keyed by role ARN.
schedule["cache_cloudtrail_errors_by_arn"] = {
    "task": "consoleme.celery_tasks.celery_tasks.cache_cloudtrail_errors_by_arn",
    "options": {"expires": 300},
    "schedule": schedule_1_hour,
}
# Development escape hatch: wipe the whole beat schedule.
if config.get("celery.clear_tasks_for_development", False):
    schedule = {}
app.conf.beat_schedule = schedule
app.conf.timezone = "UTC"
async def detect_cloudtrail_denies_and_update_cache(
    celery_app,
    # TTL (seconds) applied to each cached deny event in DynamoDB.
    event_ttl=config.get(
        "event_bridge.detect_cloudtrail_denies_and_update_cache.event_ttl", 86400
    ),
    # Safety valve: stop after this many events and spawn a follow-up task.
    max_num_messages_to_process=config.get(
        "event_bridge.detect_cloudtrail_denies_and_update_cache.max_num_messages_to_process",
        100,
    ),
) -> Dict[str, Any]:
    """Drain Cloudtrail access-deny events from SQS into the DynamoDB cache.

    Reads deny events from the configured queue, merges them with existing
    cached denies (incrementing per-request counts), writes them back to
    DynamoDB in batches, and deletes processed SQS messages. If the message
    limit is reached, a follow-up `cache_cloudtrail_denies` task is spawned
    to continue processing.

    NOTE(review): the config-based parameter defaults are evaluated once at
    import time, not per call.
    NOTE(review): `sync_to_async`, `get_resource_from_cloudtrail_deny`,
    `generate_policy_from_cloudtrail_deny`, `MissingConfigurationValue` and
    `DataNotRetrievable` are not imported in this chunk — confirm they are
    imported elsewhere in the file.

    :return: log payload including `num_events` (processed this run) and
        `new_events` (not previously cached).
    """
    log_data = {"function": f"{__name__}.{sys._getframe().f_code.co_name}"}
    dynamo = UserDynamoHandler()
    queue_arn = config.get(
        "event_bridge.detect_cloudtrail_denies_and_update_cache.queue_arn", ""
    ).format(region=config.region)
    if not queue_arn:
        raise MissingConfigurationValue(
            "Unable to find required configuration value: "
            "`event_bridge.detect_cloudtrail_denies_and_update_cache.queue_arn`"
        )
    # SQS ARN layout: arn:aws:sqs:<region>:<account>:<queue-name>
    queue_name = queue_arn.split(":")[-1]
    queue_account_number = queue_arn.split(":")[4]
    queue_region = queue_arn.split(":")[3]
    # Optionally assume a role before receiving messages from the queue
    queue_assume_role = config.get(
        "event_bridge.detect_cloudtrail_denies_and_update_cache.assume_role"
    )
    # Modify existing cloudtrail deny samples
    all_cloudtrail_denies_l = await dynamo.parallel_scan_table_async(
        dynamo.cloudtrail_table
    )
    # Index existing denies by request_id so new events can be merged in.
    all_cloudtrail_denies = {}
    for cloudtrail_deny in all_cloudtrail_denies_l:
        all_cloudtrail_denies[cloudtrail_deny["request_id"]] = cloudtrail_deny
    sqs_client = await sync_to_async(boto3_cached_conn)(
        "sqs",
        service_type="client",
        region=queue_region,
        retry_max_attempts=2,
        account_number=queue_account_number,
        assume_role=queue_assume_role,
        client_kwargs=config.get("boto3.client_kwargs", {}),
    )
    queue_url_res = await sync_to_async(sqs_client.get_queue_url)(QueueName=queue_name)
    queue_url = queue_url_res.get("QueueUrl")
    if not queue_url:
        raise DataNotRetrievable(f"Unable to retrieve Queue URL for {queue_arn}")
    messages_awaitable = await sync_to_async(sqs_client.receive_message)(
        QueueUrl=queue_url, MaxNumberOfMessages=10
    )
    new_events = 0
    messages = messages_awaitable.get("Messages", [])
    num_events = 0
    reached_limit_on_num_messages_to_process = False
    # Drain the queue in batches of up to 10 until empty or the limit is hit.
    while messages:
        if num_events >= max_num_messages_to_process:
            reached_limit_on_num_messages_to_process = True
            break
        processed_messages = []
        for message in messages:
            try:
                message_body = json.loads(message["Body"])
                try:
                    # SNS-wrapped messages carry the event under "Message";
                    # raw Event Bridge messages carry it directly in "detail".
                    if "Message" in message_body:
                        decoded_message = json.loads(message_body["Message"])["detail"]
                    else:
                        decoded_message = message_body["detail"]
                except Exception as e:
                    log.error(
                        {
                            **log_data,
                            "message": "Unable to process Cloudtrail message",
                            "message_body": message_body,
                            "error": str(e),
                        }
                    )
                    sentry_sdk.capture_exception()
                    continue
                event_name = decoded_message.get("eventName")
                event_source = decoded_message.get("eventSource")
                # Normalize the event source, e.g. "iam.amazonaws.com" -> "iam".
                for event_source_substitution in config.get(
                    "event_bridge.detect_cloudtrail_denies_and_update_cache.event_bridge_substitutions",
                    [".amazonaws.com"],
                ):
                    event_source = event_source.replace(event_source_substitution, "")
                event_time = decoded_message.get("eventTime")
                utc_time = datetime.strptime(event_time, "%Y-%m-%dT%H:%M:%SZ")
                epoch_event_time = int(
                    (utc_time - datetime(1970, 1, 1)).total_seconds()
                )
                # Skip entries older than a day
                if int(time.time()) - 86400 > epoch_event_time:
                    continue
                try:
                    session_name = decoded_message["userIdentity"]["arn"].split("/")[-1]
                except (
                    IndexError,
                    KeyError,
                ):  # If IAM user, there won't be a session name
                    session_name = ""
                try:
                    principal_arn = decoded_message["userIdentity"]["sessionContext"][
                        "sessionIssuer"
                    ]["arn"]
                except KeyError:  # Skip events without a parsable ARN
                    continue
                event_call = f"{event_source}:{event_name}"
                ct_event = dict(
                    error_code=decoded_message.get("errorCode"),
                    error_message=decoded_message.get("errorMessage"),
                    arn=principal_arn,
                    # principal_owner=owner,
                    session_name=session_name,
                    source_ip=decoded_message["sourceIPAddress"],
                    event_call=event_call,
                    epoch_event_time=epoch_event_time,
                    ttl=epoch_event_time + event_ttl,
                    count=1,
                )
                resource = await get_resource_from_cloudtrail_deny(
                    ct_event, decoded_message
                )
                ct_event["resource"] = resource
                # request_id uniquely identifies a (principal, session, call,
                # resource) combination so repeated denies are counted, not
                # duplicated.
                request_id = f"{principal_arn}-{session_name}-{event_call}-{resource}"
                ct_event["request_id"] = request_id
                generated_policy = await generate_policy_from_cloudtrail_deny(ct_event)
                if generated_policy:
                    ct_event["generated_policy"] = generated_policy
                if all_cloudtrail_denies.get(request_id):
                    # Already cached: accumulate the repeat count.
                    existing_count = all_cloudtrail_denies[request_id].get("count", 1)
                    ct_event["count"] += existing_count
                    all_cloudtrail_denies[request_id] = ct_event
                else:
                    all_cloudtrail_denies[request_id] = ct_event
                    new_events += 1
                num_events += 1
            except Exception as e:
                log.error({**log_data, "error": str(e)}, exc_info=True)
                sentry_sdk.capture_exception()
            # Messages are acknowledged (deleted) even when processing failed,
            # so a malformed message is logged once and not retried.
            processed_messages.append(
                {
                    "Id": message["MessageId"],
                    "ReceiptHandle": message["ReceiptHandle"],
                }
            )
        if processed_messages:
            await sync_to_async(sqs_client.delete_message_batch)(
                QueueUrl=queue_url, Entries=processed_messages
            )
        # Persist the merged deny set after each batch.
        await sync_to_async(dynamo.batch_write_cloudtrail_events)(
            all_cloudtrail_denies.values()
        )
        messages_awaitable = await sync_to_async(sqs_client.receive_message)(
            QueueUrl=queue_url, MaxNumberOfMessages=10
        )
        messages = messages_awaitable.get("Messages", [])
    if reached_limit_on_num_messages_to_process:
        # We hit our limit. Let's spawn another task immediately to process remaining messages
        celery_app.send_task(
            "consoleme.celery_tasks.celery_tasks.cache_cloudtrail_denies",
        )
    log_data["message"] = "Successfully cached Cloudtrail Access Denies"
    log_data["num_events"] = num_events
    log_data["new_events"] = new_events
    log.debug(log_data)
    return log_data
The provided code snippet includes necessary dependencies for implementing the `cache_cloudtrail_denies` function. Write a Python function `def cache_cloudtrail_denies()` to solve the following problem:
This task caches access denies reported by Cloudtrail. This feature requires an Event Bridge rule monitoring Cloudtrail for your accounts for access deny errors.
Here is the function:
def cache_cloudtrail_denies():
    """
    Cache access denies reported by Cloudtrail. This feature requires an
    Event Bridge rule monitoring Cloudtrail for access deny errors in your
    accounts.
    """
    task_name = f"{__name__}.{sys._getframe().f_code.co_name}"
    active_region = config.get("celery.active_region", config.region)
    in_dev_or_test = config.get("environment") in ["dev", "test"]
    # Only run in the active region unless we are in a dev/test environment.
    if config.region != active_region and not in_dev_or_test:
        return {
            "function": task_name,
            "message": "Not running Celery task in inactive region",
        }
    events = async_to_sync(detect_cloudtrail_denies_and_update_cache)(app)
    if events["new_events"] > 0:
        # Spawn off a task to cache errors by ARN for the UI
        cache_cloudtrail_errors_by_arn.delay()
    log_data = {
        "function": task_name,
        "message": "Successfully cached cloudtrail denies",
        # Total CT denies
        "num_cloudtrail_denies": events["num_events"],
        # "New" CT messages that we don't already have cached in Dynamo DB. Not a "repeated" error
        "num_new_cloudtrail_denies": events["new_events"],
    }
    log.debug(log_data)
    return log_data
162,228 | from __future__ import absolute_import
import json
import sys
import time
from datetime import datetime, timedelta
from typing import Any, Dict, List, Tuple, Union
import celery
import sentry_sdk
import ujson
from asgiref.sync import async_to_sync
from billiard.exceptions import SoftTimeLimitExceeded
from botocore.exceptions import ClientError
from celery import group
from celery.app.task import Context
from celery.concurrency import asynpool
from celery.schedules import crontab
from celery.signals import (
task_failure,
task_prerun,
task_received,
task_rejected,
task_retry,
task_revoked,
task_success,
task_unknown,
)
from cloudaux import sts_conn
from cloudaux.aws.iam import get_all_managed_policies
from cloudaux.aws.s3 import list_buckets
from cloudaux.aws.sns import list_topics
from cloudaux.aws.sts import boto3_cached_conn
from retrying import retry
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.tornado import TornadoIntegration
from consoleme.config import config
from consoleme.lib.account_indexers import (
cache_cloud_accounts,
get_account_id_to_name_mapping,
)
from consoleme.lib.aws import (
allowed_to_sync_role,
cache_all_scps,
cache_org_structure,
get_aws_principal_owner,
get_enabled_regions_for_account,
remove_temp_policies,
)
from consoleme.lib.aws_config import aws_config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.cloud_credential_authorization_mapping import (
generate_and_store_credential_authorization_mapping,
generate_and_store_reverse_authorization_mapping,
)
from consoleme.lib.cloudtrail import CloudTrail
from consoleme.lib.dynamo import IAMRoleDynamoHandler, UserDynamoHandler
from consoleme.lib.event_bridge.access_denies import (
detect_cloudtrail_denies_and_update_cache,
)
from consoleme.lib.event_bridge.role_updates import detect_role_changes_and_update_cache
from consoleme.lib.generic import un_wrap_json_and_dump_values
from consoleme.lib.git import store_iam_resources_in_git
from consoleme.lib.plugins import get_plugin_by_name
from consoleme.lib.policies import get_aws_config_history_url_for_resource
from consoleme.lib.redis import RedisHandler
from consoleme.lib.requests import cache_all_policy_requests
from consoleme.lib.self_service.typeahead import cache_self_service_typeahead
from consoleme.lib.templated_resources import cache_resource_templates
from consoleme.lib.timeout import Timeout
from consoleme.lib.v2.notifications import cache_notifications_to_redis_s3
aws = get_plugin_by_name(config.get("plugins.aws", "default_aws"))
The provided code snippet includes necessary dependencies for implementing the `refresh_iam_role` function. Write a Python function `def refresh_iam_role(role_arn)` to solve the following problem:
This task is called on demand to asynchronously refresh an AWS IAM role in Redis/DDB
Here is the function:
def refresh_iam_role(role_arn):
"""
This task is called on demand to asynchronously refresh an AWS IAM role in Redis/DDB
"""
account_id = role_arn.split(":")[4]
async_to_sync(aws().fetch_iam_role)(
account_id, role_arn, force_refresh=True, run_sync=True
) | This task is called on demand to asynchronously refresh an AWS IAM role in Redis/DDB |
162,229 | import collections.abc
import datetime
import logging
import os
import socket
import sys
import threading
import time
from logging import LoggerAdapter, LogRecord
from threading import Timer
from typing import Any, Dict, List, Optional, Union
import boto3
import botocore.exceptions
import logmatic
import ujson as json
import yaml
from asgiref.sync import async_to_sync
from pytz import timezone
from consoleme.lib.aws_secret_manager import get_aws_secret
from consoleme.lib.plugins import get_plugin_by_name
class UserDynamoHandler(BaseDynamoHandler):
def __init__(self, user_email: Optional[str] = None) -> None:
try:
self.requests_table = self._get_dynamo_table(
config.get("aws.requests_dynamo_table", "consoleme_requests_global")
)
self.users_table = self._get_dynamo_table(
config.get("aws.users_dynamo_table", "consoleme_users_global")
)
self.group_log = self._get_dynamo_table(
config.get("aws.group_log_dynamo_table", "consoleme_audit_global")
)
self.dynamic_config = self._get_dynamo_table(
config.get("aws.group_log_dynamo_table", "consoleme_config_global")
)
self.policy_requests_table = self._get_dynamo_table(
config.get(
"aws.policy_requests_dynamo_table", "consoleme_policy_requests"
)
)
self.api_health_roles_table = self._get_dynamo_table(
config.get(
"aws.api_health_apps_table_dynamo_table",
"consoleme_api_health_apps",
)
)
self.resource_cache_table = self._get_dynamo_table(
config.get(
"aws.resource_cache_dynamo_table", "consoleme_resource_cache"
)
)
self.cloudtrail_table = self._get_dynamo_table(
config.get("aws.cloudtrail_table", "consoleme_cloudtrail")
)
self.notifications_table = self._get_dynamo_table(
config.get("aws.notifications_table", "consoleme_notifications")
)
if user_email:
self.user = self.get_or_create_user(user_email)
self.affected_user = self.user
except Exception:
if config.get("development"):
log.error(
"Unable to connect to Dynamo. Trying to set user via development configuration",
exc_info=True,
)
self.user = self.sign_request(
{
"last_updated": int(time.time()),
"username": user_email,
"requests": [],
}
)
self.affected_user = self.user
else:
log.error("Unable to get Dynamo table.", exc_info=True)
raise
def write_resource_cache_data(self, data):
self.parallel_write_table(
self.resource_cache_table, data, ["resourceId", "resourceType"]
)
async def get_dynamic_config_yaml(self) -> bytes:
"""Retrieve dynamic configuration yaml."""
return await sync_to_async(self.get_dynamic_config_yaml_sync)()
def get_dynamic_config_yaml_sync(self) -> bytes:
"""Retrieve dynamic configuration yaml synchronously"""
c = b""
try:
current_config = self.dynamic_config.get_item(Key={"id": "master"})
if not current_config:
return c
compressed_config = current_config.get("Item", {}).get("config", "")
if not compressed_config:
return c
c = zlib.decompress(compressed_config.value)
except Exception: # noqa
sentry_sdk.capture_exception()
return c
def get_dynamic_config_dict(self) -> dict:
"""Retrieve dynamic configuration dictionary that can be merged with primary configuration dictionary."""
try:
loop = asyncio.get_running_loop()
except RuntimeError: # if cleanup: 'RuntimeError: There is no current event loop..'
loop = None
if loop and loop.is_running():
current_config_yaml = self.get_dynamic_config_yaml_sync()
else:
current_config_yaml = asyncio.run(self.get_dynamic_config_yaml())
config_d = yaml.safe_load(current_config_yaml)
return config_d
async def get_all_api_health_alerts(self) -> list:
"""Return all requests. If a status is specified, only requests with the specified status will be returned.
:param status:
:return:
"""
response: dict = self.api_health_roles_table.scan()
items = response.get("Items", [])
while "LastEvaluatedKey" in response:
response = self.api_health_roles_table.scan(
ExclusiveStartKey=response["LastEvaluatedKey"]
)
items.extend(self._data_from_dynamo_replace(response["Items"]))
return items
async def get_api_health_alert_app(self, app_name) -> dict:
resp: dict = await sync_to_async(self.api_health_roles_table.get_item)(
Key={"appName": app_name}
)
return resp.get("Item", None)
async def write_api_health_alert_info(self, request, user_email: str):
"""
Writes a health alert role to the appropriate DynamoDB table
"""
function: str = (
f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
)
# enrich request
request["app_create_time"]: int = int(time.time())
request["updated_by"]: str = user_email
request["last_updated"]: int = int(time.time())
try:
await sync_to_async(self.api_health_roles_table.put_item)(
Item=self._data_to_dynamo_replace(request)
)
except Exception:
error = {
"message": "Unable to add new api_health info request",
"request": request,
"function": function,
}
log.error(error, exc_info=True)
raise
return request
async def update_api_health_alert_info(
self, request: dict, user_email=None, update_by=None, last_updated=None
):
"""
Update api_health_alert_info by roleName
"""
function: str = (
f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
)
# enrich request
if update_by:
request["updated_by"] = update_by
else:
request["updated_by"] = user_email
if last_updated:
request["last_updated"] = last_updated
else:
request["last_updated"] = int(time.time())
try:
await sync_to_async(self.api_health_roles_table.put_item)(
Item=self._data_to_dynamo_replace(request)
)
except Exception as e:
error: dict = {
"function": function,
"message": "Unable to update api_health_info request",
"request": request,
"error": str(e),
}
log.error(error, exc_info=True)
raise Exception(error)
return request
async def delete_api_health_alert_info(self, app: str) -> None:
"""
Delete api_health_alert_info by roleName
"""
function: str = (
f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
)
try:
await sync_to_async(self.api_health_roles_table.delete_item)(
Key={"appName": app}
)
except Exception:
error: dict = {
"function": function,
"message": "Unable to delete api_health info",
"app": app,
}
log.error(error, exc_info=True)
raise
async def write_policy_request(
self,
user_email: str,
justification: str,
arn: str,
policy_name: str,
policy_changes: dict,
resources: List[str],
resource_policies: List[Dict],
request_time: int = None,
request_uuid=None,
policy_status="pending",
cross_account_request: bool = False,
dry_run: bool = False,
):
"""
Writes a policy request to the appropriate DynamoDB table
dry_run will create the request format, but won't actually write it
Sample run:
write_policy_request(policy_changes)
"""
request_time = request_time or int(time.time())
# Craft the new request json
timestamp = int(time.time())
request_id = request_uuid or str(uuid.uuid4())
new_request = {
"request_id": request_id,
"arn": arn,
"status": policy_status,
"justification": justification,
"request_time": request_time,
"updated_by": user_email,
"last_updated": timestamp,
"username": user_email,
"policy_name": policy_name,
"policy_changes": json.dumps(policy_changes),
"resources": resources,
"resource_policies": resource_policies,
"cross_account_request": cross_account_request,
}
if not dry_run:
try:
await sync_to_async(self.policy_requests_table.put_item)(
Item=self._data_to_dynamo_replace(new_request)
)
except Exception as e:
error = f"Unable to add new policy request: {new_request}: {str(e)}"
log.error(error, exc_info=True)
raise Exception(error)
else:
log_data = {
"function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
"request": new_request,
"message": "Dry run, skipping adding request to dynamo",
}
log.debug(log_data)
return new_request
async def write_policy_request_v2(self, extended_request: ExtendedRequestModel):
"""
Writes a policy request v2 to the appropriate DynamoDB table
Sample run:
write_policy_request_v2(request)
"""
new_request = {
"request_id": extended_request.id,
"principal": extended_request.principal.dict(),
"status": extended_request.request_status.value,
"justification": extended_request.justification,
"request_time": extended_request.timestamp,
"last_updated": int(time.time()),
"version": "2",
"extended_request": json.loads(extended_request.json()),
"username": extended_request.requester_email,
}
if extended_request.principal.principal_type == "AwsResource":
new_request["arn"] = extended_request.principal.principal_arn
elif extended_request.principal.principal_type == "HoneybeeAwsResourceTemplate":
repository_name = extended_request.principal.repository_name
resource_identifier = extended_request.principal.resource_identifier
new_request["arn"] = f"{repository_name}-{resource_identifier}"
else:
raise Exception("Invalid principal type")
log_data = {
"function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
"message": "Writing policy request v2 to Dynamo",
"request": new_request,
}
log.debug(log_data)
try:
await sync_to_async(self.policy_requests_table.put_item)(
Item=self._data_to_dynamo_replace(new_request)
)
log_data[
"message"
] = "Successfully finished writing policy request v2 to Dynamo"
log.debug(log_data)
except Exception as e:
log_data["message"] = "Error occurred writing policy request v2 to Dynamo"
log_data["error"] = str(e)
log.error(log_data, exc_info=True)
error = f"{log_data['message']}: {str(e)}"
raise Exception(error)
return new_request
async def update_policy_request(self, updated_request):
"""
Update a policy request by request ID
Sample run:
update_policy_request(policy_changes)
"""
updated_request["last_updated"] = int(time.time())
try:
await sync_to_async(self.policy_requests_table.put_item)(
Item=self._data_to_dynamo_replace(updated_request)
)
except Exception as e:
error = f"Unable to add updated policy request: {updated_request}: {str(e)}"
log.error(error, exc_info=True)
raise Exception(error)
return updated_request
async def get_policy_requests(self, arn=None, request_id=None):
"""Reads a policy request from the appropriate DynamoDB table"""
if not arn and not request_id:
raise Exception("Must pass in ARN or policy request ID")
if request_id:
requests = self.policy_requests_table.query(
KeyConditionExpression="request_id = :ri",
ExpressionAttributeValues={":ri": request_id},
)
else:
requests = self.policy_requests_table.query(
KeyConditionExpression="arn = :arn",
ExpressionAttributeValues={":arn": arn},
)
matching_requests = []
if requests["Items"]:
items = self._data_from_dynamo_replace(requests["Items"])
items = await self.convert_policy_requests_to_v3(items)
matching_requests.extend(items)
return matching_requests
async def convert_policy_requests_to_v3(self, requests):
# Remove this function and calls to this function after a grace period of
changed = False
for request in requests:
if not request.get("version") in ["2"]:
continue
if request.get("extended_request") and not request.get("principal"):
principal_arn = request.pop("arn")
request["principal"] = {
"principal_arn": principal_arn,
"principal_type": "AwsResource",
}
request["extended_request"]["principal"] = {
"principal_arn": principal_arn,
"principal_type": "AwsResource",
}
request.pop("arn", None)
changes = (
request.get("extended_request", {})
.get("changes", {})
.get("changes", [])
)
for change in changes:
if not change.get("principal_arn"):
continue
if not change.get("version") in ["2.0", "2", 2]:
continue
change["principal"] = {
"principal_arn": change["principal_arn"],
"principal_type": "AwsResource",
}
change.pop("principal_arn")
change["version"] = "3.0"
changed = True
if changed:
self.parallel_write_table(self.policy_requests_table, requests)
return requests
async def get_all_policy_requests(
self, status: Optional[str] = "pending"
) -> List[Dict[str, Union[int, List[str], str]]]:
"""Return all policy requests. If a status is specified, only requests with the specified status will be
returned.
:param status:
:return:
"""
requests = await sync_to_async(self.parallel_scan_table)(
self.policy_requests_table
)
requests = await self.convert_policy_requests_to_v3(requests)
return_value = []
if status:
for item in requests:
if status and item["status"] == status:
return_value.append(item)
else:
return_value = requests
return return_value
async def update_dynamic_config(self, config: str, updated_by: str) -> None:
"""Take a YAML config and writes to DDB (The reason we use YAML instead of JSON is to preserve comments)."""
# Validate that config loads as yaml, raises exception if not
yaml.safe_load(config)
stats.count("update_dynamic_config", tags={"updated_by": updated_by})
current_config_entry = self.dynamic_config.get_item(Key={"id": "master"})
if current_config_entry.get("Item"):
old_config = {
"id": current_config_entry["Item"]["updated_at"],
"updated_by": current_config_entry["Item"]["updated_by"],
"config": current_config_entry["Item"]["config"],
"updated_at": str(int(time.time())),
}
self.dynamic_config.put_item(Item=self._data_to_dynamo_replace(old_config))
new_config = {
"id": "master",
"config": zlib.compress(config.encode()),
"updated_by": updated_by,
"updated_at": str(int(time.time())),
}
self.dynamic_config.put_item(Item=self._data_to_dynamo_replace(new_config))
def validate_signature(self, items):
signature = items.pop("signature")
if isinstance(signature, Binary):
signature = signature.value
json_request = json.dumps(items, sort_keys=True)
if not crypto.verify(json_request, signature):
raise Exception(f"Invalid signature for request: {json_request}")
def sign_request(
self, user_entry: Dict[str, Union[Decimal, List[str], Binary, str]]
) -> Dict[str, Union[Decimal, List[str], str, bytes]]:
"""
Sign the request and returned request with signature
:param user_entry:
:return:
"""
# Remove old signature if it exists
user_entry.pop("signature", None)
user_entry = self._data_from_dynamo_replace(user_entry)
json_request = json.dumps(user_entry, sort_keys=True, use_decimal=True)
sig = crypto.sign(json_request)
user_entry["signature"] = sig
return user_entry
async def authenticate_user(self, login_attempt) -> AuthenticationResponse:
function: str = (
f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
)
log_data = {
"function": function,
"user_email": login_attempt.username,
"after_redirect_uri": login_attempt.after_redirect_uri,
}
user_entry = await sync_to_async(self.users_table.query)(
KeyConditionExpression="username = :un",
ExpressionAttributeValues={":un": login_attempt.username},
)
user = None
generic_error = ["User doesn't exist, or password is incorrect. "]
if user_entry and "Items" in user_entry and len(user_entry["Items"]) == 1:
user = user_entry["Items"][0]
if not user:
delay_error = await wait_after_authentication_failure(
login_attempt.username
)
error = f"Unable to find user: {login_attempt.username}"
log.error({**log_data, "message": error + delay_error})
return AuthenticationResponse(
authenticated=False, errors=generic_error + [delay_error]
)
if not user.get("password"):
delay_error = await wait_after_authentication_failure(
login_attempt.username
)
error = "User exists, but doesn't have a password stored in the database"
log.error({**log_data, "message": error + delay_error})
return AuthenticationResponse(
authenticated=False, errors=generic_error + [delay_error]
)
password_hash_matches = bcrypt.checkpw(
login_attempt.password.encode("utf-8"), user["password"].value
)
if not password_hash_matches:
delay_error = await wait_after_authentication_failure(
login_attempt.username
)
error = "Password does not match. "
log.error({**log_data, "message": error + delay_error})
return AuthenticationResponse(
authenticated=False, errors=generic_error + [delay_error]
)
return AuthenticationResponse(
authenticated=True, username=user["username"], groups=user["groups"]
)
def create_user(
self,
user_email,
password: Optional[str] = None,
groups: Optional[List[str]] = None,
):
if not groups:
groups = []
timestamp = int(time.time())
unsigned_user_entry = {
"created": timestamp,
"last_updated": timestamp,
"username": user_email,
"requests": [],
"groups": groups,
}
if password:
pw = bytes(password, "utf-8")
salt = bcrypt.gensalt()
unsigned_user_entry["password"] = bcrypt.hashpw(pw, salt)
user_entry = self.sign_request(unsigned_user_entry)
try:
self.users_table.put_item(Item=self._data_to_dynamo_replace(user_entry))
except Exception as e:
error = f"Unable to add user submission: {user_entry}: {str(e)}"
log.error(error, exc_info=True)
raise Exception(error)
return user_entry
def update_user(
self,
user_email,
password: Optional[str] = None,
groups: Optional[List[str]] = None,
):
if not groups:
groups = []
user_ddb = self.users_table.query(
KeyConditionExpression="username = :un",
ExpressionAttributeValues={":un": user_email},
)
user = None
if user_ddb and "Items" in user_ddb and len(user_ddb["Items"]) == 1:
user = user_ddb["Items"][0]
if not user:
raise DataNotRetrievable(f"Unable to find user: {user_email}")
timestamp = int(time.time())
if password:
pw = bytes(password, "utf-8")
salt = bcrypt.gensalt()
user["password"] = bcrypt.hashpw(pw, salt)
if groups:
user["groups"] = groups
user["last_updated"] = timestamp
user_entry = self.sign_request(user)
try:
self.users_table.put_item(Item=self._data_to_dynamo_replace(user_entry))
except Exception as e:
error = f"Unable to add user submission: {user_entry}: {str(e)}"
log.error(error, exc_info=True)
raise Exception(error)
return user_entry
def delete_user(self, user_email):
function: str = (
f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
)
log_data = {"function": function, "user_email": user_email}
log.debug(log_data)
user_entry = {"username": user_email}
try:
self.users_table.delete_item(Key=self._data_to_dynamo_replace(user_entry))
except Exception as e:
error = f"Unable to delete user: {user_entry}: {str(e)}"
log.error(error, exc_info=True)
raise Exception(error)
async def get_user(
self, user_email: str
) -> Optional[Dict[str, Union[Decimal, List[str], Binary, str]]]:
function: str = (
f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
)
log_data = {"function": function, "user_email": user_email}
log.debug(log_data)
user = self.users_table.query(
KeyConditionExpression="username = :un",
ExpressionAttributeValues={":un": user_email},
)
if user and "Items" in user and len(user["Items"]) == 1:
return user["Items"][0]
return None
def get_or_create_user(
self, user_email: str
) -> Dict[str, Union[Decimal, List[str], Binary, str]]:
function: str = (
f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
)
log_data = {"function": function, "user_email": user_email}
log.debug(log_data)
user = self.users_table.query(
KeyConditionExpression="username = :un",
ExpressionAttributeValues={":un": user_email},
)
items = []
if user and "Items" in user:
items = user["Items"]
if not items:
return self.create_user(user_email)
return items[0]
def resolve_request_ids(
self, request_ids: List[str]
) -> List[Dict[str, Union[int, str]]]:
requests = []
for request_id in request_ids:
request = self.requests_table.query(
KeyConditionExpression="request_id = :ri",
ExpressionAttributeValues={":ri": request_id},
)
if request["Items"]:
items = self._data_from_dynamo_replace(request["Items"])
requests.append(items[0])
else:
raise NoMatchingRequest(
f"No matching request for request_id: {request_id}"
)
return requests
def add_request_id_to_user(
self,
affected_user: Dict[str, Union[Decimal, List[str], Binary, str]],
request_id: str,
) -> None:
affected_user["requests"].append(request_id)
self.users_table.put_item(
Item=self._data_to_dynamo_replace(self.sign_request(affected_user))
)
def add_request(
self,
user_email: str,
group: str,
justification: str,
request_time: None = None,
status: str = "pending",
updated_by: Optional[str] = None,
) -> Dict[str, Union[int, str]]:
"""
Add a user request to the dynamo table
Sample run:
add_request("user@example.com", "engtest", "because")
:param user_email: Email address of user
:param group: Name of group user is requesting access to
:param justification:
:param request_id:
:param request_time:
:param status:
:param updated_by:
:return:
"""
"""
Request:
group
justification
role
request_time
approval_time
updated_by
approval_reason
status
user@example.com:
requests: []
last_updated: 1
signature: xxxx
#pending: []
#expired: []
# How to expire requests if soemeone maliciously deletes content
# How to query for all approved requests for group X
# What if we want to send email saying your request is expiring in 7 days? Maybe celery to query all
# What about concept of request ID? Maybe base64 encoded thing?
# Need an all-in-one page to show all pending requests, all expired/approved requests
"""
request_time = request_time or int(time.time())
stats.count("new_group_request", tags={"user": user_email, "group": group})
if self.affected_user.get("username") != user_email:
self.affected_user = self.get_or_create_user(user_email)
# Get current user. Create if they do not already exist
# self.user = self.get_or_create_user(user_email)
# Get current user requests, which will validate existing signature
# existing_request_ids = self.user["requests"]
# existing_requests = self.resolve_request_ids(existing_request_ids)
existing_pending_requests_for_group = self.get_requests_by_user(
user_email, group, status="pending"
)
# Craft the new request json
timestamp = int(time.time())
request_id = str(uuid.uuid4())
new_request = {
"request_id": request_id,
"group": group,
"status": status,
"justification": justification,
"request_time": request_time,
"updated_by": updated_by,
"last_updated": timestamp,
"username": user_email,
}
# See if user already has an active or pending request for the group
if existing_pending_requests_for_group:
for request in existing_pending_requests_for_group:
raise PendingRequestAlreadyExists(
f"Pending request for group: {group} already exists: {request}"
)
try:
self.requests_table.put_item(Item=self._data_to_dynamo_replace(new_request))
except Exception as e:
error = {
"error": f"Unable to add user request: {str(e)}",
"request": new_request,
}
log.error(error, exc_info=True)
raise Exception(error)
self.add_request_id_to_user(self.affected_user, request_id)
return new_request
async def get_all_requests(self, status=None):
"""Return all requests. If a status is specified, only requests with the specified status will be returned.
:param status:
:return:
"""
items = await sync_to_async(self.parallel_scan_table)(self.requests_table)
return_value = []
if status:
for item in items:
new_json = []
for j in item["json"]:
if j["status"] == status:
new_json.append(j)
item["json"] = new_json
if new_json:
return_value.append(item)
else:
return_value = items
return return_value
def get_requests_by_user(
self,
user_email: str,
group: str = None,
status: str = None,
use_cache: bool = False,
) -> dict:
"""Get requests by user. Group and status can also be specified to filter results.
:param user_email:
:param group:
:param status:
:return:
"""
red_key = f"USER_REQUESTS_{user_email}-{group}-{status}"
if use_cache:
requests_to_return = red.get(red_key)
if requests_to_return:
return json.loads(requests_to_return)
if self.affected_user.get("username") != user_email:
self.affected_user = self.get_or_create_user(user_email)
existing_request_ids = self.affected_user["requests"]
existing_requests = self.resolve_request_ids(existing_request_ids)
requests_to_return = []
if existing_requests:
for request in existing_requests:
if group and request["group"] != group:
continue
if status and request["status"] != status:
continue
requests_to_return.append(request)
if use_cache:
red.setex(red_key, 120, json.dumps(requests_to_return))
return requests_to_return
def change_request_status(
self, user_email, group, new_status, updated_by=None, reviewer_comments=None
):
"""
:param user:
:param status:
:param request_id:
:return:
"""
stats.count(
"update_group_request",
tags={
"user": user_email,
"group": group,
"new_status": new_status,
"updated_by": updated_by,
},
)
modified_request = None
if self.affected_user.get("username") != user_email:
self.affected_user = self.get_or_create_user(user_email)
timestamp = int(time.time())
if new_status not in POSSIBLE_STATUSES:
raise Exception(
f"Invalid status. Status must be one of {POSSIBLE_STATUSES}"
)
if new_status == "approved" and not updated_by:
raise Exception(
"You must provide `updated_by` to change a request status to approved."
)
existing_requests = self.get_requests_by_user(user_email)
if existing_requests:
updated = False
for request in existing_requests:
if request["group"] == group:
request["updated_by"] = updated_by
request["status"] = new_status
request["last_updated"] = timestamp
request["reviewer_comments"] = reviewer_comments
modified_request = request
try:
self.requests_table.put_item(
Item=self._data_to_dynamo_replace(request)
)
except Exception as e:
error = f"Unable to add user request: {request}: {str(e)}"
log.error(error, exc_info=True)
raise Exception(error)
updated = True
if not updated:
raise NoExistingRequest(
f"Unable to find existing request for user: {user_email} and group: {group}."
)
else:
raise NoExistingRequest(
f"Unable to find existing requests for user: {user_email}"
)
return modified_request
def change_request_status_by_id(
self,
request_id: str,
new_status: str,
updated_by: Optional[str] = None,
reviewer_comments: Optional[str] = None,
) -> Dict[str, Union[int, str]]:
"""
Change request status by ID
:param request_id:
:param new_status:
:param updated_by:
:return: new requests
"""
modified_request = None
if new_status == "approved" and not updated_by:
raise Exception(
"You must provide `updated_by` to change a request status to approved."
)
requests = self.resolve_request_ids([request_id])
if new_status not in POSSIBLE_STATUSES:
raise Exception(
f"Invalid status. Status must be one of {POSSIBLE_STATUSES}"
)
for request in requests:
request["status"] = new_status
request["updated_by"] = updated_by
request["last_updated"] = int(time.time())
request["reviewer_comments"] = reviewer_comments
modified_request = request
try:
self.requests_table.put_item(Item=self._data_to_dynamo_replace(request))
except Exception as e:
error = f"Unable to add user request: {request} : {str(e)}"
log.error(error, exc_info=True)
raise Exception(error)
return modified_request
def get_all_policies(self):
"""Return all policies."""
response = self.policies_table.scan()
items = []
if response and "Items" in response:
items = self._data_from_dynamo_replace(response["Items"])
while "LastEvaluatedKey" in response:
response = self.policies_table.scan(
ExclusiveStartKey=response["LastEvaluatedKey"]
)
items.extend(self._data_from_dynamo_replace(response["Items"]))
return items
async def create_group_log_entry(
self,
group: str,
username: str,
updated_by: str,
action: str,
updated_at: None = None,
extra: None = None,
) -> None:
updated_at = updated_at or int(time.time())
log_id = str(uuid.uuid4())
log_entry = {
"uuid": log_id,
"group": group,
"username": username,
"updated_by": updated_by,
"updated_at": updated_at,
"action": action,
"extra": extra,
}
self.group_log.put_item(Item=self._data_to_dynamo_replace(log_entry))
async def get_all_audit_logs(self) -> List[Dict[str, Union[int, None, str]]]:
response = await sync_to_async(self.group_log.scan)()
items = []
if response and "Items" in response:
items = self._data_from_dynamo_replace(response["Items"])
while "LastEvaluatedKey" in response:
response = await sync_to_async(self.group_log.scan)(
ExclusiveStartKey=response["LastEvaluatedKey"]
)
items.extend(self._data_from_dynamo_replace(response["Items"]))
return items
async def get_all_pending_requests(self):
return await self.get_all_requests(status="pending")
def batch_write_cloudtrail_events(self, items):
with self.cloudtrail_table.batch_writer(
overwrite_by_pkeys=["arn", "request_id"]
) as batch:
for item in items:
batch.put_item(Item=self._data_to_dynamo_replace(item))
return True
async def get_top_cloudtrail_errors_by_arn(self, arn, n=5):
response: dict = await sync_to_async(self.cloudtrail_table.query)(
KeyConditionExpression=Key("arn").eq(arn)
)
items = response.get("Items", [])
aggregated_errors = defaultdict(dict)
for item in items:
if int(item["ttl"]) < int(time.time()):
continue
event_call = item["event_call"]
event_resource = item.get("resource", "")
event_string = f"{event_call}|||{event_resource}"
if not aggregated_errors.get(event_string):
aggregated_errors[event_string]["count"] = 0
aggregated_errors[event_string]["generated_policy"] = item.get(
"generated_policy", {}
)
aggregated_errors[event_string]["count"] += 1
top_n_errors = {
k: v
for k, v in sorted(
aggregated_errors.items(),
key=lambda item: item[1]["count"],
reverse=True,
)[:n]
}
return top_n_errors
def count_arn_errors(self, error_count, items):
for item in items:
arn = item.get("arn")
if not error_count.get(arn):
error_count[arn] = 0
error_count[arn] += item.get("count", 1)
return error_count
def refresh_dynamic_config(ddb=None):
if not ddb:
# This function runs frequently. We provide the option to pass in a UserDynamoHandler
# so we don't need to import on every invocation
from consoleme.lib.dynamo import UserDynamoHandler
ddb = UserDynamoHandler()
return ddb.get_dynamic_config_dict() | null |
162,230 | import os
import subprocess
from pathlib import Path
from aws_cdk import aws_lambda as lambda_
def create_dependencies_layer(self, function_name: str) -> lambda_.LayerVersion:
base_path = f"./resources/{function_name}"
output_dir = f"{base_path}/build/{function_name}"
if not os.environ.get("SKIP_PIP"):
build_folder = Path(f"{base_path}/build")
requirements_file = Path(f"{base_path}/requirements.txt")
# Remove build folder from previous runs
if build_folder.is_dir():
subprocess.check_call(f"rm -rf {base_path}/build".split())
# Remove requirements file from previous runs
if requirements_file.is_file():
subprocess.check_call(f"rm -rf {base_path}/requirements.txt".split())
# Create requirements file using pipreqs
subprocess.check_call(f"pipreqs {base_path}".split())
requirements_file = Path(f"{base_path}/requirements.txt")
if requirements_file.is_file():
subprocess.check_call(
f"pip install -r {base_path}/requirements.txt -t {output_dir}/python".split()
)
layer_id = f"{function_name}-dependencies"
layer_code = lambda_.Code.from_asset(output_dir)
return lambda_.LayerVersion(self, layer_id, code=layer_code) | null |
162,231 | import os
import boto3
import cfnresponse
import yaml
def on_create(event, context):
    """Render config.yaml from environment variables and upload it to S3.

    CloudFormation custom-resource handler for Create/Update. The entire body
    runs inside try/except so every failure path reports FAILED via
    cfnresponse; an unreported exception would otherwise leave the stack
    waiting until its timeout.
    """
    try:
        with open("config.yaml") as f:
            config_yaml = f.read()
        config_yaml = config_yaml.format(
            issuer=os.getenv("ISSUER"),
            oidc_metadata_url=os.getenv("OIDC_METADATA_URL"),
            redis_host=os.getenv("REDIS_HOST"),
            aws_region=os.getenv("AWS_REGION"),
            ses_identity_arn=os.getenv("SES_IDENTITY_ARN"),
            support_chat_url=os.getenv("SUPPORT_CHAT_URL"),
            application_admin=os.getenv("APPLICATION_ADMIN"),
            account_number=os.getenv("ACCOUNT_NUMBER"),
            # Fix: this name was previously an undefined local variable
            # (NameError at runtime, so cfnresponse was never sent). Sourced
            # from the environment like every other value here.
            # TODO(review): confirm the exact env var name the deployment sets.
            spoke_accounts_objects_list_yaml=os.getenv(
                "SPOKE_ACCOUNTS_OBJECTS_LIST_YAML", ""
            ),
            config_secret_name=os.getenv("CONFIG_SECRET_NAME"),
        )
        encoded_config = config_yaml.encode("utf-8")
        bucket_name = os.getenv("DEPLOYMENT_BUCKET")
        file_name = "config.yaml"
        s3_path = file_name
        result = s3_client.put_object(
            Bucket=bucket_name, Key=s3_path, Body=encoded_config
        )
        cfnresponse.send(event, context, cfnresponse.SUCCESS, result)
    except Exception as ex:
        cfnresponse.send(event, context, cfnresponse.FAILED, str(ex))
def on_delete(event, context):
    """Delete the rendered config.yaml from the deployment bucket.

    CloudFormation custom-resource Delete handler; always reports the
    outcome through cfnresponse (SUCCESS with the S3 response, or FAILED
    with the stringified exception).
    """
    bucket = os.getenv("DEPLOYMENT_BUCKET")
    key = "config.yaml"
    try:
        outcome = s3_client.delete_object(Bucket=bucket, Key=key)
        cfnresponse.send(event, context, cfnresponse.SUCCESS, outcome)
    except Exception as ex:
        cfnresponse.send(event, context, cfnresponse.FAILED, str(ex))
def handler(event, context):
    """Route a CloudFormation custom-resource lifecycle event.

    Create and Update both (re)render the config; Delete removes it.
    Unknown request types raise, which CloudFormation treats as failure.
    """
    request_type = event["RequestType"]
    if request_type in ("Create", "Update"):
        return on_create(event, context)
    if request_type == "Delete":
        return on_delete(event, context)
    raise Exception("Invalid request type: %s" % request_type)
import base64
import errno
import logging
import os
import sys
import boto3
def split_s3_path(s3_path):
    """Split an S3 URI or "bucket/key" string into (bucket, key).

    :param s3_path: e.g. "s3://my-bucket/some/key" or "my-bucket/some/key"
    :returns: tuple of (bucket, key); key is "" when no "/" follows the bucket
    """
    # Strip only a leading scheme. The previous str.replace("s3://", "")
    # would also remove the literal substring "s3://" anywhere in the key.
    prefix = "s3://"
    if s3_path.startswith(prefix):
        s3_path = s3_path[len(prefix):]
    bucket, _, key = s3_path.partition("/")
    return bucket, key
import base64
import errno
import logging
import os
import sys
import boto3
def make_directories(loc):
    """Ensure the parent directory of *loc* exists, creating it if needed.

    :param loc: a file path; only its directory portion is created.
    :raises OSError: for failures other than "already exists"
        (permissions, invalid path, ...).
    """
    head, _ = os.path.split(loc)
    # exist_ok=True replaces the old try/except-EEXIST dance; it still
    # raises if `head` exists but is a regular file, which is stricter
    # (and more correct) than silently continuing.
    os.makedirs(head, exist_ok=True)
import argparse
import concurrent.futures
import os
import time
from concurrent.futures.thread import ThreadPoolExecutor
from asgiref.sync import async_to_sync
from consoleme.celery_tasks import celery_tasks as celery
from consoleme.config import config
from consoleme.default_plugins.plugins.celery_tasks import (
celery_tasks as default_celery_tasks,
)
from consoleme.lib.account_indexers import get_account_id_to_name_mapping
def str2bool(v):
    """Parse a human-friendly boolean flag value for argparse.

    Accepts actual bools unchanged; otherwise matches common yes/no
    spellings case-insensitively.

    :raises argparse.ArgumentTypeError: for unrecognized values.
    """
    if isinstance(v, bool):
        return v
    normalized = v.lower()
    if normalized in {"yes", "true", "t", "y", "1"}:
        return True
    if normalized in {"no", "false", "f", "n", "0"}:
        return False
    raise argparse.ArgumentTypeError("Boolean value expected.")
import json
import os
import sys
import yaml
def load_template_config():
    """Load template_config.yaml from this script's directory.

    Exits the process with status 1 (after printing the error) on any
    failure — this is a CLI entry-point helper, not a library function.
    """
    config_path = f"{os.path.dirname(__file__)}/template_config.yaml"
    try:
        with open(config_path, "r") as config_file:
            return yaml.safe_load(config_file)
    except Exception as err:
        print(f"Error loading template config file: {str(err)}")
        sys.exit(1)
import json
import os
import sys
import yaml
def generate_questions(template_config):
    """Translate template_config questions into a SurveyJS-style question list.

    Walks ``template_config["questions"]`` in order and emits one dict per
    question with SurveyJS-ish keys (``name``, ``title``, ``type``,
    ``visibleIf``, ...). Because several source questions can share a
    ``config_variable``, non-dunder names get a ``_PLACEHOLDER_<n>`` suffix
    and are recorded in ``placeholders_map`` so later questions
    (dependencies, templated defaults) reference the de-duplicated name.

    Order matters: a question's ``depends_on`` / templated ``default`` may
    only reference config variables that appeared earlier in the list.
    """
    generated_config = []
    counter = 0  # monotonically increasing suffix for de-duplicated names
    placeholders_map = {}  # original config_variable -> de-duplicated name
    for question in template_config["questions"]:
        cur_generated_question = {}
        # if the question has a condition
        if "depends_on" in question:
            # "__"-prefixed names are template-internal and used verbatim;
            # anything else maps through its de-duplicated placeholder.
            depends_on_actual = (
                question["depends_on"]
                if question["depends_on"].startswith("__")
                else placeholders_map[question["depends_on"]]
            )
            # SurveyJS visibleIf expression: "{var} = 'value'"
            cur_generated_question["visibleIf"] = (
                "{"
                + f"{depends_on_actual}"
                + "}"
                + f" = '{question['depends_on_val'][0]}'"
            )
            # Any additional accepted values are OR-ed onto the expression.
            for idx in range(1, len(question["depends_on_val"])):
                value = question["depends_on_val"][idx]
                cur_generated_question["visibleIf"] += (
                    " or {" + f"{depends_on_actual}" + "}" + f" = '{value}'"
                )
        if question["config_variable"].startswith("__"):
            cur_generated_question["name"] = question["config_variable"]
        else:
            # Some of our questions have the same "name" which causes problems. We need to de-duplicate
            cur_generated_question["name"] = (
                question["config_variable"] + f"_PLACEHOLDER_{counter}"
            )
            placeholders_map[question["config_variable"]] = cur_generated_question[
                "name"
            ]
            counter += 1
        # if the question has a default answer
        # default_ans = question.get("default", "")
        if "default" in question:
            cur_generated_question["defaultValue"] = question["default"]
            # A "{var}" template inside a string default must point at the
            # de-duplicated placeholder name, not the raw config variable.
            if isinstance(question["default"], str) and "{" in question["default"]:
                variable = question["default"].split("{", 1)[1].split("}", 1)[0]
                # templates use "-" where placeholders_map keys use "."
                placeholder_variable = placeholders_map[variable.replace("-", ".")]
                cur_generated_question["defaultValue"] = question["default"].replace(
                    variable, placeholder_variable
                )
        # formatted keys
        if "format_text" in question:
            cur_generated_question["__format_text"] = question["format_text"]
        if question["type"] == "no_question":
            # Fixed value rendered as a read-only text field; nothing to ask.
            cur_generated_question["type"] = "text"
            cur_generated_question["readOnly"] = True
            # cur_generated_question["visible"] = True
            cur_generated_question["defaultValue"] = question["value"]
            generated_config.append(cur_generated_question)
            continue
        if question.get("required", False):
            cur_generated_question["isRequired"] = True
        # Generate the question text to ask
        question_text = template_config["default"][question["type"]].format(
            friendly_name=question["friendly_name"],
            friendly_description=question["friendly_description"],
        )
        cur_generated_question["title"] = question_text
        # Different prompts based on question type
        if question["type"] == "email":
            cur_generated_question["type"] = "text"
            cur_generated_question["inputType"] = "email"
            cur_generated_question["autoComplete"] = "email"
            cur_generated_question["validators"] = [{"type": "email"}]
        elif question["type"] == "confirmation":
            cur_generated_question["type"] = "boolean"
        elif question["type"] == "text":
            cur_generated_question["type"] = "text"
        elif question["type"] == "select":
            cur_generated_question["type"] = "radiogroup"
            cur_generated_question["colCount"] = 1
            cur_generated_question["choices"] = question["choices"]
            cur_generated_question["isRequired"] = True
        elif question["type"] == "list" or question["type"] == "list_dict":
            # Lists are collected as free text and parsed downstream.
            cur_generated_question["type"] = "text"
            cur_generated_question["__extra_details"] = question["type"]
        generated_config.append(cur_generated_question)
    return generated_config
import json
import os
import re
import sys
import time
import questionary
import yaml
def load_template_config():
    """Read and parse template_config.yaml (located next to this script).

    CLI helper: prints the problem and exits with status 1 if the file
    cannot be read or parsed.
    """
    path = f"{os.path.dirname(__file__)}/template_config.yaml"
    try:
        with open(path, "r") as fh:
            parsed = yaml.safe_load(fh)
        return parsed
    except Exception as err:
        print(f"Error loading template config file: {str(err)}")
        sys.exit(1)
import json
import os
import re
import sys
import time
import questionary
import yaml
def get_generated_config_path():
    """Prompt the user for a directory to save the generated config file.

    Falls back to this script's directory when the chosen path is not an
    existing directory.
    """
    default_path = f"{os.path.dirname(__file__)}"
    chosen_dir = questionary.path(
        message="Where do you want to save the generated config file?",
        default=default_path,
        only_directories=True,
    ).unsafe_ask()
    if os.path.isdir(chosen_dir):
        return chosen_dir
    print(f"Invalid path provided, saving to default path instead: {default_path}")
    return default_path
import json
import os
import re
import sys
import time
import questionary
import yaml
def email_validator(input_email: str):
    """questionary validator: True on a valid email, else an error string.

    Uses the module-level ``email_regex`` pattern (defined elsewhere in
    this file).
    """
    matched = re.search(email_regex, input_email)
    return True if matched else "Invalid email: please enter a valid email address"
def ask_questions(template_config):
    """Interactively collect answers for every question in template_config.

    Questions are processed in order; answers land in ``generated_config``
    keyed by ``config_variable`` (dot-delimited for nesting). A question is
    skipped when its ``depends_on`` variable is unanswered or holds a value
    outside ``depends_on_val``. String defaults may reference earlier
    answers via ``{key-with-dashes}`` placeholders.
    """
    generated_config = {}
    generated_config_dash_delimited = {}
    for question in template_config["questions"]:
        # Dashes stand in for dots so earlier answers can be used with
        # str.format, whose field names cannot contain ".".
        generated_config_dash_delimited = {
            k.replace(".", "-"): v for k, v in generated_config.items()
        }
        # if the question has a condition and it is not same, don't ask the question
        if "depends_on" in question:
            # If the depended-on key isn't present at all, then skip question
            if question["depends_on"] not in generated_config:
                continue
            if (
                generated_config[question["depends_on"]]
                not in question["depends_on_val"]
            ):
                continue
        # if it is not a question
        if question["type"] == "no_question":
            generated_config[question["config_variable"]] = question["value"]
            continue
        # Generate the question text to ask
        question_text = template_config["default"][question["type"]].format(
            friendly_name=question["friendly_name"],
            friendly_description=question["friendly_description"],
        )
        # if the question has a default answer
        default_ans = question.get("default", "")
        if question.get("default") and isinstance(question["default"], str):
            try:
                default_ans = question.get("default", "").format(
                    **generated_config_dash_delimited
                )
            except KeyError:
                # Referenced answer not collected (yet): keep raw template.
                pass
        # Different prompts based on question type
        if question["type"] == "email":
            generated_config[question["config_variable"]] = questionary.text(
                message=question_text, validate=email_validator, default=default_ans
            ).unsafe_ask()
        elif question["type"] == "confirmation":
            generated_config[question["config_variable"]] = questionary.confirm(
                message=question_text, default=default_ans
            ).unsafe_ask()
        elif question["type"] == "text":
            if question.get("required", False):
                generated_config[question["config_variable"]] = questionary.text(
                    message=question_text,
                    default=default_ans,
                    validate=lambda text: True
                    if len(text) > 0
                    else "This is a required field",
                ).unsafe_ask()
            else:
                generated_config[question["config_variable"]] = questionary.text(
                    message=question_text,
                    default=default_ans,
                ).unsafe_ask()
        elif question["type"] == "select":
            choices = question["choices"]
            generated_config[question["config_variable"]] = questionary.select(
                choices=choices, message=question_text
            ).unsafe_ask()
        elif question["type"] == "list" or question["type"] == "list_dict":
            # Lists are entered as one comma-separated string and parsed here.
            if question.get("required", False):
                values = questionary.text(
                    message=question_text,
                    default=default_ans,
                    validate=lambda text: True
                    if len(text) > 0
                    else "This is a required field",
                ).unsafe_ask()
            else:
                values = questionary.text(
                    message=question_text, default=default_ans
                ).unsafe_ask()
            if values != "":
                values = values.split(",")
                if question["type"] == "list":
                    generated_config[question["config_variable"]] = []
                    for val in values:
                        generated_config[question["config_variable"]].append(
                            val.strip()
                        )
                else:
                    # list_dict: each "k:v" entry becomes its own dotted key.
                    generated_config[question["config_variable"]] = {}
                    for val in values:
                        val = val.strip()
                        val = val.split(":")
                        cur_key = question["config_variable"] + "." + val[0]
                        generated_config[cur_key] = val[1]
            else:
                generated_config[question["config_variable"]] = []
        # formatted keys
        if "format_text" in question:
            generated_config[question["config_variable"]] = question[
                "format_text"
            ].format(generated_config[question["config_variable"]])
    return generated_config
import json
import os
import re
import sys
import time
import questionary
import yaml
def update_nested_dict(d, k, v):
    """Set a dot-delimited key *k* to *v* inside (possibly nested) dict *d*.

    Intermediate dicts are created as needed; *d* is mutated in place and
    returned.
    """
    head, dot, rest = k.partition(".")
    if dot:
        d[head] = update_nested_dict(d.get(head, {}), rest, v)
    else:
        d[k] = v
    return d
def generate_consoleme_style_config(generated_config):
    """Convert the flat dot-delimited answer map into a nested config dict.

    Keys beginning with "__" are template bookkeeping, and empty string/list
    answers carry no information — both are dropped.
    """
    nested_config = {}
    for key, value in generated_config.items():
        # skip templated config variables that are not actual config keys
        if key.startswith("__"):
            continue
        # skip empty answers
        if isinstance(value, (str, list)) and len(value) == 0:
            continue
        update_nested_dict(nested_config, key, value)
    return nested_config
import asyncio
import sys
from consoleme.lib.dynamo import UserDynamoHandler
class UserDynamoHandler(BaseDynamoHandler):
    def __init__(self, user_email: Optional[str] = None) -> None:
        """Initialize handles to every ConsoleMe DynamoDB table.

        Table names come from configuration with conventional defaults. When
        ``user_email`` is provided, the matching user record is loaded
        (created on first sight). If Dynamo is unreachable and the
        "development" config flag is set, a minimal signed in-memory user
        entry is fabricated instead of raising.
        """
        try:
            self.requests_table = self._get_dynamo_table(
                config.get("aws.requests_dynamo_table", "consoleme_requests_global")
            )
            self.users_table = self._get_dynamo_table(
                config.get("aws.users_dynamo_table", "consoleme_users_global")
            )
            self.group_log = self._get_dynamo_table(
                config.get("aws.group_log_dynamo_table", "consoleme_audit_global")
            )
            # NOTE(review): this reuses the "aws.group_log_dynamo_table"
            # config key from the audit table above (looks like copy-paste)
            # while defaulting to a different table name — confirm whether a
            # dedicated dynamic-config key was intended.
            self.dynamic_config = self._get_dynamo_table(
                config.get("aws.group_log_dynamo_table", "consoleme_config_global")
            )
            self.policy_requests_table = self._get_dynamo_table(
                config.get(
                    "aws.policy_requests_dynamo_table", "consoleme_policy_requests"
                )
            )
            self.api_health_roles_table = self._get_dynamo_table(
                config.get(
                    "aws.api_health_apps_table_dynamo_table",
                    "consoleme_api_health_apps",
                )
            )
            self.resource_cache_table = self._get_dynamo_table(
                config.get(
                    "aws.resource_cache_dynamo_table", "consoleme_resource_cache"
                )
            )
            self.cloudtrail_table = self._get_dynamo_table(
                config.get("aws.cloudtrail_table", "consoleme_cloudtrail")
            )
            self.notifications_table = self._get_dynamo_table(
                config.get("aws.notifications_table", "consoleme_notifications")
            )
            if user_email:
                self.user = self.get_or_create_user(user_email)
                self.affected_user = self.user
        except Exception:
            if config.get("development"):
                log.error(
                    "Unable to connect to Dynamo. Trying to set user via development configuration",
                    exc_info=True,
                )
                # Development fallback: fabricate a signed, empty user entry
                # so local work can proceed without a reachable Dynamo.
                self.user = self.sign_request(
                    {
                        "last_updated": int(time.time()),
                        "username": user_email,
                        "requests": [],
                    }
                )
                self.affected_user = self.user
            else:
                log.error("Unable to get Dynamo table.", exc_info=True)
                raise
def write_resource_cache_data(self, data):
self.parallel_write_table(
self.resource_cache_table, data, ["resourceId", "resourceType"]
)
async def get_dynamic_config_yaml(self) -> bytes:
"""Retrieve dynamic configuration yaml."""
return await sync_to_async(self.get_dynamic_config_yaml_sync)()
def get_dynamic_config_yaml_sync(self) -> bytes:
"""Retrieve dynamic configuration yaml synchronously"""
c = b""
try:
current_config = self.dynamic_config.get_item(Key={"id": "master"})
if not current_config:
return c
compressed_config = current_config.get("Item", {}).get("config", "")
if not compressed_config:
return c
c = zlib.decompress(compressed_config.value)
except Exception: # noqa
sentry_sdk.capture_exception()
return c
    def get_dynamic_config_dict(self) -> dict:
        """Retrieve dynamic configuration dictionary that can be merged with primary configuration dictionary.

        Bridges sync/async contexts: inside a running event loop the
        synchronous Dynamo read is used directly (asyncio.run would raise
        there); otherwise the async wrapper is driven via asyncio.run.
        """
        try:
            loop = asyncio.get_running_loop()
        except RuntimeError:  # if cleanup: 'RuntimeError: There is no current event loop..'
            loop = None
        if loop and loop.is_running():
            current_config_yaml = self.get_dynamic_config_yaml_sync()
        else:
            current_config_yaml = asyncio.run(self.get_dynamic_config_yaml())
        config_d = yaml.safe_load(current_config_yaml)
        return config_d
async def get_all_api_health_alerts(self) -> list:
"""Return all requests. If a status is specified, only requests with the specified status will be returned.
:param status:
:return:
"""
response: dict = self.api_health_roles_table.scan()
items = response.get("Items", [])
while "LastEvaluatedKey" in response:
response = self.api_health_roles_table.scan(
ExclusiveStartKey=response["LastEvaluatedKey"]
)
items.extend(self._data_from_dynamo_replace(response["Items"]))
return items
async def get_api_health_alert_app(self, app_name) -> dict:
resp: dict = await sync_to_async(self.api_health_roles_table.get_item)(
Key={"appName": app_name}
)
return resp.get("Item", None)
async def write_api_health_alert_info(self, request, user_email: str):
"""
Writes a health alert role to the appropriate DynamoDB table
"""
function: str = (
f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
)
# enrich request
request["app_create_time"]: int = int(time.time())
request["updated_by"]: str = user_email
request["last_updated"]: int = int(time.time())
try:
await sync_to_async(self.api_health_roles_table.put_item)(
Item=self._data_to_dynamo_replace(request)
)
except Exception:
error = {
"message": "Unable to add new api_health info request",
"request": request,
"function": function,
}
log.error(error, exc_info=True)
raise
return request
async def update_api_health_alert_info(
self, request: dict, user_email=None, update_by=None, last_updated=None
):
"""
Update api_health_alert_info by roleName
"""
function: str = (
f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
)
# enrich request
if update_by:
request["updated_by"] = update_by
else:
request["updated_by"] = user_email
if last_updated:
request["last_updated"] = last_updated
else:
request["last_updated"] = int(time.time())
try:
await sync_to_async(self.api_health_roles_table.put_item)(
Item=self._data_to_dynamo_replace(request)
)
except Exception as e:
error: dict = {
"function": function,
"message": "Unable to update api_health_info request",
"request": request,
"error": str(e),
}
log.error(error, exc_info=True)
raise Exception(error)
return request
async def delete_api_health_alert_info(self, app: str) -> None:
"""
Delete api_health_alert_info by roleName
"""
function: str = (
f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
)
try:
await sync_to_async(self.api_health_roles_table.delete_item)(
Key={"appName": app}
)
except Exception:
error: dict = {
"function": function,
"message": "Unable to delete api_health info",
"app": app,
}
log.error(error, exc_info=True)
raise
    async def write_policy_request(
        self,
        user_email: str,
        justification: str,
        arn: str,
        policy_name: str,
        policy_changes: dict,
        resources: List[str],
        resource_policies: List[Dict],
        request_time: int = None,
        request_uuid=None,
        policy_status="pending",
        cross_account_request: bool = False,
        dry_run: bool = False,
    ):
        """Write a (v1) policy request to the policy requests DynamoDB table.

        :param user_email: requester; stored as both username and updated_by
        :param justification: free-text reason for the change
        :param arn: principal the policy change targets
        :param policy_name: name of the policy being changed
        :param policy_changes: change payload; JSON-serialized before storage
        :param resources: resource ARNs affected by the change
        :param resource_policies: companion resource-policy changes
        :param request_time: creation timestamp; defaults to now
        :param request_uuid: request ID; a fresh UUID4 when omitted
        :param policy_status: initial status (default "pending")
        :param cross_account_request: True when resources span accounts
        :param dry_run: build and return the request dict without writing it
        :returns: the request dict as (or as-if) stored
        :raises Exception: when the Dynamo write fails
        """
        request_time = request_time or int(time.time())
        # Craft the new request json
        timestamp = int(time.time())
        request_id = request_uuid or str(uuid.uuid4())
        new_request = {
            "request_id": request_id,
            "arn": arn,
            "status": policy_status,
            "justification": justification,
            "request_time": request_time,
            "updated_by": user_email,
            "last_updated": timestamp,
            "username": user_email,
            "policy_name": policy_name,
            "policy_changes": json.dumps(policy_changes),
            "resources": resources,
            "resource_policies": resource_policies,
            "cross_account_request": cross_account_request,
        }
        if not dry_run:
            try:
                await sync_to_async(self.policy_requests_table.put_item)(
                    Item=self._data_to_dynamo_replace(new_request)
                )
            except Exception as e:
                error = f"Unable to add new policy request: {new_request}: {str(e)}"
                log.error(error, exc_info=True)
                raise Exception(error)
        else:
            log_data = {
                "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
                "request": new_request,
                "message": "Dry run, skipping adding request to dynamo",
            }
            log.debug(log_data)
        return new_request
    async def write_policy_request_v2(self, extended_request: ExtendedRequestModel):
        """Write a v2 policy request to the policy requests DynamoDB table.

        The stored "arn" attribute is derived from the request principal:
        the principal ARN for ``AwsResource`` principals, or
        ``<repository_name>-<resource_identifier>`` for
        ``HoneybeeAwsResourceTemplate`` principals.

        :param extended_request: the fully-populated request model to persist
        :returns: the dict written to Dynamo
        :raises Exception: for unknown principal types or failed writes
        """
        new_request = {
            "request_id": extended_request.id,
            "principal": extended_request.principal.dict(),
            "status": extended_request.request_status.value,
            "justification": extended_request.justification,
            "request_time": extended_request.timestamp,
            "last_updated": int(time.time()),
            "version": "2",
            "extended_request": json.loads(extended_request.json()),
            "username": extended_request.requester_email,
        }
        if extended_request.principal.principal_type == "AwsResource":
            new_request["arn"] = extended_request.principal.principal_arn
        elif extended_request.principal.principal_type == "HoneybeeAwsResourceTemplate":
            repository_name = extended_request.principal.repository_name
            resource_identifier = extended_request.principal.resource_identifier
            new_request["arn"] = f"{repository_name}-{resource_identifier}"
        else:
            raise Exception("Invalid principal type")
        log_data = {
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "message": "Writing policy request v2 to Dynamo",
            "request": new_request,
        }
        log.debug(log_data)
        try:
            await sync_to_async(self.policy_requests_table.put_item)(
                Item=self._data_to_dynamo_replace(new_request)
            )
            log_data[
                "message"
            ] = "Successfully finished writing policy request v2 to Dynamo"
            log.debug(log_data)
        except Exception as e:
            log_data["message"] = "Error occurred writing policy request v2 to Dynamo"
            log_data["error"] = str(e)
            log.error(log_data, exc_info=True)
            error = f"{log_data['message']}: {str(e)}"
            raise Exception(error)
        return new_request
async def update_policy_request(self, updated_request):
"""
Update a policy request by request ID
Sample run:
update_policy_request(policy_changes)
"""
updated_request["last_updated"] = int(time.time())
try:
await sync_to_async(self.policy_requests_table.put_item)(
Item=self._data_to_dynamo_replace(updated_request)
)
except Exception as e:
error = f"Unable to add updated policy request: {updated_request}: {str(e)}"
log.error(error, exc_info=True)
raise Exception(error)
return updated_request
async def get_policy_requests(self, arn=None, request_id=None):
"""Reads a policy request from the appropriate DynamoDB table"""
if not arn and not request_id:
raise Exception("Must pass in ARN or policy request ID")
if request_id:
requests = self.policy_requests_table.query(
KeyConditionExpression="request_id = :ri",
ExpressionAttributeValues={":ri": request_id},
)
else:
requests = self.policy_requests_table.query(
KeyConditionExpression="arn = :arn",
ExpressionAttributeValues={":arn": arn},
)
matching_requests = []
if requests["Items"]:
items = self._data_from_dynamo_replace(requests["Items"])
items = await self.convert_policy_requests_to_v3(items)
matching_requests.extend(items)
return matching_requests
    async def convert_policy_requests_to_v3(self, requests):
        """Upgrade legacy v2 policy requests to the v3 principal format, in place.

        v2 rows stored a bare "arn"; v3 nests it as
        ``{"principal_arn": ..., "principal_type": "AwsResource"}`` on both
        the request and each of its changes. Converted rows are written back
        to Dynamo so the migration converges over time.
        """
        # TODO: Remove this function and its call sites after a migration
        # grace period (the original comment here was left unfinished).
        changed = False
        for request in requests:
            if not request.get("version") in ["2"]:
                continue
            if request.get("extended_request") and not request.get("principal"):
                principal_arn = request.pop("arn")
                request["principal"] = {
                    "principal_arn": principal_arn,
                    "principal_type": "AwsResource",
                }
                request["extended_request"]["principal"] = {
                    "principal_arn": principal_arn,
                    "principal_type": "AwsResource",
                }
                request.pop("arn", None)
            changes = (
                request.get("extended_request", {})
                .get("changes", {})
                .get("changes", [])
            )
            for change in changes:
                if not change.get("principal_arn"):
                    continue
                if not change.get("version") in ["2.0", "2", 2]:
                    continue
                change["principal"] = {
                    "principal_arn": change["principal_arn"],
                    "principal_type": "AwsResource",
                }
                change.pop("principal_arn")
                change["version"] = "3.0"
                changed = True
        if changed:
            # Persist upgraded rows so the conversion is one-time per row.
            self.parallel_write_table(self.policy_requests_table, requests)
        return requests
async def get_all_policy_requests(
self, status: Optional[str] = "pending"
) -> List[Dict[str, Union[int, List[str], str]]]:
"""Return all policy requests. If a status is specified, only requests with the specified status will be
returned.
:param status:
:return:
"""
requests = await sync_to_async(self.parallel_scan_table)(
self.policy_requests_table
)
requests = await self.convert_policy_requests_to_v3(requests)
return_value = []
if status:
for item in requests:
if status and item["status"] == status:
return_value.append(item)
else:
return_value = requests
return return_value
    async def update_dynamic_config(self, config: str, updated_by: str) -> None:
        """Take a YAML config and writes to DDB (The reason we use YAML instead of JSON is to preserve comments)."""
        # Validate that config loads as yaml, raises exception if not
        yaml.safe_load(config)
        stats.count("update_dynamic_config", tags={"updated_by": updated_by})
        current_config_entry = self.dynamic_config.get_item(Key={"id": "master"})
        if current_config_entry.get("Item"):
            # Preserve history: re-key the previous "master" entry under its
            # old updated_at timestamp before overwriting "master" below.
            old_config = {
                "id": current_config_entry["Item"]["updated_at"],
                "updated_by": current_config_entry["Item"]["updated_by"],
                "config": current_config_entry["Item"]["config"],
                "updated_at": str(int(time.time())),
            }
            self.dynamic_config.put_item(Item=self._data_to_dynamo_replace(old_config))
        # Config bodies are stored zlib-compressed.
        new_config = {
            "id": "master",
            "config": zlib.compress(config.encode()),
            "updated_by": updated_by,
            "updated_at": str(int(time.time())),
        }
        self.dynamic_config.put_item(Item=self._data_to_dynamo_replace(new_config))
def validate_signature(self, items):
signature = items.pop("signature")
if isinstance(signature, Binary):
signature = signature.value
json_request = json.dumps(items, sort_keys=True)
if not crypto.verify(json_request, signature):
raise Exception(f"Invalid signature for request: {json_request}")
def sign_request(
self, user_entry: Dict[str, Union[Decimal, List[str], Binary, str]]
) -> Dict[str, Union[Decimal, List[str], str, bytes]]:
"""
Sign the request and returned request with signature
:param user_entry:
:return:
"""
# Remove old signature if it exists
user_entry.pop("signature", None)
user_entry = self._data_from_dynamo_replace(user_entry)
json_request = json.dumps(user_entry, sort_keys=True, use_decimal=True)
sig = crypto.sign(json_request)
user_entry["signature"] = sig
return user_entry
    async def authenticate_user(self, login_attempt) -> AuthenticationResponse:
        """Validate a username/password login attempt against the users table.

        On any failure a deliberately generic error is returned (plus a
        message from wait_after_authentication_failure, which presumably
        throttles repeated failures — confirm in its implementation) so
        callers cannot distinguish unknown users from wrong passwords.
        """
        function: str = (
            f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
        )
        log_data = {
            "function": function,
            "user_email": login_attempt.username,
            "after_redirect_uri": login_attempt.after_redirect_uri,
        }
        user_entry = await sync_to_async(self.users_table.query)(
            KeyConditionExpression="username = :un",
            ExpressionAttributeValues={":un": login_attempt.username},
        )
        user = None
        # Same user-facing error for every failure mode (avoids enumeration).
        generic_error = ["User doesn't exist, or password is incorrect. "]
        if user_entry and "Items" in user_entry and len(user_entry["Items"]) == 1:
            user = user_entry["Items"][0]
        if not user:
            delay_error = await wait_after_authentication_failure(
                login_attempt.username
            )
            error = f"Unable to find user: {login_attempt.username}"
            log.error({**log_data, "message": error + delay_error})
            return AuthenticationResponse(
                authenticated=False, errors=generic_error + [delay_error]
            )
        if not user.get("password"):
            delay_error = await wait_after_authentication_failure(
                login_attempt.username
            )
            error = "User exists, but doesn't have a password stored in the database"
            log.error({**log_data, "message": error + delay_error})
            return AuthenticationResponse(
                authenticated=False, errors=generic_error + [delay_error]
            )
        # bcrypt compares against the stored salted hash (.value unwraps Binary)
        password_hash_matches = bcrypt.checkpw(
            login_attempt.password.encode("utf-8"), user["password"].value
        )
        if not password_hash_matches:
            delay_error = await wait_after_authentication_failure(
                login_attempt.username
            )
            error = "Password does not match. "
            log.error({**log_data, "message": error + delay_error})
            return AuthenticationResponse(
                authenticated=False, errors=generic_error + [delay_error]
            )
        return AuthenticationResponse(
            authenticated=True, username=user["username"], groups=user["groups"]
        )
def create_user(
self,
user_email,
password: Optional[str] = None,
groups: Optional[List[str]] = None,
):
if not groups:
groups = []
timestamp = int(time.time())
unsigned_user_entry = {
"created": timestamp,
"last_updated": timestamp,
"username": user_email,
"requests": [],
"groups": groups,
}
if password:
pw = bytes(password, "utf-8")
salt = bcrypt.gensalt()
unsigned_user_entry["password"] = bcrypt.hashpw(pw, salt)
user_entry = self.sign_request(unsigned_user_entry)
try:
self.users_table.put_item(Item=self._data_to_dynamo_replace(user_entry))
except Exception as e:
error = f"Unable to add user submission: {user_entry}: {str(e)}"
log.error(error, exc_info=True)
raise Exception(error)
return user_entry
    def update_user(
        self,
        user_email,
        password: Optional[str] = None,
        groups: Optional[List[str]] = None,
    ):
        """Update an existing user's password and/or group membership.

        :param user_email: username of the record to update
        :param password: when given, replaced with a fresh bcrypt hash
        :param groups: when non-empty, replaces the stored group list
        :returns: the signed, updated user entry
        :raises DataNotRetrievable: when no user record matches user_email
        :raises Exception: when the Dynamo write fails
        """
        if not groups:
            groups = []
        user_ddb = self.users_table.query(
            KeyConditionExpression="username = :un",
            ExpressionAttributeValues={":un": user_email},
        )
        user = None
        if user_ddb and "Items" in user_ddb and len(user_ddb["Items"]) == 1:
            user = user_ddb["Items"][0]
        if not user:
            raise DataNotRetrievable(f"Unable to find user: {user_email}")
        timestamp = int(time.time())
        if password:
            pw = bytes(password, "utf-8")
            salt = bcrypt.gensalt()
            user["password"] = bcrypt.hashpw(pw, salt)
        # Empty/omitted groups leave the stored group list untouched (the
        # normalization above makes `groups` falsy in that case).
        if groups:
            user["groups"] = groups
        user["last_updated"] = timestamp
        user_entry = self.sign_request(user)
        try:
            self.users_table.put_item(Item=self._data_to_dynamo_replace(user_entry))
        except Exception as e:
            error = f"Unable to add user submission: {user_entry}: {str(e)}"
            log.error(error, exc_info=True)
            raise Exception(error)
        return user_entry
def delete_user(self, user_email):
function: str = (
f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
)
log_data = {"function": function, "user_email": user_email}
log.debug(log_data)
user_entry = {"username": user_email}
try:
self.users_table.delete_item(Key=self._data_to_dynamo_replace(user_entry))
except Exception as e:
error = f"Unable to delete user: {user_entry}: {str(e)}"
log.error(error, exc_info=True)
raise Exception(error)
async def get_user(
self, user_email: str
) -> Optional[Dict[str, Union[Decimal, List[str], Binary, str]]]:
function: str = (
f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
)
log_data = {"function": function, "user_email": user_email}
log.debug(log_data)
user = self.users_table.query(
KeyConditionExpression="username = :un",
ExpressionAttributeValues={":un": user_email},
)
if user and "Items" in user and len(user["Items"]) == 1:
return user["Items"][0]
return None
def get_or_create_user(
self, user_email: str
) -> Dict[str, Union[Decimal, List[str], Binary, str]]:
function: str = (
f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
)
log_data = {"function": function, "user_email": user_email}
log.debug(log_data)
user = self.users_table.query(
KeyConditionExpression="username = :un",
ExpressionAttributeValues={":un": user_email},
)
items = []
if user and "Items" in user:
items = user["Items"]
if not items:
return self.create_user(user_email)
return items[0]
def resolve_request_ids(
self, request_ids: List[str]
) -> List[Dict[str, Union[int, str]]]:
requests = []
for request_id in request_ids:
request = self.requests_table.query(
KeyConditionExpression="request_id = :ri",
ExpressionAttributeValues={":ri": request_id},
)
if request["Items"]:
items = self._data_from_dynamo_replace(request["Items"])
requests.append(items[0])
else:
raise NoMatchingRequest(
f"No matching request for request_id: {request_id}"
)
return requests
def add_request_id_to_user(
self,
affected_user: Dict[str, Union[Decimal, List[str], Binary, str]],
request_id: str,
) -> None:
affected_user["requests"].append(request_id)
self.users_table.put_item(
Item=self._data_to_dynamo_replace(self.sign_request(affected_user))
)
    def add_request(
        self,
        user_email: str,
        group: str,
        justification: str,
        request_time: None = None,
        status: str = "pending",
        updated_by: Optional[str] = None,
    ) -> Dict[str, Union[int, str]]:
        """
        Add a group-access request to the requests Dynamo table.

        Also appends the new request's ID to the affected user's record
        (creating that user on first sight).

        Sample run:
            add_request("user@example.com", "engtest", "because")

        :param user_email: Email address of user
        :param group: Name of group user is requesting access to
        :param justification: free-text reason for the request
        :param request_time: creation timestamp; defaults to now
        :param status: initial request status (default "pending")
        :param updated_by: who last touched the request, if not the requester
        :returns: the request dict as written
        :raises PendingRequestAlreadyExists: when the user already has a
            pending request for the group
        :raises Exception: when the Dynamo write fails
        """
        """
        Request:
            group
            justification
            role
            request_time
            approval_time
            updated_by
            approval_reason
            status
        user@example.com:
            requests: []
            last_updated: 1
            signature: xxxx
        #pending: []
        #expired: []
        # How to expire requests if soemeone maliciously deletes content
        # How to query for all approved requests for group X
        # What if we want to send email saying your request is expiring in 7 days? Maybe celery to query all
        # What about concept of request ID? Maybe base64 encoded thing?
        # Need an all-in-one page to show all pending requests, all expired/approved requests
        """
        request_time = request_time or int(time.time())
        stats.count("new_group_request", tags={"user": user_email, "group": group})
        # Reuse the cached affected-user record when it already matches.
        if self.affected_user.get("username") != user_email:
            self.affected_user = self.get_or_create_user(user_email)
        # Get current user. Create if they do not already exist
        # self.user = self.get_or_create_user(user_email)
        # Get current user requests, which will validate existing signature
        # existing_request_ids = self.user["requests"]
        # existing_requests = self.resolve_request_ids(existing_request_ids)
        existing_pending_requests_for_group = self.get_requests_by_user(
            user_email, group, status="pending"
        )
        # Craft the new request json
        timestamp = int(time.time())
        request_id = str(uuid.uuid4())
        new_request = {
            "request_id": request_id,
            "group": group,
            "status": status,
            "justification": justification,
            "request_time": request_time,
            "updated_by": updated_by,
            "last_updated": timestamp,
            "username": user_email,
        }
        # See if user already has an active or pending request for the group
        if existing_pending_requests_for_group:
            for request in existing_pending_requests_for_group:
                raise PendingRequestAlreadyExists(
                    f"Pending request for group: {group} already exists: {request}"
                )
        try:
            self.requests_table.put_item(Item=self._data_to_dynamo_replace(new_request))
        except Exception as e:
            error = {
                "error": f"Unable to add user request: {str(e)}",
                "request": new_request,
            }
            log.error(error, exc_info=True)
            raise Exception(error)
        # Link the request to the user's record only after a successful write.
        self.add_request_id_to_user(self.affected_user, request_id)
        return new_request
async def get_all_requests(self, status=None):
"""Return all requests. If a status is specified, only requests with the specified status will be returned.
:param status:
:return:
"""
items = await sync_to_async(self.parallel_scan_table)(self.requests_table)
return_value = []
if status:
for item in items:
new_json = []
for j in item["json"]:
if j["status"] == status:
new_json.append(j)
item["json"] = new_json
if new_json:
return_value.append(item)
else:
return_value = items
return return_value
def get_requests_by_user(
self,
user_email: str,
group: str = None,
status: str = None,
use_cache: bool = False,
) -> dict:
"""Get requests by user. Group and status can also be specified to filter results.
:param user_email:
:param group:
:param status:
:return:
"""
red_key = f"USER_REQUESTS_{user_email}-{group}-{status}"
if use_cache:
requests_to_return = red.get(red_key)
if requests_to_return:
return json.loads(requests_to_return)
if self.affected_user.get("username") != user_email:
self.affected_user = self.get_or_create_user(user_email)
existing_request_ids = self.affected_user["requests"]
existing_requests = self.resolve_request_ids(existing_request_ids)
requests_to_return = []
if existing_requests:
for request in existing_requests:
if group and request["group"] != group:
continue
if status and request["status"] != status:
continue
requests_to_return.append(request)
if use_cache:
red.setex(red_key, 120, json.dumps(requests_to_return))
return requests_to_return
    def change_request_status(
        self, user_email, group, new_status, updated_by=None, reviewer_comments=None
    ):
        """
        Change the status of every request this user has for ``group``.

        :param user_email: Email address of the requesting user
        :param group: Group whose request(s) should be updated
        :param new_status: One of POSSIBLE_STATUSES
        :param updated_by: Reviewer identity; required when approving
        :param reviewer_comments: Optional free-form reviewer note
        :return: The last request record that was modified (or None)
        :raises NoExistingRequest: if the user has no request for the group
        """
        stats.count(
            "update_group_request",
            tags={
                "user": user_email,
                "group": group,
                "new_status": new_status,
                "updated_by": updated_by,
            },
        )
        modified_request = None
        if self.affected_user.get("username") != user_email:
            self.affected_user = self.get_or_create_user(user_email)
        timestamp = int(time.time())
        if new_status not in POSSIBLE_STATUSES:
            raise Exception(
                f"Invalid status. Status must be one of {POSSIBLE_STATUSES}"
            )
        if new_status == "approved" and not updated_by:
            raise Exception(
                "You must provide `updated_by` to change a request status to approved."
            )
        existing_requests = self.get_requests_by_user(user_email)
        if existing_requests:
            updated = False
            for request in existing_requests:
                if request["group"] == group:
                    # Persist each matching request immediately; a failed write
                    # aborts the whole operation.
                    request["updated_by"] = updated_by
                    request["status"] = new_status
                    request["last_updated"] = timestamp
                    request["reviewer_comments"] = reviewer_comments
                    modified_request = request
                    try:
                        self.requests_table.put_item(
                            Item=self._data_to_dynamo_replace(request)
                        )
                    except Exception as e:
                        error = f"Unable to add user request: {request}: {str(e)}"
                        log.error(error, exc_info=True)
                        raise Exception(error)
                    updated = True
            if not updated:
                raise NoExistingRequest(
                    f"Unable to find existing request for user: {user_email} and group: {group}."
                )
        else:
            raise NoExistingRequest(
                f"Unable to find existing requests for user: {user_email}"
            )
        return modified_request
    def change_request_status_by_id(
        self,
        request_id: str,
        new_status: str,
        updated_by: Optional[str] = None,
        reviewer_comments: Optional[str] = None,
    ) -> Dict[str, Union[int, str]]:
        """
        Change a request's status by its request ID.

        :param request_id: UUID of the request to update
        :param new_status: One of POSSIBLE_STATUSES
        :param updated_by: Reviewer identity; required when approving
        :param reviewer_comments: Optional free-form reviewer note
        :return: the modified request record
        """
        modified_request = None
        if new_status == "approved" and not updated_by:
            raise Exception(
                "You must provide `updated_by` to change a request status to approved."
            )
        requests = self.resolve_request_ids([request_id])
        if new_status not in POSSIBLE_STATUSES:
            raise Exception(
                f"Invalid status. Status must be one of {POSSIBLE_STATUSES}"
            )
        for request in requests:
            request["status"] = new_status
            request["updated_by"] = updated_by
            request["last_updated"] = int(time.time())
            request["reviewer_comments"] = reviewer_comments
            modified_request = request
            try:
                self.requests_table.put_item(Item=self._data_to_dynamo_replace(request))
            except Exception as e:
                error = f"Unable to add user request: {request} : {str(e)}"
                log.error(error, exc_info=True)
                raise Exception(error)
        return modified_request
def get_all_policies(self):
"""Return all policies."""
response = self.policies_table.scan()
items = []
if response and "Items" in response:
items = self._data_from_dynamo_replace(response["Items"])
while "LastEvaluatedKey" in response:
response = self.policies_table.scan(
ExclusiveStartKey=response["LastEvaluatedKey"]
)
items.extend(self._data_from_dynamo_replace(response["Items"]))
return items
async def create_group_log_entry(
self,
group: str,
username: str,
updated_by: str,
action: str,
updated_at: None = None,
extra: None = None,
) -> None:
updated_at = updated_at or int(time.time())
log_id = str(uuid.uuid4())
log_entry = {
"uuid": log_id,
"group": group,
"username": username,
"updated_by": updated_by,
"updated_at": updated_at,
"action": action,
"extra": extra,
}
self.group_log.put_item(Item=self._data_to_dynamo_replace(log_entry))
async def get_all_audit_logs(self) -> List[Dict[str, Union[int, None, str]]]:
response = await sync_to_async(self.group_log.scan)()
items = []
if response and "Items" in response:
items = self._data_from_dynamo_replace(response["Items"])
while "LastEvaluatedKey" in response:
response = await sync_to_async(self.group_log.scan)(
ExclusiveStartKey=response["LastEvaluatedKey"]
)
items.extend(self._data_from_dynamo_replace(response["Items"]))
return items
async def get_all_pending_requests(self):
return await self.get_all_requests(status="pending")
def batch_write_cloudtrail_events(self, items):
with self.cloudtrail_table.batch_writer(
overwrite_by_pkeys=["arn", "request_id"]
) as batch:
for item in items:
batch.put_item(Item=self._data_to_dynamo_replace(item))
return True
async def get_top_cloudtrail_errors_by_arn(self, arn, n=5):
response: dict = await sync_to_async(self.cloudtrail_table.query)(
KeyConditionExpression=Key("arn").eq(arn)
)
items = response.get("Items", [])
aggregated_errors = defaultdict(dict)
for item in items:
if int(item["ttl"]) < int(time.time()):
continue
event_call = item["event_call"]
event_resource = item.get("resource", "")
event_string = f"{event_call}|||{event_resource}"
if not aggregated_errors.get(event_string):
aggregated_errors[event_string]["count"] = 0
aggregated_errors[event_string]["generated_policy"] = item.get(
"generated_policy", {}
)
aggregated_errors[event_string]["count"] += 1
top_n_errors = {
k: v
for k, v in sorted(
aggregated_errors.items(),
key=lambda item: item[1]["count"],
reverse=True,
)[:n]
}
return top_n_errors
def count_arn_errors(self, error_count, items):
for item in items:
arn = item.get("arn")
if not error_count.get(arn):
error_count[arn] = 0
error_count[arn] += item.get("count", 1)
return error_count
async def migrate():
    """One-off migration: upgrade v2.0 policy-request changes to the v3.0 shape.

    Rewrites every change that still carries a bare ``principal_arn`` into the
    nested ``principal`` structure, then bulk-writes all requests back.
    """
    # Get all policy requests
    # iterate through changes
    # if has principal_arn, convert
    dynamo = UserDynamoHandler("consoleme")
    requests = await dynamo.get_all_policy_requests(status=None)
    for request in requests:
        changes = (
            request.get("extended_request", {}).get("changes", {}).get("changes", [])
        )
        for change in changes:
            if not change.get("principal_arn"):
                continue
            # Only convert changes explicitly marked as version "2.0".
            if not change.get("version") == "2.0":
                continue
            change["principal"] = {
                "principal_arn": change["principal_arn"],
                "principal_type": "AwsResource",
            }
            change.pop("principal_arn")
            change["version"] = "3.0"
    # NOTE(review): writes the full request list back even when nothing was
    # converted — presumably acceptable for a one-off script; confirm.
    dynamo.parallel_write_table(dynamo.policy_requests_table, requests)
162,242 | import asyncio
import sys
from consoleme.lib.dynamo import UserDynamoHandler
class UserDynamoHandler(BaseDynamoHandler):
    """DynamoDB access layer for ConsoleMe users, requests, config and caches."""

    def __init__(self, user_email: Optional[str] = None) -> None:
        """Resolve all table handles; optionally load/create the calling user.

        :param user_email: if given, the user's Dynamo entry is fetched
            (created when missing) and stored on ``self.user`` and
            ``self.affected_user``.
        """
        try:
            self.requests_table = self._get_dynamo_table(
                config.get("aws.requests_dynamo_table", "consoleme_requests_global")
            )
            self.users_table = self._get_dynamo_table(
                config.get("aws.users_dynamo_table", "consoleme_users_global")
            )
            self.group_log = self._get_dynamo_table(
                config.get("aws.group_log_dynamo_table", "consoleme_audit_global")
            )
            # NOTE(review): this reuses the "aws.group_log_dynamo_table" config
            # key (same as group_log above) with a different default — looks
            # like a copy/paste slip; a dedicated config key was probably
            # intended. Left unchanged to avoid breaking existing deployments.
            self.dynamic_config = self._get_dynamo_table(
                config.get("aws.group_log_dynamo_table", "consoleme_config_global")
            )
            self.policy_requests_table = self._get_dynamo_table(
                config.get(
                    "aws.policy_requests_dynamo_table", "consoleme_policy_requests"
                )
            )
            self.api_health_roles_table = self._get_dynamo_table(
                config.get(
                    "aws.api_health_apps_table_dynamo_table",
                    "consoleme_api_health_apps",
                )
            )
            self.resource_cache_table = self._get_dynamo_table(
                config.get(
                    "aws.resource_cache_dynamo_table", "consoleme_resource_cache"
                )
            )
            self.cloudtrail_table = self._get_dynamo_table(
                config.get("aws.cloudtrail_table", "consoleme_cloudtrail")
            )
            self.notifications_table = self._get_dynamo_table(
                config.get("aws.notifications_table", "consoleme_notifications")
            )
            if user_email:
                self.user = self.get_or_create_user(user_email)
                self.affected_user = self.user
        except Exception:
            if config.get("development"):
                # In development, fall back to an in-memory signed user entry
                # instead of failing hard on missing/unreachable tables.
                log.error(
                    "Unable to connect to Dynamo. Trying to set user via development configuration",
                    exc_info=True,
                )
                self.user = self.sign_request(
                    {
                        "last_updated": int(time.time()),
                        "username": user_email,
                        "requests": [],
                    }
                )
                self.affected_user = self.user
            else:
                log.error("Unable to get Dynamo table.", exc_info=True)
                raise
def write_resource_cache_data(self, data):
self.parallel_write_table(
self.resource_cache_table, data, ["resourceId", "resourceType"]
)
async def get_dynamic_config_yaml(self) -> bytes:
"""Retrieve dynamic configuration yaml."""
return await sync_to_async(self.get_dynamic_config_yaml_sync)()
def get_dynamic_config_yaml_sync(self) -> bytes:
"""Retrieve dynamic configuration yaml synchronously"""
c = b""
try:
current_config = self.dynamic_config.get_item(Key={"id": "master"})
if not current_config:
return c
compressed_config = current_config.get("Item", {}).get("config", "")
if not compressed_config:
return c
c = zlib.decompress(compressed_config.value)
except Exception: # noqa
sentry_sdk.capture_exception()
return c
def get_dynamic_config_dict(self) -> dict:
"""Retrieve dynamic configuration dictionary that can be merged with primary configuration dictionary."""
try:
loop = asyncio.get_running_loop()
except RuntimeError: # if cleanup: 'RuntimeError: There is no current event loop..'
loop = None
if loop and loop.is_running():
current_config_yaml = self.get_dynamic_config_yaml_sync()
else:
current_config_yaml = asyncio.run(self.get_dynamic_config_yaml())
config_d = yaml.safe_load(current_config_yaml)
return config_d
async def get_all_api_health_alerts(self) -> list:
"""Return all requests. If a status is specified, only requests with the specified status will be returned.
:param status:
:return:
"""
response: dict = self.api_health_roles_table.scan()
items = response.get("Items", [])
while "LastEvaluatedKey" in response:
response = self.api_health_roles_table.scan(
ExclusiveStartKey=response["LastEvaluatedKey"]
)
items.extend(self._data_from_dynamo_replace(response["Items"]))
return items
async def get_api_health_alert_app(self, app_name) -> dict:
resp: dict = await sync_to_async(self.api_health_roles_table.get_item)(
Key={"appName": app_name}
)
return resp.get("Item", None)
async def write_api_health_alert_info(self, request, user_email: str):
"""
Writes a health alert role to the appropriate DynamoDB table
"""
function: str = (
f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
)
# enrich request
request["app_create_time"]: int = int(time.time())
request["updated_by"]: str = user_email
request["last_updated"]: int = int(time.time())
try:
await sync_to_async(self.api_health_roles_table.put_item)(
Item=self._data_to_dynamo_replace(request)
)
except Exception:
error = {
"message": "Unable to add new api_health info request",
"request": request,
"function": function,
}
log.error(error, exc_info=True)
raise
return request
async def update_api_health_alert_info(
self, request: dict, user_email=None, update_by=None, last_updated=None
):
"""
Update api_health_alert_info by roleName
"""
function: str = (
f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
)
# enrich request
if update_by:
request["updated_by"] = update_by
else:
request["updated_by"] = user_email
if last_updated:
request["last_updated"] = last_updated
else:
request["last_updated"] = int(time.time())
try:
await sync_to_async(self.api_health_roles_table.put_item)(
Item=self._data_to_dynamo_replace(request)
)
except Exception as e:
error: dict = {
"function": function,
"message": "Unable to update api_health_info request",
"request": request,
"error": str(e),
}
log.error(error, exc_info=True)
raise Exception(error)
return request
async def delete_api_health_alert_info(self, app: str) -> None:
"""
Delete api_health_alert_info by roleName
"""
function: str = (
f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
)
try:
await sync_to_async(self.api_health_roles_table.delete_item)(
Key={"appName": app}
)
except Exception:
error: dict = {
"function": function,
"message": "Unable to delete api_health info",
"app": app,
}
log.error(error, exc_info=True)
raise
    async def write_policy_request(
        self,
        user_email: str,
        justification: str,
        arn: str,
        policy_name: str,
        policy_changes: dict,
        resources: List[str],
        resource_policies: List[Dict],
        request_time: Optional[int] = None,
        request_uuid=None,
        policy_status="pending",
        cross_account_request: bool = False,
        dry_run: bool = False,
    ):
        """
        Writes a policy request to the appropriate DynamoDB table

        dry_run will create the request format, but won't actually write it

        Sample run:
            write_policy_request(policy_changes)

        :param user_email: requester (also recorded as updated_by)
        :param request_time: epoch seconds; defaults to now
        :param request_uuid: request ID; a fresh UUID4 when omitted
        :param policy_status: initial status (default "pending")
        :param dry_run: build and return the record without writing it
        :return: the request record
        """
        request_time = request_time or int(time.time())
        # Craft the new request json
        timestamp = int(time.time())
        request_id = request_uuid or str(uuid.uuid4())
        new_request = {
            "request_id": request_id,
            "arn": arn,
            "status": policy_status,
            "justification": justification,
            "request_time": request_time,
            "updated_by": user_email,
            "last_updated": timestamp,
            "username": user_email,
            "policy_name": policy_name,
            # Changes are stored JSON-serialized, not as a nested map.
            "policy_changes": json.dumps(policy_changes),
            "resources": resources,
            "resource_policies": resource_policies,
            "cross_account_request": cross_account_request,
        }
        if not dry_run:
            try:
                await sync_to_async(self.policy_requests_table.put_item)(
                    Item=self._data_to_dynamo_replace(new_request)
                )
            except Exception as e:
                error = f"Unable to add new policy request: {new_request}: {str(e)}"
                log.error(error, exc_info=True)
                raise Exception(error)
        else:
            log_data = {
                "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
                "request": new_request,
                "message": "Dry run, skipping adding request to dynamo",
            }
            log.debug(log_data)
        return new_request
    async def write_policy_request_v2(self, extended_request: ExtendedRequestModel):
        """
        Writes a policy request v2 to the appropriate DynamoDB table

        Sample run:
            write_policy_request_v2(request)

        :param extended_request: the extended request model to persist
        :return: the record written to Dynamo
        :raises Exception: on unknown principal types or Dynamo write failures
        """
        new_request = {
            "request_id": extended_request.id,
            "principal": extended_request.principal.dict(),
            "status": extended_request.request_status.value,
            "justification": extended_request.justification,
            "request_time": extended_request.timestamp,
            "last_updated": int(time.time()),
            "version": "2",
            "extended_request": json.loads(extended_request.json()),
            "username": extended_request.requester_email,
        }
        # "arn" is the lookup key used by get_policy_requests; for template
        # principals a synthetic "<repository>-<resource>" identifier is used.
        if extended_request.principal.principal_type == "AwsResource":
            new_request["arn"] = extended_request.principal.principal_arn
        elif extended_request.principal.principal_type == "HoneybeeAwsResourceTemplate":
            repository_name = extended_request.principal.repository_name
            resource_identifier = extended_request.principal.resource_identifier
            new_request["arn"] = f"{repository_name}-{resource_identifier}"
        else:
            raise Exception("Invalid principal type")
        log_data = {
            "function": f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}",
            "message": "Writing policy request v2 to Dynamo",
            "request": new_request,
        }
        log.debug(log_data)
        try:
            await sync_to_async(self.policy_requests_table.put_item)(
                Item=self._data_to_dynamo_replace(new_request)
            )
            log_data[
                "message"
            ] = "Successfully finished writing policy request v2 to Dynamo"
            log.debug(log_data)
        except Exception as e:
            log_data["message"] = "Error occurred writing policy request v2 to Dynamo"
            log_data["error"] = str(e)
            log.error(log_data, exc_info=True)
            error = f"{log_data['message']}: {str(e)}"
            raise Exception(error)
        return new_request
async def update_policy_request(self, updated_request):
"""
Update a policy request by request ID
Sample run:
update_policy_request(policy_changes)
"""
updated_request["last_updated"] = int(time.time())
try:
await sync_to_async(self.policy_requests_table.put_item)(
Item=self._data_to_dynamo_replace(updated_request)
)
except Exception as e:
error = f"Unable to add updated policy request: {updated_request}: {str(e)}"
log.error(error, exc_info=True)
raise Exception(error)
return updated_request
async def get_policy_requests(self, arn=None, request_id=None):
"""Reads a policy request from the appropriate DynamoDB table"""
if not arn and not request_id:
raise Exception("Must pass in ARN or policy request ID")
if request_id:
requests = self.policy_requests_table.query(
KeyConditionExpression="request_id = :ri",
ExpressionAttributeValues={":ri": request_id},
)
else:
requests = self.policy_requests_table.query(
KeyConditionExpression="arn = :arn",
ExpressionAttributeValues={":arn": arn},
)
matching_requests = []
if requests["Items"]:
items = self._data_from_dynamo_replace(requests["Items"])
items = await self.convert_policy_requests_to_v3(items)
matching_requests.extend(items)
return matching_requests
    async def convert_policy_requests_to_v3(self, requests):
        """Upgrade legacy v2 policy requests to the v3 ``principal`` shape in place.

        Converted requests are bulk-written back to Dynamo. Returns the
        (possibly mutated) ``requests`` list.
        """
        # Remove this function and calls to this function after a grace period of
        # time has passed and stored requests have all been converted to v3.
        changed = False
        for request in requests:
            if not request.get("version") in ["2"]:
                continue
            if request.get("extended_request") and not request.get("principal"):
                # Top-level conversion: replace the bare "arn" with a principal.
                principal_arn = request.pop("arn")
                request["principal"] = {
                    "principal_arn": principal_arn,
                    "principal_type": "AwsResource",
                }
                request["extended_request"]["principal"] = {
                    "principal_arn": principal_arn,
                    "principal_type": "AwsResource",
                }
                request.pop("arn", None)
            changes = (
                request.get("extended_request", {})
                .get("changes", {})
                .get("changes", [])
            )
            for change in changes:
                if not change.get("principal_arn"):
                    continue
                if not change.get("version") in ["2.0", "2", 2]:
                    continue
                # Per-change conversion mirrors the top-level one.
                change["principal"] = {
                    "principal_arn": change["principal_arn"],
                    "principal_type": "AwsResource",
                }
                change.pop("principal_arn")
                change["version"] = "3.0"
                changed = True
        if changed:
            self.parallel_write_table(self.policy_requests_table, requests)
        return requests
async def get_all_policy_requests(
self, status: Optional[str] = "pending"
) -> List[Dict[str, Union[int, List[str], str]]]:
"""Return all policy requests. If a status is specified, only requests with the specified status will be
returned.
:param status:
:return:
"""
requests = await sync_to_async(self.parallel_scan_table)(
self.policy_requests_table
)
requests = await self.convert_policy_requests_to_v3(requests)
return_value = []
if status:
for item in requests:
if status and item["status"] == status:
return_value.append(item)
else:
return_value = requests
return return_value
    async def update_dynamic_config(self, config: str, updated_by: str) -> None:
        """Take a YAML config and writes to DDB (The reason we use YAML instead of JSON is to preserve comments).

        The previous "master" entry is archived under its updated_at timestamp
        before the new zlib-compressed config replaces it.

        :param config: new configuration as a YAML string
        :param updated_by: identity performing the update
        """
        # Validate that config loads as yaml, raises exception if not
        yaml.safe_load(config)
        stats.count("update_dynamic_config", tags={"updated_by": updated_by})
        current_config_entry = self.dynamic_config.get_item(Key={"id": "master"})
        if current_config_entry.get("Item"):
            # Archive the current master config under its last-updated time.
            old_config = {
                "id": current_config_entry["Item"]["updated_at"],
                "updated_by": current_config_entry["Item"]["updated_by"],
                "config": current_config_entry["Item"]["config"],
                "updated_at": str(int(time.time())),
            }
            self.dynamic_config.put_item(Item=self._data_to_dynamo_replace(old_config))
        new_config = {
            "id": "master",
            # Stored compressed; get_dynamic_config_yaml_sync decompresses.
            "config": zlib.compress(config.encode()),
            "updated_by": updated_by,
            "updated_at": str(int(time.time())),
        }
        self.dynamic_config.put_item(Item=self._data_to_dynamo_replace(new_config))
    def validate_signature(self, items):
        """Verify the embedded signature of an entry; raises on mismatch.

        NOTE: pops "signature" from ``items`` as a side effect before
        serializing the remainder for verification.
        """
        signature = items.pop("signature")
        if isinstance(signature, Binary):
            # Dynamo returns binary attributes wrapped in Binary; unwrap first.
            signature = signature.value
        json_request = json.dumps(items, sort_keys=True)
        if not crypto.verify(json_request, signature):
            raise Exception(f"Invalid signature for request: {json_request}")
    def sign_request(
        self, user_entry: Dict[str, Union[Decimal, List[str], Binary, str]]
    ) -> Dict[str, Union[Decimal, List[str], str, bytes]]:
        """
        Sign the request and returned request with signature

        :param user_entry: user record to sign (any previous signature is
            discarded before signing)
        :return: the same record with a fresh "signature" field
        """
        # Remove old signature if it exists
        user_entry.pop("signature", None)
        user_entry = self._data_from_dynamo_replace(user_entry)
        # NOTE(review): `use_decimal` is a simplejson extension — this assumes
        # the module's `json` import is simplejson, not stdlib json; confirm.
        json_request = json.dumps(user_entry, sort_keys=True, use_decimal=True)
        sig = crypto.sign(json_request)
        user_entry["signature"] = sig
        return user_entry
    async def authenticate_user(self, login_attempt) -> AuthenticationResponse:
        """Validate a username/password login attempt against the users table.

        Every failure path first awaits wait_after_authentication_failure (a
        deliberate delay) and returns the same generic client-facing error,
        while the specific cause is only logged server-side.
        """
        function: str = (
            f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
        )
        log_data = {
            "function": function,
            "user_email": login_attempt.username,
            "after_redirect_uri": login_attempt.after_redirect_uri,
        }
        user_entry = await sync_to_async(self.users_table.query)(
            KeyConditionExpression="username = :un",
            ExpressionAttributeValues={":un": login_attempt.username},
        )
        user = None
        # Same client-facing message for every failure mode (no user enumeration).
        generic_error = ["User doesn't exist, or password is incorrect. "]
        if user_entry and "Items" in user_entry and len(user_entry["Items"]) == 1:
            user = user_entry["Items"][0]
        if not user:
            delay_error = await wait_after_authentication_failure(
                login_attempt.username
            )
            error = f"Unable to find user: {login_attempt.username}"
            log.error({**log_data, "message": error + delay_error})
            return AuthenticationResponse(
                authenticated=False, errors=generic_error + [delay_error]
            )
        if not user.get("password"):
            delay_error = await wait_after_authentication_failure(
                login_attempt.username
            )
            error = "User exists, but doesn't have a password stored in the database"
            log.error({**log_data, "message": error + delay_error})
            return AuthenticationResponse(
                authenticated=False, errors=generic_error + [delay_error]
            )
        # Compare against the stored bcrypt hash (a Dynamo Binary; .value is bytes).
        password_hash_matches = bcrypt.checkpw(
            login_attempt.password.encode("utf-8"), user["password"].value
        )
        if not password_hash_matches:
            delay_error = await wait_after_authentication_failure(
                login_attempt.username
            )
            error = "Password does not match. "
            log.error({**log_data, "message": error + delay_error})
            return AuthenticationResponse(
                authenticated=False, errors=generic_error + [delay_error]
            )
        return AuthenticationResponse(
            authenticated=True, username=user["username"], groups=user["groups"]
        )
def create_user(
self,
user_email,
password: Optional[str] = None,
groups: Optional[List[str]] = None,
):
if not groups:
groups = []
timestamp = int(time.time())
unsigned_user_entry = {
"created": timestamp,
"last_updated": timestamp,
"username": user_email,
"requests": [],
"groups": groups,
}
if password:
pw = bytes(password, "utf-8")
salt = bcrypt.gensalt()
unsigned_user_entry["password"] = bcrypt.hashpw(pw, salt)
user_entry = self.sign_request(unsigned_user_entry)
try:
self.users_table.put_item(Item=self._data_to_dynamo_replace(user_entry))
except Exception as e:
error = f"Unable to add user submission: {user_entry}: {str(e)}"
log.error(error, exc_info=True)
raise Exception(error)
return user_entry
def update_user(
self,
user_email,
password: Optional[str] = None,
groups: Optional[List[str]] = None,
):
if not groups:
groups = []
user_ddb = self.users_table.query(
KeyConditionExpression="username = :un",
ExpressionAttributeValues={":un": user_email},
)
user = None
if user_ddb and "Items" in user_ddb and len(user_ddb["Items"]) == 1:
user = user_ddb["Items"][0]
if not user:
raise DataNotRetrievable(f"Unable to find user: {user_email}")
timestamp = int(time.time())
if password:
pw = bytes(password, "utf-8")
salt = bcrypt.gensalt()
user["password"] = bcrypt.hashpw(pw, salt)
if groups:
user["groups"] = groups
user["last_updated"] = timestamp
user_entry = self.sign_request(user)
try:
self.users_table.put_item(Item=self._data_to_dynamo_replace(user_entry))
except Exception as e:
error = f"Unable to add user submission: {user_entry}: {str(e)}"
log.error(error, exc_info=True)
raise Exception(error)
return user_entry
def delete_user(self, user_email):
function: str = (
f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
)
log_data = {"function": function, "user_email": user_email}
log.debug(log_data)
user_entry = {"username": user_email}
try:
self.users_table.delete_item(Key=self._data_to_dynamo_replace(user_entry))
except Exception as e:
error = f"Unable to delete user: {user_entry}: {str(e)}"
log.error(error, exc_info=True)
raise Exception(error)
async def get_user(
self, user_email: str
) -> Optional[Dict[str, Union[Decimal, List[str], Binary, str]]]:
function: str = (
f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
)
log_data = {"function": function, "user_email": user_email}
log.debug(log_data)
user = self.users_table.query(
KeyConditionExpression="username = :un",
ExpressionAttributeValues={":un": user_email},
)
if user and "Items" in user and len(user["Items"]) == 1:
return user["Items"][0]
return None
    def get_or_create_user(
        self, user_email: str
    ) -> Dict[str, Union[Decimal, List[str], Binary, str]]:
        """Return the user's Dynamo entry, creating (and signing) it if absent."""
        function: str = (
            f"{__name__}.{self.__class__.__name__}.{sys._getframe().f_code.co_name}"
        )
        log_data = {"function": function, "user_email": user_email}
        log.debug(log_data)
        user = self.users_table.query(
            KeyConditionExpression="username = :un",
            ExpressionAttributeValues={":un": user_email},
        )
        items = []
        if user and "Items" in user:
            items = user["Items"]
        # No existing entry for this username: create one.
        if not items:
            return self.create_user(user_email)
        return items[0]
    def resolve_request_ids(
        self, request_ids: List[str]
    ) -> List[Dict[str, Union[int, str]]]:
        """Fetch the full request record for each request ID.

        :raises NoMatchingRequest: if any ID has no row in the requests table
        """
        requests = []
        for request_id in request_ids:
            request = self.requests_table.query(
                KeyConditionExpression="request_id = :ri",
                ExpressionAttributeValues={":ri": request_id},
            )
            if request["Items"]:
                items = self._data_from_dynamo_replace(request["Items"])
                requests.append(items[0])
            else:
                raise NoMatchingRequest(
                    f"No matching request for request_id: {request_id}"
                )
        return requests
    def add_request_id_to_user(
        self,
        affected_user: Dict[str, Union[Decimal, List[str], Binary, str]],
        request_id: str,
    ) -> None:
        """Append ``request_id`` to the user's request list and persist the
        re-signed user entry."""
        affected_user["requests"].append(request_id)
        self.users_table.put_item(
            Item=self._data_to_dynamo_replace(self.sign_request(affected_user))
        )
    def add_request(
        self,
        user_email: str,
        group: str,
        justification: str,
        request_time: Optional[int] = None,
        status: str = "pending",
        updated_by: Optional[str] = None,
    ) -> Dict[str, Union[int, str]]:
        """
        Add a user request to the dynamo table

        Sample run:
            add_request("user@example.com", "engtest", "because")

        :param user_email: Email address of user
        :param group: Name of group user is requesting access to
        :param justification: free-form reason for the request
        :param request_time: epoch seconds; defaults to now
        :param status: initial request status (default "pending")
        :param updated_by: identity that last touched the request
        :return: the newly created request record
        :raises PendingRequestAlreadyExists: if a pending request for the
            same group already exists
        """
        """
        Request:
            group
            justification
            role
            request_time
            approval_time
            updated_by
            approval_reason
            status

        user@example.com:
            requests: []
            last_updated: 1
            signature: xxxx
        #pending: []
        #expired: []
        # How to expire requests if soemeone maliciously deletes content
        # How to query for all approved requests for group X
        # What if we want to send email saying your request is expiring in 7 days? Maybe celery to query all
        # What about concept of request ID? Maybe base64 encoded thing?
        # Need an all-in-one page to show all pending requests, all expired/approved requests
        """
        request_time = request_time or int(time.time())
        stats.count("new_group_request", tags={"user": user_email, "group": group})
        # Ensure self.affected_user refers to the requester before mutating it.
        if self.affected_user.get("username") != user_email:
            self.affected_user = self.get_or_create_user(user_email)
        # Get current user. Create if they do not already exist
        # self.user = self.get_or_create_user(user_email)
        # Get current user requests, which will validate existing signature
        # existing_request_ids = self.user["requests"]
        # existing_requests = self.resolve_request_ids(existing_request_ids)
        existing_pending_requests_for_group = self.get_requests_by_user(
            user_email, group, status="pending"
        )
        # Craft the new request json
        timestamp = int(time.time())
        request_id = str(uuid.uuid4())
        new_request = {
            "request_id": request_id,
            "group": group,
            "status": status,
            "justification": justification,
            "request_time": request_time,
            "updated_by": updated_by,
            "last_updated": timestamp,
            "username": user_email,
        }
        # See if user already has an active or pending request for the group
        if existing_pending_requests_for_group:
            for request in existing_pending_requests_for_group:
                raise PendingRequestAlreadyExists(
                    f"Pending request for group: {group} already exists: {request}"
                )
        try:
            self.requests_table.put_item(Item=self._data_to_dynamo_replace(new_request))
        except Exception as e:
            error = {
                "error": f"Unable to add user request: {str(e)}",
                "request": new_request,
            }
            log.error(error, exc_info=True)
            raise Exception(error)
        # Record the new request ID on the user's signed entry.
        self.add_request_id_to_user(self.affected_user, request_id)
        return new_request
    async def get_all_requests(self, status=None):
        """Return all requests. If a status is specified, only requests with the specified status will be returned.

        :param status: optional status filter applied to each item's "json" entries
        :return: list of request items
        """
        items = await sync_to_async(self.parallel_scan_table)(self.requests_table)
        return_value = []
        if status:
            for item in items:
                # Keep only the sub-entries matching the requested status;
                # drop items left with no matching sub-entries.
                new_json = []
                for j in item["json"]:
                    if j["status"] == status:
                        new_json.append(j)
                item["json"] = new_json
                if new_json:
                    return_value.append(item)
        else:
            return_value = items
        return return_value
    def get_requests_by_user(
        self,
        user_email: str,
        group: Optional[str] = None,
        status: Optional[str] = None,
        use_cache: bool = False,
    ) -> List[dict]:
        """Get requests by user. Group and status can also be specified to filter results.

        :param user_email: Email address of user
        :param group: only return requests for this group
        :param status: only return requests with this status
        :param use_cache: serve/refresh a 120-second Redis cache entry
        :return: list of matching request records
        """
        red_key = f"USER_REQUESTS_{user_email}-{group}-{status}"
        if use_cache:
            requests_to_return = red.get(red_key)
            if requests_to_return:
                return json.loads(requests_to_return)
        if self.affected_user.get("username") != user_email:
            self.affected_user = self.get_or_create_user(user_email)
        existing_request_ids = self.affected_user["requests"]
        existing_requests = self.resolve_request_ids(existing_request_ids)
        requests_to_return = []
        if existing_requests:
            for request in existing_requests:
                if group and request["group"] != group:
                    continue
                if status and request["status"] != status:
                    continue
                requests_to_return.append(request)
        if use_cache:
            # Cache briefly so bursts of UI requests don't hammer Dynamo.
            red.setex(red_key, 120, json.dumps(requests_to_return))
        return requests_to_return
def change_request_status(
    self, user_email, group, new_status, updated_by=None, reviewer_comments=None
):
    """Change the status of a user's request for a specific group.

    Emits a stats counter, validates the new status, updates every request of
    the user that targets `group`, and persists each update to DynamoDB.

    :param user_email: E-mail address of the user who owns the request.
    :param group: Group the request targets.
    :param new_status: New status; must be one of POSSIBLE_STATUSES.
    :param updated_by: Who performed the update (mandatory for "approved").
    :param reviewer_comments: Optional reviewer notes stored on the request.
    :return: The (last) modified request dictionary.
    :raises NoExistingRequest: If the user has no request for the group.
    """
    stats.count(
        "update_group_request",
        tags={
            "user": user_email,
            "group": group,
            "new_status": new_status,
            "updated_by": updated_by,
        },
    )
    modified_request = None
    # Ensure we are operating on the correct user's record.
    if self.affected_user.get("username") != user_email:
        self.affected_user = self.get_or_create_user(user_email)
    timestamp = int(time.time())
    if new_status not in POSSIBLE_STATUSES:
        raise Exception(
            f"Invalid status. Status must be one of {POSSIBLE_STATUSES}"
        )
    if new_status == "approved" and not updated_by:
        raise Exception(
            "You must provide `updated_by` to change a request status to approved."
        )
    existing_requests = self.get_requests_by_user(user_email)
    if existing_requests:
        updated = False
        for request in existing_requests:
            if request["group"] == group:
                request["updated_by"] = updated_by
                request["status"] = new_status
                request["last_updated"] = timestamp
                request["reviewer_comments"] = reviewer_comments
                modified_request = request
                try:
                    self.requests_table.put_item(
                        Item=self._data_to_dynamo_replace(request)
                    )
                except Exception as e:
                    error = f"Unable to add user request: {request}: {str(e)}"
                    log.error(error, exc_info=True)
                    raise Exception(error)
                updated = True
        if not updated:
            raise NoExistingRequest(
                f"Unable to find existing request for user: {user_email} and group: {group}."
            )
    else:
        raise NoExistingRequest(
            f"Unable to find existing requests for user: {user_email}"
        )
    return modified_request
def change_request_status_by_id(
    self,
    request_id: str,
    new_status: str,
    updated_by: Optional[str] = None,
    reviewer_comments: Optional[str] = None,
) -> Dict[str, Union[int, str]]:
    """Change a request's status, looked up by request ID.

    :param request_id: ID of the request to update.
    :param new_status: New status; must be one of POSSIBLE_STATUSES.
    :param updated_by: Who performed the update (mandatory for "approved").
    :param reviewer_comments: Optional reviewer notes stored on the request.
    :return: The modified request.
    """
    if new_status == "approved" and not updated_by:
        raise Exception(
            "You must provide `updated_by` to change a request status to approved."
        )
    matching_requests = self.resolve_request_ids([request_id])
    if new_status not in POSSIBLE_STATUSES:
        raise Exception(
            f"Invalid status. Status must be one of {POSSIBLE_STATUSES}"
        )
    modified_request = None
    for matching_request in matching_requests:
        matching_request["status"] = new_status
        matching_request["updated_by"] = updated_by
        matching_request["last_updated"] = int(time.time())
        matching_request["reviewer_comments"] = reviewer_comments
        modified_request = matching_request
        try:
            self.requests_table.put_item(
                Item=self._data_to_dynamo_replace(matching_request)
            )
        except Exception as e:
            error = f"Unable to add user request: {matching_request} : {str(e)}"
            log.error(error, exc_info=True)
            raise Exception(error)
    return modified_request
def get_all_policies(self):
    """Return every policy item from the policies table, following pagination."""
    items = []
    response = self.policies_table.scan()
    if response and "Items" in response:
        items.extend(self._data_from_dynamo_replace(response["Items"]))
    # DynamoDB scans are paginated: keep fetching while a continuation key exists.
    while "LastEvaluatedKey" in response:
        response = self.policies_table.scan(
            ExclusiveStartKey=response["LastEvaluatedKey"]
        )
        items.extend(self._data_from_dynamo_replace(response["Items"]))
    return items
async def create_group_log_entry(
    self,
    group: str,
    username: str,
    updated_by: str,
    action: str,
    updated_at: None = None,
    extra: None = None,
) -> None:
    """Persist a single audit-log entry for a group membership action.

    :param group: Group the action applies to.
    :param username: User the action was performed on.
    :param updated_by: Identity that performed the action.
    :param action: Action name to record.
    :param updated_at: Optional epoch timestamp; defaults to "now".
    :param extra: Optional extra payload stored verbatim.
    """
    log_entry = {
        "uuid": str(uuid.uuid4()),
        "group": group,
        "username": username,
        "updated_by": updated_by,
        # Default to the current time when no explicit timestamp is supplied.
        "updated_at": updated_at or int(time.time()),
        "action": action,
        "extra": extra,
    }
    self.group_log.put_item(Item=self._data_to_dynamo_replace(log_entry))
async def get_all_audit_logs(self) -> List[Dict[str, Union[int, None, str]]]:
    """Return every entry from the group audit log, following pagination."""
    response = await sync_to_async(self.group_log.scan)()
    items = []
    if response and "Items" in response:
        items.extend(self._data_from_dynamo_replace(response["Items"]))
    # DynamoDB scans are paginated: keep fetching while a continuation key exists.
    while "LastEvaluatedKey" in response:
        response = await sync_to_async(self.group_log.scan)(
            ExclusiveStartKey=response["LastEvaluatedKey"]
        )
        items.extend(self._data_from_dynamo_replace(response["Items"]))
    return items
async def get_all_pending_requests(self):
    """Convenience wrapper returning only requests with status "pending"."""
    return await self.get_all_requests(status="pending")
def batch_write_cloudtrail_events(self, items):
    """Batch-write CloudTrail event items, overwriting on (arn, request_id).

    :param items: Iterable of event dictionaries to persist.
    :return: True once all items have been queued for writing.
    """
    writer = self.cloudtrail_table.batch_writer(
        overwrite_by_pkeys=["arn", "request_id"]
    )
    with writer as batch:
        for event in items:
            batch.put_item(Item=self._data_to_dynamo_replace(event))
    return True
async def get_top_cloudtrail_errors_by_arn(self, arn, n=5):
    """Return the `n` most frequent unexpired CloudTrail errors for an ARN.

    Errors are grouped by "event_call|||resource"; each bucket carries its
    occurrence count plus the generated policy of the first item seen.

    :param arn: Principal ARN to query errors for.
    :param n: Maximum number of aggregated error buckets to return.
    :return: Dict mapping "call|||resource" keys to aggregate dicts,
             ordered by descending count.
    """
    response: dict = await sync_to_async(self.cloudtrail_table.query)(
        KeyConditionExpression=Key("arn").eq(arn)
    )
    aggregated_errors = defaultdict(dict)
    for entry in response.get("Items", []):
        # Skip entries whose TTL has already elapsed.
        if int(entry["ttl"]) < int(time.time()):
            continue
        event_string = f"{entry['event_call']}|||{entry.get('resource', '')}"
        bucket = aggregated_errors[event_string]
        if not bucket:
            bucket["count"] = 0
            bucket["generated_policy"] = entry.get("generated_policy", {})
        bucket["count"] += 1
    ranked = sorted(
        aggregated_errors.items(), key=lambda kv: kv[1]["count"], reverse=True
    )
    return dict(ranked[:n])
def count_arn_errors(self, error_count, items):
    """Accumulate per-ARN error counts from `items` into `error_count`.

    Items missing a "count" field contribute 1. The mapping is mutated in
    place and returned for convenience.
    """
    for entry in items:
        arn = entry.get("arn")
        current = error_count.get(arn)
        # Treat a missing (or falsy) existing total as zero before adding.
        error_count[arn] = (current if current else 0) + entry.get("count", 1)
    return error_count
async def revert_migrate():
    """Downgrade v3.0 policy-request changes back to the v2.0 schema.

    Every change carrying a `principal` block with a `principal_arn` is
    flattened back to a top-level `principal_arn` field, after which all
    requests are rewritten to the policy-requests table.
    """
    dynamo = UserDynamoHandler("consoleme")
    requests = await dynamo.get_all_policy_requests(status=None)
    for request in requests:
        extended = request.get("extended_request", {})
        for change in extended.get("changes", {}).get("changes", []):
            principal = change.get("principal")
            if not principal:
                continue
            if change.get("version") != "3.0":
                continue
            principal_arn = principal.get("principal_arn")
            if not principal_arn:
                continue
            # Flatten back to the 2.0 shape.
            change["principal_arn"] = principal_arn
            change.pop("principal")
            change["version"] = "2.0"
    dynamo.parallel_write_table(dynamo.policy_requests_table, requests)
import os
import sys
import json
import random
import logging
import zipfile
import requests
from termcolor import colored
logger = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `clean_dir` function. Write a Python function `def clean_dir(path: str) -> None` to solve the following problem:
Removes every file in a directory. Args: path (str): Path to directory. Returns: None
Here is the function:
def clean_dir(path: str) -> None:
    """
    Removes every file in a directory, creating the directory first if needed.

    Args:
        path (str): Path to directory.

    Returns:
        None
    """
    try:
        if not os.path.exists(path):
            os.mkdir(path)
            logger.info(f"Created directory: {path}")
        for entry in os.listdir(path):
            entry_path = os.path.join(path, entry)
            os.remove(entry_path)
            logger.info(f"Removed file: {entry_path}")
        logger.info(colored(f"Cleaned {path} directory", "green"))
    except Exception as e:
        # Best-effort cleanup: log the failure rather than propagate it.
        logger.error(f"Error occurred while cleaning directory {path}: {str(e)}")
import os
import sys
import json
import random
import logging
import zipfile
import requests
from termcolor import colored
logger = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `fetch_songs` function. Write a Python function `def fetch_songs(zip_url: str) -> None` to solve the following problem:
Downloads songs into songs/ directory to use with generated videos. Args: zip_url (str): The URL to the zip file containing the songs. Returns: None
Here is the function:
def fetch_songs(zip_url: str) -> None:
    """
    Downloads songs into the ../Songs directory for use with generated videos.

    Skips the download entirely when ../Songs already exists.

    Args:
        zip_url (str): The URL to the zip file containing the songs.

    Returns:
        None
    """
    try:
        logger.info(colored(f" => Fetching songs...", "magenta"))
        files_dir = "../Songs"
        if os.path.exists(files_dir):
            # Songs were already downloaded on a previous run; nothing to do.
            return
        os.mkdir(files_dir)
        logger.info(colored(f"Created directory: {files_dir}", "green"))
        # Download the archive and persist it to disk.
        response = requests.get(zip_url)
        with open("../Songs/songs.zip", "wb") as file:
            file.write(response.content)
        # Extract every song, then discard the archive itself.
        with zipfile.ZipFile("../Songs/songs.zip", "r") as file:
            file.extractall("../Songs")
        os.remove("../Songs/songs.zip")
        logger.info(colored(" => Downloaded Songs to ../Songs.", "green"))
    except Exception as e:
        logger.error(colored(f"Error occurred while fetching songs: {str(e)}", "red"))
import os
import sys
import json
import random
import logging
import zipfile
import requests
from termcolor import colored
logger = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `choose_random_song` function. Write a Python function `def choose_random_song() -> str` to solve the following problem:
Chooses a random song from the songs/ directory. Returns: str: The path to the chosen song.
Here is the function:
def choose_random_song() -> str:
    """
    Chooses a random song from the ../Songs directory.

    Returns:
        str: The path to the chosen song (None implicitly on failure).
    """
    try:
        song = random.choice(os.listdir("../Songs"))
        logger.info(colored(f"Chose song: {song}", "green"))
        return f"../Songs/{song}"
    except Exception as e:
        logger.error(colored(f"Error occurred while choosing random song: {str(e)}", "red"))
import os
import sys
import json
import random
import logging
import zipfile
import requests
from termcolor import colored
logger = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `check_env_vars` function. Write a Python function `def check_env_vars() -> None` to solve the following problem:
Checks if the necessary environment variables are set. Returns: None Raises: SystemExit: If any required environment variables are missing.
Here is the function:
def check_env_vars() -> None:
    """
    Checks if the necessary environment variables are set.

    Returns:
        None

    Raises:
        SystemExit: If any required environment variables are missing.
    """
    try:
        required_vars = ["PEXELS_API_KEY", "TIKTOK_SESSION_ID", "IMAGEMAGICK_BINARY"]
        # Bug fix: the previous version built `var + os.getenv(var)`, which
        # raises TypeError (str + None) exactly when a variable is unset, so
        # the intended "missing variables" message was never shown. Collect
        # only the variable names instead.
        missing_vars = [
            var
            for var in required_vars
            if os.getenv(var) is None or len(os.getenv(var)) == 0
        ]
        if missing_vars:
            missing_vars_str = ", ".join(missing_vars)
            logger.error(colored(f"The following environment variables are missing: {missing_vars_str}", "red"))
            logger.error(colored("Please consult 'EnvironmentVariables.md' for instructions on how to set them.", "yellow"))
            sys.exit(1)  # Aborts the program
    except Exception as e:
        # SystemExit derives from BaseException, so the exit above is not swallowed here.
        logger.error(f"Error occurred while checking environment variables: {str(e)}")
        sys.exit(1)  # Aborts the program if an unexpected error occurs
import os
from utils import *
from dotenv import load_dotenv
from gpt import *
from video import *
from search import *
from uuid import uuid4
from tiktokvoice import *
from flask_cors import CORS
from termcolor import colored
from youtube import upload_video
from apiclient.errors import HttpError
from flask import Flask, request, jsonify
from moviepy.config import change_settings
# Number of stock-video search terms requested per generated video.
AMOUNT_OF_STOCK_VIDEOS = 5
# Global flag set by generate() and cleared by cancel(); loops poll it to abort early.
GENERATING = False
def upload_video(video_path, title, description, category, keywords, privacy_status):
    """Upload a video to YouTube, re-authenticating and retrying once on 401/403.

    NOTE(review): this local definition shadows the `upload_video` imported
    from `youtube` at the top of this file — confirm which one is intended.

    :param video_path: Path to the video file to upload.
    :param title: Video title.
    :param description: Video description.
    :param category: YouTube category ID string.
    :param keywords: Comma-separated keyword string.
    :param privacy_status: One of "public", "private", "unlisted".
    :return: The YouTube API response from the upload.
    :raises HttpError: For HTTP failures other than 401/403.
    """
    try:
        # Get the authenticated YouTube service
        youtube = get_authenticated_service()
        # Retrieve and print the channel ID for the authenticated user
        channels_response = youtube.channels().list(mine=True, part='id').execute()
        for channel in channels_response['items']:
            print(colored(f" => Channel ID: {channel['id']}", "blue"))
        # Initialize the upload process
        video_response = initialize_upload(youtube, {
            'file': video_path,  # The path to the video file
            'title': title,
            'description': description,
            'category': category,
            'keywords': keywords,
            'privacyStatus': privacy_status
        })
        return video_response  # Return the response from the upload process
    except HttpError as e:
        print(colored(f"[-] An HTTP error {e.resp.status} occurred:\n{e.content}", "red"))
        if e.resp.status in [401, 403]:
            # Credentials are stale or insufficient: re-authenticate and retry once.
            youtube = get_authenticated_service()  # This will prompt for re-authentication if necessary
            video_response = initialize_upload(youtube, {
                'file': video_path,
                'title': title,
                'description': description,
                'category': category,
                'keywords': keywords,
                'privacyStatus': privacy_status
            })
            return video_response
        else:
            raise e
def generate():
    """Handle a video-generation request end-to-end and return a JSON result.

    Pipeline: clean temp dirs -> parse request JSON -> optionally fetch songs
    -> generate script and search terms -> download stock footage -> TTS per
    sentence -> subtitles -> combine clips -> render final video ->
    optionally upload to YouTube -> optionally mix in background music.

    The module-level GENERATING flag is polled between stages so that a
    concurrent /cancel request can abort the run early.
    """
    try:
        # Set global variable
        global GENERATING
        GENERATING = True

        # Clean working directories from any previous run
        clean_dir("../temp/")
        clean_dir("../subtitles/")

        # Parse JSON
        data = request.get_json()
        paragraph_number = int(data.get('paragraphNumber', 1))  # Default to 1 if not provided
        ai_model = data.get('aiModel')  # Get the AI model selected by the user
        n_threads = data.get('threads')  # Amount of threads to use for video generation
        subtitles_position = data.get('subtitlesPosition')  # Position of the subtitles in the video
        text_color = data.get('color')  # Color of subtitle text

        # Get 'useMusic' from the request data and default to False if not provided
        use_music = data.get('useMusic', False)

        # Get 'automateYoutubeUpload' from the request data and default to False if not provided
        automate_youtube_upload = data.get('automateYoutubeUpload', False)

        # Get the ZIP Url of the songs
        songs_zip_url = data.get('zipUrl')

        # Download songs
        if use_music:
            # Downloads a ZIP file containing popular TikTok Songs
            if songs_zip_url:
                fetch_songs(songs_zip_url)
            else:
                # Default to a ZIP file containing popular TikTok Songs
                fetch_songs("https://filebin.net/2avx134kdibc4c3q/drive-download-20240209T180019Z-001.zip")

        # Print little information about the video which is to be generated
        print(colored("[Video to be generated]", "blue"))
        print(colored(" Subject: " + data["videoSubject"], "blue"))
        print(colored(" AI Model: " + ai_model, "blue"))  # Print the AI model being used
        print(colored(" Custom Prompt: " + data["customPrompt"], "blue"))  # Print the custom prompt being used

        if not GENERATING:
            return jsonify(
                {
                    "status": "error",
                    "message": "Video generation was cancelled.",
                    "data": [],
                }
            )

        voice = data["voice"]
        voice_prefix = voice[:2]  # Two-letter prefix selects the subtitle language

        if not voice:
            print(colored("[!] No voice was selected. Defaulting to \"en_us_001\"", "yellow"))
            voice = "en_us_001"
            voice_prefix = voice[:2]

        # Generate a script
        script = generate_script(data["videoSubject"], paragraph_number, ai_model, voice, data["customPrompt"])  # Pass the AI model to the script generation

        # Generate search terms
        search_terms = get_search_terms(
            data["videoSubject"], AMOUNT_OF_STOCK_VIDEOS, script, ai_model
        )

        # Search for a video of the given search term
        video_urls = []

        # Defines how many results it should query and search through
        it = 15

        # Defines the minimum duration of each clip
        min_dur = 10

        # Loop through all search terms,
        # and search for a video of the given search term
        for search_term in search_terms:
            if not GENERATING:
                return jsonify(
                    {
                        "status": "error",
                        "message": "Video generation was cancelled.",
                        "data": [],
                    }
                )
            found_urls = search_for_stock_videos(
                search_term, os.getenv("PEXELS_API_KEY"), it, min_dur
            )
            # Check for duplicates; the break keeps at most one new clip per search term
            for url in found_urls:
                if url not in video_urls:
                    video_urls.append(url)
                    break

        # Check if video_urls is empty
        if not video_urls:
            print(colored("[-] No videos found to download.", "red"))
            return jsonify(
                {
                    "status": "error",
                    "message": "No videos found to download.",
                    "data": [],
                }
            )

        # Define video_paths
        video_paths = []

        # Let user know
        print(colored(f"[+] Downloading {len(video_urls)} videos...", "blue"))

        # Save the videos
        for video_url in video_urls:
            if not GENERATING:
                return jsonify(
                    {
                        "status": "error",
                        "message": "Video generation was cancelled.",
                        "data": [],
                    }
                )
            try:
                saved_video_path = save_video(video_url)
                video_paths.append(saved_video_path)
            except Exception:
                # A failed download is skipped rather than aborting the whole run
                print(colored(f"[-] Could not download video: {video_url}", "red"))

        # Let user know
        print(colored("[+] Videos downloaded!", "green"))

        # Let user know
        print(colored("[+] Script generated!\n", "green"))

        if not GENERATING:
            return jsonify(
                {
                    "status": "error",
                    "message": "Video generation was cancelled.",
                    "data": [],
                }
            )

        # Split script into sentences
        sentences = script.split(". ")

        # Remove empty strings
        sentences = list(filter(lambda x: x != "", sentences))
        paths = []

        # Generate TTS for every sentence
        for sentence in sentences:
            if not GENERATING:
                return jsonify(
                    {
                        "status": "error",
                        "message": "Video generation was cancelled.",
                        "data": [],
                    }
                )
            current_tts_path = f"../temp/{uuid4()}.mp3"
            tts(sentence, voice, filename=current_tts_path)
            audio_clip = AudioFileClip(current_tts_path)
            paths.append(audio_clip)

        # Combine all TTS files using moviepy
        final_audio = concatenate_audioclips(paths)
        tts_path = f"../temp/{uuid4()}.mp3"
        final_audio.write_audiofile(tts_path)

        try:
            subtitles_path = generate_subtitles(audio_path=tts_path, sentences=sentences, audio_clips=paths, voice=voice_prefix)
        except Exception as e:
            # Subtitles are optional: the video is still rendered without them
            print(colored(f"[-] Error generating subtitles: {e}", "red"))
            subtitles_path = None

        # Concatenate videos
        temp_audio = AudioFileClip(tts_path)
        combined_video_path = combine_videos(video_paths, temp_audio.duration, 5, n_threads or 2)

        # Put everything together
        try:
            final_video_path = generate_video(combined_video_path, tts_path, subtitles_path, n_threads or 2, subtitles_position, text_color or "#FFFF00")
        except Exception as e:
            print(colored(f"[-] Error generating final video: {e}", "red"))
            final_video_path = None

        # Define metadata for the video, we will display this to the user, and use it for the YouTube upload
        title, description, keywords = generate_metadata(data["videoSubject"], script, ai_model)

        print(colored("[-] Metadata for YouTube upload:", "blue"))
        print(colored(" Title: ", "blue"))
        print(colored(f" {title}", "blue"))
        print(colored(" Description: ", "blue"))
        print(colored(f" {description}", "blue"))
        print(colored(" Keywords: ", "blue"))
        print(colored(f" {', '.join(keywords)}", "blue"))

        if automate_youtube_upload:
            # Start Youtube Uploader
            # Check if the CLIENT_SECRETS_FILE exists
            client_secrets_file = os.path.abspath("./client_secret.json")
            SKIP_YT_UPLOAD = False
            if not os.path.exists(client_secrets_file):
                SKIP_YT_UPLOAD = True
                print(colored("[-] Client secrets file missing. YouTube upload will be skipped.", "yellow"))
                print(colored("[-] Please download the client_secret.json from Google Cloud Platform and store this inside the /Backend directory.", "red"))

            # Only proceed with YouTube upload if the toggle is True and client_secret.json exists.
            if not SKIP_YT_UPLOAD:
                # Choose the appropriate category ID for your videos
                video_category_id = "28"  # Science & Technology
                privacyStatus = "private"  # "public", "private", "unlisted"
                video_metadata = {
                    'video_path': os.path.abspath(f"../temp/{final_video_path}"),
                    'title': title,
                    'description': description,
                    'category': video_category_id,
                    'keywords': ",".join(keywords),
                    'privacyStatus': privacyStatus,
                }

                # Upload the video to YouTube
                try:
                    # Unpack the video_metadata dictionary into individual arguments
                    video_response = upload_video(
                        video_path=video_metadata['video_path'],
                        title=video_metadata['title'],
                        description=video_metadata['description'],
                        category=video_metadata['category'],
                        keywords=video_metadata['keywords'],
                        privacy_status=video_metadata['privacyStatus']
                    )
                    print(f"Uploaded video ID: {video_response.get('id')}")
                except HttpError as e:
                    print(f"An HTTP error {e.resp.status} occurred:\n{e.content}")

        video_clip = VideoFileClip(f"../temp/{final_video_path}")
        if use_music:
            # Select a random song
            song_path = choose_random_song()

            # Mix a background song into the rendered video using moviepy
            original_duration = video_clip.duration
            original_audio = video_clip.audio
            song_clip = AudioFileClip(song_path).set_fps(44100)

            # Set the volume of the song to 10% of the original volume
            song_clip = song_clip.volumex(0.1).set_fps(44100)

            # Add the song to the video
            comp_audio = CompositeAudioClip([original_audio, song_clip])
            video_clip = video_clip.set_audio(comp_audio)
            video_clip = video_clip.set_fps(30)
            video_clip = video_clip.set_duration(original_duration)
            video_clip.write_videofile(f"../{final_video_path}", threads=n_threads or 1)
        else:
            video_clip.write_videofile(f"../{final_video_path}", threads=n_threads or 1)

        # Let user know
        print(colored(f"[+] Video generated: {final_video_path}!", "green"))

        # Stop FFMPEG processes
        if os.name == "nt":
            # Windows
            os.system("taskkill /f /im ffmpeg.exe")
        else:
            # Other OS
            os.system("pkill -f ffmpeg")

        GENERATING = False

        # Return JSON
        return jsonify(
            {
                "status": "success",
                "message": "Video generated! See MoneyPrinter/output.mp4 for result.",
                "data": final_video_path,
            }
        )
    except Exception as err:
        print(colored(f"[-] Error: {str(err)}", "red"))
        return jsonify(
            {
                "status": "error",
                "message": f"Could not retrieve stock videos: {str(err)}",
                "data": [],
            }
        )
import os
from utils import *
from dotenv import load_dotenv
from gpt import *
from video import *
from search import *
from uuid import uuid4
from tiktokvoice import *
from flask_cors import CORS
from termcolor import colored
from youtube import upload_video
from apiclient.errors import HttpError
from flask import Flask, request, jsonify
from moviepy.config import change_settings
# Module-level flag cleared by cancel() to stop an in-flight generation run.
GENERATING = False
def cancel():
    """Flag the in-flight generation loop to stop and acknowledge the request."""
    global GENERATING
    print(colored("[!] Received cancellation request...", "yellow"))
    GENERATING = False
    return jsonify({"status": "success", "message": "Cancelled video generation."})
import base64
import requests
import threading
from typing import List
from termcolor import colored
from playsound import playsound
# Catalogue of TikTok TTS voice IDs accepted by tts(), grouped by category.
# Values are passed verbatim to the TTS API.
VOICES = [
    # DISNEY VOICES
    "en_us_ghostface",  # Ghost Face
    "en_us_chewbacca",  # Chewbacca
    "en_us_c3po",  # C3PO
    "en_us_stitch",  # Stitch
    "en_us_stormtrooper",  # Stormtrooper
    "en_us_rocket",  # Rocket
    # ENGLISH VOICES
    "en_au_001",  # English AU - Female
    "en_au_002",  # English AU - Male
    "en_uk_001",  # English UK - Male 1
    "en_uk_003",  # English UK - Male 2
    "en_us_001",  # English US - Female (Int. 1)
    "en_us_002",  # English US - Female (Int. 2)
    "en_us_006",  # English US - Male 1
    "en_us_007",  # English US - Male 2
    "en_us_009",  # English US - Male 3
    "en_us_010",  # English US - Male 4
    # EUROPE VOICES
    "fr_001",  # French - Male 1
    "fr_002",  # French - Male 2
    "de_001",  # German - Female
    "de_002",  # German - Male
    "es_002",  # Spanish - Male
    # AMERICA VOICES
    "es_mx_002",  # Spanish MX - Male
    "br_001",  # Portuguese BR - Female 1
    "br_003",  # Portuguese BR - Female 2
    "br_004",  # Portuguese BR - Female 3
    "br_005",  # Portuguese BR - Male
    # ASIA VOICES
    "id_001",  # Indonesian - Female
    "jp_001",  # Japanese - Female 1
    "jp_003",  # Japanese - Female 2
    "jp_005",  # Japanese - Female 3
    "jp_006",  # Japanese - Male
    "kr_002",  # Korean - Male 1
    "kr_003",  # Korean - Female
    "kr_004",  # Korean - Male 2
    # SINGING VOICES
    "en_female_f08_salut_damour",  # Alto
    "en_male_m03_lobby",  # Tenor
    "en_female_f08_warmy_breeze",  # Warmy Breeze
    "en_male_m03_sunshine_soon",  # Sunshine Soon
    # OTHER
    "en_male_narration",  # narrator
    "en_male_funny",  # wacky
    "en_female_emotional",  # peaceful
]
# Index of the active TTS endpoint (ENDPOINTS is defined elsewhere in this module);
# tts() flips between two endpoints when one is unavailable.
current_endpoint = 0
# Texts at or above this length are split into ~299-character chunks before synthesis.
TEXT_BYTE_LIMIT = 300
def split_string(string: str, chunk_size: int) -> List[str]:
    """Greedily pack whitespace-separated words into chunks of roughly chunk_size.

    A word is added to the current chunk while the chunk length plus the word
    (and a separating space) stays within chunk_size; otherwise the chunk is
    flushed and a new one starts. Leading/trailing whitespace is stripped
    from every emitted chunk.
    """
    chunks: List[str] = []
    buffer = ""
    for token in string.split():
        fits = len(buffer) + len(token) + 1 <= chunk_size
        if fits:
            buffer = f"{buffer} {token}"
        else:
            if buffer:
                chunks.append(buffer.strip())
            buffer = token
    if buffer:
        chunks.append(buffer.strip())
    return chunks
def get_api_response() -> requests.Response:
    """GET the base host of the active TTS endpoint to probe availability."""
    base_url = ENDPOINTS[current_endpoint].split("/a")[0]
    return requests.get(base_url)
def save_audio_file(base64_data: str, filename: str = "output.mp3") -> None:
    """Decode a base64 audio payload and write the raw bytes to `filename`."""
    with open(filename, "wb") as out:
        out.write(base64.b64decode(base64_data))
def generate_audio(text: str, voice: str) -> bytes:
    """POST text and voice to the active TTS endpoint; return the raw response body."""
    payload = {"text": text, "voice": voice}
    response = requests.post(
        ENDPOINTS[current_endpoint],
        headers={"Content-Type": "application/json"},
        json=payload,
    )
    return response.content
def tts(
    text: str,
    voice: str = "none",
    filename: str = "output.mp3",
    play_sound: bool = False,
) -> None:
    """Synthesize `text` via the TikTok TTS service and write an MP3 file.

    Texts shorter than TEXT_BYTE_LIMIT are sent in one request; longer texts
    are split into <300-character chunks, synthesized concurrently in
    threads, and their base64 payloads concatenated in order before decoding.

    :param text: Text to speak; must be non-empty.
    :param voice: One of VOICES; the default "none" is rejected.
    :param filename: Output MP3 path.
    :param play_sound: Play the file after saving when True.
    """
    # checking if the website is available
    global current_endpoint

    if get_api_response().status_code == 200:
        print(colored("[+] TikTok TTS Service available!", "green"))
    else:
        # Fall back to the alternate endpoint before giving up.
        current_endpoint = (current_endpoint + 1) % 2
        if get_api_response().status_code == 200:
            print(colored("[+] TTS Service available!", "green"))
        else:
            print(colored("[-] TTS Service not available and probably temporarily rate limited, try again later..." , "red"))
            return

    # checking if arguments are valid
    if voice == "none":
        print(colored("[-] Please specify a voice", "red"))
        return

    if voice not in VOICES:
        print(colored("[-] Voice not available", "red"))
        return

    if not text:
        print(colored("[-] Please specify a text", "red"))
        return

    # creating the audio file
    try:
        if len(text) < TEXT_BYTE_LIMIT:
            audio = generate_audio((text), voice)
            # The two endpoints wrap the base64 payload differently.
            if current_endpoint == 0:
                audio_base64_data = str(audio).split('"')[5]
            else:
                audio_base64_data = str(audio).split('"')[3].split(",")[1]

            if audio_base64_data == "error":
                print(colored("[-] This voice is unavailable right now", "red"))
                return
        else:
            # Split longer text into smaller parts
            text_parts = split_string(text, 299)
            audio_base64_data = [None] * len(text_parts)

            # Define a thread function to generate audio for each text part
            def generate_audio_thread(text_part, index):
                audio = generate_audio(text_part, voice)
                if current_endpoint == 0:
                    base64_data = str(audio).split('"')[5]
                else:
                    base64_data = str(audio).split('"')[3].split(",")[1]

                # Bug fix: previously this compared the result *list*
                # (audio_base64_data) to "error", which is never true, so
                # per-chunk failures were silently missed. Check the chunk's
                # own payload instead.
                if base64_data == "error":
                    print(colored("[-] This voice is unavailable right now", "red"))
                    return "error"

                audio_base64_data[index] = base64_data

            threads = []
            for index, text_part in enumerate(text_parts):
                # Create and start a new thread for each text part
                thread = threading.Thread(
                    target=generate_audio_thread, args=(text_part, index)
                )
                thread.start()
                threads.append(thread)

            # Wait for all threads to complete
            for thread in threads:
                thread.join()

            # Concatenate the base64 data in the correct order
            audio_base64_data = "".join(audio_base64_data)

        save_audio_file(audio_base64_data, filename)
        # Bug fix: interpolate the actual output filename into the message
        # (the old format string printed a literal placeholder).
        print(colored(f"[+] Audio file saved successfully as '{filename}'", "green"))
        if play_sound:
            playsound(filename)
    except Exception as e:
        print(colored(f"[-] An error occurred during TTS: {e}", "red"))
import os
import uuid
import requests
import srt_equalizer
import assemblyai as aai
from typing import List
from moviepy.editor import *
from termcolor import colored
from dotenv import load_dotenv
from datetime import timedelta
from moviepy.video.fx.all import crop
from moviepy.video.tools.subtitles import SubtitlesClip
The provided code snippet includes necessary dependencies for implementing the `save_video` function. Write a Python function `def save_video(video_url: str, directory: str = "../temp") -> str` to solve the following problem:
Saves a video from a given URL and returns the path to the video. Args: video_url (str): The URL of the video to save. directory (str): The path of the temporary directory to save the video to Returns: str: The path to the saved video.
Here is the function:
def save_video(video_url: str, directory: str = "../temp") -> str:
    """
    Saves a video from a given URL and returns the path to the video.

    Args:
        video_url (str): The URL of the video to save.
        directory (str): The path of the temporary directory to save the video to

    Returns:
        str: The path to the saved video.
    """
    destination = f"{directory}/{uuid.uuid4()}.mp4"
    with open(destination, "wb") as video_file:
        video_file.write(requests.get(video_url).content)
    return destination
import os
import uuid
import requests
import srt_equalizer
import assemblyai as aai
from typing import List
from moviepy.editor import *
from termcolor import colored
from dotenv import load_dotenv
from datetime import timedelta
from moviepy.video.fx.all import crop
from moviepy.video.tools.subtitles import SubtitlesClip
# AssemblyAI API key from the environment; unset/empty selects local subtitle generation.
ASSEMBLY_AI_API_KEY = os.getenv("ASSEMBLY_AI_API_KEY")
def __generate_subtitles_assemblyai(audio_path: str, voice: str) -> str:
    """
    Generates subtitles from a given audio file and returns them as SRT text.

    Args:
        audio_path (str): The path to the audio file to generate subtitles from.
        voice (str): Two-letter voice prefix used to derive the language code.

    Returns:
        str: The generated subtitles
    """
    # Map TTS voice prefixes to AssemblyAI language codes where they differ.
    language_mapping = {
        "br": "pt",
        "id": "en",  # AssemblyAI doesn't have Indonesian
        "jp": "ja",
        "kr": "ko",
    }
    lang_code = language_mapping.get(voice, voice)

    aai.settings.api_key = ASSEMBLY_AI_API_KEY
    transcriber = aai.Transcriber(
        config=aai.TranscriptionConfig(language_code=lang_code)
    )
    transcript = transcriber.transcribe(audio_path)
    return transcript.export_subtitles_srt()
def __generate_subtitles_locally(sentences: List[str], audio_clips: List[AudioFileClip]) -> str:
    """
    Generates SRT subtitle text from sentences and their audio clip durations.

    Args:
        sentences (List[str]): all the sentences said out loud in the audio clips
        audio_clips (List[AudioFileClip]): all the individual audio clips which will make up the final audio track

    Returns:
        str: The generated subtitles
    """

    def convert_to_srt_time_format(total_seconds):
        # Convert total seconds to the SRT time format: HH:MM:SS,mmm
        if total_seconds == 0:
            return "0:00:00,0"
        stamp = str(timedelta(seconds=total_seconds))
        # Bug fix: only trim trailing zeros when a fractional part exists.
        # str.rstrip('0') strips *characters*, so the old unconditional call
        # corrupted whole-second stamps, e.g. "0:00:10" -> "0:00:1" and
        # "0:01:00" -> "0:01:".
        if "." in stamp:
            stamp = stamp.rstrip("0")
        return stamp.replace(".", ",")

    start_time = 0
    subtitles = []

    for i, (sentence, audio_clip) in enumerate(zip(sentences, audio_clips), start=1):
        duration = audio_clip.duration
        end_time = start_time + duration

        # Format: subtitle index, start time --> end time, sentence
        subtitle_entry = f"{i}\n{convert_to_srt_time_format(start_time)} --> {convert_to_srt_time_format(end_time)}\n{sentence}\n"
        subtitles.append(subtitle_entry)

        start_time += duration  # Update start time for the next subtitle

    return "\n".join(subtitles)
The provided code snippet includes necessary dependencies for implementing the `generate_subtitles` function. Write a Python function `def generate_subtitles(audio_path: str, sentences: List[str], audio_clips: List[AudioFileClip], voice: str) -> str` to solve the following problem:
Generates subtitles from a given audio file and returns the path to the subtitles. Args: audio_path (str): The path to the audio file to generate subtitles from. sentences (List[str]): all the sentences said out loud in the audio clips audio_clips (List[AudioFileClip]): all the individual audio clips which will make up the final audio track Returns: str: The path to the generated subtitles.
Here is the function:
def generate_subtitles(audio_path: str, sentences: List[str], audio_clips: List[AudioFileClip], voice: str) -> str:
    """
    Generates subtitles from a given audio file and returns the path to the subtitles.

    Uses AssemblyAI when an API key is configured; otherwise timings are
    computed locally from the audio clip durations. The resulting SRT file
    is written under ../subtitles/ and its line lengths are equalized.

    Args:
        audio_path (str): The path to the audio file to generate subtitles from.
        sentences (List[str]): all the sentences said out loud in the audio clips
        audio_clips (List[AudioFileClip]): all the individual audio clips which will make up the final audio track
        voice (str): Two-letter voice prefix used to pick the transcription language.

    Returns:
        str: The path to the generated subtitles.
    """

    def equalize_subtitles(srt_path: str, max_chars: int = 10) -> None:
        # Rebalance subtitle line lengths in place so no line exceeds max_chars.
        srt_equalizer.equalize_srt_file(srt_path, srt_path, max_chars)

    # Save subtitles
    subtitles_path = f"../subtitles/{uuid.uuid4()}.srt"

    if ASSEMBLY_AI_API_KEY is not None and ASSEMBLY_AI_API_KEY != "":
        print(colored("[+] Creating subtitles using AssemblyAI", "blue"))
        subtitles = __generate_subtitles_assemblyai(audio_path, voice)
    else:
        print(colored("[+] Creating subtitles locally", "blue"))
        subtitles = __generate_subtitles_locally(sentences, audio_clips)

    with open(subtitles_path, "w") as file:
        file.write(subtitles)

    # Equalize subtitles
    equalize_subtitles(subtitles_path)

    print(colored("[+] Subtitles generated.", "green"))

    return subtitles_path
162,252 | import os
import uuid
import requests
import srt_equalizer
import assemblyai as aai
from typing import List
from moviepy.editor import *
from termcolor import colored
from dotenv import load_dotenv
from datetime import timedelta
from moviepy.video.fx.all import crop
from moviepy.video.tools.subtitles import SubtitlesClip
The provided code snippet includes necessary dependencies for implementing the `combine_videos` function. Write a Python function `def combine_videos(video_paths: List[str], max_duration: int, max_clip_duration: int, threads: int) -> str` to solve the following problem:
Combines a list of videos into one video and returns the path to the combined video. Args: video_paths (List): A list of paths to the videos to combine. max_duration (int): The maximum duration of the combined video. max_clip_duration (int): The maximum duration of each clip. threads (int): The number of threads to use for the video processing. Returns: str: The path to the combined video.
Here is the function:
def combine_videos(video_paths: List[str], max_duration: int, max_clip_duration: int, threads: int) -> str:
    """
    Combines a list of videos into one video and returns the path to the combined video.

    Args:
        video_paths (List): A list of paths to the videos to combine.
        max_duration (int): The maximum duration of the combined video.
        max_clip_duration (int): The maximum duration of each clip.
        threads (int): The number of threads to use for the video processing.

    Returns:
        str: The path to the combined video.
    """
    video_id = uuid.uuid4()
    combined_video_path = f"../temp/{video_id}.mp4"

    # Required duration of each clip
    req_dur = max_duration / len(video_paths)

    print(colored("[+] Combining videos...", "blue"))
    print(colored(f"[+] Each clip will be maximum {req_dur} seconds long.", "blue"))

    clips = []
    tot_dur = 0
    # Add downloaded clips over and over until the duration of the audio (max_duration) has been reached
    while tot_dur < max_duration:
        for video_path in video_paths:
            # BUGFIX: stop as soon as enough footage has been collected.
            # Previously the inner loop kept running after the target was
            # reached, appending zero-length subclips (subclip(0, 0)).
            if tot_dur >= max_duration:
                break
            clip = VideoFileClip(video_path)
            clip = clip.without_audio()
            # Check if clip is longer than the remaining audio
            if (max_duration - tot_dur) < clip.duration:
                clip = clip.subclip(0, (max_duration - tot_dur))
            # Only shorten clips if the calculated clip length (req_dur) is shorter than the actual clip to prevent still image
            elif req_dur < clip.duration:
                clip = clip.subclip(0, req_dur)
            clip = clip.set_fps(30)

            # Not all videos are the same size: crop to a 9:16 aspect ratio
            # (0.5625 = 1080/1920) around the center before resizing.
            if round((clip.w / clip.h), 4) < 0.5625:
                clip = crop(clip, width=clip.w, height=round(clip.w / 0.5625),
                            x_center=clip.w / 2,
                            y_center=clip.h / 2)
            else:
                clip = crop(clip, width=round(0.5625 * clip.h), height=clip.h,
                            x_center=clip.w / 2,
                            y_center=clip.h / 2)
            clip = clip.resize((1080, 1920))

            # Cap every individual clip at max_clip_duration.
            if clip.duration > max_clip_duration:
                clip = clip.subclip(0, max_clip_duration)

            clips.append(clip)
            tot_dur += clip.duration

    final_clip = concatenate_videoclips(clips)
    final_clip = final_clip.set_fps(30)
    final_clip.write_videofile(combined_video_path, threads=threads)

    return combined_video_path
162,253 | import os
import uuid
import requests
import srt_equalizer
import assemblyai as aai
from typing import List
from moviepy.editor import *
from termcolor import colored
from dotenv import load_dotenv
from datetime import timedelta
from moviepy.video.fx.all import crop
from moviepy.video.tools.subtitles import SubtitlesClip
The provided code snippet includes necessary dependencies for implementing the `generate_video` function. Write a Python function `def generate_video(combined_video_path: str, tts_path: str, subtitles_path: str, threads: int, subtitles_position: str, text_color : str) -> str` to solve the following problem:
This function creates the final video, with subtitles and audio. Args: combined_video_path (str): The path to the combined video. tts_path (str): The path to the text-to-speech audio. subtitles_path (str): The path to the subtitles. threads (int): The number of threads to use for the video processing. subtitles_position (str): The position of the subtitles. Returns: str: The path to the final video.
Here is the function:
def generate_video(combined_video_path: str, tts_path: str, subtitles_path: str, threads: int, subtitles_position: str, text_color : str) -> str:
    """
    This function creates the final video, with subtitles and audio.

    Args:
        combined_video_path (str): The path to the combined video.
        tts_path (str): The path to the text-to-speech audio.
        subtitles_path (str): The path to the subtitles.
        threads (int): The number of threads to use for the video processing.
        subtitles_position (str): The position of the subtitles ("horizontal,vertical").
        text_color (str): The color used to render the subtitle text.

    Returns:
        str: The path to the final video.
    """
    def make_text_clip(txt):
        # Render one subtitle entry with the project's bold font.
        return TextClip(
            txt,
            font="../fonts/bold_font.ttf",
            fontsize=100,
            color=text_color,
            stroke_color="black",
            stroke_width=5,
        )

    # "horizontal,vertical" -> two position components.
    horizontal_subtitles_position, vertical_subtitles_position = subtitles_position.split(",")

    # Burn the subtitles into the video.
    subtitles = SubtitlesClip(subtitles_path, make_text_clip)
    positioned_subtitles = subtitles.set_pos((horizontal_subtitles_position, vertical_subtitles_position))
    base_video = VideoFileClip(combined_video_path)
    result = CompositeVideoClip([base_video, positioned_subtitles])

    # Add the narration audio, then render (fall back to 2 threads).
    result = result.set_audio(AudioFileClip(tts_path))
    result.write_videofile("../temp/output.mp4", threads=threads or 2)

    return "output.mp4"
162,254 | import re
import json
import g4f
import openai
from typing import Tuple, List
from termcolor import colored
from dotenv import load_dotenv
import os
import google.generativeai as genai
def generate_response(prompt: str, ai_model: str) -> str:
    """
    Send a prompt to the selected AI backend and return its text response.

    Args:
        prompt (str): The prompt to send to the model.
        ai_model (str): Backend selector: 'g4f', 'gpt3.5-turbo', 'gpt4' or 'gemmini'.

    Returns:
        str: The response from the AI model.

    Raises:
        ValueError: If ai_model is not one of the supported identifiers.
    """
    if ai_model == 'g4f':
        # Free gpt4free backend.
        return g4f.ChatCompletion.create(
            model=g4f.models.gpt_35_turbo_16k_0613,
            messages=[{"role": "user", "content": prompt}],
        )

    if ai_model in ["gpt3.5-turbo", "gpt4"]:
        # Official OpenAI API; map the short names onto concrete model ids.
        model_name = "gpt-3.5-turbo" if ai_model == "gpt3.5-turbo" else "gpt-4-1106-preview"
        completion = openai.chat.completions.create(
            model=model_name,
            messages=[{"role": "user", "content": prompt}],
        )
        return completion.choices[0].message.content

    if ai_model == 'gemmini':
        # Google Gemini ('gemmini' spelling is the identifier callers pass).
        model = genai.GenerativeModel('gemini-pro')
        return model.generate_content(prompt).text

    raise ValueError("Invalid AI model selected.")
The provided code snippet includes necessary dependencies for implementing the `generate_script` function. Write a Python function `def generate_script(video_subject: str, paragraph_number: int, ai_model: str, voice: str, customPrompt: str) -> str` to solve the following problem:
Generate a script for a video, depending on the subject of the video, the number of paragraphs, and the AI model. Args: video_subject (str): The subject of the video. paragraph_number (int): The number of paragraphs to generate. ai_model (str): The AI model to use for generation. Returns: str: The script for the video.
Here is the function:
def generate_script(video_subject: str, paragraph_number: int, ai_model: str, voice: str, customPrompt: str) -> str:
    """
    Generate a script for a video, depending on the subject of the video, the number of paragraphs, and the AI model.

    Args:
        video_subject (str): The subject of the video.
        paragraph_number (int): The number of paragraphs to generate.
        ai_model (str): The AI model to use for generation.
        voice (str): The language the script must be written in.
        customPrompt (str): Optional caller-supplied prompt that replaces the default one.

    Returns:
        str: The script for the video, or None if the model returned nothing.
    """
    # Use the caller's prompt verbatim when given, otherwise the built-in one.
    if customPrompt:
        prompt = customPrompt
    else:
        prompt = """
        Generate a script for a video, depending on the subject of the video.
        The script is to be returned as a string with the specified number of paragraphs.
        Here is an example of a string:
        "This is an example string."
        Do not under any circumstance reference this prompt in your response.
        Get straight to the point, don't start with unnecessary things like, "welcome to this video".
        Obviously, the script should be related to the subject of the video.
        YOU MUST NOT INCLUDE ANY TYPE OF MARKDOWN OR FORMATTING IN THE SCRIPT, NEVER USE A TITLE.
        YOU MUST WRITE THE SCRIPT IN THE LANGUAGE SPECIFIED IN [LANGUAGE].
        ONLY RETURN THE RAW CONTENT OF THE SCRIPT. DO NOT INCLUDE "VOICEOVER", "NARRATOR" OR SIMILAR INDICATORS OF WHAT SHOULD BE SPOKEN AT THE BEGINNING OF EACH PARAGRAPH OR LINE. YOU MUST NOT MENTION THE PROMPT, OR ANYTHING ABOUT THE SCRIPT ITSELF. ALSO, NEVER TALK ABOUT THE AMOUNT OF PARAGRAPHS OR LINES. JUST WRITE THE SCRIPT.
        """

    prompt += f"""
        Subject: {video_subject}
        Number of paragraphs: {paragraph_number}
        Language: {voice}
        """

    # Generate and echo the raw script.
    response = generate_response(prompt, ai_model)
    print(colored(response, "cyan"))

    if not response:
        print(colored("[-] GPT returned an empty response.", "red"))
        return None

    # Strip markdown artefacts the model sometimes emits.
    cleaned = response.replace("*", "").replace("#", "")
    cleaned = re.sub(r"\[.*\]", "", cleaned)
    cleaned = re.sub(r"\(.*\)", "", cleaned)

    # Keep only the requested number of paragraphs.
    selected_paragraphs = cleaned.split("\n\n")[:paragraph_number]
    final_script = "\n\n".join(selected_paragraphs)

    print(colored(f"Number of paragraphs used: {len(selected_paragraphs)}", "green"))

    return final_script
162,255 | import re
import json
import g4f
import openai
from typing import Tuple, List
from termcolor import colored
from dotenv import load_dotenv
import os
import google.generativeai as genai
def generate_response(prompt: str, ai_model: str) -> str:
    """
    Ask the configured AI backend to answer a prompt.

    Args:
        prompt (str): The prompt to send to the model.
        ai_model (str): Backend selector: 'g4f', 'gpt3.5-turbo', 'gpt4' or 'gemmini'.

    Returns:
        str: The response from the AI model.

    Raises:
        ValueError: If ai_model is not a supported identifier.
    """
    messages = [{"role": "user", "content": prompt}]
    # Short aliases -> concrete OpenAI model identifiers.
    openai_models = {"gpt3.5-turbo": "gpt-3.5-turbo", "gpt4": "gpt-4-1106-preview"}

    if ai_model == 'g4f':
        response = g4f.ChatCompletion.create(
            model=g4f.models.gpt_35_turbo_16k_0613,
            messages=messages,
        )
    elif ai_model in openai_models:
        response = openai.chat.completions.create(
            model=openai_models[ai_model],
            messages=messages,
        ).choices[0].message.content
    elif ai_model == 'gemmini':
        response = genai.GenerativeModel('gemini-pro').generate_content(prompt).text
    else:
        raise ValueError("Invalid AI model selected.")

    return response
def get_search_terms(video_subject: str, amount: int, script: str, ai_model: str) -> List[str]:
    """
    Generate a JSON-Array of search terms for stock videos, depending on the subject of a video.

    Args:
        video_subject (str): The subject of the video.
        amount (int): The amount of search terms to generate.
        script (str): The script of the video.
        ai_model (str): The AI model to use for generation.

    Returns:
        List[str]: The search terms for the video subject (empty on parse failure).
    """
    prompt = f"""
    Generate {amount} search terms for stock videos,
    depending on the subject of a video.
    Subject: {video_subject}
    The search terms are to be returned as
    a JSON-Array of strings.
    Each search term should consist of 1-3 words,
    always add the main subject of the video.
    YOU MUST ONLY RETURN THE JSON-ARRAY OF STRINGS.
    YOU MUST NOT RETURN ANYTHING ELSE.
    YOU MUST NOT RETURN THE SCRIPT.
    The search terms must be related to the subject of the video.
    Here is an example of a JSON-Array of strings:
    ["search term 1", "search term 2", "search term 3"]
    For context, here is the full text:
    {script}
    """

    response = generate_response(prompt, ai_model)

    search_terms = []
    try:
        # Happy path: the model returned a clean JSON array of strings.
        parsed = json.loads(response)
        if not isinstance(parsed, list) or not all(isinstance(term, str) for term in parsed):
            raise ValueError("Response is not a list of strings.")
        search_terms = parsed
    except (json.JSONDecodeError, ValueError):
        print(colored("[*] GPT returned an unformatted response. Attempting to clean...", "yellow"))
        # Fall back to extracting the first array-looking substring.
        match = re.search(r'\["(?:[^"\\]|\\.)*"(?:,\s*"[^"\\]*")*\]', response)
        if match:
            try:
                search_terms = json.loads(match.group())
            except json.JSONDecodeError:
                print(colored("[-] Could not parse response.", "red"))
                return []

    # Let user know
    print(colored(f"\nGenerated {len(search_terms)} search terms: {', '.join(search_terms)}", "cyan"))

    return search_terms
The provided code snippet includes necessary dependencies for implementing the `generate_metadata` function. Write a Python function `def generate_metadata(video_subject: str, script: str, ai_model: str) -> Tuple[str, str, List[str]]` to solve the following problem:
Generate metadata for a YouTube video, including the title, description, and keywords. Args: video_subject (str): The subject of the video. script (str): The script of the video. ai_model (str): The AI model to use for generation. Returns: Tuple[str, str, List[str]]: The title, description, and keywords for the video.
Here is the function:
def generate_metadata(video_subject: str, script: str, ai_model: str) -> Tuple[str, str, List[str]]:
    """
    Generate metadata for a YouTube video, including the title, description, and keywords.

    Args:
        video_subject (str): The subject of the video.
        script (str): The script of the video.
        ai_model (str): The AI model to use for generation.

    Returns:
        Tuple[str, str, List[str]]: The title, description, and keywords for the video.
    """
    # Title: one short SEO-friendly line about the subject.
    title_prompt = f"""
    Generate a catchy and SEO-friendly title for a YouTube shorts video about {video_subject}.
    """
    title = generate_response(title_prompt, ai_model).strip()

    # Description: a short blurb grounded in the actual script.
    description_prompt = f"""
    Write a brief and engaging description for a YouTube shorts video about {video_subject}.
    The video is based on the following script:
    {script}
    """
    description = generate_response(description_prompt, ai_model).strip()

    # Keywords: reuse the stock-video search-term generator (6 terms).
    keywords = get_search_terms(video_subject, 6, script, ai_model)

    return title, description, keywords
162,256 | import requests
from typing import List
from termcolor import colored
The provided code snippet includes necessary dependencies for implementing the `search_for_stock_videos` function. Write a Python function `def search_for_stock_videos(query: str, api_key: str, it: int, min_dur: int) -> List[str]` to solve the following problem:
Searches for stock videos based on a query. Args: query (str): The query to search for. api_key (str): The Pexels API key to use. it (int): The number of results to request and inspect. min_dur (int): The minimum acceptable clip duration in seconds. Returns: List[str]: A list of stock video URLs (best available resolution per video).
Here is the function:
def search_for_stock_videos(query: str, api_key: str, it: int, min_dur: int) -> List[str]:
    """
    Searches for stock videos based on a query.

    Args:
        query (str): The query to search for.
        api_key (str): The Pexels API key to use.
        it (int): How many results to request and inspect.
        min_dur (int): Minimum acceptable clip duration in seconds.

    Returns:
        List[str]: A list of stock video URLs (best available resolution per video).
    """
    # Build headers
    headers = {
        "Authorization": api_key
    }

    # Build URL
    qurl = f"https://api.pexels.com/videos/search?query={query}&per_page={it}"

    # Send the request
    r = requests.get(qurl, headers=headers)

    # Parse the response
    response = r.json()

    video_url = []
    try:
        # loop through each video in the result
        for i in range(it):
            # check if video has desired minimum duration
            if response["videos"][i]["duration"] < min_dur:
                continue
            raw_urls = response["videos"][i]["video_files"]
            temp_video_url = ""
            # BUGFIX: reset the best-resolution tracker for EVERY video.
            # Previously video_res carried over between videos, so any video
            # whose best rendition was lower-res than an earlier video's was
            # silently dropped from the results.
            video_res = 0

            # loop through each url to determine the best quality
            for video in raw_urls:
                # Check if video has a valid download link
                if ".com/external" in video["link"]:
                    # Only save the URL with the largest resolution
                    if (video["width"] * video["height"]) > video_res:
                        temp_video_url = video["link"]
                        video_res = video["width"] * video["height"]

            # add the url to the return list if it's not empty
            if temp_video_url != "":
                video_url.append(temp_video_url)
    except Exception as e:
        # Best-effort: malformed/short responses just yield fewer results.
        print(colored("[-] No Videos found.", "red"))
        print(colored(e, "red"))

    # Let user know
    print(colored(f"\t=> \"{query}\" found {len(video_url)} Videos", "cyan"))

    # Return the video url
    return video_url
162,257 | import json
import os
import subprocess
from os.path import expanduser
from pathlib import Path
from typing import Optional
import tyro
from sshconf import empty_ssh_config_file, read_ssh_config
The provided code snippet includes necessary dependencies for implementing the `run_cmd` function. Write a Python function `def run_cmd(cmd: str)` to solve the following problem:
Run a command in the terminal.
Here is the function:
def run_cmd(cmd: str):
    """Run a command in the terminal, echoing what is executed.

    Args:
        cmd: The shell command line to run.

    Returns:
        int: The command's exit status (previously discarded; returning it
        lets callers detect failure while remaining backward compatible).

    Note:
        The command runs with ``shell=True`` — never pass untrusted input,
        as it would allow shell injection.
    """
    print("cmd:", cmd)
    print("output:")
    return subprocess.Popen(cmd, shell=True).wait()
162,258 | import sys
from typing import Any, Callable, Dict, List, Optional, Tuple
import numpy as np
import torch
import zmq
def get_chunks(
    lst: List[float], num_chunks: Optional[int] = None, size_of_chunk: Optional[int] = None
) -> List[List[float]]:
    """Split *lst* into consecutive sublists.

    Exactly one of ``num_chunks`` and ``size_of_chunk`` must be provided.

    Args:
        lst: List to be chunked up
        num_chunks: number of chunks to split list into
        size_of_chunk: size of each chunk

    Returns:
        List of consecutive sublists of *lst*.

    Raises:
        ValueError: if neither ``num_chunks`` nor ``size_of_chunk`` is given
            (previously this surfaced as an opaque UnboundLocalError).
    """
    if num_chunks:
        assert not size_of_chunk
        # max(1, ...) guards num_chunks > len(lst), which previously produced
        # a zero step and crashed range().
        size = max(1, len(lst) // num_chunks)
    elif size_of_chunk:
        size = size_of_chunk
    else:
        raise ValueError("Either num_chunks or size_of_chunk must be provided.")

    return [lst[i : i + size] for i in range(0, len(lst), size)]
def three_js_perspective_camera_focal_length(fov: float, image_height: int):
    """Returns the focal length of a three.js perspective camera.

    Args:
        fov: the field of view of the camera in degrees.
        image_height: the height of the image in pixels.
    """
    if fov is None:
        # Missing fov from the client: warn and fall back to a sane default.
        print("Warning: fov is None, using default value")
        return 50
    # focal = (h / 2) / tan(fov / 2), with fov converted degrees -> radians.
    half_height = image_height / 2.0
    half_fov_radians = fov * (np.pi / 180.0) / 2.0
    return half_height / np.tan(half_fov_radians)
The provided code snippet includes necessary dependencies for implementing the `get_intrinsics_matrix_and_camera_to_world_h` function. Write a Python function `def get_intrinsics_matrix_and_camera_to_world_h( camera_object: Dict[str, Any], image_height: int ) -> Tuple[torch.Tensor, torch.Tensor]` to solve the following problem:
Returns the camera intrinsics matrix and the camera to world homogeneous matrix. Args: camera_object: a Camera object. image_height: the height of the image in pixels
Here is the function:
def get_intrinsics_matrix_and_camera_to_world_h(
    camera_object: Dict[str, Any], image_height: int
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Returns the camera intrinsics matrix and the camera to world homogeneous matrix.

    Args:
        camera_object: a Camera object (dict with "fov", "aspect" and a flat
            16-value "matrix").
        image_height: the height of the image in pixels.
    """
    # --- intrinsics ---
    image_width = camera_object["aspect"] * image_height
    cx = image_width / 2.0
    cy = image_height / 2.0
    focal_length = three_js_perspective_camera_focal_length(camera_object["fov"], image_height)
    intrinsics_matrix = torch.tensor(
        [[focal_length, 0, cx], [0, focal_length, cy], [0, 0, 1]]
    ).float()

    # --- extrinsics ---
    # Group the flat 16-value matrix into 4 columns, then transpose.
    c2w = torch.tensor(get_chunks(camera_object["matrix"], size_of_chunk=4)).T.float()
    # Swap rows 1 and 2 of the homogeneous matrix (presumably converting the
    # three.js axis convention to the renderer's — TODO confirm).
    camera_to_world_h = torch.stack(
        [c2w[0, :], c2w[2, :], c2w[1, :], c2w[3, :]],
        dim=0,
    )
    return intrinsics_matrix, camera_to_world_h
162,259 | import sys
from typing import Any, Callable, Dict, List, Optional, Tuple
import numpy as np
import torch
import zmq
The provided code snippet includes necessary dependencies for implementing the `find_available_port` function. Write a Python function `def find_available_port(func: Callable, default_port: int, max_attempts: int = 1000, **kwargs) -> None` to solve the following problem:
Finds and attempts to connect to a port Args: func: function used on connecting to port default_port: the default port max_attempts: max number of attempts to try connection. Defaults to 1000.
Here is the function:
def find_available_port(func: Callable, default_port: int, max_attempts: int = 1000, **kwargs) -> None:
    """Finds and attempts to connect to a port

    Args:
        func: function used on connecting to port
        default_port: the default port
        max_attempts: max number of attempts to try connection. Defaults to 1000.
    """
    for offset in range(max_attempts):
        candidate = default_port + offset
        try:
            # Success: hand back both the function's result and the port used.
            return func(candidate, **kwargs), candidate
        except (OSError, zmq.error.ZMQError):
            # Port already taken — report and try the next one.
            print(f"Port: {candidate:d} in use, trying another...", file=sys.stderr)
        except Exception as e:
            # Anything else is unexpected: surface its type and re-raise.
            print(type(e))
            raise
    raise Exception(
        f"Could not find an available port in the range: [{default_port:d}, {max_attempts + default_port:d})"
    )
162,260 | import atexit
import os
import signal
import socket
import subprocess
import sys
import threading
import time
from typing import Optional, Union
from rich.console import Console
from nerfstudio.viewer.server import server
CONSOLE = Console()
import subprocess
The provided code snippet includes necessary dependencies for implementing the `run_viewer_bridge_server_as_subprocess` function. Write a Python function `def run_viewer_bridge_server_as_subprocess( websocket_port: int, zmq_port: Optional[int] = None, ip_address: str = "127.0.0.1", log_filename: Union[str, None] = None, )` to solve the following problem:
Runs the viewer bridge server as a subprocess. Args: zmq_port: Port to use for the ZMQ server. websocket_port: Port to use for the websocket server. ip_address: host to connect to log_filename: Filename to use for the log file. If None, no log file is created. Returns: None
Here is the function:
def run_viewer_bridge_server_as_subprocess(
    websocket_port: int,
    zmq_port: Optional[int] = None,
    ip_address: str = "127.0.0.1",
    log_filename: Union[str, None] = None,
):
    """Runs the viewer bridge server as a subprocess.

    Launches ``server`` as a detached child process, watches it from a daemon
    thread, and registers an atexit hook so the child is killed when this
    process exits.

    Args:
        zmq_port: Port to use for the ZMQ server. If None, an ephemeral free
            port is chosen automatically.
        websocket_port: Port to use for the websocket server.
        ip_address: host to connect to
        log_filename: Filename to use for the log file. If None, no log file is created.

    Returns:
        The ZMQ port number actually used (despite the original "Returns: None" note).
    """
    args = [sys.executable, "-u", "-m", server.__name__]
    # find an available port for zmq
    if zmq_port is None:
        # Binding to port 0 makes the OS pick a free ephemeral port.
        # NOTE(review): the probe socket is never closed, so the port is still
        # bound when the child starts — confirm whether the child rebinds it.
        sock = socket.socket()
        sock.bind(("", 0))
        zmq_port = sock.getsockname()[1]
        string = f"Using ZMQ port: {zmq_port}"
        CONSOLE.print(f"[bold yellow]{string}")
    args.append("--zmq-port")
    args.append(str(zmq_port))
    args.append("--websocket-port")
    args.append(str(websocket_port))
    args.append("--ip-address")
    args.append(str(ip_address))
    # supress output if no log filename is specified
    # (logfile stays open for the subprocess's lifetime on purpose)
    logfile = open(  # pylint: disable=consider-using-with
        log_filename if log_filename else os.devnull, "w", encoding="utf8"
    )
    # start_new_session detaches the child from this process's signal group.
    process = subprocess.Popen(  # pylint: disable=consider-using-with
        args, stdout=logfile, stderr=logfile, start_new_session=True
    )

    def cleanup(process):
        # Kill the child and reap it so no zombie remains.
        process.kill()
        process.wait()

    def poll_process():
        """
        Continually check to see if the viewer bridge server process is still running and has not failed.
        If it fails, alert the user and exit the entire program.
        """
        while process.poll() is None:
            time.sleep(0.5)
        string = f"\nThe viewer bridge server subprocess failed. Please check the log file {log_filename}.\n"
        string += (
            "You likely have to modify --viewer.zmq-port and/or --viewer.websocket-port in the "
            "config to avoid conflicting ports.\n"
        )
        string += "Try modifying --viewer.websocket-port 7007\n"
        CONSOLE.print(f"[bold red]{string}")
        cleanup(process)
        # This exists the entire program. sys.exit() will only kill the thread that this runs in.
        # SIGKILL is used because other threads (e.g. training) must die too.
        os.kill(os.getpid(), signal.SIGKILL)

    # continually check to see if the process stopped; daemon thread so it
    # never blocks interpreter shutdown.
    t1 = threading.Thread(target=poll_process)
    t1.daemon = True
    t1.start()
    atexit.register(cleanup, process)
    return zmq_port
162,261 | from __future__ import annotations
import base64
import enum
import os
import sys
import threading
import time
from pathlib import Path
from typing import Any, Dict, Optional, Tuple
import cv2
import numpy as np
import torch
from rich.console import Console
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.cameras.rays import RayBundle
from nerfstudio.configs import base_config as cfg
from nerfstudio.data.datasets.base_dataset import InputDataset
from nerfstudio.models.base_model import Model
from nerfstudio.utils import colormaps, profiler, writer
from nerfstudio.utils.decorators import check_main_thread, decorate_all
from nerfstudio.utils.images import BasicImages
from nerfstudio.utils.io import load_from_json, write_to_json
from nerfstudio.utils.writer import GLOBAL_BUFFER, EventName, TimeWriter
from nerfstudio.viewer.server.subprocess import run_viewer_bridge_server_as_subprocess
from nerfstudio.viewer.server.utils import get_intrinsics_matrix_and_camera_to_world_h
from nerfstudio.viewer.server.visualizer import Viewer
def load_from_json(filename: Path):
    """Load a dictionary from a JSON filename.

    Args:
        filename: The filename to load from.
    """
    # Guard against accidentally passing a non-JSON path.
    assert filename.suffix == ".json"
    return json.loads(filename.read_text(encoding="UTF-8"))
The provided code snippet includes necessary dependencies for implementing the `get_viewer_version` function. Write a Python function `def get_viewer_version() -> str` to solve the following problem:
Get the version of the viewer.
Here is the function:
def get_viewer_version() -> str:
    """Get the version of the viewer (read from the viewer app's package.json)."""
    # package.json lives in the viewer app directory, relative to this module.
    json_filename = os.path.join(os.path.dirname(__file__), "../app/package.json")
    package_info = load_from_json(Path(json_filename))
    return package_info["version"]
162,262 | from __future__ import annotations
import base64
import enum
import os
import sys
import threading
import time
from pathlib import Path
from typing import Any, Dict, Optional, Tuple
import cv2
import numpy as np
import torch
from rich.console import Console
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.cameras.rays import RayBundle
from nerfstudio.configs import base_config as cfg
from nerfstudio.data.datasets.base_dataset import InputDataset
from nerfstudio.models.base_model import Model
from nerfstudio.utils import colormaps, profiler, writer
from nerfstudio.utils.decorators import check_main_thread, decorate_all
from nerfstudio.utils.images import BasicImages
from nerfstudio.utils.io import load_from_json, write_to_json
from nerfstudio.utils.writer import GLOBAL_BUFFER, EventName, TimeWriter
from nerfstudio.viewer.server.subprocess import run_viewer_bridge_server_as_subprocess
from nerfstudio.viewer.server.utils import get_intrinsics_matrix_and_camera_to_world_h
from nerfstudio.viewer.server.visualizer import Viewer
class ViewerState:
"""Class to hold state for viewer variables
Args:
config: viewer setup configuration
"""
    def __init__(self, config: cfg.ViewerConfig, log_filename: Path):
        """Set up the viewer: optionally launch the bridge server, connect
        the ZMQ Viewer client, and initialize render-state bookkeeping.

        Args:
            config: viewer setup configuration
            log_filename: file the bridge-server subprocess logs to
        """
        self.config = config
        self.vis = None
        self.viewer_url = None
        self.log_filename = log_filename
        if self.config.launch_bridge_server:
            # start the viewer bridge server as a subprocess; it picks (or is
            # given) the ZMQ port we must connect the Viewer client to.
            assert self.config.websocket_port is not None
            self.log_filename.parent.mkdir(exist_ok=True)
            zmq_port = run_viewer_bridge_server_as_subprocess(
                self.config.websocket_port,
                zmq_port=self.config.zmq_port,
                ip_address=self.config.ip_address,
                log_filename=str(self.log_filename),
            )
            # TODO(ethan): log the output of the viewer bridge server in a file where the training logs go
            CONSOLE.line()
            version = get_viewer_version()
            websocket_url = f"ws://localhost:{self.config.websocket_port}"
            self.viewer_url = f"https://viewer.nerf.studio/versions/{version}/?websocket_url={websocket_url}"
            CONSOLE.rule(characters="=")
            CONSOLE.print(f"[Public] Open the viewer at {self.viewer_url}")
            CONSOLE.rule(characters="=")
            CONSOLE.line()
            self.vis = Viewer(zmq_port=zmq_port, ip_address=self.config.ip_address)
        else:
            # No bridge server: connect directly to an externally managed ZMQ port.
            assert self.config.zmq_port is not None
            self.vis = Viewer(zmq_port=self.config.zmq_port, ip_address=self.config.ip_address)

        # viewer specific variables
        self.prev_camera_matrix = None          # last camera pose seen (detects movement)
        self.prev_output_type = OutputTypes.INIT
        self.prev_colormap_type = ColormapTypes.INIT
        self.prev_moving = False
        self.output_type_changed = True
        self.max_resolution = 1000              # cap on render resolution (pixels)
        self.check_interrupt_vis = False
        self.check_done_render = True
        self.step = 0
        self.static_fps = 1                     # target fps when the camera is still
        self.moving_fps = 24                    # target fps while the camera moves
        self.camera_moving = False
        self.prev_camera_timestamp = 0
        self.output_list = None
def _pick_drawn_image_idxs(self, total_num: int) -> list[int]:
"""Determine indicies of images to display in viewer.
Args:
total_num: total number of training images.
Returns:
List of indices from [0, total_num-1].
"""
if self.config.max_num_display_images < 0:
num_display_images = total_num
else:
num_display_images = min(self.config.max_num_display_images, total_num)
# draw indices, roughly evenly spaced
return np.linspace(0, total_num - 1, num_display_images, dtype=np.int32).tolist()
    def init_scene(self, dataset: InputDataset, start_train=True) -> None:
        """Draw some images and the scene aabb in the viewer.

        Args:
            dataset: dataset to render in the scene
            start_train: whether to start train when viewer init;
                if False, only displays dataset until resume train is toggled
        """
        # set the config base dir
        self.vis["renderingState/config_base_dir"].write(str(self.log_filename.parents[0]))
        # clear the current scene
        self.vis["sceneState/sceneBox"].delete()
        self.vis["sceneState/cameras"].delete()
        # draw the training cameras and images
        image_indices = self._pick_drawn_image_idxs(len(dataset))
        for idx in image_indices:
            image = dataset[idx]["image"]
            if isinstance(image, BasicImages):
                # image container type: use its first stored image
                bgr = image.images[0][..., [2, 1, 0]]
            else:
                # reorder channels RGB -> BGR before serializing for the viewer
                bgr = image[..., [2, 1, 0]]
            camera_json = dataset.cameras.to_json(camera_idx=idx, image=bgr, max_size=100)
            self.vis[f"sceneState/cameras/{idx:06d}"].write(camera_json)
        # draw the scene box (i.e., the bounding box)
        json_ = dataset.scene_box.to_json()
        self.vis["sceneState/sceneBox"].write(json_)
        # set the initial state whether to train or not
        self.vis["renderingState/isTraining"].write(start_train)
        # self.vis["renderingState/render_time"].write(str(0))
        # set the properties of the camera
        # self.vis["renderingState/camera"].write(json_)
        # set the main camera intrinsics to one from the dataset
        # K = camera.get_intrinsics_matrix()
        # set_persp_intrinsics_matrix(self.vis, K.double().numpy())
def _check_camera_path_payload(self, trainer, step: int):
"""Check to see if the camera path export button was pressed."""
# check if we should interrupt from a button press?
camera_path_payload = self.vis["camera_path_payload"].read()
if camera_path_payload:
# save a model checkpoint
trainer.save_checkpoint(step)
# write to json file
camera_path_filename = camera_path_payload["camera_path_filename"]
camera_path = camera_path_payload["camera_path"]
write_to_json(Path(camera_path_filename), camera_path)
self.vis["camera_path_payload"].delete()
    def update_scene(self, trainer, step: int, graph: Model, num_rays_per_batch: int) -> None:
        """Updates the scene based on the graph weights.

        Args:
            trainer: trainer object, used to save checkpoints on camera-path export
            step: iteration step of training
            graph: the current checkpoint of the model
            num_rays_per_batch: number of rays per training batch (used to budget renders)
        """
        has_temporal_distortion = getattr(graph, "temporal_distortion", None) is not None
        self.vis["model/has_temporal_distortion"].write(str(has_temporal_distortion).lower())
        is_training = self.vis["renderingState/isTraining"].read()
        self.step = step
        self._check_camera_path_payload(trainer, step)
        camera_object = self._get_camera_object()
        if camera_object is None:
            # no camera set in the viewer yet; nothing to render
            return
        if is_training is None or is_training:
            # in training mode
            if self.camera_moving:
                # if the camera is moving, then we pause training and update camera continuously
                while self.camera_moving:
                    self._render_image_in_viewer(camera_object, graph, is_training)
                    camera_object = self._get_camera_object()
            else:
                # if the camera is not moving, then we approximate how many training steps need to be taken
                # to render at a FPS defined by self.static_fps.
                if EventName.TRAIN_RAYS_PER_SEC.value in GLOBAL_BUFFER["events"]:
                    train_rays_per_sec = GLOBAL_BUFFER["events"][EventName.TRAIN_RAYS_PER_SEC.value]["avg"]
                    # NOTE(review): target_train_util is read but not used in
                    # this branch — presumably leftover; confirm before removing
                    target_train_util = self.vis["renderingState/targetTrainUtil"].read()
                    if target_train_util is None:
                        target_train_util = 0.9
                    batches_per_sec = train_rays_per_sec / num_rays_per_batch
                    num_steps = max(int(1 / self.static_fps * batches_per_sec), 1)
                else:
                    num_steps = 1
                if step % num_steps == 0:
                    self._render_image_in_viewer(camera_object, graph, is_training)
        else:
            # in pause training mode, enter render loop with set graph
            local_step = step
            run_loop = not is_training
            while run_loop:
                # if self._is_render_step(local_step) and step > 0:
                if step > 0:
                    self._render_image_in_viewer(camera_object, graph, is_training)
                camera_object = self._get_camera_object()
                # re-read the flag so toggling "resume training" exits the loop
                is_training = self.vis["renderingState/isTraining"].read()
                self._check_camera_path_payload(trainer, step)
                run_loop = not is_training
                local_step += 1
def check_interrupt(self, frame, event, arg): # pylint: disable=unused-argument
"""Raises interrupt when flag has been set and not already on lowest resolution.
Used in conjunction with SetTrace.
"""
if event == "line":
if self.check_interrupt_vis and not self.camera_moving:
raise IOChangeException
return self.check_interrupt
    def _get_camera_object(self):
        """Gets the camera object from the viewer and updates the movement state if it has changed.

        Returns:
            The camera object dict from the viewer state, or None if no camera is set yet.
        """
        data = self.vis["renderingState/camera"].read()
        if data is None:
            return None
        camera_object = data["object"]
        # the camera counts as "moving" whenever its matrix differs from the last one seen
        if self.prev_camera_matrix is not None and np.allclose(camera_object["matrix"], self.prev_camera_matrix):
            self.camera_moving = False
        else:
            self.prev_camera_matrix = camera_object["matrix"]
            self.camera_moving = True
        # a change of selected output type also forces a re-render
        output_type = self.vis["renderingState/output_choice"].read()
        if output_type is None:
            output_type = OutputTypes.INIT
        if self.prev_output_type != output_type:
            self.camera_moving = True
        # likewise for a change of colormap selection
        colormap_type = self.vis["renderingState/colormap_choice"].read()
        if colormap_type is None:
            colormap_type = ColormapTypes.INIT
        if self.prev_colormap_type != colormap_type:
            self.camera_moving = True
        return camera_object
    def _apply_colormap(self, outputs: Dict[str, Any], colors: Optional[torch.Tensor] = None, eps=1e-6):
        """Determines which colormap to use based on the set colormap type.

        Branch order matters: explicit colormap choices are checked before the
        dtype-based DEFAULT fallbacks.

        Args:
            outputs: the output tensors for which to apply colormaps on
            colors: is only set if colormap is for semantics. Defaults to None.
            eps: epsilon to handle floating point comparisons
        Raises:
            NotImplementedError: if no branch matches the output/colormap combination.
        """
        if self.output_list:
            reformatted_output = self._process_invalid_output(self.prev_output_type)
            # default for rgb images
            if self.prev_colormap_type == ColormapTypes.DEFAULT and outputs[reformatted_output].shape[-1] == 3:
                return outputs[reformatted_output]
            # rendering depth outputs
            if self.prev_colormap_type == ColormapTypes.DEPTH or (
                self.prev_colormap_type == ColormapTypes.DEFAULT
                and outputs[reformatted_output].dtype == torch.float
                and (torch.max(outputs[reformatted_output]) - 1.0) > eps  # handle floating point arithmetic
            ):
                accumulation_str = (
                    OutputTypes.ACCUMULATION
                    if OutputTypes.ACCUMULATION in self.output_list
                    else OutputTypes.ACCUMULATION_FINE
                )
                return colormaps.apply_depth_colormap(
                    outputs[reformatted_output], accumulation=outputs[accumulation_str]
                )
            # rendering accumulation outputs
            if self.prev_colormap_type == ColormapTypes.TURBO or (
                self.prev_colormap_type == ColormapTypes.DEFAULT and outputs[reformatted_output].dtype == torch.float
            ):
                return colormaps.apply_colormap(outputs[reformatted_output])
            # rendering semantic outputs
            if self.prev_colormap_type == ColormapTypes.SEMANTIC or (
                self.prev_colormap_type == ColormapTypes.DEFAULT and outputs[reformatted_output].dtype == torch.int
            ):
                logits = outputs[reformatted_output]
                labels = torch.argmax(torch.nn.functional.softmax(logits, dim=-1), dim=-1)  # type: ignore
                assert colors is not None
                return colors[labels]
            # rendering boolean outputs
            if self.prev_colormap_type == ColormapTypes.BOOLEAN or (
                self.prev_colormap_type == ColormapTypes.DEFAULT and outputs[reformatted_output].dtype == torch.bool
            ):
                return colormaps.apply_boolean_colormap(outputs[reformatted_output])
        raise NotImplementedError
    def _send_output_to_viewer(self, outputs: Dict[str, Any], colors: Optional[torch.Tensor] = None, eps=1e-6):
        """Chooses the correct output and sends it to the viewer.

        Encodes the colormapped render as a base64 JPEG data URI and writes it
        to the viewer's ``render_img`` channel.

        Args:
            outputs: the dictionary of outputs to choose from, from the graph
            colors: is only set if colormap is for semantics. Defaults to None.
            eps: epsilon to handle floating point comparisons
        """
        if self.output_list is None:
            # first render: record the model's output names and publish them
            self.output_list = list(outputs.keys())
            viewer_output_list = list(np.copy(self.output_list))
            # remapping rgb_fine -> rgb for all cases just so that we dont have 2 of them in the options
            if OutputTypes.RGB_FINE in self.output_list:
                viewer_output_list.remove(OutputTypes.RGB_FINE)
                viewer_output_list.insert(0, OutputTypes.RGB)
            self.vis["renderingState/output_options"].write(viewer_output_list)
        reformatted_output = self._process_invalid_output(self.prev_output_type)
        # re-register colormaps and send to viewer
        if self.output_type_changed or self.prev_colormap_type == ColormapTypes.INIT:
            self.prev_colormap_type = ColormapTypes.DEFAULT
            colormap_options = [ColormapTypes.DEFAULT]
            if (
                outputs[reformatted_output].shape[-1] != 3
                and outputs[reformatted_output].dtype == torch.float
                and (torch.max(outputs[reformatted_output]) - 1.0) <= eps  # handle floating point arithmetic
            ):
                # accumulation can also include depth
                colormap_options.extend(["depth"])
            self.output_type_changed = False
            self.vis["renderingState/colormap_choice"].write(self.prev_colormap_type)
            self.vis["renderingState/colormap_options"].write(colormap_options)
        # scale to 8-bit, reorder RGB -> BGR for JPEG encoding, and ship as a data URI
        selected_output = (self._apply_colormap(outputs, colors) * 255).type(torch.uint8)
        image = selected_output[..., [2, 1, 0]].cpu().numpy()
        data = cv2.imencode(".jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 75])[1].tobytes()
        data = str("data:image/jpeg;base64," + base64.b64encode(data).decode("ascii"))
        self.vis["render_img"].write(data)
    def _update_viewer_stats(self, render_time: float, num_rays: int, image_height: int, image_width: int) -> None:
        """Calculates and populates all the rendering statistics in the viewer.

        Args:
            render_time: total time spent rendering current view
            num_rays: number of rays rendered
            image_height: resolution of the current view
            image_width: resolution of the current view
        """
        # record viewer throughput so _calculate_image_res can budget future renders
        writer.put_time(
            name=EventName.VIS_RAYS_PER_SEC, duration=num_rays / render_time, step=self.step, avg_over_steps=True
        )
        is_training = self.vis["renderingState/isTraining"].read()
        self.vis["renderingState/eval_res"].write(f"{image_height}x{image_width}px")
        if is_training is None or is_training:
            # process remaining training ETA
            self.vis["renderingState/train_eta"].write(GLOBAL_BUFFER["events"].get(EventName.ETA.value, "Starting"))
            # process ratio time spent on vis vs train
            if (
                EventName.ITER_VIS_TIME.value in GLOBAL_BUFFER["events"]
                and EventName.ITER_TRAIN_TIME.value in GLOBAL_BUFFER["events"]
            ):
                vis_time = GLOBAL_BUFFER["events"][EventName.ITER_VIS_TIME.value]["avg"]
                train_time = GLOBAL_BUFFER["events"][EventName.ITER_TRAIN_TIME.value]["avg"]
                vis_train_ratio = f"{int(vis_time / train_time * 100)}% spent on viewer"
                self.vis["renderingState/vis_train_ratio"].write(vis_train_ratio)
            else:
                self.vis["renderingState/vis_train_ratio"].write("Starting")
        else:
            # paused: all wall-clock time is attributed to the viewer
            self.vis["renderingState/train_eta"].write("Paused")
            self.vis["renderingState/vis_train_ratio"].write("100% spent on viewer")
    def _calculate_image_res(self, camera_object, is_training: bool) -> Tuple[Optional[int], Optional[int]]:
        """Calculate the maximum image resolution that can be rendered in the time budget.

        Args:
            camera_object: the camera object to use for rendering
            is_training: whether or not we are training
        Returns:
            (image_height, image_width) that fit the time budget, or (None, None)
            when training throughput is not yet known.
        """
        max_resolution = self.vis["renderingState/maxResolution"].read()
        if max_resolution:
            self.max_resolution = max_resolution
        if self.camera_moving or not is_training:
            # give the full time budget to the viewer
            target_train_util = 0
        else:
            target_train_util = self.vis["renderingState/targetTrainUtil"].read()
            if target_train_util is None:
                target_train_util = 0.9
        if EventName.TRAIN_RAYS_PER_SEC.value in GLOBAL_BUFFER["events"]:
            train_rays_per_sec = GLOBAL_BUFFER["events"][EventName.TRAIN_RAYS_PER_SEC.value]["avg"]
        elif not is_training:
            train_rays_per_sec = (
                80000  # TODO(eventually find a way to not hardcode. case where there are no prior training steps)
            )
        else:
            # no throughput information yet; caller must skip this render
            return None, None
        if EventName.VIS_RAYS_PER_SEC.value in GLOBAL_BUFFER["events"]:
            vis_rays_per_sec = GLOBAL_BUFFER["events"][EventName.VIS_RAYS_PER_SEC.value]["avg"]
        else:
            vis_rays_per_sec = train_rays_per_sec
        current_fps = self.moving_fps if self.camera_moving else self.static_fps
        # calculate number of rays that can be rendered given the target fps
        num_vis_rays = vis_rays_per_sec / current_fps * (1 - target_train_util)
        aspect_ratio = camera_object["aspect"]
        if not self.camera_moving and not is_training:
            # paused and static: render at full resolution
            image_height = self.max_resolution
        else:
            image_height = (num_vis_rays / aspect_ratio) ** 0.5
            image_height = int(round(image_height, -1))  # round to nearest multiple of 10
            image_height = min(self.max_resolution, image_height)
        image_width = int(image_height * aspect_ratio)
        if image_width > self.max_resolution:
            # clamp width too, preserving the aspect ratio
            image_width = self.max_resolution
            image_height = int(image_width / aspect_ratio)
        return image_height, image_width
def _process_invalid_output(self, output_type: str) -> str:
"""Check to see whether we are in the corner case of RGB; if still invalid, throw error
Returns correct string mapping given improperly formatted output_type.
Args:
output_type: reformatted output type
"""
if output_type == OutputTypes.INIT:
output_type = OutputTypes.RGB
# check if rgb or rgb_fine should be the case TODO: add other checks here
attempted_output_type = output_type
if output_type not in self.output_list and output_type == OutputTypes.RGB:
output_type = OutputTypes.RGB_FINE
# check if output_type is not in list
if output_type not in self.output_list:
assert (
NotImplementedError
), f"Output {attempted_output_type} not in list. Tried to reformat as {output_type} but still not found."
return output_type
    def _render_image_in_viewer(self, camera_object, graph: Model, is_training: bool) -> None:
        # pylint: disable=too-many-statements
        """
        Draw an image using the current camera pose from the viewer.
        The image is sent over a TCP connection.

        Args:
            camera_object: current camera dict read from the viewer state
            graph: current checkpoint of model
            is_training: whether training is active (affects the render time budget)
        """
        # Check that timestamp is newer than the last one; drop stale requests
        if int(camera_object["timestamp"]) < self.prev_camera_timestamp:
            return
        self.prev_camera_timestamp = int(camera_object["timestamp"])
        # check and perform output type updates
        output_type = self.vis["renderingState/output_choice"].read()
        output_type = OutputTypes.INIT if output_type is None else output_type
        self.output_type_changed = self.prev_output_type != output_type
        self.prev_output_type = output_type
        # check and perform colormap type updates
        colormap_type = self.vis["renderingState/colormap_choice"].read()
        colormap_type = ColormapTypes.INIT if colormap_type is None else colormap_type
        self.prev_colormap_type = colormap_type
        # Calculate camera pose and intrinsics
        try:
            image_height, image_width = self._calculate_image_res(camera_object, is_training)
        except ZeroDivisionError as e:
            self.vis["renderingState/log_errors"].write("Error: Screen too small; no rays intersecting scene.")
            time.sleep(0.03)  # sleep to allow buffer to reset
            print(f"Error: {e}")
            return
        if image_height is None:
            # not enough throughput information yet to size the render
            return
        intrinsics_matrix, camera_to_world_h = get_intrinsics_matrix_and_camera_to_world_h(
            camera_object, image_height=image_height
        )
        camera_to_world = camera_to_world_h[:3, :]
        # reorder rows 0,2,1 of the pose matrix
        # NOTE(review): presumably converts from the viewer's (three.js) axis
        # convention to the training frame — confirm against the viewer client
        camera_to_world = torch.stack(
            [
                camera_to_world[0, :],
                camera_to_world[2, :],
                camera_to_world[1, :],
            ],
            dim=0,
        )
        times = self.vis["renderingState/render_time"].read()
        if times is not None:
            times = torch.tensor([float(times)])
        camera = Cameras(
            fx=intrinsics_matrix[0, 0],
            fy=intrinsics_matrix[1, 1],
            cx=intrinsics_matrix[0, 2],
            cy=intrinsics_matrix[1, 2],
            camera_to_worlds=camera_to_world[None, ...],
            times=times,
        )
        camera = camera.to(graph.device)
        camera_ray_bundle = camera.generate_rays(camera_indices=0)
        graph.eval()
        # render on a worker thread while a watcher thread raises
        # IOChangeException if the viewer state changes mid-render
        check_thread = CheckThread(state=self)
        render_thread = RenderThread(state=self, graph=graph, camera_ray_bundle=camera_ray_bundle)
        check_thread.daemon = True
        render_thread.daemon = True
        with TimeWriter(None, None, write=False) as vis_t:
            check_thread.start()
            render_thread.start()
            try:
                render_thread.join()
                check_thread.join()
            except IOChangeException:
                # render was interrupted by a viewer interaction: discard it
                del camera_ray_bundle
                torch.cuda.empty_cache()
            except RuntimeError as e:
                self.vis["renderingState/log_errors"].write(
                    "Error: GPU out of memory. Reduce resolution to prevent viewer from crashing."
                )
                print(f"Error: {e}")
                del camera_ray_bundle
                torch.cuda.empty_cache()
                time.sleep(0.5)  # sleep to allow buffer to reset
        graph.train()
        outputs = render_thread.vis_outputs
        if outputs is not None:
            colors = graph.colors if hasattr(graph, "colors") else None
            self._send_output_to_viewer(outputs, colors=colors)
            self._update_viewer_stats(
                vis_t.duration, num_rays=len(camera_ray_bundle), image_height=image_height, image_width=image_width
            )
The provided code snippet includes necessary dependencies for implementing the `setup_viewer` function. Write a Python function `def setup_viewer(config: cfg.ViewerConfig, log_filename: Path)` to solve the following problem:
Sets up the viewer if enabled Args: config: the configuration to instantiate viewer
Here is the function:
def setup_viewer(config: cfg.ViewerConfig, log_filename: Path):
    """Sets up the viewer if enabled.

    Args:
        config: the configuration to instantiate viewer
        log_filename: file that the viewer bridge-server logs are written to
    Returns:
        Tuple of the created ViewerState and a list of banner messages
        containing the viewer URL.
    """
    # FIX: removed the extraction-table artifact that was fused onto the
    # return line and broke the syntax of this function.
    viewer_state = ViewerState(config, log_filename=log_filename)
    banner_messages = [f"Viewer at: {viewer_state.viewer_url}"]
    return viewer_state, banner_messages
import sys
from typing import List, Optional, Tuple
import tornado.gen
import tornado.ioloop
import tornado.web
import tornado.websocket
import tyro
import umsgpack
import zmq
import zmq.eventloop.ioloop
from pyngrok import ngrok
from zmq.eventloop.zmqstream import ZMQStream
from nerfstudio.viewer.server.state.node import find_node, get_tree, walk
from nerfstudio.viewer.server.state.state_node import StateNode
def run_viewer_bridge_server(
    zmq_port: int = 6000, websocket_port: int = 7007, ip_address: str = "127.0.0.1", use_ngrok: bool = False
):
    """Run the viewer bridge server.

    Args:
        zmq_port: port to use for zmq
        websocket_port: port to use for websocket
        ip_address: host to connect to
        use_ngrok: whether to use ngrok to expose the zmq port
    """
    if use_ngrok:
        # Open a HTTP tunnel on the default port 80
        # <NgrokTunnel: "http://<public_sub>.ngrok.io" -> "http://localhost:80">
        tunnel = ngrok.connect(addr=str(zmq_port), proto="tcp")
        print(tunnel)
    server = ZMQWebSocketBridge(zmq_port=zmq_port, websocket_port=websocket_port, ip_address=ip_address)
    print(server)
    try:
        server.run()
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the server; exit quietly.
        pass
The provided code snippet includes necessary dependencies for implementing the `entrypoint` function. Write a Python function `def entrypoint()` to solve the following problem:
The main entrypoint.
Here is the function:
def entrypoint():
    """The main entrypoint: parse CLI arguments with tyro and run the bridge server."""
    # FIX: removed the extraction-table artifact fused onto the last line.
    tyro.extras.set_accent_color("bright_yellow")
    tyro.cli(run_viewer_bridge_server)
from collections import defaultdict
from typing import Callable
class Node(defaultdict):
    """Base class for state-tree nodes.

    A thin ``defaultdict`` subclass: accessing a missing key creates a child
    via the configured default factory, which subclasses use to build trees.
    """

    def __init__(self, *args, **kwargs):
        # Construction is delegated entirely to defaultdict.
        super().__init__(*args, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `get_tree` function. Write a Python function `def get_tree(node_class: Callable) -> Callable` to solve the following problem:
Get a tree from a node class. This allows one to do tree["path"]["to"]["node"] and it will return a new node if it doesn't exist or the current node if it does.
Here is the function:
def get_tree(node_class: Callable) -> Callable:
    """
    Get a tree from a node class.
    This allows one to do tree["path"]["to"]["node"]
    and it will return a new node if it doesn't exist
    or the current node if it does.

    Args:
        node_class: Node subclass used for every node of the tree.
    Returns:
        The root node of a new auto-vivifying tree.
    """
    # FIX: removed the extraction-table artifact fused onto the return line;
    # also replaced the lambda-assigned-to-a-name (PEP 8 / E731) with a def.
    assert isinstance(node_class(), Node)

    def tree():
        # Each missing key creates a new node whose default factory is this
        # same function, yielding an arbitrarily deep tree.
        return node_class(tree)

    return tree()
import math
from typing import List, Optional, Tuple
import numpy as np
import torch
from torchtyping import TensorType
from typing_extensions import Literal
The provided code snippet includes necessary dependencies for implementing the `get_distortion_params` function. Write a Python function `def get_distortion_params( k1: float = 0.0, k2: float = 0.0, k3: float = 0.0, k4: float = 0.0, p1: float = 0.0, p2: float = 0.0, ) -> TensorType[...]` to solve the following problem:
Returns a distortion parameters matrix. Args: k1: The first radial distortion parameter. k2: The second radial distortion parameter. k3: The third radial distortion parameter. k4: The fourth radial distortion parameter. p1: The first tangential distortion parameter. p2: The second tangential distortion parameter. Returns: torch.Tensor: A distortion parameters matrix.
Here is the function:
def get_distortion_params(
    k1: float = 0.0,
    k2: float = 0.0,
    k3: float = 0.0,
    k4: float = 0.0,
    p1: float = 0.0,
    p2: float = 0.0,
) -> torch.Tensor:
    """Returns a distortion parameters matrix.

    FIX: removed the extraction-table artifact fused onto the return line and
    corrected the return annotation (a plain float tensor is returned).

    Args:
        k1: The first radial distortion parameter.
        k2: The second radial distortion parameter.
        k3: The third radial distortion parameter.
        k4: The fourth radial distortion parameter.
        p1: The first tangential distortion parameter.
        p2: The second tangential distortion parameter.
    Returns:
        torch.Tensor: A [k1, k2, k3, k4, p1, p2] distortion parameters matrix.
    """
    return torch.Tensor([k1, k2, k3, k4, p1, p2])
import math
from typing import List, Optional, Tuple
import numpy as np
import torch
from torchtyping import TensorType
from typing_extensions import Literal
def _compute_residual_and_jacobian(
    x: torch.Tensor,
    y: torch.Tensor,
    xd: torch.Tensor,
    yd: torch.Tensor,
    distortion_params: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor,]:
    """Auxiliary function of radial_and_tangential_undistort() that computes residuals and jacobians.

    Adapted from MultiNeRF:
    https://github.com/google-research/multinerf/blob/b02228160d3179300c7d499dca28cb9ca3677f32/internal/camera_utils.py#L427-L474

    Args:
        x: The updated x coordinates.
        y: The updated y coordinates.
        xd: The distorted x coordinates.
        yd: The distorted y coordinates.
        distortion_params: The distortion parameters [k1, k2, k3, k4, p1, p2].
    Returns:
        The residuals (fx, fy) and jacobians (fx_x, fx_y, fy_x, fy_y).
    """
    k1 = distortion_params[..., 0]
    k2 = distortion_params[..., 1]
    k3 = distortion_params[..., 2]
    k4 = distortion_params[..., 3]
    p1 = distortion_params[..., 4]
    p2 = distortion_params[..., 5]
    # let r(x, y) = x^2 + y^2;
    # d(x, y) = 1 + k1 * r(x, y) + k2 * r(x, y) ^2 + k3 * r(x, y)^3 +
    #           k4 * r(x, y)^4;
    r = x * x + y * y
    # Horner-style evaluation of the radial polynomial
    d = 1.0 + r * (k1 + r * (k2 + r * (k3 + r * k4)))
    # The perfect projection is:
    # xd = x * d(x, y) + 2 * p1 * x * y + p2 * (r(x, y) + 2 * x^2);
    # yd = y * d(x, y) + 2 * p2 * x * y + p1 * (r(x, y) + 2 * y^2);
    #
    # Let's define
    #
    # fx(x, y) = x * d(x, y) + 2 * p1 * x * y + p2 * (r(x, y) + 2 * x^2) - xd;
    # fy(x, y) = y * d(x, y) + 2 * p2 * x * y + p1 * (r(x, y) + 2 * y^2) - yd;
    #
    # We are looking for a solution that satisfies
    # fx(x, y) = fy(x, y) = 0;
    fx = d * x + 2 * p1 * x * y + p2 * (r + 2 * x * x) - xd
    fy = d * y + 2 * p2 * x * y + p1 * (r + 2 * y * y) - yd
    # Compute derivative of d over [x, y]
    d_r = k1 + r * (2.0 * k2 + r * (3.0 * k3 + r * 4.0 * k4))
    d_x = 2.0 * x * d_r
    d_y = 2.0 * y * d_r
    # Compute derivative of fx over x and y.
    fx_x = d + d_x * x + 2.0 * p1 * y + 6.0 * p2 * x
    fx_y = d_y * x + 2.0 * p1 * x + 2.0 * p2 * y
    # Compute derivative of fy over x and y.
    fy_x = d_x * y + 2.0 * p2 * y + 2.0 * p1 * x
    fy_y = d + d_y * y + 2.0 * p2 * x + 6.0 * p1 * y
    return fx, fy, fx_x, fx_y, fy_x, fy_y
The provided code snippet includes necessary dependencies for implementing the `radial_and_tangential_undistort` function. Write a Python function `def radial_and_tangential_undistort( coords: torch.Tensor, distortion_params: torch.Tensor, eps: float = 1e-3, max_iterations: int = 10, ) -> torch.Tensor` to solve the following problem:
Computes undistorted coords given opencv distortion parameters. Adapted from MultiNeRF https://github.com/google-research/multinerf/blob/b02228160d3179300c7d499dca28cb9ca3677f32/internal/camera_utils.py#L477-L509 Args: coords: The distorted coordinates. distortion_params: The distortion parameters [k1, k2, k3, k4, p1, p2]. eps: The epsilon for the convergence. max_iterations: The maximum number of iterations to perform. Returns: The undistorted coordinates.
Here is the function:
def radial_and_tangential_undistort(
    coords: torch.Tensor,
    distortion_params: torch.Tensor,
    eps: float = 1e-3,
    max_iterations: int = 10,
) -> torch.Tensor:
    """Computes undistorted coords given opencv distortion parameters.

    Adapted from MultiNeRF
    https://github.com/google-research/multinerf/blob/b02228160d3179300c7d499dca28cb9ca3677f32/internal/camera_utils.py#L477-L509

    FIX: removed the extraction-table artifact fused onto the return line;
    fixed the "Addapted" docstring typo.

    Args:
        coords: The distorted coordinates.
        distortion_params: The distortion parameters [k1, k2, k3, k4, p1, p2].
        eps: The epsilon for the convergence.
        max_iterations: The maximum number of iterations to perform.
    Returns:
        The undistorted coordinates.
    """
    # Initialize from the distorted point and refine with Newton's method.
    x = coords[..., 0]
    y = coords[..., 1]
    for _ in range(max_iterations):
        fx, fy, fx_x, fx_y, fy_x, fy_y = _compute_residual_and_jacobian(
            x=x, y=y, xd=coords[..., 0], yd=coords[..., 1], distortion_params=distortion_params
        )
        # Solve the 2x2 linear system J * step = -f via Cramer's rule,
        # guarding against a near-singular Jacobian with the eps threshold.
        denominator = fy_x * fx_y - fx_x * fy_y
        x_numerator = fx * fy_y - fy * fx_y
        y_numerator = fy * fx_x - fx * fy_x
        step_x = torch.where(torch.abs(denominator) > eps, x_numerator / denominator, torch.zeros_like(denominator))
        step_y = torch.where(torch.abs(denominator) > eps, y_numerator / denominator, torch.zeros_like(denominator))
        x = x + step_x
        y = y + step_y
    return torch.stack([x, y], dim=-1)
import math
from typing import List, Optional, Tuple
import numpy as np
import torch
from torchtyping import TensorType
from typing_extensions import Literal
def rotation_matrix(a: TensorType[3], b: TensorType[3]) -> TensorType[3, 3]:
    """Compute the rotation matrix that rotates vector a to vector b.

    Uses Rodrigues' formula built from the cross product of the two
    (normalized) vectors; the exactly-opposite case is handled by retrying
    with a slightly perturbed input.

    Args:
        a: The vector to rotate.
        b: The vector to rotate to.
    Returns:
        The rotation matrix.
    """
    a = a / torch.linalg.norm(a)
    b = b / torch.linalg.norm(b)
    cross = torch.cross(a, b)
    cos_angle = torch.dot(a, b)
    # If vectors are exactly opposite, we add a little noise to one of them
    if cos_angle < -1 + 1e-8:
        noise = (torch.rand(3) - 0.5) * 0.01
        return rotation_matrix(a + noise, b)
    sin_angle = torch.linalg.norm(cross)
    skew = torch.Tensor(
        [
            [0, -cross[2], cross[1]],
            [cross[2], 0, -cross[0]],
            [-cross[1], cross[0], 0],
        ]
    )
    scale = (1 - cos_angle) / (sin_angle**2 + 1e-8)
    return torch.eye(3) + skew + skew @ skew * scale
The provided code snippet includes necessary dependencies for implementing the `auto_orient_and_center_poses` function. Write a Python function `def auto_orient_and_center_poses( poses: TensorType["num_poses":..., 4, 4], method: Literal["pca", "up", "none"] = "up", center_poses: bool = True ) -> TensorType["num_poses":..., 3, 4]` to solve the following problem:
Orients and centers the poses. We provide two methods for orientation: pca and up. pca: Orient the poses so that the principal component of the points is aligned with the axes. This method works well when all of the cameras are in the same plane. up: Orient the poses so that the average up vector is aligned with the z axis. This method works well when images are not at arbitrary angles. Args: poses: The poses to orient. method: The method to use for orientation. center_poses: If True, the poses are centered around the origin. Returns: The oriented poses.
Here is the function:
def auto_orient_and_center_poses(
    poses: TensorType["num_poses":..., 4, 4], method: Literal["pca", "up", "none"] = "up", center_poses: bool = True
) -> TensorType["num_poses":..., 3, 4]:
    """Orients and centers the poses. We provide two methods for orientation: pca and up.

    pca: Orient the poses so that the principal component of the points is aligned with the axes.
        This method works well when all of the cameras are in the same plane.
    up: Orient the poses so that the average up vector is aligned with the z axis.
        This method works well when images are not at arbitrary angles.

    FIX: removed the extraction-table artifact fused onto the return line and
    documented the second return value (the original docstring omitted it).

    Args:
        poses: The poses to orient.
        method: The method to use for orientation.
        center_poses: If True, the poses are centered around the origin.
    Returns:
        Tuple of (oriented_poses, transform) where transform is the [3, 4]
        matrix that was applied to the input poses.
    """
    translation = poses[..., :3, 3]
    mean_translation = torch.mean(translation, dim=0)
    translation_diff = translation - mean_translation
    if center_poses:
        translation = mean_translation
    else:
        translation = torch.zeros_like(mean_translation)
    if method == "pca":
        # eigenvectors of the translation covariance give the principal axes
        _, eigvec = torch.linalg.eigh(translation_diff.T @ translation_diff)
        eigvec = torch.flip(eigvec, dims=(-1,))
        if torch.linalg.det(eigvec) < 0:
            # keep a right-handed coordinate frame
            eigvec[:, 2] = -eigvec[:, 2]
        transform = torch.cat([eigvec, eigvec @ -translation[..., None]], dim=-1)
        oriented_poses = transform @ poses
        if oriented_poses.mean(axis=0)[2, 1] < 0:
            oriented_poses[:, 1:3] = -1 * oriented_poses[:, 1:3]
    elif method == "up":
        # align the average camera "up" vector with +z
        up = torch.mean(poses[:, :3, 1], dim=0)
        up = up / torch.linalg.norm(up)
        rotation = rotation_matrix(up, torch.Tensor([0, 0, 1]))
        transform = torch.cat([rotation, rotation @ -translation[..., None]], dim=-1)
        oriented_poses = transform @ poses
    elif method == "none":
        # no re-orientation; optionally translate to the centroid
        transform = torch.eye(4)
        transform[:3, 3] = -translation
        transform = transform[:3, :]
        oriented_poses = transform @ poses
    return oriented_poses, transform
from typing import Any, Dict, Optional, Tuple
import numpy as np
import torch
import nerfstudio.utils.poses as pose_utils
from nerfstudio.cameras import camera_utils
from nerfstudio.cameras.camera_utils import get_interpolated_poses_many
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.viewer.server.utils import three_js_perspective_camera_focal_length
def get_interpolated_poses_many(
    poses: TensorType["num_poses", 3, 4],
    Ks: TensorType["num_poses", 3, 3],
    steps_per_transition=10,
) -> Tuple[TensorType["num_poses", 3, 4], TensorType["num_poses", 3, 3]]:
    """Return interpolated poses for many camera poses.

    BUG FIX: the original re-bound ``Ks = []`` inside the function, shadowing
    the intrinsics parameter, so ``Ks[idx]`` indexed the (initially empty)
    accumulator instead of the input intrinsics. The accumulator now has its
    own name.

    Args:
        poses: list of camera poses
        Ks: list of camera intrinsics
        steps_per_transition: number of steps per transition
    Returns:
        tuple of new poses and intrinsics
    """
    traj = []
    interpolated_ks = []
    for idx in range(poses.shape[0] - 1):
        pose_a = poses[idx]
        pose_b = poses[idx + 1]
        poses_ab = get_interpolated_poses(pose_a, pose_b, steps=steps_per_transition)
        traj += poses_ab
        interpolated_ks += get_interpolated_k(Ks[idx], Ks[idx + 1], steps_per_transition)
    return torch.stack(traj, dim=0), torch.stack(interpolated_ks, dim=0)
class Cameras(TensorDataclass):
"""Dataparser outputs for the image dataset and the ray generator.
Note: currently only supports cameras with the same principal points and types. The reason we type
the focal lengths, principal points, and image sizes as tensors is to allow for batched cameras
down the line in cases where your batches of camera data don't come from the same cameras.
If a single value is provided, it is broadcasted to all cameras.
Args:
camera_to_worlds: Camera to world matrices. Tensor of per-image c2w matrices, in [R | t] format
fx: Focal length x
fy: Focal length y
cx: Principal point x
cy: Principal point y
width: Image width
height: Image height
distortion_params: OpenCV 6 radial distortion coefficients
camera_type: Type of camera model. This will be an int corresponding to the CameraType enum.
times: Timestamps for each camera
"""
camera_to_worlds: TensorType["num_cameras":..., 3, 4]
fx: TensorType["num_cameras":..., 1]
fy: TensorType["num_cameras":..., 1]
cx: TensorType["num_cameras":..., 1]
cy: TensorType["num_cameras":..., 1]
width: TensorType["num_cameras":..., 1]
height: TensorType["num_cameras":..., 1]
distortion_params: Optional[TensorType["num_cameras":..., 6]]
camera_type: TensorType["num_cameras":..., 1]
times: Optional[TensorType["num_cameras", 1]]
def __init__(
    self,
    camera_to_worlds: TensorType["batch_c2ws":..., 3, 4],
    fx: Union[TensorType["batch_fxs":..., 1], float],
    fy: Union[TensorType["batch_fys":..., 1], float],
    cx: Union[TensorType["batch_cxs":..., 1], float],
    cy: Union[TensorType["batch_cys":..., 1], float],
    width: Optional[Union[TensorType["batch_ws":..., 1], int]] = None,
    height: Optional[Union[TensorType["batch_hs":..., 1], int]] = None,
    distortion_params: Optional[TensorType["batch_dist_params":..., 6]] = None,
    camera_type: Optional[
        Union[
            TensorType["batch_cam_types":..., 1],
            int,
            List[CameraType],
            CameraType,
        ]
    ] = CameraType.PERSPECTIVE,
    times: Optional[TensorType["num_cameras"]] = None,
):
    """Initializes the Cameras object.

    Note on Input Tensor Dimensions: All of these tensors have items of dimensions TensorType[3, 4]
    (in the case of the c2w matrices), TensorType[6] (in the case of distortion params), or
    TensorType[1] (in the case of the rest of the elements). The dimensions before that are
    considered the batch dimension of that tensor (batch_c2ws, batch_fxs, etc.). We will broadcast
    all the tensors to be the same batch dimension. This means you can use any combination of the
    input types in the function signature and it won't break. Your batch size for all tensors
    must be broadcastable to the same size, and the resulting number of batch dimensions will be
    the batch dimension with the largest number of dimensions.
    """
    # This will notify the tensordataclass that we have a field with more than 1 dimension
    self._field_custom_dimensions = {"camera_to_worlds": 2}
    # NOTE: device() below derives the device from camera_to_worlds, so it must be set
    # before any of the _init_get_* helpers run.
    self.camera_to_worlds = camera_to_worlds
    # fx fy calculation
    self.fx = self._init_get_fc_xy(fx, "fx")  # @dataclass's post_init will take care of broadcasting
    self.fy = self._init_get_fc_xy(fy, "fy")  # @dataclass's post_init will take care of broadcasting
    # cx cy calculation
    self.cx = self._init_get_fc_xy(cx, "cx")  # @dataclass's post_init will take care of broadcasting
    self.cy = self._init_get_fc_xy(cy, "cy")  # @dataclass's post_init will take care of broadcasting
    # Distortion Params Calculation:
    self.distortion_params = distortion_params  # @dataclass's post_init will take care of broadcasting
    # @dataclass's post_init will take care of broadcasting
    # When height/width are omitted they default to 2*cy / 2*cx, which assumes a
    # centered principal point (see _init_get_height_width).
    self.height = self._init_get_height_width(height, self.cy)
    self.width = self._init_get_height_width(width, self.cx)
    self.camera_type = self._init_get_camera_type(camera_type)
    self.times = self._init_get_times(times)
    self.__post_init__()  # This will do the dataclass post_init and broadcast all the tensors
def _init_get_fc_xy(self, fc_xy, name):
    """
    Parses the input focal length / principle point x or y and returns a tensor of the correct shape

    Only needs to make sure that we a 1 in the last dimension if it is a tensor. If it is a float, we
    just need to make it into a tensor and it will be broadcasted later in the __post_init__ function.

    Args:
        fc_xy: The focal length / principle point x or y
        name: The name of the variable. Used for error messages
    """
    if isinstance(fc_xy, float):
        # BUG FIX: the legacy ``torch.Tensor(data, device=...)`` constructor only
        # supports CPU devices and raises for CUDA; the ``torch.tensor`` factory
        # handles any device. dtype pinned to float32 to match the old behavior.
        fc_xy = torch.tensor([fc_xy], dtype=torch.float32, device=self.device)
    elif isinstance(fc_xy, torch.Tensor):
        if fc_xy.ndim == 0 or fc_xy.shape[-1] != 1:
            fc_xy = fc_xy.unsqueeze(-1)
        fc_xy = fc_xy.to(self.device)
    else:
        raise ValueError(f"{name} must be a float or tensor, got {type(fc_xy)}")
    return fc_xy
def _init_get_camera_type(
    self,
    camera_type: Union[
        TensorType["batch_cam_types":..., 1], TensorType["batch_cam_types":...], int, List[CameraType], CameraType
    ],
) -> TensorType["num_cameras":..., 1]:
    """
    Normalizes the ``camera_type`` constructor argument into an int tensor.

    Accepted forms and their conversions:
        * a single CameraType enum    -> 1-element int tensor
        * a list of CameraType enums  -> (N, 1) int tensor
        * a plain int                 -> 1-element int tensor
        * an int tensor               -> moved to this device, trailing 1-dim ensured

    Args:
        camera_type: camera_type argument from __init__()
    """
    if isinstance(camera_type, CameraType):
        return torch.tensor([camera_type.value], device=self.device)
    if isinstance(camera_type, List) and isinstance(camera_type[0], CameraType):
        values = [[c.value] for c in camera_type]
        return torch.tensor(values, device=self.device)
    if isinstance(camera_type, int):
        return torch.tensor([camera_type], device=self.device)
    if isinstance(camera_type, torch.Tensor):
        assert not torch.is_floating_point(
            camera_type
        ), f"camera_type tensor must be of type int, not: {camera_type.dtype}"
        result = camera_type.to(self.device)
        if result.ndim == 0 or result.shape[-1] != 1:
            result = result.unsqueeze(-1)
        # Mixed camera types within one batch are tolerated for now; a
        # uniformity assert used to live here and may return in the future.
        return result
    raise ValueError(
        'Invalid camera_type. Must be CameraType, List[CameraType], int, or torch.Tensor["num_cameras"]. \
            Received: '
        + str(type(camera_type))
    )
def _init_get_height_width(
    self,
    h_w: Union[TensorType["batch_hws":..., 1], TensorType["batch_hws":...], int, None],
    c_x_y: TensorType["batch_cxys":...],
) -> TensorType["num_cameras":..., 1]:
    """
    Parses the __init__() argument for height or width

    Height/Width Calculation:
    If int, first go to tensor and then broadcast to all cameras
    If tensor, broadcast to all cameras
    If none, use cx or cy * 2 (i.e. assume the principal point is centered)
    Else raise error

    Args:
        h_w: height or width argument from __init__()
        c_x_y: cx or cy for when h_w == None
    """
    if isinstance(h_w, int):
        # torch.tensor (not the legacy torch.Tensor constructor) so the device
        # argument works for non-CPU devices too.
        h_w = torch.tensor([h_w], dtype=torch.int64, device=self.device)
    elif isinstance(h_w, torch.Tensor):
        assert not torch.is_floating_point(h_w), f"height and width tensor must be of type int, not: {h_w.dtype}"
        h_w = h_w.to(torch.int64).to(self.device)
        if h_w.ndim == 0 or h_w.shape[-1] != 1:
            h_w = h_w.unsqueeze(-1)
        # NOTE: batched cameras with differing h/w are currently permitted
        # (a uniformity assert used to live here).
    elif h_w is None:
        # BUG FIX: this result used to be re-wrapped in the legacy
        # ``torch.Tensor(...)`` constructor, which silently converted the
        # int64 tensor back to float32 — inconsistent with the int dtype
        # this method guarantees in every other branch.
        h_w = (c_x_y * 2).to(torch.int64).to(self.device)
    else:
        raise ValueError("Height must be an int, tensor, or None, received: " + str(type(h_w)))
    return h_w
def _init_get_times(self, times):
    """Parses the ``times`` constructor argument.

    Args:
        times: None, or a per-camera timestamp tensor. A trailing singleton
            dimension is appended if missing.

    Returns:
        None, or the timestamp tensor on this camera's device.

    Raises:
        ValueError: if times is neither None nor a tensor.
    """
    if times is None:
        return None
    if isinstance(times, torch.Tensor):
        if times.ndim == 0 or times.shape[-1] != 1:
            times = times.unsqueeze(-1)
        # BUG FIX: the device transfer used to happen only inside the
        # unsqueeze branch, so an already-shaped tensor silently stayed on
        # its original device. Move it unconditionally.
        return times.to(self.device)
    raise ValueError(f"times must be None or a tensor, got {type(times)}")
@property
def device(self):
    """Returns the device that the camera is on.

    Restored the ``@property`` decorator: other methods read ``self.device``
    as an attribute (e.g. ``fc_xy.to(self.device)`` in _init_get_fc_xy),
    which would receive a bound method — and fail — without it.
    """
    return self.camera_to_worlds.device
@property
def image_height(self) -> TensorType["num_cameras":..., 1]:
    """Returns the height of the images.

    Restored the ``@property`` decorator: get_image_coords accesses
    ``self.image_height.view(-1)`` as an attribute.
    """
    return self.height
@property
def image_width(self) -> TensorType["num_cameras":..., 1]:
    """Returns the width of the images.

    Restored the ``@property`` decorator (accessed as an attribute in
    get_image_coords) and fixed the copy-pasted docstring, which said
    "height".
    """
    return self.width
@property
def is_jagged(self):
    """
    Returns whether or not the cameras are "jagged" (i.e. the height and widths are different, meaning that
    you cannot concatenate the image coordinate maps together)

    Restored the ``@property`` decorator: generate_rays evaluates
    ``cameras.is_jagged and coords is None``; without the decorator the bound
    method is always truthy, silently forcing the jagged code path.
    """
    h_jagged = not torch.all(self.height == self.height.view(-1)[0])
    w_jagged = not torch.all(self.width == self.width.view(-1)[0])
    return h_jagged or w_jagged
def get_image_coords(
    self, pixel_offset: float = 0.5, index: Optional[Tuple] = None
) -> TensorType["height", "width", 2]:
    """Image-plane pixel coordinates for one camera in this object.

    When no index is given, the grid is sized to the maximum height and width
    over all cameras in the batch.

    Args:
        pixel_offset: Offset for each pixel. Defaults to center of pixel (0.5)
        index: Tuple of indices into the batch dimensions of the camera. Defaults to None, which returns the 0th
            flattened camera

    Returns:
        Grid of image coordinates.
    """
    # Resolve the grid size, then build a single shared meshgrid for both cases.
    if index is None:
        grid_height = torch.max(self.image_height.view(-1))
        grid_width = torch.max(self.image_width.view(-1))
    else:
        grid_height = self.image_height[index].item()
        grid_width = self.image_width[index].item()
    rows = torch.arange(grid_height)
    cols = torch.arange(grid_width)
    grid = torch.meshgrid(rows, cols, indexing="ij")
    # Stored as (y, x) coordinates; the offset moves samples to pixel centers.
    return torch.stack(grid, dim=-1) + pixel_offset
def generate_rays(  # pylint: disable=too-many-statements
    self,
    camera_indices: Union[TensorType["num_rays":..., "num_cameras_batch_dims"], int],
    coords: Optional[TensorType["num_rays":..., 2]] = None,
    camera_opt_to_camera: Optional[TensorType["num_rays":..., 3, 4]] = None,
    distortion_params_delta: Optional[TensorType["num_rays":..., 6]] = None,
    keep_shape: Optional[bool] = None,
    disable_distortion: bool = False,
) -> RayBundle:
    """Generates rays for the given camera indices.

    This function will standardize the input arguments and then call the _generate_rays_from_coords function
    to generate the rays. Our goal is to parse the arguments and then get them into the right shape:

    - camera_indices: (num_rays:..., num_cameras_batch_dims)
    - coords: (num_rays:..., 2)
    - camera_opt_to_camera: (num_rays:..., 3, 4) or None
    - distortion_params_delta: (num_rays:..., 6) or None

    Read the docstring for _generate_rays_from_coords for more information on how we generate the rays
    after we have standardized the arguments.

    We are only concerned about different combinations of camera_indices and coords matrices, and the following
    are the 4 cases we have to deal with:

    1. isinstance(camera_indices, int) and coords == None
        - In this case we broadcast our camera_indices / coords shape (h, w, 1 / 2 respectively)
    2. isinstance(camera_indices, int) and coords != None
        - In this case, we broadcast camera_indices to the same batch dim as coords
    3. not isinstance(camera_indices, int) and coords == None
        - In this case, we will need to set coords so that it is of shape (h, w, num_rays, 2), and broadcast
            all our other args to match the new definition of num_rays := (h, w) + num_rays
    4. not isinstance(camera_indices, int) and coords != None
        - In this case, we have nothing to do, only check that the arguments are of the correct shape

    There is one more edge case we need to be careful with: when we have "jagged cameras" (ie: different heights
    and widths for each camera). This isn't problematic when we specify coords, since coords is already a tensor.
    When coords == None (ie: when we render out the whole image associated with this camera), we run into problems
    since there's no way to stack each coordinate map as all coordinate maps are all different shapes. In this case,
    we will need to flatten each individual coordinate map and concatenate them, giving us only one batch dimension,
    regardless of the number of prepended extra batch dimensions in the camera_indices tensor.

    Args:
        camera_indices: Camera indices of the flattened cameras object to generate rays for.
        coords: Coordinates of the pixels to generate rays for. If None, the full image will be rendered.
        camera_opt_to_camera: Optional transform for the camera to world matrices.
        distortion_params_delta: Optional delta for the distortion parameters.
        keep_shape: If None, then we default to the regular behavior of flattening if cameras is jagged, otherwise
            keeping dimensions. If False, we flatten at the end. If True, then we keep the shape of the
            camera_indices and coords tensors (if we can).
        disable_distortion: If True, disables distortion.

    Returns:
        Rays for the given camera indices and coords.
    """
    # Check the argument types to make sure they're valid and all shaped correctly
    assert isinstance(camera_indices, (torch.Tensor, int)), "camera_indices must be a tensor or int"
    assert coords is None or isinstance(coords, torch.Tensor), "coords must be a tensor or None"
    assert camera_opt_to_camera is None or isinstance(camera_opt_to_camera, torch.Tensor)
    assert distortion_params_delta is None or isinstance(distortion_params_delta, torch.Tensor)
    if isinstance(camera_indices, torch.Tensor) and isinstance(coords, torch.Tensor):
        num_rays_shape = camera_indices.shape[:-1]
        errormsg = "Batch dims of inputs must match when inputs are all tensors"
        assert coords.shape[:-1] == num_rays_shape, errormsg
        assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == num_rays_shape, errormsg
        assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == num_rays_shape, errormsg
    # If zero dimensional, we need to unsqueeze to get a batch dimension and then squeeze later
    if not self.shape:
        cameras = self.reshape((1,))
        assert torch.all(
            torch.tensor(camera_indices == 0) if isinstance(camera_indices, int) else camera_indices == 0
        ), "Can only index into single camera with no batch dimensions if index is zero"
    else:
        cameras = self
    # If the camera indices are an int, then we need to make sure that the camera batch is 1D
    if isinstance(camera_indices, int):
        assert (
            len(cameras.shape) == 1
        ), "camera_indices must be a tensor if cameras are batched with more than 1 batch dimension"
        camera_indices = torch.tensor([camera_indices], device=cameras.device)
    assert camera_indices.shape[-1] == len(
        cameras.shape
    ), "camera_indices must have shape (num_rays:..., num_cameras_batch_dims)"
    # If keep_shape is True, then we need to make sure that the camera indices in question
    # are all the same height and width and can actually be batched while maintaining the image
    # shape
    if keep_shape is True:
        assert torch.all(cameras.height[camera_indices] == cameras.height[camera_indices[0]]) and torch.all(
            cameras.width[camera_indices] == cameras.width[camera_indices[0]]
        ), "Can only keep shape if all cameras have the same height and width"
    # If the cameras don't all have same height / width, if coords is not none, we will need to generate
    # a flat list of coords for each camera and then concatenate otherwise our rays will be jagged.
    # Camera indices, camera_opt, and distortion will also need to be broadcasted accordingly which is non-trivial
    if cameras.is_jagged and coords is None and (keep_shape is None or keep_shape is False):
        # NOTE(review): camera_opt_to_camera / distortion_params_delta are not re-flattened
        # alongside coords in this branch — presumably callers pass them only together with
        # explicit coords; verify before relying on them here.
        index_dim = camera_indices.shape[-1]
        camera_indices = camera_indices.reshape(-1, index_dim)
        _coords = [cameras.get_image_coords(index=tuple(index)).reshape(-1, 2) for index in camera_indices]
        camera_indices = torch.cat(
            [index.unsqueeze(0).repeat(coords.shape[0], 1) for index, coords in zip(camera_indices, _coords)],
        )
        coords = torch.cat(_coords, dim=0)
        assert coords.shape[0] == camera_indices.shape[0]
        # Need to get the coords of each indexed camera and flatten all coordinate maps and concatenate them
    # The case where we aren't jagged && keep_shape (since otherwise coords is already set) and coords
    # is None. In this case we append (h, w) to the num_rays dimensions for all tensors. In this case,
    # each image in camera_indices has to have the same shape since otherwise we would have error'd when
    # we checked keep_shape is valid or we aren't jagged.
    if coords is None:
        index_dim = camera_indices.shape[-1]
        index = camera_indices.reshape(-1, index_dim)[0]
        coords: torch.Tensor = cameras.get_image_coords(index=tuple(index))  # (h, w, 2)
        coords = coords.reshape(coords.shape[:2] + (1,) * len(camera_indices.shape[:-1]) + (2,))  # (h, w, 1..., 2)
        coords = coords.expand(coords.shape[:2] + camera_indices.shape[:-1] + (2,))  # (h, w, num_rays, 2)
        camera_opt_to_camera = (  # (h, w, num_rays, 3, 4) or None
            camera_opt_to_camera.broadcast_to(coords.shape[:-1] + (3, 4))
            if camera_opt_to_camera is not None
            else None
        )
        distortion_params_delta = (  # (h, w, num_rays, 6) or None
            distortion_params_delta.broadcast_to(coords.shape[:-1] + (6,))
            if distortion_params_delta is not None
            else None
        )
    # If camera indices was an int or coords was none, we need to broadcast our indices along batch dims
    camera_indices = camera_indices.broadcast_to(coords.shape[:-1] + (len(cameras.shape),)).to(torch.long)
    # Checking our tensors have been standardized
    assert isinstance(coords, torch.Tensor) and isinstance(camera_indices, torch.Tensor)
    assert camera_indices.shape[-1] == len(cameras.shape)
    assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == coords.shape[:-1]
    assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == coords.shape[:-1]
    # This will do the actual work of generating the rays now that we have standardized the inputs
    # raybundle.shape == (num_rays) when done
    # pylint: disable=protected-access
    raybundle = cameras._generate_rays_from_coords(
        camera_indices, coords, camera_opt_to_camera, distortion_params_delta, disable_distortion=disable_distortion
    )
    # If we have mandated that we don't keep the shape, then we flatten
    if keep_shape is False:
        raybundle = raybundle.flatten()
    # TODO: We should have to squeeze the last dimension here if we started with zero batch dims, but never have to,
    # so there might be a rogue squeeze happening somewhere, and this may cause some unintended behaviour
    # that we haven't caught yet with tests
    return raybundle
# pylint: disable=too-many-statements
def _generate_rays_from_coords(
    self,
    camera_indices: TensorType["num_rays":..., "num_cameras_batch_dims"],
    coords: TensorType["num_rays":..., 2],
    camera_opt_to_camera: Optional[TensorType["num_rays":..., 3, 4]] = None,
    distortion_params_delta: Optional[TensorType["num_rays":..., 6]] = None,
    disable_distortion: bool = False,
) -> RayBundle:
    """Generates rays for the given camera indices and coords where self isn't jagged

    This is a fairly complex function, so let's break this down slowly.

    Shapes involved:
        - num_rays: This is your output raybundle shape. It dictates the number and shape of the rays generated
        - num_cameras_batch_dims: This is the number of dimensions of our camera

    Args:
        camera_indices: Camera indices of the flattened cameras object to generate rays for.
            The shape of this is such that indexing into camera_indices["num_rays":...] will return the
            index into each batch dimension of the camera in order to get the correct camera specified by
            "num_rays".

            Example:
                >>> cameras = Cameras(...)
                >>> cameras.shape
                    (2, 3, 4)

                >>> camera_indices = torch.tensor([0, 0, 0]) # We need an axis of length 3 since cameras.ndim == 3
                >>> camera_indices.shape
                    (3,)
                >>> coords = torch.tensor([1,1])
                >>> coords.shape
                    (2,)
                >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)
                    # This will generate a RayBundle with a single ray for the
                    # camera at cameras[0,0,0] at image coordinates (1,1), so out_rays.shape == ()
                >>> out_rays.shape
                    ()

                >>> camera_indices = torch.tensor([[0,0,0]])
                >>> camera_indices.shape
                    (1, 3)
                >>> coords = torch.tensor([[1,1]])
                >>> coords.shape
                    (1, 2)
                >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)
                    # This will generate a RayBundle with a single ray for the
                    # camera at cameras[0,0,0] at point (1,1), so out_rays.shape == (1,)
                    # since we added an extra dimension in front of camera_indices
                >>> out_rays.shape
                    (1,)

            If you want more examples, check tests/cameras/test_cameras and the function check_generate_rays_shape

            The bottom line is that for camera_indices: (num_rays:..., num_cameras_batch_dims), num_rays is the
            output shape and if you index into the output RayBundle with some indices [i:...], if you index into
            camera_indices with camera_indices[i:...] as well, you will get a 1D tensor containing the batch
            indices into the original cameras object corresponding to that ray (ie: you will get the camera
            from our batched cameras corresponding to the ray at RayBundle[i:...]).

        coords: Coordinates of the pixels to generate rays for. If None, the full image will be rendered, meaning
            height and width get prepended to the num_rays dimensions. Indexing into coords with [i:...] will
            get you the image coordinates [x, y] of that specific ray located at output RayBundle[i:...].

        camera_opt_to_camera: Optional transform for the camera to world matrices.
            In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you
            the 2D camera to world transform matrix for the camera optimization at RayBundle[i:...].

        distortion_params_delta: Optional delta for the distortion parameters.
            In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you
            the 1D tensor with the 6 distortion parameters for the camera optimization at RayBundle[i:...].

        disable_distortion: If True, disables distortion.

    Returns:
        Rays for the given camera indices and coords. RayBundle.shape == num_rays
    """
    # Make sure we're on the right devices
    camera_indices = camera_indices.to(self.device)
    coords = coords.to(self.device)
    # Checking to make sure everything is of the right shape and type
    num_rays_shape = camera_indices.shape[:-1]
    assert camera_indices.shape == num_rays_shape + (self.ndim,)
    assert coords.shape == num_rays_shape + (2,)
    assert coords.shape[-1] == 2
    assert camera_opt_to_camera is None or camera_opt_to_camera.shape == num_rays_shape + (3, 4)
    assert distortion_params_delta is None or distortion_params_delta.shape == num_rays_shape + (6,)
    # Here, we've broken our indices down along the num_cameras_batch_dims dimension allowing us to index by all
    # of our output rays at each dimension of our cameras object
    true_indices = [camera_indices[..., i] for i in range(camera_indices.shape[-1])]
    # Get all our focal lengths, principal points and make sure they are the right shapes
    y = coords[..., 0]  # (num_rays,) get rid of the last dimension
    x = coords[..., 1]  # (num_rays,) get rid of the last dimension
    fx, fy = self.fx[true_indices].squeeze(-1), self.fy[true_indices].squeeze(-1)  # (num_rays,)
    cx, cy = self.cx[true_indices].squeeze(-1), self.cy[true_indices].squeeze(-1)  # (num_rays,)
    assert (
        y.shape == num_rays_shape
        and x.shape == num_rays_shape
        and fx.shape == num_rays_shape
        and fy.shape == num_rays_shape
        and cx.shape == num_rays_shape
        and cy.shape == num_rays_shape
    ), (
        str(num_rays_shape)
        + str(y.shape)
        + str(x.shape)
        + str(fx.shape)
        + str(fy.shape)
        + str(cx.shape)
        + str(cy.shape)
    )
    # Get our image coordinates and image coordinates offset by 1 (offsets used for dx, dy calculations)
    # Also make sure the shapes are correct
    # The y-axis is negated to convert from image coordinates (y down) to camera coordinates (y up).
    coord = torch.stack([(x - cx) / fx, -(y - cy) / fy], -1)  # (num_rays, 2)
    coord_x_offset = torch.stack([(x - cx + 1) / fx, -(y - cy) / fy], -1)  # (num_rays, 2)
    coord_y_offset = torch.stack([(x - cx) / fx, -(y - cy + 1) / fy], -1)  # (num_rays, 2)
    assert (
        coord.shape == num_rays_shape + (2,)
        and coord_x_offset.shape == num_rays_shape + (2,)
        and coord_y_offset.shape == num_rays_shape + (2,)
    )
    # Stack image coordinates and image coordinates offset by 1, check shapes too
    coord_stack = torch.stack([coord, coord_x_offset, coord_y_offset], dim=0)  # (3, num_rays, 2)
    assert coord_stack.shape == (3,) + num_rays_shape + (2,)
    # Undistorts our images according to our distortion parameters
    if not disable_distortion:
        distortion_params = None
        if self.distortion_params is not None:
            distortion_params = self.distortion_params[true_indices]
            if distortion_params_delta is not None:
                distortion_params = distortion_params + distortion_params_delta
        elif distortion_params_delta is not None:
            distortion_params = distortion_params_delta
        # Do not apply distortion for equirectangular images
        if distortion_params is not None:
            mask = (self.camera_type[true_indices] != CameraType.EQUIRECTANGULAR.value).squeeze(-1)  # (num_rays)
            coord_mask = torch.stack([mask, mask, mask], dim=0)
            if mask.any():
                coord_stack[coord_mask, :] = camera_utils.radial_and_tangential_undistort(
                    coord_stack[coord_mask, :].reshape(3, -1, 2),
                    distortion_params[mask, :],
                ).reshape(-1, 2)
    # Make sure after we have undistorted our images, the shapes are still correct
    assert coord_stack.shape == (3,) + num_rays_shape + (2,)
    # Gets our directions for all our rays in camera coordinates and checks shapes at the end
    # Here, directions_stack is of shape (3, num_rays, 3)
    # directions_stack[0] is the direction for ray in camera coordinates
    # directions_stack[1] is the direction for ray in camera coordinates offset by 1 in x
    # directions_stack[2] is the direction for ray in camera coordinates offset by 1 in y
    cam_types = torch.unique(self.camera_type, sorted=False)
    directions_stack = torch.empty((3,) + num_rays_shape + (3,), device=self.device)
    if CameraType.PERSPECTIVE.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.PERSPECTIVE.value).squeeze(-1)  # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)
        directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0], mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1], mask).float()
        directions_stack[..., 2][mask] = -1.0
    if CameraType.FISHEYE.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.FISHEYE.value).squeeze(-1)  # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)
        theta = torch.sqrt(torch.sum(coord_stack**2, dim=-1))
        theta = torch.clip(theta, 0.0, math.pi)
        # NOTE(review): sin(theta)/theta is 0/0 for a ray exactly through the principal
        # point (theta == 0) — presumably never hit with the 0.5 pixel offset; verify.
        sin_theta = torch.sin(theta)
        directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0] * sin_theta / theta, mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1] * sin_theta / theta, mask).float()
        directions_stack[..., 2][mask] = -torch.masked_select(torch.cos(theta), mask)
    if CameraType.EQUIRECTANGULAR.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.EQUIRECTANGULAR.value).squeeze(-1)  # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)
        # For equirect, fx = fy = height = width/2
        # Then coord[..., 0] goes from -1 to 1 and coord[..., 1] goes from -1/2 to 1/2
        theta = -torch.pi * coord_stack[..., 0]  # minus sign for right-handed
        phi = torch.pi * (0.5 - coord_stack[..., 1])
        # use spherical in local camera coordinates (+y up, x=0 and z<0 is theta=0)
        directions_stack[..., 0][mask] = torch.masked_select(-torch.sin(theta) * torch.sin(phi), mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(torch.cos(phi), mask).float()
        directions_stack[..., 2][mask] = torch.masked_select(-torch.cos(theta) * torch.sin(phi), mask).float()
    for value in cam_types:
        if value not in [CameraType.PERSPECTIVE.value, CameraType.FISHEYE.value, CameraType.EQUIRECTANGULAR.value]:
            raise ValueError(f"Camera type {value} not supported.")
    assert directions_stack.shape == (3,) + num_rays_shape + (3,)
    c2w = self.camera_to_worlds[true_indices]
    assert c2w.shape == num_rays_shape + (3, 4)
    if camera_opt_to_camera is not None:
        c2w = pose_utils.multiply(c2w, camera_opt_to_camera)
    rotation = c2w[..., :3, :3]  # (..., 3, 3)
    assert rotation.shape == num_rays_shape + (3, 3)
    # Rotate the camera-space directions into world space.
    directions_stack = torch.sum(
        directions_stack[..., None, :] * rotation, dim=-1
    )  # (..., 1, 3) * (..., 3, 3) -> (..., 3)
    # Norm captured before normalization; [0] keeps only the un-offset rays' norms.
    directions_norm = torch.norm(directions_stack, dim=-1, keepdim=True)
    directions_norm = directions_norm[0]
    directions_stack = normalize(directions_stack, dim=-1)
    assert directions_stack.shape == (3,) + num_rays_shape + (3,)
    origins = c2w[..., :3, 3]  # (..., 3)
    assert origins.shape == num_rays_shape + (3,)
    directions = directions_stack[0]
    assert directions.shape == num_rays_shape + (3,)
    # norms of the vector going between adjacent coords, giving us dx and dy per output ray
    dx = torch.sqrt(torch.sum((directions - directions_stack[1]) ** 2, dim=-1))  # ("num_rays":...,)
    dy = torch.sqrt(torch.sum((directions - directions_stack[2]) ** 2, dim=-1))  # ("num_rays":...,)
    assert dx.shape == num_rays_shape and dy.shape == num_rays_shape
    pixel_area = (dx * dy)[..., None]  # ("num_rays":..., 1)
    assert pixel_area.shape == num_rays_shape + (1,)
    times = self.times[camera_indices, 0] if self.times is not None else None
    return RayBundle(
        origins=origins,
        directions=directions,
        pixel_area=pixel_area,
        camera_indices=camera_indices,
        directions_norm=directions_norm,
        times=times,
    )
def to_json(
    self, camera_idx: int, image: Optional[TensorType["height", "width", 2]] = None, max_size: Optional[int] = None
) -> Dict:
    """Convert a camera to a json dictionary.

    Args:
        camera_idx: Index of the camera to convert.
        image: An image in range [0, 1] that is encoded to a base64 string.
        max_size: Max size to resize the image to if present.

    Returns:
        A JSON representation of the camera
    """
    cam = self.flatten()[camera_idx]
    json_ = {
        "type": "PinholeCamera",
        "cx": cam.cx.item(),
        "cy": cam.cy.item(),
        "fx": cam.fx.item(),
        "fy": cam.fy.item(),
        "camera_to_world": self.camera_to_worlds[camera_idx].tolist(),
        "camera_index": camera_idx,
        "times": cam.times.item() if self.times is not None else None,
    }
    if image is None:
        return json_
    # Quantize to uint8, optionally resize, then JPEG-encode and embed as base64.
    image_uint8 = (image * 255).detach().type(torch.uint8)
    if max_size is not None:
        image_uint8 = image_uint8.permute(2, 0, 1)
        image_uint8 = torchvision.transforms.functional.resize(image_uint8, max_size)  # type: ignore
        image_uint8 = image_uint8.permute(1, 2, 0)
    data = cv2.imencode(".jpg", image_uint8.cpu().numpy())[1].tobytes()
    json_["image"] = str("data:image/jpeg;base64," + base64.b64encode(data).decode("ascii"))
    return json_
def get_intrinsics_matrices(self) -> TensorType["num_cameras":..., 3, 3]:
    """Returns the intrinsic matrices for each camera.

    Returns:
        Pinhole camera intrinsics matrices
    """
    K = torch.zeros((*self.shape, 3, 3), dtype=torch.float32)
    # Pinhole intrinsics: focal lengths on the diagonal, principal point in
    # the last column, homogeneous 1 in the corner.
    entries = (
        ((0, 0), self.fx),
        ((1, 1), self.fy),
        ((0, 2), self.cx),
        ((1, 2), self.cy),
    )
    for (row, col), value in entries:
        K[..., row, col] = value.squeeze(-1)
    K[..., 2, 2] = 1.0
    return K
def rescale_output_resolution(
    self, scaling_factor: Union[TensorType["num_cameras":...], TensorType["num_cameras":..., 1], float, int]
) -> None:
    """Rescale the output resolution of the cameras in place.

    Args:
        scaling_factor: Scaling factor to apply to the output resolution.
    """
    # Normalize the scaling factor into a tensor broadcastable against cx/cy.
    if isinstance(scaling_factor, (float, int)):
        scaling_factor = torch.tensor([scaling_factor]).to(self.device).broadcast_to((self.cx.shape))
    elif isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == self.shape:
        scaling_factor = scaling_factor.unsqueeze(-1)
    elif not (isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == (*self.shape, 1)):
        raise ValueError(
            f"Scaling factor must be a float, int, or a tensor of shape {self.shape} or {(*self.shape, 1)}."
        )
    # Scale intrinsics; height/width are truncated back to integer pixels.
    self.fx = self.fx * scaling_factor
    self.fy = self.fy * scaling_factor
    self.cx = self.cx * scaling_factor
    self.cy = self.cy * scaling_factor
    self.height = (self.height * scaling_factor).to(torch.int64)
    self.width = (self.width * scaling_factor).to(torch.int64)
The provided code snippet includes necessary dependencies for implementing the `get_interpolated_camera_path` function. Write a Python function `def get_interpolated_camera_path(cameras: Cameras, steps: int) -> Cameras` to solve the following problem:
Generate a camera path between two cameras. Args: cameras: Cameras object containing intrinsics of all cameras. steps: The number of steps to interpolate between the two cameras. Returns: A new set of cameras along a path.
Here is the function:
def get_interpolated_camera_path(cameras: Cameras, steps: int) -> Cameras:
    """Generate a camera path between two cameras.

    Args:
        cameras: Cameras object containing intrinsics of all cameras.
        steps: The number of steps to interpolate between the two cameras.

    Returns:
        A new set of cameras along a path.
    """
    Ks = cameras.get_intrinsics_matrices().cpu().numpy()
    # Bug fix: `camera_to_worlds` is a tensor attribute on Cameras (not a
    # method), so calling it as `cameras.camera_to_worlds()` raises TypeError.
    poses = cameras.camera_to_worlds.cpu().numpy()
    poses, Ks = get_interpolated_poses_many(poses, Ks, steps_per_transition=steps)
    # NOTE(review): cx/cy are taken from the first interpolated intrinsics
    # matrix only (Ks[0]); Cameras currently assumes shared principal points
    # across the batch, so this matches the class's documented limitation.
    cameras = Cameras(fx=Ks[:, 0, 0], fy=Ks[:, 1, 1], cx=Ks[0, 0, 2], cy=Ks[0, 1, 2], camera_to_worlds=poses)
    return cameras
162,272 | from typing import Any, Dict, Optional, Tuple
import numpy as np
import torch
import nerfstudio.utils.poses as pose_utils
from nerfstudio.cameras import camera_utils
from nerfstudio.cameras.camera_utils import get_interpolated_poses_many
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.viewer.server.utils import three_js_perspective_camera_focal_length
def viewmatrix(lookdir: np.ndarray, up: np.ndarray, position: np.ndarray) -> np.ndarray:
    """Construct lookat view matrix."""
    # Build an orthonormal camera frame from the view direction and the up hint.
    forward = normalize(lookdir)
    right = normalize(np.cross(up, forward))
    true_up = normalize(np.cross(forward, right))
    # Columns are [right | up | forward | position].
    return np.stack([right, true_up, forward, position], axis=1)
class Cameras(TensorDataclass):
"""Dataparser outputs for the image dataset and the ray generator.
Note: currently only supports cameras with the same principal points and types. The reason we type
the focal lengths, principal points, and image sizes as tensors is to allow for batched cameras
down the line in cases where your batches of camera data don't come from the same cameras.
If a single value is provided, it is broadcasted to all cameras.
Args:
camera_to_worlds: Camera to world matrices. Tensor of per-image c2w matrices, in [R | t] format
fx: Focal length x
fy: Focal length y
cx: Principal point x
cy: Principal point y
width: Image width
height: Image height
distortion_params: OpenCV 6 radial distortion coefficients
camera_type: Type of camera model. This will be an int corresponding to the CameraType enum.
times: Timestamps for each camera
"""
camera_to_worlds: TensorType["num_cameras":..., 3, 4]
fx: TensorType["num_cameras":..., 1]
fy: TensorType["num_cameras":..., 1]
cx: TensorType["num_cameras":..., 1]
cy: TensorType["num_cameras":..., 1]
width: TensorType["num_cameras":..., 1]
height: TensorType["num_cameras":..., 1]
distortion_params: Optional[TensorType["num_cameras":..., 6]]
camera_type: TensorType["num_cameras":..., 1]
times: Optional[TensorType["num_cameras", 1]]
def __init__(
self,
camera_to_worlds: TensorType["batch_c2ws":..., 3, 4],
fx: Union[TensorType["batch_fxs":..., 1], float],
fy: Union[TensorType["batch_fys":..., 1], float],
cx: Union[TensorType["batch_cxs":..., 1], float],
cy: Union[TensorType["batch_cys":..., 1], float],
width: Optional[Union[TensorType["batch_ws":..., 1], int]] = None,
height: Optional[Union[TensorType["batch_hs":..., 1], int]] = None,
distortion_params: Optional[TensorType["batch_dist_params":..., 6]] = None,
camera_type: Optional[
Union[
TensorType["batch_cam_types":..., 1],
int,
List[CameraType],
CameraType,
]
] = CameraType.PERSPECTIVE,
times: Optional[TensorType["num_cameras"]] = None,
):
"""Initializes the Cameras object.
Note on Input Tensor Dimensions: All of these tensors have items of dimensions TensorType[3, 4]
(in the case of the c2w matrices), TensorType[6] (in the case of distortion params), or
TensorType[1] (in the case of the rest of the elements). The dimensions before that are
considered the batch dimension of that tensor (batch_c2ws, batch_fxs, etc.). We will broadcast
all the tensors to be the same batch dimension. This means you can use any combination of the
input types in the function signature and it won't break. Your batch size for all tensors
must be broadcastable to the same size, and the resulting number of batch dimensions will be
the batch dimension with the largest number of dimensions.
"""
# This will notify the tensordataclass that we have a field with more than 1 dimension
self._field_custom_dimensions = {"camera_to_worlds": 2}
self.camera_to_worlds = camera_to_worlds
# fx fy calculation
self.fx = self._init_get_fc_xy(fx, "fx") # @dataclass's post_init will take care of broadcasting
self.fy = self._init_get_fc_xy(fy, "fy") # @dataclass's post_init will take care of broadcasting
# cx cy calculation
self.cx = self._init_get_fc_xy(cx, "cx") # @dataclass's post_init will take care of broadcasting
self.cy = self._init_get_fc_xy(cy, "cy") # @dataclass's post_init will take care of broadcasting
# Distortion Params Calculation:
self.distortion_params = distortion_params # @dataclass's post_init will take care of broadcasting
# @dataclass's post_init will take care of broadcasting
self.height = self._init_get_height_width(height, self.cy)
self.width = self._init_get_height_width(width, self.cx)
self.camera_type = self._init_get_camera_type(camera_type)
self.times = self._init_get_times(times)
self.__post_init__() # This will do the dataclass post_init and broadcast all the tensors
def _init_get_fc_xy(self, fc_xy, name):
"""
Parses the input focal length / principle point x or y and returns a tensor of the correct shape
Only needs to make sure that we a 1 in the last dimension if it is a tensor. If it is a float, we
just need to make it into a tensor and it will be broadcasted later in the __post_init__ function.
Args:
fc_xy: The focal length / principle point x or y
name: The name of the variable. Used for error messages
"""
if isinstance(fc_xy, float):
fc_xy = torch.Tensor([fc_xy], device=self.device)
elif isinstance(fc_xy, torch.Tensor):
if fc_xy.ndim == 0 or fc_xy.shape[-1] != 1:
fc_xy = fc_xy.unsqueeze(-1)
fc_xy = fc_xy.to(self.device)
else:
raise ValueError(f"{name} must be a float or tensor, got {type(fc_xy)}")
return fc_xy
def _init_get_camera_type(
self,
camera_type: Union[
TensorType["batch_cam_types":..., 1], TensorType["batch_cam_types":...], int, List[CameraType], CameraType
],
) -> TensorType["num_cameras":..., 1]:
"""
Parses the __init__() argument camera_type
Camera Type Calculation:
If CameraType, convert to int and then to tensor, then broadcast to all cameras
If List of CameraTypes, convert to ints and then to tensor, then broadcast to all cameras
If int, first go to tensor and then broadcast to all cameras
If tensor, broadcast to all cameras
Args:
camera_type: camera_type argument from __init__()
"""
if isinstance(camera_type, CameraType):
camera_type = torch.tensor([camera_type.value], device=self.device)
elif isinstance(camera_type, List) and isinstance(camera_type[0], CameraType):
camera_type = torch.tensor([[c.value] for c in camera_type], device=self.device)
elif isinstance(camera_type, int):
camera_type = torch.tensor([camera_type], device=self.device)
elif isinstance(camera_type, torch.Tensor):
assert not torch.is_floating_point(
camera_type
), f"camera_type tensor must be of type int, not: {camera_type.dtype}"
camera_type = camera_type.to(self.device)
if camera_type.ndim == 0 or camera_type.shape[-1] != 1:
camera_type = camera_type.unsqueeze(-1)
# assert torch.all(
# camera_type.view(-1)[0] == camera_type
# ), "Batched cameras of different camera_types will be allowed in the future."
else:
raise ValueError(
'Invalid camera_type. Must be CameraType, List[CameraType], int, or torch.Tensor["num_cameras"]. \
Received: '
+ str(type(camera_type))
)
return camera_type
def _init_get_height_width(
self,
h_w: Union[TensorType["batch_hws":..., 1], TensorType["batch_hws":...], int, None],
c_x_y: TensorType["batch_cxys":...],
) -> TensorType["num_cameras":..., 1]:
"""
Parses the __init__() argument for height or width
Height/Width Calculation:
If int, first go to tensor and then broadcast to all cameras
If tensor, broadcast to all cameras
If none, use cx or cy * 2
Else raise error
Args:
h_w: height or width argument from __init__()
c_x_y: cx or cy for when h_w == None
"""
if isinstance(h_w, int):
h_w = torch.Tensor([h_w]).to(torch.int64).to(self.device)
elif isinstance(h_w, torch.Tensor):
assert not torch.is_floating_point(h_w), f"height and width tensor must be of type int, not: {h_w.dtype}"
h_w = h_w.to(torch.int64).to(self.device)
if h_w.ndim == 0 or h_w.shape[-1] != 1:
h_w = h_w.unsqueeze(-1)
# assert torch.all(h_w == h_w.view(-1)[0]), "Batched cameras of different h, w will be allowed in the future."
elif h_w is None:
h_w = torch.Tensor((c_x_y * 2).to(torch.int64).to(self.device))
else:
raise ValueError("Height must be an int, tensor, or None, received: " + str(type(h_w)))
return h_w
def _init_get_times(self, times):
if times is None:
times = None
elif isinstance(times, torch.Tensor):
if times.ndim == 0 or times.shape[-1] != 1:
times = times.unsqueeze(-1).to(self.device)
else:
raise ValueError(f"times must be None or a tensor, got {type(times)}")
return times
def device(self):
"""Returns the device that the camera is on."""
return self.camera_to_worlds.device
def image_height(self) -> TensorType["num_cameras":..., 1]:
"""Returns the height of the images."""
return self.height
def image_width(self) -> TensorType["num_cameras":..., 1]:
"""Returns the height of the images."""
return self.width
def is_jagged(self):
"""
Returns whether or not the cameras are "jagged" (i.e. the height and widths are different, meaning that
you cannot concatenate the image coordinate maps together)
"""
h_jagged = not torch.all(self.height == self.height.view(-1)[0])
w_jagged = not torch.all(self.width == self.width.view(-1)[0])
return h_jagged or w_jagged
def get_image_coords(
self, pixel_offset: float = 0.5, index: Optional[Tuple] = None
) -> TensorType["height", "width", 2]:
"""This gets the image coordinates of one of the cameras in this object.
If no index is specified, it will return the maximum possible sized height / width image coordinate map,
by looking at the maximum height and width of all the cameras in this object.
Args:
pixel_offset: Offset for each pixel. Defaults to center of pixel (0.5)
index: Tuple of indices into the batch dimensions of the camera. Defaults to None, which returns the 0th
flattened camera
Returns:
Grid of image coordinates.
"""
if index is None:
image_height = torch.max(self.image_height.view(-1))
image_width = torch.max(self.image_width.view(-1))
image_coords = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing="ij")
image_coords = torch.stack(image_coords, dim=-1) + pixel_offset # stored as (y, x) coordinates
else:
image_height = self.image_height[index].item()
image_width = self.image_width[index].item()
image_coords = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing="ij")
image_coords = torch.stack(image_coords, dim=-1) + pixel_offset # stored as (y, x) coordinates
return image_coords
def generate_rays( # pylint: disable=too-many-statements
self,
camera_indices: Union[TensorType["num_rays":..., "num_cameras_batch_dims"], int],
coords: Optional[TensorType["num_rays":..., 2]] = None,
camera_opt_to_camera: Optional[TensorType["num_rays":..., 3, 4]] = None,
distortion_params_delta: Optional[TensorType["num_rays":..., 6]] = None,
keep_shape: Optional[bool] = None,
disable_distortion: bool = False,
) -> RayBundle:
"""Generates rays for the given camera indices.
This function will standardize the input arguments and then call the _generate_rays_from_coords function
to generate the rays. Our goal is to parse the arguments and then get them into the right shape:
- camera_indices: (num_rays:..., num_cameras_batch_dims)
- coords: (num_rays:..., 2)
- camera_opt_to_camera: (num_rays:..., 3, 4) or None
- distortion_params_delta: (num_rays:..., 6) or None
Read the docstring for _generate_rays_from_coords for more information on how we generate the rays
after we have standardized the arguments.
We are only concerned about different combinations of camera_indices and coords matrices, and the following
are the 4 cases we have to deal with:
1. isinstance(camera_indices, int) and coords == None
- In this case we broadcast our camera_indices / coords shape (h, w, 1 / 2 respectively)
2. isinstance(camera_indices, int) and coords != None
- In this case, we broadcast camera_indices to the same batch dim as coords
3. not isinstance(camera_indices, int) and coords == None
- In this case, we will need to set coords so that it is of shape (h, w, num_rays, 2), and broadcast
all our other args to match the new definition of num_rays := (h, w) + num_rays
4. not isinstance(camera_indices, int) and coords != None
- In this case, we have nothing to do, only check that the arguments are of the correct shape
There is one more edge case we need to be careful with: when we have "jagged cameras" (ie: different heights
and widths for each camera). This isn't problematic when we specify coords, since coords is already a tensor.
When coords == None (ie: when we render out the whole image associated with this camera), we run into problems
since there's no way to stack each coordinate map as all coordinate maps are all different shapes. In this case,
we will need to flatten each individual coordinate map and concatenate them, giving us only one batch dimension,
regaurdless of the number of prepended extra batch dimensions in the camera_indices tensor.
Args:
camera_indices: Camera indices of the flattened cameras object to generate rays for.
coords: Coordinates of the pixels to generate rays for. If None, the full image will be rendered.
camera_opt_to_camera: Optional transform for the camera to world matrices.
distortion_params_delta: Optional delta for the distortion parameters.
keep_shape: If None, then we default to the regular behavior of flattening if cameras is jagged, otherwise
keeping dimensions. If False, we flatten at the end. If True, then we keep the shape of the
camera_indices and coords tensors (if we can).
disable_distortion: If True, disables distortion.
Returns:
Rays for the given camera indices and coords.
"""
# Check the argument types to make sure they're valid and all shaped correctly
assert isinstance(camera_indices, (torch.Tensor, int)), "camera_indices must be a tensor or int"
assert coords is None or isinstance(coords, torch.Tensor), "coords must be a tensor or None"
assert camera_opt_to_camera is None or isinstance(camera_opt_to_camera, torch.Tensor)
assert distortion_params_delta is None or isinstance(distortion_params_delta, torch.Tensor)
if isinstance(camera_indices, torch.Tensor) and isinstance(coords, torch.Tensor):
num_rays_shape = camera_indices.shape[:-1]
errormsg = "Batch dims of inputs must match when inputs are all tensors"
assert coords.shape[:-1] == num_rays_shape, errormsg
assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == num_rays_shape, errormsg
assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == num_rays_shape, errormsg
# If zero dimensional, we need to unsqueeze to get a batch dimension and then squeeze later
if not self.shape:
cameras = self.reshape((1,))
assert torch.all(
torch.tensor(camera_indices == 0) if isinstance(camera_indices, int) else camera_indices == 0
), "Can only index into single camera with no batch dimensions if index is zero"
else:
cameras = self
# If the camera indices are an int, then we need to make sure that the camera batch is 1D
if isinstance(camera_indices, int):
assert (
len(cameras.shape) == 1
), "camera_indices must be a tensor if cameras are batched with more than 1 batch dimension"
camera_indices = torch.tensor([camera_indices], device=cameras.device)
assert camera_indices.shape[-1] == len(
cameras.shape
), "camera_indices must have shape (num_rays:..., num_cameras_batch_dims)"
# If keep_shape is True, then we need to make sure that the camera indices in question
# are all the same height and width and can actually be batched while maintaining the image
# shape
if keep_shape is True:
assert torch.all(cameras.height[camera_indices] == cameras.height[camera_indices[0]]) and torch.all(
cameras.width[camera_indices] == cameras.width[camera_indices[0]]
), "Can only keep shape if all cameras have the same height and width"
# If the cameras don't all have same height / width, if coords is not none, we will need to generate
# a flat list of coords for each camera and then concatenate otherwise our rays will be jagged.
# Camera indices, camera_opt, and distortion will also need to be broadcasted accordingly which is non-trivial
if cameras.is_jagged and coords is None and (keep_shape is None or keep_shape is False):
index_dim = camera_indices.shape[-1]
camera_indices = camera_indices.reshape(-1, index_dim)
_coords = [cameras.get_image_coords(index=tuple(index)).reshape(-1, 2) for index in camera_indices]
camera_indices = torch.cat(
[index.unsqueeze(0).repeat(coords.shape[0], 1) for index, coords in zip(camera_indices, _coords)],
)
coords = torch.cat(_coords, dim=0)
assert coords.shape[0] == camera_indices.shape[0]
# Need to get the coords of each indexed camera and flatten all coordinate maps and concatenate them
# The case where we aren't jagged && keep_shape (since otherwise coords is already set) and coords
# is None. In this case we append (h, w) to the num_rays dimensions for all tensors. In this case,
# each image in camera_indices has to have the same shape since otherwise we would have error'd when
# we checked keep_shape is valid or we aren't jagged.
if coords is None:
index_dim = camera_indices.shape[-1]
index = camera_indices.reshape(-1, index_dim)[0]
coords: torch.Tensor = cameras.get_image_coords(index=tuple(index)) # (h, w, 2)
coords = coords.reshape(coords.shape[:2] + (1,) * len(camera_indices.shape[:-1]) + (2,)) # (h, w, 1..., 2)
coords = coords.expand(coords.shape[:2] + camera_indices.shape[:-1] + (2,)) # (h, w, num_rays, 2)
camera_opt_to_camera = ( # (h, w, num_rays, 3, 4) or None
camera_opt_to_camera.broadcast_to(coords.shape[:-1] + (3, 4))
if camera_opt_to_camera is not None
else None
)
distortion_params_delta = ( # (h, w, num_rays, 6) or None
distortion_params_delta.broadcast_to(coords.shape[:-1] + (6,))
if distortion_params_delta is not None
else None
)
# If camera indices was an int or coords was none, we need to broadcast our indices along batch dims
camera_indices = camera_indices.broadcast_to(coords.shape[:-1] + (len(cameras.shape),)).to(torch.long)
# Checking our tensors have been standardized
assert isinstance(coords, torch.Tensor) and isinstance(camera_indices, torch.Tensor)
assert camera_indices.shape[-1] == len(cameras.shape)
assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == coords.shape[:-1]
assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == coords.shape[:-1]
# This will do the actual work of generating the rays now that we have standardized the inputs
# raybundle.shape == (num_rays) when done
# pylint: disable=protected-access
raybundle = cameras._generate_rays_from_coords(
camera_indices, coords, camera_opt_to_camera, distortion_params_delta, disable_distortion=disable_distortion
)
# If we have mandated that we don't keep the shape, then we flatten
if keep_shape is False:
raybundle = raybundle.flatten()
# TODO: We should have to squeeze the last dimension here if we started with zero batch dims, but never have to,
# so there might be a rogue squeeze happening somewhere, and this may cause some unintended behaviour
# that we haven't caught yet with tests
return raybundle
# pylint: disable=too-many-statements
def _generate_rays_from_coords(
self,
camera_indices: TensorType["num_rays":..., "num_cameras_batch_dims"],
coords: TensorType["num_rays":..., 2],
camera_opt_to_camera: Optional[TensorType["num_rays":..., 3, 4]] = None,
distortion_params_delta: Optional[TensorType["num_rays":..., 6]] = None,
disable_distortion: bool = False,
) -> RayBundle:
"""Generates rays for the given camera indices and coords where self isn't jagged
This is a fairly complex function, so let's break this down slowly.
Shapes involved:
- num_rays: This is your output raybundle shape. It dictates the number and shape of the rays generated
- num_cameras_batch_dims: This is the number of dimensions of our camera
Args:
camera_indices: Camera indices of the flattened cameras object to generate rays for.
The shape of this is such that indexing into camera_indices["num_rays":...] will return the
index into each batch dimension of the camera in order to get the correct camera specified by
"num_rays".
Example:
>>> cameras = Cameras(...)
>>> cameras.shape
(2, 3, 4)
>>> camera_indices = torch.tensor([0, 0, 0]) # We need an axis of length 3 since cameras.ndim == 3
>>> camera_indices.shape
(3,)
>>> coords = torch.tensor([1,1])
>>> coords.shape
(2,)
>>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)
# This will generate a RayBundle with a single ray for the
# camera at cameras[0,0,0] at image coordinates (1,1), so out_rays.shape == ()
>>> out_rays.shape
()
>>> camera_indices = torch.tensor([[0,0,0]])
>>> camera_indices.shape
(1, 3)
>>> coords = torch.tensor([[1,1]])
>>> coords.shape
(1, 2)
>>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)
# This will generate a RayBundle with a single ray for the
# camera at cameras[0,0,0] at point (1,1), so out_rays.shape == (1,)
# since we added an extra dimension in front of camera_indices
>>> out_rays.shape
(1,)
If you want more examples, check tests/cameras/test_cameras and the function check_generate_rays_shape
The bottom line is that for camera_indices: (num_rays:..., num_cameras_batch_dims), num_rays is the
output shape and if you index into the output RayBundle with some indices [i:...], if you index into
camera_indices with camera_indices[i:...] as well, you will get a 1D tensor containing the batch
indices into the original cameras object corresponding to that ray (ie: you will get the camera
from our batched cameras corresponding to the ray at RayBundle[i:...]).
coords: Coordinates of the pixels to generate rays for. If None, the full image will be rendered, meaning
height and width get prepended to the num_rays dimensions. Indexing into coords with [i:...] will
get you the image coordinates [x, y] of that specific ray located at output RayBundle[i:...].
camera_opt_to_camera: Optional transform for the camera to world matrices.
In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you
the 2D camera to world transform matrix for the camera optimization at RayBundle[i:...].
distortion_params_delta: Optional delta for the distortion parameters.
In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you
the 1D tensor with the 6 distortion parameters for the camera optimization at RayBundle[i:...].
disable_distortion: If True, disables distortion.
Returns:
Rays for the given camera indices and coords. RayBundle.shape == num_rays
"""
# Make sure we're on the right devices
camera_indices = camera_indices.to(self.device)
coords = coords.to(self.device)
# Checking to make sure everything is of the right shape and type
num_rays_shape = camera_indices.shape[:-1]
assert camera_indices.shape == num_rays_shape + (self.ndim,)
assert coords.shape == num_rays_shape + (2,)
assert coords.shape[-1] == 2
assert camera_opt_to_camera is None or camera_opt_to_camera.shape == num_rays_shape + (3, 4)
assert distortion_params_delta is None or distortion_params_delta.shape == num_rays_shape + (6,)
# Here, we've broken our indices down along the num_cameras_batch_dims dimension allowing us to index by all
# of our output rays at each dimension of our cameras object
true_indices = [camera_indices[..., i] for i in range(camera_indices.shape[-1])]
# Get all our focal lengths, principal points and make sure they are the right shapes
y = coords[..., 0] # (num_rays,) get rid of the last dimension
x = coords[..., 1] # (num_rays,) get rid of the last dimension
fx, fy = self.fx[true_indices].squeeze(-1), self.fy[true_indices].squeeze(-1) # (num_rays,)
cx, cy = self.cx[true_indices].squeeze(-1), self.cy[true_indices].squeeze(-1) # (num_rays,)
assert (
y.shape == num_rays_shape
and x.shape == num_rays_shape
and fx.shape == num_rays_shape
and fy.shape == num_rays_shape
and cx.shape == num_rays_shape
and cy.shape == num_rays_shape
), (
str(num_rays_shape)
+ str(y.shape)
+ str(x.shape)
+ str(fx.shape)
+ str(fy.shape)
+ str(cx.shape)
+ str(cy.shape)
)
# Get our image coordinates and image coordinates offset by 1 (offsets used for dx, dy calculations)
# Also make sure the shapes are correct
coord = torch.stack([(x - cx) / fx, -(y - cy) / fy], -1) # (num_rays, 2)
coord_x_offset = torch.stack([(x - cx + 1) / fx, -(y - cy) / fy], -1) # (num_rays, 2)
coord_y_offset = torch.stack([(x - cx) / fx, -(y - cy + 1) / fy], -1) # (num_rays, 2)
assert (
coord.shape == num_rays_shape + (2,)
and coord_x_offset.shape == num_rays_shape + (2,)
and coord_y_offset.shape == num_rays_shape + (2,)
)
# Stack image coordinates and image coordinates offset by 1, check shapes too
coord_stack = torch.stack([coord, coord_x_offset, coord_y_offset], dim=0) # (3, num_rays, 2)
assert coord_stack.shape == (3,) + num_rays_shape + (2,)
# Undistorts our images according to our distortion parameters
if not disable_distortion:
distortion_params = None
if self.distortion_params is not None:
distortion_params = self.distortion_params[true_indices]
if distortion_params_delta is not None:
distortion_params = distortion_params + distortion_params_delta
elif distortion_params_delta is not None:
distortion_params = distortion_params_delta
# Do not apply distortion for equirectangular images
if distortion_params is not None:
mask = (self.camera_type[true_indices] != CameraType.EQUIRECTANGULAR.value).squeeze(-1) # (num_rays)
coord_mask = torch.stack([mask, mask, mask], dim=0)
if mask.any():
coord_stack[coord_mask, :] = camera_utils.radial_and_tangential_undistort(
coord_stack[coord_mask, :].reshape(3, -1, 2),
distortion_params[mask, :],
).reshape(-1, 2)
# Make sure after we have undistorted our images, the shapes are still correct
assert coord_stack.shape == (3,) + num_rays_shape + (2,)
# Gets our directions for all our rays in camera coordinates and checks shapes at the end
# Here, directions_stack is of shape (3, num_rays, 3)
# directions_stack[0] is the direction for ray in camera coordinates
# directions_stack[1] is the direction for ray in camera coordinates offset by 1 in x
# directions_stack[2] is the direction for ray in camera coordinates offset by 1 in y
cam_types = torch.unique(self.camera_type, sorted=False)
directions_stack = torch.empty((3,) + num_rays_shape + (3,), device=self.device)
if CameraType.PERSPECTIVE.value in cam_types:
mask = (self.camera_type[true_indices] == CameraType.PERSPECTIVE.value).squeeze(-1) # (num_rays)
mask = torch.stack([mask, mask, mask], dim=0)
directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0], mask).float()
directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1], mask).float()
directions_stack[..., 2][mask] = -1.0
if CameraType.FISHEYE.value in cam_types:
mask = (self.camera_type[true_indices] == CameraType.FISHEYE.value).squeeze(-1) # (num_rays)
mask = torch.stack([mask, mask, mask], dim=0)
theta = torch.sqrt(torch.sum(coord_stack**2, dim=-1))
theta = torch.clip(theta, 0.0, math.pi)
sin_theta = torch.sin(theta)
directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0] * sin_theta / theta, mask).float()
directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1] * sin_theta / theta, mask).float()
directions_stack[..., 2][mask] = -torch.masked_select(torch.cos(theta), mask)
if CameraType.EQUIRECTANGULAR.value in cam_types:
mask = (self.camera_type[true_indices] == CameraType.EQUIRECTANGULAR.value).squeeze(-1) # (num_rays)
mask = torch.stack([mask, mask, mask], dim=0)
# For equirect, fx = fy = height = width/2
# Then coord[..., 0] goes from -1 to 1 and coord[..., 1] goes from -1/2 to 1/2
theta = -torch.pi * coord_stack[..., 0] # minus sign for right-handed
phi = torch.pi * (0.5 - coord_stack[..., 1])
# use spherical in local camera coordinates (+y up, x=0 and z<0 is theta=0)
directions_stack[..., 0][mask] = torch.masked_select(-torch.sin(theta) * torch.sin(phi), mask).float()
directions_stack[..., 1][mask] = torch.masked_select(torch.cos(phi), mask).float()
directions_stack[..., 2][mask] = torch.masked_select(-torch.cos(theta) * torch.sin(phi), mask).float()
for value in cam_types:
if value not in [CameraType.PERSPECTIVE.value, CameraType.FISHEYE.value, CameraType.EQUIRECTANGULAR.value]:
raise ValueError(f"Camera type {value} not supported.")
assert directions_stack.shape == (3,) + num_rays_shape + (3,)
c2w = self.camera_to_worlds[true_indices]
assert c2w.shape == num_rays_shape + (3, 4)
if camera_opt_to_camera is not None:
c2w = pose_utils.multiply(c2w, camera_opt_to_camera)
rotation = c2w[..., :3, :3] # (..., 3, 3)
assert rotation.shape == num_rays_shape + (3, 3)
directions_stack = torch.sum(
directions_stack[..., None, :] * rotation, dim=-1
) # (..., 1, 3) * (..., 3, 3) -> (..., 3)
directions_norm = torch.norm(directions_stack, dim=-1, keepdim=True)
directions_norm = directions_norm[0]
directions_stack = normalize(directions_stack, dim=-1)
assert directions_stack.shape == (3,) + num_rays_shape + (3,)
origins = c2w[..., :3, 3] # (..., 3)
assert origins.shape == num_rays_shape + (3,)
directions = directions_stack[0]
assert directions.shape == num_rays_shape + (3,)
# norms of the vector going between adjacent coords, giving us dx and dy per output ray
dx = torch.sqrt(torch.sum((directions - directions_stack[1]) ** 2, dim=-1)) # ("num_rays":...,)
dy = torch.sqrt(torch.sum((directions - directions_stack[2]) ** 2, dim=-1)) # ("num_rays":...,)
assert dx.shape == num_rays_shape and dy.shape == num_rays_shape
pixel_area = (dx * dy)[..., None] # ("num_rays":..., 1)
assert pixel_area.shape == num_rays_shape + (1,)
times = self.times[camera_indices, 0] if self.times is not None else None
return RayBundle(
origins=origins,
directions=directions,
pixel_area=pixel_area,
camera_indices=camera_indices,
directions_norm=directions_norm,
times=times,
)
def to_json(
self, camera_idx: int, image: Optional[TensorType["height", "width", 2]] = None, max_size: Optional[int] = None
) -> Dict:
"""Convert a camera to a json dictionary.
Args:
camera_idx: Index of the camera to convert.
image: An image in range [0, 1] that is encoded to a base64 string.
max_size: Max size to resize the image to if present.
Returns:
A JSON representation of the camera
"""
flattened = self.flatten()
json_ = {
"type": "PinholeCamera",
"cx": flattened[camera_idx].cx.item(),
"cy": flattened[camera_idx].cy.item(),
"fx": flattened[camera_idx].fx.item(),
"fy": flattened[camera_idx].fy.item(),
"camera_to_world": self.camera_to_worlds[camera_idx].tolist(),
"camera_index": camera_idx,
"times": flattened[camera_idx].times.item() if self.times is not None else None,
}
if image is not None:
image_uint8 = (image * 255).detach().type(torch.uint8)
if max_size is not None:
image_uint8 = image_uint8.permute(2, 0, 1)
image_uint8 = torchvision.transforms.functional.resize(image_uint8, max_size) # type: ignore
image_uint8 = image_uint8.permute(1, 2, 0)
image_uint8 = image_uint8.cpu().numpy()
data = cv2.imencode(".jpg", image_uint8)[1].tobytes()
json_["image"] = str("data:image/jpeg;base64," + base64.b64encode(data).decode("ascii"))
return json_
def get_intrinsics_matrices(self) -> TensorType["num_cameras":..., 3, 3]:
"""Returns the intrinsic matrices for each camera.
Returns:
Pinhole camera intrinsics matrices
"""
K = torch.zeros((*self.shape, 3, 3), dtype=torch.float32)
K[..., 0, 0] = self.fx.squeeze(-1)
K[..., 1, 1] = self.fy.squeeze(-1)
K[..., 0, 2] = self.cx.squeeze(-1)
K[..., 1, 2] = self.cy.squeeze(-1)
K[..., 2, 2] = 1.0
return K
def rescale_output_resolution(
    self, scaling_factor: Union[TensorType["num_cameras":...], TensorType["num_cameras":..., 1], float, int]
) -> None:
    """Rescale the output resolution of the cameras in place.

    Intrinsics (fx, fy, cx, cy) are scaled directly; height and width are scaled
    and truncated back to integers.

    Args:
        scaling_factor: Scaling factor to apply to the output resolution.

    Raises:
        ValueError: If scaling_factor is a tensor of an unsupported shape.
    """
    # Normalize the scaling factor into a tensor broadcastable against cx/cy.
    if isinstance(scaling_factor, (float, int)):
        scaling_factor = torch.tensor([scaling_factor]).to(self.device).broadcast_to((self.cx.shape))
    elif isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == self.shape:
        scaling_factor = scaling_factor.unsqueeze(-1)
    elif not (isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == (*self.shape, 1)):
        raise ValueError(
            f"Scaling factor must be a float, int, or a tensor of shape {self.shape} or {(*self.shape, 1)}."
        )
    self.fx = self.fx * scaling_factor
    self.fy = self.fy * scaling_factor
    self.cx = self.cx * scaling_factor
    self.cy = self.cy * scaling_factor
    self.height = (self.height * scaling_factor).to(torch.int64)
    self.width = (self.width * scaling_factor).to(torch.int64)
The provided code snippet includes the necessary dependencies for implementing the `get_spiral_path` function. Write a Python function `def get_spiral_path( camera: Cameras, steps: int = 30, radius: Optional[float] = None, radiuses: Optional[Tuple[float]] = None, rots: int = 2, zrate: float = 0.5, ) -> Cameras` to solve the following problem:
Returns a list of cameras forming a spiral trajectory. Args: camera: The camera to start the spiral from. steps: The number of cameras in the generated path. radius: The radius of the spiral for all xyz directions. radiuses: The list of radii for the spiral in xyz directions. rots: The number of rotations to apply to the camera. zrate: How much to change the z position of the camera. Returns: A spiral camera path.
Here is the function:
def get_spiral_path(
    camera: Cameras,
    steps: int = 30,
    radius: Optional[float] = None,
    radiuses: Optional[Tuple[float]] = None,
    rots: int = 2,
    zrate: float = 0.5,
) -> Cameras:
    """Generate a camera path that spirals around the given camera's pose.

    Args:
        camera: The camera to start the spiral from.
        steps: The number of cameras in the generated path.
        radius: The radius of the spiral for all xyz directions.
        radiuses: The list of radii for the spiral in xyz directions.
        rots: The number of rotations to apply to the camera.
        zrate: How much to change the z position of the camera.

    Returns:
        A spiral camera path.

    Raises:
        ValueError: If both radius and radiuses are given.
    """
    assert radius is not None or radiuses is not None, "Either radius or radiuses must be specified."
    assert camera.ndim == 1, "We assume only one batch dim here"

    # Resolve the per-axis spiral radii from whichever argument was supplied.
    if radius is not None and radiuses is None:
        rad = torch.tensor([radius] * 3, device=camera.device)
    elif radiuses is not None and radius is None:
        rad = torch.tensor(radiuses, device=camera.device)
    else:
        raise ValueError("Only one of radius or radiuses must be specified.")

    camera = camera.flatten()
    up = camera.camera_to_worlds[0, :3, 2]  # scene is z up
    focal = torch.min(camera.fx[0], camera.fy[0])
    target = torch.tensor([0, 0, -focal], device=camera.device)  # camera looking in -z direction
    global_pose = pose_utils.to4x4(camera.camera_to_worlds[0])

    # Build each pose in the local spiral frame, then lift it into world space.
    local_poses = []
    for theta in torch.linspace(0.0, 2.0 * torch.pi * rots, steps + 1)[:-1]:
        center = (
            torch.tensor([torch.cos(theta), -torch.sin(theta), -torch.sin(theta * zrate)], device=camera.device) * rad
        )
        lookat = center - target
        local_poses.append(pose_utils.to4x4(camera_utils.viewmatrix(lookat, up, center)))

    new_c2ws = torch.stack([torch.matmul(global_pose, pose)[:3, :4] for pose in local_poses], dim=0)

    return Cameras(
        fx=camera.fx[0],
        fy=camera.fy[0],
        cx=camera.cx[0],
        cy=camera.cy[0],
        camera_to_worlds=new_c2ws,
    )
from typing import Any, Dict, Optional, Tuple
import numpy as np
import torch
import nerfstudio.utils.poses as pose_utils
from nerfstudio.cameras import camera_utils
from nerfstudio.cameras.camera_utils import get_interpolated_poses_many
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.viewer.server.utils import three_js_perspective_camera_focal_length
class Cameras(TensorDataclass):
    """Dataparser outputs for the image dataset and the ray generator.

    Note: currently only supports cameras with the same principal points and types. The reason we type
    the focal lengths, principal points, and image sizes as tensors is to allow for batched cameras
    down the line in cases where your batches of camera data don't come from the same cameras.

    If a single value is provided, it is broadcasted to all cameras.

    Args:
        camera_to_worlds: Camera to world matrices. Tensor of per-image c2w matrices, in [R | t] format
        fx: Focal length x
        fy: Focal length y
        cx: Principal point x
        cy: Principal point y
        width: Image width
        height: Image height
        distortion_params: OpenCV 6 radial distortion coefficients
        camera_type: Type of camera model. This will be an int corresponding to the CameraType enum.
        times: Timestamps for each camera
    """

    # NOTE(review): annotated class attributes; broadcasting across the leading
    # "num_cameras" batch dims appears to be handled by __post_init__ (see __init__'s
    # final call) — confirm against the TensorDataclass base class.
    camera_to_worlds: TensorType["num_cameras":..., 3, 4]
    fx: TensorType["num_cameras":..., 1]
    fy: TensorType["num_cameras":..., 1]
    cx: TensorType["num_cameras":..., 1]
    cy: TensorType["num_cameras":..., 1]
    width: TensorType["num_cameras":..., 1]
    height: TensorType["num_cameras":..., 1]
    distortion_params: Optional[TensorType["num_cameras":..., 6]]
    camera_type: TensorType["num_cameras":..., 1]
    times: Optional[TensorType["num_cameras", 1]]
def __init__(
    self,
    camera_to_worlds: TensorType["batch_c2ws":..., 3, 4],
    fx: Union[TensorType["batch_fxs":..., 1], float],
    fy: Union[TensorType["batch_fys":..., 1], float],
    cx: Union[TensorType["batch_cxs":..., 1], float],
    cy: Union[TensorType["batch_cys":..., 1], float],
    width: Optional[Union[TensorType["batch_ws":..., 1], int]] = None,
    height: Optional[Union[TensorType["batch_hs":..., 1], int]] = None,
    distortion_params: Optional[TensorType["batch_dist_params":..., 6]] = None,
    camera_type: Optional[
        Union[
            TensorType["batch_cam_types":..., 1],
            int,
            List[CameraType],
            CameraType,
        ]
    ] = CameraType.PERSPECTIVE,
    times: Optional[TensorType["num_cameras"]] = None,
):
    """Initializes the Cameras object.

    Note on Input Tensor Dimensions: All of these tensors have items of dimensions TensorType[3, 4]
    (in the case of the c2w matrices), TensorType[6] (in the case of distortion params), or
    TensorType[1] (in the case of the rest of the elements). The dimensions before that are
    considered the batch dimension of that tensor (batch_c2ws, batch_fxs, etc.). We will broadcast
    all the tensors to be the same batch dimension. This means you can use any combination of the
    input types in the function signature and it won't break. Your batch size for all tensors
    must be broadcastable to the same size, and the resulting number of batch dimensions will be
    the batch dimension with the largest number of dimensions.
    """
    # This will notify the tensordataclass that we have a field with more than 1 dimension
    self._field_custom_dimensions = {"camera_to_worlds": 2}
    self.camera_to_worlds = camera_to_worlds
    # fx fy calculation
    self.fx = self._init_get_fc_xy(fx, "fx")  # @dataclass's post_init will take care of broadcasting
    self.fy = self._init_get_fc_xy(fy, "fy")  # @dataclass's post_init will take care of broadcasting
    # cx cy calculation
    self.cx = self._init_get_fc_xy(cx, "cx")  # @dataclass's post_init will take care of broadcasting
    self.cy = self._init_get_fc_xy(cy, "cy")  # @dataclass's post_init will take care of broadcasting
    # Distortion Params Calculation:
    self.distortion_params = distortion_params  # @dataclass's post_init will take care of broadcasting
    # @dataclass's post_init will take care of broadcasting
    # When height/width are omitted they default to cy*2 / cx*2 (see _init_get_height_width).
    self.height = self._init_get_height_width(height, self.cy)
    self.width = self._init_get_height_width(width, self.cx)
    self.camera_type = self._init_get_camera_type(camera_type)
    self.times = self._init_get_times(times)
    self.__post_init__()  # This will do the dataclass post_init and broadcast all the tensors
def _init_get_fc_xy(self, fc_xy, name):
    """
    Parses the input focal length / principle point x or y and returns a tensor of the correct shape.

    Only needs to make sure that we have a 1 in the last dimension if it is a tensor. If it is a float, we
    just need to make it into a tensor and it will be broadcasted later in the __post_init__ function.

    Args:
        fc_xy: The focal length / principle point x or y
        name: The name of the variable. Used for error messages

    Returns:
        The input as a tensor on this object's device, with a trailing singleton dimension.

    Raises:
        ValueError: If fc_xy is neither a float nor a tensor.
    """
    if isinstance(fc_xy, float):
        # BUGFIX: use the torch.tensor factory, not the legacy torch.Tensor
        # constructor -- torch.Tensor's `device` argument only accepts a legacy
        # device *type* and raises for specs such as "cuda:0". torch.tensor
        # produces the same float32 one-element tensor on any device.
        fc_xy = torch.tensor([fc_xy], device=self.device)
    elif isinstance(fc_xy, torch.Tensor):
        if fc_xy.ndim == 0 or fc_xy.shape[-1] != 1:
            fc_xy = fc_xy.unsqueeze(-1)
        fc_xy = fc_xy.to(self.device)
    else:
        raise ValueError(f"{name} must be a float or tensor, got {type(fc_xy)}")
    return fc_xy
def _init_get_camera_type(
    self,
    camera_type: Union[
        TensorType["batch_cam_types":..., 1], TensorType["batch_cam_types":...], int, List[CameraType], CameraType
    ],
) -> TensorType["num_cameras":..., 1]:
    """
    Normalize the __init__() camera_type argument to an integer tensor.

    Accepted forms (each is broadcast to all cameras later by __post_init__):
        - CameraType: converted to a one-element int tensor
        - List[CameraType]: converted to a column tensor of the enum values
        - int: converted to a one-element tensor
        - torch.Tensor: moved to this device; a trailing singleton dim is ensured

    Args:
        camera_type: camera_type argument from __init__()
    """
    if isinstance(camera_type, CameraType):
        return torch.tensor([camera_type.value], device=self.device)
    if isinstance(camera_type, List) and isinstance(camera_type[0], CameraType):
        return torch.tensor([[c.value] for c in camera_type], device=self.device)
    if isinstance(camera_type, int):
        return torch.tensor([camera_type], device=self.device)
    if isinstance(camera_type, torch.Tensor):
        assert not torch.is_floating_point(
            camera_type
        ), f"camera_type tensor must be of type int, not: {camera_type.dtype}"
        camera_type = camera_type.to(self.device)
        if camera_type.ndim == 0 or camera_type.shape[-1] != 1:
            camera_type = camera_type.unsqueeze(-1)
        # assert torch.all(
        #     camera_type.view(-1)[0] == camera_type
        # ), "Batched cameras of different camera_types will be allowed in the future."
        return camera_type
    raise ValueError(
        'Invalid camera_type. Must be CameraType, List[CameraType], int, or torch.Tensor["num_cameras"]. \
Received: '
        + str(type(camera_type))
    )
def _init_get_height_width(
    self,
    h_w: Union[TensorType["batch_hws":..., 1], TensorType["batch_hws":...], int, None],
    c_x_y: TensorType["batch_cxys":...],
) -> TensorType["num_cameras":..., 1]:
    """
    Parses the __init__() argument for height or width.

    Height/Width Calculation:
        If int, first go to tensor and then broadcast to all cameras
        If tensor, broadcast to all cameras
        If none, use cx or cy * 2
        Else raise error

    Args:
        h_w: height or width argument from __init__()
        c_x_y: cx or cy for when h_w == None

    Raises:
        ValueError: If h_w is not an int, tensor, or None.
    """
    if isinstance(h_w, int):
        h_w = torch.Tensor([h_w]).to(torch.int64).to(self.device)
    elif isinstance(h_w, torch.Tensor):
        assert not torch.is_floating_point(h_w), f"height and width tensor must be of type int, not: {h_w.dtype}"
        h_w = h_w.to(torch.int64).to(self.device)
        if h_w.ndim == 0 or h_w.shape[-1] != 1:
            h_w = h_w.unsqueeze(-1)
        # assert torch.all(h_w == h_w.view(-1)[0]), "Batched cameras of different h, w will be allowed in the future."
    elif h_w is None:
        # BUGFIX: previously this was wrapped in the legacy torch.Tensor(...)
        # constructor, which re-created the tensor as float32 and silently undid
        # the explicit int64 cast -- making the default height/width dtype
        # disagree with the int64 produced by the other branches.
        h_w = (c_x_y * 2).to(torch.int64).to(self.device)
    else:
        raise ValueError("Height must be an int, tensor, or None, received: " + str(type(h_w)))
    return h_w
def _init_get_times(self, times):
    """
    Parses the __init__() times argument.

    Args:
        times: None, or a tensor of per-camera timestamps.

    Returns:
        None, or the times tensor on this object's device with a trailing singleton dim.

    Raises:
        ValueError: If times is neither None nor a tensor.
    """
    if times is None:
        return None
    if isinstance(times, torch.Tensor):
        if times.ndim == 0 or times.shape[-1] != 1:
            times = times.unsqueeze(-1)
        # BUGFIX: move to this object's device unconditionally -- previously the
        # transfer only happened on the unsqueeze branch, so a tensor that
        # already had a trailing singleton dim stayed on its original device.
        return times.to(self.device)
    raise ValueError(f"times must be None or a tensor, got {type(times)}")
def device(self):
    """Device on which this camera's tensors live."""
    # camera_to_worlds is always present, so its device is representative.
    c2w = self.camera_to_worlds
    return c2w.device
def image_height(self) -> TensorType["num_cameras":..., 1]:
    """Height of the images, one entry per camera."""
    return self.height
def image_width(self) -> TensorType["num_cameras":..., 1]:
    """Returns the width of the images."""
    return self.width
def is_jagged(self):
    """
    Returns whether or not the cameras are "jagged" (i.e. the height and widths are different, meaning that
    you cannot concatenate the image coordinate maps together)
    """
    first_height = self.height.view(-1)[0]
    first_width = self.width.view(-1)[0]
    # Jagged unless every camera shares both the first height and the first width.
    return not (torch.all(self.height == first_height) and torch.all(self.width == first_width))
def get_image_coords(
    self, pixel_offset: float = 0.5, index: Optional[Tuple] = None
) -> TensorType["height", "width", 2]:
    """This gets the image coordinates of one of the cameras in this object.

    If no index is specified, it will return the maximum possible sized height / width image coordinate map,
    by looking at the maximum height and width of all the cameras in this object.

    Args:
        pixel_offset: Offset for each pixel. Defaults to center of pixel (0.5)
        index: Tuple of indices into the batch dimensions of the camera. Defaults to None, which returns the 0th
            flattened camera

    Returns:
        Grid of image coordinates.
    """
    # Pick the grid size first, then build the coordinate map once.
    if index is None:
        # Largest height/width so the map covers every camera in the batch.
        image_height = torch.max(self.image_height.view(-1))
        image_width = torch.max(self.image_width.view(-1))
    else:
        image_height = self.image_height[index].item()
        image_width = self.image_width[index].item()
    image_coords = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing="ij")
    image_coords = torch.stack(image_coords, dim=-1) + pixel_offset  # stored as (y, x) coordinates
    return image_coords
def generate_rays(  # pylint: disable=too-many-statements
    self,
    camera_indices: Union[TensorType["num_rays":..., "num_cameras_batch_dims"], int],
    coords: Optional[TensorType["num_rays":..., 2]] = None,
    camera_opt_to_camera: Optional[TensorType["num_rays":..., 3, 4]] = None,
    distortion_params_delta: Optional[TensorType["num_rays":..., 6]] = None,
    keep_shape: Optional[bool] = None,
    disable_distortion: bool = False,
) -> RayBundle:
    """Generates rays for the given camera indices.

    This function will standardize the input arguments and then call the _generate_rays_from_coords function
    to generate the rays. Our goal is to parse the arguments and then get them into the right shape:

        - camera_indices: (num_rays:..., num_cameras_batch_dims)
        - coords: (num_rays:..., 2)
        - camera_opt_to_camera: (num_rays:..., 3, 4) or None
        - distortion_params_delta: (num_rays:..., 6) or None

    Read the docstring for _generate_rays_from_coords for more information on how we generate the rays
    after we have standardized the arguments.

    We are only concerned about different combinations of camera_indices and coords matrices, and the following
    are the 4 cases we have to deal with:

        1. isinstance(camera_indices, int) and coords == None
            - In this case we broadcast our camera_indices / coords shape (h, w, 1 / 2 respectively)
        2. isinstance(camera_indices, int) and coords != None
            - In this case, we broadcast camera_indices to the same batch dim as coords
        3. not isinstance(camera_indices, int) and coords == None
            - In this case, we will need to set coords so that it is of shape (h, w, num_rays, 2), and broadcast
                all our other args to match the new definition of num_rays := (h, w) + num_rays
        4. not isinstance(camera_indices, int) and coords != None
            - In this case, we have nothing to do, only check that the arguments are of the correct shape

    There is one more edge case we need to be careful with: when we have "jagged cameras" (ie: different heights
    and widths for each camera). This isn't problematic when we specify coords, since coords is already a tensor.
    When coords == None (ie: when we render out the whole image associated with this camera), we run into problems
    since there's no way to stack each coordinate map as all coordinate maps are all different shapes. In this case,
    we will need to flatten each individual coordinate map and concatenate them, giving us only one batch dimension,
    regaurdless of the number of prepended extra batch dimensions in the camera_indices tensor.

    Args:
        camera_indices: Camera indices of the flattened cameras object to generate rays for.
        coords: Coordinates of the pixels to generate rays for. If None, the full image will be rendered.
        camera_opt_to_camera: Optional transform for the camera to world matrices.
        distortion_params_delta: Optional delta for the distortion parameters.
        keep_shape: If None, then we default to the regular behavior of flattening if cameras is jagged, otherwise
            keeping dimensions. If False, we flatten at the end. If True, then we keep the shape of the
            camera_indices and coords tensors (if we can).
        disable_distortion: If True, disables distortion.

    Returns:
        Rays for the given camera indices and coords.
    """
    # Check the argument types to make sure they're valid and all shaped correctly
    assert isinstance(camera_indices, (torch.Tensor, int)), "camera_indices must be a tensor or int"
    assert coords is None or isinstance(coords, torch.Tensor), "coords must be a tensor or None"
    assert camera_opt_to_camera is None or isinstance(camera_opt_to_camera, torch.Tensor)
    assert distortion_params_delta is None or isinstance(distortion_params_delta, torch.Tensor)
    if isinstance(camera_indices, torch.Tensor) and isinstance(coords, torch.Tensor):
        num_rays_shape = camera_indices.shape[:-1]
        errormsg = "Batch dims of inputs must match when inputs are all tensors"
        assert coords.shape[:-1] == num_rays_shape, errormsg
        assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == num_rays_shape, errormsg
        assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == num_rays_shape, errormsg

    # If zero dimensional, we need to unsqueeze to get a batch dimension and then squeeze later
    if not self.shape:
        cameras = self.reshape((1,))
        assert torch.all(
            torch.tensor(camera_indices == 0) if isinstance(camera_indices, int) else camera_indices == 0
        ), "Can only index into single camera with no batch dimensions if index is zero"
    else:
        cameras = self

    # If the camera indices are an int, then we need to make sure that the camera batch is 1D
    if isinstance(camera_indices, int):
        assert (
            len(cameras.shape) == 1
        ), "camera_indices must be a tensor if cameras are batched with more than 1 batch dimension"
        camera_indices = torch.tensor([camera_indices], device=cameras.device)

    assert camera_indices.shape[-1] == len(
        cameras.shape
    ), "camera_indices must have shape (num_rays:..., num_cameras_batch_dims)"

    # If keep_shape is True, then we need to make sure that the camera indices in question
    # are all the same height and width and can actually be batched while maintaining the image
    # shape
    if keep_shape is True:
        assert torch.all(cameras.height[camera_indices] == cameras.height[camera_indices[0]]) and torch.all(
            cameras.width[camera_indices] == cameras.width[camera_indices[0]]
        ), "Can only keep shape if all cameras have the same height and width"

    # If the cameras don't all have same height / width, if coords is not none, we will need to generate
    # a flat list of coords for each camera and then concatenate otherwise our rays will be jagged.
    # Camera indices, camera_opt, and distortion will also need to be broadcasted accordingly which is non-trivial
    if cameras.is_jagged and coords is None and (keep_shape is None or keep_shape is False):
        index_dim = camera_indices.shape[-1]
        camera_indices = camera_indices.reshape(-1, index_dim)
        _coords = [cameras.get_image_coords(index=tuple(index)).reshape(-1, 2) for index in camera_indices]
        # Repeat each camera's index once per pixel of that camera's (flattened) coord map.
        camera_indices = torch.cat(
            [index.unsqueeze(0).repeat(coords.shape[0], 1) for index, coords in zip(camera_indices, _coords)],
        )
        coords = torch.cat(_coords, dim=0)
        assert coords.shape[0] == camera_indices.shape[0]
        # Need to get the coords of each indexed camera and flatten all coordinate maps and concatenate them

    # The case where we aren't jagged && keep_shape (since otherwise coords is already set) and coords
    # is None. In this case we append (h, w) to the num_rays dimensions for all tensors. In this case,
    # each image in camera_indices has to have the same shape since otherwise we would have error'd when
    # we checked keep_shape is valid or we aren't jagged.
    if coords is None:
        index_dim = camera_indices.shape[-1]
        index = camera_indices.reshape(-1, index_dim)[0]
        coords: torch.Tensor = cameras.get_image_coords(index=tuple(index))  # (h, w, 2)
        coords = coords.reshape(coords.shape[:2] + (1,) * len(camera_indices.shape[:-1]) + (2,))  # (h, w, 1..., 2)
        coords = coords.expand(coords.shape[:2] + camera_indices.shape[:-1] + (2,))  # (h, w, num_rays, 2)
        camera_opt_to_camera = (  # (h, w, num_rays, 3, 4) or None
            camera_opt_to_camera.broadcast_to(coords.shape[:-1] + (3, 4))
            if camera_opt_to_camera is not None
            else None
        )
        distortion_params_delta = (  # (h, w, num_rays, 6) or None
            distortion_params_delta.broadcast_to(coords.shape[:-1] + (6,))
            if distortion_params_delta is not None
            else None
        )

    # If camera indices was an int or coords was none, we need to broadcast our indices along batch dims
    camera_indices = camera_indices.broadcast_to(coords.shape[:-1] + (len(cameras.shape),)).to(torch.long)

    # Checking our tensors have been standardized
    assert isinstance(coords, torch.Tensor) and isinstance(camera_indices, torch.Tensor)
    assert camera_indices.shape[-1] == len(cameras.shape)
    assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == coords.shape[:-1]
    assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == coords.shape[:-1]

    # This will do the actual work of generating the rays now that we have standardized the inputs
    # raybundle.shape == (num_rays) when done
    # pylint: disable=protected-access
    raybundle = cameras._generate_rays_from_coords(
        camera_indices, coords, camera_opt_to_camera, distortion_params_delta, disable_distortion=disable_distortion
    )

    # If we have mandated that we don't keep the shape, then we flatten
    if keep_shape is False:
        raybundle = raybundle.flatten()

    # TODO: We should have to squeeze the last dimension here if we started with zero batch dims, but never have to,
    # so there might be a rogue squeeze happening somewhere, and this may cause some unintended behaviour
    # that we haven't caught yet with tests
    return raybundle
# pylint: disable=too-many-statements
def _generate_rays_from_coords(
    self,
    camera_indices: TensorType["num_rays":..., "num_cameras_batch_dims"],
    coords: TensorType["num_rays":..., 2],
    camera_opt_to_camera: Optional[TensorType["num_rays":..., 3, 4]] = None,
    distortion_params_delta: Optional[TensorType["num_rays":..., 6]] = None,
    disable_distortion: bool = False,
) -> RayBundle:
    """Generates rays for the given camera indices and coords where self isn't jagged

    This is a fairly complex function, so let's break this down slowly.

    Shapes involved:
        - num_rays: This is your output raybundle shape. It dictates the number and shape of the rays generated
        - num_cameras_batch_dims: This is the number of dimensions of our camera

    Args:
        camera_indices: Camera indices of the flattened cameras object to generate rays for.
            The shape of this is such that indexing into camera_indices["num_rays":...] will return the
            index into each batch dimension of the camera in order to get the correct camera specified by
            "num_rays".

            Example:
                >>> cameras = Cameras(...)
                >>> cameras.shape
                (2, 3, 4)

                >>> camera_indices = torch.tensor([0, 0, 0]) # We need an axis of length 3 since cameras.ndim == 3
                >>> camera_indices.shape
                (3,)
                >>> coords = torch.tensor([1,1])
                >>> coords.shape
                (2,)
                >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)
                # This will generate a RayBundle with a single ray for the
                # camera at cameras[0,0,0] at image coordinates (1,1), so out_rays.shape == ()
                >>> out_rays.shape
                ()

                >>> camera_indices = torch.tensor([[0,0,0]])
                >>> camera_indices.shape
                (1, 3)
                >>> coords = torch.tensor([[1,1]])
                >>> coords.shape
                (1, 2)
                >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)
                # This will generate a RayBundle with a single ray for the
                # camera at cameras[0,0,0] at point (1,1), so out_rays.shape == (1,)
                # since we added an extra dimension in front of camera_indices
                >>> out_rays.shape
                (1,)

            If you want more examples, check tests/cameras/test_cameras and the function check_generate_rays_shape

            The bottom line is that for camera_indices: (num_rays:..., num_cameras_batch_dims), num_rays is the
            output shape and if you index into the output RayBundle with some indices [i:...], if you index into
            camera_indices with camera_indices[i:...] as well, you will get a 1D tensor containing the batch
            indices into the original cameras object corresponding to that ray (ie: you will get the camera
            from our batched cameras corresponding to the ray at RayBundle[i:...]).

        coords: Coordinates of the pixels to generate rays for. If None, the full image will be rendered, meaning
            height and width get prepended to the num_rays dimensions. Indexing into coords with [i:...] will
            get you the image coordinates [x, y] of that specific ray located at output RayBundle[i:...].

        camera_opt_to_camera: Optional transform for the camera to world matrices.
            In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you
            the 2D camera to world transform matrix for the camera optimization at RayBundle[i:...].

        distortion_params_delta: Optional delta for the distortion parameters.
            In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you
            the 1D tensor with the 6 distortion parameters for the camera optimization at RayBundle[i:...].

        disable_distortion: If True, disables distortion.

    Returns:
        Rays for the given camera indices and coords. RayBundle.shape == num_rays
    """
    # Make sure we're on the right devices
    camera_indices = camera_indices.to(self.device)
    coords = coords.to(self.device)

    # Checking to make sure everything is of the right shape and type
    num_rays_shape = camera_indices.shape[:-1]
    assert camera_indices.shape == num_rays_shape + (self.ndim,)
    assert coords.shape == num_rays_shape + (2,)
    assert coords.shape[-1] == 2
    assert camera_opt_to_camera is None or camera_opt_to_camera.shape == num_rays_shape + (3, 4)
    assert distortion_params_delta is None or distortion_params_delta.shape == num_rays_shape + (6,)

    # Here, we've broken our indices down along the num_cameras_batch_dims dimension allowing us to index by all
    # of our output rays at each dimension of our cameras object
    true_indices = [camera_indices[..., i] for i in range(camera_indices.shape[-1])]

    # Get all our focal lengths, principal points and make sure they are the right shapes
    y = coords[..., 0]  # (num_rays,) get rid of the last dimension
    x = coords[..., 1]  # (num_rays,) get rid of the last dimension
    fx, fy = self.fx[true_indices].squeeze(-1), self.fy[true_indices].squeeze(-1)  # (num_rays,)
    cx, cy = self.cx[true_indices].squeeze(-1), self.cy[true_indices].squeeze(-1)  # (num_rays,)
    assert (
        y.shape == num_rays_shape
        and x.shape == num_rays_shape
        and fx.shape == num_rays_shape
        and fy.shape == num_rays_shape
        and cx.shape == num_rays_shape
        and cy.shape == num_rays_shape
    ), (
        str(num_rays_shape)
        + str(y.shape)
        + str(x.shape)
        + str(fx.shape)
        + str(fy.shape)
        + str(cx.shape)
        + str(cy.shape)
    )

    # Get our image coordinates and image coordinates offset by 1 (offsets used for dx, dy calculations)
    # Also make sure the shapes are correct
    coord = torch.stack([(x - cx) / fx, -(y - cy) / fy], -1)  # (num_rays, 2)
    coord_x_offset = torch.stack([(x - cx + 1) / fx, -(y - cy) / fy], -1)  # (num_rays, 2)
    coord_y_offset = torch.stack([(x - cx) / fx, -(y - cy + 1) / fy], -1)  # (num_rays, 2)
    assert (
        coord.shape == num_rays_shape + (2,)
        and coord_x_offset.shape == num_rays_shape + (2,)
        and coord_y_offset.shape == num_rays_shape + (2,)
    )

    # Stack image coordinates and image coordinates offset by 1, check shapes too
    coord_stack = torch.stack([coord, coord_x_offset, coord_y_offset], dim=0)  # (3, num_rays, 2)
    assert coord_stack.shape == (3,) + num_rays_shape + (2,)

    # Undistorts our images according to our distortion parameters
    if not disable_distortion:
        distortion_params = None
        if self.distortion_params is not None:
            distortion_params = self.distortion_params[true_indices]
            if distortion_params_delta is not None:
                distortion_params = distortion_params + distortion_params_delta
        elif distortion_params_delta is not None:
            distortion_params = distortion_params_delta

        # Do not apply distortion for equirectangular images
        if distortion_params is not None:
            mask = (self.camera_type[true_indices] != CameraType.EQUIRECTANGULAR.value).squeeze(-1)  # (num_rays)
            coord_mask = torch.stack([mask, mask, mask], dim=0)
            if mask.any():
                coord_stack[coord_mask, :] = camera_utils.radial_and_tangential_undistort(
                    coord_stack[coord_mask, :].reshape(3, -1, 2),
                    distortion_params[mask, :],
                ).reshape(-1, 2)

    # Make sure after we have undistorted our images, the shapes are still correct
    assert coord_stack.shape == (3,) + num_rays_shape + (2,)

    # Gets our directions for all our rays in camera coordinates and checks shapes at the end
    # Here, directions_stack is of shape (3, num_rays, 3)
    # directions_stack[0] is the direction for ray in camera coordinates
    # directions_stack[1] is the direction for ray in camera coordinates offset by 1 in x
    # directions_stack[2] is the direction for ray in camera coordinates offset by 1 in y
    cam_types = torch.unique(self.camera_type, sorted=False)
    directions_stack = torch.empty((3,) + num_rays_shape + (3,), device=self.device)
    if CameraType.PERSPECTIVE.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.PERSPECTIVE.value).squeeze(-1)  # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)
        directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0], mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1], mask).float()
        directions_stack[..., 2][mask] = -1.0

    if CameraType.FISHEYE.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.FISHEYE.value).squeeze(-1)  # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)
        theta = torch.sqrt(torch.sum(coord_stack**2, dim=-1))
        theta = torch.clip(theta, 0.0, math.pi)
        sin_theta = torch.sin(theta)
        directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0] * sin_theta / theta, mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1] * sin_theta / theta, mask).float()
        directions_stack[..., 2][mask] = -torch.masked_select(torch.cos(theta), mask)

    if CameraType.EQUIRECTANGULAR.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.EQUIRECTANGULAR.value).squeeze(-1)  # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)
        # For equirect, fx = fy = height = width/2
        # Then coord[..., 0] goes from -1 to 1 and coord[..., 1] goes from -1/2 to 1/2
        theta = -torch.pi * coord_stack[..., 0]  # minus sign for right-handed
        phi = torch.pi * (0.5 - coord_stack[..., 1])
        # use spherical in local camera coordinates (+y up, x=0 and z<0 is theta=0)
        directions_stack[..., 0][mask] = torch.masked_select(-torch.sin(theta) * torch.sin(phi), mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(torch.cos(phi), mask).float()
        directions_stack[..., 2][mask] = torch.masked_select(-torch.cos(theta) * torch.sin(phi), mask).float()

    for value in cam_types:
        if value not in [CameraType.PERSPECTIVE.value, CameraType.FISHEYE.value, CameraType.EQUIRECTANGULAR.value]:
            raise ValueError(f"Camera type {value} not supported.")

    assert directions_stack.shape == (3,) + num_rays_shape + (3,)

    c2w = self.camera_to_worlds[true_indices]
    assert c2w.shape == num_rays_shape + (3, 4)

    if camera_opt_to_camera is not None:
        c2w = pose_utils.multiply(c2w, camera_opt_to_camera)
    rotation = c2w[..., :3, :3]  # (..., 3, 3)
    assert rotation.shape == num_rays_shape + (3, 3)

    # Rotate camera-space directions into world space.
    directions_stack = torch.sum(
        directions_stack[..., None, :] * rotation, dim=-1
    )  # (..., 1, 3) * (..., 3, 3) -> (..., 3)
    # Norm of the primary (un-offset) direction, taken before normalization.
    directions_norm = torch.norm(directions_stack, dim=-1, keepdim=True)
    directions_norm = directions_norm[0]
    directions_stack = normalize(directions_stack, dim=-1)
    assert directions_stack.shape == (3,) + num_rays_shape + (3,)

    origins = c2w[..., :3, 3]  # (..., 3)
    assert origins.shape == num_rays_shape + (3,)

    directions = directions_stack[0]
    assert directions.shape == num_rays_shape + (3,)

    # norms of the vector going between adjacent coords, giving us dx and dy per output ray
    dx = torch.sqrt(torch.sum((directions - directions_stack[1]) ** 2, dim=-1))  # ("num_rays":...,)
    dy = torch.sqrt(torch.sum((directions - directions_stack[2]) ** 2, dim=-1))  # ("num_rays":...,)
    assert dx.shape == num_rays_shape and dy.shape == num_rays_shape

    pixel_area = (dx * dy)[..., None]  # ("num_rays":..., 1)
    assert pixel_area.shape == num_rays_shape + (1,)

    times = self.times[camera_indices, 0] if self.times is not None else None

    return RayBundle(
        origins=origins,
        directions=directions,
        pixel_area=pixel_area,
        camera_indices=camera_indices,
        directions_norm=directions_norm,
        times=times,
    )
def to_json(
    self, camera_idx: int, image: Optional[TensorType["height", "width", 2]] = None, max_size: Optional[int] = None
) -> Dict:
    """Serialize a single camera (and, optionally, its image) to a JSON-compatible dict.

    Args:
        camera_idx: Index of the camera to convert.
        image: An image in range [0, 1] that is encoded to a base64 string.
        max_size: Max size to resize the image to if present.

    Returns:
        A JSON representation of the camera
    """
    cam = self.flatten()[camera_idx]
    json_ = {
        "type": "PinholeCamera",
        "cx": cam.cx.item(),
        "cy": cam.cy.item(),
        "fx": cam.fx.item(),
        "fy": cam.fy.item(),
        "camera_to_world": self.camera_to_worlds[camera_idx].tolist(),
        "camera_index": camera_idx,
        "times": cam.times.item() if self.times is not None else None,
    }
    if image is not None:
        # Quantize [0, 1] floats to uint8 before JPEG encoding.
        img_u8 = (image * 255).detach().type(torch.uint8)
        if max_size is not None:
            # Resize expects channels-first, so permute around the call.
            img_u8 = torchvision.transforms.functional.resize(img_u8.permute(2, 0, 1), max_size)  # type: ignore
            img_u8 = img_u8.permute(1, 2, 0)
        encoded = cv2.imencode(".jpg", img_u8.cpu().numpy())[1].tobytes()
        json_["image"] = str("data:image/jpeg;base64," + base64.b64encode(encoded).decode("ascii"))
    return json_
def get_intrinsics_matrices(self) -> TensorType["num_cameras":..., 3, 3]:
    """Assemble a (..., 3, 3) pinhole intrinsics matrix for every camera.

    Returns:
        Pinhole camera intrinsics matrices
    """
    intrinsics = torch.zeros((*self.shape, 3, 3), dtype=torch.float32)
    # Homogeneous row first, then focal lengths on the diagonal and the
    # principal point in the last column.
    intrinsics[..., 2, 2] = 1.0
    intrinsics[..., 0, 0] = self.fx.squeeze(-1)
    intrinsics[..., 0, 2] = self.cx.squeeze(-1)
    intrinsics[..., 1, 1] = self.fy.squeeze(-1)
    intrinsics[..., 1, 2] = self.cy.squeeze(-1)
    return intrinsics
def rescale_output_resolution(
    self, scaling_factor: Union[TensorType["num_cameras":...], TensorType["num_cameras":..., 1], float, int]
) -> None:
    """Scale the camera intrinsics and image sizes in place by the given factor.

    Args:
        scaling_factor: Scaling factor to apply to the output resolution.
    """
    if isinstance(scaling_factor, (float, int)):
        factor = torch.tensor([scaling_factor]).to(self.device).broadcast_to((self.cx.shape))
    elif isinstance(scaling_factor, torch.Tensor):
        if scaling_factor.shape == self.shape:
            factor = scaling_factor.unsqueeze(-1)
        elif scaling_factor.shape == (*self.shape, 1):
            factor = scaling_factor
        else:
            raise ValueError(
                f"Scaling factor must be a float, int, or a tensor of shape {self.shape} or {(*self.shape, 1)}."
            )
    else:
        raise ValueError(
            f"Scaling factor must be a float, int, or a tensor of shape {self.shape} or {(*self.shape, 1)}."
        )
    self.fx = self.fx * factor
    self.fy = self.fy * factor
    self.cx = self.cx * factor
    self.cy = self.cy * factor
    # Image sizes stay integral after scaling (truncation, as before).
    self.height = (self.height * factor).to(torch.int64)
    self.width = (self.width * factor).to(torch.int64)
def three_js_perspective_camera_focal_length(fov: float, image_height: int):
    """Returns the focal length of a three.js perspective camera.

    Args:
        fov: the field of view of the camera in degrees.
        image_height: the height of the image in pixels.
    """
    if fov is None:
        print("Warning: fov is None, using default value")
        return 50
    # Focal length from the vertical FOV: f = (h/2) / tan(fov/2).
    half_height = image_height / 2.0
    half_fov_radians = fov * (np.pi / 180.0) / 2.0
    return half_height / np.tan(half_fov_radians)
The code above provides the dependencies required to implement the `get_path_from_json` function. Write a Python function `def get_path_from_json(camera_path: Dict[str, Any]) -> Cameras` that solves the following problem:
Take a camera path dictionary (as produced by the viewer) and return the trajectory as a `Cameras` instance. Arguments: `camera_path` — a dictionary of camera path information coming from the viewer. Returns: a `Cameras` instance describing the camera path.
Here is the function:
def get_path_from_json(camera_path: Dict[str, Any]) -> Cameras:
    """Takes a camera path dictionary and returns a trajectory as a Camera instance.

    Args:
        camera_path: A dictionary of the camera path information coming from the viewer.

    Returns:
        A Cameras instance with the camera path.
    """
    image_height = camera_path["render_height"]
    image_width = camera_path["render_width"]

    poses = []
    focal_lengths = []
    for keyframe in camera_path["camera_path"]:
        # pose: drop the homogeneous bottom row to get a 3x4 [R | t] matrix
        poses.append(torch.tensor(keyframe["camera_to_world"]).view(4, 4)[:3])
        # field of view -> focal length (same value for fx and fy)
        focal_lengths.append(three_js_perspective_camera_focal_length(keyframe["fov"], image_height))

    return Cameras(
        fx=torch.tensor(focal_lengths),
        fy=torch.tensor(focal_lengths),
        cx=image_width / 2,
        cy=image_height / 2,
        camera_to_worlds=torch.stack(poses, dim=0),
    )
from typing import Any, Dict, Optional, Tuple
import numpy as np
import torch
import nerfstudio.utils.poses as pose_utils
from nerfstudio.cameras import camera_utils
from nerfstudio.cameras.camera_utils import get_interpolated_poses_many
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.viewer.server.utils import three_js_perspective_camera_focal_length
def viewmatrix(lookdir: np.ndarray, up: np.ndarray, position: np.ndarray) -> np.ndarray:
    """Construct lookat view matrix."""
    # Orthonormal basis: forward, then right = up x forward, then the true up.
    forward = normalize(lookdir)
    right = normalize(np.cross(up, forward))
    true_up = normalize(np.cross(forward, right))
    return np.stack([right, true_up, forward, position], axis=1)
def focus_point_fn(poses: np.ndarray) -> np.ndarray:
    """Calculate nearest point to all focal axes in poses."""
    axis_dirs = poses[:, :3, 2:3]
    axis_origins = poses[:, :3, 3:4]
    # Projector onto the plane orthogonal to each viewing direction.
    proj = np.eye(3) - axis_dirs * np.transpose(axis_dirs, [0, 2, 1])
    proj_sq = np.transpose(proj, [0, 2, 1]) @ proj
    # Least-squares point minimizing summed squared distance to all axes.
    return np.linalg.inv(proj_sq.mean(0)) @ (proj_sq @ axis_origins).mean(0)[:, 0]
class Cameras(TensorDataclass):
"""Dataparser outputs for the image dataset and the ray generator.
Note: currently only supports cameras with the same principal points and types. The reason we type
the focal lengths, principal points, and image sizes as tensors is to allow for batched cameras
down the line in cases where your batches of camera data don't come from the same cameras.
If a single value is provided, it is broadcasted to all cameras.
Args:
camera_to_worlds: Camera to world matrices. Tensor of per-image c2w matrices, in [R | t] format
fx: Focal length x
fy: Focal length y
cx: Principal point x
cy: Principal point y
width: Image width
height: Image height
distortion_params: OpenCV 6 radial distortion coefficients
camera_type: Type of camera model. This will be an int corresponding to the CameraType enum.
times: Timestamps for each camera
"""
camera_to_worlds: TensorType["num_cameras":..., 3, 4]
fx: TensorType["num_cameras":..., 1]
fy: TensorType["num_cameras":..., 1]
cx: TensorType["num_cameras":..., 1]
cy: TensorType["num_cameras":..., 1]
width: TensorType["num_cameras":..., 1]
height: TensorType["num_cameras":..., 1]
distortion_params: Optional[TensorType["num_cameras":..., 6]]
camera_type: TensorType["num_cameras":..., 1]
times: Optional[TensorType["num_cameras", 1]]
def __init__(
    self,
    camera_to_worlds: TensorType["batch_c2ws":..., 3, 4],
    fx: Union[TensorType["batch_fxs":..., 1], float],
    fy: Union[TensorType["batch_fys":..., 1], float],
    cx: Union[TensorType["batch_cxs":..., 1], float],
    cy: Union[TensorType["batch_cys":..., 1], float],
    width: Optional[Union[TensorType["batch_ws":..., 1], int]] = None,
    height: Optional[Union[TensorType["batch_hs":..., 1], int]] = None,
    distortion_params: Optional[TensorType["batch_dist_params":..., 6]] = None,
    camera_type: Optional[
        Union[
            TensorType["batch_cam_types":..., 1],
            int,
            List[CameraType],
            CameraType,
        ]
    ] = CameraType.PERSPECTIVE,
    times: Optional[TensorType["num_cameras"]] = None,
):
    """Initializes the Cameras object.

    Note on Input Tensor Dimensions: All of these tensors have items of dimensions TensorType[3, 4]
    (in the case of the c2w matrices), TensorType[6] (in the case of distortion params), or
    TensorType[1] (in the case of the rest of the elements). The dimensions before that are
    considered the batch dimension of that tensor (batch_c2ws, batch_fxs, etc.). We will broadcast
    all the tensors to be the same batch dimension. This means you can use any combination of the
    input types in the function signature and it won't break. Your batch size for all tensors
    must be broadcastable to the same size, and the resulting number of batch dimensions will be
    the batch dimension with the largest number of dimensions.
    """
    # This will notify the tensordataclass that we have a field with more than 1 dimension
    self._field_custom_dimensions = {"camera_to_worlds": 2}
    self.camera_to_worlds = camera_to_worlds
    # fx fy calculation: normalize scalars/tensors into (..., 1) tensors
    self.fx = self._init_get_fc_xy(fx, "fx") # @dataclass's post_init will take care of broadcasting
    self.fy = self._init_get_fc_xy(fy, "fy") # @dataclass's post_init will take care of broadcasting
    # cx cy calculation
    self.cx = self._init_get_fc_xy(cx, "cx") # @dataclass's post_init will take care of broadcasting
    self.cy = self._init_get_fc_xy(cy, "cy") # @dataclass's post_init will take care of broadcasting
    # Distortion Params Calculation:
    self.distortion_params = distortion_params # @dataclass's post_init will take care of broadcasting
    # @dataclass's post_init will take care of broadcasting
    # When height/width are omitted they default to 2*cy / 2*cx respectively
    # (see _init_get_height_width), i.e. the principal point is assumed centered.
    self.height = self._init_get_height_width(height, self.cy)
    self.width = self._init_get_height_width(width, self.cx)
    self.camera_type = self._init_get_camera_type(camera_type)
    self.times = self._init_get_times(times)
    self.__post_init__() # This will do the dataclass post_init and broadcast all the tensors
def _init_get_fc_xy(self, fc_xy, name):
    """
    Parses the input focal length / principal point x or y and returns a tensor of the correct shape.

    Only needs to make sure that we have a 1 in the last dimension if it is a tensor. If it is a
    number, we just need to make it into a tensor and it will be broadcasted later in the
    __post_init__ function.

    Args:
        fc_xy: The focal length / principal point x or y (number or tensor)
        name: The name of the variable. Used for error messages

    Returns:
        A float tensor with a trailing singleton dimension, on this camera's device.

    Raises:
        ValueError: if fc_xy is neither a number nor a torch.Tensor.
    """
    if isinstance(fc_xy, (float, int)):
        # Use the torch.tensor factory: the legacy torch.Tensor constructor does
        # not reliably accept a `device` keyword for non-CPU devices. Accepting
        # ints here is a backward-compatible generalization (they are converted
        # to float, matching the float path's dtype).
        fc_xy = torch.tensor([float(fc_xy)], device=self.device)
    elif isinstance(fc_xy, torch.Tensor):
        if fc_xy.ndim == 0 or fc_xy.shape[-1] != 1:
            fc_xy = fc_xy.unsqueeze(-1)
        fc_xy = fc_xy.to(self.device)
    else:
        raise ValueError(f"{name} must be a float or tensor, got {type(fc_xy)}")
    return fc_xy
def _init_get_camera_type(
    self,
    camera_type: Union[
        TensorType["batch_cam_types":..., 1], TensorType["batch_cam_types":...], int, List[CameraType], CameraType
    ],
) -> TensorType["num_cameras":..., 1]:
    """
    Parses the __init__() argument camera_type

    Camera Type Calculation:
    If CameraType, convert to int and then to tensor, then broadcast to all cameras
    If List of CameraTypes, convert to ints and then to tensor, then broadcast to all cameras
    If int, first go to tensor and then broadcast to all cameras
    If tensor, broadcast to all cameras

    Args:
        camera_type: camera_type argument from __init__()
    """
    if isinstance(camera_type, CameraType):
        # Single enum member: wrap its integer value in a 1-element tensor.
        camera_type = torch.tensor([camera_type.value], device=self.device)
    elif isinstance(camera_type, List) and isinstance(camera_type[0], CameraType):
        # One enum member per camera: each becomes a length-1 row.
        # NOTE(review): only the first element's type is checked -- a mixed list
        # would raise later on `c.value`; confirm callers always pass a
        # homogeneous list.
        camera_type = torch.tensor([[c.value] for c in camera_type], device=self.device)
    elif isinstance(camera_type, int):
        camera_type = torch.tensor([camera_type], device=self.device)
    elif isinstance(camera_type, torch.Tensor):
        assert not torch.is_floating_point(
            camera_type
        ), f"camera_type tensor must be of type int, not: {camera_type.dtype}"
        camera_type = camera_type.to(self.device)
        # Guarantee a trailing singleton dimension, matching the other branches.
        if camera_type.ndim == 0 or camera_type.shape[-1] != 1:
            camera_type = camera_type.unsqueeze(-1)
        # assert torch.all(
        #     camera_type.view(-1)[0] == camera_type
        # ), "Batched cameras of different camera_types will be allowed in the future."
    else:
        raise ValueError(
            'Invalid camera_type. Must be CameraType, List[CameraType], int, or torch.Tensor["num_cameras"]. \
            Received: '
            + str(type(camera_type))
        )
    return camera_type
def _init_get_height_width(
    self,
    h_w: Union[TensorType["batch_hws":..., 1], TensorType["batch_hws":...], int, None],
    c_x_y: TensorType["batch_cxys":...],
) -> TensorType["num_cameras":..., 1]:
    """
    Parses the __init__() argument for height or width

    Height/Width Calculation:
    If int, first go to tensor and then broadcast to all cameras
    If tensor, broadcast to all cameras
    If none, use cx or cy * 2
    Else raise error

    Args:
        h_w: height or width argument from __init__()
        c_x_y: cx or cy for when h_w == None

    Returns:
        An int64 tensor on this camera's device.

    Raises:
        ValueError: if h_w is not an int, tensor, or None.
    """
    if isinstance(h_w, int):
        h_w = torch.Tensor([h_w]).to(torch.int64).to(self.device)
    elif isinstance(h_w, torch.Tensor):
        assert not torch.is_floating_point(h_w), f"height and width tensor must be of type int, not: {h_w.dtype}"
        h_w = h_w.to(torch.int64).to(self.device)
        if h_w.ndim == 0 or h_w.shape[-1] != 1:
            h_w = h_w.unsqueeze(-1)
        # assert torch.all(h_w == h_w.view(-1)[0]), "Batched cameras of different h, w will be allowed in the future."
    elif h_w is None:
        # BUG FIX: the previous code wrapped this in torch.Tensor(...), which
        # re-creates the tensor as float32 and silently undoes the int64 cast.
        # Keep the tensor as-is so this branch matches the dtype of the others.
        h_w = (c_x_y * 2).to(torch.int64).to(self.device)
    else:
        raise ValueError("Height must be an int, tensor, or None, received: " + str(type(h_w)))
    return h_w
def _init_get_times(self, times):
    """
    Parses the __init__() argument times.

    None passes through unchanged; a tensor is given a trailing singleton
    dimension when needed and moved to this camera's device.

    Args:
        times: times argument from __init__() (None or a tensor)

    Returns:
        None, or a (..., 1) tensor on this camera's device.

    Raises:
        ValueError: if times is neither None nor a torch.Tensor.
    """
    if times is None:
        return None
    if isinstance(times, torch.Tensor):
        if times.ndim == 0 or times.shape[-1] != 1:
            times = times.unsqueeze(-1)
        # CONSISTENCY FIX: always move to the camera device. Previously the
        # device transfer only happened on the unsqueeze path, so an input
        # already shaped (..., 1) was returned on its original device.
        return times.to(self.device)
    raise ValueError(f"times must be None or a tensor, got {type(times)}")
def device(self):
    """Returns the device that the camera is on."""
    # The camera's device is defined by where its c2w matrices live.
    # NOTE(review): upstream this reads like a @property accessor; decorators
    # appear to have been stripped from this file -- confirm call convention.
    return self.camera_to_worlds.device
def image_height(self) -> TensorType["num_cameras":..., 1]:
    """Returns the height of the images."""
    # Simple accessor over the per-camera height tensor.
    return self.height
def image_width(self) -> TensorType["num_cameras":..., 1]:
    """Returns the width of the images."""
    # Docstring previously said "height" (copy-paste); this returns the width tensor.
    return self.width
def is_jagged(self):
    """
    Returns whether or not the cameras are "jagged" (i.e. the height and widths are different, meaning that
    you cannot concatenate the image coordinate maps together)
    """
    # Compare every entry against the first one; any mismatch makes the
    # batch jagged.
    first_height = self.height.view(-1)[0]
    first_width = self.width.view(-1)[0]
    heights_uniform = bool(torch.all(self.height == first_height))
    widths_uniform = bool(torch.all(self.width == first_width))
    return not (heights_uniform and widths_uniform)
def get_image_coords(
    self, pixel_offset: float = 0.5, index: Optional[Tuple] = None
) -> TensorType["height", "width", 2]:
    """This gets the image coordinates of one of the cameras in this object.

    If no index is specified, it will return the maximum possible sized height / width image coordinate map,
    by looking at the maximum height and width of all the cameras in this object.

    Args:
        pixel_offset: Offset for each pixel. Defaults to center of pixel (0.5)
        index: Tuple of indices into the batch dimensions of the camera. Defaults to None, which returns the 0th
            flattened camera

    Returns:
        Grid of image coordinates.
    """
    # Pick the grid extent: the maximum over all cameras when no index is
    # given, otherwise the extent of the selected camera.
    if index is None:
        grid_height = torch.max(self.image_height.view(-1))
        grid_width = torch.max(self.image_width.view(-1))
    else:
        grid_height = self.image_height[index].item()
        grid_width = self.image_width[index].item()
    rows, cols = torch.meshgrid(torch.arange(grid_height), torch.arange(grid_width), indexing="ij")
    # Stored as (y, x) coordinates, shifted to the pixel center by default.
    return torch.stack((rows, cols), dim=-1) + pixel_offset
def generate_rays( # pylint: disable=too-many-statements
    self,
    camera_indices: Union[TensorType["num_rays":..., "num_cameras_batch_dims"], int],
    coords: Optional[TensorType["num_rays":..., 2]] = None,
    camera_opt_to_camera: Optional[TensorType["num_rays":..., 3, 4]] = None,
    distortion_params_delta: Optional[TensorType["num_rays":..., 6]] = None,
    keep_shape: Optional[bool] = None,
    disable_distortion: bool = False,
) -> RayBundle:
    """Generates rays for the given camera indices.

    This function will standardize the input arguments and then call the _generate_rays_from_coords function
    to generate the rays. Our goal is to parse the arguments and then get them into the right shape:
    - camera_indices: (num_rays:..., num_cameras_batch_dims)
    - coords: (num_rays:..., 2)
    - camera_opt_to_camera: (num_rays:..., 3, 4) or None
    - distortion_params_delta: (num_rays:..., 6) or None
    Read the docstring for _generate_rays_from_coords for more information on how we generate the rays
    after we have standardized the arguments.
    We are only concerned about different combinations of camera_indices and coords matrices, and the following
    are the 4 cases we have to deal with:
    1. isinstance(camera_indices, int) and coords == None
        - In this case we broadcast our camera_indices / coords shape (h, w, 1 / 2 respectively)
    2. isinstance(camera_indices, int) and coords != None
        - In this case, we broadcast camera_indices to the same batch dim as coords
    3. not isinstance(camera_indices, int) and coords == None
        - In this case, we will need to set coords so that it is of shape (h, w, num_rays, 2), and broadcast
            all our other args to match the new definition of num_rays := (h, w) + num_rays
    4. not isinstance(camera_indices, int) and coords != None
        - In this case, we have nothing to do, only check that the arguments are of the correct shape
    There is one more edge case we need to be careful with: when we have "jagged cameras" (ie: different heights
    and widths for each camera). This isn't problematic when we specify coords, since coords is already a tensor.
    When coords == None (ie: when we render out the whole image associated with this camera), we run into problems
    since there's no way to stack each coordinate map as all coordinate maps are all different shapes. In this case,
    we will need to flatten each individual coordinate map and concatenate them, giving us only one batch dimension,
    regaurdless of the number of prepended extra batch dimensions in the camera_indices tensor.

    Args:
        camera_indices: Camera indices of the flattened cameras object to generate rays for.
        coords: Coordinates of the pixels to generate rays for. If None, the full image will be rendered.
        camera_opt_to_camera: Optional transform for the camera to world matrices.
        distortion_params_delta: Optional delta for the distortion parameters.
        keep_shape: If None, then we default to the regular behavior of flattening if cameras is jagged, otherwise
            keeping dimensions. If False, we flatten at the end. If True, then we keep the shape of the
            camera_indices and coords tensors (if we can).
        disable_distortion: If True, disables distortion.

    Returns:
        Rays for the given camera indices and coords.
    """
    # Check the argument types to make sure they're valid and all shaped correctly
    assert isinstance(camera_indices, (torch.Tensor, int)), "camera_indices must be a tensor or int"
    assert coords is None or isinstance(coords, torch.Tensor), "coords must be a tensor or None"
    assert camera_opt_to_camera is None or isinstance(camera_opt_to_camera, torch.Tensor)
    assert distortion_params_delta is None or isinstance(distortion_params_delta, torch.Tensor)
    if isinstance(camera_indices, torch.Tensor) and isinstance(coords, torch.Tensor):
        num_rays_shape = camera_indices.shape[:-1]
        errormsg = "Batch dims of inputs must match when inputs are all tensors"
        assert coords.shape[:-1] == num_rays_shape, errormsg
        assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == num_rays_shape, errormsg
        assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == num_rays_shape, errormsg
    # If zero dimensional, we need to unsqueeze to get a batch dimension and then squeeze later
    if not self.shape:
        cameras = self.reshape((1,))
        assert torch.all(
            torch.tensor(camera_indices == 0) if isinstance(camera_indices, int) else camera_indices == 0
        ), "Can only index into single camera with no batch dimensions if index is zero"
    else:
        cameras = self
    # If the camera indices are an int, then we need to make sure that the camera batch is 1D
    if isinstance(camera_indices, int):
        assert (
            len(cameras.shape) == 1
        ), "camera_indices must be a tensor if cameras are batched with more than 1 batch dimension"
        camera_indices = torch.tensor([camera_indices], device=cameras.device)
    assert camera_indices.shape[-1] == len(
        cameras.shape
    ), "camera_indices must have shape (num_rays:..., num_cameras_batch_dims)"
    # If keep_shape is True, then we need to make sure that the camera indices in question
    # are all the same height and width and can actually be batched while maintaining the image
    # shape
    if keep_shape is True:
        assert torch.all(cameras.height[camera_indices] == cameras.height[camera_indices[0]]) and torch.all(
            cameras.width[camera_indices] == cameras.width[camera_indices[0]]
        ), "Can only keep shape if all cameras have the same height and width"
    # If the cameras don't all have same height / width, if coords is not none, we will need to generate
    # a flat list of coords for each camera and then concatenate otherwise our rays will be jagged.
    # Camera indices, camera_opt, and distortion will also need to be broadcasted accordingly which is non-trivial
    # NOTE(review): is_jagged is accessed as an attribute here; upstream it is a
    # @property -- decorators appear stripped in this file, confirm before running.
    if cameras.is_jagged and coords is None and (keep_shape is None or keep_shape is False):
        index_dim = camera_indices.shape[-1]
        camera_indices = camera_indices.reshape(-1, index_dim)
        _coords = [cameras.get_image_coords(index=tuple(index)).reshape(-1, 2) for index in camera_indices]
        # Each per-camera index row is repeated once per pixel of that camera's image.
        camera_indices = torch.cat(
            [index.unsqueeze(0).repeat(coords.shape[0], 1) for index, coords in zip(camera_indices, _coords)],
        )
        coords = torch.cat(_coords, dim=0)
        assert coords.shape[0] == camera_indices.shape[0]
        # Need to get the coords of each indexed camera and flatten all coordinate maps and concatenate them
    # The case where we aren't jagged && keep_shape (since otherwise coords is already set) and coords
    # is None. In this case we append (h, w) to the num_rays dimensions for all tensors. In this case,
    # each image in camera_indices has to have the same shape since otherwise we would have error'd when
    # we checked keep_shape is valid or we aren't jagged.
    if coords is None:
        index_dim = camera_indices.shape[-1]
        index = camera_indices.reshape(-1, index_dim)[0]
        coords: torch.Tensor = cameras.get_image_coords(index=tuple(index)) # (h, w, 2)
        coords = coords.reshape(coords.shape[:2] + (1,) * len(camera_indices.shape[:-1]) + (2,)) # (h, w, 1..., 2)
        coords = coords.expand(coords.shape[:2] + camera_indices.shape[:-1] + (2,)) # (h, w, num_rays, 2)
        camera_opt_to_camera = ( # (h, w, num_rays, 3, 4) or None
            camera_opt_to_camera.broadcast_to(coords.shape[:-1] + (3, 4))
            if camera_opt_to_camera is not None
            else None
        )
        distortion_params_delta = ( # (h, w, num_rays, 6) or None
            distortion_params_delta.broadcast_to(coords.shape[:-1] + (6,))
            if distortion_params_delta is not None
            else None
        )
    # If camera indices was an int or coords was none, we need to broadcast our indices along batch dims
    camera_indices = camera_indices.broadcast_to(coords.shape[:-1] + (len(cameras.shape),)).to(torch.long)
    # Checking our tensors have been standardized
    assert isinstance(coords, torch.Tensor) and isinstance(camera_indices, torch.Tensor)
    assert camera_indices.shape[-1] == len(cameras.shape)
    assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == coords.shape[:-1]
    assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == coords.shape[:-1]
    # This will do the actual work of generating the rays now that we have standardized the inputs
    # raybundle.shape == (num_rays) when done
    # pylint: disable=protected-access
    raybundle = cameras._generate_rays_from_coords(
        camera_indices, coords, camera_opt_to_camera, distortion_params_delta, disable_distortion=disable_distortion
    )
    # If we have mandated that we don't keep the shape, then we flatten
    if keep_shape is False:
        raybundle = raybundle.flatten()
    # TODO: We should have to squeeze the last dimension here if we started with zero batch dims, but never have to,
    # so there might be a rogue squeeze happening somewhere, and this may cause some unintended behaviour
    # that we haven't caught yet with tests
    return raybundle
# pylint: disable=too-many-statements
def _generate_rays_from_coords(
    self,
    camera_indices: TensorType["num_rays":..., "num_cameras_batch_dims"],
    coords: TensorType["num_rays":..., 2],
    camera_opt_to_camera: Optional[TensorType["num_rays":..., 3, 4]] = None,
    distortion_params_delta: Optional[TensorType["num_rays":..., 6]] = None,
    disable_distortion: bool = False,
) -> RayBundle:
    """Generates rays for the given camera indices and coords where self isn't jagged

    This is a fairly complex function, so let's break this down slowly.

    Shapes involved:
        - num_rays: This is your output raybundle shape. It dictates the number and shape of the rays generated
        - num_cameras_batch_dims: This is the number of dimensions of our camera

    Args:
        camera_indices: Camera indices of the flattened cameras object to generate rays for.
            The shape of this is such that indexing into camera_indices["num_rays":...] will return the
            index into each batch dimension of the camera in order to get the correct camera specified by
            "num_rays".
            Example:
                >>> cameras = Cameras(...)
                >>> cameras.shape
                    (2, 3, 4)
                >>> camera_indices = torch.tensor([0, 0, 0]) # We need an axis of length 3 since cameras.ndim == 3
                >>> camera_indices.shape
                    (3,)
                >>> coords = torch.tensor([1,1])
                >>> coords.shape
                    (2,)
                >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)
                    # This will generate a RayBundle with a single ray for the
                    # camera at cameras[0,0,0] at image coordinates (1,1), so out_rays.shape == ()
                >>> out_rays.shape
                    ()
                >>> camera_indices = torch.tensor([[0,0,0]])
                >>> camera_indices.shape
                    (1, 3)
                >>> coords = torch.tensor([[1,1]])
                >>> coords.shape
                    (1, 2)
                >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)
                    # This will generate a RayBundle with a single ray for the
                    # camera at cameras[0,0,0] at point (1,1), so out_rays.shape == (1,)
                    # since we added an extra dimension in front of camera_indices
                >>> out_rays.shape
                    (1,)
            If you want more examples, check tests/cameras/test_cameras and the function check_generate_rays_shape
            The bottom line is that for camera_indices: (num_rays:..., num_cameras_batch_dims), num_rays is the
            output shape and if you index into the output RayBundle with some indices [i:...], if you index into
            camera_indices with camera_indices[i:...] as well, you will get a 1D tensor containing the batch
            indices into the original cameras object corresponding to that ray (ie: you will get the camera
            from our batched cameras corresponding to the ray at RayBundle[i:...]).
        coords: Coordinates of the pixels to generate rays for. If None, the full image will be rendered, meaning
            height and width get prepended to the num_rays dimensions. Indexing into coords with [i:...] will
            get you the image coordinates [x, y] of that specific ray located at output RayBundle[i:...].
        camera_opt_to_camera: Optional transform for the camera to world matrices.
            In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you
            the 2D camera to world transform matrix for the camera optimization at RayBundle[i:...].
        distortion_params_delta: Optional delta for the distortion parameters.
            In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you
            the 1D tensor with the 6 distortion parameters for the camera optimization at RayBundle[i:...].
        disable_distortion: If True, disables distortion.

    Returns:
        Rays for the given camera indices and coords. RayBundle.shape == num_rays
    """
    # Make sure we're on the right devices
    camera_indices = camera_indices.to(self.device)
    coords = coords.to(self.device)
    # Checking to make sure everything is of the right shape and type
    num_rays_shape = camera_indices.shape[:-1]
    assert camera_indices.shape == num_rays_shape + (self.ndim,)
    assert coords.shape == num_rays_shape + (2,)
    assert coords.shape[-1] == 2
    assert camera_opt_to_camera is None or camera_opt_to_camera.shape == num_rays_shape + (3, 4)
    assert distortion_params_delta is None or distortion_params_delta.shape == num_rays_shape + (6,)
    # Here, we've broken our indices down along the num_cameras_batch_dims dimension allowing us to index by all
    # of our output rays at each dimension of our cameras object
    true_indices = [camera_indices[..., i] for i in range(camera_indices.shape[-1])]
    # Get all our focal lengths, principal points and make sure they are the right shapes
    y = coords[..., 0] # (num_rays,) get rid of the last dimension
    x = coords[..., 1] # (num_rays,) get rid of the last dimension
    fx, fy = self.fx[true_indices].squeeze(-1), self.fy[true_indices].squeeze(-1) # (num_rays,)
    cx, cy = self.cx[true_indices].squeeze(-1), self.cy[true_indices].squeeze(-1) # (num_rays,)
    assert (
        y.shape == num_rays_shape
        and x.shape == num_rays_shape
        and fx.shape == num_rays_shape
        and fy.shape == num_rays_shape
        and cx.shape == num_rays_shape
        and cy.shape == num_rays_shape
    ), (
        str(num_rays_shape)
        + str(y.shape)
        + str(x.shape)
        + str(fx.shape)
        + str(fy.shape)
        + str(cx.shape)
        + str(cy.shape)
    )
    # Get our image coordinates and image coordinates offset by 1 (offsets used for dx, dy calculations)
    # Also make sure the shapes are correct
    coord = torch.stack([(x - cx) / fx, -(y - cy) / fy], -1) # (num_rays, 2)
    coord_x_offset = torch.stack([(x - cx + 1) / fx, -(y - cy) / fy], -1) # (num_rays, 2)
    coord_y_offset = torch.stack([(x - cx) / fx, -(y - cy + 1) / fy], -1) # (num_rays, 2)
    assert (
        coord.shape == num_rays_shape + (2,)
        and coord_x_offset.shape == num_rays_shape + (2,)
        and coord_y_offset.shape == num_rays_shape + (2,)
    )
    # Stack image coordinates and image coordinates offset by 1, check shapes too
    coord_stack = torch.stack([coord, coord_x_offset, coord_y_offset], dim=0) # (3, num_rays, 2)
    assert coord_stack.shape == (3,) + num_rays_shape + (2,)
    # Undistorts our images according to our distortion parameters
    if not disable_distortion:
        distortion_params = None
        if self.distortion_params is not None:
            distortion_params = self.distortion_params[true_indices]
            if distortion_params_delta is not None:
                distortion_params = distortion_params + distortion_params_delta
        elif distortion_params_delta is not None:
            distortion_params = distortion_params_delta
        # Do not apply distortion for equirectangular images
        if distortion_params is not None:
            mask = (self.camera_type[true_indices] != CameraType.EQUIRECTANGULAR.value).squeeze(-1) # (num_rays)
            coord_mask = torch.stack([mask, mask, mask], dim=0)
            if mask.any():
                coord_stack[coord_mask, :] = camera_utils.radial_and_tangential_undistort(
                    coord_stack[coord_mask, :].reshape(3, -1, 2),
                    distortion_params[mask, :],
                ).reshape(-1, 2)
    # Make sure after we have undistorted our images, the shapes are still correct
    assert coord_stack.shape == (3,) + num_rays_shape + (2,)
    # Gets our directions for all our rays in camera coordinates and checks shapes at the end
    # Here, directions_stack is of shape (3, num_rays, 3)
    # directions_stack[0] is the direction for ray in camera coordinates
    # directions_stack[1] is the direction for ray in camera coordinates offset by 1 in x
    # directions_stack[2] is the direction for ray in camera coordinates offset by 1 in y
    cam_types = torch.unique(self.camera_type, sorted=False)
    directions_stack = torch.empty((3,) + num_rays_shape + (3,), device=self.device)
    if CameraType.PERSPECTIVE.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.PERSPECTIVE.value).squeeze(-1) # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)
        directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0], mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1], mask).float()
        directions_stack[..., 2][mask] = -1.0
    if CameraType.FISHEYE.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.FISHEYE.value).squeeze(-1) # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)
        theta = torch.sqrt(torch.sum(coord_stack**2, dim=-1))
        theta = torch.clip(theta, 0.0, math.pi)
        sin_theta = torch.sin(theta)
        directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0] * sin_theta / theta, mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1] * sin_theta / theta, mask).float()
        directions_stack[..., 2][mask] = -torch.masked_select(torch.cos(theta), mask)
    if CameraType.EQUIRECTANGULAR.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.EQUIRECTANGULAR.value).squeeze(-1) # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)
        # For equirect, fx = fy = height = width/2
        # Then coord[..., 0] goes from -1 to 1 and coord[..., 1] goes from -1/2 to 1/2
        theta = -torch.pi * coord_stack[..., 0] # minus sign for right-handed
        phi = torch.pi * (0.5 - coord_stack[..., 1])
        # use spherical in local camera coordinates (+y up, x=0 and z<0 is theta=0)
        directions_stack[..., 0][mask] = torch.masked_select(-torch.sin(theta) * torch.sin(phi), mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(torch.cos(phi), mask).float()
        directions_stack[..., 2][mask] = torch.masked_select(-torch.cos(theta) * torch.sin(phi), mask).float()
    # Reject any camera type we did not handle above.
    for value in cam_types:
        if value not in [CameraType.PERSPECTIVE.value, CameraType.FISHEYE.value, CameraType.EQUIRECTANGULAR.value]:
            raise ValueError(f"Camera type {value} not supported.")
    assert directions_stack.shape == (3,) + num_rays_shape + (3,)
    c2w = self.camera_to_worlds[true_indices]
    assert c2w.shape == num_rays_shape + (3, 4)
    if camera_opt_to_camera is not None:
        c2w = pose_utils.multiply(c2w, camera_opt_to_camera)
    rotation = c2w[..., :3, :3] # (..., 3, 3)
    assert rotation.shape == num_rays_shape + (3, 3)
    # Rotate camera-frame directions into world frame (row-vector * R).
    directions_stack = torch.sum(
        directions_stack[..., None, :] * rotation, dim=-1
    ) # (..., 1, 3) * (..., 3, 3) -> (..., 3)
    directions_norm = torch.norm(directions_stack, dim=-1, keepdim=True)
    # Keep only the norms of the unshifted rays (index 0 of the 3-stack).
    directions_norm = directions_norm[0]
    # NOTE(review): `normalize` is not defined in this file chunk -- presumably
    # torch.nn.functional.normalize imported elsewhere; confirm.
    directions_stack = normalize(directions_stack, dim=-1)
    assert directions_stack.shape == (3,) + num_rays_shape + (3,)
    origins = c2w[..., :3, 3] # (..., 3)
    assert origins.shape == num_rays_shape + (3,)
    directions = directions_stack[0]
    assert directions.shape == num_rays_shape + (3,)
    # norms of the vector going between adjacent coords, giving us dx and dy per output ray
    dx = torch.sqrt(torch.sum((directions - directions_stack[1]) ** 2, dim=-1)) # ("num_rays":...,)
    dy = torch.sqrt(torch.sum((directions - directions_stack[2]) ** 2, dim=-1)) # ("num_rays":...,)
    assert dx.shape == num_rays_shape and dy.shape == num_rays_shape
    pixel_area = (dx * dy)[..., None] # ("num_rays":..., 1)
    assert pixel_area.shape == num_rays_shape + (1,)
    times = self.times[camera_indices, 0] if self.times is not None else None
    return RayBundle(
        origins=origins,
        directions=directions,
        pixel_area=pixel_area,
        camera_indices=camera_indices,
        directions_norm=directions_norm,
        times=times,
    )
def to_json(
    self, camera_idx: int, image: Optional[TensorType["height", "width", 2]] = None, max_size: Optional[int] = None
) -> Dict:
    """Convert a camera to a json dictionary.

    Args:
        camera_idx: Index of the camera to convert.
        image: An image in range [0, 1] that is encoded to a base64 string.
            NOTE(review): the annotation says 2 channels, but the JPEG encoding
            below implies an RGB (3-channel) image — confirm against callers.
        max_size: Max size to resize the image to if present.

    Returns:
        A JSON representation of the camera
    """
    # Flatten so a single integer index addresses one camera regardless of batch shape.
    flattened = self.flatten()
    json_ = {
        "type": "PinholeCamera",
        "cx": flattened[camera_idx].cx.item(),
        "cy": flattened[camera_idx].cy.item(),
        "fx": flattened[camera_idx].fx.item(),
        "fy": flattened[camera_idx].fy.item(),
        "camera_to_world": self.camera_to_worlds[camera_idx].tolist(),
        "camera_index": camera_idx,
        "times": flattened[camera_idx].times.item() if self.times is not None else None,
    }
    if image is not None:
        # [0, 1] float image -> uint8 for encoding.
        image_uint8 = (image * 255).detach().type(torch.uint8)
        if max_size is not None:
            # torchvision resize expects (C, H, W); permute there and back.
            image_uint8 = image_uint8.permute(2, 0, 1)
            image_uint8 = torchvision.transforms.functional.resize(image_uint8, max_size)  # type: ignore
            image_uint8 = image_uint8.permute(1, 2, 0)
        image_uint8 = image_uint8.cpu().numpy()
        # JPEG-encode and embed as a base64 data URI.
        data = cv2.imencode(".jpg", image_uint8)[1].tobytes()
        json_["image"] = str("data:image/jpeg;base64," + base64.b64encode(data).decode("ascii"))
    return json_
def get_intrinsics_matrices(self) -> TensorType["num_cameras":..., 3, 3]:
    """Assemble the 3x3 pinhole intrinsics matrix K for every camera.

    Returns:
        Pinhole camera intrinsics matrices
    """
    intrinsics = torch.zeros((*self.shape, 3, 3), dtype=torch.float32)
    # Bottom-right homogeneous entry.
    intrinsics[..., 2, 2] = 1.0
    # Focal lengths on the diagonal, principal point in the last column.
    intrinsics[..., 0, 0] = self.fx.squeeze(-1)
    intrinsics[..., 1, 1] = self.fy.squeeze(-1)
    intrinsics[..., 0, 2] = self.cx.squeeze(-1)
    intrinsics[..., 1, 2] = self.cy.squeeze(-1)
    return intrinsics
def rescale_output_resolution(
    self, scaling_factor: Union[TensorType["num_cameras":...], TensorType["num_cameras":..., 1], float, int]
) -> None:
    """Rescale the output resolution of the cameras.

    Args:
        scaling_factor: Scaling factor to apply to the output resolution.
    """
    # Normalize the input into a tensor that broadcasts against fx/fy/cx/cy below.
    if isinstance(scaling_factor, (float, int)):
        factor = torch.tensor([scaling_factor]).to(self.device).broadcast_to((self.cx.shape))
    elif isinstance(scaling_factor, torch.Tensor):
        if scaling_factor.shape == self.shape:
            factor = scaling_factor.unsqueeze(-1)
        elif scaling_factor.shape == (*self.shape, 1):
            factor = scaling_factor
        else:
            raise ValueError(
                f"Scaling factor must be a float, int, or a tensor of shape {self.shape} or {(*self.shape, 1)}."
            )
    else:
        raise ValueError(
            f"Scaling factor must be a float, int, or a tensor of shape {self.shape} or {(*self.shape, 1)}."
        )
    # Intrinsics scale linearly with output resolution.
    factor = factor
    self.fx = self.fx * factor
    self.fy = self.fy * factor
    self.cx = self.cx * factor
    self.cy = self.cy * factor
    # Height/width must remain integral.
    self.height = (self.height * factor).to(torch.int64)
    self.width = (self.width * factor).to(torch.int64)
The provided code snippet includes necessary dependencies for implementing the `generate_ellipse_path` function. Write a Python function `def generate_ellipse_path( cameras: Cameras, n_frames: int = 120, const_speed: bool = True, z_variation: float = 0.0, z_phase: float = 0.0 ) -> np.ndarray` to solve the following problem:
Generate an elliptical render path based on the given poses.
Here is the function:
def generate_ellipse_path(
    cameras: Cameras, n_frames: int = 120, const_speed: bool = True, z_variation: float = 0.0, z_phase: float = 0.0
) -> np.ndarray:
    """Generate an elliptical render path based on the given poses.

    Args:
        cameras: Input cameras whose poses define the ellipse (focus point, radii, up vector).
        n_frames: Number of poses to generate along the path.
        const_speed: Resample for near-constant velocity along the path.
            NOTE(review): this branch currently raises NotImplementedError, and it is
            the default — confirm callers pass const_speed=False.
        z_variation: Amplitude of the optional height variation along the path.
        z_phase: Phase offset (in turns) for the height variation.

    NOTE(review): annotated to return ``np.ndarray`` but the code builds and returns a
    ``Cameras`` object — confirm the intended return type.
    """
    poses = np.stack(cameras.camera_to_worlds.cpu().numpy())
    # Calculate the focal point for the path (cameras point toward this).
    center = focus_point_fn(poses)
    # Path height sits at z=0 (in middle of zero-mean capture pattern).
    offset = np.array([center[0], center[1], 0])
    # Calculate scaling for ellipse axes based on input camera positions.
    sc = np.percentile(np.abs(poses[:, :3, 3] - offset), 90, axis=0)
    # Use ellipse that is symmetric about the focal point in xy.
    low = -sc + offset
    high = sc + offset
    # Optional height variation need not be symmetric
    z_low = np.percentile((poses[:, :3, 3]), 10, axis=0)
    z_high = np.percentile((poses[:, :3, 3]), 90, axis=0)

    def get_positions(theta):
        # Interpolate between bounds with trig functions to get ellipse in x-y.
        # Optionally also interpolate in z to change camera height along path.
        return np.stack(
            [
                low[0] + (high - low)[0] * (np.cos(theta) * 0.5 + 0.5),
                low[1] + (high - low)[1] * (np.sin(theta) * 0.5 + 0.5),
                z_variation * (z_low[2] + (z_high - z_low)[2] * (np.cos(theta + 2 * np.pi * z_phase) * 0.5 + 0.5)),
            ],
            -1,
        )

    theta = np.linspace(0, 2.0 * np.pi, n_frames + 1, endpoint=True)
    positions = get_positions(theta)
    if const_speed:
        # Resample theta angles so that the velocity is closer to constant.
        # lengths = np.linalg.norm(positions[1:] - positions[:-1], axis=-1)
        # theta = stepfun.sample(None, theta, np.log(lengths), n_frames + 1)
        # positions = get_positions(theta)
        raise NotImplementedError
    # Throw away duplicated last position.
    positions = positions[:-1]
    # Set path's up vector to axis closest to average of input pose up vectors.
    avg_up = poses[:, :3, 1].mean(0)
    avg_up = avg_up / np.linalg.norm(avg_up)
    ind_up = np.argmax(np.abs(avg_up))
    up = np.eye(3)[ind_up] * np.sign(avg_up[ind_up])
    # One look-at view matrix per sampled position, all aimed at the focus point.
    render_c2ws = np.stack([viewmatrix(p - center, up, p) for p in positions])
    render_c2ws = torch.from_numpy(render_c2ws)
    # use intrinsic of first camera
    camera_path = Cameras(
        fx=cameras[0].fx,
        fy=cameras[0].fy,
        cx=cameras[0].cx,
        cy=cameras[0].cy,
        height=cameras[0].height,
        width=cameras[0].width,
        camera_to_worlds=render_c2ws[:, :3, :4],
        camera_type=cameras[0].camera_type,
    )
return camera_path | Generate an elliptical render path based on the given poses. |
162,275 | import torch
from torchtyping import TensorType
The provided code snippet includes necessary dependencies for implementing the `exp_map_SO3xR3` function. Write a Python function `def exp_map_SO3xR3(tangent_vector: TensorType["b", 6]) -> TensorType["b", 3, 4]` to solve the following problem:
Compute the exponential map of the direct product group `SO(3) x R^3`. This can be used for learning pose deltas on SE(3), and is generally faster than `exp_map_SE3`. Args: tangent_vector: Tangent vector; length-3 translations, followed by an `so(3)` tangent vector. Returns: [R|t] transformation matrices.
Here is the function:
def exp_map_SO3xR3(tangent_vector: TensorType["b", 6]) -> TensorType["b", 3, 4]:  # pylint: disable=invalid-name
    """Compute the exponential map of the direct product group `SO(3) x R^3`.

    This can be used for learning pose deltas on SE(3), and is generally faster than `exp_map_SE3`.

    Args:
        tangent_vector: Tangent vector; length-3 translations, followed by an `so(3)` tangent vector.

    Returns:
        [R|t] transformation matrices.
    """
    # code for SO3 map grabbed from pytorch3d and stripped down to bare-bones
    log_rot = tangent_vector[:, 3:]
    nrms = (log_rot * log_rot).sum(1)
    # Clamp the squared norm away from zero so the 1/angle terms below stay finite
    # near the identity rotation.
    rot_angles = torch.clamp(nrms, 1e-4).sqrt()
    rot_angles_inv = 1.0 / rot_angles
    # Rodrigues coefficients: sin(t)/t and (1 - cos(t))/t^2.
    fac1 = rot_angles_inv * rot_angles.sin()
    fac2 = rot_angles_inv * rot_angles_inv * (1.0 - rot_angles.cos())
    # Skew-symmetric matrix [w]_x for each rotation vector.
    skews = torch.zeros((log_rot.shape[0], 3, 3), dtype=log_rot.dtype, device=log_rot.device)
    skews[:, 0, 1] = -log_rot[:, 2]
    skews[:, 0, 2] = log_rot[:, 1]
    skews[:, 1, 0] = log_rot[:, 2]
    skews[:, 1, 2] = -log_rot[:, 0]
    skews[:, 2, 0] = -log_rot[:, 1]
    skews[:, 2, 1] = log_rot[:, 0]
    skews_square = torch.bmm(skews, skews)

    ret = torch.zeros(tangent_vector.shape[0], 3, 4, dtype=tangent_vector.dtype, device=tangent_vector.device)
    # Rodrigues' formula: R = I + sin(t)/t [w]_x + (1-cos(t))/t^2 [w]_x^2
    ret[:, :3, :3] = (
        fac1[:, None, None] * skews
        + fac2[:, None, None] * skews_square
        + torch.eye(3, dtype=log_rot.dtype, device=log_rot.device)[None]
    )

    # Compute the translation (copied through unchanged — the group is a direct product).
    ret[:, :3, 3] = tangent_vector[:, :3]
return ret | Compute the exponential map of the direct product group `SO(3) x R^3`. This can be used for learning pose deltas on SE(3), and is generally faster than `exp_map_SE3`. Args: tangent_vector: Tangent vector; length-3 translations, followed by an `so(3)` tangent vector. Returns: [R|t] tranformation matrices. |
162,276 | import torch
from torchtyping import TensorType
The provided code snippet includes necessary dependencies for implementing the `exp_map_SE3` function. Write a Python function `def exp_map_SE3(tangent_vector: TensorType["b", 6]) -> TensorType["b", 3, 4]` to solve the following problem:
Compute the exponential map `se(3) -> SE(3)`. This can be used for learning pose deltas on `SE(3)`. Args: tangent_vector: A tangent vector from `se(3)`. Returns: [R|t] transformation matrices.
Here is the function:
def exp_map_SE3(tangent_vector: TensorType["b", 6]) -> TensorType["b", 3, 4]:  # pylint: disable=invalid-name
    """Compute the exponential map `se(3) -> SE(3)`.

    This can be used for learning pose deltas on `SE(3)`.

    Args:
        tangent_vector: A tangent vector from `se(3)`.

    Returns:
        [R|t] transformation matrices.
    """
    # Split into translational and rotational parts (column vectors for batched matmul).
    tangent_vector_lin = tangent_vector[:, :3].view(-1, 3, 1)
    tangent_vector_ang = tangent_vector[:, 3:].view(-1, 3, 1)

    theta = torch.linalg.norm(tangent_vector_ang, dim=1).unsqueeze(1)
    theta2 = theta**2
    theta3 = theta**3

    # Near theta == 0 the closed-form coefficients are numerically unstable; the
    # torch.where calls below swap in Taylor approximations there. The *_nz values
    # replace theta with 1 so divisions are well-defined even in the unused branch.
    near_zero = theta < 1e-2
    non_zero = torch.ones(1, dtype=tangent_vector.dtype, device=tangent_vector.device)
    theta_nz = torch.where(near_zero, non_zero, theta)
    theta2_nz = torch.where(near_zero, non_zero, theta2)
    theta3_nz = torch.where(near_zero, non_zero, theta3)

    # Compute the rotation
    sine = theta.sin()
    cosine = torch.where(near_zero, 8 / (4 + theta2) - 1, theta.cos())
    sine_by_theta = torch.where(near_zero, 0.5 * cosine + 0.5, sine / theta_nz)
    one_minus_cosine_by_theta2 = torch.where(near_zero, 0.5 * sine_by_theta, (1 - cosine) / theta2_nz)
    ret = torch.zeros(tangent_vector.shape[0], 3, 4).to(dtype=tangent_vector.dtype, device=tangent_vector.device)
    # Rodrigues: R = cos(t) I + (1-cos(t))/t^2 w w^T + sin(t)/t [w]_x
    ret[:, :3, :3] = one_minus_cosine_by_theta2 * tangent_vector_ang @ tangent_vector_ang.transpose(1, 2)
    ret[:, 0, 0] += cosine.view(-1)
    ret[:, 1, 1] += cosine.view(-1)
    ret[:, 2, 2] += cosine.view(-1)
    temp = sine_by_theta.view(-1, 1) * tangent_vector_ang.view(-1, 3)
    ret[:, 0, 1] -= temp[:, 2]
    ret[:, 1, 0] += temp[:, 2]
    ret[:, 0, 2] += temp[:, 1]
    ret[:, 2, 0] -= temp[:, 1]
    ret[:, 1, 2] -= temp[:, 0]
    ret[:, 2, 1] += temp[:, 0]

    # Compute the translation: t = V @ u, where V combines the same three
    # coefficient patterns (with their own near-zero Taylor forms).
    sine_by_theta = torch.where(near_zero, 1 - theta2 / 6, sine_by_theta)
    one_minus_cosine_by_theta2 = torch.where(near_zero, 0.5 - theta2 / 24, one_minus_cosine_by_theta2)
    theta_minus_sine_by_theta3_t = torch.where(near_zero, 1.0 / 6 - theta2 / 120, (theta - sine) / theta3_nz)

    ret[:, :, 3:] = sine_by_theta * tangent_vector_lin
    ret[:, :, 3:] += one_minus_cosine_by_theta2 * torch.cross(tangent_vector_ang, tangent_vector_lin, dim=1)
    ret[:, :, 3:] += theta_minus_sine_by_theta3_t * (
        tangent_vector_ang @ (tangent_vector_ang.transpose(1, 2) @ tangent_vector_lin)
    )
return ret | Compute the exponential map `se(3) -> SE(3)`. This can be used for learning pose deltas on `SE(3)`. Args: tangent_vector: A tangent vector from `se(3)`. Returns: [R|t] tranformation matrices. |
162,277 | import random
from typing import Dict
import torch
from nerfstudio.utils.images import BasicImages
The provided code snippet includes necessary dependencies for implementing the `collate_image_dataset_batch` function. Write a Python function `def collate_image_dataset_batch(batch: Dict, num_rays_per_batch: int, keep_full_image: bool = False)` to solve the following problem:
Operates on a batch of images and samples pixels to use for generating rays. Returns a collated batch which is input to the Graph. It will sample only within the valid 'mask' if it's specified. Args: batch: batch of images to sample from num_rays_per_batch: number of rays to sample per batch keep_full_image: whether or not to include a reference to the full image in returned batch
Here is the function:
def collate_image_dataset_batch(batch: Dict, num_rays_per_batch: int, keep_full_image: bool = False):
    """
    Operates on a batch of images and samples pixels to use for generating rays.
    Returns a collated batch which is input to the Graph.
    It will sample only within the valid 'mask' if it's specified.

    Args:
        batch: batch of images to sample from
        num_rays_per_batch: number of rays to sample per batch
        keep_full_image: whether or not to include a reference to the full image in returned batch
    """
    device = batch["image"].device
    num_images, image_height, image_width, _ = batch["image"].shape

    # only sample within the mask, if the mask is in the batch
    if "mask" in batch:
        # Sample (image, y, x) triples uniformly among the valid mask positions.
        nonzero_indices = torch.nonzero(batch["mask"][..., 0].to(device), as_tuple=False)
        chosen_indices = random.sample(range(len(nonzero_indices)), k=num_rays_per_batch)
        indices = nonzero_indices[chosen_indices]
    else:
        # Uniform random (image, y, x) triples over the whole image stack.
        indices = torch.floor(
            torch.rand((num_rays_per_batch, 3), device=device)
            * torch.tensor([num_images, image_height, image_width], device=device)
        ).long()

    c, y, x = (i.flatten() for i in torch.split(indices, 1, dim=-1))
    # Gather the sampled pixel from every per-pixel tensor in the batch.
    collated_batch = {
        key: value[c, y, x]
        for key, value in batch.items()
        if key not in ("image_idx", "src_imgs", "src_idxs", "sparse_sfm_points") and value is not None
    }

    # NOTE(review): asserts 3 channels — assumes RGB images; confirm for other modalities.
    assert collated_batch["image"].shape == (num_rays_per_batch, 3), collated_batch["image"].shape

    if "sparse_sfm_points" in batch:
        collated_batch["sparse_sfm_points"] = batch["sparse_sfm_points"].images[c[0]]

    # Needed to correct the random indices to their actual camera idx locations.
    indices[:, 0] = batch["image_idx"][c]
    collated_batch["indices"] = indices  # with the abs camera indices

    if keep_full_image:
        collated_batch["full_image"] = batch["image"]
return collated_batch | Operates on a batch of images and samples pixels to use for generating rays. Returns a collated batch which is input to the Graph. It will sample only within the valid 'mask' if it's specified. Args: batch: batch of images to sample from num_rays_per_batch: number of rays to sample per batch keep_full_image: whether or not to include a reference to the full image in returned batch |
162,278 | import random
from typing import Dict
import torch
from nerfstudio.utils.images import BasicImages
The provided code snippet includes necessary dependencies for implementing the `collate_image_dataset_batch_list` function. Write a Python function `def collate_image_dataset_batch_list(batch: Dict, num_rays_per_batch: int, keep_full_image: bool = False)` to solve the following problem:
Does the same as collate_image_dataset_batch, except it will operate over a list of images / masks inside a list. We will use this with the intent of DEPRECATING it as soon as we find a viable alternative. The intention will be to replace this with a more efficient implementation that doesn't require a for loop, but since pytorch's ragged tensors are still in beta (this would allow for some vectorization), this will do. Args: batch: batch of images to sample from num_rays_per_batch: number of rays to sample per batch keep_full_image: whether or not to include a reference to the full image in returned batch
Here is the function:
def collate_image_dataset_batch_list(batch: Dict, num_rays_per_batch: int, keep_full_image: bool = False):
    """
    Does the same as collate_image_dataset_batch, except it will operate over a list of images / masks inside
    a list.

    We will use this with the intent of DEPRECATING it as soon as we find a viable alternative.
    The intention will be to replace this with a more efficient implementation that doesn't require a for loop, but
    since pytorch's ragged tensors are still in beta (this would allow for some vectorization), this will do.

    Args:
        batch: batch of images to sample from
        num_rays_per_batch: number of rays to sample per batch
        keep_full_image: whether or not to include a reference to the full image in returned batch
    """
    device = batch["image"][0].device
    num_images = len(batch["image"])

    # only sample within the mask, if the mask is in the batch
    all_indices = []
    all_images = []
    all_fg_masks = []

    if "mask" in batch:
        # Split the ray budget across images; the last image absorbs the remainder.
        num_rays_in_batch = num_rays_per_batch // num_images
        for i in range(num_images):
            if i == num_images - 1:
                num_rays_in_batch = num_rays_per_batch - (num_images - 1) * num_rays_in_batch
            # nonzero_indices = torch.nonzero(batch["mask"][i][..., 0], as_tuple=False)
            # NOTE(review): batch["mask"][i] is used directly as precomputed (y, x)
            # indices rather than a boolean mask — confirm the upstream mask format.
            nonzero_indices = batch["mask"][i]
            chosen_indices = random.sample(range(len(nonzero_indices)), k=num_rays_in_batch)
            indices = nonzero_indices[chosen_indices]
            # Prepend the local image index to form (i, y, x) triples.
            indices = torch.cat([torch.full((num_rays_in_batch, 1), i, device=device), indices], dim=-1)
            all_indices.append(indices)
            all_images.append(batch["image"][i][indices[:, 1], indices[:, 2]])
            if "fg_mask" in batch:
                all_fg_masks.append(batch["fg_mask"][i][indices[:, 1], indices[:, 2]])
    else:
        num_rays_in_batch = num_rays_per_batch // num_images
        for i in range(num_images):
            image_height, image_width, _ = batch["image"][i].shape
            if i == num_images - 1:
                num_rays_in_batch = num_rays_per_batch - (num_images - 1) * num_rays_in_batch
            # Uniform random (0, y, x); column 0 is overwritten with i just below.
            indices = torch.floor(
                torch.rand((num_rays_in_batch, 3), device=device)
                * torch.tensor([1, image_height, image_width], device=device)
            ).long()
            indices[:, 0] = i
            all_indices.append(indices)
            all_images.append(batch["image"][i][indices[:, 1], indices[:, 2]])
            if "fg_mask" in batch:
                all_fg_masks.append(batch["fg_mask"][i][indices[:, 1], indices[:, 2]])

    indices = torch.cat(all_indices, dim=0)

    c, y, x = (i.flatten() for i in torch.split(indices, 1, dim=-1))
    # Per-pixel tensors (other than the list-typed keys handled above) are gathered here.
    collated_batch = {
        key: value[c, y, x]
        for key, value in batch.items()
        if key != "image_idx"
        and key != "image"
        and key != "mask"
        and key != "fg_mask"
        and key != "sparse_pts"
        and value is not None
    }

    collated_batch["image"] = torch.cat(all_images, dim=0)
    if len(all_fg_masks) > 0:
        collated_batch["fg_mask"] = torch.cat(all_fg_masks, dim=0)

    if "sparse_pts" in batch:
        # Sparse points come from a single randomly chosen image.
        rand_idx = random.randint(0, num_images - 1)
        collated_batch["sparse_pts"] = batch["sparse_pts"][rand_idx]

    # NOTE(review): asserts 3 channels — assumes RGB images; confirm for other modalities.
    assert collated_batch["image"].shape == (num_rays_per_batch, 3), collated_batch["image"].shape

    # Needed to correct the random indices to their actual camera idx locations.
    indices[:, 0] = batch["image_idx"][c]
    collated_batch["indices"] = indices  # with the abs camera indices

    if keep_full_image:
        collated_batch["full_image"] = batch["image"]
return collated_batch | Does the same as collate_image_dataset_batch, except it will operate over a list of images / masks inside a list. We will use this with the intent of DEPRECIATING it as soon as we find a viable alternative. The intention will be to replace this with a more efficient implementation that doesn't require a for loop, but since pytorch's ragged tensors are still in beta (this would allow for some vectorization), this will do Args: batch: batch of images to sample from num_rays_per_batch: number of rays to sample per batch keep_full_image: whether or not to include a reference to the full image in returned batch |
162,279 | import random
from typing import Dict
import torch
from nerfstudio.utils.images import BasicImages
The provided code snippet includes necessary dependencies for implementing the `collate_image_dataset_batch_equirectangular` function. Write a Python function `def collate_image_dataset_batch_equirectangular(batch: Dict, num_rays_per_batch: int, keep_full_image: bool = False)` to solve the following problem:
Operates on a batch of equirectangular images and samples pixels to use for generating rays. Rays will be generated uniformly on the sphere. Returns a collated batch which is input to the Graph. It will sample only within the valid 'mask' if it's specified. Args: batch: batch of images to sample from num_rays_per_batch: number of rays to sample per batch keep_full_image: whether or not to include a reference to the full image in returned batch
Here is the function:
def collate_image_dataset_batch_equirectangular(batch: Dict, num_rays_per_batch: int, keep_full_image: bool = False):
    """
    Operates on a batch of equirectangular images and samples pixels to use for
    generating rays. Rays will be generated uniformly on the sphere.
    Returns a collated batch which is input to the Graph.
    It will sample only within the valid 'mask' if it's specified.

    Args:
        batch: batch of images to sample from
        num_rays_per_batch: number of rays to sample per batch
        keep_full_image: whether or not to include a reference to the full image in returned batch
    """
    # TODO(kevinddchen): make more DRY
    device = batch["image"].device
    num_images, image_height, image_width, _ = batch["image"].shape

    # only sample within the mask, if the mask is in the batch
    if "mask" in batch:
        # TODO(kevinddchen): implement this
        raise NotImplementedError("Masking not implemented for equirectangular images.")

    # We sample theta uniformly in [0, 2*pi]
    # We sample phi in [0, pi] according to the PDF f(phi) = sin(phi) / 2.
    # This is done by inverse transform sampling.
    # http://corysimon.github.io/articles/uniformdistn-on-sphere/
    num_images_rand = torch.rand(num_rays_per_batch, device=device)
    # acos maps to [0, pi]; dividing by pi normalizes to [0, 1] before the
    # image_height scaling below.
    phi_rand = torch.acos(1 - 2 * torch.rand(num_rays_per_batch, device=device)) / torch.pi
    theta_rand = torch.rand(num_rays_per_batch, device=device)
    indices = torch.floor(
        torch.stack((num_images_rand, phi_rand, theta_rand), dim=-1)
        * torch.tensor([num_images, image_height, image_width], device=device)
    ).long()

    c, y, x = (i.flatten() for i in torch.split(indices, 1, dim=-1))
    collated_batch = {key: value[c, y, x] for key, value in batch.items() if key != "image_idx" and value is not None}

    # NOTE(review): asserts 3 channels — assumes RGB images; confirm for other modalities.
    assert collated_batch["image"].shape == (num_rays_per_batch, 3), collated_batch["image"].shape

    # Needed to correct the random indices to their actual camera idx locations.
    indices[:, 0] = batch["image_idx"][c]
    collated_batch["indices"] = indices  # with the abs camera indices

    if keep_full_image:
        collated_batch["full_image"] = batch["image"]
return collated_batch | Operates on a batch of equirectangular images and samples pixels to use for generating rays. Rays will be generated uniformly on the sphere. Returns a collated batch which is input to the Graph. It will sample only within the valid 'mask' if it's specified. Args: batch: batch of images to sample from num_rays_per_batch: number of rays to sample per batch keep_full_image: whether or not to include a reference to the full image in returned batch |
162,280 | from __future__ import annotations
from dataclasses import dataclass
from typing import Dict, List
from nerfstudio.data.datamanagers.base_datamanager import VanillaDataManagerConfig
from nerfstudio.data.utils.nerfstudio_collate import nerfstudio_collate
def nerfstudio_collate(
    batch, extra_mappings: Union[Dict[type, Callable], None] = None
):  # pylint: disable=too-many-return-statements
    r"""
    This is the default pytorch collate function, but with support for nerfstudio types. All documentation
    below is copied straight over from pytorch's default_collate function, python version 3.8.13,
    pytorch version '1.12.1+cu113'. Custom nerfstudio types are accounted for at the end, and extra
    mappings can be passed in to handle custom types. These mappings are from types: callable (types
    being like int or float or the return value of type(3.), etc). The only code before we parse for custom types that
    was changed from default pytorch was the addition of the extra_mappings argument, a find and replace operation
    from default_collate to nerfstudio_collate, and the addition of the nerfstudio_collate_err_msg_format variable.

    Function that takes in a batch of data and puts the elements within the batch
    into a tensor with an additional outer dimension - batch size. The exact output type can be
    a :class:`torch.Tensor`, a `Sequence` of :class:`torch.Tensor`, a
    Collection of :class:`torch.Tensor`, or left unchanged, depending on the input type.
    This is used as the default function for collation when
    `batch_size` or `batch_sampler` is defined in :class:`~torch.utils.data.DataLoader`.

    Here is the general input type (based on the type of the element within the batch) to output type mapping:

    * :class:`torch.Tensor` -> :class:`torch.Tensor` (with an added outer dimension batch size)
    * NumPy Arrays -> :class:`torch.Tensor`
    * `float` -> :class:`torch.Tensor`
    * `int` -> :class:`torch.Tensor`
    * `str` -> `str` (unchanged)
    * `bytes` -> `bytes` (unchanged)
    * `Mapping[K, V_i]` -> `Mapping[K, nerfstudio_collate([V_1, V_2, ...])]`
    * `NamedTuple[V1_i, V2_i, ...]` -> `NamedTuple[nerfstudio_collate([V1_1, V1_2, ...]),
      nerfstudio_collate([V2_1, V2_2, ...]), ...]`
    * `Sequence[V1_i, V2_i, ...]` -> `Sequence[nerfstudio_collate([V1_1, V1_2, ...]),
      nerfstudio_collate([V2_1, V2_2, ...]), ...]`

    Args:
        batch: a single batch to be collated

    Examples:
        >>> # Example with a batch of `int`s:
        >>> nerfstudio_collate([0, 1, 2, 3])
        tensor([0, 1, 2, 3])
        >>> # Example with a batch of `str`s:
        >>> nerfstudio_collate(['a', 'b', 'c'])
        ['a', 'b', 'c']
        >>> # Example with `Map` inside the batch:
        >>> nerfstudio_collate([{'A': 0, 'B': 1}, {'A': 100, 'B': 100}])
        {'A': tensor([ 0, 100]), 'B': tensor([ 1, 100])}
        >>> # Example with `NamedTuple` inside the batch:
        >>> Point = namedtuple('Point', ['x', 'y'])
        >>> nerfstudio_collate([Point(0, 0), Point(1, 1)])
        Point(x=tensor([0, 1]), y=tensor([0, 1]))
        >>> # Example with `Tuple` inside the batch:
        >>> nerfstudio_collate([(0, 1), (2, 3)])
        [tensor([0, 2]), tensor([1, 3])]
        >>> # Example with `List` inside the batch:
        >>> nerfstudio_collate([[0, 1], [2, 3]])
        [tensor([0, 2]), tensor([1, 3])]
    """
    if extra_mappings is None:
        extra_mappings = {}
    # Dispatch on the type of the first element; the batch is assumed homogeneous.
    elem = batch[0]
    elem_type = type(elem)
    if isinstance(elem, torch.Tensor):  # pylint: disable=no-else-return
        out = None
        if torch.utils.data.get_worker_info() is not None:
            # If we're in a background process, concatenate directly into a
            # shared memory tensor to avoid an extra copy
            numel = sum(x.numel() for x in batch)
            storage = elem.storage()._new_shared(numel, device=elem.device)  # pylint: disable=protected-access
            out = elem.new(storage).resize_(len(batch), *list(elem.size()))
        return torch.stack(batch, 0, out=out)
    elif elem_type.__module__ == "numpy" and elem_type.__name__ != "str_" and elem_type.__name__ != "string_":
        # pylint: disable=no-else-return, consider-using-in
        if elem_type.__name__ == "ndarray" or elem_type.__name__ == "memmap":
            # array of string classes and object
            if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
                raise TypeError(NERFSTUDIO_COLLATE_ERR_MSG_FORMAT.format(elem.dtype))
            # Convert each array to a tensor and recurse into the tensor branch above.
            return nerfstudio_collate([torch.as_tensor(b) for b in batch], extra_mappings=extra_mappings)
        elif elem.shape == ():  # scalars
            return torch.as_tensor(batch)
    elif isinstance(elem, float):
        return torch.tensor(batch, dtype=torch.float64)
    elif isinstance(elem, int):
        return torch.tensor(batch)
    elif isinstance(elem, string_classes):
        return batch
    elif isinstance(elem, collections.abc.Mapping):
        # Collate per key, recursing into the values.
        try:
            return elem_type(
                {key: nerfstudio_collate([d[key] for d in batch], extra_mappings=extra_mappings) for key in elem}
            )
        except TypeError:
            # The mapping type may not support `__init__(iterable)`.
            return {key: nerfstudio_collate([d[key] for d in batch], extra_mappings=extra_mappings) for key in elem}
    elif isinstance(elem, tuple) and hasattr(elem, "_fields"):  # namedtuple
        return elem_type(*(nerfstudio_collate(samples, extra_mappings=extra_mappings) for samples in zip(*batch)))
    elif isinstance(elem, collections.abc.Sequence):
        # check to make sure that the elements in batch have consistent size
        it = iter(batch)
        elem_size = len(next(it))
        if not all(len(elem) == elem_size for elem in it):
            raise RuntimeError("each element in list of batch should be of equal size")
        transposed = list(zip(*batch))  # It may be accessed twice, so we use a list.

        if isinstance(elem, tuple):
            return [
                nerfstudio_collate(samples, extra_mappings=extra_mappings) for samples in transposed
            ]  # Backwards compatibility.
        else:
            try:
                return elem_type([nerfstudio_collate(samples, extra_mappings=extra_mappings) for samples in transposed])
            except TypeError:
                # The sequence type may not support `__init__(iterable)` (e.g., `range`).
                return [nerfstudio_collate(samples, extra_mappings=extra_mappings) for samples in transposed]

    # NerfStudio types supported below

    elif isinstance(elem, Cameras):
        # If a camera, just concatenate along the batch dimension. In the future, this may change to stacking
        assert all((isinstance(cam, Cameras) for cam in batch))
        assert all((cam.distortion_params is None for cam in batch)) or all(
            (cam.distortion_params is not None for cam in batch)
        ), "All cameras must have distortion parameters or none of them should have distortion parameters.\
            Generalized batching will be supported in the future."

        # If no batch dimension exists, then we need to stack everything and create a batch dimension on 0th dim
        if elem.shape == ():
            op = torch.stack
        # If batch dimension exists, then we need to concatenate along the 0th dimension
        else:
            op = torch.cat

        return Cameras(
            op([cameras.camera_to_worlds for cameras in batch], dim=0),
            op([cameras.fx for cameras in batch], dim=0),
            op([cameras.fy for cameras in batch], dim=0),
            op([cameras.cx for cameras in batch], dim=0),
            op([cameras.cy for cameras in batch], dim=0),
            height=op([cameras.height for cameras in batch], dim=0),
            width=op([cameras.width for cameras in batch], dim=0),
            distortion_params=op(
                [
                    # NOTE(review): the `torch.zeros_like(cameras.distortion_params)` fallback
                    # is only evaluated when distortion_params is None, where zeros_like(None)
                    # would raise — confirm this branch is ever exercised.
                    cameras.distortion_params
                    if cameras.distortion_params is not None
                    else torch.zeros_like(cameras.distortion_params)
                    for cameras in batch
                ],
                dim=0,
            ),
            camera_type=op([cameras.camera_type for cameras in batch], dim=0),
            times=torch.stack(
                # NOTE(review): same concern — `-torch.ones_like(cameras.times)` is evaluated
                # only when cameras.times is None, where ones_like(None) would raise.
                [cameras.times if cameras.times is not None else -torch.ones_like(cameras.times) for cameras in batch],
                dim=0,
            ),
        )

    elif isinstance(elem, BasicImages):
        # Flatten the per-sample image lists into one list; no stacking occurs.
        assert all((isinstance(elem, BasicImages) for elem in batch))
        all_images = []
        for images in batch:
            all_images.extend(images.images)
        return BasicImages(all_images)

    # Finally, fall back to any caller-supplied mapping for custom types.
    for type_key in extra_mappings:
        if isinstance(elem, type_key):
            return extra_mappings[type_key](batch)

    raise TypeError(NERFSTUDIO_COLLATE_ERR_MSG_FORMAT.format(elem_type))
The provided code snippet includes necessary dependencies for implementing the `variable_res_collate` function. Write a Python function `def variable_res_collate(batch: List[Dict]) -> Dict` to solve the following problem:
Default collate function for the cached dataloader. Args: batch: Batch of samples from the dataset. Returns: Collated batch.
Here is the function:
def variable_res_collate(batch: List[Dict]) -> Dict:
    """Default collate function for the cached dataloader.

    Images (and masks) may have different resolutions across samples, so they are
    popped out before the default collation and re-attached as plain lists.

    Args:
        batch: Batch of samples from the dataset.

    Returns:
        Collated batch.
    """
    images = []
    masks = []
    for data in batch:
        image = data.pop("image")
        mask = data.pop("mask", None)
        images.append(image)
        # BUG FIX: `if mask:` evaluated the truthiness of the mask itself; for a
        # torch.Tensor with more than one element that raises "Boolean value of
        # Tensor with more than one element is ambiguous". Test for presence
        # explicitly instead.
        if mask is not None:
            masks.append(mask)
    # Collate everything else with the default machinery, then re-attach the
    # variable-resolution fields as lists.
    new_batch: dict = nerfstudio_collate(batch)
    new_batch["image"] = images
    if masks:
        new_batch["mask"] = masks
return new_batch | Default collate function for the cached dataloader. Args: batch: Batch of samples from the dataset. Returns: Collated batch. |
162,281 | from __future__ import annotations
import math
from dataclasses import dataclass, field
from pathlib import Path
from typing import Type
import numpy as np
import torch
import yaml
from rich.progress import Console, track
from typing_extensions import Literal
from nerfstudio.cameras import camera_utils
from nerfstudio.cameras.cameras import Cameras, CameraType
from nerfstudio.data.dataparsers.base_dataparser import (
DataParser,
DataParserConfig,
DataparserOutputs,
)
from nerfstudio.data.scene_box import SceneBox
from nerfstudio.data.utils.colmap_utils import (
read_cameras_binary,
read_images_binary,
read_points3d_binary,
)
from nerfstudio.model_components.ray_samplers import save_points
from nerfstudio.utils.images import BasicImages
class BasicImages:
    """Primitive container for a list of images that may differ in height/width.

    Wrapping the list in this struct tells the nerfstudio_collate fn (and other
    parts of the code) to leave it alone rather than reshaping or concatenating
    it into a single tensor, which matters when the images have different sizes
    and shapes. There is a single batch dimension; this will likely be replaced
    down the line by a TensorDataclass alternative supporting arbitrary batches.
    """

    def __init__(self, images: List):
        # Validate: a list whose first element (if any) is a tensor. The message
        # expression is only evaluated when the assertion fails, so an empty list
        # never indexes images[0].
        assert isinstance(images, List)
        assert not images or isinstance(
            images[0], torch.Tensor
        ), f"Input should be a list of tensors, not {type(images[0]) if isinstance(images, List) else type(images)}"
        self.images = images

    def to(self, device):
        """Move the images to the given device."""
        assert isinstance(device, torch.device)
        moved = [img.to(device) for img in self.images]
        return BasicImages(moved)
The provided code snippet includes necessary dependencies for implementing the `get_masks` function. Write a Python function `def get_masks(image_idx: int, masks, fg_masks, sparse_pts)` to solve the following problem:
function to process additional mask information Args: image_idx: specific image index to work with masks: mask data fg_masks: foreground mask data sparse_pts: sparse point data
Here is the function:
def get_masks(image_idx: int, masks, fg_masks, sparse_pts):
    """Gather the mask, foreground mask and sparse points for one image.

    Each value is wrapped in BasicImages so downstream collation passes it
    through untouched instead of trying to stack differently-sized tensors.

    Args:
        image_idx: index of the image to look up
        masks: per-image mask data, indexable by image index
        fg_masks: per-image foreground mask data
        sparse_pts: per-image sparse point data
    """
    return {
        "mask": BasicImages([masks[image_idx]]),
        "fg_mask": BasicImages([fg_masks[image_idx]]),
        "sparse_pts": BasicImages([sparse_pts[image_idx]]),
    }
162,282 | import math
import os
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Tuple, Type
import numpy as np
import torch
from nuscenes.nuscenes import NuScenes as NuScenesDatabase
from typing_extensions import Literal
from nerfstudio.cameras.cameras import Cameras, CameraType
from nerfstudio.data.dataparsers.base_dataparser import (
DataParser,
DataParserConfig,
DataparserOutputs,
)
from nerfstudio.data.scene_box import SceneBox
from nerfstudio.process_data.colmap_utils import qvec2rotmat
def qvec2rotmat(qvec) -> np.ndarray:
    """Convert a unit quaternion to a 3x3 rotation matrix.

    Args:
        qvec: Quaternion of shape (4,), ordered (w, x, y, z).

    Returns:
        Rotation matrix of shape (3, 3).
    """
    w, x, y, z = qvec[0], qvec[1], qvec[2], qvec[3]
    # Standard quaternion-to-rotation expansion.
    return np.array(
        [
            [1 - 2 * (y * y + z * z), 2 * (x * y - w * z), 2 * (z * x + w * y)],
            [2 * (x * y + w * z), 1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
            [2 * (z * x - w * y), 2 * (y * z + w * x), 1 - 2 * (x * x + y * y)],
        ]
    )
The provided code snippet includes necessary dependencies for implementing the `rotation_translation_to_pose` function. Write a Python function `def rotation_translation_to_pose(r_vec, t_vec)` to solve the following problem:
Convert quaternion rotation and translation vectors to 4x4 matrix
Here is the function:
def rotation_translation_to_pose(r_vec, t_vec):
    """Build a 4x4 homogeneous pose matrix from a quaternion and a translation.

    Args:
        r_vec: quaternion (w, x, y, z) describing the rotation
        t_vec: 3-vector translation

    Returns:
        4x4 pose matrix with the rotation in the upper-left block and the
        translation in the last column.
    """
    rotation = qvec2rotmat(r_vec)
    pose = np.eye(4)
    pose[:3, :3] = rotation
    pose[:3, 3] = t_vec
    return pose
162,283 | from __future__ import annotations
from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict, Optional, Type
from typing_extensions import Literal
import numpy as np
import torch
from PIL import Image
from rich.console import Console
from torchtyping import TensorType
from nerfstudio.cameras import camera_utils
from nerfstudio.cameras.cameras import Cameras, CameraType
from nerfstudio.data.dataparsers.base_dataparser import (
DataParser,
DataParserConfig,
DataparserOutputs,
)
from nerfstudio.data.scene_box import SceneBox
from nerfstudio.utils.images import BasicImages
from nerfstudio.utils.io import load_from_json
def get_src_from_pairs(
    ref_idx, all_imgs, pairs_srcs, neighbors_num=None, neighbors_shuffle=False
) -> Dict[str, TensorType]:
    """Select source views paired with a reference view.

    Args:
        ref_idx: index of the reference image; pairs_srcs[ref_idx][0] is the
            reference image itself.
        all_imgs: tensor of all images, indexed on the first dimension.
        pairs_srcs: per-reference tensors of candidate source indices.
        neighbors_num: if given and smaller than the candidate count, keep
            only that many neighbors (plus the reference).
        neighbors_shuffle: randomly sample the kept neighbors instead of
            taking the first ones.
    """
    src_idx = pairs_srcs[ref_idx]
    candidate_count = len(src_idx) - 1  # excludes the reference at position 0
    if neighbors_num and -1 < neighbors_num < candidate_count:
        if neighbors_shuffle:
            # Permute only the neighbor slots (1..end), keep the reference first.
            shuffled = torch.randperm(candidate_count) + 1
            src_idx = torch.cat([src_idx[[0]], src_idx[shuffled[:neighbors_num]]])
        else:
            src_idx = src_idx[: neighbors_num + 1]
    src_idx = src_idx.to(all_imgs.device)
    return {"src_imgs": all_imgs[src_idx], "src_idxs": src_idx}
162,284 | from __future__ import annotations
from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict, Optional, Type
from typing_extensions import Literal
import numpy as np
import torch
from PIL import Image
from rich.console import Console
from torchtyping import TensorType
from nerfstudio.cameras import camera_utils
from nerfstudio.cameras.cameras import Cameras, CameraType
from nerfstudio.data.dataparsers.base_dataparser import (
DataParser,
DataParserConfig,
DataparserOutputs,
)
from nerfstudio.data.scene_box import SceneBox
from nerfstudio.utils.images import BasicImages
from nerfstudio.utils.io import load_from_json
The provided code snippet includes necessary dependencies for implementing the `get_image` function. Write a Python function `def get_image(image_filename, alpha_color=None) -> TensorType["image_height", "image_width", "num_channels"]` to solve the following problem:
Returns a 3 channel image. Args: image_filename: The filename of the image to load. alpha_color: Optional color to composite the alpha channel against.
Here is the function:
def get_image(image_filename, alpha_color=None) -> TensorType["image_height", "image_width", "num_channels"]:
    """Returns a 3 channel image as a float tensor in [0, 1].

    Args:
        image_filename: Path of the image file to load.
        alpha_color: Optional RGB color to composite the alpha channel
            against; used only when the source image has 4 channels.
    """
    pil_image = Image.open(image_filename)
    np_image = np.array(pil_image, dtype="uint8")  # shape is (h, w, 3 or 4)
    assert len(np_image.shape) == 3
    assert np_image.dtype == np.uint8
    # Fixed message typo ("is in correct." -> "is incorrect.").
    assert np_image.shape[2] in [3, 4], f"Image shape of {np_image.shape} is incorrect."
    image = torch.from_numpy(np_image.astype("float32") / 255.0)
    if alpha_color is not None and image.shape[-1] == 4:
        # Alpha-composite the RGB channels over the requested background color.
        image = image[:, :, :3] * image[:, :, -1:] + alpha_color * (1.0 - image[:, :, -1:])
    else:
        image = image[:, :, :3]
    return image
162,285 | from __future__ import annotations
from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict, Optional, Type
from typing_extensions import Literal
import numpy as np
import torch
from PIL import Image
from rich.console import Console
from torchtyping import TensorType
from nerfstudio.cameras import camera_utils
from nerfstudio.cameras.cameras import Cameras, CameraType
from nerfstudio.data.dataparsers.base_dataparser import (
DataParser,
DataParserConfig,
DataparserOutputs,
)
from nerfstudio.data.scene_box import SceneBox
from nerfstudio.utils.images import BasicImages
from nerfstudio.utils.io import load_from_json
The provided code snippet includes necessary dependencies for implementing the `get_depths_and_normals` function. Write a Python function `def get_depths_and_normals(image_idx: int, depths, normals)` to solve the following problem:
function to process additional depths and normal information Args: image_idx: specific image index to work with depths: depth data normals: normal data
Here is the function:
def get_depths_and_normals(image_idx: int, depths, normals):
    """Fetch the depth and normal entries for one image.

    Args:
        image_idx: index of the image to look up
        depths: per-image depth data, indexable by image index
        normals: per-image normal data, indexable by image index
    """
    return {"depth": depths[image_idx], "normal": normals[image_idx]}
162,286 | from __future__ import annotations
from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict, Optional, Type
from typing_extensions import Literal
import numpy as np
import torch
from PIL import Image
from rich.console import Console
from torchtyping import TensorType
from nerfstudio.cameras import camera_utils
from nerfstudio.cameras.cameras import Cameras, CameraType
from nerfstudio.data.dataparsers.base_dataparser import (
DataParser,
DataParserConfig,
DataparserOutputs,
)
from nerfstudio.data.scene_box import SceneBox
from nerfstudio.utils.images import BasicImages
from nerfstudio.utils.io import load_from_json
The provided code snippet includes necessary dependencies for implementing the `get_sensor_depths` function. Write a Python function `def get_sensor_depths(image_idx: int, sensor_depths)` to solve the following problem:
function to process additional sensor depths Args: image_idx: specific image index to work with sensor_depths: sensor depth data
Here is the function:
def get_sensor_depths(image_idx: int, sensor_depths):
    """Fetch the sensor depth entry for one image.

    Args:
        image_idx: index of the image to look up
        sensor_depths: per-image sensor depth data, indexable by image index
    """
    return {"sensor_depth": sensor_depths[image_idx]}
162,287 | from __future__ import annotations
from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict, Optional, Type
from typing_extensions import Literal
import numpy as np
import torch
from PIL import Image
from rich.console import Console
from torchtyping import TensorType
from nerfstudio.cameras import camera_utils
from nerfstudio.cameras.cameras import Cameras, CameraType
from nerfstudio.data.dataparsers.base_dataparser import (
DataParser,
DataParserConfig,
DataparserOutputs,
)
from nerfstudio.data.scene_box import SceneBox
from nerfstudio.utils.images import BasicImages
from nerfstudio.utils.io import load_from_json
The provided code snippet includes necessary dependencies for implementing the `get_foreground_masks` function. Write a Python function `def get_foreground_masks(image_idx: int, fg_masks)` to solve the following problem:
function to process additional foreground_masks Args: image_idx: specific image index to work with fg_masks: foreground_masks
Here is the function:
def get_foreground_masks(image_idx: int, fg_masks):
    """Fetch the foreground mask entry for one image.

    Args:
        image_idx: index of the image to look up
        fg_masks: per-image foreground mask data, indexable by image index
    """
    return {"fg_mask": fg_masks[image_idx]}
162,288 | from __future__ import annotations
from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict, Optional, Type
from typing_extensions import Literal
import numpy as np
import torch
from PIL import Image
from rich.console import Console
from torchtyping import TensorType
from nerfstudio.cameras import camera_utils
from nerfstudio.cameras.cameras import Cameras, CameraType
from nerfstudio.data.dataparsers.base_dataparser import (
DataParser,
DataParserConfig,
DataparserOutputs,
)
from nerfstudio.data.scene_box import SceneBox
from nerfstudio.utils.images import BasicImages
from nerfstudio.utils.io import load_from_json
class BasicImages:
    """Thin wrapper around a list of image tensors of possibly varying sizes.

    Wrapping the list in this struct signals to nerfstudio_collate (and other
    batching code) that the contents must be passed through untouched rather
    than stacked into a single tensor, which is impossible when the images
    have different heights / widths.  There is a single batch dimension; a
    TensorDataclass supporting ragged batches may replace this eventually.
    """

    def __init__(self, images: List):
        # Validate eagerly so bad inputs fail at construction, not at collate time.
        assert isinstance(images, List)
        assert not images or isinstance(
            images[0], torch.Tensor
        ), f"Input should be a list of tensors, not {type(images[0]) if isinstance(images, List) else type(images)}"
        self.images = images

    def to(self, device):
        """Return a new BasicImages with every tensor moved to ``device``."""
        assert isinstance(device, torch.device)
        moved = [img.to(device) for img in self.images]
        return BasicImages(moved)
The provided code snippet includes necessary dependencies for implementing the `get_sparse_sfm_points` function. Write a Python function `def get_sparse_sfm_points(image_idx: int, sfm_points)` to solve the following problem:
function to process additional sparse sfm points Args: image_idx: specific image index to work with sfm_points: sparse sfm points
Here is the function:
def get_sparse_sfm_points(image_idx: int, sfm_points):
    """Fetch the sparse SfM points for one image, wrapped in BasicImages.

    The wrapper keeps downstream collation from trying to stack point sets
    of different lengths into a single tensor.

    Args:
        image_idx: index of the image to look up
        sfm_points: per-image sparse SfM point data
    """
    return {"sparse_sfm_points": BasicImages([sfm_points[image_idx]])}
162,289 | from __future__ import annotations
from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict, Optional, Type
from typing_extensions import Literal
import numpy as np
import torch
from PIL import Image
from rich.console import Console
from torchtyping import TensorType
from nerfstudio.cameras import camera_utils
from nerfstudio.cameras.cameras import Cameras, CameraType
from nerfstudio.data.dataparsers.base_dataparser import (
DataParser,
DataParserConfig,
DataparserOutputs,
)
from nerfstudio.data.scene_box import SceneBox
from nerfstudio.utils.images import BasicImages
from nerfstudio.utils.io import load_from_json
The provided code snippet includes necessary dependencies for implementing the `filter_list` function. Write a Python function `def filter_list(list_to_filter, indices)` to solve the following problem:
Returns a copy list with only selected indices
Here is the function:
def filter_list(list_to_filter, indices):
    """Return a new list containing only the entries at the given indices.

    A falsy input (empty list or None) yields an empty list.
    """
    if not list_to_filter:
        return []
    return [list_to_filter[idx] for idx in indices]
162,290 | from __future__ import annotations
from dataclasses import dataclass, field
from glob import glob
from pathlib import Path
from typing import Dict, Literal, Optional, Type
import cv2
import numpy as np
import torch
from PIL import Image
from rich.console import Console
from torchtyping import TensorType
from nerfstudio.cameras.cameras import Cameras, CameraType
from nerfstudio.data.dataparsers.base_dataparser import (
DataParser,
DataParserConfig,
DataparserOutputs,
)
from nerfstudio.data.scene_box import SceneBox
def get_src_from_pairs(
    ref_idx, all_imgs, pairs_srcs, neighbors_num=None, neighbors_shuffle=False
) -> Dict[str, TensorType]:
    """Select source views paired with a reference view.

    Args:
        ref_idx: index of the reference image; pairs_srcs[ref_idx][0] is the
            reference image itself.
        all_imgs: tensor of all images, indexed on the first dimension.
        pairs_srcs: per-reference tensors of candidate source indices.
        neighbors_num: if given and smaller than the candidate count, keep
            only that many neighbors (plus the reference).
        neighbors_shuffle: randomly sample the kept neighbors instead of
            taking the first ones.
    """
    src_idx = pairs_srcs[ref_idx]
    candidate_count = len(src_idx) - 1  # excludes the reference at position 0
    if neighbors_num and -1 < neighbors_num < candidate_count:
        if neighbors_shuffle:
            # Permute only the neighbor slots (1..end), keep the reference first.
            shuffled = torch.randperm(candidate_count) + 1
            src_idx = torch.cat([src_idx[[0]], src_idx[shuffled[:neighbors_num]]])
        else:
            src_idx = src_idx[: neighbors_num + 1]
    src_idx = src_idx.to(all_imgs.device)
    return {"src_imgs": all_imgs[src_idx], "src_idxs": src_idx}
162,291 | from __future__ import annotations
from dataclasses import dataclass, field
from glob import glob
from pathlib import Path
from typing import Dict, Literal, Optional, Type
import cv2
import numpy as np
import torch
from PIL import Image
from rich.console import Console
from torchtyping import TensorType
from nerfstudio.cameras.cameras import Cameras, CameraType
from nerfstudio.data.dataparsers.base_dataparser import (
DataParser,
DataParserConfig,
DataparserOutputs,
)
from nerfstudio.data.scene_box import SceneBox
The provided code snippet includes necessary dependencies for implementing the `get_image` function. Write a Python function `def get_image(image_filename, alpha_color=None) -> TensorType["image_height", "image_width", "num_channels"]` to solve the following problem:
Returns a 3 channel image. Args: image_filename: The filename of the image to load. alpha_color: Optional color to composite the alpha channel against.
Here is the function:
def get_image(image_filename, alpha_color=None) -> TensorType["image_height", "image_width", "num_channels"]:
    """Returns a 3 channel image as a float tensor in [0, 1].

    Args:
        image_filename: Path of the image file to load.
        alpha_color: Optional RGB color to composite the alpha channel
            against; used only when the source image has 4 channels.
    """
    pil_image = Image.open(image_filename)
    np_image = np.array(pil_image, dtype="uint8")  # shape is (h, w, 3 or 4)
    assert len(np_image.shape) == 3
    assert np_image.dtype == np.uint8
    # Fixed message typo ("is in correct." -> "is incorrect.").
    assert np_image.shape[2] in [3, 4], f"Image shape of {np_image.shape} is incorrect."
    image = torch.from_numpy(np_image.astype("float32") / 255.0)
    if alpha_color is not None and image.shape[-1] == 4:
        # Alpha-composite the RGB channels over the requested background color.
        image = image[:, :, :3] * image[:, :, -1:] + alpha_color * (1.0 - image[:, :, -1:])
    else:
        image = image[:, :, :3]
    return image
162,292 | from __future__ import annotations
from dataclasses import dataclass, field
from glob import glob
from pathlib import Path
from typing import Dict, Literal, Optional, Type
import cv2
import numpy as np
import torch
from PIL import Image
from rich.console import Console
from torchtyping import TensorType
from nerfstudio.cameras.cameras import Cameras, CameraType
from nerfstudio.data.dataparsers.base_dataparser import (
DataParser,
DataParserConfig,
DataparserOutputs,
)
from nerfstudio.data.scene_box import SceneBox
def load_K_Rt_from_P(filename, P=None):
    """Recover camera intrinsics and pose from a 3x4 projection matrix.

    Args:
        filename: path to a text file holding the projection matrix; only
            read when ``P`` is not supplied.
        P: optional 3x4 projection matrix; when given, the file is not read.

    Returns:
        Tuple of (4x4 intrinsics matrix, 4x4 pose matrix with the transposed
        rotation and the dehomogenized camera center).
    """
    if P is None:
        # Use a context manager so the file handle is closed promptly
        # (the original left the handle to be collected lazily).
        with open(filename) as f:
            lines = f.read().splitlines()
        if len(lines) == 4:
            lines = lines[1:]
        lines = [[x[0], x[1], x[2], x[3]] for x in (x.split(" ") for x in lines)]
        P = np.asarray(lines).astype(np.float32).squeeze()

    # Decompose P into intrinsics K, rotation R and homogeneous center t.
    out = cv2.decomposeProjectionMatrix(P)
    K = out[0]
    R = out[1]
    t = out[2]

    K = K / K[2, 2]  # normalize so K[2, 2] == 1
    intrinsics = np.eye(4)
    intrinsics[:3, :3] = K

    pose = np.eye(4, dtype=np.float32)
    pose[:3, :3] = R.transpose()
    pose[:3, 3] = (t[:3] / t[3])[:, 0]  # dehomogenize the camera center

    return intrinsics, pose
162,293 | from __future__ import annotations
from dataclasses import dataclass, field
from glob import glob
from pathlib import Path
from typing import Dict, Literal, Optional, Type
import cv2
import numpy as np
import torch
from PIL import Image
from rich.console import Console
from torchtyping import TensorType
from nerfstudio.cameras.cameras import Cameras, CameraType
from nerfstudio.data.dataparsers.base_dataparser import (
DataParser,
DataParserConfig,
DataparserOutputs,
)
from nerfstudio.data.scene_box import SceneBox
The provided code snippet includes necessary dependencies for implementing the `get_depths_and_normals` function. Write a Python function `def get_depths_and_normals(image_idx: int, depths, normals)` to solve the following problem:
function to process additional depths and normal information Args: image_idx: specific image index to work with depths: depth data normals: normal data
Here is the function:
def get_depths_and_normals(image_idx: int, depths, normals):
    """Fetch the depth and normal entries for one image.

    Args:
        image_idx: index of the image to look up
        depths: per-image depth data, indexable by image index
        normals: per-image normal data, indexable by image index
    """
    return {"depth": depths[image_idx], "normal": normals[image_idx]}
162,294 | from pathlib import Path
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
The provided code snippet includes necessary dependencies for implementing the `get_image_mask_tensor_from_path` function. Write a Python function `def get_image_mask_tensor_from_path(filepath: Path, scale_factor: float = 1.0) -> torch.Tensor` to solve the following problem:
Utility function to read a mask image from the given path and return a boolean tensor
Here is the function:
def get_image_mask_tensor_from_path(filepath: Path, scale_factor: float = 1.0) -> torch.Tensor:
    """Read a mask image from disk and return it as a boolean tensor.

    Args:
        filepath: path to the mask image.
        scale_factor: optional uniform rescale applied before conversion;
            nearest-neighbor resampling keeps the mask binary.

    Returns:
        Boolean tensor of shape (H, W, 1).
    """
    pil_mask = Image.open(filepath)
    if scale_factor != 1.0:
        width, height = pil_mask.size
        scaled_size = (int(width * scale_factor), int(height * scale_factor))
        pil_mask = pil_mask.resize(scaled_size, resample=Image.NEAREST)
    return torch.from_numpy(np.array(pil_mask)).unsqueeze(-1).bool()
162,295 | from pathlib import Path
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
The provided code snippet includes necessary dependencies for implementing the `get_semantics_and_mask_tensors_from_path` function. Write a Python function `def get_semantics_and_mask_tensors_from_path( filepath: Path, mask_indices: Union[List, torch.Tensor], scale_factor: float = 1.0 ) -> Tuple[torch.Tensor, torch.Tensor]` to solve the following problem:
Utility function to read segmentation from the given filepath If no mask is required - use mask_indices = []
Here is the function:
def get_semantics_and_mask_tensors_from_path(
    filepath: Path, mask_indices: Union[List, torch.Tensor], scale_factor: float = 1.0
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Read a semantic segmentation image and derive a validity mask.

    If no mask is required - use mask_indices = [].

    Args:
        filepath: path to the semantic segmentation image.
        mask_indices: semantic class ids to mask out; a plain list is
            converted to a tensor of shape (1, 1, K).
        scale_factor: optional uniform rescale applied before conversion;
            nearest-neighbor resampling preserves the integer class labels.

    Returns:
        Tuple of (semantics tensor of shape (H, W, 1), boolean mask of shape
        (H, W, 1) that is False where a pixel's class is in mask_indices).
    """
    if isinstance(mask_indices, List):
        # Bug fix: torch.tensor requires a torch.dtype; the string "int64"
        # raises TypeError at runtime.
        mask_indices = torch.tensor(mask_indices, dtype=torch.int64).view(1, 1, -1)
    pil_image = Image.open(filepath)
    if scale_factor != 1.0:
        width, height = pil_image.size
        newsize = (int(width * scale_factor), int(height * scale_factor))
        pil_image = pil_image.resize(newsize, resample=Image.NEAREST)
    semantics = torch.from_numpy(np.array(pil_image, dtype="int64"))[..., None]
    # A pixel is valid (True) when its class matches none of the mask indices.
    mask = torch.sum(semantics == mask_indices, dim=-1, keepdim=True) == 0
    return semantics, mask
162,296 | import collections
import os
import struct
import numpy as np
def read_cameras_text(path):
    """Parse a COLMAP ``cameras.txt`` file into a dict of Camera records.

    (See COLMAP src/base/reconstruction.cc,
    Reconstruction::WriteCamerasText / ReadCamerasText.)

    Args:
        path: path to the text cameras file.

    Returns:
        Dict mapping camera_id -> Camera.
    """
    cameras = {}
    with open(path, "r") as fid:
        for raw_line in fid:
            line = raw_line.strip()
            # Skip blank lines and '#' comment lines.
            if not line or line.startswith("#"):
                continue
            elems = line.split()
            camera_id = int(elems[0])
            cameras[camera_id] = Camera(
                id=camera_id,
                model=elems[1],
                width=int(elems[2]),
                height=int(elems[3]),
                params=np.array(tuple(map(float, elems[4:]))),
            )
    return cameras
def read_cameras_binary(path_to_model_file):
    """Parse a COLMAP ``cameras.bin`` file into a dict of Camera records.

    (See COLMAP src/base/reconstruction.cc,
    Reconstruction::WriteCamerasBinary / ReadCamerasBinary.)

    Args:
        path_to_model_file: path to the binary cameras file.

    Returns:
        Dict mapping camera_id -> Camera.
    """
    cameras = {}
    with open(path_to_model_file, "rb") as fid:
        # Leading uint64: number of camera records that follow.
        num_cameras = read_next_bytes(fid, 8, "Q")[0]
        for camera_line_index in range(num_cameras):
            # Fixed header per camera: camera_id (i), model_id (i), width (Q), height (Q).
            camera_properties = read_next_bytes(fid, num_bytes=24, format_char_sequence="iiQQ")
            camera_id = camera_properties[0]
            model_id = camera_properties[1]
            model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name
            width = camera_properties[2]
            height = camera_properties[3]
            # The camera model determines how many double-precision intrinsics follow.
            num_params = CAMERA_MODEL_IDS[model_id].num_params
            params = read_next_bytes(fid, num_bytes=8 * num_params, format_char_sequence="d" * num_params)
            cameras[camera_id] = Camera(
                id=camera_id, model=model_name, width=width, height=height, params=np.array(params)
            )
        assert len(cameras) == num_cameras
    return cameras
def read_images_text(path):
    """Parse a COLMAP ``images.txt`` file into a dict of Image records.

    Each image occupies two consecutive non-comment lines: the first holds
    the id, quaternion, translation, camera id and name; the second holds
    the 2D keypoints as repeated (x, y, point3D_id) triplets.
    (See COLMAP src/base/reconstruction.cc,
    Reconstruction::ReadImagesText / WriteImagesText.)

    Args:
        path: path to the text images file.

    Returns:
        Dict mapping image_id -> Image.
    """
    images = {}
    with open(path, "r") as fid:
        while True:
            line = fid.readline()
            if not line:
                break
            line = line.strip()
            # Skip blank lines and '#' comment lines.
            if len(line) > 0 and line[0] != "#":
                elems = line.split()
                image_id = int(elems[0])
                # Rotation quaternion (w, x, y, z) and translation vector.
                qvec = np.array(tuple(map(float, elems[1:5])))
                tvec = np.array(tuple(map(float, elems[5:8])))
                camera_id = int(elems[8])
                image_name = elems[9]
                # The very next line holds the 2D points: x, y, point3D_id repeated.
                elems = fid.readline().split()
                xys = np.column_stack([tuple(map(float, elems[0::3])), tuple(map(float, elems[1::3]))])
                point3D_ids = np.array(tuple(map(int, elems[2::3])))
                images[image_id] = Image(
                    id=image_id,
                    qvec=qvec,
                    tvec=tvec,
                    camera_id=camera_id,
                    name=image_name,
                    xys=xys,
                    point3D_ids=point3D_ids,
                )
    return images
def read_images_binary(path_to_model_file):
    """Parse a COLMAP ``images.bin`` file into a dict of Image records.

    (See COLMAP src/base/reconstruction.cc,
    Reconstruction::ReadImagesBinary / WriteImagesBinary.)

    Args:
        path_to_model_file: path to the binary images file.

    Returns:
        Dict mapping image_id -> Image.
    """
    images = {}
    with open(path_to_model_file, "rb") as fid:
        # Leading uint64: number of registered images.
        num_reg_images = read_next_bytes(fid, 8, "Q")[0]
        for image_index in range(num_reg_images):
            # Fixed header: image_id (i), qvec (4 d), tvec (3 d), camera_id (i).
            binary_image_properties = read_next_bytes(fid, num_bytes=64, format_char_sequence="idddddddi")
            image_id = binary_image_properties[0]
            qvec = np.array(binary_image_properties[1:5])
            tvec = np.array(binary_image_properties[5:8])
            camera_id = binary_image_properties[8]
            # The image name is stored as a NUL-terminated byte string.
            image_name = ""
            current_char = read_next_bytes(fid, 1, "c")[0]
            while current_char != b"\x00":  # look for the ASCII 0 entry
                image_name += current_char.decode("utf-8")
                current_char = read_next_bytes(fid, 1, "c")[0]
            num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[0]
            # Each 2D point is (x: double, y: double, point3D_id: int64) = 24 bytes.
            x_y_id_s = read_next_bytes(fid, num_bytes=24 * num_points2D, format_char_sequence="ddq" * num_points2D)
            xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))])
            point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
            images[image_id] = Image(
                id=image_id,
                qvec=qvec,
                tvec=tvec,
                camera_id=camera_id,
                name=image_name,
                xys=xys,
                point3D_ids=point3D_ids,
            )
    return images
def read_points3D_text(path):
    """Parse a COLMAP ``points3D.txt`` file into a dict of Point3D records.

    (See COLMAP src/base/reconstruction.cc,
    Reconstruction::ReadPoints3DText / WritePoints3DText.)

    Args:
        path: path to the text points file.

    Returns:
        Dict mapping point3D_id -> Point3D.
    """
    points3D = {}
    with open(path, "r") as fid:
        for raw_line in fid:
            line = raw_line.strip()
            # Skip blank lines and '#' comment lines.
            if not line or line.startswith("#"):
                continue
            elems = line.split()
            point3D_id = int(elems[0])
            xyz = np.array(tuple(map(float, elems[1:4])))
            rgb = np.array(tuple(map(int, elems[4:7])))
            error = float(elems[7])
            # Remaining fields alternate (image_id, point2D_idx) pairs.
            image_ids = np.array(tuple(map(int, elems[8::2])))
            point2D_idxs = np.array(tuple(map(int, elems[9::2])))
            points3D[point3D_id] = Point3D(
                id=point3D_id, xyz=xyz, rgb=rgb, error=error, image_ids=image_ids, point2D_idxs=point2D_idxs
            )
    return points3D
def read_points3d_binary(path_to_model_file):
    """Parse a COLMAP ``points3D.bin`` file into a dict of Point3D records.

    (See COLMAP src/base/reconstruction.cc,
    Reconstruction::ReadPoints3DBinary / WritePoints3DBinary.)

    Args:
        path_to_model_file: path to the binary points file.

    Returns:
        Dict mapping point3D_id -> Point3D.
    """
    points3D = {}
    with open(path_to_model_file, "rb") as fid:
        # Leading uint64: number of 3D points.
        num_points = read_next_bytes(fid, 8, "Q")[0]
        for point_line_index in range(num_points):
            # Fixed header: id (Q), xyz (3 d), rgb (3 B), reprojection error (d) = 43 bytes.
            binary_point_line_properties = read_next_bytes(fid, num_bytes=43, format_char_sequence="QdddBBBd")
            point3D_id = binary_point_line_properties[0]
            xyz = np.array(binary_point_line_properties[1:4])
            rgb = np.array(binary_point_line_properties[4:7])
            error = np.array(binary_point_line_properties[7])
            track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[0]
            # Track: (image_id, point2D_idx) int pairs, one per observation.
            track_elems = read_next_bytes(fid, num_bytes=8 * track_length, format_char_sequence="ii" * track_length)
            image_ids = np.array(tuple(map(int, track_elems[0::2])))
            point2D_idxs = np.array(tuple(map(int, track_elems[1::2])))
            points3D[point3D_id] = Point3D(
                id=point3D_id, xyz=xyz, rgb=rgb, error=error, image_ids=image_ids, point2D_idxs=point2D_idxs
            )
    return points3D
def read_model(path, ext):
    """Load a COLMAP reconstruction in either text or binary format.

    Args:
        path: directory containing the cameras/images/points3D files.
        ext: file extension, ".txt" selects the text readers, anything
            else selects the binary readers.

    Returns:
        Tuple of (cameras, images, points3D) dicts.
    """
    if ext == ".txt":
        readers = (read_cameras_text, read_images_text, read_points3D_text)
    else:
        readers = (read_cameras_binary, read_images_binary, read_points3d_binary)
    cameras = readers[0](os.path.join(path, "cameras" + ext))
    images = readers[1](os.path.join(path, "images" + ext))
    points3D = readers[2](os.path.join(path, "points3D" + ext))
    return cameras, images, points3D
162,297 | import collections
import os
import struct
import numpy as np
def rotmat2qvec(R):
    """Convert a 3x3 rotation matrix to a quaternion (w, x, y, z).

    Uses the eigenvector method: the quaternion is the eigenvector of a
    symmetric 4x4 matrix built from R that corresponds to its largest
    eigenvalue. The sign is canonicalized so that w >= 0.
    """
    Rxx, Ryx, Rzx, Rxy, Ryy, Rzy, Rxz, Ryz, Rzz = R.flat
    # Symmetric matrix whose dominant eigenvector is the quaternion (x, y, z, w).
    K = (
        np.array(
            [
                [Rxx - Ryy - Rzz, 0, 0, 0],
                [Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0],
                [Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0],
                [Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz],
            ]
        )
        / 3.0
    )
    eigvals, eigvecs = np.linalg.eigh(K)
    # Reorder (x, y, z, w) -> (w, x, y, z), taking the largest-eigenvalue vector.
    qvec = eigvecs[[3, 0, 1, 2], np.argmax(eigvals)]
    # Canonicalize the sign ambiguity so that the scalar part is non-negative.
    return -qvec if qvec[0] < 0 else qvec
162,298 | from typing import Dict, Optional, Tuple
import numpy as np
import torch
from torch import nn
from torch.nn.parameter import Parameter
from torchtyping import TensorType
from nerfstudio.cameras.rays import RaySamples
from nerfstudio.data.scene_box import SceneBox
from nerfstudio.field_components.activations import trunc_exp
from nerfstudio.field_components.embedding import Embedding
from nerfstudio.field_components.encodings import Encoding, HashEncoding, SHEncoding
from nerfstudio.field_components.field_heads import (
DensityFieldHead,
FieldHead,
FieldHeadNames,
PredNormalsFieldHead,
RGBFieldHead,
SemanticFieldHead,
TransientDensityFieldHead,
TransientRGBFieldHead,
UncertaintyFieldHead,
)
from nerfstudio.field_components.mlp import MLP
from nerfstudio.field_components.spatial_distortions import (
SceneContraction,
SpatialDistortion,
)
from nerfstudio.fields.base_field import Field
The provided code snippet includes necessary dependencies for implementing the `get_normalized_directions` function. Write a Python function `def get_normalized_directions(directions: TensorType["bs":..., 3])` to solve the following problem:
SH encoding must be in the range [0, 1] Args: directions: batch of directions
Here is the function:
def get_normalized_directions(directions: TensorType["bs":..., 3]):
    """Map direction components from [-1, 1] into [0, 1].

    Spherical-harmonics encodings expect inputs in [0, 1], so shift and
    rescale each component.

    Args:
        directions: batch of directions
    """
    shifted = directions + 1.0
    return shifted / 2.0
162,299 | from typing import Optional
import torch
from nerfacc import ContractionType, contract
from torch.nn.parameter import Parameter
from torchtyping import TensorType
from nerfstudio.cameras.rays import RaySamples
from nerfstudio.data.scene_box import SceneBox
from nerfstudio.field_components.activations import trunc_exp
from nerfstudio.field_components.embedding import Embedding
from nerfstudio.field_components.field_heads import FieldHeadNames
from nerfstudio.fields.base_field import Field
The provided code snippet includes necessary dependencies for implementing the `get_normalized_directions` function. Write a Python function `def get_normalized_directions(directions: TensorType["bs":..., 3])` to solve the following problem:
SH encoding must be in the range [0, 1] Args: directions: batch of directions
Here is the function:
def get_normalized_directions(directions: TensorType["bs":..., 3]):
    """Map direction components from [-1, 1] into [0, 1].

    Spherical-harmonics encodings expect inputs in [0, 1], so shift and
    rescale each component.

    Args:
        directions: batch of directions
    """
    shifted = directions + 1.0
    return shifted / 2.0
162,300 | from __future__ import annotations
import typing
from abc import abstractmethod
from dataclasses import dataclass, field
from time import time
from typing import Any, Dict, List, Optional, Type, Union, cast
import torch
import torch.distributed as dist
from rich.progress import (
BarColumn,
MofNCompleteColumn,
Progress,
TextColumn,
TimeElapsedColumn,
)
from torch import nn
from torch.nn import Parameter
from torch.nn.parallel import DistributedDataParallel as DDP
from typing_extensions import Literal
from nerfstudio.configs import base_config as cfg
from nerfstudio.data.datamanagers.base_datamanager import (
DataManager,
FlexibleDataManager,
FlexibleDataManagerConfig,
VanillaDataManager,
VanillaDataManagerConfig,
)
from nerfstudio.engine.callbacks import TrainingCallback, TrainingCallbackAttributes
from nerfstudio.models.base_model import Model, ModelConfig
from nerfstudio.utils import profiler
from nerfstudio.utils.images import BasicImages
class Model(nn.Module):
    """Model class
    Where everything (Fields, Optimizers, Samplers, Visualization, etc) is linked together. This should be
    subclassed for custom NeRF model.
    Args:
        config: configuration for instantiating model
        scene_box: dataset scene box
    """
    # Class-level annotation so subclasses can narrow the config type to
    # their own ModelConfig subclass.
    config: ModelConfig
    def __init__(
        self,
        config: ModelConfig,
        scene_box: SceneBox,
        num_train_data: int,
        world_size: int = 1,
        local_rank: int = 0,
        **kwargs,
    ) -> None:
        super().__init__()
        self.config = config
        self.scene_box = scene_box
        self.num_train_data = num_train_data
        self.kwargs = kwargs
        # Stays None unless populate_modules() installs a collider below.
        self.collider = None
        self.world_size = world_size
        self.local_rank = local_rank
        self.populate_modules()  # populate the modules
        self.callbacks = None
        # to keep track of which device the nn.Module is on
        self.device_indicator_param = nn.Parameter(torch.empty(0))
    def device(self):
        """Returns the device that the model is on."""
        # NOTE(review): upstream nerfstudio exposes this as a @property; here it
        # is a plain method, so callers must invoke model.device() -- confirm.
        return self.device_indicator_param.device
    def get_training_callbacks(  # pylint:disable=no-self-use
        self, training_callback_attributes: TrainingCallbackAttributes  # pylint: disable=unused-argument
    ) -> List[TrainingCallback]:
        """Returns a list of callbacks that run functions at the specified training iterations."""
        return []
    def populate_modules(self):
        """Set the necessary modules to get the network working."""
        # default instantiates optional modules that are common among many networks
        # NOTE: call `super().populate_modules()` in subclasses
        if self.config.enable_collider:
            self.collider = NearFarCollider(
                near_plane=self.config.collider_params["near_plane"], far_plane=self.config.collider_params["far_plane"]
            )
    def get_param_groups(self) -> Dict[str, List[Parameter]]:
        """Obtain the parameter groups for the optimizers
        Returns:
            Mapping of different parameter groups
        """
        # Abstract hook: subclasses override; the base implementation returns None.
    def get_outputs(self, ray_bundle: RayBundle) -> Dict[str, torch.Tensor]:
        """Takes in a Ray Bundle and returns a dictionary of outputs.
        Args:
            ray_bundle: Input bundle of rays. This raybundle should have all the
                needed information to compute the outputs.
        Returns:
            Outputs of model. (ie. rendered colors)
        """
        # Abstract hook: subclasses override; the base implementation returns None.
    def forward(self, ray_bundle: RayBundle) -> Dict[str, torch.Tensor]:
        """Run forward starting with a ray bundle. This outputs different things depending on the configuration
        of the model and whether or not the batch is provided (whether or not we are training basically)
        Args:
            ray_bundle: containing all the information needed to render that ray latents included
        """
        # Clip rays against the configured collider (if any) before shading.
        if self.collider is not None:
            ray_bundle = self.collider(ray_bundle)
        return self.get_outputs(ray_bundle)
    def get_metrics_dict(self, outputs, batch) -> Dict[str, torch.Tensor]:
        """Compute and returns metrics.
        Args:
            outputs: the output to compute loss dict to
            batch: ground truth batch corresponding to outputs
        """
        # pylint: disable=unused-argument
        # pylint: disable=no-self-use
        return {}
    def get_loss_dict(self, outputs, batch, metrics_dict=None) -> Dict[str, torch.Tensor]:
        """Computes and returns the losses dict.
        Args:
            outputs: the output to compute loss dict to
            batch: ground truth batch corresponding to outputs
            metrics_dict: dictionary of metrics, some of which we can use for loss
        """
        # Abstract hook: subclasses override; the base implementation returns None.
    def get_outputs_for_camera_ray_bundle(self, camera_ray_bundle: RayBundle) -> Dict[str, torch.Tensor]:
        """Takes in camera parameters and computes the output of the model.
        Args:
            camera_ray_bundle: ray bundle to calculate outputs over
        """
        # Evaluate a full image in fixed-size chunks to bound peak memory,
        # then stitch per-chunk outputs back into (H, W, C) tensors.
        num_rays_per_chunk = self.config.eval_num_rays_per_chunk
        image_height, image_width = camera_ray_bundle.origins.shape[:2]
        num_rays = len(camera_ray_bundle)
        # NOTE(review): relies on collections.defaultdict being imported at the
        # file level -- the import is not visible in this chunk.
        outputs_lists = defaultdict(list)
        for i in range(0, num_rays, num_rays_per_chunk):
            start_idx = i
            end_idx = i + num_rays_per_chunk
            ray_bundle = camera_ray_bundle.get_row_major_sliced_ray_bundle(start_idx, end_idx)
            outputs = self.forward(ray_bundle=ray_bundle)
            for output_name, output in outputs.items():  # type: ignore
                outputs_lists[output_name].append(output)
        outputs = {}
        for output_name, outputs_list in outputs_lists.items():
            if not torch.is_tensor(outputs_list[0]):
                # TODO: handle lists of tensors as well
                continue
            outputs[output_name] = torch.cat(outputs_list).view(image_height, image_width, -1)  # type: ignore
        return outputs
    def get_image_metrics_and_images(
        self, outputs: Dict[str, torch.Tensor], batch: Dict[str, torch.Tensor]
    ) -> Tuple[Dict[str, float], Dict[str, torch.Tensor]]:
        """Writes the test image outputs.
        TODO: This shouldn't return a loss
        Args:
            image_idx: Index of the image.
            step: Current step.
            batch: Batch of data.
            outputs: Outputs of the model.
        Returns:
            A dictionary of metrics.
        """
        # Abstract hook: subclasses override; the base implementation returns None.
    def load_model(self, loaded_state: Dict[str, Any]) -> None:
        """Load the checkpoint from the given path
        Args:
            loaded_state: dictionary of pre-trained model states
        """
        # Strip the "module." prefix that DistributedDataParallel adds to
        # checkpointed parameter names before loading.
        state = {key.replace("module.", ""): value for key, value in loaded_state["model"].items()}
        self.load_state_dict(state)  # type: ignore
The provided code snippet includes necessary dependencies for implementing the `module_wrapper` function. Write a Python function `def module_wrapper(ddp_or_model: Union[DDP, Model]) -> Model` to solve the following problem:
If DDP, then return the .module. Otherwise, return the model.
Here is the function:
def module_wrapper(ddp_or_model: Union[DDP, Model]) -> Model:
    """
    If DDP, then return the .module. Otherwise, return the model.
    """
    if not isinstance(ddp_or_model, DDP):
        return ddp_or_model
    # DDP wraps the real model in .module; unwrap and re-type it.
    return cast(Model, ddp_or_model.module)
162,301 | import shutil
import sys
from enum import Enum
from pathlib import Path
from typing import List, Optional, Tuple
from rich.console import Console
from typing_extensions import Literal
from nerfstudio.utils.rich_utils import status
from nerfstudio.utils.scripts import run_command
CONSOLE = Console(width=120)
def get_num_frames_in_video(video: Path) -> int:
    """Returns the number of frames in a video.

    Args:
        video: Path to a video.

    Returns:
        The number of frames in a video.
    """
    # ffprobe counts packets of the first video stream; for typical files this
    # equals the frame count.
    probe_cmd = (
        "ffprobe -v error -select_streams v:0 -count_packets "
        f"-show_entries stream=nb_read_packets -of csv=p=0 {video}"
    )
    result = run_command(probe_cmd)
    assert result is not None
    return int(result.strip(" ,\t\n\r"))
def status(msg: str, spinner: str = "bouncingBall", verbose: bool = False):
    """Return a context manager showing *msg* under a spinner, or a no-op.

    When ``verbose`` is True the spinner would hide useful logs, so a
    ``nullcontext`` is returned instead.

    Args:
        msg: The message to log.
        spinner: The spinner to use.
        verbose: If True, print all logs, else hide them.
    """
    return nullcontext() if verbose else CONSOLE.status(msg, spinner=spinner)
def run_command(cmd: str, verbose=False) -> Optional[str]:
    """Runs a command and returns the output.

    Args:
        cmd: Command to run.
        verbose: If True, stream the command's own output instead of capturing it.

    Returns:
        The captured stdout as text, or None when output was not captured
        (verbose mode) or no stdout was produced.

    Raises:
        SystemExit: If the command exits with a non-zero status.
    """
    out = subprocess.run(cmd, capture_output=not verbose, shell=True, check=False)
    if out.returncode != 0:
        CONSOLE.rule("[bold red] :skull: :skull: :skull: ERROR :skull: :skull: :skull: ", style="red")
        CONSOLE.print(f"[bold red]Error running command: {cmd}")
        CONSOLE.rule(style="red")
        CONSOLE.print(out.stderr.decode("utf-8"))
        sys.exit(1)
    if out.stdout is not None:
        return out.stdout.decode("utf-8")
    # Bug fix: previously returned the CompletedProcess object here, which
    # contradicted the Optional[str] signature and the docstring.
    return None
The provided code snippet includes necessary dependencies for implementing the `convert_video_to_images` function. Write a Python function `def convert_video_to_images( video_path: Path, image_dir: Path, num_frames_target: int, verbose: bool = False ) -> Tuple[List[str], int]` to solve the following problem:
Converts a video into a sequence of images. Args: video_path: Path to the video. output_dir: Path to the output directory. num_frames_target: Number of frames to extract. verbose: If True, logs the output of the command. Returns: A tuple containing summary of the conversion and the number of extracted frames.
Here is the function:
def convert_video_to_images(
    video_path: Path, image_dir: Path, num_frames_target: int, verbose: bool = False
) -> Tuple[List[str], int]:
    """Converts a video into a sequence of images.

    Args:
        video_path: Path to the video.
        image_dir: Path to the output directory.  (Docstring fix: this was
            previously documented as ``output_dir``.)
        num_frames_target: Number of frames to extract.
        verbose: If True, logs the output of the command.

    Returns:
        A tuple containing summary of the conversion and the number of extracted frames.
    """
    with status(msg="Converting video to images...", spinner="bouncingBall", verbose=verbose):
        # delete existing images in folder
        for img in image_dir.glob("*.png"):
            if verbose:
                CONSOLE.log(f"Deleting {img}")
            img.unlink()
        num_frames = get_num_frames_in_video(video_path)
        if num_frames == 0:
            CONSOLE.print(f"[bold red]Error: Video has no frames: {video_path}")
            sys.exit(1)
        print("Number of frames in video:", num_frames)
        out_filename = image_dir / "frame_%05d.png"
        ffmpeg_cmd = f"ffmpeg -i {video_path}"
        spacing = num_frames // num_frames_target
        if spacing > 1:
            # thumbnail=N keeps the most representative frame of every N frames.
            ffmpeg_cmd += f" -vf thumbnail={spacing},setpts=N/TB -r 1"
        else:
            # Fix: corrected typo in the user-facing message ("satify" -> "satisfy").
            CONSOLE.print("[bold red]Can't satisfy requested number of frames. Extracting all frames.")
        ffmpeg_cmd += f" {out_filename}"
        run_command(ffmpeg_cmd, verbose=verbose)
        num_final_frames = len(list(image_dir.glob("*.png")))
        summary_log = []
        summary_log.append(f"Starting with {num_frames} video frames")
        summary_log.append(f"We extracted {num_final_frames} images")
        CONSOLE.log("[bold green]:tada: Done converting video to images.")
    return summary_log, num_final_frames
162,302 | import shutil
import sys
from enum import Enum
from pathlib import Path
from typing import List, Optional, Tuple
from rich.console import Console
from typing_extensions import Literal
from nerfstudio.utils.rich_utils import status
from nerfstudio.utils.scripts import run_command
def copy_images_list(
    image_paths: List[Path], image_dir: Path, crop_border_pixels: Optional[int] = None, verbose: bool = False
) -> List[Path]:
    """Copy all images in a list of Paths. Useful for filtering from a directory.

    Args:
        image_paths: List of Paths of images to copy to a new directory.
        image_dir: Path to the output directory.
        crop_border_pixels: If not None, crops each edge by the specified number of pixels.
        verbose: If True, print extra logging.

    Returns:
        A list of the copied image Paths.
    """
    # Wipe the destination only when we actually have images to copy into it.
    if image_dir.is_dir() and len(image_paths):
        shutil.rmtree(image_dir, ignore_errors=True)
    image_dir.mkdir(exist_ok=True, parents=True)

    # Frames are renamed to a 1-indexed frame_XXXXX pattern for the pipeline.
    copied_image_paths = []
    total = len(image_paths)
    for index, source in enumerate(image_paths):
        if verbose:
            CONSOLE.log(f"Copying image {index + 1} of {total}...")
        destination = image_dir / f"frame_{index + 1:05d}{source.suffix}"
        shutil.copy(source, destination)
        copied_image_paths.append(destination)

    if crop_border_pixels is not None:
        # Crop all copied frames in place with a single ffmpeg invocation.
        file_type = image_paths[0].suffix
        filename = f"frame_%05d{file_type}"
        crop = f"crop=iw-{crop_border_pixels*2}:ih-{crop_border_pixels*2}"
        ffmpeg_cmd = f"ffmpeg -y -noautorotate -i {image_dir / filename} -q:v 2 -vf {crop} {image_dir / filename}"
        run_command(ffmpeg_cmd, verbose=verbose)

    if total == 0:
        CONSOLE.log("[bold red]:skull: No usable images in the data folder.")
    else:
        CONSOLE.log("[bold green]:tada: Done copying images.")
    return copied_image_paths
def status(msg: str, spinner: str = "bouncingBall", verbose: bool = False):
    """Context manager that shows *msg* with a spinner, or does nothing.

    Args:
        msg: The message to log.
        spinner: The spinner to use.
        verbose: If True, print all logs, else hide them.
    """
    if not verbose:
        return CONSOLE.status(msg, spinner=spinner)
    # Verbose mode: the spinner would swallow logs, so no-op instead.
    return nullcontext()
The provided code snippet includes necessary dependencies for implementing the `copy_images` function. Write a Python function `def copy_images(data: Path, image_dir: Path, verbose) -> int` to solve the following problem:
Copy images from a directory to a new directory. Args: data: Path to the directory of images. image_dir: Path to the output directory. verbose: If True, print extra logging. Returns: The number of images copied.
Here is the function:
def copy_images(data: Path, image_dir: Path, verbose) -> int:
    """Copy images from a directory to a new directory.

    Args:
        data: Path to the directory of images.
        image_dir: Path to the output directory.
        verbose: If True, print extra logging.

    Returns:
        The number of images copied.
    """
    with status(msg="[bold yellow]Copying images...", spinner="bouncingBall", verbose=verbose):
        allowed_exts = [".jpg", ".jpeg", ".png", ".tif", ".tiff"]
        # "[!.]*" skips hidden files; the extension check is case-insensitive.
        candidates = (entry for entry in data.glob("[!.]*") if entry.suffix.lower() in allowed_exts)
        copied = copy_images_list(sorted(candidates), image_dir, verbose=verbose)
    return len(copied)
162,303 | import shutil
import sys
from enum import Enum
from pathlib import Path
from typing import List, Optional, Tuple
from rich.console import Console
from typing_extensions import Literal
from nerfstudio.utils.rich_utils import status
from nerfstudio.utils.scripts import run_command
CONSOLE = Console(width=120)
def status(msg: str, spinner: str = "bouncingBall", verbose: bool = False):
    """Give back a spinner context manager, or a no-op when verbose.

    Args:
        msg: The message to log.
        spinner: The spinner to use.
        verbose: If True, print all logs, else hide them.
    """
    # In verbose mode the spinner would hide the logs we want to see.
    manager = nullcontext() if verbose else CONSOLE.status(msg, spinner=spinner)
    return manager
def run_command(cmd: str, verbose=False) -> Optional[str]:
    """Runs a command and returns the output.

    Args:
        cmd: Command to run.
        verbose: If True, stream the command's own output instead of capturing it.

    Returns:
        The captured stdout as text, or None when output was not captured
        (verbose mode) or no stdout was produced.

    Raises:
        SystemExit: If the command exits with a non-zero status.
    """
    out = subprocess.run(cmd, capture_output=not verbose, shell=True, check=False)
    if out.returncode != 0:
        CONSOLE.rule("[bold red] :skull: :skull: :skull: ERROR :skull: :skull: :skull: ", style="red")
        CONSOLE.print(f"[bold red]Error running command: {cmd}")
        CONSOLE.rule(style="red")
        CONSOLE.print(out.stderr.decode("utf-8"))
        sys.exit(1)
    if out.stdout is not None:
        return out.stdout.decode("utf-8")
    # Bug fix: the original fell through to `return out`, leaking the
    # CompletedProcess object despite the Optional[str] return type.
    return None
The provided code snippet includes necessary dependencies for implementing the `downscale_images` function. Write a Python function `def downscale_images(image_dir: Path, num_downscales: int, verbose: bool = False) -> str` to solve the following problem:
Downscales the images in the directory. Uses FFMPEG. Assumes images are named frame_00001.png, frame_00002.png, etc. Args: image_dir: Path to the directory containing the images. num_downscales: Number of times to downscale the images. Downscales by 2 each time. verbose: If True, logs the output of the command. Returns: Summary of downscaling.
Here is the function:
def downscale_images(image_dir: Path, num_downscales: int, verbose: bool = False) -> str:
    """Downscales the images in the directory. Uses FFMPEG.

    Assumes images are named frame_00001.png, frame_00002.png, etc.

    Args:
        image_dir: Path to the directory containing the images.
        num_downscales: Number of times to downscale the images. Downscales by 2 each time.
        verbose: If True, logs the output of the command.

    Returns:
        Summary of downscaling.
    """
    if num_downscales == 0:
        return "No downscaling performed."

    with status(msg="[bold yellow]Downscaling images...", spinner="growVertical", verbose=verbose):
        downscale_factors = [2**i for i in range(num_downscales + 1)[1:]]
        for downscale_factor in downscale_factors:
            assert downscale_factor > 1
            assert isinstance(downscale_factor, int)
            downscale_dir = image_dir.parent / f"images_{downscale_factor}"
            downscale_dir.mkdir(parents=True, exist_ok=True)
            # Infer the image extension from the first frame present.
            file_type = image_dir.glob("frame_*").__next__().suffix
            filename = f"frame_%05d{file_type}"
            ffmpeg_cmd = [
                f"ffmpeg -y -noautorotate -i {image_dir / filename} ",
                f"-q:v 2 -vf scale=iw/{downscale_factor}:ih/{downscale_factor} ",
                f"{downscale_dir / filename}",
            ]
            ffmpeg_cmd = " ".join(ffmpeg_cmd)
            run_command(ffmpeg_cmd, verbose=verbose)

    CONSOLE.log("[bold green]:tada: Done downscaling images.")
    downscale_text = [f"[bold blue]{2**(i+1)}x[/bold blue]" for i in range(num_downscales)]
    # Bug fix: with exactly one factor the original produced " and 2x"
    # (a leading " and ") because the join over the empty prefix was "".
    if len(downscale_text) == 1:
        downscale_text = downscale_text[0]
    else:
        downscale_text = ", ".join(downscale_text[:-1]) + " and " + downscale_text[-1]
    return f"We downsampled the images by {downscale_text}"
162,304 | import shutil
import sys
from enum import Enum
from pathlib import Path
from typing import List, Optional, Tuple
from rich.console import Console
from typing_extensions import Literal
from nerfstudio.utils.rich_utils import status
from nerfstudio.utils.scripts import run_command
The provided code snippet includes necessary dependencies for implementing the `find_tool_feature_matcher_combination` function. Write a Python function `def find_tool_feature_matcher_combination( sfm_tool: Literal["any", "colmap", "hloc"], feature_type: Literal[ "any", "sift", "superpoint", "superpoint_aachen", "superpoint_max", "superpoint_inloc", "r2d2", "d2net-ss", "sosnet", "disk", ], matcher_type: Literal[ "any", "NN", "superglue", "superglue-fast", "NN-superpoint", "NN-ratio", "NN-mutual", "adalam" ], )` to solve the following problem:
Find a valid combination of sfm tool, feature type, and matcher type. Basically, replace the default parameter value 'any' with a usable value. Args: sfm_tool: Sfm tool name (any, colmap, hloc) feature_type: Type of image features (any, sift, superpoint, ...) matcher_type: Type of matching algorithm (any, NN, superglue, ...) Returns: Tuple of sfm tool, feature type, and matcher type. Returns (None, None, None) if no valid combination can be found.
Here is the function:
def find_tool_feature_matcher_combination(
    sfm_tool: Literal["any", "colmap", "hloc"],
    feature_type: Literal[
        "any",
        "sift",
        "superpoint",
        "superpoint_aachen",
        "superpoint_max",
        "superpoint_inloc",
        "r2d2",
        "d2net-ss",
        "sosnet",
        "disk",
    ],
    matcher_type: Literal[
        "any", "NN", "superglue", "superglue-fast", "NN-superpoint", "NN-ratio", "NN-mutual", "adalam"
    ],
):
    """Resolve 'any' placeholders into a concrete (sfm tool, feature, matcher) triple.

    Args:
        sfm_tool: Sfm tool name (any, colmap, hloc)
        feature_type: Type of image features (any, sift, superpoint, ...)
        matcher_type: Type of matching algorithm (any, NN, superglue, ...)

    Returns:
        Tuple of sfm tool, feature type, and matcher type.
        Returns (None, None, None) if no valid combination can be found.
    """
    # COLMAP only supports SIFT features matched with plain NN.
    colmap_compatible = feature_type in ("any", "sift") and matcher_type in ("any", "NN")

    if sfm_tool == "any":
        sfm_tool = "colmap" if colmap_compatible else "hloc"

    if sfm_tool == "colmap":
        return ("colmap", "sift", "NN") if colmap_compatible else (None, None, None)

    if sfm_tool == "hloc":
        # Resolve hloc placeholders to its recommended defaults.
        if feature_type in ("any", "superpoint"):
            feature_type = "superpoint_aachen"
        if matcher_type == "any":
            matcher_type = "superglue"
        elif matcher_type == "NN":
            matcher_type = "NN-mutual"
        return (sfm_tool, feature_type, matcher_type)

    return (None, None, None)
162,305 | import json
import os
import struct
from dataclasses import dataclass
from io import BufferedReader
from pathlib import Path
from typing import Dict, Optional, Tuple
import appdirs
import numpy as np
import requests
from rich.console import Console
from rich.progress import track
from typing_extensions import Literal
from nerfstudio.process_data.process_data_utils import CameraModel
from nerfstudio.utils.rich_utils import status
from nerfstudio.utils.scripts import run_command
class Camera:
    """A single COLMAP camera record."""
    # NOTE(review): the readers below construct this class with keyword
    # arguments, so upstream presumably decorates it with @dataclass; the
    # decorator is not visible in this chunk -- confirm.
    id: int
    """Camera identifier"""
    model: str
    """Camera model"""
    width: int
    """Image width"""
    height: int
    """Image height"""
    params: np.ndarray
    """Camera parameters"""
class Image:
    """Data that corresponds to a single image"""
    # NOTE(review): constructed with keyword arguments by the readers below,
    # so upstream presumably decorates this with @dataclass -- confirm.
    id: int
    """Image identifier"""
    qvec: np.ndarray
    """Quaternion vector"""
    tvec: np.ndarray
    """Translation vector"""
    camera_id: int
    """Camera identifier"""
    name: str
    """Image name"""
    xys: np.ndarray
    """2D points"""
    point3d_ids: np.ndarray
    """Point3D identifiers"""
class Point3D:
    """Data that corresponds to a single 3D point"""
    # NOTE(review): constructed with keyword arguments by the readers below,
    # so upstream presumably decorates this with @dataclass -- confirm.
    id: int
    """Point3D identifier"""
    xyz: np.ndarray
    """3D point"""
    rgb: np.ndarray
    """Color"""
    error: float
    """Reconstruction error"""
    image_ids: np.ndarray
    """Image identifiers"""
    point2d_idxs: np.ndarray
    """Point2D indices"""
def read_cameras_text(path: Path) -> Dict[int, Camera]:
    """Parse COLMAP cameras.txt file into a dictionary of Camera objects.

    Args:
        path: Path to cameras.txt file.

    Returns:
        Dictionary of Camera objects.
    """
    cameras = {}
    with open(path, encoding="utf-8") as fid:
        for raw_line in fid:
            stripped = raw_line.strip()
            # Skip blank lines and '#' comment lines.
            if not stripped or stripped[0] == "#":
                continue
            fields = stripped.split()
            cam_id = int(fields[0])
            cameras[cam_id] = Camera(
                id=cam_id,
                model=fields[1],
                width=int(fields[2]),
                height=int(fields[3]),
                params=np.array(tuple(map(float, fields[4:]))),
            )
    return cameras
def read_cameras_binary(path_to_model_file: Path) -> Dict[int, Camera]:
    """Parse COLMAP cameras.bin file into a dictionary of Camera objects.
    Args:
        path_to_model_file: Path to cameras.bin file.
    Returns:
        Dictionary of Camera objects.
    """
    # NOTE(review): read_next_bytes and COLMAP_CAMERA_MODEL_IDS are defined
    # elsewhere in the file and are not visible in this chunk.
    cameras = {}
    with open(path_to_model_file, "rb") as fid:
        # Leading uint64 holds the number of camera records.
        num_cameras = read_next_bytes(fid, 8, "Q")[0]
        for _ in range(num_cameras):
            # Fixed-size header: camera_id, model_id, width, height.
            camera_properties = read_next_bytes(fid, num_bytes=24, format_char_sequence="iiQQ")
            camera_id = camera_properties[0]
            model_id = camera_properties[1]
            model_name = COLMAP_CAMERA_MODEL_IDS[camera_properties[1]].model_name
            width = camera_properties[2]
            height = camera_properties[3]
            # The parameter count depends on the camera model.
            num_params = COLMAP_CAMERA_MODEL_IDS[model_id].num_params
            params = read_next_bytes(fid, num_bytes=8 * num_params, format_char_sequence="d" * num_params)
            cameras[camera_id] = Camera(
                id=camera_id, model=model_name, width=width, height=height, params=np.array(params)
            )
        assert len(cameras) == num_cameras
    return cameras
def read_images_text(path: Path) -> Dict[int, Image]:
    """Parse COLMAP images.txt file into a dictionary of Image objects.
    Args:
        path: Path to images.txt file.
    Returns:
        Dictionary of Image objects.
    """
    images = {}
    with open(path, encoding="utf-8") as fid:
        while True:
            line = fid.readline()
            if not line:
                break
            line = line.strip()
            # Skip blank lines and '#' comment lines.
            if len(line) > 0 and line[0] != "#":
                elems = line.split()
                image_id = int(elems[0])
                # NOTE(review): COLMAP stores quaternions scalar-first
                # (w, x, y, z) -- confirm against the consumer.
                qvec = np.array(tuple(map(float, elems[1:5])))
                tvec = np.array(tuple(map(float, elems[5:8])))
                camera_id = int(elems[8])
                image_name = elems[9]
                # Each image record spans two lines; the second line holds the
                # 2D keypoints as (x, y, point3d_id) triplets.
                elems = fid.readline().split()
                xys = np.column_stack([tuple(map(float, elems[0::3])), tuple(map(float, elems[1::3]))])
                point3d_ids = np.array(tuple(map(int, elems[2::3])))
                images[image_id] = Image(
                    id=image_id,
                    qvec=qvec,
                    tvec=tvec,
                    camera_id=camera_id,
                    name=image_name,
                    xys=xys,
                    point3d_ids=point3d_ids,
                )
    return images
def read_images_binary(path_to_model_file: Path) -> Dict[int, Image]:
    """Parse COLMAP images.bin file into a dictionary of Image objects.
    Args:
        path_to_model_file: Path to images.bin file.
    Returns:
        Dictionary of Image objects.
    """
    # NOTE(review): read_next_bytes is defined elsewhere in the file and is
    # not visible in this chunk.
    images = {}
    with open(path_to_model_file, "rb") as fid:
        # Leading uint64 holds the number of registered images.
        num_reg_images = read_next_bytes(fid, 8, "Q")[0]
        for _ in range(num_reg_images):
            # Fixed-size header: image_id, qvec (4 doubles), tvec (3 doubles), camera_id.
            binary_image_properties = read_next_bytes(fid, num_bytes=64, format_char_sequence="idddddddi")
            image_id = binary_image_properties[0]
            qvec = np.array(binary_image_properties[1:5])
            tvec = np.array(binary_image_properties[5:8])
            camera_id = binary_image_properties[8]
            # The image name is stored as a NUL-terminated byte string.
            image_name = ""
            current_char = read_next_bytes(fid, 1, "c")[0]
            while current_char != b"\x00":  # look for the ASCII 0 entry
                image_name += current_char.decode("utf-8")
                current_char = read_next_bytes(fid, 1, "c")[0]
            num_points2d = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[0]
            # 2D keypoints are packed as (x, y, point3d_id) triplets.
            x_y_id_s = read_next_bytes(fid, num_bytes=24 * num_points2d, format_char_sequence="ddq" * num_points2d)
            xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))])
            point3d_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
            images[image_id] = Image(
                id=image_id,
                qvec=qvec,
                tvec=tvec,
                camera_id=camera_id,
                name=image_name,
                xys=xys,
                point3d_ids=point3d_ids,
            )
    return images
def read_points3d_text(path) -> Dict[int, Point3D]:
    """Parse COLMAP points3D.txt file into a dictionary of Point3D objects.

    Args:
        path: Path to points3D.txt file.

    Returns:
        Dictionary of Point3D objects.
    """
    points3d = {}
    with open(path, encoding="utf-8") as fid:
        for raw_line in fid:
            stripped = raw_line.strip()
            # Skip blank lines and '#' comment lines.
            if not stripped or stripped[0] == "#":
                continue
            fields = stripped.split()
            pid = int(fields[0])
            # Trailing fields alternate (image_id, point2d_idx) pairs.
            points3d[pid] = Point3D(
                id=pid,
                xyz=np.array(tuple(map(float, fields[1:4]))),
                rgb=np.array(tuple(map(int, fields[4:7]))),
                error=float(fields[7]),
                image_ids=np.array(tuple(map(int, fields[8::2]))),
                point2d_idxs=np.array(tuple(map(int, fields[9::2]))),
            )
    return points3d
def read_points3d_binary(path_to_model_file: Path) -> Dict[int, Point3D]:
    """Parse COLMAP points3D.bin file into a dictionary of Point3D objects.
    Args:
        path_to_model_file: Path to points3D.bin file.
    Returns:
        Dictionary of Point3D objects.
    """
    # NOTE(review): read_next_bytes is defined elsewhere in the file and is
    # not visible in this chunk.
    points3d = {}
    with open(path_to_model_file, "rb") as fid:
        # Leading uint64 holds the number of 3D points.
        num_points = read_next_bytes(fid, 8, "Q")[0]
        for _ in range(num_points):
            # 43-byte record: uint64 id, xyz (3 doubles), rgb (3 bytes), double error.
            binary_point_line_properties = read_next_bytes(fid, num_bytes=43, format_char_sequence="QdddBBBd")
            point3d_id = binary_point_line_properties[0]
            xyz = np.array(binary_point_line_properties[1:4])
            rgb = np.array(binary_point_line_properties[4:7])
            error = np.array(binary_point_line_properties[7])
            # Variable-length track: (image_id, point2d_idx) pairs.
            track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[0]
            track_elems = read_next_bytes(fid, num_bytes=8 * track_length, format_char_sequence="ii" * track_length)
            image_ids = np.array(tuple(map(int, track_elems[0::2])))
            point2d_idxs = np.array(tuple(map(int, track_elems[1::2])))
            points3d[point3d_id] = Point3D(
                id=point3d_id, xyz=xyz, rgb=rgb, error=float(error), image_ids=image_ids, point2d_idxs=point2d_idxs
            )
    return points3d
def detect_model_format(path: Path, ext: str) -> bool:
    """Detect the format of the model file.

    Args:
        path: Path to the model file.
        ext: Extension to test.

    Returns:
        True if the model file is the tested extension, False otherwise.
    """
    required = (path / f"cameras{ext}", path / f"images{ext}", path / f"points3D{ext}")
    if all(os.path.isfile(candidate) for candidate in required):
        print("Detected model format: '" + ext + "'")
        return True
    return False
The provided code snippet includes necessary dependencies for implementing the `read_model` function. Write a Python function `def read_model(path: Path, ext: Optional[str] = None) -> Tuple[Dict[int, Camera], Dict[int, Image], Dict[int, Point3D]]` to solve the following problem:
Read a COLMAP model from a directory. Args: path: Path to the model directory. ext: Extension of the model files. If None, the function will try to detect the format. Returns: Tuple of dictionaries of Camera, Image, and Point3D objects.
Here is the function:
def read_model(path: Path, ext: Optional[str] = None) -> Tuple[Dict[int, Camera], Dict[int, Image], Dict[int, Point3D]]:
    """Read a COLMAP model from a directory.

    Args:
        path: Path to the model directory.
        ext: Extension of the model files. If None, the function will try to detect the format.

    Returns:
        Tuple of dictionaries of Camera, Image, and Point3D objects.
    """
    # try to detect the extension automatically
    if ext is None:
        if detect_model_format(path, ".bin"):
            ext = ".bin"
        elif detect_model_format(path, ".txt"):
            ext = ".txt"
        else:
            raise ValueError("Provide model format: '.bin' or '.txt'")

    # Dispatch to the text or binary reader triple.
    if ext == ".txt":
        readers = (read_cameras_text, read_images_text, read_points3d_text)
    else:
        readers = (read_cameras_binary, read_images_binary, read_points3d_binary)
    load_cameras, load_images, load_points = readers
    cameras = load_cameras(path / f"cameras{ext}")
    images = load_images(path / f"images{ext}")
    points3d = load_points(path / f"points3D{ext}")
    return cameras, images, points3d
162,306 | import json
import os
import struct
from dataclasses import dataclass
from io import BufferedReader
from pathlib import Path
from typing import Dict, Optional, Tuple
import appdirs
import numpy as np
import requests
from rich.console import Console
from rich.progress import track
from typing_extensions import Literal
from nerfstudio.process_data.process_data_utils import CameraModel
from nerfstudio.utils.rich_utils import status
from nerfstudio.utils.scripts import run_command
The provided code snippet includes necessary dependencies for implementing the `rotmat2qvec` function. Write a Python function `def rotmat2qvec(R)` to solve the following problem:
Convert rotation matrix to quaternion. Args: R: Rotation matrix of shape (3, 3). Returns: Quaternion vector of shape (4,).
Here is the function:
def rotmat2qvec(R):
    """Convert rotation matrix to quaternion.

    Builds a symmetric 4x4 matrix from the rotation entries and takes the
    eigenvector of its largest eigenvalue, which is numerically stable for
    all rotations.

    Args:
        R: Rotation matrix of shape (3, 3).

    Returns:
        Quaternion vector of shape (4,), scalar-first, with non-negative w.
    """
    rxx, ryx, rzx, rxy, ryy, rzy, rxz, ryz, rzz = R.flat
    k_matrix = np.array(
        [
            [rxx - ryy - rzz, 0, 0, 0],
            [ryx + rxy, ryy - rxx - rzz, 0, 0],
            [rzx + rxz, rzy + ryz, rzz - rxx - ryy, 0],
            [ryz - rzy, rzx - rxz, rxy - ryx, rxx + ryy + rzz],
        ]
    ) / 3.0
    eigvals, eigvecs = np.linalg.eigh(k_matrix)
    qvec = eigvecs[np.array([3, 0, 1, 2]), np.argmax(eigvals)]
    # Canonicalize the sign: q and -q encode the same rotation.
    return -qvec if qvec[0] < 0 else qvec
162,307 | import json
import os
import struct
from dataclasses import dataclass
from io import BufferedReader
from pathlib import Path
from typing import Dict, Optional, Tuple
import appdirs
import numpy as np
import requests
from rich.console import Console
from rich.progress import track
from typing_extensions import Literal
from nerfstudio.process_data.process_data_utils import CameraModel
from nerfstudio.utils.rich_utils import status
from nerfstudio.utils.scripts import run_command
CONSOLE = Console(width=120)
def get_colmap_version(colmap_cmd: str, default_version=3.8) -> float:
    """Returns the version of COLMAP.

    This code assumes that colmap returns a version string of the form
    "COLMAP 3.8 ..." which may not be true for all versions of COLMAP.

    Args:
        colmap_cmd: Command used to invoke COLMAP.
        default_version: Default version to return if COLMAP version can't be determined.

    Returns:
        The version of COLMAP as a major.minor float.
    """
    import re

    output = run_command(colmap_cmd, verbose=False)
    assert output is not None
    for line in output.split("\n"):
        if line.startswith("COLMAP"):
            # Bug fix: newer releases report patch versions ("COLMAP 3.9.1"),
            # which float() cannot parse; extract only major.minor. A line
            # without a parsable version now falls through to the default
            # instead of raising.
            match = re.search(r"(\d+)\.(\d+)", line)
            if match is not None:
                return float(f"{match.group(1)}.{match.group(2)}")
    CONSOLE.print(f"[bold red]Could not find COLMAP version. Using default {default_version}")
    return default_version
def get_vocab_tree() -> Path:
    """Return path to vocab tree. Downloads vocab tree if it doesn't exist.

    Returns:
        The path to the vocab tree.
    """
    # NOTE(review): the cached filename carries a .fbow suffix while the
    # download is COLMAP's .bin vocab tree — presumably intentional; confirm.
    vocab_tree_path = Path(appdirs.user_data_dir("nerfstudio")) / "vocab_tree.fbow"
    if vocab_tree_path.exists():
        return vocab_tree_path
    response = requests.get("https://demuc.de/colmap/vocab_tree_flickr100K_words32K.bin", stream=True)
    vocab_tree_path.parent.mkdir(parents=True, exist_ok=True)
    with open(vocab_tree_path, "wb") as vocab_file:
        content_length = response.headers.get("content-length")
        assert content_length is not None
        for chunk in track(
            response.iter_content(chunk_size=1024),
            total=int(content_length) / 1024 + 1,
            description="Downloading vocab tree...",
        ):
            if chunk:
                vocab_file.write(chunk)
                vocab_file.flush()
    return vocab_tree_path
class CameraModel(Enum):
    """Enum for camera types.

    The string values are COLMAP camera model names and are passed directly
    to COLMAP (e.g. as ``--ImageReader.camera_model`` in ``run_colmap``).
    """
    # Standard perspective model with OpenCV-style distortion.
    OPENCV = "OPENCV"
    # Fisheye variant of the OpenCV model.
    OPENCV_FISHEYE = "OPENCV_FISHEYE"
def status(msg: str, spinner: str = "bouncingBall", verbose: bool = False):
    """A context manager that does nothing if verbose is True. Otherwise it hides logs under a message.

    Args:
        msg: The message to log.
        spinner: The spinner to use.
        verbose: If True, print all logs, else hide them.
    """
    if not verbose:
        return CONSOLE.status(msg, spinner=spinner)
    return nullcontext()
def run_command(cmd: str, verbose=False) -> Optional[str]:
    """Runs a command and returns the output.

    Args:
        cmd: Command to run (passed to the shell).
        verbose: If True, the command's output streams to the terminal and
            is not captured; if False, output is captured and returned.

    Returns:
        The captured stdout as a string, or None when nothing was captured
        (i.e. when ``verbose`` is True).

    Exits the process with status 1 if the command fails.
    """
    out = subprocess.run(cmd, capture_output=not verbose, shell=True, check=False)
    if out.returncode != 0:
        CONSOLE.rule("[bold red] :skull: :skull: :skull: ERROR :skull: :skull: :skull: ", style="red")
        CONSOLE.print(f"[bold red]Error running command: {cmd}")
        CONSOLE.rule(style="red")
        # Bug fix: stderr is None when verbose=True (not captured); guard the
        # decode so the error path doesn't raise AttributeError before exiting.
        if out.stderr is not None:
            CONSOLE.print(out.stderr.decode("utf-8"))
        sys.exit(1)
    if out.stdout is not None:
        return out.stdout.decode("utf-8")
    # Bug fix: previously returned the CompletedProcess object here, which
    # contradicted the Optional[str] annotation.
    return None
The provided code snippet includes necessary dependencies for implementing the `run_colmap` function. Write a Python function `def run_colmap( image_dir: Path, colmap_dir: Path, camera_model: CameraModel, gpu: bool = True, verbose: bool = False, matching_method: Literal["vocab_tree", "exhaustive", "sequential"] = "vocab_tree", colmap_cmd: str = "colmap", ) -> None` to solve the following problem:
Runs COLMAP on the images. Args: image_dir: Path to the directory containing the images. colmap_dir: Path to the output directory. camera_model: Camera model to use. gpu: If True, use GPU. verbose: If True, logs the output of the command.
Here is the function:
def run_colmap(
    image_dir: Path,
    colmap_dir: Path,
    camera_model: CameraModel,
    gpu: bool = True,
    verbose: bool = False,
    matching_method: Literal["vocab_tree", "exhaustive", "sequential"] = "vocab_tree",
    colmap_cmd: str = "colmap",
) -> None:
    """Runs COLMAP on the images.

    Pipeline: feature extraction -> feature matching -> mapping (bundle
    adjustment) -> intrinsics refinement. Intermediate results are written
    under ``colmap_dir`` (``database.db`` and ``sparse/``).

    Args:
        image_dir: Path to the directory containing the images.
        colmap_dir: Path to the output directory.
        camera_model: Camera model to use.
        gpu: If True, use GPU.
        verbose: If True, logs the output of the command.
        matching_method: Feature-matching strategy passed to COLMAP.
        colmap_cmd: Name or path of the COLMAP executable.
    """
    colmap_version = get_colmap_version(colmap_cmd)
    colmap_database_path = colmap_dir / "database.db"
    if colmap_database_path.exists():
        # Can't use missing_ok argument because of Python 3.7 compatibility.
        colmap_database_path.unlink()
    # Feature extraction
    feature_extractor_cmd = [
        f"{colmap_cmd} feature_extractor",
        f"--database_path {colmap_dir / 'database.db'}",
        f"--image_path {image_dir}",
        "--ImageReader.single_camera 1",
        f"--ImageReader.camera_model {camera_model.value}",
        f"--SiftExtraction.use_gpu {int(gpu)}",
    ]
    feature_extractor_cmd = " ".join(feature_extractor_cmd)
    with status(msg="[bold yellow]Running COLMAP feature extractor...", spinner="moon", verbose=verbose):
        run_command(feature_extractor_cmd, verbose=verbose)
    CONSOLE.log("[bold green]:tada: Done extracting COLMAP features.")
    # Feature matching
    feature_matcher_cmd = [
        f"{colmap_cmd} {matching_method}_matcher",
        f"--database_path {colmap_dir / 'database.db'}",
        f"--SiftMatching.use_gpu {int(gpu)}",
    ]
    if matching_method == "vocab_tree":
        # Vocab-tree matching needs the (possibly downloaded) vocab tree file.
        vocab_tree_filename = get_vocab_tree()
        feature_matcher_cmd.append(f"--VocabTreeMatching.vocab_tree_path {vocab_tree_filename}")
    feature_matcher_cmd = " ".join(feature_matcher_cmd)
    with status(msg="[bold yellow]Running COLMAP feature matcher...", spinner="runner", verbose=verbose):
        run_command(feature_matcher_cmd, verbose=verbose)
    CONSOLE.log("[bold green]:tada: Done matching COLMAP features.")
    # Bundle adjustment
    sparse_dir = colmap_dir / "sparse"
    sparse_dir.mkdir(parents=True, exist_ok=True)
    mapper_cmd = [
        f"{colmap_cmd} mapper",
        f"--database_path {colmap_dir / 'database.db'}",
        f"--image_path {image_dir}",
        f"--output_path {sparse_dir}",
    ]
    if colmap_version >= 3.7:
        # This flag only exists in COLMAP >= 3.7.
        mapper_cmd.append("--Mapper.ba_global_function_tolerance 1e-6")
    mapper_cmd = " ".join(mapper_cmd)
    with status(
        msg="[bold yellow]Running COLMAP bundle adjustment... (This may take a while)",
        spinner="circle",
        verbose=verbose,
    ):
        run_command(mapper_cmd, verbose=verbose)
    CONSOLE.log("[bold green]:tada: Done COLMAP bundle adjustment.")
    with status(msg="[bold yellow]Refine intrinsics...", spinner="dqpb", verbose=verbose):
        bundle_adjuster_cmd = [
            f"{colmap_cmd} bundle_adjuster",
            f"--input_path {sparse_dir}/0",
            f"--output_path {sparse_dir}/0",
            "--BundleAdjustment.refine_principal_point 1",
        ]
        run_command(" ".join(bundle_adjuster_cmd), verbose=verbose)
    CONSOLE.log("[bold green]:tada: Done refining intrinsics.")
162,308 | import json
import os
import struct
from dataclasses import dataclass
from io import BufferedReader
from pathlib import Path
from typing import Dict, Optional, Tuple
import appdirs
import numpy as np
import requests
from rich.console import Console
from rich.progress import track
from typing_extensions import Literal
from nerfstudio.process_data.process_data_utils import CameraModel
from nerfstudio.utils.rich_utils import status
from nerfstudio.utils.scripts import run_command
def read_cameras_binary(path_to_model_file: Path) -> Dict[int, Camera]:
    """Parse COLMAP cameras.bin file into a dictionary of Camera objects.

    Binary layout: a uint64 camera count, then per camera an
    (id, model_id, width, height) record ("iiQQ") followed by
    ``num_params`` doubles of intrinsics.

    Relies on ``read_next_bytes``, ``Camera`` and
    ``COLMAP_CAMERA_MODEL_IDS`` defined elsewhere in this module.

    Args:
        path_to_model_file: Path to cameras.bin file.

    Returns:
        Dictionary of Camera objects keyed by camera id.
    """
    cameras = {}
    with open(path_to_model_file, "rb") as fid:
        num_cameras = read_next_bytes(fid, 8, "Q")[0]
        for _ in range(num_cameras):
            # int camera_id, int model_id, uint64 width, uint64 height
            camera_properties = read_next_bytes(fid, num_bytes=24, format_char_sequence="iiQQ")
            camera_id = camera_properties[0]
            model_id = camera_properties[1]
            model_name = COLMAP_CAMERA_MODEL_IDS[camera_properties[1]].model_name
            width = camera_properties[2]
            height = camera_properties[3]
            # The number of intrinsic parameters depends on the camera model.
            num_params = COLMAP_CAMERA_MODEL_IDS[model_id].num_params
            params = read_next_bytes(fid, num_bytes=8 * num_params, format_char_sequence="d" * num_params)
            cameras[camera_id] = Camera(
                id=camera_id, model=model_name, width=width, height=height, params=np.array(params)
            )
        assert len(cameras) == num_cameras
    return cameras
def read_images_binary(path_to_model_file: Path) -> Dict[int, Image]:
    """Parse COLMAP images.bin file into a dictionary of Image objects.

    Binary layout: a uint64 image count, then per image an
    (id, qvec[4], tvec[3], camera_id) record ("idddddddi"), a
    NUL-terminated image name, and the 2D point observations.

    Relies on ``read_next_bytes`` and ``Image`` defined elsewhere in this
    module.

    Args:
        path_to_model_file: Path to images.bin file.

    Returns:
        Dictionary of Image objects keyed by image id.
    """
    images = {}
    with open(path_to_model_file, "rb") as fid:
        num_reg_images = read_next_bytes(fid, 8, "Q")[0]
        for _ in range(num_reg_images):
            # int image_id, 4 doubles qvec, 3 doubles tvec, int camera_id
            binary_image_properties = read_next_bytes(fid, num_bytes=64, format_char_sequence="idddddddi")
            image_id = binary_image_properties[0]
            qvec = np.array(binary_image_properties[1:5])
            tvec = np.array(binary_image_properties[5:8])
            camera_id = binary_image_properties[8]
            image_name = ""
            current_char = read_next_bytes(fid, 1, "c")[0]
            while current_char != b"\x00":  # look for the ASCII 0 entry
                image_name += current_char.decode("utf-8")
                current_char = read_next_bytes(fid, 1, "c")[0]
            num_points2d = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[0]
            # Each 2D point is (x, y, point3d_id): two doubles and an int64.
            x_y_id_s = read_next_bytes(fid, num_bytes=24 * num_points2d, format_char_sequence="ddq" * num_points2d)
            xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))])
            point3d_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
            images[image_id] = Image(
                id=image_id,
                qvec=qvec,
                tvec=tvec,
                camera_id=camera_id,
                name=image_name,
                xys=xys,
                point3d_ids=point3d_ids,
            )
    return images
def qvec2rotmat(qvec) -> np.ndarray:
    """Convert quaternion to rotation matrix.

    Args:
        qvec: Quaternion vector of shape (4,), ordered (w, x, y, z).

    Returns:
        Rotation matrix of shape (3, 3).
    """
    w, x, y, z = qvec[0], qvec[1], qvec[2], qvec[3]
    row0 = [1 - 2 * y**2 - 2 * z**2, 2 * x * y - 2 * w * z, 2 * z * x + 2 * w * y]
    row1 = [2 * x * y + 2 * w * z, 1 - 2 * x**2 - 2 * z**2, 2 * y * z - 2 * w * x]
    row2 = [2 * z * x - 2 * w * y, 2 * y * z + 2 * w * x, 1 - 2 * x**2 - 2 * y**2]
    return np.array([row0, row1, row2])
class CameraModel(Enum):
    """Enum for camera types.

    The string values are COLMAP camera model names; ``colmap_to_json``
    writes them into ``transforms.json`` as the ``camera_model`` field.
    """
    # Standard perspective model with OpenCV-style distortion (k1, k2, p1, p2).
    OPENCV = "OPENCV"
    # Fisheye variant with distortion coefficients k1..k4.
    OPENCV_FISHEYE = "OPENCV_FISHEYE"
The provided code snippet includes necessary dependencies for implementing the `colmap_to_json` function. Write a Python function `def colmap_to_json(cameras_path: Path, images_path: Path, output_dir: Path, camera_model: CameraModel) -> int` to solve the following problem:
Converts COLMAP's cameras.bin and images.bin to a JSON file. Args: cameras_path: Path to the cameras.bin file. images_path: Path to the images.bin file. output_dir: Path to the output directory. camera_model: Camera model used. Returns: The number of registered images.
Here is the function:
def colmap_to_json(cameras_path: Path, images_path: Path, output_dir: Path, camera_model: CameraModel) -> int:
    """Converts COLMAP's cameras.bin and images.bin to a JSON file.

    Writes ``transforms.json`` into ``output_dir`` with shared intrinsics
    (camera 1 only) and one camera-to-world transform per registered image.

    Args:
        cameras_path: Path to the cameras.bin file.
        images_path: Path to the images.bin file.
        output_dir: Path to the output directory.
        camera_model: Camera model used.

    Returns:
        The number of registered images.
    """
    cameras = read_cameras_binary(cameras_path)
    images = read_images_binary(images_path)
    # Only supports one camera
    camera_params = cameras[1].params
    frames = []
    for _, im_data in images.items():
        # COLMAP stores world-to-camera (qvec, tvec): build the 4x4 and invert
        # to obtain camera-to-world.
        rotation = qvec2rotmat(im_data.qvec)
        translation = im_data.tvec.reshape(3, 1)
        w2c = np.concatenate([rotation, translation], 1)
        w2c = np.concatenate([w2c, np.array([[0, 0, 0, 1]])], 0)
        c2w = np.linalg.inv(w2c)
        # Convert from COLMAP's camera coordinate system to ours
        # (axis sign flips plus a row permutation).
        c2w[0:3, 1:3] *= -1
        c2w = c2w[np.array([1, 0, 2, 3]), :]
        c2w[2, :] *= -1
        name = Path(f"./images/{im_data.name}")
        frame = {
            "file_path": name.as_posix(),
            "transform_matrix": c2w.tolist(),
        }
        frames.append(frame)
    # Shared intrinsics from camera 1: (fx, fy, cx, cy) then distortion.
    out = {
        "fl_x": float(camera_params[0]),
        "fl_y": float(camera_params[1]),
        "cx": float(camera_params[2]),
        "cy": float(camera_params[3]),
        "w": cameras[1].width,
        "h": cameras[1].height,
        "camera_model": camera_model.value,
    }
    if camera_model == CameraModel.OPENCV:
        # Radial (k1, k2) and tangential (p1, p2) distortion coefficients.
        out.update(
            {
                "k1": float(camera_params[4]),
                "k2": float(camera_params[5]),
                "p1": float(camera_params[6]),
                "p2": float(camera_params[7]),
            }
        )
    if camera_model == CameraModel.OPENCV_FISHEYE:
        # Fisheye distortion coefficients k1..k4.
        out.update(
            {
                "k1": float(camera_params[4]),
                "k2": float(camera_params[5]),
                "k3": float(camera_params[6]),
                "k4": float(camera_params[7]),
            }
        )
    out["frames"] = frames
    with open(output_dir / "transforms.json", "w", encoding="utf-8") as f:
        json.dump(out, f, indent=4)
    return len(frames)
162,309 | import json
import os
import struct
from dataclasses import dataclass
from io import BufferedReader
from pathlib import Path
from typing import Dict, Optional, Tuple
import appdirs
import numpy as np
import requests
from rich.console import Console
from rich.progress import track
from typing_extensions import Literal
from nerfstudio.process_data.process_data_utils import CameraModel
from nerfstudio.utils.rich_utils import status
from nerfstudio.utils.scripts import run_command
The provided code snippet includes necessary dependencies for implementing the `get_matching_summary` function. Write a Python function `def get_matching_summary(num_intial_frames: int, num_matched_frames: int) -> str` to solve the following problem:
Returns a summary of the matching results. Args: num_intial_frames: The number of initial frames. num_matched_frames: The number of matched frames. Returns: A summary of the matching results.
Here is the function:
def get_matching_summary(num_intial_frames: int, num_matched_frames: int) -> str:
    """Returns a summary of the matching results.

    Args:
        num_intial_frames: The number of initial frames.
        num_matched_frames: The number of matched frames.

    Returns:
        A rich-markup summary string of the matching results, colored
        green/yellow/red by the fraction of images COLMAP registered.
    """
    match_ratio = num_matched_frames / num_intial_frames
    percent = match_ratio * 100
    if match_ratio == 1:
        # Bug fix: "COLMAP" was previously misspelled "COLAMP" in this message.
        return "[bold green]COLMAP found poses for all images, CONGRATS!"
    if match_ratio < 0.4:
        result = f"[bold red]COLMAP only found poses for {percent:.2f}%"
        result += " of the images. This is low.\nThis can be caused by a variety of reasons,"
        result += " such poor scene coverage, blurry images, or large exposure changes."
        return result
    if match_ratio < 0.8:
        result = f"[bold yellow]COLMAP only found poses for {percent:.2f}%"
        result += " of the images.\nThis isn't great, but may be ok."
        result += "\nMissing poses can be caused by a variety of reasons, such poor scene coverage, blurry images,"
        result += " or large exposure changes."
        return result
    return f"[bold green]COLMAP found poses for {percent:.2f}% of the images."
162,310 | import json
import sys
from pathlib import Path
from typing import List
from rich.console import Console
from nerfstudio.process_data.process_data_utils import CAMERA_MODELS
from nerfstudio.utils import io
CONSOLE = Console(width=120)
# Maps user-facing camera-type names to COLMAP camera model enum members.
CAMERA_MODELS = {
    "perspective": CameraModel.OPENCV,
    "fisheye": CameraModel.OPENCV_FISHEYE,
}
The provided code snippet includes necessary dependencies for implementing the `polycam_to_json` function. Write a Python function `def polycam_to_json( image_filenames: List[Path], cameras_dir: Path, output_dir: Path, min_blur_score: float = 0.0, crop_border_pixels: int = 0, ) -> List[str]` to solve the following problem:
Convert Polycam data into a nerfstudio dataset. Args: image_filenames: List of paths to the original images. cameras_dir: Path to the polycam cameras directory. output_dir: Path to the output directory. min_blur_score: Minimum blur score to use an image. Images below this value will be skipped. crop_border_pixels: Number of pixels to crop from each border of the image. Returns: Summary of the conversion.
Here is the function:
def polycam_to_json(
    image_filenames: List[Path],
    cameras_dir: Path,
    output_dir: Path,
    min_blur_score: float = 0.0,
    crop_border_pixels: int = 0,
) -> List[str]:
    """Convert Polycam data into a nerfstudio dataset.

    Reads one per-frame camera JSON from ``cameras_dir`` for each image and
    writes a combined ``transforms.json`` into ``output_dir``. Exits the
    process when every frame is filtered out.

    Args:
        image_filenames: List of paths to the original images.
        cameras_dir: Path to the polycam cameras directory.
        output_dir: Path to the output directory.
        min_blur_score: Minimum blur score to use an image. Images below this value will be skipped.
        crop_border_pixels: Number of pixels to crop from each border of the image.

    Returns:
        Summary of the conversion.
    """
    data = {}
    data["camera_model"] = CAMERA_MODELS["perspective"].value
    # Needs to be a string for camera_utils.auto_orient_and_center_poses
    data["orientation_override"] = "none"
    frames = []
    skipped_frames = 0
    for i, image_filename in enumerate(image_filenames):
        # Per-frame intrinsics/extrinsics live in <cameras_dir>/<image stem>.json.
        json_filename = cameras_dir / f"{image_filename.stem}.json"
        frame_json = io.load_from_json(json_filename)
        if "blur_score" in frame_json and frame_json["blur_score"] < min_blur_score:
            skipped_frames += 1
            continue
        frame = {}
        frame["fl_x"] = frame_json["fx"]
        frame["fl_y"] = frame_json["fy"]
        # Principal point and image size shrink when the border crop is applied.
        frame["cx"] = frame_json["cx"] - crop_border_pixels
        frame["cy"] = frame_json["cy"] - crop_border_pixels
        frame["w"] = frame_json["width"] - crop_border_pixels * 2
        frame["h"] = frame_json["height"] - crop_border_pixels * 2
        frame["file_path"] = f"./images/frame_{i+1:05d}{image_filename.suffix}"
        # Transform matrix to nerfstudio format. Please refer to the documentation for coordinate system conventions.
        frame["transform_matrix"] = [
            [frame_json["t_20"], frame_json["t_21"], frame_json["t_22"], frame_json["t_23"]],
            [frame_json["t_00"], frame_json["t_01"], frame_json["t_02"], frame_json["t_03"]],
            [frame_json["t_10"], frame_json["t_11"], frame_json["t_12"], frame_json["t_13"]],
            [0.0, 0.0, 0.0, 1.0],
        ]
        frames.append(frame)
    data["frames"] = frames
    with open(output_dir / "transforms.json", "w", encoding="utf-8") as f:
        json.dump(data, f, indent=4)
    summary = []
    if skipped_frames > 0:
        summary.append(f"Skipped {skipped_frames} frames due to low blur score.")
    summary.append(f"Final dataset is {len(image_filenames) - skipped_frames} frames.")
    if len(image_filenames) - skipped_frames == 0:
        CONSOLE.print("[bold red]No images remain after filtering, exiting")
        sys.exit(1)
    return summary
162,311 | import json
import xml.etree.ElementTree as ET
from pathlib import Path
from typing import Dict, List
import numpy as np
from rich.console import Console
from nerfstudio.process_data.process_data_utils import CAMERA_MODELS
CONSOLE = Console(width=120)
def _find_distortion_param(calib_xml: ET.Element, param_name: str):
param = calib_xml.find(param_name)
if param is not None:
return float(param.text) # type: ignore
return 0.0
# Maps user-facing camera-type names to COLMAP camera model enum members.
CAMERA_MODELS = {
    "perspective": CameraModel.OPENCV,
    "fisheye": CameraModel.OPENCV_FISHEYE,
}
The provided code snippet includes necessary dependencies for implementing the `metashape_to_json` function. Write a Python function `def metashape_to_json( # pylint: disable=too-many-statements image_filename_map: Dict[str, Path], xml_filename: Path, output_dir: Path, verbose: bool = False, ) -> List[str]` to solve the following problem:
Convert Metashape data into a nerfstudio dataset. Args: image_filename_map: Mapping of original image filenames to their saved locations. xml_filename: Path to the metashap cameras xml file. output_dir: Path to the output directory. verbose: Whether to print verbose output. Returns: Summary of the conversion.
Here is the function:
def metashape_to_json(  # pylint: disable=too-many-statements
    image_filename_map: Dict[str, Path],
    xml_filename: Path,
    output_dir: Path,
    verbose: bool = False,
) -> List[str]:
    """Convert Metashape data into a nerfstudio dataset.

    Parses the Metashape cameras XML (single-sensor scenes only) and writes
    a ``transforms.json`` into ``output_dir``.

    Args:
        image_filename_map: Mapping of original image filenames to their saved locations.
        xml_filename: Path to the Metashape cameras xml file.
        output_dir: Path to the output directory.
        verbose: Whether to print verbose output.

    Returns:
        Summary of the conversion.

    Raises:
        ValueError: If the XML does not contain exactly one sensor.
    """
    xml_tree = ET.parse(xml_filename)
    root = xml_tree.getroot()
    chunk = root[0]
    sensors = chunk.find("sensors")
    # TODO Add support for per-frame intrinsics
    if sensors is None or len(sensors) != 1:
        raise ValueError("Only one sensor is supported for now")
    sensor = sensors.find("sensor")
    data = {}
    assert sensor is not None, "Sensor not found in Metashape XML"
    resolution = sensor.find("resolution")
    assert resolution is not None, "Resolution not found in Metashape xml"
    data["w"] = int(resolution.get("width"))  # type: ignore
    data["h"] = int(resolution.get("height"))  # type: ignore
    calib = sensor.find("calibration")
    assert calib is not None, "Calibration not found in Metashape xml"
    data["fl_x"] = float(calib.find("f").text)  # type: ignore
    data["fl_y"] = float(calib.find("f").text)  # type: ignore
    # The XML's cx/cy are offsets, so shift by half the image size to get
    # the absolute principal point.
    data["cx"] = float(calib.find("cx").text) + data["w"] / 2.0  # type: ignore
    data["cy"] = float(calib.find("cy").text) + data["h"] / 2.0  # type: ignore
    # Missing distortion parameters default to 0.0 (see _find_distortion_param).
    data["k1"] = _find_distortion_param(calib, "k1")
    data["k2"] = _find_distortion_param(calib, "k2")
    data["k3"] = _find_distortion_param(calib, "k3")
    data["k4"] = _find_distortion_param(calib, "k4")
    data["p1"] = _find_distortion_param(calib, "p1")
    data["p2"] = _find_distortion_param(calib, "p2")
    data["camera_model"] = CAMERA_MODELS["perspective"].value
    frames = []
    cameras = chunk.find("cameras")
    assert cameras is not None, "Cameras not found in Metashape xml"
    num_skipped = 0
    for camera in cameras:
        frame = {}
        # Labels sometimes have a file extension. We remove it for consistency.
        camera_label = camera.get("label").split(".")[0]  # type: ignore
        if camera_label not in image_filename_map:
            continue
        frame["file_path"] = image_filename_map[camera_label].as_posix()
        if camera.find("transform") is None:
            if verbose:
                CONSOLE.print(f"Missing transforms data for {camera.get('label')}, Skipping")
            num_skipped += 1
            continue
        t = [float(x) for x in camera.find("transform").text.split()]  # type: ignore
        # Row-major 4x4 from Metashape, with rows reordered and signs flipped
        # into the nerfstudio coordinate convention.
        transform = np.array(
            [
                [t[8], -t[9], -t[10], t[11]],
                [t[0], -t[1], -t[2], t[3]],
                [t[4], -t[5], -t[6], t[7]],
                [t[12], -t[13], -t[14], t[15]],
            ]
        )
        frame["transform_matrix"] = transform.tolist()
        frames.append(frame)
    data["frames"] = frames
    with open(output_dir / "transforms.json", "w", encoding="utf-8") as f:
        json.dump(data, f, indent=4)
    summary = []
    if num_skipped == 1:
        summary.append(f"{num_skipped} image skipped because it was missing its camera pose.")
    if num_skipped > 1:
        summary.append(f"{num_skipped} images were skipped because they were missing camera poses.")
    summary.append(f"Final dataset is {len(data['frames'])} frames.")
    return summary
162,312 | import json
from pathlib import Path
from typing import List
import numpy as np
from rich.console import Console
from scipy.spatial.transform import Rotation
from nerfstudio.process_data.process_data_utils import CAMERA_MODELS
from nerfstudio.utils import io
# Maps user-facing camera-type names to COLMAP camera model enum members.
CAMERA_MODELS = {
    "perspective": CameraModel.OPENCV,
    "fisheye": CameraModel.OPENCV_FISHEYE,
}
The provided code snippet includes necessary dependencies for implementing the `record3d_to_json` function. Write a Python function `def record3d_to_json(images_paths: List[Path], metadata_path: Path, output_dir: Path, indices: np.ndarray) -> int` to solve the following problem:
Converts Record3D's metadata and image paths to a JSON file. Args: images_paths: list of image paths. metadata_path: Path to the Record3D metadata JSON file. output_dir: Path to the output directory. indices: Indices to sample the metadata_path. Should be the same length as images_paths. Returns: The number of registered images.
Here is the function:
def record3d_to_json(images_paths: List[Path], metadata_path: Path, output_dir: Path, indices: np.ndarray) -> int:
    """Converts Record3D's metadata and image paths to a JSON file.

    Args:
        images_paths: List of image paths.
        metadata_path: Path to the Record3D metadata JSON file.
        output_dir: Path to the output directory.
        indices: Indices to sample the metadata_path. Should be the same length as images_paths.

    Returns:
        The number of registered images.
    """
    assert len(images_paths) == len(indices)
    metadata_dict = io.load_from_json(metadata_path)
    # NOTE(review): the original comment claimed shape (N, 3, 4), but the
    # slicing below treats each pose as a flat vector: quaternion (first 4)
    # plus translation (remaining 3) — confirm against Record3D output.
    poses_data = np.array(metadata_dict["poses"])
    # scipy's Rotation.from_quat takes scalar-last (x, y, z, w) quaternions.
    camera_to_worlds = np.concatenate(
        [Rotation.from_quat(poses_data[:, :4]).as_matrix(), poses_data[:, 4:, None]],
        axis=-1,
    ).astype(np.float32)
    camera_to_worlds = camera_to_worlds[indices]
    # Append the homogeneous [0, 0, 0, 1] row to obtain 4x4 transforms.
    homogeneous_coord = np.zeros_like(camera_to_worlds[..., :1, :])
    homogeneous_coord[..., :, 3] = 1
    camera_to_worlds = np.concatenate([camera_to_worlds, homogeneous_coord], -2)
    frames = []
    for i, im_path in enumerate(images_paths):
        c2w = camera_to_worlds[i]
        frame = {
            "file_path": im_path.as_posix(),
            "transform_matrix": c2w.tolist(),
        }
        frames.append(frame)
    # Camera intrinsics
    K = np.array(metadata_dict["K"]).reshape((3, 3)).T
    focal_length = K[0, 0]
    H = metadata_dict["h"]
    W = metadata_dict["w"]
    # TODO(akristoffersen): The metadata dict comes with principle points,
    # but caused errors in image coord indexing. Should update once that is fixed.
    cx, cy = W / 2, H / 2
    out = {
        "fl_x": focal_length,
        "fl_y": focal_length,
        "cx": cx,
        "cy": cy,
        "w": W,
        "h": H,
        "camera_model": CAMERA_MODELS["perspective"].name,
    }
    out["frames"] = frames
    with open(output_dir / "transforms.json", "w", encoding="utf-8") as f:
        json.dump(out, f, indent=4)
    return len(frames)
162,313 | import sys
from pathlib import Path
from typing import List, Tuple
from rich.console import Console
from nerfstudio.process_data.process_data_utils import get_num_frames_in_video
from nerfstudio.utils.rich_utils import status
from nerfstudio.utils.scripts import run_command
The provided code snippet includes necessary dependencies for implementing the `get_insta360_filenames` function. Write a Python function `def get_insta360_filenames(data: Path) -> Tuple[Path, Path]` to solve the following problem:
Returns the filenames of the Insta360 videos from a single video file. Example input name: VID_20220212_070353_00_003.insv Args: data: Path to an Insta360 file. Returns: The filenames of the Insta360 videos.
Here is the function:
def get_insta360_filenames(data: Path) -> Tuple[Path, Path]:
    """Returns the filenames of the Insta360 videos from a single video file.

    Example input name: VID_20220212_070353_00_003.insv

    Args:
        data: Path to an Insta360 file.

    Returns:
        A (back, front) tuple of the two Insta360 video paths.

    Raises:
        ValueError: If ``data`` does not have the .insv suffix.
    """
    if data.suffix != ".insv":
        raise ValueError("The input file must be an .insv file.")
    parts = data.stem.split("_")
    # The lens is encoded in the 4th field: "00" = back camera, "10" = front.
    filename_back = data.parent / f"VID_{parts[1]}_{parts[2]}_00_{parts[4]}.insv"
    filename_front = data.parent / f"VID_{parts[1]}_{parts[2]}_10_{parts[4]}.insv"
    return filename_back, filename_front
162,314 | import sys
from pathlib import Path
from typing import List, Tuple
from rich.console import Console
from nerfstudio.process_data.process_data_utils import get_num_frames_in_video
from nerfstudio.utils.rich_utils import status
from nerfstudio.utils.scripts import run_command
CONSOLE = Console(width=120)
def get_num_frames_in_video(video: Path) -> int:
    """Returns the number of frames in a video.

    Counts packets on the first video stream via ffprobe. The shell
    collapses whitespace, so the command spacing is not significant.

    Args:
        video: Path to a video.

    Returns:
        The number of frames in a video.
    """
    probe_cmd = (
        "ffprobe -v error -select_streams v:0 -count_packets "
        f"-show_entries stream=nb_read_packets -of csv=p=0 {video}"
    )
    frame_count = run_command(probe_cmd)
    assert frame_count is not None
    return int(frame_count.strip(" ,\t\n\r"))
def status(msg: str, spinner: str = "bouncingBall", verbose: bool = False):
    """A context manager that does nothing if verbose is True. Otherwise it hides logs under a message.

    Args:
        msg: The message to log.
        spinner: The spinner to use.
        verbose: If True, print all logs, else hide them.
    """
    if not verbose:
        return CONSOLE.status(msg, spinner=spinner)
    return nullcontext()
def run_command(cmd: str, verbose=False) -> Optional[str]:
    """Runs a command and returns the output.

    Args:
        cmd: Command to run (passed to the shell).
        verbose: If True, the command's output streams to the terminal and
            is not captured; if False, output is captured and returned.

    Returns:
        The captured stdout as a string, or None when nothing was captured
        (i.e. when ``verbose`` is True).

    Exits the process with status 1 if the command fails.
    """
    out = subprocess.run(cmd, capture_output=not verbose, shell=True, check=False)
    if out.returncode != 0:
        CONSOLE.rule("[bold red] :skull: :skull: :skull: ERROR :skull: :skull: :skull: ", style="red")
        CONSOLE.print(f"[bold red]Error running command: {cmd}")
        CONSOLE.rule(style="red")
        # Bug fix: stderr is None when verbose=True (not captured); guard the
        # decode so the error path doesn't raise AttributeError before exiting.
        if out.stderr is not None:
            CONSOLE.print(out.stderr.decode("utf-8"))
        sys.exit(1)
    if out.stdout is not None:
        return out.stdout.decode("utf-8")
    # Bug fix: previously returned the CompletedProcess object here, which
    # contradicted the Optional[str] annotation.
    return None
The provided code snippet includes necessary dependencies for implementing the `convert_insta360_to_images` function. Write a Python function `def convert_insta360_to_images( video_front: Path, video_back: Path, image_dir: Path, num_frames_target: int, crop_percentage: float = 0.7, verbose: bool = False, ) -> Tuple[List[str], int]` to solve the following problem:
Converts a video into a sequence of images. Args: video_front: Path to the front video. video_back: Path to the back video. image_dir: Path to the output image directory. num_frames_target: Number of frames to extract. crop_percentage: Percentage used to calculate the cropped dimensions of extracted frames. Currently used to crop out the curved portions of the fish-eye lens. verbose: If True, logs the output of the command. Returns: A tuple containing summary of the conversion and the number of extracted frames.
Here is the function:
def convert_insta360_to_images(
    video_front: Path,
    video_back: Path,
    image_dir: Path,
    num_frames_target: int,
    crop_percentage: float = 0.7,
    verbose: bool = False,
) -> Tuple[List[str], int]:
    """Converts a pair of Insta360 videos into a sequence of images.

    Args:
        video_front: Path to the front video.
        video_back: Path to the back video.
        image_dir: Path to the output image directory.
        num_frames_target: Number of frames to extract (split across the two videos).
        crop_percentage: Percentage used to calculate the cropped dimensions of extracted frames. Currently used to crop
            out the curved portions of the fish-eye lens.
        verbose: If True, logs the output of the command.

    Returns:
        A tuple containing summary of the conversion and the number of extracted frames.
    """
    with status(msg="Converting video to images...", spinner="bouncingBall", verbose=verbose):
        # Delete existing images in the folder so stale frames don't mix in.
        for img in image_dir.glob("*.png"):
            if verbose:
                CONSOLE.log(f"Deleting {img}")
            img.unlink()
        num_frames_front = get_num_frames_in_video(video_front)
        num_frames_back = get_num_frames_in_video(video_back)
        if num_frames_front == 0:
            CONSOLE.print(f"[bold red]Error: Video has no frames: {video_front}")
            sys.exit(1)
        if num_frames_back == 0:
            # Bug fix: this message previously reported video_front.
            CONSOLE.print(f"[bold red]Error: Video has no frames: {video_back}")
            sys.exit(1)
        # NOTE(review): assumes num_frames_target >= 2; smaller values divide by zero.
        spacing = num_frames_front // (num_frames_target // 2)
        vf_cmds = []
        if spacing > 1:
            vf_cmds = [f"thumbnail={spacing}", "setpts=N/TB"]
        else:
            # Bug fix: "satisfy" was previously misspelled "satify".
            CONSOLE.print("[bold red]Can't satisfy requested number of frames. Extracting all frames.")
        # Crop away the curved fish-eye border, then rotate each stream upright.
        vf_cmds.append(f"crop=iw*{crop_percentage}:ih*{crop_percentage}")
        front_vf_cmds = vf_cmds + ["transpose=2"]
        back_vf_cmds = vf_cmds + ["transpose=1"]
        front_ffmpeg_cmd = f"ffmpeg -i {video_front} -vf {','.join(front_vf_cmds)} -r 1 {image_dir / 'frame_%05d.png'}"
        back_ffmpeg_cmd = (
            f"ffmpeg -i {video_back} -vf {','.join(back_vf_cmds)} -r 1 {image_dir / 'back_frame_%05d.png'}"
        )
        run_command(front_ffmpeg_cmd, verbose=verbose)
        run_command(back_ffmpeg_cmd, verbose=verbose)
        # Renumber back-camera frames to follow the front-camera frames.
        num_extracted_front_frames = len(list(image_dir.glob("frame*.png")))
        for i, img in enumerate(image_dir.glob("back_frame_*.png")):
            img.rename(image_dir / f"frame_{i+1+num_extracted_front_frames:05d}.png")
    num_final_frames = len(list(image_dir.glob("*.png")))
    summary_log = []
    summary_log.append(f"Starting with {num_frames_front + num_frames_back} video frames")
    summary_log.append(f"We extracted {num_final_frames} images")
    CONSOLE.log("[bold green]:tada: Done converting insta360 to images.")
    return summary_log, num_final_frames
162,315 | import sys
from pathlib import Path
from typing import List, Tuple
from rich.console import Console
from nerfstudio.process_data.process_data_utils import get_num_frames_in_video
from nerfstudio.utils.rich_utils import status
from nerfstudio.utils.scripts import run_command
CONSOLE = Console(width=120)
def get_num_frames_in_video(video: Path) -> int:
    """Returns the number of frames in a video.

    Counts packets on the first video stream via ffprobe, which is fast and
    does not require decoding the frames.

    Args:
        video: Path to a video.

    Returns:
        The number of frames in a video.
    """
    probe_cmd = (
        "ffprobe -v error -select_streams v:0 -count_packets "
        "-show_entries stream=nb_read_packets -of csv=p=0 "
        f"{video}"
    )
    packet_count = run_command(probe_cmd)
    assert packet_count is not None
    # ffprobe's csv output can carry stray separators/newlines; strip before parsing.
    return int(packet_count.strip(" ,\t\n\r"))
def status(msg: str, spinner: str = "bouncingBall", verbose: bool = False):
    """A context manager that does nothing if verbose is True. Otherwise it hides logs under a message.

    Args:
        msg: The message to log.
        spinner: The spinner to use.
        verbose: If True, print all logs, else hide them.
    """
    # When verbose, the caller wants raw output, so hand back a no-op context.
    return nullcontext() if verbose else CONSOLE.status(msg, spinner=spinner)
def run_command(cmd: str, verbose: bool = False) -> Optional[str]:
    """Runs a command and returns the output.

    Args:
        cmd: Command to run.
        verbose: If True, the command's own output is streamed to the terminal
            instead of being captured.

    Returns:
        The decoded stdout of the command when output was captured, otherwise
        None. (Previously the raw CompletedProcess object leaked out of the
        final branch, contradicting the declared Optional[str] return type.)

    Exits the process with status 1 if the command fails.
    """
    out = subprocess.run(cmd, capture_output=not verbose, shell=True, check=False)
    if out.returncode != 0:
        CONSOLE.rule("[bold red] :skull: :skull: :skull: ERROR :skull: :skull: :skull: ", style="red")
        CONSOLE.print(f"[bold red]Error running command: {cmd}")
        CONSOLE.rule(style="red")
        # stderr is None when verbose=True (not captured); guard to avoid an
        # AttributeError masking the real failure.
        if out.stderr is not None:
            CONSOLE.print(out.stderr.decode("utf-8"))
        sys.exit(1)
    if out.stdout is not None:
        return out.stdout.decode("utf-8")
    return None
The provided code snippet includes necessary dependencies for implementing the `convert_insta360_single_file_to_images` function. Write a Python function `def convert_insta360_single_file_to_images( video: Path, image_dir: Path, num_frames_target: int, crop_percentage: float = 0.7, verbose: bool = False, ) -> Tuple[List[str], int]` to solve the following problem:
Converts a video into a sequence of images. Args: video: Path to the video. output_dir: Path to the output directory. num_frames_target: Number of frames to extract. crop_percentage: Percentage used to calculate the cropped dimensions of extracted frames. Currently used to crop out the curved portions of the fish-eye lens. verbose: If True, logs the output of the command. Returns: A tuple containing a summary of the conversion and the number of extracted frames.
Here is the function:
def convert_insta360_single_file_to_images(
    video: Path,
    image_dir: Path,
    num_frames_target: int,
    crop_percentage: float = 0.7,
    verbose: bool = False,
) -> Tuple[List[str], int]:
    """Converts a single dual-lens insta360 video into a sequence of images.

    Both fish-eye views (front and back halves of each frame) are cropped out
    of the same video and written as a single numbered image sequence.

    Args:
        video: Path to the video.
        image_dir: Path to the output directory for the extracted images.
        num_frames_target: Number of frames to extract.
        crop_percentage: Percentage used to calculate the cropped dimensions of extracted frames. Currently used to crop
            out the curved portions of the fish-eye lens.
        verbose: If True, logs the output of the command.

    Returns:
        A tuple containing a summary of the conversion and the number of extracted frames.
    """
    with status(msg="Converting video to images...", spinner="bouncingBall", verbose=verbose):
        # delete existing images in folder
        for img in image_dir.glob("*.png"):
            if verbose:
                CONSOLE.log(f"Deleting {img}")
            img.unlink()
        num_frames = get_num_frames_in_video(video)
        if num_frames == 0:
            CONSOLE.print(f"[bold red]Error: Video has no frames: {video}")
            sys.exit(1)
        # Half the target per lens: each source frame yields a front and a back image.
        # NOTE(review): num_frames_target < 2 makes the divisor zero here — confirm
        # callers guarantee num_frames_target >= 2.
        spacing = num_frames // (num_frames_target // 2)
        vf_cmds = []
        if spacing > 1:
            # thumbnail=N keeps one representative frame per window of N;
            # setpts=N/TB re-times the survivors so -r 1 emits them all.
            vf_cmds = [f"thumbnail={spacing}", "setpts=N/TB"]
        else:
            CONSOLE.print("[bold red]Can't satify requested number of frames. Extracting all frames.")
        vf_cmds_back = vf_cmds.copy()
        vf_cmds_front = vf_cmds.copy()
        # Back lens: square crop from the left half; front lens: same-size crop
        # offset past the horizontal midpoint (iw/2).
        vf_cmds_back.append(
            f"crop=ih*{crop_percentage}:ih*{crop_percentage}:ih*({crop_percentage}/4):ih*({crop_percentage}/4)"
        )
        vf_cmds_front.append(
            f"crop=ih*{crop_percentage}:ih*{crop_percentage}:iw/2+ih*{crop_percentage/4}:ih*{crop_percentage/4}"
        )
        front_ffmpeg_cmd = f"ffmpeg -i {video} -vf {','.join(vf_cmds_front)} -r 1 {image_dir / 'frame_%05d.png'}"
        back_ffmpeg_cmd = f"ffmpeg -i {video} -vf {','.join(vf_cmds_back)} -r 1 {image_dir / 'back_frame_%05d.png'}"
        run_command(back_ffmpeg_cmd, verbose=verbose)
        run_command(front_ffmpeg_cmd, verbose=verbose)
        # "frame*.png" does not match "back_frame_*.png", so this counts only the
        # front images just written.
        num_extracted_frames = len(list(image_dir.glob("frame*.png")))
        # Renumber back-lens images to continue the front sequence.
        # NOTE(review): Path.glob() yields entries in OS-dependent order; if the
        # renumbering must preserve temporal order, the matches should be sorted
        # first — confirm upstream intent.
        for i, img in enumerate(image_dir.glob("back_frame_*.png")):
            img.rename(image_dir / f"frame_{i+1+num_extracted_frames:05d}.png")
    num_final_frames = len(list(image_dir.glob("*.png")))
    summary_log = []
    summary_log.append(f"Starting with {num_frames} video frames")
    summary_log.append(f"We extracted {num_final_frames} images")
    CONSOLE.log("[bold green]:tada: Done converting insta360 to images.")
    return summary_log, num_final_frames
162,316 | import sys
from pathlib import Path
from rich.console import Console
from typing_extensions import Literal
from nerfstudio.process_data.process_data_utils import CameraModel
try:
import pycolmap
from hloc import (
extract_features,
match_features,
pairs_from_exhaustive,
pairs_from_retrieval,
reconstruction,
)
except ImportError:
_HAS_HLOC = False
else:
_HAS_HLOC = True
CONSOLE = Console(width=120)
class CameraModel(Enum):
    """Enum for camera types."""

    # String values are passed verbatim to COLMAP/pycolmap as the camera model
    # name (see run_hloc: pycolmap.ImageReaderOptions(camera_model=...value)).
    OPENCV = "OPENCV"  # perspective camera, OpenCV distortion parameters
    OPENCV_FISHEYE = "OPENCV_FISHEYE"  # fisheye camera, OpenCV fisheye distortion
The provided code snippet includes necessary dependencies for implementing the `run_hloc` function. Write a Python function `def run_hloc( image_dir: Path, colmap_dir: Path, camera_model: CameraModel, verbose: bool = False, matching_method: Literal["vocab_tree", "exhaustive", "sequential"] = "vocab_tree", feature_type: Literal[ "sift", "superpoint_aachen", "superpoint_max", "superpoint_inloc", "r2d2", "d2net-ss", "sosnet", "disk" ] = "superpoint_aachen", matcher_type: Literal[ "superglue", "superglue-fast", "NN-superpoint", "NN-ratio", "NN-mutual", "adalam" ] = "superglue", num_matched: int = 50, ) -> None` to solve the following problem:
Runs hloc on the images. Args: image_dir: Path to the directory containing the images. colmap_dir: Path to the output directory. camera_model: Camera model to use. verbose: If True, logs the output of the command. matching_method: Strategy for selecting image pairs to match ("exhaustive" pairs all images; otherwise retrieval-based pairing is used). feature_type: Local feature extractor configuration. matcher_type: Feature matcher configuration. num_matched: Number of retrieval matches per image.
Here is the function:
def run_hloc(
    image_dir: Path,
    colmap_dir: Path,
    camera_model: CameraModel,
    verbose: bool = False,
    matching_method: Literal["vocab_tree", "exhaustive", "sequential"] = "vocab_tree",
    feature_type: Literal[
        "sift", "superpoint_aachen", "superpoint_max", "superpoint_inloc", "r2d2", "d2net-ss", "sosnet", "disk"
    ] = "superpoint_aachen",
    matcher_type: Literal[
        "superglue", "superglue-fast", "NN-superpoint", "NN-ratio", "NN-mutual", "adalam"
    ] = "superglue",
    num_matched: int = 50,
) -> None:
    """Runs hloc (hierarchical localization) structure-from-motion on the images.

    Exits the process if the optional hloc toolbox is not installed.

    Args:
        image_dir: Path to the directory containing the images.
        colmap_dir: Path to the output directory.
        camera_model: Camera model to use.
        verbose: If True, logs the output of the command.
        matching_method: How image pairs are selected for matching; "exhaustive"
            pairs every image with every other, anything else uses NetVLAD
            retrieval. (NOTE(review): "sequential" currently falls through to
            the retrieval branch — confirm that is intended.)
        feature_type: Name of the hloc local-feature extraction config.
        matcher_type: Name of the hloc feature-matching config.
        num_matched: Number of retrieval pairs per image; capped at the number
            of images.
    """
    if not _HAS_HLOC:
        CONSOLE.print(
            f"[bold red]Error: To use this set of parameters ({feature_type}/{matcher_type}/hloc), "
            "you must install hloc toolbox!!"
        )
        sys.exit(1)

    # Intermediate artifacts live alongside the final sparse reconstruction.
    outputs = colmap_dir
    sfm_pairs = outputs / "pairs-netvlad.txt"
    sfm_dir = outputs / "sparse" / "0"
    features = outputs / "features.h5"
    matches = outputs / "matches.h5"

    retrieval_conf = extract_features.confs["netvlad"]  # global descriptors for retrieval
    feature_conf = extract_features.confs[feature_type]
    matcher_conf = match_features.confs[matcher_type]

    references = [p.relative_to(image_dir).as_posix() for p in image_dir.iterdir()]
    extract_features.main(feature_conf, image_dir, image_list=references, feature_path=features)
    if matching_method == "exhaustive":
        pairs_from_exhaustive.main(sfm_pairs, image_list=references)
    else:
        # Retrieval-based pairing: rank likely-overlapping images with NetVLAD.
        retrieval_path = extract_features.main(retrieval_conf, image_dir, outputs)
        if num_matched >= len(references):
            num_matched = len(references)
        pairs_from_retrieval.main(retrieval_path, sfm_pairs, num_matched=num_matched)
    match_features.main(matcher_conf, sfm_pairs, features=features, matches=matches)

    # All images share one camera (CameraMode.SINGLE) of the requested model.
    image_options = pycolmap.ImageReaderOptions(camera_model=camera_model.value)
    reconstruction.main(
        sfm_dir,
        image_dir,
        sfm_pairs,
        features,
        matches,
        camera_mode=pycolmap.CameraMode.SINGLE,
        image_options=image_options,
        verbose=verbose,
    )
162,317 | from __future__ import annotations
from dataclasses import field
from typing import Any, Dict
from rich.console import Console
The provided code snippet includes necessary dependencies for implementing the `to_immutable_dict` function. Write a Python function `def to_immutable_dict(d: Dict[str, Any])` to solve the following problem:
Method to convert mutable dict to default factory dict Args: d: dictionary to convert into default factory dict for dataclass
Here is the function:
def to_immutable_dict(d: Dict[str, Any]):
    """Method to convert mutable dict to default factory dict

    Args:
        d: dictionary to convert into default factory dict for dataclass
    """
    # Each dataclass instance gets its own shallow copy of ``d``, taken at
    # factory-call time, so instances never share the same mapping object.
    def _copy() -> Dict[str, Any]:
        return dict(d)

    return field(default_factory=_copy)
162,318 | from __future__ import annotations
import sys
from dataclasses import dataclass
from typing import List, Optional, Tuple
import numpy as np
import open3d as o3d
import pymeshlab
import torch
from rich.console import Console
from rich.progress import (
BarColumn,
Progress,
TaskProgressColumn,
TextColumn,
TimeRemainingColumn,
)
from torchtyping import TensorType
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.configs.base_config import Config
from nerfstudio.pipelines.base_pipeline import Pipeline
from nerfstudio.utils.rich_utils import ItersPerSecColumn
CONSOLE = Console(width=120)
class Mesh:
    """Class for a mesh."""

    # NOTE(review): the bare annotated fields below carry no values, so this
    # class is presumably decorated as a @dataclass upstream (callers construct
    # it with keyword arguments, e.g. Mesh(vertices=..., faces=...)) — confirm
    # against the original definition.
    vertices: TensorType["num_verts", 3]
    """Vertices of the mesh."""
    faces: TensorType["num_faces", 3]
    """Faces of the mesh."""
    normals: TensorType["num_verts", 3]
    """Normals of the mesh."""
    colors: Optional[TensorType["num_verts", 3]] = None
    """Colors of the mesh."""
def get_mesh_from_pymeshlab_mesh(mesh: pymeshlab.Mesh) -> Mesh:
    """Get a Mesh from a pymeshlab mesh.

    See https://pymeshlab.readthedocs.io/en/0.1.5/classes/mesh.html for details.
    """
    verts = torch.from_numpy(mesh.vertex_matrix()).float()
    tris = torch.from_numpy(mesh.face_matrix()).long()
    # The normal matrix may be a read-only view; copy it before wrapping in a tensor.
    norms = torch.from_numpy(np.copy(mesh.vertex_normal_matrix())).float()
    cols = torch.from_numpy(mesh.vertex_color_matrix()).float()
    return Mesh(vertices=verts, faces=tris, normals=norms, colors=cols)
The provided code snippet includes necessary dependencies for implementing the `get_mesh_from_filename` function. Write a Python function `def get_mesh_from_filename(filename: str, target_num_faces: Optional[int] = None) -> Mesh` to solve the following problem:
Get a Mesh from a filename.
Here is the function:
def get_mesh_from_filename(filename: str, target_num_faces: Optional[int] = None) -> Mesh:
    """Load a mesh from disk, optionally decimating it to a target face count."""
    mesh_set = pymeshlab.MeshSet()
    mesh_set.load_new_mesh(filename)
    if target_num_faces is not None:
        # Simplify in place before conversion.
        CONSOLE.print("Running meshing decimation with quadric edge collapse")
        mesh_set.meshing_decimation_quadric_edge_collapse(targetfacenum=target_num_faces)
    return get_mesh_from_pymeshlab_mesh(mesh_set.current_mesh())
162,319 | from __future__ import annotations
import sys
from dataclasses import dataclass
from typing import List, Optional, Tuple
import numpy as np
import open3d as o3d
import pymeshlab
import torch
from rich.console import Console
from rich.progress import (
BarColumn,
Progress,
TaskProgressColumn,
TextColumn,
TimeRemainingColumn,
)
from torchtyping import TensorType
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.configs.base_config import Config
from nerfstudio.pipelines.base_pipeline import Pipeline
from nerfstudio.utils.rich_utils import ItersPerSecColumn
CONSOLE = Console(width=120)
class Pipeline(nn.Module):
    """The intent of this class is to provide a higher level interface for the Model
    that will be easy to use for our Trainer class.

    This class will contain high level functions for the model like getting the loss
    dictionaries and visualization code. It should have ways to get the next iterations
    training loss, evaluation loss, and generate whole images for visualization. Each model
    class should be 1:1 with a pipeline that can act as a standardized interface and hide
    differences in how each model takes in and outputs data.

    This class's function is to hide the data manager and model classes from the trainer,
    worrying about:
    1) Fetching data with the data manager
    2) Feeding the model the data and fetching the loss
    Hopefully this provides a higher level interface for the trainer to use, and
    simplifying the model classes, which each may have different forward() methods
    and so on.

    Args:
        config: configuration to instantiate pipeline
        device: location to place model and data
        test_mode:
            'train': loads train/eval datasets into memory
            'test': loads train/test datset into memory
            'inference': does not load any dataset into memory
        world_size: total number of machines available
        local_rank: rank of current machine

    Attributes:
        datamanager: The data manager that will be used
        model: The model that will be used
    """

    # pylint: disable=abstract-method
    datamanager: DataManager
    _model: Model

    def model(self):
        """Returns the unwrapped model if in ddp"""
        # NOTE(review): accessed as `self.model` (no call) elsewhere in this
        # class, so this is presumably decorated with @property upstream — confirm.
        return module_wrapper(self._model)

    def device(self):
        """Returns the device that the model is on."""
        # NOTE(review): likewise presumably a @property upstream.
        return self.model.device

    def get_train_loss_dict(self, step: int):
        """This function gets your training loss dict. This will be responsible for
        getting the next batch of data from the DataManager and interfacing with the
        Model class, feeding the data to the model's forward function.

        Args:
            step: current iteration step to update sampler if using DDP (distributed)
        """
        if self.world_size > 1 and step:
            # Presumably keeps the DistributedSampler's shuffling in sync across
            # processes each epoch — confirm sampler semantics.
            assert self.datamanager.train_sampler is not None
            self.datamanager.train_sampler.set_epoch(step)
        ray_bundle, batch = self.datamanager.next_train(step)
        model_outputs = self.model(ray_bundle, batch)
        metrics_dict = self.model.get_metrics_dict(model_outputs, batch)
        loss_dict = self.model.get_loss_dict(model_outputs, batch, metrics_dict)
        return model_outputs, loss_dict, metrics_dict

    def get_eval_loss_dict(self, step: int):
        """This function gets your evaluation loss dict. It needs to get the data
        from the DataManager and feed it to the model's forward function

        Args:
            step: current iteration step
        """
        # Switch to eval mode for the forward pass, then restore train mode.
        self.eval()
        if self.world_size > 1:
            assert self.datamanager.eval_sampler is not None
            self.datamanager.eval_sampler.set_epoch(step)
        ray_bundle, batch = self.datamanager.next_eval(step)
        model_outputs = self.model(ray_bundle, batch)
        metrics_dict = self.model.get_metrics_dict(model_outputs, batch)
        loss_dict = self.model.get_loss_dict(model_outputs, batch, metrics_dict)
        self.train()
        return model_outputs, loss_dict, metrics_dict

    def get_eval_image_metrics_and_images(self, step: int):
        """This function gets your evaluation loss dict. It needs to get the data
        from the DataManager and feed it to the model's forward function

        Args:
            step: current iteration step
        """

    def get_average_eval_image_metrics(self, step: Optional[int] = None):
        """Iterate over all the images in the eval dataset and get the average."""

    def load_pipeline(self, loaded_state: Dict[str, Any]) -> None:
        """Load the checkpoint from the given path

        Args:
            loaded_state: pre-trained model state dict
        """

    def get_training_callbacks(
        self, training_callback_attributes: TrainingCallbackAttributes
    ) -> List[TrainingCallback]:
        """Returns the training callbacks from both the Dataloader and the Model."""

    def get_param_groups(self) -> Dict[str, List[Parameter]]:
        """Get the param groups for the pipeline.

        Returns:
            A list of dictionaries containing the pipeline's param groups.
        """
The provided code snippet includes necessary dependencies for implementing the `generate_point_cloud` function. Write a Python function `def generate_point_cloud( pipeline: Pipeline, num_points: int = 1000000, remove_outliers: bool = True, estimate_normals: bool = False, rgb_output_name: str = "rgb", depth_output_name: str = "depth", normal_output_name: Optional[str] = None, use_bounding_box: bool = True, bounding_box_min: Tuple[float, float, float] = (-1.0, -1.0, -1.0), bounding_box_max: Tuple[float, float, float] = (1.0, 1.0, 1.0), std_ratio: float = 10.0, ) -> o3d.geometry.PointCloud` to solve the following problem:
Generate a point cloud from a nerf. Args: pipeline: Pipeline to evaluate with. num_points: Number of points to generate. May result in less if outlier removal is used. remove_outliers: Whether to remove outliers. estimate_normals: Whether to estimate normals. rgb_output_name: Name of the RGB output. depth_output_name: Name of the depth output. normal_output_name: Name of the normal output. use_bounding_box: Whether to use a bounding box to sample points. bounding_box_min: Minimum of the bounding box. bounding_box_max: Maximum of the bounding box. std_ratio: Threshold based on STD of the average distances across the point cloud to remove outliers. Returns: Point cloud.
Here is the function:
def generate_point_cloud(
    pipeline: Pipeline,
    num_points: int = 1000000,
    remove_outliers: bool = True,
    estimate_normals: bool = False,
    rgb_output_name: str = "rgb",
    depth_output_name: str = "depth",
    normal_output_name: Optional[str] = None,
    use_bounding_box: bool = True,
    bounding_box_min: Tuple[float, float, float] = (-1.0, -1.0, -1.0),
    bounding_box_max: Tuple[float, float, float] = (1.0, 1.0, 1.0),
    std_ratio: float = 10.0,
) -> o3d.geometry.PointCloud:
    """Generate a point cloud from a nerf.

    Args:
        pipeline: Pipeline to evaluate with.
        num_points: Number of points to generate. May result in less if outlier removal is used.
        remove_outliers: Whether to remove outliers.
        estimate_normals: Whether to estimate normals.
        rgb_output_name: Name of the RGB output.
        depth_output_name: Name of the depth output.
        normal_output_name: Name of the normal output.
        use_bounding_box: Whether to use a bounding box to sample points.
        bounding_box_min: Minimum of the bounding box.
        bounding_box_max: Maximum of the bounding box.
        std_ratio: Threshold based on STD of the average distances across the point cloud to remove outliers.

    Returns:
        Point cloud.
    """
    # pylint: disable=too-many-statements
    progress = Progress(
        TextColumn(":cloud: Computing Point Cloud :cloud:"),
        BarColumn(),
        TaskProgressColumn(show_speed=True),
        TimeRemainingColumn(elapsed_when_finished=True, compact=True),
    )
    points = []
    rgbs = []
    normals = []
    with progress as progress_bar:
        task = progress_bar.add_task("Generating Point Cloud", total=num_points)
        # Keep pulling training ray batches until num_points have been
        # accumulated (progress_bar.finished once the task total is reached).
        while not progress_bar.finished:
            torch.cuda.empty_cache()
            with torch.no_grad():
                ray_bundle, _ = pipeline.datamanager.next_train(0)
                outputs = pipeline.model(ray_bundle)
            if rgb_output_name not in outputs:
                CONSOLE.rule("Error", style="red")
                CONSOLE.print(f"Could not find {rgb_output_name} in the model outputs", justify="center")
                CONSOLE.print(f"Please set --rgb_output_name to one of: {outputs.keys()}", justify="center")
                sys.exit(1)
            if depth_output_name not in outputs:
                CONSOLE.rule("Error", style="red")
                CONSOLE.print(f"Could not find {depth_output_name} in the model outputs", justify="center")
                CONSOLE.print(f"Please set --depth_output_name to one of: {outputs.keys()}", justify="center")
                sys.exit(1)
            rgb = outputs[rgb_output_name]
            depth = outputs[depth_output_name]
            if normal_output_name is not None:
                if normal_output_name not in outputs:
                    CONSOLE.rule("Error", style="red")
                    CONSOLE.print(f"Could not find {normal_output_name} in the model outputs", justify="center")
                    CONSOLE.print(f"Please set --normal_output_name to one of: {outputs.keys()}", justify="center")
                    sys.exit(1)
                normal = outputs[normal_output_name]
            # Back-project each ray to a 3D point using the predicted depth.
            point = ray_bundle.origins + ray_bundle.directions * depth
            if use_bounding_box:
                comp_l = torch.tensor(bounding_box_min, device=point.device)
                comp_m = torch.tensor(bounding_box_max, device=point.device)
                assert torch.all(
                    comp_l < comp_m
                ), f"Bounding box min {bounding_box_min} must be smaller than max {bounding_box_max}"
                mask = torch.all(torch.concat([point > comp_l, point < comp_m], dim=-1), dim=-1)
                point = point[mask]
                rgb = rgb[mask]
                if normal_output_name is not None:
                    normal = normal[mask]
            points.append(point)
            rgbs.append(rgb)
            if normal_output_name is not None:
                normals.append(normal)
            progress.advance(task, point.shape[0])
    points = torch.cat(points, dim=0)
    rgbs = torch.cat(rgbs, dim=0)
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(points.float().cpu().numpy())
    pcd.colors = o3d.utility.Vector3dVector(rgbs.float().cpu().numpy())
    # ind holds the indices of points kept by outlier removal; needed later to
    # keep the normals aligned with the surviving points.
    ind = None
    if remove_outliers:
        CONSOLE.print("Cleaning Point Cloud")
        pcd, ind = pcd.remove_statistical_outlier(nb_neighbors=20, std_ratio=std_ratio)
        print("\033[A\033[A")  # move the cursor up to overwrite the status line
        CONSOLE.print("[bold green]:white_check_mark: Cleaning Point Cloud")

    # either estimate_normals or normal_output_name, not both
    if estimate_normals:
        if normal_output_name is not None:
            CONSOLE.rule("Error", style="red")
            CONSOLE.print("Cannot estimate normals and use normal_output_name at the same time", justify="center")
            sys.exit(1)
        CONSOLE.print("Estimating Point Cloud Normals")
        pcd.estimate_normals()
        print("\033[A\033[A")
        CONSOLE.print("[bold green]:white_check_mark: Estimating Point Cloud Normals")
    elif normal_output_name is not None:
        normals = torch.cat(normals, dim=0)
        if ind is not None:
            # mask out normals for points that were removed with remove_outliers
            normals = normals[ind]
        pcd.normals = o3d.utility.Vector3dVector(normals.float().cpu().numpy())

    return pcd
162,320 | from __future__ import annotations
import math
from pathlib import Path
from typing import Optional, Tuple
import mediapy as media
import numpy as np
import torch
import xatlas
from rich.console import Console
from torchtyping import TensorType
from typing_extensions import Literal
from nerfstudio.cameras.rays import RayBundle
from nerfstudio.exporter.exporter_utils import Mesh
from nerfstudio.pipelines.base_pipeline import Pipeline
from nerfstudio.utils.rich_utils import get_progress
CONSOLE = Console(width=120)
def unwrap_mesh_per_uv_triangle(
    vertices: TensorType["num_verts", 3],
    faces: TensorType["num_faces", 3],
    vertex_normals: TensorType["num_verts", 3],
    px_per_uv_triangle: int,
):
    """Unwrap a mesh to a UV texture. This is done by making a grid of rectangles in the UV texture map
    and then having two triangles per rectangle. Then the texture image is rasterized and uses barycentric
    interpolation to get the origins and directions, per pixel, that are needed to render the NeRF with.

    Args:
        vertices: The vertices of the mesh.
        faces: The faces of the mesh.
        vertex_normals: The vertex normals of the mesh.
        px_per_uv_triangle: The number of pixels per UV triangle.

    Returns:
        texture_coordinates: Per-face UV coordinates, shape (num_faces, 3, 2).
        origins: Barycentrically interpolated XYZ position for every texture pixel.
        directions: Unit-length negated interpolated normal for every texture pixel.
    """
    # pylint: disable=too-many-statements
    assert len(vertices) == len(vertex_normals), "Number of vertices and vertex normals must be equal"
    device = vertices.device
    # calculate the number of rectangles needed
    triangle_padding = 3
    num_squares = math.ceil(len(faces) / 2)
    squares_per_side_w = math.ceil(math.sqrt(num_squares))
    squares_per_side_h = math.ceil(num_squares / squares_per_side_w)
    px_per_square_w = px_per_uv_triangle + triangle_padding
    px_per_square_h = px_per_uv_triangle
    num_pixels_w = squares_per_side_w * px_per_square_w
    num_pixels_h = squares_per_side_h * px_per_square_h
    # Construct what one square would look like
    # The height is equal to px_per_uv_triangle pixels.
    # The width is equal to px_per_uv_triangle + 3 pixels.
    # v0---------------v1------------------------v2
    # --Triangle 1---------------------------------
    # -----------------3px gap---------------------
    # --------------------------------Triangle 2---
    # v2-----------------------v1----------------v0
    lr_w = (px_per_uv_triangle + triangle_padding) / num_pixels_w
    lr_h = (px_per_uv_triangle) / num_pixels_h
    lr = torch.tensor([lr_w, lr_h], device=device)
    px_w = 1.0 / num_pixels_w
    px_h = 1.0 / num_pixels_h
    px = torch.tensor([px_w, px_h], device=device)
    edge_len_w = px_per_uv_triangle / num_pixels_w
    edge_len_h = px_per_uv_triangle / num_pixels_h
    scalar = (px_per_uv_triangle - 1) / px_per_uv_triangle
    # uv coords (upper left and lower right)
    uv_coords_upper_left = torch.tensor([[0, 0], [edge_len_w, 0], [0, edge_len_h]], device=device)
    # scale for bilinear interpolation reasons
    uv_coords_upper_left = uv_coords_upper_left * scalar + px / 2
    lower_right = [lr_w, lr_h]
    uv_coords_lower_right = torch.tensor(
        [
            lower_right,  # lower right
            [3 * px_w, lr_h],  # lower left
            [lr_w, 0],  # upper right
        ],
        device=device,
    )
    # scale for bilinear interpolation reasons
    uv_coords_lower_right = (
        (uv_coords_lower_right - torch.tensor(lower_right, device=device)) * scalar
        + torch.tensor(lower_right, device=device)
        - px / 2
    )
    # Tile this pattern across the entire texture
    uv_coords_square = torch.stack([uv_coords_upper_left, uv_coords_lower_right], dim=0)  # (2, 3, 2)
    uv_coords_square = uv_coords_square.reshape(1, 1, 6, 2)  # (6, 2)
    square_offsets = (
        torch.stack(
            torch.meshgrid(
                torch.arange(squares_per_side_w, device=device),
                torch.arange(squares_per_side_h, device=device),
                indexing="xy",
            ),
            dim=-1,
        )
        * lr
    )
    uv_coords_square = uv_coords_square + square_offsets.view(
        squares_per_side_h, squares_per_side_w, 1, 2
    )  # (num_squares_h, num_squares_w, 6, 2)
    texture_coordinates = uv_coords_square.view(-1, 3, 2)[: len(faces)]  # (num_faces, 3, 2)
    # Now find the triangle indices for every pixel and the barycentric coordinates
    # which can be used to interpolate the XYZ and normal values to then query with NeRF
    uv_coords, uv_indices = get_texture_image(num_pixels_w, num_pixels_h, device)
    u_index = torch.div(uv_indices[..., 0], px_per_square_w, rounding_mode="floor")
    v_index = torch.div(uv_indices[..., 1], px_per_square_h, rounding_mode="floor")
    square_index = v_index * squares_per_side_w + u_index
    u_offset = uv_indices[..., 0] % px_per_square_w
    v_offset = uv_indices[..., 1] % px_per_square_h
    # NOTE(review): `lower_right` is reused here as a boolean mask, shadowing
    # the earlier [lr_w, lr_h] list (which is no longer needed at this point).
    lower_right = (u_offset + v_offset) >= (px_per_square_w - 2)
    triangle_index = square_index * 2 + lower_right
    triangle_index = torch.clamp(triangle_index, min=0, max=len(faces) - 1)
    nearby_uv_coords = texture_coordinates[triangle_index]  # (num_pixels_h, num_pixels_w, 3, 2)
    nearby_vertices = vertices[faces[triangle_index]]  # (num_pixels_h, num_pixels_w, 3, 3)
    nearby_normals = vertex_normals[faces[triangle_index]]  # (num_pixels_h, num_pixels_w, 3, 3)
    # compute barycentric coordinates
    v0 = nearby_uv_coords[..., 0, :]  # (num_pixels, num_pixels, 2)
    v1 = nearby_uv_coords[..., 1, :]  # (num_pixels, num_pixels, 2)
    v2 = nearby_uv_coords[..., 2, :]  # (num_pixels, num_pixels, 2)
    p = uv_coords  # (num_pixels, num_pixels, 2)
    area = get_parallelogram_area(v2, v0, v1)  # 2x face area.
    w0 = get_parallelogram_area(p, v1, v2) / area
    w1 = get_parallelogram_area(p, v2, v0) / area
    w2 = get_parallelogram_area(p, v0, v1) / area
    origins = (
        nearby_vertices[..., 0, :] * w0[..., None]
        + nearby_vertices[..., 1, :] * w1[..., None]
        + nearby_vertices[..., 2, :] * w2[..., None]
    ).float()
    # Directions point against the interpolated surface normal (into the surface).
    directions = -(
        nearby_normals[..., 0, :] * w0[..., None]
        + nearby_normals[..., 1, :] * w1[..., None]
        + nearby_normals[..., 2, :] * w2[..., None]
    ).float()
    # normalize the direction vector to make it a unit vector
    directions = torch.nn.functional.normalize(directions, dim=-1)
    return texture_coordinates, origins, directions
def unwrap_mesh_with_xatlas(
    vertices: TensorType["num_verts", 3],
    faces: TensorType["num_faces", 3, torch.long],
    vertex_normals: TensorType["num_verts", 3],
    num_pixels_per_side=1024,
    num_faces_per_barycentric_chunk=10,
) -> Tuple[
    TensorType["num_faces", 3, 2],
    TensorType["num_pixels", "num_pixels", 3],
    TensorType["num_pixels", "num_pixels", "num_pixels"],
]:
    """Unwrap a mesh using xatlas. We use xatlas to unwrap the mesh with UV coordinates.
    Then we rasterize the mesh with a square pattern. We interpolate the XYZ and normal
    values for every pixel in the texture image. We return the texture coordinates, the
    origins, and the directions for every pixel.

    Args:
        vertices: Tensor of mesh vertices.
        faces: Tensor of mesh faces.
        vertex_normals: Tensor of mesh vertex normals.
        num_pixels_per_side: Number of pixels per side of the texture image. We use a square.
        num_faces_per_barycentric_chunk: Number of faces to use for barycentric chunk computation.

    Returns:
        texture_coordinates: Tensor of texture coordinates for every face.
        origins: Tensor of origins for every pixel.
        directions: Tensor of directions for every pixel.
    """
    # pylint: disable=unused-variable
    # pylint: disable=too-many-statements
    device = vertices.device

    # unwrap the mesh
    vertices_np = vertices.cpu().numpy()
    faces_np = faces.cpu().numpy()
    vertex_normals_np = vertex_normals.cpu().numpy()  # (was .cpu().cpu(); one transfer suffices)
    vmapping, indices, uvs = xatlas.parametrize(  # pylint: disable=c-extension-no-member
        vertices_np, faces_np, vertex_normals_np
    )

    # vertices texture coordinates
    vertices_tc = torch.from_numpy(uvs.astype(np.float32)).to(device)

    # render uv maps
    vertices_tc = vertices_tc * 2.0 - 1.0  # uvs to range [-1, 1]
    vertices_tc = torch.cat(
        (vertices_tc, torch.zeros_like(vertices_tc[..., :1]), torch.ones_like(vertices_tc[..., :1])), dim=-1
    )  # [num_verts, 4]

    texture_coordinates = torch.from_numpy(uvs[indices]).to(device)  # (num_faces, 3, 2)

    # Now find the triangle indices for every pixel and the barycentric coordinates
    # which can be used to interpolate the XYZ and normal values to then query with NeRF
    uv_coords, _ = get_texture_image(num_pixels_per_side, num_pixels_per_side, device)
    uv_coords_shape = uv_coords.shape
    p = uv_coords.reshape(1, -1, 2)  # (1, N, 2)
    num_vertices = p.shape[1]  # N = number of texture pixels
    num_faces = texture_coordinates.shape[0]
    # Running per-pixel best: distance to nearest triangle, its index, and its
    # barycentric weights, refined one face-chunk at a time.
    triangle_distances = torch.ones_like(p[..., 0]) * torch.finfo(torch.float32).max  # (1, N)
    triangle_indices = torch.zeros_like(p[..., 0]).long()  # (1, N)
    triangle_w0 = torch.zeros_like(p[..., 0])  # (1, N)
    triangle_w1 = torch.zeros_like(p[..., 0])  # (1, N)
    triangle_w2 = torch.zeros_like(p[..., 0])  # (1, N)
    arange_list = torch.arange(num_vertices, device=device)
    # Use ceil so the final partial chunk of faces is processed; the previous
    # floor division silently dropped num_faces % num_faces_per_barycentric_chunk
    # faces from the nearest-triangle search (the min(..., num_faces) bound on
    # `e` below shows a partial chunk was always intended).
    num_chunks = math.ceil(num_faces / num_faces_per_barycentric_chunk)
    progress = get_progress("Chunking faces for rasterization")
    with progress:
        for i in progress.track(range(num_chunks)):
            s = i * num_faces_per_barycentric_chunk
            e = min((i + 1) * num_faces_per_barycentric_chunk, num_faces)
            v0 = texture_coordinates[s:e, 0:1, :]  # (F, 1, 2)
            v1 = texture_coordinates[s:e, 1:2, :]  # (F, 1, 2)
            v2 = texture_coordinates[s:e, 2:3, :]  # (F, 1, 2)
            # NOTE: could try clockwise vs counter clockwise
            area = get_parallelogram_area(v2, v0, v1)  # 2x face area.
            w0 = get_parallelogram_area(p, v1, v2) / area  # (num_faces_per_barycentric_chunk, N)
            w1 = get_parallelogram_area(p, v2, v0) / area
            w2 = get_parallelogram_area(p, v0, v1) / area
            # get distance from center of triangle
            dist_to_center = torch.abs(w0) + torch.abs(w1) + torch.abs(w2)
            d_values, d_indices = torch.min(dist_to_center, dim=0, keepdim=True)
            d_indices_with_offset = d_indices + s  # add offset
            condition = d_values < triangle_distances
            triangle_distances = torch.where(condition, d_values, triangle_distances)
            triangle_indices = torch.where(condition, d_indices_with_offset, triangle_indices)
            w0_selected = w0[d_indices[0], arange_list].unsqueeze(0)  # (1, N)
            w1_selected = w1[d_indices[0], arange_list].unsqueeze(0)  # (1, N)
            w2_selected = w2[d_indices[0], arange_list].unsqueeze(0)  # (1, N)
            triangle_w0 = torch.where(condition, w0_selected, triangle_w0)
            triangle_w1 = torch.where(condition, w1_selected, triangle_w1)
            triangle_w2 = torch.where(condition, w2_selected, triangle_w2)

    # Interpolate positions and normals with the winning barycentric weights.
    nearby_vertices = vertices[faces[triangle_indices[0]]]  # (N, 3, 3)
    nearby_normals = vertex_normals[faces[triangle_indices[0]]]  # (N, 3, 3)
    origins = (
        nearby_vertices[..., 0, :] * triangle_w0[0, :, None]
        + nearby_vertices[..., 1, :] * triangle_w1[0, :, None]
        + nearby_vertices[..., 2, :] * triangle_w2[0, :, None]
    ).float()
    directions = -(
        nearby_normals[..., 0, :] * triangle_w0[0, :, None]
        + nearby_normals[..., 1, :] * triangle_w1[0, :, None]
        + nearby_normals[..., 2, :] * triangle_w2[0, :, None]
    ).float()
    origins = origins.reshape(uv_coords_shape[0], uv_coords_shape[1], 3)
    directions = directions.reshape(uv_coords_shape[0], uv_coords_shape[1], 3)
    # normalize the direction vector to make it a unit vector
    directions = torch.nn.functional.normalize(directions, dim=-1)
    return texture_coordinates, origins, directions
class RayBundle(TensorDataclass):
    """A bundle of ray parameters."""

    # TODO(ethan): make sure the sizes with ... are correct
    origins: TensorType[..., 3]
    """Ray origins (XYZ)"""
    directions: TensorType[..., 3]
    """Unit ray direction vector"""
    pixel_area: TensorType[..., 1]
    """Projected area of pixel a distance 1 away from origin"""
    directions_norm: Optional[TensorType[..., 1]] = None
    """Norm of ray direction vector before normalization"""
    camera_indices: Optional[TensorType[..., 1]] = None
    """Camera indices"""
    nears: Optional[TensorType[..., 1]] = None
    """Distance along ray to start sampling"""
    fars: Optional[TensorType[..., 1]] = None
    """Distance along ray to stop sampling"""
    metadata: Optional[Dict[str, TensorType["num_rays", "latent_dims"]]] = None
    """Additional metadata or data needed for interpolation, will mimic shape of rays"""
    times: Optional[TensorType[..., 1]] = None
    """Times at which rays are sampled"""

    def set_camera_indices(self, camera_index: int) -> None:
        """Assign the same camera index to every ray in the bundle.

        Args:
            camera_index: Camera index.
        """
        index_template = self.origins[..., 0:1]
        self.camera_indices = torch.ones_like(index_template).long() * camera_index

    def __len__(self):
        # One ray per trailing 3-vector in `origins`.
        return torch.numel(self.origins) // self.origins.shape[-1]

    def sample(self, num_rays: int) -> "RayBundle":
        """Return a random subset of the rays.

        Args:
            num_rays: Number of rays in output RayBundle

        Returns:
            RayBundle with subset of rays.
        """
        total_rays = len(self)
        assert num_rays <= total_rays
        chosen = random.sample(range(total_rays), k=num_rays)
        return self[chosen]

    def get_row_major_sliced_ray_bundle(self, start_idx: int, end_idx: int) -> "RayBundle":
        """Flatten the bundle and slice out the chunk [start_idx, end_idx).

        Args:
            start_idx: Start index of RayBundle chunk.
            end_idx: End index of RayBundle chunk.

        Returns:
            Flattened RayBundle with end_idx-start_idx rays.
        """
        flat_bundle = self.flatten()
        return flat_bundle[start_idx:end_idx]

    def get_ray_samples(
        self,
        bin_starts: TensorType["bs":..., "num_samples", 1],
        bin_ends: TensorType["bs":..., "num_samples", 1],
        spacing_starts: Optional[TensorType["bs":..., "num_samples", 1]] = None,
        spacing_ends: Optional[TensorType["bs":..., "num_samples", 1]] = None,
        spacing_to_euclidean_fn: Optional[Callable] = None,
    ) -> RaySamples:
        """Produce one sample per bin by projecting points along each ray direction.

        Args:
            bin_starts: Distance from origin to start of bin.
            bin_ends: Distance from origin to end of bin.

        Returns:
            Samples projected along ray.
        """
        deltas = bin_ends - bin_starts
        camera_indices = None if self.camera_indices is None else self.camera_indices[..., None]
        # Add a trailing singleton dim so per-ray fields broadcast against num_samples.
        per_sample = self[..., None]
        frustums = Frustums(
            origins=per_sample.origins,  # [..., 1, 3]
            directions=per_sample.directions,  # [..., 1, 3]
            starts=bin_starts,  # [..., num_samples, 1]
            ends=bin_ends,  # [..., num_samples, 1]
            pixel_area=per_sample.pixel_area,  # [..., 1, 1]
        )
        return RaySamples(
            frustums=frustums,
            camera_indices=camera_indices,  # [..., 1, 1]
            deltas=deltas,  # [..., num_samples, 1]
            spacing_starts=spacing_starts,  # [..., num_samples, 1]
            spacing_ends=spacing_ends,  # [..., num_samples, 1]
            spacing_to_euclidean_fn=spacing_to_euclidean_fn,
            metadata=per_sample.metadata,
            times=None if self.times is None else self.times[..., None],  # [..., 1, 1]
        )
# Local import keeps this fix self-contained; `dataclass` is required because Mesh
# is constructed with keyword arguments (Mesh(vertices=..., faces=..., ...)) elsewhere
# in this file, which needs the generated __init__.
from dataclasses import dataclass


@dataclass
class Mesh:
    """Container for triangle-mesh data: vertices, faces, and per-vertex attributes."""

    vertices: TensorType["num_verts", 3]
    """Vertices of the mesh."""
    faces: TensorType["num_faces", 3]
    """Faces of the mesh."""
    normals: TensorType["num_verts", 3]
    """Normals of the mesh."""
    colors: Optional[TensorType["num_verts", 3]] = None
    """Colors of the mesh (optional; None when the mesh carries no color data)."""
class Pipeline(nn.Module):
    """The intent of this class is to provide a higher level interface for the Model
    that will be easy to use for our Trainer class.

    This class will contain high level functions for the model like getting the loss
    dictionaries and visualization code. It should have ways to get the next iterations
    training loss, evaluation loss, and generate whole images for visualization. Each model
    class should be 1:1 with a pipeline that can act as a standardized interface and hide
    differences in how each model takes in and outputs data.

    This class's function is to hide the data manager and model classes from the trainer,
    worrying about:
    1) Fetching data with the data manager
    2) Feeding the model the data and fetching the loss

    Hopefully this provides a higher level interface for the trainer to use, and
    simplifying the model classes, which each may have different forward() methods
    and so on.

    Args:
        config: configuration to instantiate pipeline
        device: location to place model and data
        test_mode:
            'train': loads train/eval datasets into memory
            'test': loads train/test dataset into memory
            'inference': does not load any dataset into memory
        world_size: total number of machines available
        local_rank: rank of current machine

    Attributes:
        datamanager: The data manager that will be used
        model: The model that will be used
    """

    # pylint: disable=abstract-method
    datamanager: DataManager
    _model: Model

    @property
    def model(self):
        """Returns the unwrapped model if in ddp"""
        # `model` is read as an attribute throughout this file
        # (self.model(ray_bundle, batch), pipeline.model.get_outputs_for_camera_ray_bundle),
        # so this accessor must be a property rather than a plain method.
        return module_wrapper(self._model)

    @property
    def device(self):
        """Returns the device that the model is on."""
        # Also read as an attribute (`pipeline.device`) by the exporter functions.
        return self.model.device

    def get_train_loss_dict(self, step: int):
        """This function gets your training loss dict. This will be responsible for
        getting the next batch of data from the DataManager and interfacing with the
        Model class, feeding the data to the model's forward function.

        Args:
            step: current iteration step to update sampler if using DDP (distributed)

        Returns:
            Tuple of (model outputs, loss dict, metrics dict).
        """
        # NOTE(review): `and step` skips the sampler update when step == 0 — confirm
        # this is intentional rather than a truthiness slip.
        if self.world_size > 1 and step:
            assert self.datamanager.train_sampler is not None
            self.datamanager.train_sampler.set_epoch(step)
        ray_bundle, batch = self.datamanager.next_train(step)
        model_outputs = self.model(ray_bundle, batch)
        metrics_dict = self.model.get_metrics_dict(model_outputs, batch)
        loss_dict = self.model.get_loss_dict(model_outputs, batch, metrics_dict)
        return model_outputs, loss_dict, metrics_dict

    def get_eval_loss_dict(self, step: int):
        """This function gets your evaluation loss dict. It needs to get the data
        from the DataManager and feed it to the model's forward function.

        Args:
            step: current iteration step

        Returns:
            Tuple of (model outputs, loss dict, metrics dict).
        """
        # Temporarily switch to eval mode, evaluate one batch, then restore train mode.
        self.eval()
        if self.world_size > 1:
            assert self.datamanager.eval_sampler is not None
            self.datamanager.eval_sampler.set_epoch(step)
        ray_bundle, batch = self.datamanager.next_eval(step)
        model_outputs = self.model(ray_bundle, batch)
        metrics_dict = self.model.get_metrics_dict(model_outputs, batch)
        loss_dict = self.model.get_loss_dict(model_outputs, batch, metrics_dict)
        self.train()
        return model_outputs, loss_dict, metrics_dict

    def get_eval_image_metrics_and_images(self, step: int):
        """This function gets your evaluation loss dict. It needs to get the data
        from the DataManager and feed it to the model's forward function.

        Args:
            step: current iteration step
        """
        # NOTE(review): no body here — presumably an abstract hook implemented by
        # subclasses (a decorator may have been lost); confirm against subclasses.

    def get_average_eval_image_metrics(self, step: Optional[int] = None):
        """Iterate over all the images in the eval dataset and get the average."""
        # NOTE(review): abstract hook — see note on get_eval_image_metrics_and_images.

    def load_pipeline(self, loaded_state: Dict[str, Any]) -> None:
        """Load the checkpoint from the given path.

        Args:
            loaded_state: pre-trained model state dict
        """
        # NOTE(review): abstract hook — see note on get_eval_image_metrics_and_images.

    def get_training_callbacks(
        self, training_callback_attributes: TrainingCallbackAttributes
    ) -> List[TrainingCallback]:
        """Returns the training callbacks from both the Dataloader and the Model."""
        # NOTE(review): abstract hook — see note on get_eval_image_metrics_and_images.

    def get_param_groups(self) -> Dict[str, List[Parameter]]:
        """Get the param groups for the pipeline.

        Returns:
            A list of dictionaries containing the pipeline's param groups.
        """
        # NOTE(review): abstract hook — see note on get_eval_image_metrics_and_images.
def get_progress(description: str, suffix: Optional[str] = None):
    """Build a rich ``Progress`` with a description, bar, percentage, optional
    iterations-per-second column, and remaining-time column.

    Args:
        description: Text shown on the left of the bar.
        suffix: If given, an iterations-per-second column using this suffix is included.
    """
    columns = [TextColumn(description), BarColumn(), TaskProgressColumn(show_speed=True)]
    if suffix:
        columns.append(ItersPerSecColumn(suffix=suffix))
    columns.append(TimeRemainingColumn(elapsed_when_finished=True, compact=True))
    return Progress(*columns)
The provided code snippet includes necessary dependencies for implementing the `export_textured_mesh` function. Write a Python function `def export_textured_mesh( mesh: Mesh, pipeline: Pipeline, output_dir: Path, px_per_uv_triangle: Optional[int] = None, unwrap_method: Literal["xatlas", "custom"] = "xatlas", raylen_method: Literal["edge", "none"] = "edge", num_pixels_per_side=1024, )` to solve the following problem:
Textures a mesh using the radiance field from the Pipeline. The mesh is written to an OBJ file in the output directory, along with the corresponding material and texture files. Operations will occur on the same device as the Pipeline. Args: mesh: The mesh to texture. pipeline: The pipeline to use for texturing. output_dir: The directory to write the textured mesh to. px_per_uv_triangle: The number of pixels per side of UV triangle. unwrap_method: The method to use for unwrapping the mesh. raylen_method: The method to use for computing the ray length to render. num_pixels_per_side: The number of pixels per side of the texture image.
Here is the function:
def export_textured_mesh(
    mesh: Mesh,
    pipeline: Pipeline,
    output_dir: Path,
    px_per_uv_triangle: Optional[int] = None,
    unwrap_method: Literal["xatlas", "custom"] = "xatlas",
    raylen_method: Literal["edge", "none"] = "edge",
    num_pixels_per_side=1024,
):
    """Textures a mesh using the radiance field from the Pipeline.

    The mesh is written to an OBJ file in the output directory,
    along with the corresponding material and texture files.
    Operations will occur on the same device as the Pipeline.

    Args:
        mesh: The mesh to texture.
        pipeline: The pipeline to use for texturing.
        output_dir: The directory to write the textured mesh to.
        px_per_uv_triangle: The number of pixels per side of UV triangle (custom unwrap only).
        unwrap_method: The method to use for unwrapping the mesh.
        raylen_method: The method to use for computing the ray length to render.
        num_pixels_per_side: The number of pixels per side of the texture image.
    """
    # pylint: disable=too-many-statements
    device = pipeline.device
    vertices = mesh.vertices.to(device)
    faces = mesh.faces.to(device)
    vertex_normals = mesh.normals.to(device)

    summary_log = []
    summary_log.append(f"Unwrapped mesh using {unwrap_method} method.")
    summary_log.append(f"Mesh has {len(vertices)} vertices and {len(faces)} faces.")

    # Unwrap the mesh into UV space; per texel we get a surface point (origin) and a
    # direction along which to render.
    if unwrap_method == "xatlas":
        CONSOLE.print("Unwrapping mesh with xatlas method... this may take a while.")
        texture_coordinates, origins, directions = unwrap_mesh_with_xatlas(
            vertices, faces, vertex_normals, num_pixels_per_side=num_pixels_per_side
        )
        print("\033[A\033[A")  # move the cursor up two lines to overwrite the status line
        CONSOLE.print("[bold green]:white_check_mark: Unwrapped mesh with xatlas method")
    elif unwrap_method == "custom":
        CONSOLE.print("Unwrapping mesh with custom method...")
        texture_coordinates, origins, directions = unwrap_mesh_per_uv_triangle(
            vertices, faces, vertex_normals, px_per_uv_triangle
        )
        print("\033[A\033[A")
        CONSOLE.print("[bold green]:white_check_mark: Unwrapped mesh with custom method")
    else:
        raise ValueError(f"Unwrap method {unwrap_method} not supported.")

    if raylen_method == "edge":
        face_vertices = vertices[faces]
        # compute the length of the rays we want to render
        # we make a reasonable approximation by using the mean length of one edge per face
        raylen = 2.0 * torch.mean(torch.norm(face_vertices[:, 1, :] - face_vertices[:, 0, :], dim=-1)).float()
    elif raylen_method == "none":
        raylen = 0.0
    else:
        raise ValueError(f"Ray length method {raylen_method} not supported.")
    summary_log.append(f"Length of rendered rays to compute texture values: {raylen}")

    # Start rays half a ray length behind the surface so the surface sits mid-ray.
    origins = origins - 0.5 * raylen * directions
    pixel_area = torch.ones_like(origins[..., 0:1])
    camera_indices = torch.zeros_like(origins[..., 0:1])
    nears = torch.zeros_like(origins[..., 0:1])
    fars = torch.ones_like(origins[..., 0:1]) * raylen
    directions_norm = torch.ones_like(origins[..., 0:1])  # for surface model
    camera_ray_bundle = RayBundle(
        origins=origins,
        directions=directions,
        pixel_area=pixel_area,
        camera_indices=camera_indices,
        directions_norm=directions_norm,
        nears=nears,
        fars=fars,
    )

    CONSOLE.print("Creating texture image by rendering with NeRF...")
    with torch.no_grad():
        outputs = pipeline.model.get_outputs_for_camera_ray_bundle(camera_ray_bundle)

    # save the texture image
    texture_image = outputs["rgb"].cpu().numpy()
    media.write_image(str(output_dir / "material_0.png"), texture_image)

    CONSOLE.print("Writing relevant OBJ information to files...")
    # create the .mtl file (context manager guarantees the handle is closed on error)
    lines_mtl = [
        "# Generated with nerfstudio",
        "newmtl material_0",
        "Ka 1.000 1.000 1.000",
        "Kd 1.000 1.000 1.000",
        "Ks 0.000 0.000 0.000",
        "d 1.0",
        "illum 2",
        "Ns 1.00000000",
        "map_Kd material_0.png",
    ]
    lines_mtl = [line + "\n" for line in lines_mtl]
    with open(output_dir / "material_0.mtl", "w", encoding="utf-8") as file_mtl:
        file_mtl.writelines(lines_mtl)

    # create the .obj file
    lines_obj = ["# Generated with nerfstudio", "mtllib material_0.mtl", "usemtl material_0"]
    lines_obj = [line + "\n" for line in lines_obj]
    with open(output_dir / "mesh.obj", "w", encoding="utf-8") as file_obj:
        file_obj.writelines(lines_obj)

        # write the geometric vertices
        vertices = vertices.cpu().numpy()
        progress = get_progress("Writing vertices to file", suffix="lines-per-sec")
        with progress:
            for i in progress.track(range(len(vertices))):
                vertex = vertices[i]
                line = f"v {vertex[0]} {vertex[1]} {vertex[2]}\n"
                file_obj.write(line)

        # write the texture coordinates
        # BUGFIX: create the progress bar *before* entering its context. Previously the
        # already-finished vertex progress was re-entered here and the freshly created
        # texture-coordinate progress was never started.
        texture_coordinates = texture_coordinates.cpu().numpy()
        progress = get_progress("Writing texture coordinates to file", suffix="lines-per-sec")
        with progress:
            for i in progress.track(range(len(faces))):
                for uv in texture_coordinates[i]:
                    # OBJ convention flips the v axis relative to image row order
                    line = f"vt {uv[0]} {1.0 - uv[1]}\n"
                    file_obj.write(line)

        # write the vertex normals
        vertex_normals = vertex_normals.cpu().numpy()
        progress = get_progress("Writing vertex normals to file", suffix="lines-per-sec")
        with progress:
            for i in progress.track(range(len(vertex_normals))):
                normal = vertex_normals[i]
                line = f"vn {normal[0]} {normal[1]} {normal[2]}\n"
                file_obj.write(line)

        # write the faces (OBJ indices are 1-based; texture coordinates are laid out 3 per face)
        faces = faces.cpu().numpy()
        progress = get_progress("Writing faces to file", suffix="lines-per-sec")
        with progress:
            for i in progress.track(range(len(faces))):
                face = faces[i]
                v1 = face[0] + 1
                v2 = face[1] + 1
                v3 = face[2] + 1
                vt1 = i * 3 + 1
                vt2 = i * 3 + 2
                vt3 = i * 3 + 3
                vn1 = v1
                vn2 = v2
                vn3 = v3
                line = f"f {v1}/{vt1}/{vn1} {v2}/{vt2}/{vn2} {v3}/{vt3}/{vn3}\n"
                file_obj.write(line)

    summary_log.append(f"OBJ file saved to {output_dir / 'mesh.obj'}")
    summary_log.append(f"MTL file saved to {output_dir / 'material_0.mtl'}")
    summary_log.append(
        f"Texture image saved to {output_dir / 'material_0.png'} "
        f"with resolution {texture_image.shape[1]}x{texture_image.shape[0]} (WxH)"
    )

    CONSOLE.rule("[bold green]:tada: :tada: :tada: All DONE :tada: :tada: :tada:")
    for summary in summary_log:
        CONSOLE.print(summary, justify="center")
    CONSOLE.rule()
from __future__ import annotations
from dataclasses import dataclass, field
from pathlib import Path
from typing import List, Optional, Tuple, Union
import numpy as np
import pymeshlab
import torch
import torch.nn.functional as F
from rich.console import Console
from skimage import measure
from torchtyping import TensorType
from nerfstudio.exporter.exporter_utils import Mesh, render_trajectory
from nerfstudio.pipelines.base_pipeline import Pipeline
# Shared rich console for status output; fixed width keeps formatted rules/logs stable.
CONSOLE = Console(width=120)
@dataclass  # generates the __init__ used by the positional construction TSDF(voxel_coords, values, ...)
class TSDF:
    """
    Class for creating truncated signed distance fields (TSDFs).
    """

    voxel_coords: TensorType[3, "xdim", "ydim", "zdim"]
    """Coordinates of each voxel in the TSDF."""
    values: TensorType["xdim", "ydim", "zdim"]
    """TSDF values for each voxel."""
    weights: TensorType["xdim", "ydim", "zdim"]
    """TSDF weights for each voxel."""
    colors: TensorType["xdim", "ydim", "zdim", 3]
    """TSDF colors for each voxel."""
    voxel_size: TensorType[3]
    """Size of each voxel in the TSDF. [x, y, z] size."""
    origin: TensorType[3]
    """Origin of the TSDF [xmin, ymin, zmin]."""
    truncation_margin: float = 5.0
    """Margin for truncation, in multiples of the voxel size."""

    def to(self, device: str):
        """Move the tensors to the specified device.

        Args:
            device: The device to move the tensors to. E.g., "cuda:0" or "cpu".

        Returns:
            Self, to allow chaining.
        """
        self.voxel_coords = self.voxel_coords.to(device)
        self.values = self.values.to(device)
        self.weights = self.weights.to(device)
        self.colors = self.colors.to(device)
        self.voxel_size = self.voxel_size.to(device)
        self.origin = self.origin.to(device)
        return self

    @property  # read as the attribute `self.device` in integrate_tsdf
    def device(self):
        """Returns the device that voxel_coords is on."""
        return self.voxel_coords.device

    @property  # read as the attribute `self.truncation` in integrate_tsdf
    def truncation(self):
        """Returns the truncation distance (voxel size times truncation margin)."""
        # TODO: clean this up
        truncation = self.voxel_size[0] * self.truncation_margin
        return truncation

    @staticmethod  # called as TSDF.from_aabb(...) with no instance argument
    def from_aabb(aabb: TensorType[2, 3], volume_dims: TensorType[3]):
        """Returns an instance of TSDF from an axis-aligned bounding box and volume dimensions.

        Args:
            aabb: The axis-aligned bounding box with shape [[xmin, ymin, zmin], [xmax, ymax, zmax]].
            volume_dims: The volume dimensions with shape [xdim, ydim, zdim].
        """
        origin = aabb[0]
        voxel_size = (aabb[1] - aabb[0]) / volume_dims

        # create the voxel coordinates
        xdim = torch.arange(volume_dims[0])
        ydim = torch.arange(volume_dims[1])
        zdim = torch.arange(volume_dims[2])
        grid = torch.stack(torch.meshgrid([xdim, ydim, zdim], indexing="ij"), dim=0)
        voxel_coords = origin.view(3, 1, 1, 1) + grid * voxel_size.view(3, 1, 1, 1)

        # initialize the values and weights
        values = -torch.ones(volume_dims.tolist())
        weights = torch.zeros(volume_dims.tolist())
        colors = torch.zeros(volume_dims.tolist() + [3])

        # TODO: move to device
        return TSDF(voxel_coords, values, weights, colors, voxel_size, origin)

    def get_mesh(self) -> Mesh:
        """Extracts a mesh using marching cubes."""
        device = self.values.device

        # run marching cubes on CPU
        tsdf_values_np = self.values.clamp(-1, 1).cpu().numpy()
        vertices, faces, normals, _ = measure.marching_cubes(tsdf_values_np, level=0, allow_degenerate=False)
        # nearest-voxel lookup for per-vertex colors
        vertices_indices = np.round(vertices).astype(int)
        colors = self.colors[vertices_indices[:, 0], vertices_indices[:, 1], vertices_indices[:, 2]]

        # move back to original device
        vertices = torch.from_numpy(vertices.copy()).to(device)
        faces = torch.from_numpy(faces.copy()).to(device)
        normals = torch.from_numpy(normals.copy()).to(device)

        # move vertices back to world space (marching cubes returns voxel-index coordinates)
        vertices = self.origin.view(1, 3) + vertices * self.voxel_size.view(1, 3)
        return Mesh(vertices=vertices, faces=faces, normals=normals, colors=colors)

    @classmethod  # first parameter is `cls`
    def export_mesh(cls, mesh: Mesh, filename: str):
        """Exports the mesh to a file.

        We use pymeshlab to export the mesh as a PLY file.

        Args:
            mesh: The mesh to export.
            filename: The filename to export the mesh to.
        """
        vertex_matrix = mesh.vertices.cpu().numpy().astype("float64")
        face_matrix = mesh.faces.cpu().numpy().astype("int32")
        v_normals_matrix = mesh.normals.cpu().numpy().astype("float64")
        v_color_matrix = mesh.colors.cpu().numpy().astype("float64")

        # colors need an alpha channel
        v_color_matrix = np.concatenate([v_color_matrix, np.ones((v_color_matrix.shape[0], 1))], axis=-1)

        # create a new Mesh
        m = pymeshlab.Mesh(
            vertex_matrix=vertex_matrix,
            face_matrix=face_matrix,
            v_normals_matrix=v_normals_matrix,
            v_color_matrix=v_color_matrix,
        )
        # create a new MeshSet
        ms = pymeshlab.MeshSet()
        # add the mesh to the MeshSet
        ms.add_mesh(m, "mesh")
        # save the current mesh
        ms.save_current_mesh(filename)

    def integrate_tsdf(
        self,
        c2w: TensorType["batch", 4, 4],
        K: TensorType["batch", 3, 3],
        depth_images: TensorType["batch", 1, "height", "width"],
        color_images: Optional[TensorType["batch", 3, "height", "width"]] = None,
        mask_images: Optional[TensorType["batch", 1, "height", "width"]] = None,
    ):
        """Integrates a batch of depth images into the TSDF.

        Args:
            c2w: The camera extrinsics.
            K: The camera intrinsics.
            depth_images: The depth images to integrate.
            color_images: The color images to integrate.
            mask_images: The mask images to integrate.
        """
        if mask_images is not None:
            raise NotImplementedError("Mask images are not supported yet.")

        batch_size = c2w.shape[0]
        shape = self.voxel_coords.shape[1:]

        # Project voxel_coords into image space...
        image_size = torch.tensor(
            [depth_images.shape[-1], depth_images.shape[-2]], device=self.device
        )  # [width, height]

        # make voxel_coords homogeneous
        voxel_world_coords = self.voxel_coords.view(3, -1)
        voxel_world_coords = torch.cat(
            [voxel_world_coords, torch.ones(1, voxel_world_coords.shape[1], device=self.device)], dim=0
        )
        voxel_world_coords = voxel_world_coords.unsqueeze(0)  # [1, 4, N]
        voxel_world_coords = voxel_world_coords.expand(batch_size, *voxel_world_coords.shape[1:])  # [batch, 4, N]
        voxel_cam_coords = torch.bmm(torch.inverse(c2w), voxel_world_coords)  # [batch, 4, N]

        # flip the z axis
        voxel_cam_coords[:, 2, :] = -voxel_cam_coords[:, 2, :]
        # flip the y axis
        voxel_cam_coords[:, 1, :] = -voxel_cam_coords[:, 1, :]

        # we need the distance of the point to the camera, not the z coordinate
        voxel_depth = torch.sqrt(torch.sum(voxel_cam_coords[:, :3, :] ** 2, dim=-2, keepdim=True))  # [batch, 1, N]

        voxel_cam_coords_z = voxel_cam_coords[:, 2:3, :]
        voxel_cam_points = torch.bmm(K, voxel_cam_coords[:, 0:3, :] / voxel_cam_coords_z)  # [batch, 3, N]
        voxel_pixel_coords = voxel_cam_points[:, :2, :]  # [batch, 2, N]

        # Sample the depth images with grid sample...
        grid = voxel_pixel_coords.permute(0, 2, 1)  # [batch, N, 2]
        # normalize grid to [-1, 1]
        grid = 2.0 * grid / image_size.view(1, 1, 2) - 1.0  # [batch, N, 2]
        grid = grid[:, None]  # [batch, 1, N, 2]

        # depth; grid_sample output is [batch, 1, 1, N]
        sampled_depth = F.grid_sample(
            input=depth_images, grid=grid, mode="nearest", padding_mode="zeros", align_corners=False
        )
        sampled_depth = sampled_depth.squeeze(2)  # [batch, 1, N]
        # colors; grid_sample output is [batch, 3, 1, N]
        if color_images is not None:
            sampled_colors = F.grid_sample(
                input=color_images, grid=grid, mode="nearest", padding_mode="zeros", align_corners=False
            )
            sampled_colors = sampled_colors.squeeze(2)  # [batch, 3, N]

        dist = sampled_depth - voxel_depth  # [batch, 1, N]
        tsdf_values = torch.clamp(dist / self.truncation, min=-1.0, max=1.0)  # [batch, 1, N]
        valid_points = (voxel_depth > 0) & (sampled_depth > 0) & (dist > -self.truncation)  # [batch, 1, N]

        # Sequentially update the TSDF...
        for i in range(batch_size):
            valid_points_i = valid_points[i]
            valid_points_i_shape = valid_points_i.view(*shape)  # [xdim, ydim, zdim]

            # the old values
            old_tsdf_values_i = self.values[valid_points_i_shape]
            old_weights_i = self.weights[valid_points_i_shape]

            # the new values
            # TODO: let the new weight be configurable
            new_tsdf_values_i = tsdf_values[i][valid_points_i]
            new_weights_i = 1.0

            # running weighted average of TSDF values; weights saturate at 1
            total_weights = old_weights_i + new_weights_i
            self.values[valid_points_i_shape] = (
                old_tsdf_values_i * old_weights_i + new_tsdf_values_i * new_weights_i
            ) / total_weights
            self.weights[valid_points_i_shape] = torch.clamp(total_weights, max=1.0)

            if color_images is not None:
                old_colors_i = self.colors[valid_points_i_shape]  # [M, 3]
                new_colors_i = sampled_colors[i][:, valid_points_i.squeeze(0)].permute(1, 0)  # [M, 3]
                self.colors[valid_points_i_shape] = (
                    old_colors_i * old_weights_i[:, None] + new_colors_i * new_weights_i
                ) / total_weights[:, None]
def render_trajectory(
    pipeline: Pipeline,
    cameras: Cameras,
    rgb_output_name: str,
    depth_output_name: str,
    rendered_resolution_scaling_factor: float = 1.0,
    disable_distortion: bool = False,
) -> Tuple[List[np.ndarray], List[np.ndarray]]:
    """Render RGB and depth frames along a camera trajectory.

    Args:
        pipeline: Pipeline to evaluate with.
        cameras: Cameras to render.
        rgb_output_name: Name of the RGB output.
        depth_output_name: Name of the depth output.
        rendered_resolution_scaling_factor: Scaling factor to apply to the camera image resolution.
        disable_distortion: Whether to disable distortion.

    Returns:
        List of rgb images, list of depth images.
    """
    rgb_frames: List[np.ndarray] = []
    depth_frames: List[np.ndarray] = []
    cameras.rescale_output_resolution(rendered_resolution_scaling_factor)
    progress = Progress(
        TextColumn(":cloud: Computing rgb and depth images :cloud:"),
        BarColumn(),
        TaskProgressColumn(show_speed=True),
        ItersPerSecColumn(suffix="fps"),
        TimeRemainingColumn(elapsed_when_finished=True, compact=True),
    )
    with progress:
        for camera_idx in progress.track(range(cameras.size), description=""):
            ray_bundle = cameras.generate_rays(
                camera_indices=camera_idx, disable_distortion=disable_distortion
            ).to(pipeline.device)
            with torch.no_grad():
                outputs = pipeline.model.get_outputs_for_camera_ray_bundle(ray_bundle)
            # Bail out with a helpful message if either requested output is missing.
            for output_name, flag in (
                (rgb_output_name, "--rgb_output_name"),
                (depth_output_name, "--depth_output_name"),
            ):
                if output_name not in outputs:
                    CONSOLE.rule("Error", style="red")
                    CONSOLE.print(f"Could not find {output_name} in the model outputs", justify="center")
                    CONSOLE.print(f"Please set {flag} to one of: {outputs.keys()}", justify="center")
                    sys.exit(1)
            rgb_frames.append(outputs[rgb_output_name].cpu().numpy())
            depth_frames.append(outputs[depth_output_name].cpu().numpy())
    return rgb_frames, depth_frames
class Pipeline(nn.Module):
    """The intent of this class is to provide a higher level interface for the Model
    that will be easy to use for our Trainer class.

    This class will contain high level functions for the model like getting the loss
    dictionaries and visualization code. It should have ways to get the next iterations
    training loss, evaluation loss, and generate whole images for visualization. Each model
    class should be 1:1 with a pipeline that can act as a standardized interface and hide
    differences in how each model takes in and outputs data.

    This class's function is to hide the data manager and model classes from the trainer,
    worrying about:
    1) Fetching data with the data manager
    2) Feeding the model the data and fetching the loss

    Hopefully this provides a higher level interface for the trainer to use, and
    simplifying the model classes, which each may have different forward() methods
    and so on.

    Args:
        config: configuration to instantiate pipeline
        device: location to place model and data
        test_mode:
            'train': loads train/eval datasets into memory
            'test': loads train/test dataset into memory
            'inference': does not load any dataset into memory
        world_size: total number of machines available
        local_rank: rank of current machine

    Attributes:
        datamanager: The data manager that will be used
        model: The model that will be used
    """

    # pylint: disable=abstract-method
    datamanager: DataManager
    _model: Model

    @property
    def model(self):
        """Returns the unwrapped model if in ddp"""
        # `model` is read as an attribute throughout this file
        # (self.model(ray_bundle, batch), pipeline.model.get_outputs_for_camera_ray_bundle),
        # so this accessor must be a property rather than a plain method.
        return module_wrapper(self._model)

    @property
    def device(self):
        """Returns the device that the model is on."""
        # Also read as an attribute (`pipeline.device`) by the exporter functions.
        return self.model.device

    def get_train_loss_dict(self, step: int):
        """This function gets your training loss dict. This will be responsible for
        getting the next batch of data from the DataManager and interfacing with the
        Model class, feeding the data to the model's forward function.

        Args:
            step: current iteration step to update sampler if using DDP (distributed)

        Returns:
            Tuple of (model outputs, loss dict, metrics dict).
        """
        # NOTE(review): `and step` skips the sampler update when step == 0 — confirm
        # this is intentional rather than a truthiness slip.
        if self.world_size > 1 and step:
            assert self.datamanager.train_sampler is not None
            self.datamanager.train_sampler.set_epoch(step)
        ray_bundle, batch = self.datamanager.next_train(step)
        model_outputs = self.model(ray_bundle, batch)
        metrics_dict = self.model.get_metrics_dict(model_outputs, batch)
        loss_dict = self.model.get_loss_dict(model_outputs, batch, metrics_dict)
        return model_outputs, loss_dict, metrics_dict

    def get_eval_loss_dict(self, step: int):
        """This function gets your evaluation loss dict. It needs to get the data
        from the DataManager and feed it to the model's forward function.

        Args:
            step: current iteration step

        Returns:
            Tuple of (model outputs, loss dict, metrics dict).
        """
        # Temporarily switch to eval mode, evaluate one batch, then restore train mode.
        self.eval()
        if self.world_size > 1:
            assert self.datamanager.eval_sampler is not None
            self.datamanager.eval_sampler.set_epoch(step)
        ray_bundle, batch = self.datamanager.next_eval(step)
        model_outputs = self.model(ray_bundle, batch)
        metrics_dict = self.model.get_metrics_dict(model_outputs, batch)
        loss_dict = self.model.get_loss_dict(model_outputs, batch, metrics_dict)
        self.train()
        return model_outputs, loss_dict, metrics_dict

    def get_eval_image_metrics_and_images(self, step: int):
        """This function gets your evaluation loss dict. It needs to get the data
        from the DataManager and feed it to the model's forward function.

        Args:
            step: current iteration step
        """
        # NOTE(review): no body here — presumably an abstract hook implemented by
        # subclasses (a decorator may have been lost); confirm against subclasses.

    def get_average_eval_image_metrics(self, step: Optional[int] = None):
        """Iterate over all the images in the eval dataset and get the average."""
        # NOTE(review): abstract hook — see note on get_eval_image_metrics_and_images.

    def load_pipeline(self, loaded_state: Dict[str, Any]) -> None:
        """Load the checkpoint from the given path.

        Args:
            loaded_state: pre-trained model state dict
        """
        # NOTE(review): abstract hook — see note on get_eval_image_metrics_and_images.

    def get_training_callbacks(
        self, training_callback_attributes: TrainingCallbackAttributes
    ) -> List[TrainingCallback]:
        """Returns the training callbacks from both the Dataloader and the Model."""
        # NOTE(review): abstract hook — see note on get_eval_image_metrics_and_images.

    def get_param_groups(self) -> Dict[str, List[Parameter]]:
        """Get the param groups for the pipeline.

        Returns:
            A list of dictionaries containing the pipeline's param groups.
        """
        # NOTE(review): abstract hook — see note on get_eval_image_metrics_and_images.
The provided code snippet includes necessary dependencies for implementing the `export_tsdf_mesh` function. Write a Python function `def export_tsdf_mesh( pipeline: Pipeline, output_dir: Path, downscale_factor: int = 2, depth_output_name: str = "depth", rgb_output_name: str = "rgb", resolution: Union[int, List[int]] = field(default_factory=lambda: [256, 256, 256]), batch_size: int = 10, use_bounding_box: bool = True, bounding_box_min: Tuple[float, float, float] = (-1.0, -1.0, -1.0), bounding_box_max: Tuple[float, float, float] = (1.0, 1.0, 1.0), )` to solve the following problem:
Export a TSDF mesh from a pipeline. Args: pipeline: The pipeline to export the mesh from. output_dir: The directory to save the mesh to. downscale_factor: Downscale factor for the images. depth_output_name: Name of the depth output. rgb_output_name: Name of the RGB output. resolution: Resolution of the TSDF volume or [x, y, z] resolutions individually. batch_size: How many depth images to integrate per batch. use_bounding_box: Whether to use a bounding box for the TSDF volume. bounding_box_min: Minimum coordinates of the bounding box. bounding_box_max: Maximum coordinates of the bounding box.
Here is the function:
def export_tsdf_mesh(
    pipeline: Pipeline,
    output_dir: Path,
    downscale_factor: int = 2,
    depth_output_name: str = "depth",
    rgb_output_name: str = "rgb",
    resolution: Union[int, List[int]] = field(default_factory=lambda: [256, 256, 256]),
    batch_size: int = 10,
    use_bounding_box: bool = True,
    bounding_box_min: Tuple[float, float, float] = (-1.0, -1.0, -1.0),
    bounding_box_max: Tuple[float, float, float] = (1.0, 1.0, 1.0),
):
    """Export a TSDF mesh from a pipeline.
    Args:
        pipeline: The pipeline to export the mesh from.
        output_dir: The directory to save the mesh to.
        downscale_factor: Downscale factor for the images.
        depth_output_name: Name of the depth output.
        rgb_output_name: Name of the RGB output.
        resolution: Resolution of the TSDF volume or [x, y, z] resolutions individually.
        batch_size: How many depth images to integrate per batch.
        use_bounding_box: Whether to use a bounding box for the TSDF volume.
        bounding_box_min: Minimum coordinates of the bounding box.
        bounding_box_max: Maximum coordinates of the bounding box.
    """
    from dataclasses import Field  # local import: only needed to unwrap the default

    # BUG FIX: `field(default_factory=...)` only has meaning inside a dataclass.
    # When this function is called directly without `resolution`, the default is
    # a `dataclasses.Field` instance, which previously fell through to the
    # "Resolution must be an int or a list." error. Unwrap it here.
    if isinstance(resolution, Field):
        resolution = resolution.default_factory()  # type: ignore[union-attr]
    device = pipeline.device
    dataparser_outputs = pipeline.datamanager.train_dataset._dataparser_outputs  # pylint: disable=protected-access
    # Choose the integration volume: either the dataset's scene box or a
    # user-supplied axis-aligned bounding box.
    if not use_bounding_box:
        aabb = dataparser_outputs.scene_box.aabb
    else:
        aabb = torch.tensor([bounding_box_min, bounding_box_max])
    if isinstance(resolution, int):
        volume_dims = torch.tensor([resolution] * 3)
    elif isinstance(resolution, (list, tuple)):  # concrete sequence check, not typing.List
        volume_dims = torch.tensor(resolution)
    else:
        raise ValueError("Resolution must be an int or a list.")
    tsdf = TSDF.from_aabb(aabb, volume_dims=volume_dims)
    # move TSDF to device
    tsdf.to(device)
    cameras = dataparser_outputs.cameras
    # we turn off distortion when populating the TSDF
    color_images, depth_images = render_trajectory(
        pipeline,
        cameras,
        rgb_output_name=rgb_output_name,
        depth_output_name=depth_output_name,
        rendered_resolution_scaling_factor=1.0 / downscale_factor,
        disable_distortion=True,
    )
    # camera extrinsics and intrinsics
    c2w: TensorType["N", 3, 4] = cameras.camera_to_worlds.to(device)
    # make c2w homogeneous: append a row, then set its last entry to 1 -> [0, 0, 0, 1]
    c2w = torch.cat([c2w, torch.zeros(c2w.shape[0], 1, 4, device=device)], dim=1)
    c2w[:, 3, 3] = 1
    K: TensorType["N", 3, 3] = cameras.get_intrinsics_matrices().to(device)
    color_images = torch.tensor(np.array(color_images), device=device).permute(0, 3, 1, 2)  # shape (N, 3, H, W)
    depth_images = torch.tensor(np.array(depth_images), device=device).permute(0, 3, 1, 2)  # shape (N, 1, H, W)
    CONSOLE.print("Integrating the TSDF")
    # Integrate in batches to bound peak GPU memory usage.
    for i in range(0, len(c2w), batch_size):
        tsdf.integrate_tsdf(
            c2w[i : i + batch_size],
            K[i : i + batch_size],
            depth_images[i : i + batch_size],
            color_images=color_images[i : i + batch_size],
        )
    CONSOLE.print("Computing Mesh")
    mesh = tsdf.get_mesh()
    CONSOLE.print("Saving TSDF Mesh")
    tsdf.export_mesh(mesh, filename=str(output_dir / "tsdf_mesh.ply"))
162,322 | from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Dict, List, Type
import torch
from torch.cuda.amp.grad_scaler import GradScaler
from torch.nn.parameter import Parameter
from nerfstudio.configs import base_config
from nerfstudio.utils import writer
class Optimizers:
    """A set of optimizers and (optional) learning-rate schedulers.

    Args:
        config: Mapping from parameter-group name to a dict holding an
            "optimizer" config (must expose ``lr`` and ``setup``) and an
            optional "scheduler" config.
        param_groups: A dictionary of parameter groups to optimize.
    """

    def __init__(self, config: Dict[str, Any], param_groups: Dict[str, List[Parameter]]):
        self.config = config
        self.optimizers = {}
        self.schedulers = {}
        for param_group_name, params in param_groups.items():
            lr_init = config[param_group_name]["optimizer"].lr
            self.optimizers[param_group_name] = config[param_group_name]["optimizer"].setup(params=params)
            # Schedulers are optional per group; only set up when configured.
            if config[param_group_name]["scheduler"]:
                self.schedulers[param_group_name] = config[param_group_name]["scheduler"].setup(
                    optimizer=self.optimizers[param_group_name], lr_init=lr_init
                )

    def optimizer_step(self, param_group_name: str) -> None:
        """Fetch and step corresponding optimizer.
        Args:
            param_group_name: name of optimizer to step forward
        """
        self.optimizers[param_group_name].step()

    def scheduler_step(self, param_group_name: str) -> None:
        """Fetch and step corresponding scheduler.
        Args:
            param_group_name: name of scheduler to step forward
        """
        # BUG FIX: `self.config` is a plain dict keyed by the group name, so the
        # previous attribute-style lookup (`self.config.param_group_name.scheduler`)
        # raised AttributeError whenever this method was called.
        if self.config[param_group_name]["scheduler"]:
            self.schedulers[param_group_name].step()

    def zero_grad_all(self) -> None:
        """Zero the gradients for all optimizer parameters."""
        for _, optimizer in self.optimizers.items():
            optimizer.zero_grad()

    def optimizer_scaler_step_all(self, grad_scaler: GradScaler) -> None:
        """Take an optimizer step using a grad scaler.
        Args:
            grad_scaler: GradScaler to use
        """
        for _, optimizer in self.optimizers.items():
            grad_scaler.step(optimizer)

    def optimizer_step_all(self):
        """Run step for all optimizers."""
        for _, optimizer in self.optimizers.items():
            # note that the key is the parameter-group name
            optimizer.step()

    def scheduler_step_all(self, step: int) -> None:
        """Run step for all schedulers.
        Args:
            step: the current step
        """
        for param_group_name, scheduler in self.schedulers.items():
            scheduler.step()
            # TODO(ethan): clean this up. why is there indexing into a list?
            lr = scheduler.get_last_lr()[0]
            writer.put_scalar(name=f"learning_rate/{param_group_name}", scalar=lr, step=step)

    def load_optimizers(self, loaded_state: Dict[str, Any]) -> None:
        """Helper to load the optimizer state from previous checkpoint
        Args:
            loaded_state: the state from the previous checkpoint
        """
        for k, v in loaded_state.items():
            self.optimizers[k].load_state_dict(v)

    def load_schedulers(self, loaded_state: Dict[str, Any]) -> None:
        """Helper to load the schedulers state from previous checkpoint
        Args:
            loaded_state: the state from the previous checkpoint
        """
        for k, v in loaded_state.items():
            self.schedulers[k].load_state_dict(v)
The provided code snippet includes necessary dependencies for implementing the `setup_optimizers` function. Write a Python function `def setup_optimizers(config: base_config.Config, param_groups: Dict[str, List[Parameter]]) -> "Optimizers"` to solve the following problem:
Helper to set up the optimizers Args: config: The trainer configuration object. param_groups: A dictionary of parameter groups to optimize. Returns: The optimizers object.
Here is the function:
def setup_optimizers(config: base_config.Config, param_groups: Dict[str, List[Parameter]]) -> "Optimizers":
    """Build the Optimizers object for the trainer.

    Args:
        config: The trainer configuration object.
        param_groups: A dictionary of parameter groups to optimize.
    Returns:
        The optimizers object.
    """
    combined_config = config.optimizers.copy()
    # When camera optimization is enabled, register its param group as well.
    cam_opt = config.pipeline.datamanager.camera_optimizer
    if cam_opt.mode != "off":
        assert cam_opt.param_group not in combined_config
        combined_config[cam_opt.param_group] = {
            "optimizer": cam_opt.optimizer,
            "scheduler": cam_opt.scheduler,
        }
    return Optimizers(combined_config, param_groups)
162,323 | import torch
import torch.nn.functional as F
from torch import nn
from torchtyping import TensorType
from torch.autograd import Variable
import numpy as np
from math import exp
from nerfstudio.cameras.rays import RaySamples
from nerfstudio.field_components.field_heads import FieldHeadNames
def lossfun_outer(
    t: TensorType[..., "num_samples+1"],
    w: TensorType[..., "num_samples"],
    t_env: TensorType[..., "num_samples+1"],
    w_env: TensorType[..., "num_samples"],
):
    """Proposal-supervision loss: penalize inner weights exceeding their envelope.

    https://github.com/kakaobrain/NeRF-Factory/blob/f61bb8744a5cb4820a4d968fb3bfbed777550f4a/src/model/mipnerf360/helper.py#L136
    https://github.com/google-research/multinerf/blob/b02228160d3179300c7d499dca28cb9ca3677f32/internal/stepfun.py#L80
    Args:
        t: interval edges
        w: weights
        t_env: interval edges of the upper bound enveloping histogram
        w_env: weights that should upper bound the inner (t, w) histogram
    """
    # Upper-bound weights implied by the envelope histogram for each (t, w) bin.
    w_bound = outer(t[..., :-1], t[..., 1:], t_env[..., :-1], t_env[..., 1:], w_env)
    # Only the portion of w that pokes above its envelope is penalized.
    excess = torch.clip(w - w_bound, min=0)
    return excess**2 / (w + EPS)
def ray_samples_to_sdist(ray_samples):
    """Convert ray samples to s space"""
    # Left edge of every bin, plus the right edge of the final bin.
    left_edges = ray_samples.spacing_starts[..., 0]
    final_right_edge = ray_samples.spacing_ends[..., -1:, 0]
    return torch.cat([left_edges, final_right_edge], dim=-1)  # (num_rays, num_samples + 1)
The provided code snippet includes necessary dependencies for implementing the `interlevel_loss` function. Write a Python function `def interlevel_loss(weights_list, ray_samples_list)` to solve the following problem:
Calculates the proposal loss in the MipNeRF-360 paper. https://github.com/kakaobrain/NeRF-Factory/blob/f61bb8744a5cb4820a4d968fb3bfbed777550f4a/src/model/mipnerf360/model.py#L515 https://github.com/google-research/multinerf/blob/b02228160d3179300c7d499dca28cb9ca3677f32/internal/train_utils.py#L133
Here is the function:
def interlevel_loss(weights_list, ray_samples_list):
    """Calculates the proposal loss in the MipNeRF-360 paper.
    https://github.com/kakaobrain/NeRF-Factory/blob/f61bb8744a5cb4820a4d968fb3bfbed777550f4a/src/model/mipnerf360/model.py#L515
    https://github.com/google-research/multinerf/blob/b02228160d3179300c7d499dca28cb9ca3677f32/internal/train_utils.py#L133
    """
    # The final (finest) level is the frozen supervision target.
    target_sdist = ray_samples_to_sdist(ray_samples_list[-1]).detach()
    target_w = weights_list[-1][..., 0].detach()
    total = 0.0
    for proposal_samples, proposal_weights in zip(ray_samples_list[:-1], weights_list[:-1]):
        cp = ray_samples_to_sdist(proposal_samples)  # (num_rays, num_samples + 1)
        wp = proposal_weights[..., 0]  # (num_rays, num_samples)
        total += torch.mean(lossfun_outer(target_sdist, target_w, cp, wp))
    return total
162,324 | import torch
import torch.nn.functional as F
from torch import nn
from torchtyping import TensorType
from torch.autograd import Variable
import numpy as np
from math import exp
from nerfstudio.cameras.rays import RaySamples
from nerfstudio.field_components.field_heads import FieldHeadNames
def ray_samples_to_sdist(ray_samples):
    """Convert ray samples to s space"""
    # Left edge of every bin, plus the right edge of the final bin.
    left_edges = ray_samples.spacing_starts[..., 0]
    final_right_edge = ray_samples.spacing_ends[..., -1:, 0]
    return torch.cat([left_edges, final_right_edge], dim=-1)  # (num_rays, num_samples + 1)
def blur_stepfun(x, y, r):
    """Blur the step function (x, y) with a box filter of half-width ``r``.

    Returns the sorted, doubled edge locations ``x_r`` and the blurred
    function's values ``y_r`` at those edges — presumably a piecewise-linear
    density; confirm against the Zip-NeRF reference implementation.
    """
    # Each original edge splits into two edges at distance +/- r.
    x_c = torch.cat([x - r, x + r], dim=-1)
    x_r, x_idx = torch.sort(x_c, dim=-1)
    zeros = torch.zeros_like(y[:, :1])
    # Slope impulses: each bin's height change, spread over the 2r blur window.
    y_1 = (torch.cat([y, zeros], dim=-1) - torch.cat([zeros, y], dim=-1)) / (2 * r)
    x_idx = x_idx[:, :-1]
    # Reorder the positive/negative slope impulses to match the sorted edges.
    y_2 = torch.cat([y_1, -y_1], dim=-1)[
        torch.arange(x_idx.shape[0]).reshape(-1, 1).expand(x_idx.shape).to(x_idx.device), x_idx
    ]
    # Double cumulative sum: impulses -> slopes -> values at each edge.
    y_r = torch.cumsum((x_r[:, 1:] - x_r[:, :-1]) * torch.cumsum(y_2, dim=-1), dim=-1)
    y_r = torch.cat([zeros, y_r], dim=-1)
    return x_r, y_r
The provided code snippet includes necessary dependencies for implementing the `interlevel_loss_zip` function. Write a Python function `def interlevel_loss_zip(weights_list, ray_samples_list)` to solve the following problem:
Calculates the proposal loss in the Zip-NeRF paper.
Here is the function:
def interlevel_loss_zip(weights_list, ray_samples_list):
    """Calculates the proposal loss in the Zip-NeRF paper.

    The final (finest) level in the lists is the detached supervision target;
    each earlier proposal level is compared against a blurred version of it.
    """
    c = ray_samples_to_sdist(ray_samples_list[-1]).detach()
    w = weights_list[-1][..., 0].detach()
    # 1. normalize: convert per-bin weights into densities (weight / bin width)
    w_normalize = w / (c[:, 1:] - c[:, :-1])
    loss_interlevel = 0.0
    # Blur radii per proposal level — presumably tuned for two levels; a third
    # level would silently be skipped by zip(). TODO confirm intended.
    for ray_samples, weights, r in zip(ray_samples_list[:-1], weights_list[:-1], [0.03, 0.003]):
        # 2. step blur with different r
        x_r, y_r = blur_stepfun(c, w_normalize, r)
        y_r = torch.clip(y_r, min=0)
        assert (y_r >= 0.0).all()
        # 3. accumulate: trapezoidal integral of the blurred density
        y_cum = torch.cumsum((y_r[:, 1:] + y_r[:, :-1]) * 0.5 * (x_r[:, 1:] - x_r[:, :-1]), dim=-1)
        y_cum = torch.cat([torch.zeros_like(y_cum[:, :1]), y_cum], dim=-1)
        # 4 loss
        sdist = ray_samples_to_sdist(ray_samples)
        cp = sdist  # (num_rays, num_samples + 1)
        wp = weights[..., 0]  # (num_rays, num_samples)
        # resample: linearly interpolate the blurred CDF at the proposal edges
        inds = torch.searchsorted(x_r, cp, side="right")
        below = torch.clamp(inds - 1, 0, x_r.shape[-1] - 1)
        above = torch.clamp(inds, 0, x_r.shape[-1] - 1)
        cdf_g0 = torch.gather(x_r, -1, below)
        bins_g0 = torch.gather(y_cum, -1, below)
        cdf_g1 = torch.gather(x_r, -1, above)
        bins_g1 = torch.gather(y_cum, -1, above)
        t = torch.clip(torch.nan_to_num((cp - cdf_g0) / (cdf_g1 - cdf_g0), 0), 0, 1)
        bins = bins_g0 + t * (bins_g1 - bins_g0)
        # Target weight per proposal bin = CDF difference across the bin.
        w_gt = bins[:, 1:] - bins[:, :-1]
        # TODO here might be unstable when wp is very small
        loss_interlevel += torch.mean(torch.clip(w_gt - wp, min=0) ** 2 / (wp + 1e-5))
    return loss_interlevel
162,325 | import torch
import torch.nn.functional as F
from torch import nn
from torchtyping import TensorType
from torch.autograd import Variable
import numpy as np
from math import exp
from nerfstudio.cameras.rays import RaySamples
from nerfstudio.field_components.field_heads import FieldHeadNames
def ray_samples_to_sdist(ray_samples):
    """Convert ray samples to s space"""
    # Left edge of every bin, plus the right edge of the final bin.
    left_edges = ray_samples.spacing_starts[..., 0]
    final_right_edge = ray_samples.spacing_ends[..., -1:, 0]
    return torch.cat([left_edges, final_right_edge], dim=-1)  # (num_rays, num_samples + 1)
def lossfun_distortion(t, w):
    """
    https://github.com/kakaobrain/NeRF-Factory/blob/f61bb8744a5cb4820a4d968fb3bfbed777550f4a/src/model/mipnerf360/helper.py#L142
    https://github.com/google-research/multinerf/blob/b02228160d3179300c7d499dca28cb9ca3677f32/internal/stepfun.py#L266
    """
    # Bin midpoints and their pairwise absolute distances.
    midpoints = (t[..., 1:] + t[..., :-1]) / 2
    pairwise_dist = torch.abs(midpoints[..., :, None] - midpoints[..., None, :])
    # Inter-bin term: weight mass interacting across different bins.
    inter_term = torch.sum(w * torch.sum(w[..., None, :] * pairwise_dist, dim=-1), dim=-1)
    # Intra-bin term: each bin's self-interaction, proportional to its width.
    intra_term = torch.sum(w**2 * (t[..., 1:] - t[..., :-1]), dim=-1) / 3
    return inter_term + intra_term
The provided code snippet includes necessary dependencies for implementing the `distortion_loss` function. Write a Python function `def distortion_loss(weights_list, ray_samples_list)` to solve the following problem:
From mipnerf360
Here is the function:
def distortion_loss(weights_list, ray_samples_list):
    """From mipnerf360"""
    # Only the final (finest) level contributes to the distortion loss.
    final_sdist = ray_samples_to_sdist(ray_samples_list[-1])
    final_weights = weights_list[-1][..., 0]
    return torch.mean(lossfun_distortion(final_sdist, final_weights))
162,326 | import torch
import torch.nn.functional as F
from torch import nn
from torchtyping import TensorType
from torch.autograd import Variable
import numpy as np
from math import exp
from nerfstudio.cameras.rays import RaySamples
from nerfstudio.field_components.field_heads import FieldHeadNames
class RaySamples(TensorDataclass):
    """Samples along a ray"""
    frustums: Frustums
    """Frustums along ray."""
    camera_indices: Optional[TensorType["bs":..., 1]] = None
    """Camera index."""
    deltas: Optional[TensorType["bs":..., 1]] = None
    """"width" of each sample."""
    spacing_starts: Optional[TensorType["bs":..., "num_samples", 1]] = None
    """Start of normalized bin edges along ray [0,1], before warping is applied, ie. linear in disparity sampling."""
    spacing_ends: Optional[TensorType["bs":..., "num_samples", 1]] = None
    """End of normalized bin edges along ray [0,1], before warping is applied, ie. linear in disparity sampling."""
    spacing_to_euclidean_fn: Optional[Callable] = None
    """Function to convert bins to euclidean distance."""
    metadata: Optional[Dict[str, TensorType["bs":..., "latent_dims"]]] = None
    """additional information relevant to generating ray samples"""
    times: Optional[TensorType[..., 1]] = None
    """Times at which rays are sampled"""
    def get_alphas(self, densities: TensorType[..., "num_samples", 1]) -> TensorType[..., "num_samples", 1]:
        """Return alpha values based on predicted densities.
        Args:
            densities: Predicted densities for samples along ray
        Returns:
            Alphas for each sample
        """
        delta_density = self.deltas * densities
        # Standard volume-rendering opacity: alpha = 1 - exp(-sigma * delta).
        alphas = 1 - torch.exp(-delta_density)
        return alphas
    def get_weights(self, densities: TensorType[..., "num_samples", 1]) -> TensorType[..., "num_samples", 1]:
        """Return weights based on predicted densities
        Args:
            densities: Predicted densities for samples along ray
        Returns:
            Weights for each sample
        """
        delta_density = self.deltas * densities
        alphas = 1 - torch.exp(-delta_density)
        # Optical depth accumulated BEFORE each sample (exclusive prefix sum,
        # hence the prepended zero row), then exponentiated to transmittance.
        transmittance = torch.cumsum(delta_density[..., :-1, :], dim=-2)
        transmittance = torch.cat(
            [torch.zeros((*transmittance.shape[:1], 1, 1), device=densities.device), transmittance], dim=-2
        )
        transmittance = torch.exp(-transmittance)  # [..., "num_samples"]
        weights = alphas * transmittance  # [..., "num_samples"]
        return weights
    def get_weights_and_transmittance(
        self, densities: TensorType[..., "num_samples", 1]
    ) -> Tuple[TensorType[..., "num_samples", 1], TensorType[..., "num_samples", 1]]:
        """Return weights and transmittance based on predicted densities
        Args:
            densities: Predicted densities for samples along ray
        Returns:
            Weights and transmittance for each sample
        """
        delta_density = self.deltas * densities
        alphas = 1 - torch.exp(-delta_density)
        # Same exclusive-prefix transmittance as get_weights, but also returned.
        transmittance = torch.cumsum(delta_density[..., :-1, :], dim=-2)
        transmittance = torch.cat(
            [torch.zeros((*transmittance.shape[:1], 1, 1), device=densities.device), transmittance], dim=-2
        )
        transmittance = torch.exp(-transmittance)  # [..., "num_samples"]
        weights = alphas * transmittance  # [..., "num_samples"]
        return weights, transmittance
    def get_weights_from_alphas(self, alphas: TensorType[..., "num_samples", 1]) -> TensorType[..., "num_samples", 1]:
        """Return weights based on predicted alphas
        Args:
            alphas: Predicted alphas (maybe from sdf) for samples along ray
        Returns:
            Weights for each sample
        """
        # Cumulative product of (1 - alpha); the 1e-7 epsilon keeps the product
        # from collapsing to exactly zero when some alpha == 1.
        # NOTE(review): dims are hard-coded (dim=1, [:, :-1, :]) here, unlike the
        # `...`-based density variants — assumes a 3D (rays, samples, 1) layout.
        transmittance = torch.cumprod(
            torch.cat([torch.ones((*alphas.shape[:1], 1, 1), device=alphas.device), 1.0 - alphas + 1e-7], 1), 1
        )  # [..., "num_samples"]
        weights = alphas * transmittance[:, :-1, :]  # [..., "num_samples"]
        return weights
    def get_weights_and_transmittance_from_alphas(
        self, alphas: TensorType[..., "num_samples", 1]
    ) -> Tuple[TensorType[..., "num_samples", 1], TensorType[..., "num_samples+1", 1]]:
        """Return weights and transmittance based on predicted alphas
        Args:
            alphas: Predicted alphas (maybe from sdf) for samples along ray
        Returns:
            Weights and transmittance for each sample
        """
        # Same as get_weights_from_alphas, but the (length num_samples+1)
        # transmittance is also returned.
        transmittance = torch.cumprod(
            torch.cat([torch.ones((*alphas.shape[:1], 1, 1), device=alphas.device), 1.0 - alphas + 1e-7], 1), 1
        )  # [..., "num_samples"]
        weights = alphas * transmittance[:, :-1, :]  # [..., "num_samples"]
        return weights, transmittance
The provided code snippet includes necessary dependencies for implementing the `nerfstudio_distortion_loss` function. Write a Python function `def nerfstudio_distortion_loss( ray_samples: RaySamples, densities: TensorType["bs":..., "num_samples", 1] = None, weights: TensorType["bs":..., "num_samples", 1] = None, ) -> TensorType["bs":..., 1]` to solve the following problem:
Ray based distortion loss proposed in MipNeRF-360. Returns distortion Loss. .. math:: \\mathcal{L}(\\mathbf{s}, \\mathbf{w}) =\\iint\\limits_{-\\infty}^{\\,\\,\\,\\infty} \\mathbf{w}_\\mathbf{s}(u)\\mathbf{w}_\\mathbf{s}(v)|u - v|\\,d_{u}\\,d_{v} where :math:`\\mathbf{w}_\\mathbf{s}(u)=\\sum_i w_i \\mathbb{1}_{[\\mathbf{s}_i, \\mathbf{s}_{i+1})}(u)` is the weight at location :math:`u` between bin locations :math:`s_i` and :math:`s_{i+1}`. Args: ray_samples: Ray samples to compute loss over densities: Predicted sample densities weights: Predicted weights from densities and sample locations
Here is the function:
def nerfstudio_distortion_loss(
    ray_samples: RaySamples,
    densities: TensorType["bs":..., "num_samples", 1] = None,
    weights: TensorType["bs":..., "num_samples", 1] = None,
) -> TensorType["bs":..., 1]:
    """Ray based distortion loss proposed in MipNeRF-360. Returns distortion Loss.
    .. math::
        \\mathcal{L}(\\mathbf{s}, \\mathbf{w}) =\\iint\\limits_{-\\infty}^{\\,\\,\\,\\infty}
        \\mathbf{w}_\\mathbf{s}(u)\\mathbf{w}_\\mathbf{s}(v)|u - v|\\,d_{u}\\,d_{v}
    where :math:`\\mathbf{w}_\\mathbf{s}(u)=\\sum_i w_i \\mathbb{1}_{[\\mathbf{s}_i, \\mathbf{s}_{i+1})}(u)`
    is the weight at location :math:`u` between bin locations :math:`s_i` and :math:`s_{i+1}`.
    Args:
        ray_samples: Ray samples to compute loss over
        densities: Predicted sample densities
        weights: Predicted weights from densities and sample locations
    """
    # Exactly one of `densities` / `weights` should be supplied: the asserts
    # forbid both, and providing neither would fail below — TODO confirm intent.
    if torch.is_tensor(densities):
        assert not torch.is_tensor(weights), "Cannot use both densities and weights"
        # Compute the weight at each sample location
        weights = ray_samples.get_weights(densities)
    if torch.is_tensor(weights):
        assert not torch.is_tensor(densities), "Cannot use both densities and weights"
    starts = ray_samples.spacing_starts
    ends = ray_samples.spacing_ends
    assert starts is not None and ends is not None, "Ray samples must have spacing starts and ends"
    midpoints = (starts + ends) / 2.0  # (..., num_samples, 1)
    # Inter-sample term: all-pairs |midpoint_i - midpoint_j| weighted by w_i * w_j.
    loss = (
        weights * weights[..., None, :, 0] * torch.abs(midpoints - midpoints[..., None, :, 0])
    )  # (..., num_samples, num_samples)
    loss = torch.sum(loss, dim=(-1, -2))[..., None]  # (..., num_samples)
    # Intra-sample term: each bin's self-interaction, proportional to bin width.
    loss = loss + 1 / 3.0 * torch.sum(weights**2 * (ends - starts), dim=-2)
    return loss
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.