id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
22,926 | from . import *
def build_scenario(builder):
  """Full 11-vs-11 game against a strong (0.95 difficulty) right team."""
  builder.config().game_duration = 3000
  builder.config().right_team_difficulty = 0.95
  builder.config().deterministic = False
  # Swap sides every other episode so both halves of the pitch are seen.
  if builder.EpisodeNumber() % 2 == 0:
    first_team = Team.e_Left
    second_team = Team.e_Right
  else:
    first_team = Team.e_Right
    second_team = Team.e_Left
  builder.SetTeam(first_team)
  builder.AddPlayer(-1.000000, 0.000000, e_PlayerRole_GK)
  builder.AddPlayer(0.000000, 0.020000, e_PlayerRole_RM)
  builder.AddPlayer(0.000000, -0.020000, e_PlayerRole_CF)
  builder.AddPlayer(-0.422000, -0.19576, e_PlayerRole_LB)
  builder.AddPlayer(-0.500000, -0.06356, e_PlayerRole_CB)
  builder.AddPlayer(-0.500000, 0.063559, e_PlayerRole_CB)
  builder.AddPlayer(-0.422000, 0.195760, e_PlayerRole_RB)
  builder.AddPlayer(-0.184212, -0.10568, e_PlayerRole_CM)
  builder.AddPlayer(-0.267574, 0.000000, e_PlayerRole_CM)
  builder.AddPlayer(-0.184212, 0.105680, e_PlayerRole_CM)
  builder.AddPlayer(-0.010000, -0.21610, e_PlayerRole_LM)
  builder.SetTeam(second_team)
  builder.AddPlayer(-1.000000, 0.000000, e_PlayerRole_GK)
  builder.AddPlayer(-0.050000, 0.000000, e_PlayerRole_RM)
  builder.AddPlayer(-0.010000, 0.216102, e_PlayerRole_CF)
  builder.AddPlayer(-0.422000, -0.19576, e_PlayerRole_LB)
  builder.AddPlayer(-0.500000, -0.06356, e_PlayerRole_CB)
  builder.AddPlayer(-0.500000, 0.063559, e_PlayerRole_CB)
  builder.AddPlayer(-0.422000, 0.195760, e_PlayerRole_RB)
  builder.AddPlayer(-0.184212, -0.10568, e_PlayerRole_CM)
  builder.AddPlayer(-0.267574, 0.000000, e_PlayerRole_CM)
  builder.AddPlayer(-0.184212, 0.105680, e_PlayerRole_CM)
  builder.AddPlayer(-0.010000, -0.21610, e_PlayerRole_LM) | null |
22,927 | from . import *
def build_scenario(builder):
  """Full 11-vs-11 game with both AI teams at maximum (1.0) difficulty."""
  builder.config().game_duration = 3000
  builder.config().right_team_difficulty = 1.0
  builder.config().left_team_difficulty = 1.0
  builder.config().deterministic = False
  # Swap sides every other episode so both halves of the pitch are seen.
  if builder.EpisodeNumber() % 2 == 0:
    first_team = Team.e_Left
    second_team = Team.e_Right
  else:
    first_team = Team.e_Right
    second_team = Team.e_Left
  builder.SetTeam(first_team)
  builder.AddPlayer(-1.000000, 0.000000, e_PlayerRole_GK)
  builder.AddPlayer(0.000000, 0.020000, e_PlayerRole_RM)
  builder.AddPlayer(0.000000, -0.020000, e_PlayerRole_CF)
  builder.AddPlayer(-0.422000, -0.19576, e_PlayerRole_LB)
  builder.AddPlayer(-0.500000, -0.06356, e_PlayerRole_CB)
  builder.AddPlayer(-0.500000, 0.063559, e_PlayerRole_CB)
  builder.AddPlayer(-0.422000, 0.195760, e_PlayerRole_RB)
  builder.AddPlayer(-0.184212, -0.10568, e_PlayerRole_CM)
  builder.AddPlayer(-0.267574, 0.000000, e_PlayerRole_CM)
  builder.AddPlayer(-0.184212, 0.105680, e_PlayerRole_CM)
  builder.AddPlayer(-0.010000, -0.21610, e_PlayerRole_LM)
  builder.SetTeam(second_team)
  builder.AddPlayer(-1.000000, 0.000000, e_PlayerRole_GK)
  builder.AddPlayer(-0.050000, 0.000000, e_PlayerRole_RM)
  builder.AddPlayer(-0.010000, 0.216102, e_PlayerRole_CF)
  builder.AddPlayer(-0.422000, -0.19576, e_PlayerRole_LB)
  builder.AddPlayer(-0.500000, -0.06356, e_PlayerRole_CB)
  builder.AddPlayer(-0.500000, 0.063559, e_PlayerRole_CB)
  builder.AddPlayer(-0.422000, 0.195760, e_PlayerRole_RB)
  builder.AddPlayer(-0.184212, -0.10568, e_PlayerRole_CM)
  builder.AddPlayer(-0.267574, 0.000000, e_PlayerRole_CM)
  builder.AddPlayer(-0.184212, 0.105680, e_PlayerRole_CM)
  builder.AddPlayer(-0.010000, -0.21610, e_PlayerRole_LM) | null |
22,928 | from . import *
def build_scenario(builder):
  """Small 5-vs-5 game vs. very weak (0.05) AI; keepers are not controllable."""
  builder.config().game_duration = 3000
  builder.config().right_team_difficulty = 0.05
  builder.config().left_team_difficulty = 0.05
  builder.config().deterministic = False
  # Swap sides every other episode so both halves of the pitch are seen.
  if builder.EpisodeNumber() % 2 == 0:
    first_team = Team.e_Left
    second_team = Team.e_Right
  else:
    first_team = Team.e_Right
    second_team = Team.e_Left
  builder.SetTeam(first_team)
  builder.AddPlayer(-1.000000, 0.000000, e_PlayerRole_GK, controllable=False)
  builder.AddPlayer(0.000000, 0.020000, e_PlayerRole_RM)
  builder.AddPlayer(0.000000, -0.020000, e_PlayerRole_CF)
  builder.AddPlayer(-0.1, -0.1, e_PlayerRole_LB)
  builder.AddPlayer(-0.1, 0.1, e_PlayerRole_CB)
  builder.SetTeam(second_team)
  builder.AddPlayer(-1.000000, 0.000000, e_PlayerRole_GK, controllable=False)
  builder.AddPlayer(-0.04, 0.040000, e_PlayerRole_RM)
  builder.AddPlayer(-0.04, -0.040000, e_PlayerRole_CF)
  builder.AddPlayer(-0.1, -0.1, e_PlayerRole_LB)
  builder.AddPlayer(-0.1, 0.1, e_PlayerRole_CB) | null |
22,929 | from . import *
def build_scenario(builder):
  """Short drill: left team with the ball near midfield vs. five right players."""
  builder.config().game_duration = 400
  builder.config().deterministic = False
  builder.config().offsides = False
  # Restart the episode as soon as the attempt is resolved.
  builder.config().end_episode_on_score = True
  builder.config().end_episode_on_out_of_play = True
  builder.config().end_episode_on_possession_change = True
  builder.SetBallPosition(0.02, 0.0)
  builder.SetTeam(Team.e_Left)
  builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK)
  builder.AddPlayer(0.0, 0.0, e_PlayerRole_CB)
  builder.SetTeam(Team.e_Right)
  builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK)
  builder.AddPlayer(0.12, 0.2, e_PlayerRole_LB)
  builder.AddPlayer(0.12, 0.1, e_PlayerRole_CB)
  builder.AddPlayer(0.12, 0.0, e_PlayerRole_CM)
  builder.AddPlayer(0.12, -0.1, e_PlayerRole_CB)
  builder.AddPlayer(0.12, -0.2, e_PlayerRole_RB) | null |
22,930 | from . import *
episode = 0
def build_scenario(builder):
  """11-vs-11 start with the ball at the left defense; right outfielders are lazy."""
  # NOTE(review): this module-level counter is incremented but never read in
  # this snippet — presumably used elsewhere or left over; verify.
  global episode
  episode += 1
  builder.config().game_duration = 3000
  builder.config().deterministic = False
  builder.config().offsides = False
  builder.config().end_episode_on_score = True
  builder.config().end_episode_on_out_of_play = True
  builder.config().end_episode_on_possession_change = True
  # Ball starts near the left team's left center-back.
  builder.SetBallPosition(-0.48, -0.06356)
  builder.SetTeam(Team.e_Left)
  builder.AddPlayer(-1.000000, 0.000000, e_PlayerRole_GK)
  builder.AddPlayer(0.000000, 0.020000, e_PlayerRole_RM)
  builder.AddPlayer(0.000000, -0.020000, e_PlayerRole_CF)
  builder.AddPlayer(-0.422000, -0.19576, e_PlayerRole_LB)
  builder.AddPlayer(-0.500000, -0.06356, e_PlayerRole_CB)
  builder.AddPlayer(-0.500000, 0.063559, e_PlayerRole_CB)
  builder.AddPlayer(-0.422000, 0.195760, e_PlayerRole_RB)
  builder.AddPlayer(-0.184212, -0.10568, e_PlayerRole_CM)
  builder.AddPlayer(-0.267574, 0.000000, e_PlayerRole_CM)
  builder.AddPlayer(-0.184212, 0.105680, e_PlayerRole_CM)
  builder.AddPlayer(-0.010000, -0.21610, e_PlayerRole_LM)
  # All right players are lazy (i.e., they don't move, except the keeper)
  builder.SetTeam(Team.e_Right)
  builder.AddPlayer(-1.000000, 0.000000, e_PlayerRole_GK)
  builder.AddPlayer(-0.050000, 0.000000, e_PlayerRole_RM, True)
  builder.AddPlayer(-0.010000, 0.216102, e_PlayerRole_CF, True)
  builder.AddPlayer(-0.422000, -0.19576, e_PlayerRole_LB, True)
  builder.AddPlayer(-0.500000, -0.06356, e_PlayerRole_CB, True)
  builder.AddPlayer(-0.500000, 0.063559, e_PlayerRole_CB, True)
  builder.AddPlayer(-0.422000, 0.195760, e_PlayerRole_RB, True)
  builder.AddPlayer(-0.184212, -0.10568, e_PlayerRole_CM, True)
  builder.AddPlayer(-0.267574, 0.000000, e_PlayerRole_CM, True)
  builder.AddPlayer(-0.184212, 0.105680, e_PlayerRole_CM, True)
  builder.AddPlayer(-0.010000, -0.21610, e_PlayerRole_LM, True) | null |
22,931 | from . import *
def build_scenario(builder):
  """Full 11-vs-11 game against a medium (0.6 difficulty) right team."""
  builder.config().game_duration = 3000
  builder.config().right_team_difficulty = 0.6
  builder.config().deterministic = False
  # Swap sides every other episode so both halves of the pitch are seen.
  if builder.EpisodeNumber() % 2 == 0:
    first_team = Team.e_Left
    second_team = Team.e_Right
  else:
    first_team = Team.e_Right
    second_team = Team.e_Left
  builder.SetTeam(first_team)
  builder.AddPlayer(-1.000000, 0.000000, e_PlayerRole_GK)
  builder.AddPlayer(0.000000, 0.020000, e_PlayerRole_RM)
  builder.AddPlayer(0.000000, -0.020000, e_PlayerRole_CF)
  builder.AddPlayer(-0.422000, -0.19576, e_PlayerRole_LB)
  builder.AddPlayer(-0.500000, -0.06356, e_PlayerRole_CB)
  builder.AddPlayer(-0.500000, 0.063559, e_PlayerRole_CB)
  builder.AddPlayer(-0.422000, 0.195760, e_PlayerRole_RB)
  builder.AddPlayer(-0.184212, -0.10568, e_PlayerRole_CM)
  builder.AddPlayer(-0.267574, 0.000000, e_PlayerRole_CM)
  builder.AddPlayer(-0.184212, 0.105680, e_PlayerRole_CM)
  builder.AddPlayer(-0.010000, -0.21610, e_PlayerRole_LM)
  builder.SetTeam(second_team)
  builder.AddPlayer(-1.000000, 0.000000, e_PlayerRole_GK)
  builder.AddPlayer(-0.050000, 0.000000, e_PlayerRole_RM)
  builder.AddPlayer(-0.010000, 0.216102, e_PlayerRole_CF)
  builder.AddPlayer(-0.422000, -0.19576, e_PlayerRole_LB)
  builder.AddPlayer(-0.500000, -0.06356, e_PlayerRole_CB)
  builder.AddPlayer(-0.500000, 0.063559, e_PlayerRole_CB)
  builder.AddPlayer(-0.422000, 0.195760, e_PlayerRole_RB)
  builder.AddPlayer(-0.184212, -0.10568, e_PlayerRole_CM)
  builder.AddPlayer(-0.267574, 0.000000, e_PlayerRole_CM)
  builder.AddPlayer(-0.184212, 0.105680, e_PlayerRole_CM)
  builder.AddPlayer(-0.010000, -0.21610, e_PlayerRole_LM) | null |
22,932 | from . import *
def build_scenario(builder):
  """Short drill: two left attackers near the right goal vs. keeper + one defender."""
  builder.config().game_duration = 400
  builder.config().deterministic = False
  builder.config().offsides = False
  # Restart the episode as soon as the attempt is resolved.
  builder.config().end_episode_on_score = True
  builder.config().end_episode_on_out_of_play = True
  builder.config().end_episode_on_possession_change = True
  builder.SetBallPosition(0.7, -0.28)
  builder.SetTeam(Team.e_Left)
  builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK)
  builder.AddPlayer(0.7, 0.0, e_PlayerRole_CB)
  builder.AddPlayer(0.7, -0.3, e_PlayerRole_CB)
  builder.SetTeam(Team.e_Right)
  builder.AddPlayer(-1.0, 0.0, e_PlayerRole_GK)
  builder.AddPlayer(-0.75, 0.1, e_PlayerRole_CB) | null |
22,933 | from baselines.common.models import register
import sonnet as snt
import tensorflow.compat.v1 as tf
def gfootball_impala_cnn():
  """Returns a baselines-compatible network_fn building an IMPALA-style CNN.

  The network is a stack of (conv + max-pool + residual blocks) stages
  followed by a flatten, a 256-unit linear layer and a ReLU.
  """
  def network_fn(frame):
    # Convert to floats.
    frame = tf.to_float(frame)
    frame /= 255
    with tf.variable_scope('convnet'):
      conv_out = frame
      # (num_channels, num_residual_blocks) per stage.
      conv_layers = [(16, 2), (32, 2), (32, 2), (32, 2)]
      for i, (num_ch, num_blocks) in enumerate(conv_layers):
        # Downscale.
        conv_out = snt.Conv2D(num_ch, 3, stride=1, padding='SAME')(conv_out)
        conv_out = tf.nn.pool(
            conv_out,
            window_shape=[3, 3],
            pooling_type='MAX',
            padding='SAME',
            strides=[2, 2])
        # Residual block(s).
        for j in range(num_blocks):
          with tf.variable_scope('residual_%d_%d' % (i, j)):
            block_input = conv_out
            conv_out = tf.nn.relu(conv_out)
            conv_out = snt.Conv2D(num_ch, 3, stride=1, padding='SAME')(conv_out)
            conv_out = tf.nn.relu(conv_out)
            conv_out = snt.Conv2D(num_ch, 3, stride=1, padding='SAME')(conv_out)
            conv_out += block_input
    conv_out = tf.nn.relu(conv_out)
    conv_out = snt.BatchFlatten()(conv_out)
    conv_out = snt.Linear(256)(conv_out)
    conv_out = tf.nn.relu(conv_out)
    return conv_out
  return network_fn | null |
22,934 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import argparse
import gfootball.env as football_env
import gym
import ray
from ray import tune
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.tune.registry import register_env
def gen_policy(_):
  # RLlib policy spec tuple: (policy_cls, obs_space, act_space, config);
  # None selects the default policy class. NOTE(review): obs_space and
  # act_space are not defined in this snippet — presumably module-level
  # globals set up by the training script; verify.
  return (None, obs_space, act_space, {}) | null |
22,935 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
from absl import app
from absl import flags
from baselines import logger
from baselines.bench import monitor
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
from baselines.ppo2 import ppo2
import gfootball.env as football_env
from gfootball.examples import models
FLAGS = flags.FLAGS
def create_single_football_env(iprocess):
  """Creates gfootball environment.

  Only worker 0 renders and writes dumps; the others run headless.
  """
  env = football_env.create_environment(
      env_name=FLAGS.level, stacked=('stacked' in FLAGS.state),
      rewards=FLAGS.reward_experiment,
      logdir=logger.get_dir(),
      write_goal_dumps=FLAGS.dump_scores and (iprocess == 0),
      write_full_episode_dumps=FLAGS.dump_full_episodes and (iprocess == 0),
      render=FLAGS.render and (iprocess == 0),
      dump_frequency=50 if FLAGS.render and iprocess == 0 else 0)
  # Wrap with a per-process Monitor so episode stats land in separate files.
  env = monitor.Monitor(env, logger.get_dir() and os.path.join(logger.get_dir(),
                                                               str(iprocess)))
  return env
The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train(_)` to solve the following problem:
Trains a PPO2 policy.
Here is the function:
def train(_):
  """Trains a PPO2 policy."""
  # One football environment per subprocess worker.
  vec_env = SubprocVecEnv([
      (lambda _i=i: create_single_football_env(_i))
      for i in range(FLAGS.num_envs)
  ], context=None)
  # Import tensorflow after we create environments. TF is not fork-safe, and
  # we could be using TF as part of environment if one of the players is
  # controlled by an already trained model.
  import tensorflow.compat.v1 as tf
  ncpu = multiprocessing.cpu_count()
  config = tf.ConfigProto(allow_soft_placement=True,
                          intra_op_parallelism_threads=ncpu,
                          inter_op_parallelism_threads=ncpu)
  config.gpu_options.allow_growth = True
  # Enter a default session so that ppo2.learn picks it up.
  tf.Session(config=config).__enter__()
  ppo2.learn(network=FLAGS.policy,
             total_timesteps=FLAGS.num_timesteps,
             env=vec_env,
             seed=FLAGS.seed,
             nsteps=FLAGS.nsteps,
             nminibatches=FLAGS.nminibatches,
             noptepochs=FLAGS.noptepochs,
             max_grad_norm=FLAGS.max_grad_norm,
             gamma=FLAGS.gamma,
             ent_coef=FLAGS.ent_coef,
             lr=FLAGS.lr,
             log_interval=1,
             save_interval=FLAGS.save_interval,
             cliprange=FLAGS.cliprange,
             load_path=FLAGS.load_path) | Trains a PPO2 policy. |
22,936 | import random
import string
import time
import urllib.request
from gfootball.eval_server import config
import grpc
def get_grpc_channel(server):
  """Returns an insecure gRPC channel to 'server' with keepalive enabled."""
  # Send a keepalive ping every 10 seconds and allow an unlimited number of
  # pings without data, so long-idle connections are not dropped.
  options = (('grpc.keepalive_time_ms', 10000),
             ('grpc.http2.max_pings_without_data', 0))
  return grpc.insecure_channel(server, options=options) | null |
22,937 | import random
import string
import time
import urllib.request
from gfootball.eval_server import config
import grpc
def get_random_string(length=10, append_timestamp=True):
  """Returns a random alphanumeric identifier.

  Args:
    length: number of random characters to draw.
    append_timestamp: when True, append '_<unix time>' so identifiers
      generated at different times never collide.

  Returns:
    The generated string.
  """
  alphabet = string.ascii_lowercase + string.ascii_uppercase + string.digits
  token = ''.join(random.choice(alphabet) for _ in range(length))
  if not append_timestamp:
    return token
  return '{}_{}'.format(token, int(time.time()))
def get_master_address(track='default'):
  """Fetches the current master server IP and returns it as 'ip:port'."""
  # A '?<random string>' query suffix is appended to defeat caching: even
  # with server-side caching disabled on the file, stale contents were
  # still being served.
  response = urllib.request.urlopen(
      config.master_address_public_path + '_' + track + '?' +
      get_random_string())
  ip = response.read().decode('utf-8').strip()
  return '{}:{}'.format(ip, config.grpc_port) | null |
22,938 | import grpc
from gfootball.eval_server.proto import master_pb2 as gfootball_dot_eval__server_dot_proto_dot_master__pb2
def add_MasterServicer_to_server(servicer, server):
  """Registers the Master service handlers (gRPC protoc-generated code)."""
  rpc_method_handlers = {
      'StartGame': grpc.unary_unary_rpc_method_handler(
          servicer.StartGame,
          request_deserializer=gfootball_dot_eval__server_dot_proto_dot_master__pb2.StartGameRequest.FromString,
          response_serializer=gfootball_dot_eval__server_dot_proto_dot_master__pb2.StartGameResponse.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'gfootball.eval_server.Master', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,)) | null |
22,939 | import grpc
from gfootball.eval_server.proto import game_server_pb2 as gfootball_dot_eval__server_dot_proto_dot_game__server__pb2
def add_GameServerServicer_to_server(servicer, server):
  """Registers the GameServer service handlers (gRPC protoc-generated code)."""
  rpc_method_handlers = {
      'GetEnvResult': grpc.unary_unary_rpc_method_handler(
          servicer.GetEnvResult,
          request_deserializer=gfootball_dot_eval__server_dot_proto_dot_game__server__pb2.GetEnvResultRequest.FromString,
          response_serializer=gfootball_dot_eval__server_dot_proto_dot_game__server__pb2.GetEnvResultResponse.SerializeToString,
      ),
      'Step': grpc.unary_unary_rpc_method_handler(
          servicer.Step,
          request_deserializer=gfootball_dot_eval__server_dot_proto_dot_game__server__pb2.StepRequest.FromString,
          response_serializer=gfootball_dot_eval__server_dot_proto_dot_game__server__pb2.StepResponse.SerializeToString,
      ),
      'GetCapacity': grpc.unary_unary_rpc_method_handler(
          servicer.GetCapacity,
          request_deserializer=gfootball_dot_eval__server_dot_proto_dot_game__server__pb2.GetCapacityRequest.FromString,
          response_serializer=gfootball_dot_eval__server_dot_proto_dot_game__server__pb2.GetCapacityResponse.SerializeToString,
      ),
      'CreateGame': grpc.unary_unary_rpc_method_handler(
          servicer.CreateGame,
          request_deserializer=gfootball_dot_eval__server_dot_proto_dot_game__server__pb2.CreateGameRequest.FromString,
          response_serializer=gfootball_dot_eval__server_dot_proto_dot_game__server__pb2.CreateGameResponse.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'gfootball.eval_server.GameServer', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,)) | null |
22,940 | import random
from absl import app
from absl import flags
from absl import logging
import gfootball.env as football_env
from gfootball.env import football_action_set
import grpc
import numpy as np
import tensorflow.compat.v2 as tf
FLAGS = flags.FLAGS
def random_actions(obs):
  """Returns one uniformly random action id per controlled player."""
  # A 3-dim observation means a single agent; otherwise the first axis is
  # the number of controlled agents.
  num_players = 1 if len(obs.shape) == 3 else obs.shape[0]
  a = []
  for _ in range(num_players):
    # NOTE(review): NUM_ACTIONS is not defined in this snippet — presumably
    # a module-level constant; verify.
    a.append(random.randint(0, NUM_ACTIONS - 1))
  return a
def generate_actions(obs, model):
  """Runs the model once per controlled agent and collects the actions.

  NOTE(review): seed_rl_preprocessing is not defined in this snippet —
  presumably a module-level helper; verify.
  """
  a = []
  # Single agent case
  if len(obs.shape) == 3:
    a.append(model(seed_rl_preprocessing(obs))[0][0].numpy())
  else:
    # Multiagent -> first dimension is a number of agents you control.
    for x in range(obs.shape[0]):
      a.append(model(seed_rl_preprocessing(obs[x]))[0][0].numpy())
  return a
def get_inference_model(inference_model):
  """Returns an obs -> actions callable: a SavedModel policy, or random."""
  # No model path (or the special 'random' user) falls back to random play.
  if not inference_model or FLAGS.username == 'random':
    return random_actions
  model = tf.saved_model.load(inference_model)
  return lambda obs: generate_actions(obs, model) | null |
22,941 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from gfootball_engine import e_BackendAction
import numpy
from six.moves import range
# NOTE(review): CoreAction and the action_* names used below are not defined
# in this snippet — they presumably come from the surrounding
# football_action_set module; verify.
action_left = CoreAction(
    e_BackendAction.left, "left", sticky=True, directional=True)
action_release_direction = CoreAction(
    e_BackendAction.release_direction, "release_direction", directional=True)
# Maps each press action to its matching release action and vice versa.
reverse_action_mapping = {
    action_long_pass: action_release_long_pass,
    action_high_pass: action_release_high_pass,
    action_short_pass: action_release_short_pass,
    action_shot: action_release_shot,
    action_keeper_rush: action_release_keeper_rush,
    action_sliding: action_release_sliding,
    action_pressure: action_release_pressure,
    action_team_pressure: action_release_team_pressure,
    action_switch: action_release_switch,
    action_sprint: action_release_sprint,
    action_dribble: action_release_dribble,
    action_release_long_pass: action_long_pass,
    action_release_high_pass: action_high_pass,
    action_release_short_pass: action_short_pass,
    action_release_shot: action_shot,
    action_release_keeper_rush: action_keeper_rush,
    action_release_sliding: action_sliding,
    action_release_pressure: action_pressure,
    action_release_team_pressure: action_team_pressure,
    action_release_switch: action_switch,
    action_release_sprint: action_sprint,
    action_release_dribble: action_dribble
}
def disable_action(action):
  """Returns the action that cancels/releases the given action."""
  # Sanity check that 'action' has the same attribute set as a known action.
  assert set(action.__dict__) == set(action_left.__dict__)
  # Any directional action is cancelled by the single release_direction.
  if action._directional:
    return action_release_direction
  return reverse_action_mapping[action] | null |
22,942 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from gfootball.env import football_action_set
import numpy as np
def rotate_3d_point(point):
  """Rotate a single 3d point by 180 degrees around the field center.

  The center of the field is assumed to be the origin (0, 0), so the
  rotation negates x and y while leaving the height z unchanged.

  Args:
    point: [x, y, z] coordinates of one point.

  Returns:
    np.array with the rotated [x, y, z] coordinates.
  """
  return np.array([-point[0], -point[1], point[2]])
def flip_team_observation(observation, result, config, from_team, to_team):
  """Rotates team-specific observations.

  Copies every '<from_team>_*' entry of 'observation' into the matching
  '<to_team>_*' key of 'result', rotating positional data by 180 degrees.
  NOTE(review): rotate_points and rotate_sticky_actions are not defined in
  this snippet — presumably sibling helpers in this module; verify.
  """
  result['{}_team'.format(to_team)] = rotate_points(
      observation['{}_team'.format(from_team)])
  result['{}_team_direction'.format(to_team)] = rotate_points(
      observation['{}_team_direction'.format(from_team)])
  result['{}_team_tired_factor'.format(to_team)] = observation[
      '{}_team_tired_factor'.format(from_team)]
  result['{}_team_active'.format(to_team)] = observation[
      '{}_team_active'.format(from_team)]
  result['{}_team_yellow_card'.format(to_team)] = observation[
      '{}_team_yellow_card'.format(from_team)]
  result['{}_team_roles'.format(to_team)] = observation['{}_team_roles'.format(
      from_team)]
  # Note: '_team_active' is assigned twice in the original code; the second
  # assignment is redundant but harmless.
  result['{}_team_active'.format(to_team)] = observation[
      '{}_team_active'.format(from_team)]
  result['{}_team_designated_player'.format(to_team)] = observation[
      '{}_team_designated_player'.format(from_team)]
  if '{}_agent_controlled_player'.format(from_team) in observation:
    result['{}_agent_controlled_player'.format(to_team)] = observation[
        '{}_agent_controlled_player'.format(from_team)]
  if '{}_agent_sticky_actions'.format(from_team) in observation:
    result['{}_agent_sticky_actions'.format(to_team)] = [
        rotate_sticky_actions(sticky, config)
        for sticky in observation['{}_agent_sticky_actions'.format(from_team)]
    ]
The provided code snippet includes necessary dependencies for implementing the `flip_observation` function. Write a Python function `def flip_observation(observation, config)` to solve the following problem:
Observation corresponding to the field rotated by 180 degrees.
Here is the function:
def flip_observation(observation, config):
  """Observation corresponding to the field rotated by 180 degrees."""
  flipped_observation = {}
  flipped_observation['ball'] = rotate_3d_point(observation['ball'])
  flipped_observation['ball_direction'] = rotate_3d_point(
      observation['ball_direction'])
  flipped_observation['ball_rotation'] = observation['ball_rotation']
  # Ownership flips between teams (0 <-> 1); -1 (no owner) is preserved.
  flipped_observation['ball_owned_team'] = 1 - observation[
      'ball_owned_team'] if observation['ball_owned_team'] > -1 else -1
  flipped_observation['ball_owned_player'] = observation['ball_owned_player']
  flipped_observation['score'] = [
      observation['score'][1], observation['score'][0]
  ]
  flipped_observation['game_mode'] = observation['game_mode']
  flipped_observation['steps_left'] = observation['steps_left']
  # Swap and rotate both teams' observations.
  flip_team_observation(observation, flipped_observation, config, 'left',
                        'right')
  flip_team_observation(observation, flipped_observation, config, 'right',
                        'left')
  return flipped_observation | Observation corresponding to the field rotated by 180 degrees. |
22,943 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from gfootball.env import football_action_set
import numpy as np
def flip_single_action(action, config):
  # NOTE(review): this snippet looks truncated by the dataset extraction —
  # the outer function only defines a nested helper and implicitly returns
  # None, while the helper recurses into flip_single_action. Verify against
  # the original gfootball source before relying on it.
  def flip_action(action, config):
    if isinstance(action, np.ndarray) or isinstance(action, list):
      return [flip_single_action(a, config) for a in action]
    return flip_single_action(action, config) | null |
22,944 | import pygame
# Registered (controller_kind, controller_index) pairs, in insertion order.
_controllers = []
def add_controller(controller_kind, controller_index=None):
  """Registers a controller so its events can be routed via the event queue."""
  global _controllers
  _controllers.append((controller_kind, controller_index)) | null |
22,945 | import pygame
# Pending pygame events not yet consumed by any controller.
_queue = []
# Registered (controller_kind, controller_index) pairs.
_controllers = []
def fits(event, controller_kind, controller_index):
  """True if a pygame event belongs to the given controller.

  NOTE(review): KEYBOARD_EVENTS and GAMEPAD_EVENTS are not defined in this
  snippet — presumably module-level sets of pygame event types; verify.
  """
  if controller_kind == 'keyboard':
    return event.type in KEYBOARD_EVENTS
  if controller_kind == 'gamepad':
    # Gamepad events carry the joystick id of the device that emitted them.
    return event.type in GAMEPAD_EVENTS and event.joy == controller_index
  assert False, 'Unknown controller kind!'
def get(controller_kind, controller_index=None):
  """Returns pending events for one controller, keeping others' events queued.

  Events that match the requested controller are returned; events that match
  some other registered controller stay in the queue; events matching no
  registered controller are discarded.
  """
  global _queue
  global _controllers
  _queue.extend(pygame.event.get())
  result = []
  new_state = []
  for event in _queue:
    if fits(event, controller_kind, controller_index):
      result.append(event)
    else:
      for controller in _controllers:
        if fits(event, *controller):
          new_state.append(event)
          break
  _queue = new_state
  return result | null |
22,946 | from __future__ import print_function
import copy
import tempfile
import os
import platform
from absl import flags
import gfootball_engine as libgame
def parse_player_definition(definition):
  """Parses player definition.

  An example of player definition is: "agent:players=4" or "replay:path=...".

  Args:
    definition: a string defining a player

  Returns:
    A tuple (name, dict).
  """
  name = definition
  counts = {'left_players': 0, 'right_players': 0}
  if ':' in definition:
    # Windows replay paths may themselves contain ':' (e.g. "C:\\..."), so
    # everything after the 'replay:' prefix is taken verbatim as the path.
    is_windows_replay = (platform.system() == 'Windows'
                         and definition.startswith('replay:')
                         and definition.count(':') >= 2)
    if is_windows_replay:
      name = 'replay'
      params = definition.split('replay:')[-1]
    else:
      name, params = definition.split(':')
    for param in params.split(','):
      key, value = param.split('=')
      counts[key] = value
  # A definition with no explicit player counts controls one left player.
  if counts['left_players'] == 0 and counts['right_players'] == 0:
    counts['left_players'] = 1
  return name, counts
The provided code snippet includes necessary dependencies for implementing the `count_left_players` function. Write a Python function `def count_left_players(definition)` to solve the following problem:
Returns a number of left players given a definition.
Here is the function:
def count_left_players(definition):
  """Returns a number of left players given a definition."""
  # The parsed value may be a str (from 'left_players=N') or the default int.
  return int(parse_player_definition(definition)[1]['left_players']) | Returns a number of left players given a definition. |
22,947 | from __future__ import print_function
import copy
import tempfile
import os
import platform
from absl import flags
import gfootball_engine as libgame
def parse_player_definition(definition):
  """Parses player definition.

  An example of player definition is: "agent:players=4" or "replay:path=...".

  Args:
    definition: a string defining a player

  Returns:
    A tuple (name, dict).
  """
  players = {'left_players': 0, 'right_players': 0}
  if ':' not in definition:
    # Bare name, no parameters: defaults to one left player.
    players['left_players'] = 1
    return definition, players
  windows_replay = (platform.system() == 'Windows'
                    and definition.startswith('replay:')
                    and len(definition.split(':')) > 2)
  if windows_replay:
    # Keep the full path (which may contain ':') after the 'replay:' prefix.
    name, params = 'replay', definition.split('replay:')[-1]
  else:
    name, params = definition.split(':')
  for param in params.split(','):
    key, value = param.split('=')
    players[key] = value
  if players['left_players'] == 0 and players['right_players'] == 0:
    players['left_players'] = 1
  return name, players
The provided code snippet includes necessary dependencies for implementing the `count_right_players` function. Write a Python function `def count_right_players(definition)` to solve the following problem:
Returns a number of players given a definition.
Here is the function:
def count_right_players(definition):
  """Returns a number of players given a definition."""
  # The parsed value may be a str (from 'right_players=N') or the default int.
  return int(parse_player_definition(definition)[1]['right_players']) | Returns a number of players given a definition. |
22,948 | from __future__ import print_function
import copy
import tempfile
import os
import platform
from absl import flags
import gfootball_engine as libgame
def count_players(definition):
  """Returns a number of players given a definition."""
  _, parsed = parse_player_definition(definition)
  left = int(parsed['left_players'])
  right = int(parsed['right_players'])
  return left + right
The provided code snippet includes necessary dependencies for implementing the `get_agent_number_of_players` function. Write a Python function `def get_agent_number_of_players(players)` to solve the following problem:
Returns a total number of players controlled by an agent.
Here is the function:
def get_agent_number_of_players(players):
  """Returns a total number of players controlled by an agent."""
  # Only definitions of the form 'agent:...' contribute to the total.
  return sum([count_players(player) for player in players
              if player.startswith('agent')]) | Returns a total number of players controlled by an agent. |
22,949 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from gfootball.env import football_action_set
import numpy as np
from six.moves import range
# Default super-mini-map resolution (width x height in pixels).
SMM_WIDTH = 96
SMM_HEIGHT = 72
def get_smm_layers(config):
  # NOTE(review): SMM_LAYERS is not defined in this snippet — presumably a
  # module-level list of observation keys, one per minimap channel; verify.
  return SMM_LAYERS
def mark_points(frame, points):
  """Draw dots corresponding to 'points'.
  Args:
    frame: 2-d matrix representing one SMM channel ([y, x])
    points: a list of (x, y) coordinates to be marked
  """
  # NOTE(review): MINIMAP_NORM_* and _MARKER_VALUE are not defined in this
  # snippet — presumably module-level constants; verify.
  for p in range(len(points) // 2):
    # Map normalized field coordinates into frame pixel coordinates.
    x = int((points[p * 2] - MINIMAP_NORM_X_MIN) /
            (MINIMAP_NORM_X_MAX - MINIMAP_NORM_X_MIN) * frame.shape[1])
    y = int((points[p * 2 + 1] - MINIMAP_NORM_Y_MIN) /
            (MINIMAP_NORM_Y_MAX - MINIMAP_NORM_Y_MIN) * frame.shape[0])
    # Clamp to the frame so off-field points still render at the border.
    x = max(0, min(frame.shape[1] - 1, x))
    y = max(0, min(frame.shape[0] - 1, y))
    frame[y, x] = _MARKER_VALUE
The provided code snippet includes necessary dependencies for implementing the `generate_smm` function. Write a Python function `def generate_smm(observation, config=None, channel_dimensions=(SMM_WIDTH, SMM_HEIGHT))` to solve the following problem:
Returns a list of minimap observations given the raw features for each active player. Args: observation: raw features from the environment config: environment config channel_dimensions: resolution of SMM to generate Returns: (N, H, W, C) - shaped np array representing SMM. N stands for the number of players we are controlling.
Here is the function:
def generate_smm(observation, config=None,
                 channel_dimensions=(SMM_WIDTH, SMM_HEIGHT)):
  """Returns a list of minimap observations given the raw features for each
  active player.
  Args:
    observation: raw features from the environment
    config: environment config
    channel_dimensions: resolution of SMM to generate
  Returns:
    (N, H, W, C) - shaped np array representing SMM. N stands for the number of
    players we are controlling.
  """
  frame = np.zeros((len(observation), channel_dimensions[1],
                    channel_dimensions[0], len(get_smm_layers(config))),
                   dtype=np.uint8)
  for o_i, o in enumerate(observation):
    for index, layer in enumerate(get_smm_layers(config)):
      assert layer in o
      if layer == 'active':
        # 'active' is an index into left_team; -1 means no controlled player.
        if o[layer] == -1:
          continue
        mark_points(frame[o_i, :, :, index],
                    np.array(o['left_team'][o[layer]]).reshape(-1))
      else:
        mark_points(frame[o_i, :, :, index], np.array(o[layer]).reshape(-1))
  return frame | Returns a list of minimap observations given the raw features for each active player. Args: observation: raw features from the environment config: environment config channel_dimensions: resolution of SMM to generate Returns: (N, H, W, C) - shaped np array representing SMM. N stands for the number of players we are controlling. |
22,950 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import datetime
import os
import shutil
import tempfile
import timeit
import traceback
from absl import logging
from gfootball.env import constants as const
from gfootball.env import football_action_set
from gfootball.scenarios import e_PlayerRole_GK
import numpy as np
from six.moves import range
from six.moves import zip
import six.moves.cPickle
def write_players_state(writer, players_info):
  """Renders a per-player status table (sprint/dribble/direction/action)."""
  table_text = [["PLAYER", "SPRINT", "DRIBBLE", "DIRECTION", "ACTION"]]
  # Pixel widths of the five columns.
  widths = [65, 65, 70, 85, 85]
  # Sort the players according to the order they appear in observations
  for _, player_info in sorted(players_info.items()):
    table_text.append([
        (player_info['id'], player_info['color']),
        str(player_info.get("sprint", "-")),
        str(player_info.get("dribble", "-")),
        player_info.get("DIRECTION", "O"),
        player_info.get("ACTION", "-")])
  writer.write_table(table_text, widths, scale_factor=1.0, offset=10) | null |
22,951 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import datetime
import os
import shutil
import tempfile
import timeit
import traceback
from absl import logging
from gfootball.env import constants as const
from gfootball.env import football_action_set
from gfootball.scenarios import e_PlayerRole_GK
import numpy as np
from six.moves import range
from six.moves import zip
import six.moves.cPickle
try:
  import cv2
except ImportError:
  # NOTE(review): both branches import the same module — this looks like a
  # garbled extraction (the original presumably fell back to a bundled
  # OpenCV build here). Verify against the upstream source.
  import cv2
class TextWriter(object):
  """Helper for drawing text, tables and direction arrows onto a frame."""
  def __init__(self, frame, x, y=0, field_coords=False, color=(255, 255, 255)):
    # Image (numpy array) that all write_* methods draw into, in place.
    self._frame = frame
    if field_coords:
      # Map normalized field coordinates (x in [-1, 1], y around
      # [-0.43, 0.43]) to pixel coordinates of an 800x600 frame.
      x = 400 * (x + 1) - 10
      y = 695 * (y + 0.43)
    self._pos_x = int(x)
    self._pos_y = int(y) + 20
    self._color = color
    self._font = cv2.FONT_HERSHEY_SIMPLEX
    self._lineType = 2
    # Cell values equal to one of these names render as arrows, not text.
    self._arrow_types = ('top', 'top_right', 'right', 'bottom_right', 'bottom',
                         'bottom_left', 'left', 'top_left')
  def write(self, text, scale_factor=1, color=None):
    """Draws one line of text at the cursor and advances the cursor down."""
    textPos = (self._pos_x, self._pos_y)
    fontScale = 0.8 * scale_factor
    cv2.putText(self._frame, text, textPos, self._font, fontScale, color or self._color,
                self._lineType)
    self._pos_y += int(25 * scale_factor)
  def write_table(self, data, widths, scale_factor=1, offset=0):
    """Draws a table; cells may be str, (str, color) tuples, or arrow names."""
    # data is a list of rows. Each row is a list of strings.
    fontScale = 0.5 * scale_factor
    init_x = self._pos_x
    for row in data:
      assert (len(row) == len(widths))
      self._pos_x += offset
      for col, cell in enumerate(row):
        color = self._color
        if isinstance(cell, tuple):
          assert (len(cell) == 2)
          (text, color) = cell
        else:
          assert (isinstance(cell, str))
          text = cell
        if text in self._arrow_types:
          self.write_arrow(text, scale_factor=scale_factor)
        else:
          textPos = (self._pos_x, self._pos_y)
          cv2.putText(self._frame, text, textPos, self._font, fontScale, color,
                      self._lineType)
        self._pos_x += widths[col]
      # Carriage-return to the row start, then move down one row.
      self._pos_x = init_x
      self._pos_y += int(20 * scale_factor)
    self._pos_x = init_x
  def write_arrow(self, arrow_type, scale_factor=1):
    """Draws a small arrow glyph for one of the eight compass directions."""
    assert (arrow_type in self._arrow_types)
    thickness = 1
    # (start_x, start_y, end_x, end_y) pixel offsets relative to the cursor.
    arrow_offsets = {
        'top': (12, 0, 12, -16),
        'top_right': (4, -4, 16, -16),
        'right': (0, -10, 20, -10),
        'bottom_right': (4, -16, 16, -4),
        'bottom': (10, -16, 10, 0),
        'bottom_left': (12, -12, 0, 0),
        'left': (20, -10, 0, -10),
        'top_left': (16, -4, 4, -16)
    }
    (s_x, s_y, e_x,
     e_y) = tuple(int(v * scale_factor) for v in arrow_offsets[arrow_type])
    start_point = (self._pos_x + s_x, self._pos_y + s_y)
    end_point = (self._pos_x + e_x, self._pos_y + e_y)
    image = cv2.arrowedLine(self._frame, start_point, end_point, self._color,
                            thickness)
# Alias for the goalkeeper role enum value from the game engine; get_frame()
# compares player roles against it to label keepers with 'G'.
# NOTE(review): `libgame` is not imported in this snippet's header — presumably
# `import gfootball_engine as libgame` elsewhere; confirm.
e_PlayerRole_GK = libgame.e_PlayerRole.e_PlayerRole_GK
def get_frame(trace):
    """Returns an image frame visualizing one trace step.

    Uses the real rendered frame when present in the trace; otherwise draws a
    schematic 800x600 pitch with the ball ('B') and both teams' players
    (index digits, 'G' for goalkeepers).
    """
    if 'frame' in trace._trace['observation']:
        return trace._trace['observation']['frame']
    frame = np.uint8(np.zeros((600, 800, 3)))
    # Pitch outline plus the halfway line at x=399.
    corner1 = (0, 0)
    corner2 = (799, 0)
    corner3 = (799, 599)
    corner4 = (0, 599)
    line_color = (0, 255, 255)
    cv2.line(frame, corner1, corner2, line_color)
    cv2.line(frame, corner2, corner3, line_color)
    cv2.line(frame, corner3, corner4, line_color)
    cv2.line(frame, corner4, corner1, line_color)
    # NOTE(review): endpoint (399, 799) exceeds the 600-px frame height (cv2
    # clips it); (399, 599) was likely intended — confirm.
    cv2.line(frame, (399, 0), (399, 799), line_color)
    # Ball marker.
    writer = TextWriter(
        frame,
        trace['ball'][0],
        trace['ball'][1],
        field_coords=True,
        color=(248, 244, 236))
    writer.write('B')
    # Left team players (one color), then right team players (another color).
    for player_idx, player_coord in enumerate(trace['left_team']):
        writer = TextWriter(
            frame,
            player_coord[0],
            player_coord[1],
            field_coords=True,
            color=(238, 68, 47))
        letter = str(player_idx)
        if trace['left_team_roles'][player_idx] == e_PlayerRole_GK:
            letter = 'G'
        writer.write(letter)
    for player_idx, player_coord in enumerate(trace['right_team']):
        writer = TextWriter(
            frame,
            player_coord[0],
            player_coord[1],
            field_coords=True,
            color=(99, 172, 190))
        letter = str(player_idx)
        if trace['right_team_roles'][player_idx] == e_PlayerRole_GK:
            letter = 'G'
        writer.write(letter)
    return frame | null |
22,952 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import datetime
import os
import shutil
import tempfile
import timeit
import traceback
from absl import logging
from gfootball.env import constants as const
from gfootball.env import football_action_set
from gfootball.scenarios import e_PlayerRole_GK
import numpy as np
from six.moves import range
from six.moves import zip
import six.moves.cPickle
def softmax(x):
    """Softmax over axis 0 of *x*.

    NOTE(review): computed directly as exp(x)/sum(exp(x)); this can overflow
    for large inputs — subtracting x.max() first would be numerically safer.
    Verify callers' input range before changing.
    """
    return np.exp(x) / np.sum(np.exp(x), axis=0) | null |
22,953 | import importlib
import os
import pkgutil
import random
import sys
from absl import flags
from absl import logging
import gfootball_engine as libgame
def all_scenarios():
    """Returns the module names found in the sibling 'scenarios' directory."""
    path = os.path.abspath(__file__)
    path = os.path.join(os.path.dirname(os.path.dirname(path)), 'scenarios')
    scenarios = []
    for m in pkgutil.iter_modules([path]):
        # There was API change in pkgutil between Python 3.5 and 3.6...
        # (plain (finder, name, ispkg) tuples vs. ModuleInfo namedtuples; the
        # exact `__class__ == tuple` check distinguishes them, since a
        # namedtuple's class is a tuple *subclass*).
        if m.__class__ == tuple:
            scenarios.append(m[1])
        else:
            scenarios.append(m.name)
    return scenarios | null |
22,954 | from baselines.common.policies import build_policy
from gfootball.env import football_action_set
from gfootball.env import observation_preprocessing
from gfootball.env import player_base
from gfootball.examples import models
import gym
import joblib
import numpy as np
import tensorflow.compat.v1 as tf
The provided code snippet includes necessary dependencies for implementing the `_load_variables` function. Write a Python function `def _load_variables(load_path, sess, prefix='', remove_prefix=True)` to solve the following problem:
Loads variables from checkpoint of policy trained by baselines.
Here is the function:
def _load_variables(load_path, sess, prefix='', remove_prefix=True):
    """Loads variables from checkpoint of policy trained by baselines.

    load_path: joblib-pickled dict mapping variable names to values.
    prefix: only global variables whose name starts with this prefix are
    restored; when remove_prefix is True the prefix is stripped before the
    dict lookup.
    """
    # Forked from address below since we needed loading from different var names:
    # https://github.com/openai/baselines/blob/master/baselines/common/tf_util.py
    variables = [v for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
                 if v.name.startswith(prefix)]
    loaded_params = joblib.load(load_path)
    restores = []
    for v in variables:
        v_name = v.name[len(prefix):] if remove_prefix else v.name
        restores.append(v.assign(loaded_params[v_name]))
    # Run all assign ops in a single session call.
    sess.run(restores) | Loads variables from checkpoint of policy trained by baselines. |
22,955 | import torch
from thop import profile
import torchvision
import models
import argparse
def clever_format(nums, format="%.2f"):
    """Formats raw counts as strings with T/G/M/K/B suffixes.

    Returns a single string for a one-element input, otherwise a tuple.
    NOTE(review): thresholds are decimal (1e12, 1e9, ...) while divisors are
    binary (1024**4, ...), so e.g. 1.05e12 formats as "0.96T" — confirm this
    mixed convention is intended before changing it.
    """
    clever_nums = []
    for num in nums:
        if num > 1e12:
            clever_nums.append(format % (num / 1024 ** 4) + "T")
        elif num > 1e9:
            clever_nums.append(format % (num / 1024 ** 3) + "G")
        elif num > 1e6:
            clever_nums.append(format % (num / 1024 ** 2) + "M")
        elif num > 1e3:
            clever_nums.append(format % (num / 1024) + "K")
        else:
            clever_nums.append(format % num + "B")
    # Single element unwraps to a bare string; otherwise pack into a tuple.
    clever_nums = clever_nums[0] if len(clever_nums) == 1 else (*clever_nums, )
    return clever_nums | null |
22,956 | import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import models
class AverageMeter(object):
def __init__(self):
def reset(self):
def update(self, val, n=1):
def accuracy(output, target, topk=(1,)):
def train(train_loader, model, criterion, optimizer, epoch):
    """Runs one training epoch; returns (avg_loss, avg_top1, avg_top5).

    Relies on the module-level `args` (gpu, print_freq) and the AverageMeter /
    accuracy helpers defined elsewhere in this file.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    losses_batch = {}  # NOTE(review): never populated in this variant — dead local?
    # switch to train mode
    model.train()
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        if args.gpu is not None:
            input = input.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
        # compute output
        output = model(input)
        loss = criterion(output, target)
        # measure accuracy and record loss
        prec1, prec5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(prec1[0], input.size(0))
        top5.update(prec5[0], input.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                      epoch, i, len(train_loader), batch_time=batch_time,
                      data_time=data_time, loss=losses, top1=top1, top5=top5))
    return losses.avg, top1.avg, top5.avg | null |
22,957 | import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import models
class AverageMeter(object):
    """Tracks the most recent value and a running (count-weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zeroes all running statistics."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Folds in `val`, observed `n` times, and refreshes the mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    output: (batch, num_classes) scores; target: (batch,) class indices.
    Returns a list of one-element tensors holding percentages, one per k.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        # Top-maxk predicted classes per sample, transposed to (maxk, batch).
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # reshape(-1) instead of view(-1): the sliced comparison result is
            # not guaranteed to be contiguous, and view() raises on
            # non-contiguous tensors in modern PyTorch.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
def validate(val_loader, model, criterion):
    """Evaluates `model` over `val_loader`; returns (avg_top1, avg_top5).

    Mirrors train() but without gradients or optimizer steps. Relies on the
    module-level `args` (gpu, print_freq).
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        end = time.time()
        for i, (input, target) in enumerate(val_loader):
            if args.gpu is not None:
                input = input.cuda(args.gpu, non_blocking=True)
                target = target.cuda(args.gpu, non_blocking=True)
            # compute output
            output = model(input)
            loss = criterion(output, target)
            # measure accuracy and record loss
            prec1, prec5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(prec1[0], input.size(0))
            top5.update(prec5[0], input.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                          i, len(val_loader), batch_time=batch_time, loss=losses,
                          top1=top1, top5=top5))
        # Epoch summary line.
        print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
              .format(top1=top1, top5=top5))
    return top1.avg, top5.avg | null |
22,958 | import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import models
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Saves `state` under runs/<arch>_<action>/; copies to model_best on improvement.

    Relies on module-level `args` (arch, action). NOTE(review): assumes the
    target directory already exists — no os.makedirs here; confirm callers
    create it.
    """
    directory = "runs/%s/"%(args.arch + '_' + args.action)
    filename = directory + filename
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, directory + 'model_best.pth.tar') | null |
22,959 | import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import models
The provided code snippet includes necessary dependencies for implementing the `adjust_learning_rate` function. Write a Python function `def adjust_learning_rate(optimizer, epoch)` to solve the following problem:
Sets the learning rate to the initial LR decayed by 10 every 30 epochs
Here is the function:
def adjust_learning_rate(optimizer, epoch):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
    lr = args.lr * (0.1 ** (epoch // 30))
    # Apply the same LR to every parameter group.
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr | Sets the learning rate to the initial LR decayed by 10 every 30 epochs |
22,960 | import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import models
def data_save(root, file):
    """Appends (epoch, value) pairs from the dict `file` to the log at `root`.

    Only epochs newer than the last epoch already present are written.
    NOTE(review): os.mknod requires privileges on some platforms; a plain
    open(root, 'a') would be more portable — confirm before changing.
    """
    if not os.path.exists(root):
        os.mknod(root)
    file_temp = open(root, 'r')
    lines = file_temp.readlines()
    if not lines:
        epoch = -1
    else:
        # Each log line starts with "<epoch> <value>".
        epoch = lines[-1][:lines[-1].index(' ')]
        epoch = int(epoch)
    file_temp.close()
    file_temp = open(root, 'a')
    for line in file:
        # `line` is an epoch key of the dict, compared numerically to the
        # last logged epoch.
        if line > epoch:
            file_temp.write(str(line) + " " + str(file[line]) + '\n')
    file_temp.close() | null |
22,961 | import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import models
from thop import profile
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
losses_batch = {}
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
losses_batch[epoch] = losses.avg
loss_batch = open("loss_batch.txt", 'a')
for line in losses_batch:
loss_batch.write(str(epoch) + " " + str(line) + " " + str(losses_batch[line]) + '\n')
loss_batch.close()
return losses.avg, top1.avg, top5.avg | null |
22,962 | import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import models
from thop import profile
class AverageMeter(object):
def __init__(self):
def reset(self):
def update(self, val, n=1):
def accuracy(output, target, topk=(1,)):
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(val_loader):
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg, top5.avg | null |
22,963 | import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import models
from thop import profile
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
directory = "runs/%s/"%(args.arch + '_' + args.action)
filename = directory + filename
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, directory + 'model_best.pth.tar') | null |
22,964 | import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import models
from thop import profile
The provided code snippet includes necessary dependencies for implementing the `adjust_learning_rate` function. Write a Python function `def adjust_learning_rate(optimizer, epoch)` to solve the following problem:
Sets the learning rate to the initial LR decayed by 10 every 30 epochs
Here is the function:
def adjust_learning_rate(optimizer, epoch):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
    # NOTE(review): the docstring describes the commented-out step schedule;
    # the active schedule is exponential decay (2% per epoch).
    # lr = args.lr * (0.1 ** (epoch // 30))
    lr = args.lr * (0.98 ** epoch)
    print('lr = ', lr)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr | Sets the learning rate to the initial LR decayed by 10 every 30 epochs |
22,965 | import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import models
from thop import profile
def data_save(root, file):
if not os.path.exists(root):
os.mknod(root)
file_temp = open(root, 'r')
lines = file_temp.readlines()
if not lines:
epoch = -1
else:
epoch = lines[-1][:lines[-1].index(' ')]
epoch = int(epoch)
file_temp.close()
file_temp = open(root, 'a')
for line in file:
if line > epoch:
file_temp.write(str(line) + " " + str(file[line]) + '\n')
file_temp.close() | null |
22,966 | from torch import nn
from .eca_module import eca_layer
class ECA_MobileNetV2(nn.Module):
    """MobileNetV2 backbone whose inverted-residual blocks carry ECA attention."""

    def __init__(self, num_classes=1000, width_mult=1.0):
        super(ECA_MobileNetV2, self).__init__()
        block = InvertedResidual
        # Per stage: expansion t, base channels c, repeats n, first stride s.
        inverted_residual_setting = [
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            [6, 160, 3, 2],
            [6, 320, 1, 1],
        ]
        # Channel counts scale with width_mult; the head never shrinks below 1280.
        input_channel = int(32 * width_mult)
        self.last_channel = int(1280 * max(1.0, width_mult))
        # Stem conv.
        features = [ConvBNReLU(3, input_channel, stride=2)]
        # Inverted-residual stages.
        for t, c, n, s in inverted_residual_setting:
            output_channel = int(c * width_mult)
            # Narrow stages (c < 96) use ECA kernel size 1, wide stages 3.
            ksize = 1 if c < 96 else 3
            for i in range(n):
                stride = s if i == 0 else 1
                features.append(block(input_channel, output_channel, stride,
                                      expand_ratio=t, k_size=ksize))
                input_channel = output_channel
        # Final 1x1 projection up to last_channel.
        features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1))
        self.features = nn.Sequential(*features)
        self.classifier = nn.Sequential(
            nn.Dropout(0.25),
            nn.Linear(self.last_channel, num_classes),
        )
        self._init_weights()

    def _init_weights(self):
        """Kaiming init for convs, identity for BN, small normal for linear."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)

    def forward(self, x):
        x = self.features(x)
        # Global average pool over both spatial dims, then classify.
        x = x.mean(-1).mean(-1)
        return self.classifier(x)
The provided code snippet includes necessary dependencies for implementing the `eca_mobilenet_v2` function. Write a Python function `def eca_mobilenet_v2(pretrained=False, progress=True, **kwargs)` to solve the following problem:
Constructs an ECA_MobileNetV2 architecture. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet; progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def eca_mobilenet_v2(pretrained=False, progress=True, **kwargs):
    """
    Constructs a ECA_MobileNetV2 architecture from
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # NOTE(review): `pretrained`/`progress` are currently ignored — the
    # state-dict download below is commented out.
    model = ECA_MobileNetV2(**kwargs)
    # if pretrained:
    #     state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'],
    #                                           progress=progress)
    #     model.load_state_dict(state_dict)
    return model | Constructs a ECA_MobileNetV2 architecture from Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr |
22,967 | import torch.nn as nn
import math
from .eca_module import eca_layer
The provided code snippet includes necessary dependencies for implementing the `conv3x3` function. Write a Python function `def conv3x3(in_planes, out_planes, stride=1)` to solve the following problem:
3x3 convolution with padding
Here is the function:
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    # padding=1 preserves spatial size at stride 1; bias is omitted because a
    # BatchNorm layer follows at the call sites in this file.
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False) | 3x3 convolution with padding |
22,968 | import torch.nn as nn
import math
from .eca_module import eca_layer
class ECABasicBlock(nn.Module):
    """ResNet basic block (two 3x3 convs) followed by an ECA attention layer."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, k_size=3):
        super(ECABasicBlock, self).__init__()
        # conv1 carries the (optional) spatial stride; conv2 is always stride 1.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes, 1)
        self.bn2 = nn.BatchNorm2d(planes)
        self.eca = eca_layer(planes, k_size)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """conv-bn-relu -> conv-bn -> ECA, plus the (optionally projected) input."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.eca(self.bn2(self.conv2(out)))
        out += shortcut
        return self.relu(out)
class ResNet(nn.Module):
    """ResNet backbone built from ECA-augmented blocks (one k_size per stage)."""

    def __init__(self, block, layers, num_classes=1000, k_size=[3, 3, 3, 3]):
        self.inplanes = 64
        super(ResNet, self).__init__()
        # Stem: 7x7 conv + BN + ReLU + 3x3 max-pool (two stride-2 steps).
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages; stages 2-4 halve the spatial resolution.
        self.layer1 = self._make_layer(block, 64, layers[0], int(k_size[0]))
        self.layer2 = self._make_layer(block, 128, layers[1], int(k_size[1]),
                                       stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], int(k_size[2]),
                                       stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], int(k_size[3]),
                                       stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He-style init for convs; BatchNorm starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / fan_out))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, k_size, stride=1):
        """Builds one stage of `blocks` blocks; only the first may downsample."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut to match shape when stride/width changes.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        stage = [block(self.inplanes, planes, stride, downsample, k_size)]
        self.inplanes = planes * block.expansion
        stage.extend(block(self.inplanes, planes, k_size=k_size)
                     for _ in range(1, blocks))
        return nn.Sequential(*stage)

    def forward(self, x):
        # Stem.
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        # Residual stages.
        x = self.layer4(self.layer3(self.layer2(self.layer1(x))))
        # Pool, flatten, classify.
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        return self.fc(x)
The provided code snippet includes necessary dependencies for implementing the `eca_resnet18` function. Write a Python function `def eca_resnet18(k_size=[3, 3, 3, 3], num_classes=1_000, pretrained=False)` to solve the following problem:
Constructs a ResNet-18 model. Args: k_size: Adaptive selection of kernel size; pretrained (bool): If True, returns a model pre-trained on ImageNet; num_classes: The number of output classes
Here is the function:
def eca_resnet18(k_size=[3, 3, 3, 3], num_classes=1_000, pretrained=False):
    """Constructs a ResNet-18 model.
    Args:
        k_size: Adaptive selection of kernel size (one per stage)
        pretrained (bool): currently unused — no weights are loaded here
        num_classes: The classes of classification
    """
    model = ResNet(ECABasicBlock, [2, 2, 2, 2], num_classes=num_classes, k_size=k_size)
    # Swap in adaptive pooling so inputs need not be exactly 224x224.
    model.avgpool = nn.AdaptiveAvgPool2d(1)
    return model | Constructs a ResNet-18 model. Args: k_size: Adaptive selection of kernel size pretrained (bool): If True, returns a model pre-trained on ImageNet num_classes:The classes of classification |
22,969 | import torch.nn as nn
import math
from .eca_module import eca_layer
class ECABasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, k_size=3):
super(ECABasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, 1)
self.bn2 = nn.BatchNorm2d(planes)
self.eca = eca_layer(planes, k_size)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.eca(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, k_size=[3, 3, 3, 3]):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], int(k_size[0]))
self.layer2 = self._make_layer(block, 128, layers[1], int(k_size[1]), stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], int(k_size[2]), stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], int(k_size[3]), stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, k_size, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, k_size))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, k_size=k_size))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
The provided code snippet includes necessary dependencies for implementing the `eca_resnet34` function. Write a Python function `def eca_resnet34(k_size=[3, 3, 3, 3], num_classes=1_000, pretrained=False)` to solve the following problem:
Constructs a ResNet-34 model. Args: k_size: Adaptive selection of kernel size pretrained (bool): If True, returns a model pre-trained on ImageNet num_classes:The classes of classification
Here is the function:
def eca_resnet34(k_size=[3, 3, 3, 3], num_classes=1_000, pretrained=False):
    """Constructs a ResNet-34 model.
    Args:
        k_size: Adaptive selection of kernel size (one per stage)
        pretrained (bool): currently unused — no weights are loaded here
        num_classes: The classes of classification
    """
    model = ResNet(ECABasicBlock, [3, 4, 6, 3], num_classes=num_classes, k_size=k_size)
    # Swap in adaptive pooling so inputs need not be exactly 224x224.
    model.avgpool = nn.AdaptiveAvgPool2d(1)
    return model | Constructs a ResNet-34 model. Args: k_size: Adaptive selection of kernel size pretrained (bool): If True, returns a model pre-trained on ImageNet num_classes:The classes of classification |
22,970 | import torch.nn as nn
import math
from .eca_module import eca_layer
class ECABottleneck(nn.Module):
    """ResNet bottleneck (1x1 -> 3x3 -> 1x1, 4x expansion) with ECA attention."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, k_size=3):
        super(ECABottleneck, self).__init__()
        # 1x1 reduce.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # 3x3 spatial conv; carries the (optional) stride.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # 1x1 expand to planes * 4.
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.eca = eca_layer(planes * 4, k_size)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Bottleneck transform plus the (optionally projected) identity shortcut."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.eca(self.bn3(self.conv3(out)))
        out += shortcut
        return self.relu(out)
class ResNet(nn.Module):
    """ResNet backbone assembled from ECA-augmented residual blocks.

    Standard ImageNet stem (7x7 stride-2 conv, BN, ReLU, 3x3 max-pool)
    followed by four residual stages and a linear classifier head.
    NOTE(review): the fixed 7x7 average pool assumes a 224x224 input; the
    eca_resnet* constructors replace it with adaptive pooling.
    """
    def __init__(self, block, layers, num_classes=1000, k_size=[3, 3, 3, 3]):
        """
        Args:
            block (type): Residual block class to instantiate (e.g. ECABottleneck).
            layers (list[int]): Number of blocks in each of the four stages.
            num_classes (int): Output size of the final fully-connected layer.
            k_size (list): Per-stage ECA kernel sizes, one entry per stage.
        """
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], int(k_size[0]))
        self.layer2 = self._make_layer(block, 128, layers[1], int(k_size[1]), stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], int(k_size[2]), stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], int(k_size[3]), stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He-style initialization for convolutions; BN starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, k_size, stride=1):
        """Build one residual stage of `blocks` blocks.

        Args:
            block (type): Block class to instantiate.
            planes (int): Bottleneck width for the stage.
            blocks (int): Number of blocks in the stage.
            k_size (int): ECA kernel size shared by the stage's blocks.
            stride (int): Stride of the stage's first block.
        Returns:
            (nn.Sequential): The assembled stage.
        """
        downsample = None
        # Project the residual when the first block changes resolution/width.
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, k_size))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, k_size=k_size))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Forward pass: stem -> 4 stages -> pool -> flatten -> logits."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
The provided code snippet includes necessary dependencies for implementing the `eca_resnet50` function. Write a Python function `def eca_resnet50(k_size=[3, 3, 3, 3], num_classes=1000, pretrained=False)` to solve the following problem:
Constructs a ResNet-50 model. Args: k_size: Adaptive selection of kernel size num_classes: The number of classes for classification pretrained (bool): If True, returns a model pre-trained on ImageNet
Here is the function:
def eca_resnet50(k_size=[3, 3, 3, 3], num_classes=1000, pretrained=False):
    """Constructs a ResNet-50 model with ECA attention.
    Args:
        k_size: Adaptive selection of kernel size (one entry per stage)
        num_classes: The number of classes for classification
        pretrained (bool): If True, returns a model pre-trained on ImageNet.
            NOTE(review): currently ignored — no pretrained weights are loaded.
    """
    # Stray debug print removed for consistency with eca_resnet101/eca_resnet152.
    model = ResNet(ECABottleneck, [3, 4, 6, 3], num_classes=num_classes, k_size=k_size)
    # Adaptive pooling frees the network from the fixed 224x224 input
    # assumption of the default 7x7 average pool.
    model.avgpool = nn.AdaptiveAvgPool2d(1)
    return model
22,971 | import torch.nn as nn
import math
from .eca_module import eca_layer
class ECABottleneck(nn.Module):
    """ResNet bottleneck block augmented with Efficient Channel Attention (ECA).

    Layout: 1x1 conv -> BN -> ReLU -> 3x3 conv (carries the stride) -> BN ->
    ReLU -> 1x1 conv (4x channel expansion) -> BN -> ECA, followed by a
    residual addition and a final ReLU.
    """
    # Channel expansion factor: the block outputs `planes * 4` channels.
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None, k_size=3):
        """
        Args:
            inplanes (int): Number of input channels.
            planes (int): Bottleneck width; the block outputs planes * 4 channels.
            stride (int): Stride of the middle 3x3 convolution.
            downsample (nn.Module, optional): Projection applied to the residual
                branch when the spatial size or channel count changes.
            k_size (int): Kernel size of the ECA module's 1D convolution.
        """
        super(ECABottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        # Channel attention applied to the expanded feature map.
        self.eca = eca_layer(planes * 4, k_size)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """Run the bottleneck; returns a tensor with planes * 4 channels."""
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        # Re-weight channels before the residual addition.
        out = self.eca(out)
        if self.downsample is not None:
            # Match the residual's shape to the main branch.
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """ResNet backbone assembled from ECA-augmented residual blocks.

    Standard ImageNet stem (7x7 stride-2 conv, BN, ReLU, 3x3 max-pool)
    followed by four residual stages and a linear classifier head.
    NOTE(review): the fixed 7x7 average pool assumes a 224x224 input; the
    eca_resnet* constructors replace it with adaptive pooling.
    """
    def __init__(self, block, layers, num_classes=1000, k_size=[3, 3, 3, 3]):
        """
        Args:
            block (type): Residual block class to instantiate (e.g. ECABottleneck).
            layers (list[int]): Number of blocks in each of the four stages.
            num_classes (int): Output size of the final fully-connected layer.
            k_size (list): Per-stage ECA kernel sizes, one entry per stage.
        """
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], int(k_size[0]))
        self.layer2 = self._make_layer(block, 128, layers[1], int(k_size[1]), stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], int(k_size[2]), stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], int(k_size[3]), stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He-style initialization for convolutions; BN starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, k_size, stride=1):
        """Build one residual stage of `blocks` blocks.

        Args:
            block (type): Block class to instantiate.
            planes (int): Bottleneck width for the stage.
            blocks (int): Number of blocks in the stage.
            k_size (int): ECA kernel size shared by the stage's blocks.
            stride (int): Stride of the stage's first block.
        Returns:
            (nn.Sequential): The assembled stage.
        """
        downsample = None
        # Project the residual when the first block changes resolution/width.
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, k_size))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, k_size=k_size))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Forward pass: stem -> 4 stages -> pool -> flatten -> logits."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
The provided code snippet includes necessary dependencies for implementing the `eca_resnet101` function. Write a Python function `def eca_resnet101(k_size=[3, 3, 3, 3], num_classes=1_000, pretrained=False)` to solve the following problem:
Constructs a ResNet-101 model. Args: k_size: Adaptive selection of kernel size num_classes: The number of classes for classification pretrained (bool): If True, returns a model pre-trained on ImageNet
Here is the function:
def eca_resnet101(k_size=[3, 3, 3, 3], num_classes=1_000, pretrained=False):
    """Constructs a ResNet-101 model with ECA attention.
    Args:
        k_size: Adaptive selection of kernel size (one entry per stage)
        num_classes: The number of classes for classification
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(ECABottleneck, [3, 4, 23, 3], num_classes=num_classes, k_size=k_size)
    # Swap in adaptive pooling so inputs need not be exactly 224x224.
    net.avgpool = nn.AdaptiveAvgPool2d(1)
    return net
22,972 | import torch.nn as nn
import math
from .eca_module import eca_layer
class ECABottleneck(nn.Module):
    """ResNet bottleneck block augmented with Efficient Channel Attention (ECA).

    Layout: 1x1 conv -> BN -> ReLU -> 3x3 conv (carries the stride) -> BN ->
    ReLU -> 1x1 conv (4x channel expansion) -> BN -> ECA, followed by a
    residual addition and a final ReLU.
    """
    # Channel expansion factor: the block outputs `planes * 4` channels.
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None, k_size=3):
        """
        Args:
            inplanes (int): Number of input channels.
            planes (int): Bottleneck width; the block outputs planes * 4 channels.
            stride (int): Stride of the middle 3x3 convolution.
            downsample (nn.Module, optional): Projection applied to the residual
                branch when the spatial size or channel count changes.
            k_size (int): Kernel size of the ECA module's 1D convolution.
        """
        super(ECABottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        # Channel attention applied to the expanded feature map.
        self.eca = eca_layer(planes * 4, k_size)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """Run the bottleneck; returns a tensor with planes * 4 channels."""
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        # Re-weight channels before the residual addition.
        out = self.eca(out)
        if self.downsample is not None:
            # Match the residual's shape to the main branch.
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """ResNet backbone assembled from ECA-augmented residual blocks.

    Standard ImageNet stem (7x7 stride-2 conv, BN, ReLU, 3x3 max-pool)
    followed by four residual stages and a linear classifier head.
    NOTE(review): the fixed 7x7 average pool assumes a 224x224 input; the
    eca_resnet* constructors replace it with adaptive pooling.
    """
    def __init__(self, block, layers, num_classes=1000, k_size=[3, 3, 3, 3]):
        """
        Args:
            block (type): Residual block class to instantiate (e.g. ECABottleneck).
            layers (list[int]): Number of blocks in each of the four stages.
            num_classes (int): Output size of the final fully-connected layer.
            k_size (list): Per-stage ECA kernel sizes, one entry per stage.
        """
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], int(k_size[0]))
        self.layer2 = self._make_layer(block, 128, layers[1], int(k_size[1]), stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], int(k_size[2]), stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], int(k_size[3]), stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He-style initialization for convolutions; BN starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, k_size, stride=1):
        """Build one residual stage of `blocks` blocks.

        Args:
            block (type): Block class to instantiate.
            planes (int): Bottleneck width for the stage.
            blocks (int): Number of blocks in the stage.
            k_size (int): ECA kernel size shared by the stage's blocks.
            stride (int): Stride of the stage's first block.
        Returns:
            (nn.Sequential): The assembled stage.
        """
        downsample = None
        # Project the residual when the first block changes resolution/width.
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, k_size))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, k_size=k_size))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Forward pass: stem -> 4 stages -> pool -> flatten -> logits."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
The provided code snippet includes necessary dependencies for implementing the `eca_resnet152` function. Write a Python function `def eca_resnet152(k_size=[3, 3, 3, 3], num_classes=1_000, pretrained=False)` to solve the following problem:
Constructs a ResNet-152 model. Args: k_size: Adaptive selection of kernel size num_classes: The number of classes for classification pretrained (bool): If True, returns a model pre-trained on ImageNet
Here is the function:
def eca_resnet152(k_size=[3, 3, 3, 3], num_classes=1_000, pretrained=False):
    """Constructs a ResNet-152 model with ECA attention.
    Args:
        k_size: Adaptive selection of kernel size (one entry per stage)
        num_classes: The number of classes for classification
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(ECABottleneck, [3, 8, 36, 3], num_classes=num_classes, k_size=k_size)
    # Swap in adaptive pooling so inputs need not be exactly 224x224.
    net.avgpool = nn.AdaptiveAvgPool2d(1)
    return net
22,973 | import os
import sys
from setuptools import setup, find_packages, dist
import glob
import logging
import subprocess
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
def get_cuda_bare_metal_version(cuda_dir):
    """Query the nvcc binary under `cuda_dir` for the CUDA toolkit version.

    Args:
        cuda_dir (str): CUDA installation root (must contain bin/nvcc).

    Returns:
        tuple: (raw `nvcc -V` output, major version str, minor version str).

    Raises:
        subprocess.CalledProcessError: if nvcc exits with a non-zero status.
        ValueError: if the nvcc output contains no "release" token.
    """
    raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
    output = raw_output.split()
    # The token after "release" looks like "11.8," (note the trailing comma).
    release_idx = output.index("release") + 1
    release = output[release_idx].split(".")
    bare_metal_major = release[0]
    # Keep every leading digit of the minor component. The previous code took
    # only the first character, which would misreport e.g. "11.10" as minor "1".
    minor_token = release[1]
    bare_metal_minor = ""
    for ch in minor_token:
        if not ch.isdigit():
            break
        bare_metal_minor += ch
    return raw_output, bare_metal_major, bare_metal_minor
22,974 | import os
import sys
from setuptools import setup, find_packages, dist
import glob
import logging
import subprocess
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
# Cross-compilation support, adapted from NVIDIA Apex's setup.py: when no GPU
# is visible at build time, either warn (CPU-only install) or, with
# FORCE_CUDA=1, select default target architectures from the nvcc version so
# CUDA extensions can still be built (e.g. inside docker image builds).
if not torch.cuda.is_available():
    if os.getenv('FORCE_CUDA', '0') == '1':
        # From: https://github.com/NVIDIA/apex/blob/c4e85f7bf144cb0e368da96d339a6cbd9882cea5/setup.py
        # Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
        # which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
        logging.warning(
            "Torch did not find available GPUs on this system.\n"
            "If your intention is to cross-compile, this is not an error.\n"
            "By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
            "Volta (compute capability 7.0), Turing (compute capability 7.5),\n"
            "and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
            "If you wish to cross-compile for a single specific architecture,\n"
            'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n'
        )
        # Only pick defaults when the user has not pinned an arch list.
        if os.getenv("TORCH_CUDA_ARCH_LIST", None) is None:
            _, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
            if int(bare_metal_major) == 11:
                # CUDA 11.1+ adds sm_86 (Ampere GA10x) support.
                if int(bare_metal_minor) == 0:
                    os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
                else:
                    os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6"
            else:
                os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
    else:
        logging.warning(
            "Torch did not find available GPUs on this system.\n"
            "This script will install only with CPU support and will have very limited features.\n"
            'If your wish to cross-compile for GPU `export FORCE_CUDA=1` before running setup.py\n'
            "By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
            "Volta (compute capability 7.0), Turing (compute capability 7.5),\n"
            "and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
            "If you wish to cross-compile for a single specific architecture,\n"
            'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n'
        )
def get_extensions():
    """Collect the C++/CUDA extension modules for setup().

    Returns:
        list: setuptools extension objects for `wisp._C`, or None when no
        C++ sources exist under wisp/csrc.

    Raises:
        RuntimeError: if CUDA is unavailable and FORCE_CUDA is not set.
    """
    extra_compile_args = {'cxx': ['-O3', '-fdiagnostics-color=always']}
    define_macros = []
    include_dirs = []
    extensions = []
    sources = glob.glob('wisp/csrc/**/*.cpp', recursive=True)
    if not sources:
        print("No source files found for extension, skipping extension compilation")
        return None
    if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
        define_macros += [("WITH_CUDA", None), ("THRUST_IGNORE_CUB_VERSION_CHECK", None)]
        sources += glob.glob('wisp/csrc/**/*.cu', recursive=True)
        extension = CUDAExtension
        extra_compile_args.update({'nvcc': ['-O3']})
        #include_dirs = get_include_dirs()
    else:
        # BUG FIX: the original `assert(False, "...")` asserted a non-empty
        # tuple, which is always truthy and never fired — control then fell
        # through to use an unbound `extension`. Raise explicitly instead.
        raise RuntimeError("CUDA is not available. Set FORCE_CUDA=1 for Docker builds")
    extensions.append(
        extension(
            name='wisp._C',
            sources=sources,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
            #include_dirs=include_dirs
        )
    )
    # Link against the static CUDA runtime (as PyTorch's own wheels do).
    for ext in extensions:
        ext.libraries = ['cudart_static' if x == 'cudart' else x
                         for x in ext.libraries]
    return extensions
22,975 | import os
def setup(app):
    # -- To demonstrate ReadTheDocs switcher -------------------------------------
    # Mimic the RTD asset environment locally so RTD-like behavior can be
    # exercised. Skip on RTD itself (assets already injected) and on GitHub
    # Actions (not wanted there).
    on_rtd = bool(os.environ.get("READTHEDOCS"))
    on_gha = bool(os.environ.get("GITHUB_ACTIONS"))
    if on_rtd or on_gha:
        return
    app.add_css_file(
        "https://assets.readthedocs.org/static/css/readthedocs-doc-embed.css"
    )
    app.add_css_file("https://assets.readthedocs.org/static/css/badge_only.css")
    # Dummy data file referenced by the RTD footer script
    # ref: https://github.com/readthedocs/readthedocs.org/blob/bc3e147770e5740314a8e8c33fec5d111c850498/readthedocs/core/static-src/core/js/doc-embed/footer.js # noqa: E501
    app.add_js_file("rtd-data.js")
    app.add_js_file(
        "https://assets.readthedocs.org/static/javascript/readthedocs-doc-embed.js",
        priority=501,
    )
22,976 | import numpy as np
import torch
import pathlib
import argparse
from kaolin.io.obj import import_mesh
from kaolin.ops.mesh import sample_points
from kaolin.render.mesh.utils import texture_mapping
from kaolin.ops.conversions.pointcloud import unbatched_pointcloud_to_spc
def convert_texture_to_torch_sample_format(texture, sampled_uvs):
    """ Convert to (1, C, Tex-H, Tex-W) format """
    # Add a batch dim, cast to the UV dtype, then move channels before H, W.
    batched = texture.unsqueeze(0).type(sampled_uvs.dtype)
    return batched.permute(0, 3, 1, 2)
The provided code snippet includes necessary dependencies for implementing the `convert_mesh_to_spc` function. Write a Python function `def convert_mesh_to_spc(mesh_path, level, output_path, num_samples)` to solve the following problem:
Loads obj and converts it to a SPC. Output will reside in output_path.
Here is the function:
def convert_mesh_to_spc(mesh_path, level, output_path, num_samples):
    """ Loads obj and converts it to a SPC. Output will reside in output_path.

    Pipeline: load mesh + materials -> sample surface points with interpolated
    UVs -> look up per-point colors in the diffuse texture -> voxelize the
    colored point cloud into a Structured Point Cloud (SPC) octree -> save
    octree + colors as an .npz archive. Requires a CUDA device.

    Args:
        mesh_path (str): Path to an .obj file with material/texture data.
        level (int): Octree depth of the generated SPC.
        output_path (str or None): Output path (without the .npz suffix);
            defaults to the mesh's filename in the current working directory.
        num_samples (int): Number of surface points to sample.

    Returns:
        (str): Path of the written .npz file.
    """
    mesh = import_mesh(mesh_path, with_materials=True)
    print(f'Loaded mesh with {len(mesh.vertices)} vertices, {len(mesh.faces)} faces and {len(mesh.materials)} materials.')
    # Load the uv coordinates per face-vertex like "features" per face-vertex,
    # which sample_points will interpolate for new sample points.
    # mesh.uvs is a tensor of uv coordinates of shape (#num_uvs, 2), which we consider as "features" here
    # mesh.face_uvs_idx is a tensor of shape (#faces, 3), indexing which feature to use per-face-per-vertex
    # Therefore, face_features will be of shape (#faces, 3, 2)
    face_features = mesh.uvs[mesh.face_uvs_idx]
    # Kaolin assumes an exact batch format, we make sure to convert from:
    # (V, 3) to (1, V, 3)
    # (F, 3, 2) to (1, F, 3, 2)
    # where 1 is the batch size
    batched_vertices = mesh.vertices.unsqueeze(0)
    batched_face_features = face_features.unsqueeze(0)
    # sample_points is faster on cuda device
    batched_vertices = batched_vertices.cuda()
    faces = mesh.faces.cuda()
    batched_face_features = batched_face_features.cuda()
    sampled_verts, _, sampled_uvs = sample_points(batched_vertices, faces,
                                                  num_samples=num_samples, face_features=batched_face_features)
    print(f'Sampled {sampled_verts.shape[1]} points over the mesh surface.')
    # Convert texture to sample-compatible format
    diffuse_color = mesh.materials[0]['map_Kd'] # Assumes a shape with a single material
    texture_maps = convert_texture_to_torch_sample_format(diffuse_color, sampled_uvs) # (1, C, Th, Tw)
    texture_maps = texture_maps.cuda()
    # Sample colors according to uv-coordinates
    sampled_uvs = texture_mapping(texture_coordinates=sampled_uvs, texture_maps=texture_maps, mode='nearest')
    # Unbatch
    vertices = sampled_verts.squeeze(0)
    vertex_colors = sampled_uvs.squeeze(0)
    # Normalize to [0,1], and convert to RGBA if needed
    # NOTE(review): assumes an 8-bit texture with values in [0, 255] — confirm.
    vertex_colors /= 255
    if vertex_colors.shape[-1] == 3:
        # Append an opaque alpha channel so features are always RGBA.
        vertex_colors = torch.cat([vertex_colors, torch.ones_like(vertex_colors[:, :1])], dim=1)
    spc = unbatched_pointcloud_to_spc(vertices, level, features=vertex_colors)
    print(f'SPC generated with {spc.point_hierarchies.shape[0]} cells.')
    octrees_entry = spc.octrees
    # Flatten features back to 8-bit color values for compact storage.
    colors_entry = (255 * spc.features.reshape(-1))
    npz_record = dict(
        octree=octrees_entry.cpu().numpy().astype(np.uint8),
        colors=colors_entry.cpu().numpy().astype(np.uint8)
    )
    # Default output path: take filename and save in current working directory
    if output_path is None:
        output_path = pathlib.Path(mesh_path).stem
    output_path = f'{output_path}.npz'
    np.savez(output_path, **npz_record)
    return output_path
22,977 | from __future__ import annotations
from typing import Callable, Any
import torch
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `identity` function. Write a Python function `def identity(c: torch.Tensor) -> torch.Tensor` to solve the following problem:
A naive normalization function which assumes the value is already normalized and returned as is. Args: c (torch.Tensor): A single channel tensor of an arbitrary shape. Returns: (torch.Tensor): Input channel c is returned without a change.
Here is the function:
def identity(c: torch.Tensor) -> torch.Tensor:
    """No-op normalization: assumes `c` is already in a normalized range.

    Args:
        c (torch.Tensor): A single channel tensor of an arbitrary shape.

    Returns:
        (torch.Tensor): `c`, returned unchanged.
    """
    return c
22,978 | from __future__ import annotations
from typing import Callable, Any
import torch
import torch.nn.functional as F
def normalize(c: torch.Tensor, min_val: Any = None, max_val: Any = None) -> torch.Tensor:
    """Linearly rescale channel `c` into the range [0, 1].

    Bounds default to the channel's own min / max when not given explicitly.

    Args:
        c (torch.Tensor): A single channel tensor of an arbitrary shape.
        min_val (Any): Optional lower bound for values in channel `c`.
        max_val (Any): Optional upper bound for values in channel `c`.

    Returns:
        (torch.Tensor): `c` mapped linearly onto [0, 1].
    """
    if min_val is None:
        min_val = torch.min(c)
    if max_val is None:
        max_val = torch.max(c)
    span = max_val - min_val
    return (c - min_val) / span
The provided code snippet includes necessary dependencies for implementing the `normalize_linear_scale` function. Write a Python function `def normalize_linear_scale(c: torch.Tensor, min_val: Any = None, max_val: Any = None, linear_scale: float = 1.0) -> torch.Tensor` to solve the following problem:
A normalization function which linear scales the channel before normalizing it to the range of [0, 1]. If the min / max values bounds of the channel are not explicitly specified, they're determined by c's values. If explicitly specified, the bounds are scaled as well. Args: c (torch.Tensor): A single channel tensor of an arbitrary shape. min_val (Any): Optional, the lower boundary for possible values in channel C max_val (Any): Optional, the upper boundary for possible values in channel C linear_scale (float): Channel scale value Returns: (torch.Tensor): Input channel c is linearly mapped to the range [0, 1].
Here is the function:
def normalize_linear_scale(c: torch.Tensor, min_val: Any = None, max_val: Any = None,
                           linear_scale: float = 1.0) -> torch.Tensor:
    """ A normalization function which linearly scales the channel before normalizing it
    to the range of [0, 1].
    If the min / max value bounds of the channel are not explicitly specified, they're
    determined by the scaled c's values. If explicitly specified, the bounds are scaled as well.
    Args:
        c (torch.Tensor): A single channel tensor of an arbitrary shape.
        min_val (Any): Optional, the lower boundary for possible values in channel C
        max_val (Any): Optional, the upper boundary for possible values in channel C
        linear_scale (float): Channel scale value
    Returns:
        (torch.Tensor): Input channel c is linearly mapped to the range [0, 1].
    """
    # BUG FIX: scale out-of-place — the original `c *= linear_scale` mutated
    # the caller's tensor in place, a surprising side effect for a pure mapping.
    c = c * linear_scale
    min_val = linear_scale * min_val if min_val is not None else min_val
    max_val = linear_scale * max_val if max_val is not None else max_val
    return normalize(c=c, min_val=min_val, max_val=max_val)
22,979 | from __future__ import annotations
from typing import Callable, Any
import torch
import torch.nn.functional as F
def normalize(c: torch.Tensor, min_val: Any = None, max_val: Any = None) -> torch.Tensor:
    """Linearly rescale channel `c` into the range [0, 1].

    Bounds default to the channel's own min / max when not given explicitly.

    Args:
        c (torch.Tensor): A single channel tensor of an arbitrary shape.
        min_val (Any): Optional lower bound for values in channel `c`.
        max_val (Any): Optional upper bound for values in channel `c`.

    Returns:
        (torch.Tensor): `c` mapped linearly onto [0, 1].
    """
    if min_val is None:
        min_val = torch.min(c)
    if max_val is None:
        max_val = torch.max(c)
    span = max_val - min_val
    return (c - min_val) / span
The provided code snippet includes necessary dependencies for implementing the `normalize_log_scale` function. Write a Python function `def normalize_log_scale(c: torch.Tensor, min_val: Any = None, max_val: Any = None, linear_scale: float = 1.0, log_scale: float = 1.0) -> torch.Tensor` to solve the following problem:
A normalization function which applies log and linear scales to the channel before normalizing it to the range of [0, 1]. If the min / max value bounds of the channel are not explicitly specified, they're determined by c's values. If explicitly specified, the bounds are scaled as well. Args: c (torch.Tensor): A single channel tensor of an arbitrary shape. min_val (Any): Optional, the lower boundary for possible values in channel C max_val (Any): Optional, the upper boundary for possible values in channel C linear_scale (float): Channel linear scale value log_scale (float): Channel log scale value. Returns: (torch.Tensor): Input channel c is mapped to the range [0, 1] by the formula: ``c' = a * log(b * c)`` where a is the linear scale, b is the log scale and c is the channel value.
Here is the function:
def normalize_log_scale(c: torch.Tensor, min_val: Any = None, max_val: Any = None,
                        linear_scale: float = 1.0, log_scale: float = 1.0) -> torch.Tensor:
    """ A normalization function which applies log and linear scales to the channel before
    normalizing it to the range of [0, 1].
    If the min / max value bounds of the channel are not explicitly specified, they're
    determined by c's values. If explicitly specified, the bounds are scaled as well.
    Args:
        c (torch.Tensor): A single channel tensor of an arbitrary shape.
        min_val (Any): Optional, the lower boundary for possible values in channel C
        max_val (Any): Optional, the upper boundary for possible values in channel C
        linear_scale (float): Channel linear scale value
        log_scale (float): Channel log scale value
    Returns:
        (torch.Tensor): Input channel c is mapped to the range [0, 1] by the formula:
        ``c' = a * log(b * c)`` where a is the linear scale, b is the log scale and c is
        the channel value.
    """
    c = linear_scale * torch.log(log_scale * c)
    # BUG FIX: `torch.log` only accepts tensors; the bounds may be plain Python
    # scalars, so coerce them with `as_tensor` (a no-op for tensors) first.
    if min_val is not None:
        min_val = linear_scale * torch.log(torch.as_tensor(log_scale * min_val))
    if max_val is not None:
        max_val = linear_scale * torch.log(torch.as_tensor(log_scale * max_val))
    return normalize(c=c, min_val=min_val, max_val=max_val)
22,980 | from __future__ import annotations
from typing import Callable, Any
import torch
import torch.nn.functional as F
def normalize(c: torch.Tensor, min_val: Any = None, max_val: Any = None) -> torch.Tensor:
    """Linearly rescale channel `c` into the range [0, 1].

    Bounds default to the channel's own min / max when not given explicitly.

    Args:
        c (torch.Tensor): A single channel tensor of an arbitrary shape.
        min_val (Any): Optional lower bound for values in channel `c`.
        max_val (Any): Optional upper bound for values in channel `c`.

    Returns:
        (torch.Tensor): `c` mapped linearly onto [0, 1].
    """
    if min_val is None:
        min_val = torch.min(c)
    if max_val is None:
        max_val = torch.max(c)
    span = max_val - min_val
    return (c - min_val) / span
The provided code snippet includes necessary dependencies for implementing the `normalize_vector` function. Write a Python function `def normalize_vector(c: torch.Tensor) -> torch.Tensor` to solve the following problem:
A normalization function which applies a L2 normalization over a channel of vector data. Args: c (torch.Tensor): A single channel tensor of an arbitrary shape. Returns: (torch.Tensor): Input channel c is normalized by the L2 norm.
Here is the function:
def normalize_vector(c: torch.Tensor) -> torch.Tensor:
    """L2-normalize a channel of vectors, then shift components into [0, 1].

    Each vector along the last dimension is mapped to unit length, after which
    its components (in [-1, 1]) are remapped to [0, 1].

    Args:
        c (torch.Tensor): A single channel tensor of an arbitrary shape.

    Returns:
        (torch.Tensor): The normalized-and-shifted channel, same shape as `c`.
    """
    unit = F.normalize(c, dim=-1)
    return unit * 0.5 + 0.5
22,981 | from __future__ import annotations
from typing import Callable, Any
import torch
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `blend_linear` function. Write a Python function `def blend_linear(c1: torch.Tensor, c2: torch.Tensor, alpha1: torch.Tensor, alpha2: torch.Tensor)` to solve the following problem:
A direct linear interpolation between c1 and c2. Useful for blending channels which do not consider the alpha value (i.e. the alpha channel itself). Args: c1 (torch.Tensor): first channel tensor of an arbitrary shape. c2 (torch.Tensor): second channel tensor, in the shape of c1. alpha1 (torch.Tensor): Unused alpha2 (torch.Tensor): Unused Returns: (torch.Tensor): Blended channel in the shape of c1
Here is the function:
def blend_linear(c1: torch.Tensor, c2: torch.Tensor, alpha1: torch.Tensor, alpha2: torch.Tensor):
    """Blend c1 over c2 without consulting the alpha inputs.

    Computes ``c1 + c2 * (1 - c1)``, using c1 itself as the blend weight.
    Useful for blending channels which do not consider the alpha value
    (i.e. the alpha channel itself).

    Args:
        c1 (torch.Tensor): first channel tensor of an arbitrary shape.
        c2 (torch.Tensor): second channel tensor, in the shape of c1.
        alpha1 (torch.Tensor): Unused.
        alpha2 (torch.Tensor): Unused.

    Returns:
        (torch.Tensor): Blended channel in the shape of c1.
    """
    remainder = 1.0 - c1
    return c1 + remainder * c2
22,982 | from __future__ import annotations
from typing import Callable, Any
import torch
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `blend_alpha_composite_over` function. Write a Python function `def blend_alpha_composite_over(c1: torch.Tensor, c2: torch.Tensor, alpha1: torch.Tensor, alpha2: torch.Tensor)` to solve the following problem:
An alpha compositing op where a front pixel is alpha blended with the background pixel (in a usual painter's algorithm manner). Useful for blending channels such as RGB. See: https://en.wikipedia.org/wiki/Alpha_compositing Args: c1 (torch.Tensor): first channel tensor of an arbitrary shape. c2 (torch.Tensor): second channel tensor, in the shape of c1. alpha1 (torch.Tensor): alpha channel tensor, corresponding to first channel, in the shape of c1. alpha2 (torch.Tensor): alpha channel tensor, corresponding to second channel, in the shape of c1. Returns: (torch.Tensor): Blended channel in the shape of c1
Here is the function:
def blend_alpha_composite_over(c1: torch.Tensor, c2: torch.Tensor, alpha1: torch.Tensor, alpha2: torch.Tensor):
    """Composite c1 over c2 using standard "over" alpha compositing.

    Useful for blending channels such as RGB (painter's algorithm).
    See: https://en.wikipedia.org/wiki/Alpha_compositing

    Args:
        c1 (torch.Tensor): first channel tensor of an arbitrary shape.
        c2 (torch.Tensor): second channel tensor, in the shape of c1.
        alpha1 (torch.Tensor): alpha channel tensor for c1, in the shape of c1.
        alpha2 (torch.Tensor): alpha channel tensor for c2, in the shape of c1.

    Returns:
        (torch.Tensor): Blended channel in the shape of c1.
    """
    one_minus_a1 = 1.0 - alpha1
    alpha_out = alpha1 + alpha2 * one_minus_a1
    numerator = c1 * alpha1 + c2 * alpha2 * one_minus_a1
    # Where the combined alpha is zero, output zero instead of dividing by it.
    return torch.where(alpha_out > 0, numerator / alpha_out, torch.zeros_like(c1))
22,983 | from __future__ import annotations
from typing import Callable, Any
import torch
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `blend_alpha_lerp` function. Write a Python function `def blend_alpha_lerp(c1: torch.Tensor, c2: torch.Tensor, alpha1: torch.Tensor, alpha2: torch.Tensor)` to solve the following problem:
A linear interpolation between c1 and c2, which uses the alpha channel as a weighting factor. Args: c1 (torch.Tensor): first channel tensor of an arbitrary shape. c2 (torch.Tensor): second channel tensor, in the shape of c1. alpha1 (torch.Tensor): alpha channel tensor, corresponding to first channel, in the shape of c1. alpha2 (torch.Tensor): Unused. Returns: (torch.Tensor): Blended channel in the shape of c1
Here is the function:
def blend_alpha_lerp(c1: torch.Tensor, c2: torch.Tensor, alpha1: torch.Tensor, alpha2: torch.Tensor):
    """ Linearly interpolates between the two channels, weighting c1 by its alpha.

    Args:
        c1 (torch.Tensor): first channel tensor of an arbitrary shape.
        c2 (torch.Tensor): second channel tensor, in the shape of c1.
        alpha1 (torch.Tensor): interpolation weight for c1, in the shape of c1.
        alpha2 (torch.Tensor): Unused.

    Returns:
        (torch.Tensor): Blended channel in the shape of c1
    """
    front_weight = alpha1
    back_weight = 1.0 - alpha1
    return c1 * front_weight + c2 * back_weight
22,984 | from __future__ import annotations
from typing import Callable, Any
import torch
import torch.nn.functional as F
def normalize(c: torch.Tensor, min_val: Any = None, max_val: Any = None) -> torch.Tensor:
    """ Linearly maps channel c onto the range [0, 1].

    When min_val / max_val are not given, the bounds are taken from the channel's
    own minimum and maximum values.

    Args:
        c (torch.Tensor): A single channel tensor of an arbitrary shape.
        min_val (Any): Optional, the lower boundary for possible values in channel c
        max_val (Any): Optional, the upper boundary for possible values in channel c

    Returns:
        (torch.Tensor): Input channel c linearly mapped to the range [0, 1].
    """
    lo = torch.min(c) if min_val is None else min_val
    hi = torch.max(c) if max_val is None else max_val
    return (c - lo) / (hi - lo)
The provided code snippet includes necessary dependencies for implementing the `blend_alpha_slerp` function. Write a Python function `def blend_alpha_slerp(c1: torch.Tensor, c2: torch.Tensor, alpha1: torch.Tensor, alpha2: torch.Tensor)` to solve the following problem:
A spherical linear interpolation, useful for interpolating rotations or blending directional vectors. c1 and c2 are normalized and interpolated over the unit hypersphere. alpha1 acts as the interpolation weight. See: https://en.wikipedia.org/wiki/Slerp Args: c1 (torch.Tensor): first channel tensor of an arbitrary shape. c2 (torch.Tensor): second channel tensor, in the shape of c1. alpha1 (torch.Tensor): alpha channel tensor, corresponding to first channel, in the shape of c1. alpha2 (torch.Tensor): Unused. Returns: (torch.Tensor): Blended channel in the shape of c1
Here is the function:
def blend_alpha_slerp(c1: torch.Tensor, c2: torch.Tensor, alpha1: torch.Tensor, alpha2: torch.Tensor):
    """ Spherical linear interpolation between two directional channels.

    Both inputs are normalized to unit vectors (along dim=1) and interpolated over the unit
    hypersphere, with alpha1 acting as the interpolation weight. Useful for normals / rotations.
    See: https://en.wikipedia.org/wiki/Slerp

    Args:
        c1 (torch.Tensor): first channel tensor, of shape (batch, dims).
        c2 (torch.Tensor): second channel tensor, in the shape of c1.
        alpha1 (torch.Tensor): interpolation weight per batch entry (weight 1.0 yields c1).
        alpha2 (torch.Tensor): Unused.

    Returns:
        (torch.Tensor): Blended channel in the shape of c1
    """
    weight = alpha1  # alpha1 doubles as the slerp parameter t
    u = F.normalize(c1, dim=1)
    v = F.normalize(c2, dim=1)
    cos_theta = (u * v).sum(1)  # batched dot product of the unit directions
    theta = torch.acos(cos_theta)
    # NOTE(review): theta == 0 (parallel inputs) makes sin_theta zero and the division
    # below undefined -- confirm upstream callers avoid this case (original had same TODO).
    sin_theta = torch.sin(theta)
    back_w = (torch.sin((1.0 - weight) * theta) / sin_theta).unsqueeze(1)
    front_w = (torch.sin(weight * theta) / sin_theta).unsqueeze(1)
    return back_w * v + front_w * u
22,985 | from __future__ import annotations
from typing import Callable, Any
import torch
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `blend_normal` function. Write a Python function `def blend_normal(c1: torch.Tensor, c2: torch.Tensor, alpha1: torch.Tensor, alpha2: torch.Tensor)` to solve the following problem:
A standard blend mode which uses the front pixel value, without mixing. Useful when alpha blending is undesired, or the channel contains categorical info (i.e. semantic class ids). Args: c1 (torch.Tensor): first channel tensor of an arbitrary shape. c2 (torch.Tensor): Unused alpha1 (torch.Tensor): Unused alpha2 (torch.Tensor): Unused Returns: (torch.Tensor): Blended channel in the shape of c1
Here is the function:
def blend_normal(c1: torch.Tensor, c2: torch.Tensor, alpha1: torch.Tensor, alpha2: torch.Tensor):
    """ "Normal" blend mode: the front pixel value wins outright, with no mixing.
    Useful when alpha blending is undesired, or the channel holds categorical data
    (i.e. semantic class ids).

    Args:
        c1 (torch.Tensor): first channel tensor of an arbitrary shape.
        c2 (torch.Tensor): Unused
        alpha1 (torch.Tensor): Unused
        alpha2 (torch.Tensor): Unused

    Returns:
        (torch.Tensor): Blended channel in the shape of c1
    """
    # Pass the front channel through untouched.
    return c1
22,986 | from __future__ import annotations
from typing import Callable, Any
import torch
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `blend_multiply` function. Write a Python function `def blend_multiply(c1: torch.Tensor, c2: torch.Tensor, alpha1: torch.Tensor, alpha2: torch.Tensor)` to solve the following problem:
Commutative blend mode which preserves dark colors. Args: c1 (torch.Tensor): first channel tensor of an arbitrary shape. c2 (torch.Tensor): second channel tensor, in the shape of c1. alpha1 (torch.Tensor): Unused. alpha2 (torch.Tensor): Unused. Returns: (torch.Tensor): Blended channel in the shape of c1
Here is the function:
def blend_multiply(c1: torch.Tensor, c2: torch.Tensor, alpha1: torch.Tensor, alpha2: torch.Tensor):
    """ Multiplicative blend mode; commutative, and biased towards preserving dark colors.

    Args:
        c1 (torch.Tensor): first channel tensor of an arbitrary shape.
        c2 (torch.Tensor): second channel tensor, in the shape of c1.
        alpha1 (torch.Tensor): Unused.
        alpha2 (torch.Tensor): Unused.

    Returns:
        (torch.Tensor): Blended channel in the shape of c1
    """
    product = c1 * c2
    return product
22,987 | from __future__ import annotations
from typing import Callable, Any
import torch
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `blend_screen` function. Write a Python function `def blend_screen(c1: torch.Tensor, c2: torch.Tensor, alpha1: torch.Tensor, alpha2: torch.Tensor)` to solve the following problem:
Commutative blend mode which preserves light colors. Args: c1 (torch.Tensor): first channel tensor of an arbitrary shape. c2 (torch.Tensor): second channel tensor, in the shape of c1. alpha1 (torch.Tensor): Unused. alpha2 (torch.Tensor): Unused. Returns: (torch.Tensor): Blended channel in the shape of c1
Here is the function:
def blend_screen(c1: torch.Tensor, c2: torch.Tensor, alpha1: torch.Tensor, alpha2: torch.Tensor):
    """ Commutative "screen" blend mode which preserves light colors.

    Args:
        c1 (torch.Tensor): first channel tensor of an arbitrary shape.
        c2 (torch.Tensor): second channel tensor, in the shape of c1.
        alpha1 (torch.Tensor): Unused.
        alpha2 (torch.Tensor): Unused.

    Returns:
        (torch.Tensor): Blended channel in the shape of c1
    """
    # Fixed: the original was missing the multiplication operator -- `(1.0 - c1)(1.0 - c2)`
    # attempted to *call* the tensor (1.0 - c1), raising TypeError at runtime.
    return 1.0 - (1.0 - c1) * (1.0 - c2)
22,988 | from __future__ import annotations
from typing import Callable, Any
import torch
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `blend_add` function. Write a Python function `def blend_add(c1: torch.Tensor, c2: torch.Tensor, alpha1: torch.Tensor, alpha2: torch.Tensor)` to solve the following problem:
An additive blend mode, for aggregation of channel information. Args: c1 (torch.Tensor): first channel tensor of an arbitrary shape. c2 (torch.Tensor): second channel tensor, in the shape of c1. alpha1 (torch.Tensor): Unused. alpha2 (torch.Tensor): Unused. Returns: (torch.Tensor): Blended channel in the shape of c1
Here is the function:
def blend_add(c1: torch.Tensor, c2: torch.Tensor, alpha1: torch.Tensor, alpha2: torch.Tensor):
    """ Additive blend mode, used to aggregate information from both channels.

    Args:
        c1 (torch.Tensor): first channel tensor of an arbitrary shape.
        c2 (torch.Tensor): second channel tensor, in the shape of c1.
        alpha1 (torch.Tensor): Unused.
        alpha2 (torch.Tensor): Unused.

    Returns:
        (torch.Tensor): Blended channel in the shape of c1
    """
    total = c1 + c2
    return total
22,989 | from __future__ import annotations
from typing import Callable, Any
import torch
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `blend_sub` function. Write a Python function `def blend_sub(c1: torch.Tensor, c2: torch.Tensor, alpha1: torch.Tensor, alpha2: torch.Tensor)` to solve the following problem:
A subtractive blend mode, for removing channel information. Args: c1 (torch.Tensor): first channel tensor of an arbitrary shape. c2 (torch.Tensor): second channel tensor, in the shape of c1. alpha1 (torch.Tensor): Unused. alpha2 (torch.Tensor): Unused. Returns: (torch.Tensor): Blended channel in the shape of c1
Here is the function:
def blend_sub(c1: torch.Tensor, c2: torch.Tensor, alpha1: torch.Tensor, alpha2: torch.Tensor):
    """ A subtractive blend mode, for removing channel information.

    Args:
        c1 (torch.Tensor): first channel tensor of an arbitrary shape.
        c2 (torch.Tensor): second channel tensor, in the shape of c1.
        alpha1 (torch.Tensor): Unused.
        alpha2 (torch.Tensor): Unused.

    Returns:
        (torch.Tensor): Blended channel in the shape of c1
    """
    difference = c1 - c2
    return difference
22,990 | from __future__ import annotations
from typing import Callable, Any
import torch
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `blend_logical_and` function. Write a Python function `def blend_logical_and(c1: torch.Tensor, c2: torch.Tensor, alpha1: torch.Tensor, alpha2: torch.Tensor)` to solve the following problem:
For boolean channels, blends with a logical AND function. Args: c1 (torch.Tensor): first channel tensor of an arbitrary shape. c2 (torch.Tensor): second channel tensor, in the shape of c1. alpha1 (torch.Tensor): Unused. alpha2 (torch.Tensor): Unused. Returns: (torch.Tensor): Blended channel in the shape of c1
Here is the function:
def blend_logical_and(c1: torch.Tensor, c2: torch.Tensor, alpha1: torch.Tensor, alpha2: torch.Tensor):
    """ Blends boolean-valued channels with a logical AND, cast back to c1's dtype.

    Args:
        c1 (torch.Tensor): first channel tensor of an arbitrary shape.
        c2 (torch.Tensor): second channel tensor, in the shape of c1.
        alpha1 (torch.Tensor): Unused.
        alpha2 (torch.Tensor): Unused.

    Returns:
        (torch.Tensor): Blended channel in the shape of c1
    """
    mask = torch.logical_and(c1, c2)
    # Keep the output dtype consistent with the input channel.
    return mask.to(dtype=c1.dtype)
22,991 | from __future__ import annotations
from typing import Callable, Any
import torch
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `blend_logical_or` function. Write a Python function `def blend_logical_or(c1: torch.Tensor, c2: torch.Tensor, alpha1: torch.Tensor, alpha2: torch.Tensor)` to solve the following problem:
For boolean channels, blends with a logical OR function. Args: c1 (torch.Tensor): first channel tensor of an arbitrary shape. c2 (torch.Tensor): second channel tensor, in the shape of c1. alpha1 (torch.Tensor): Unused. alpha2 (torch.Tensor): Unused. Returns: (torch.Tensor): Blended channel in the shape of c1
Here is the function:
def blend_logical_or(c1: torch.Tensor, c2: torch.Tensor, alpha1: torch.Tensor, alpha2: torch.Tensor):
    """ Blends boolean-valued channels with a logical OR, cast back to c1's dtype.

    Args:
        c1 (torch.Tensor): first channel tensor of an arbitrary shape.
        c2 (torch.Tensor): second channel tensor, in the shape of c1.
        alpha1 (torch.Tensor): Unused.
        alpha2 (torch.Tensor): Unused.

    Returns:
        (torch.Tensor): Blended channel in the shape of c1
    """
    mask = torch.logical_or(c1, c2)
    # Keep the output dtype consistent with the input channel.
    return mask.to(dtype=c1.dtype)
22,992 | from __future__ import annotations
from wisp.core.channel_fn import *
from dataclasses import dataclass
from typing import Any, Optional, Dict
from functools import partial
class Channel:
    """ Defines how a Renderbuffer channel should behave in terms of functionalities like blending, normalization,
    and boundaries.
    """
    # NOTE(review): the fields below use dataclass-style annotated defaults and `dataclass` is
    # imported above; keyword construction such as Channel(blend_fn=...) requires the @dataclass
    # decorator -- confirm it wasn't dropped from this snippet.
    blend_fn: BlendFunction = None
    """" How to blend information from this channel between 2 RenderBuffers """
    normalize_fn: NormalizeFunction = normalize
    """ How to normalize the channel information to a scale of [0, 1] """
    min_val: Optional[Any] = None
    """ Minimal valid value supported by this channel type. None indicates the valid values range from -inf. """
    max_val: Optional[Any] = None
    """ Maximal valid value supported by this channel type . None indicates the valid values range to inf. """
The provided code snippet includes necessary dependencies for implementing the `create_default_channel` function. Write a Python function `def create_default_channel() -> Channel` to solve the following problem:
A general channel template, to be used if no information about a channel have been recorded
Here is the function:
def create_default_channel() -> Channel:
    """ Builds a general-purpose Channel template, used when nothing is known about a channel. """
    default = Channel(blend_fn=blend_alpha_composite_over,
                      normalize_fn=normalize,
                      min_val=None,
                      max_val=None)
    return default
22,993 | from __future__ import annotations
from wisp.core.channel_fn import *
from dataclasses import dataclass
from typing import Any, Optional, Dict
from functools import partial
class Channel:
    """ Defines how a Renderbuffer channel should behave in terms of functionalities like blending, normalization,
    and boundaries.
    """
    # NOTE(review): the fields below use dataclass-style annotated defaults and `dataclass` is
    # imported above; keyword construction such as Channel(blend_fn=...) requires the @dataclass
    # decorator -- confirm it wasn't dropped from this snippet.
    blend_fn: BlendFunction = None
    """" How to blend information from this channel between 2 RenderBuffers """
    normalize_fn: NormalizeFunction = normalize
    """ How to normalize the channel information to a scale of [0, 1] """
    min_val: Optional[Any] = None
    """ Minimal valid value supported by this channel type. None indicates the valid values range from -inf. """
    max_val: Optional[Any] = None
    """ Maximal valid value supported by this channel type . None indicates the valid values range to inf. """
The provided code snippet includes necessary dependencies for implementing the `channels_starter_kit` function. Write a Python function `def channels_starter_kit() -> Dict[str, Channel]` to solve the following problem:
Creates a predefined kit of channels commonly useful in the context of Wisp. Users may augment or replace this kit with additional custom channels.
Here is the function:
def channels_starter_kit() -> Dict[str, Channel]:
    """ Builds a predefined kit of channels commonly useful in the context of Wisp.
    Users may augment or replace this kit with additional custom channels.
    """
    kit: Dict[str, Channel] = {}
    # Color-like channels: alpha-composited, already in [0, 1].
    kit['rgb'] = Channel(blend_fn=blend_alpha_composite_over,
                         normalize_fn=identity,
                         min_val=0.0,
                         max_val=1.0)
    kit['alpha'] = Channel(blend_fn=blend_linear,
                           normalize_fn=normalize,
                           min_val=0.0,
                           max_val=1.0)
    # Depth: front value wins; normalized against a fixed linear scale.
    kit['depth'] = Channel(blend_fn=blend_normal,
                           normalize_fn=partial(normalize_linear_scale, linear_scale=1000.0),
                           min_val=0.0)
    # Directional channel: interpolated over the unit sphere.
    kit['normal'] = Channel(blend_fn=blend_alpha_slerp,
                            normalize_fn=normalize_vector)
    # Boolean hit mask: any layer hitting counts as a hit.
    kit['hit'] = Channel(blend_fn=blend_logical_or,
                         normalize_fn=identity)
    # Error channel: accumulated additively.
    kit['err'] = Channel(blend_fn=blend_add,
                         normalize_fn=normalize)
    kit['gt'] = Channel(blend_fn=blend_alpha_composite_over,
                        normalize_fn=identity,
                        min_val=0.0,
                        max_val=1.0)
    return kit
from typing import Iterator, List, Tuple
def color_wheel():
    """ Returns:
        (list) all colors defined in the color module, each as a tuple of 3 RGB floats.
    """
    # Order matters: consumers cycle through this list sequentially.
    wheel = [white, black, dark_gray, light_purple, lime, red, green, blue, orange]
    wheel += [light_cyan, light_pink, light_yellow, light_teal, gray]
    wheel += [soft_blue, soft_red, lime_green, purple, gold]
    return wheel
The provided code snippet includes necessary dependencies for implementing the `colors_generator` function. Write a Python function `def colors_generator(skip_colors: List = None) -> Tuple[float, float, float]` to solve the following problem:
Generates the next color in the color wheel on each invocation. This generator repeats the color wheel cyclically when exhausted. Args: skip_colors (List): optional list of RGB color tuples to exclude from the cycle.
Here is the function:
def colors_generator(skip_colors: List = None) -> Iterator[Tuple[float, float, float]]:
    """ Yields the next color in the color wheel on each invocation.
    This generator repeats the color wheel cyclically when exhausted.

    Args:
        skip_colors (List): optional list of RGB tuples to exclude from the cycle.

    Yields:
        (Tuple[float, float, float]): the next RGB color.
    """
    # Fixed: the return annotation previously claimed a single tuple, but this is a
    # generator -- annotate it as an Iterator of RGB tuples.
    if skip_colors is None:
        skip_colors = []
    while True:
        for color in color_wheel():
            if color in skip_colors:
                continue
            yield color
22,995 | from __future__ import annotations
import time
import numpy as np
import torch
import torch.nn.functional as F
from typing import Tuple
from wisp.core import RenderBuffer, Rays
from wisp.ops.shaders import matcap_shader, pointlight_shadow_shader
from wisp.ops.differential import finitediff_gradient
from wisp.ops.geometric import normalized_grid, normalized_slice
def _generate_rays(camera_origin, camera_view, camera_right, camera_up, height, width,
                   mode='persp', fov=90.0, device='cuda'):
    """Generates rays from camera parameters.

    Args:
        camera_origin (torch.FloatTensor): [3] size tensor specifying the camera origin
        camera_view (torch.FloatTensor): [3] size tensor specifying the camera view direction
        camera_right (torch.FloatTensor): [3] size tensor specifying the camera right direction
        camera_up (torch.FloatTensor): [3] size tensor specifying the camera up direction
        height (int): height of the image
        width (int): width of the image
        mode (str): string that specifies the camera mode ('persp' or 'ortho').
        fov (float): field of view of the camera, in degrees.
        device (str): device the tensor will be allocated on.

    Returns:
        (torch.FloatTensor, torch.FloatTensor):
            - [height*width, 3] tensor of ray origins (flattened pixel grid)
            - [height*width, 3] tensor of ray directions
    """
    # Pixel grid in normalized device coordinates; tan(fov/2) scales it to the image plane.
    coord = normalized_grid(height, width, device=device)
    # Point on the image plane per pixel: origin + view (plane at unit distance) + in-plane offset.
    ray_origin = camera_right * coord[..., 0, np.newaxis] * np.tan(np.radians(fov / 2)) + \
        camera_up * coord[..., 1, np.newaxis] * np.tan(np.radians(fov / 2)) + \
        camera_origin + camera_view
    ray_origin = ray_origin.reshape(-1, 3)
    ray_offset = camera_view.unsqueeze(0).repeat(ray_origin.shape[0], 1)
    if mode == 'ortho':  # Orthographic camera: all rays parallel to the view direction.
        ray_dir = F.normalize(ray_offset, dim=-1)
    elif mode == 'persp':  # Perspective camera: rays fan out from a single origin.
        ray_dir = F.normalize(ray_origin - camera_origin, dim=-1)
        # Collapse per-pixel origins back to the shared camera origin.
        ray_origin = camera_origin.repeat(ray_dir.shape[0], 1)
    else:
        raise ValueError('Invalid camera mode!')
    return ray_origin, ray_dir
The provided code snippet includes necessary dependencies for implementing the `_look_at` function. Write a Python function `def _look_at(f, t, height, width, mode='persp', fov=90.0, device='cuda')` to solve the following problem:
Vectorized look-at function, returns an array of ray origins and directions This function is mostly just a wrapper on top of generate_rays, but will calculate for you the view, right, and up vectors based on the from and to. Args: f (list of floats): [3] size list or tensor specifying the camera origin t (list of floats): [3] size list or tensor specifying the camera look at point height (int): height of the image width (int): width of the image mode (str): string that specifies the camera mode. fov (float): field of view of the camera. device (str): device the tensor will be allocated on. Returns: (torch.FloatTensor, torch.FloatTensor): - [height, width, 3] tensor of ray origins - [height, width, 3] tensor of ray directions
Here is the function:
def _look_at(f, t, height, width, mode='persp', fov=90.0, device='cuda'):
    """Vectorized look-at function, returns an array of ray origins and directions.

    This function is mostly just a wrapper on top of _generate_rays, but will calculate for you
    the view, right, and up vectors based on the from and to points.

    Args:
        f (list of floats): [3] size list or tensor specifying the camera origin
        t (list of floats): [3] size list or tensor specifying the camera look at point
        height (int): height of the image
        width (int): width of the image
        mode (str): string that specifies the camera mode ('persp' or 'ortho').
        fov (float): field of view of the camera, in degrees.
        device (str): device the tensor will be allocated on.

    Returns:
        (torch.FloatTensor, torch.FloatTensor):
            - [height*width, 3] tensor of ray origins
            - [height*width, 3] tensor of ray directions
    """
    camera_origin = torch.FloatTensor(f).to(device)
    camera_view = F.normalize(torch.FloatTensor(t).to(device) - camera_origin, dim=0)
    # World-up is hardcoded to +Y. NOTE(review): the cross product degenerates when the view
    # direction is parallel to [0, 1, 0] (looking straight up/down) -- confirm callers avoid it.
    world_up = torch.FloatTensor([0, 1, 0]).to(device)
    # Pass dim explicitly: torch.cross without `dim` is deprecated and guesses the dimension.
    camera_right = F.normalize(torch.cross(camera_view, world_up, dim=0), dim=0)
    camera_up = F.normalize(torch.cross(camera_right, camera_view, dim=0), dim=0)
    return _generate_rays(camera_origin, camera_view, camera_right, camera_up,
                          height, width, mode=mode, fov=fov, device=device)
22,996 | import torch
from wisp.core import ObjectTransform
from wisp.models import Pipeline, RasterizationPipeline
from wisp.framework import WispState, BottomLevelRendererState
def add_pipeline_to_scene_graph(state: WispState,
                                name: str,
                                pipeline: Pipeline,
                                transform: ObjectTransform = None,
                                **setup_args):
    """ Registers a neural object pipeline as a new entry in the scene graph.
    The pipeline contains all components required to trace this object; the scene graph
    represents it with a newly created BottomLevelRenderer.

    Args:
        state (WispState): A wisp state object, containing the scene graph information.
        name (str): Unique name of object added to the scene graph
        pipeline (Pipeline): A pipeline consisting of a field (and possibly a tracer) of the added object.
        transform (ObjectTransform): The object transform, containing a 4x4 transformation matrix which transforms
            the object from local object space to world space.
        setup_args (Dict): Optional setup args which control how the BottomLevelRenderer will be created.
    """
    if transform is None:
        # Default to a transform allocated on the same device as the neural field.
        transform = ObjectTransform(device=pipeline.nef.device)
    renderer_state = BottomLevelRendererState(status='pending', transform=transform,
                                              setup_args=setup_args)
    state.graph.neural_pipelines[name] = pipeline
    state.graph.bl_renderers[name] = renderer_state
    request_redraw(state)  # Let renderer core know it should refresh next frame
def add_rasterizer_to_scene_graph(state: WispState,
                                  name: str,
                                  pipeline: RasterizationPipeline,
                                  transform: ObjectTransform = None,
                                  **setup_args):
    """ Registers a rasterization pipeline as a new entry in the scene graph.
    The scene graph represents the object with a newly created BottomLevelRenderer.

    Args:
        state (WispState): A wisp state object, containing the scene graph information.
        name (str): Unique name of object added to the scene graph
        pipeline (RasterizationPipeline): A pipeline consisting of a rasterizer.
        transform (ObjectTransform): The object transform, containing a 4x4 transformation matrix which transforms
            the object from local object space to world space.
        setup_args (Dict): Optional setup args which control how the BottomLevelRenderer will be created.
    """
    if transform is None:
        # Default to a transform allocated on the same device as the rasterizer.
        transform = ObjectTransform(device=pipeline.rasterizer.device)
    renderer_state = BottomLevelRendererState(status='pending', transform=transform,
                                              setup_args=setup_args)
    state.graph.neural_pipelines[name] = pipeline
    state.graph.bl_renderers[name] = renderer_state
    request_redraw(state)  # Let renderer core know it should refresh next frame
The provided code snippet includes necessary dependencies for implementing the `add_to_scene_graph` function. Write a Python function `def add_to_scene_graph(state: WispState, name: str, obj, transform: ObjectTransform = None, **setup_args)` to solve the following problem:
Adds a new object to the scene graph. obj can be any supported object type, neural or non-neural. This is the most general function used to manage adding new objects to the scene graph. Args: state (WispState): A wisp state object, containing the scene graph information. name (str): Unique name of object added to the scene graph obj (object): Any object supported by the scene-graph. i.e: for neural fields, obj is a Pipeline. transform (ObjectTransform): The object transform, containing a 4x4 transformation matrix which transforms the object from local object space to world space. setup_args (Dict): Optional setup args which control how the BottomLevelRenderer will be created.
Here is the function:
def add_to_scene_graph(state: WispState,
                       name: str,
                       obj,
                       transform: ObjectTransform = None,
                       **setup_args):
    """ Adds a new object to the scene graph, dispatching on the object's type.
    This is the most general entry point for registering new objects, neural or non-neural.

    Args:
        state (WispState): A wisp state object, containing the scene graph information.
        name (str): Unique name of object added to the scene graph
        obj (object): Any object supported by the scene-graph,
            i.e: for neural fields, obj is a Pipeline.
        transform (ObjectTransform): The object transform, containing a 4x4 transformation matrix which transforms
            the object from local object space to world space.
        setup_args (Dict): Optional setup args which control how the BottomLevelRenderer will be created.
    """
    if isinstance(obj, Pipeline):
        add_pipeline_to_scene_graph(state, name, obj, transform, **setup_args)
        return
    if isinstance(obj, RasterizationPipeline):
        add_rasterizer_to_scene_graph(state, name, obj, transform, **setup_args)
        return
    # TODO (operel): Currently only the above pipelines are supported
    raise NotImplementedError(f'Unsupported object type added to scene graph: {obj}')
22,997 | import torch
from wisp.core import ObjectTransform
from wisp.models import Pipeline, RasterizationPipeline
from wisp.framework import WispState, BottomLevelRendererState
def request_redraw(state):
    """ Marks the canvas as dirty,
    forcing the renderer core to refresh the object renderers on the next rendering iteration.

    Args:
        state (WispState): A wisp state object whose renderer flag is raised.
    """
    # The renderer core is expected to poll and clear this flag each frame.
    state.renderer.canvas_dirty = True
The provided code snippet includes necessary dependencies for implementing the `remove_from_scene_graph` function. Write a Python function `def remove_from_scene_graph(state: WispState, name: str)` to solve the following problem:
Removes an existing pipeline from the scene graph. Args: state (WispState): A wisp state object, containing the scene graph information. name (str): Unique name of object added to the scene graph
Here is the function:
def remove_from_scene_graph(state: WispState, name: str):
    """Remove an existing pipeline from the scene graph.

    Args:
        state (WispState): A wisp state object, containing the scene graph information.
        name (str): Unique name of object added to the scene graph
    """
    assert name in state.graph.neural_pipelines, f'Scene graph requested to remove non-existing object: {name}'
    # Drop both the pipeline entry and its matching bottom-level renderer entry.
    for registry in (state.graph.neural_pipelines, state.graph.bl_renderers):
        del registry[name]
    # Let renderer core know it should refresh next frame
    request_redraw(state)
22,998 | from __future__ import annotations
from collections import defaultdict, deque
from typing import Type, TYPE_CHECKING, Union
from wisp.models import Pipeline, RasterizationPipeline
from wisp.models.nefs import BaseNeuralField
from wisp.tracers import BaseTracer
def _neural_field_to_renderer_cls(pipeline: Pipeline) -> Type[RayTracedRenderer]:
    """Resolve the registered renderer class for a ray-traced neural pipeline.

    The registry is queried by (neural field class name, tracer class name).
    The hierarchy of neural fields takes precedence over the hierarchy of tracers:
    for each tracer type (walking up the tracer's ancestors), the nef type and all
    of its ancestors are queried breadth-first.

    Args:
        pipeline (Pipeline): The pipeline whose nef + tracer pair should be matched.

    Returns:
        (Type[RayTracedRenderer]): The renderer class registered for this combination.

    Raises:
        ValueError: If no registered renderer matches the pipeline's nef / tracer pair.
    """
    tracer_type = type(pipeline.tracer)
    # Start by iterating the current tracer type - look for renderers compatible with the current nef type
    # or any of its parents (the hierarchy of nefs take precedence over the hierarchy of tracers).
    renderer_cls = None
    while tracer_type:
        tracer_name = tracer_type.__name__
        # Look for a renderer compatible with the current tracer type and nef classes.
        # BFS over the nef's class hierarchy, starting at the concrete nef type.
        type_queue = deque([type(pipeline.nef)])
        renderer_cls = None
        while type_queue:
            # Query nef + tracer combo
            field_type = type_queue.popleft()
            field_name = field_type.__name__
            supported_tracers = _REGISTERED_RENDERABLE_NEURAL_FIELDS.get(field_name)
            renderer_cls = supported_tracers.get(tracer_name) if supported_tracers is not None else None
            # Current nef + tracer pair doesn't match any registered renderer
            if renderer_cls is not None:
                break
            else:  # Try querying all parent(nef) + tracer combos for compatibility
                bases = field_type.__bases__
                if len(bases) > 0:
                    for b in bases:
                        type_queue.append(b)
        if renderer_cls is not None:
            break  # Found a renderer class
        else:
            # Didn't find a renderer class - repeat the process with the parent class of the tracer
            tracer_base_types = tracer_type.__bases__
            if len(tracer_base_types) > 0:
                # Does tracer have a single parent class which inherits from BaseTracer?
                # If so, keep looking
                tracer_base_types = [base for base in tracer_type.__bases__ if issubclass(base, BaseTracer)]
                tracer_type = tracer_base_types[0] if len(tracer_base_types) == 1 else None
            else:
                # Reached end of tracers hierarchy or it is too ambiguous, quit and fail gracefully
                tracer_type = None
    if tracer_type is None:
        raise ValueError(f'Renderer factory encountered an unknown neural pipeline: '
                         f'Neural Field {type(pipeline.nef).__name__} with tracer {type(pipeline.tracer).__name__}. '
                         'Please register the factory to reflect what kind of renderer should be created for this '
                         'type of neural field/tracer.')
    return renderer_cls
def _neural_rasterizer_to_renderer_cls(pipeline: RasterizationPipeline) -> Type[RasterizedRenderer]:
    """Resolve the registered renderer class for a rasterization pipeline.

    Performs a breadth-first search over the rasterizer's class hierarchy:
    the concrete rasterizer type is queried first, then its base classes,
    until a registered renderer is found.

    Raises:
        ValueError: If neither the rasterizer type nor any of its ancestors
            was registered with a renderer.
    """
    matched_renderer = None
    pending_types = deque([type(pipeline.rasterizer)])
    while pending_types:
        candidate = pending_types.popleft()
        # Look for a renderer compatible with this rasterizer class
        matched_renderer = _REGISTERED_RENDERABLE_RASTERIZERS.get(candidate.__name__)
        if matched_renderer is not None:
            break
        # No direct match - enqueue parent classes and keep searching upwards
        pending_types.extend(candidate.__bases__)
    if matched_renderer is None:
        raise ValueError(f'Renderer factory encountered an unknown neural rasterization pipeline: '
                         f'{type(pipeline.rasterizer).__name__}. '
                         'Please register the factory to reflect what kind of renderer should be created for this '
                         'type of rasterization pipeline.')
    return matched_renderer
class BottomLevelRenderer(WispModule, ABC):
    """Abstract base class for renderers of individual scene-graph objects.

    A bottom-level renderer produces a RenderBuffer for a single object and may
    expose data layers: packs of primitives visually describing its internals.
    """

    def __init__(self, *args, **kwargs):
        super().__init__()
        # Cached data layers; regenerated by redraw() when needs_redraw() is True.
        self._data_layers = {}

    def pre_render(self, payload: FramePayload, *args, **kwargs) -> None:
        """Per-frame setup hook, for example:
        1. Ray Tracers - ray generation.
        2. Update shader uniforms.
        Default implementation is a no-op.
        """
        pass

    def needs_refresh(self, payload: FramePayload, *args, **kwargs) -> bool:
        """Override to optimize cases when the neural field does not require rendering from scratch."""
        return True

    def render(self, *args, **kwargs) -> RenderBuffer:
        """Produce a RenderBuffer for this object; subclasses must implement."""
        raise NotImplementedError('BottomLevelRenderer subclasses must implement render() logic.')

    def redraw(self):
        """Regenerate cached data layers if the renderer reports a redraw is needed."""
        if self.needs_redraw():
            self._data_layers = self.regenerate_data_layers()

    def post_render(self, *args, **kwargs) -> None:
        """Per-frame teardown hook, for example:
        1. Cleanup of temporary information generated during the frame.
        2. Caching of information relevant for the next frames.
        Default implementation is a no-op.
        """
        pass

    def needs_redraw(self) -> bool:
        """Whether data layers should be regenerated on the next redraw(). Default: always."""
        return True

    def regenerate_data_layers(self) -> Dict[str, PrimitivesPack]:
        """Build fresh data layers; the base implementation exposes none."""
        return {}

    def data_layers(self) -> Dict[str, PrimitivesPack]:
        """Returns layers of information made of primitives, visually describing the renderer internal structures."""
        return self._data_layers

    def device(self) -> torch.device:
        raise NotImplementedError('BottomLevelRenderer subclasses must implement device')

    def dtype(self) -> torch.dtype:
        raise NotImplementedError('BottomLevelRenderer subclasses must implement dtype')

    def acceleration_structure(self) -> Optional[str]:
        """Returns a descriptive name of the acceleration structure used by this object, if applicable."""
        return None

    def features_structure(self) -> Optional[str]:
        """Returns a descriptive name of the feature structure used by this object, if applicable."""
        return None

    def public_properties(self) -> Dict[str, Any]:
        """Wisp modules expose their public properties in a dictionary,
        for the purpose of logging, gui apps, etc."""
        return dict()
def create_neural_field_renderer(neural_object: Union[Pipeline, RasterizationPipeline], **kwargs) -> BottomLevelRenderer:
    """Build a BottomLevelRenderer matching the given neural pipeline.

    The renderer class is resolved from the registry according to the pipeline type.
    kwargs may override the pipeline's default renderer settings.
    """
    if isinstance(neural_object, Pipeline):
        # Ray-traced pipeline: resolve by the (neural field, tracer) combination.
        renderer_cls = _neural_field_to_renderer_cls(neural_object)
    elif isinstance(neural_object, RasterizationPipeline):
        # Rasterization pipeline: resolve by the rasterizer type.
        renderer_cls = _neural_rasterizer_to_renderer_cls(neural_object)
    else:
        raise NotImplementedError(f"Unknown pipeline type: {neural_object}")
    # Both branches use the specialized constructor which assembles the renderer
    # from the pipeline's components; kwargs may override the pipeline defaults.
    return renderer_cls.from_pipeline(neural_object, **kwargs)
from __future__ import annotations
from typing import Type
from wisp.models.nefs import BaseNeuralField
from wisp.tracers import BaseTracer
from wisp.renderer.core.api.base_renderer import BottomLevelRenderer
from wisp.renderer.core.api.renderers_factory import register_neural_field_type, register_rasterizer_type
class BottomLevelRenderer(WispModule, ABC):
    """Abstract base class for renderers of individual scene-graph objects.

    A bottom-level renderer produces a RenderBuffer for a single object and may
    expose data layers: packs of primitives visually describing its internals.
    """

    def __init__(self, *args, **kwargs):
        super().__init__()
        # Cached data layers; regenerated by redraw() when needs_redraw() is True.
        self._data_layers = {}

    def pre_render(self, payload: FramePayload, *args, **kwargs) -> None:
        """Per-frame setup hook, for example:
        1. Ray Tracers - ray generation.
        2. Update shader uniforms.
        Default implementation is a no-op.
        """
        pass

    def needs_refresh(self, payload: FramePayload, *args, **kwargs) -> bool:
        """Override to optimize cases when the neural field does not require rendering from scratch."""
        return True

    def render(self, *args, **kwargs) -> RenderBuffer:
        """Produce a RenderBuffer for this object; subclasses must implement."""
        raise NotImplementedError('BottomLevelRenderer subclasses must implement render() logic.')

    def redraw(self):
        """Regenerate cached data layers if the renderer reports a redraw is needed."""
        if self.needs_redraw():
            self._data_layers = self.regenerate_data_layers()

    def post_render(self, *args, **kwargs) -> None:
        """Per-frame teardown hook, for example:
        1. Cleanup of temporary information generated during the frame.
        2. Caching of information relevant for the next frames.
        Default implementation is a no-op.
        """
        pass

    def needs_redraw(self) -> bool:
        """Whether data layers should be regenerated on the next redraw(). Default: always."""
        return True

    def regenerate_data_layers(self) -> Dict[str, PrimitivesPack]:
        """Build fresh data layers; the base implementation exposes none."""
        return {}

    def data_layers(self) -> Dict[str, PrimitivesPack]:
        """Returns layers of information made of primitives, visually describing the renderer internal structures."""
        return self._data_layers

    def device(self) -> torch.device:
        raise NotImplementedError('BottomLevelRenderer subclasses must implement device')

    def dtype(self) -> torch.dtype:
        raise NotImplementedError('BottomLevelRenderer subclasses must implement dtype')

    def acceleration_structure(self) -> Optional[str]:
        """Returns a descriptive name of the acceleration structure used by this object, if applicable."""
        return None

    def features_structure(self) -> Optional[str]:
        """Returns a descriptive name of the feature structure used by this object, if applicable."""
        return None

    def public_properties(self) -> Dict[str, Any]:
        """Wisp modules expose their public properties in a dictionary,
        for the purpose of logging, gui apps, etc."""
        return dict()
def register_neural_field_type(neural_field_type: Type[BaseNeuralField],
                               tracer_type: Type[BaseTracer],
                               renderer_type: Type[BottomLevelRenderer]):
    """Associate a (neural field, tracer) pair with a bottom-level renderer class.

    Registered combinations allow the interactive renderer to display this
    neural field type on the canvas.
    """
    # The registry is keyed by class names: field name -> tracer name -> renderer class.
    _REGISTERED_RENDERABLE_NEURAL_FIELDS[neural_field_type.__name__][tracer_type.__name__] = renderer_type
The provided code snippet includes necessary dependencies for implementing the `field_renderer` function. Write a Python function `def field_renderer(field_type: Type[BaseNeuralField], tracer_type: Type[BaseTracer])` to solve the following problem:
A decorator that registers a neural field type with a renderer. By registering the renderer type, the interactive renderer knows what type of renderer to create when dealing with this type of field. Essentially, this allows displaying custom types of objects on the canvas.
Here is the function:
def field_renderer(field_type: Type[BaseNeuralField], tracer_type: Type[BaseTracer]):
    """Class decorator which registers a neural field type with a renderer.

    By registering the renderer type, the interactive renderer knows what type
    of renderer to create when dealing with this type of field, allowing custom
    object types to be displayed on the canvas.
    """
    def _decorator(renderer_class: Type[BottomLevelRenderer]):
        register_neural_field_type(field_type, tracer_type, renderer_class)
        return renderer_class

    return _decorator
from __future__ import annotations
from typing import Type
from wisp.models.nefs import BaseNeuralField
from wisp.tracers import BaseTracer
from wisp.renderer.core.api.base_renderer import BottomLevelRenderer
from wisp.renderer.core.api.renderers_factory import register_neural_field_type, register_rasterizer_type
class BottomLevelRenderer(WispModule, ABC):
    """Abstract base class for renderers of individual scene-graph objects.

    A bottom-level renderer produces a RenderBuffer for a single object and may
    expose data layers: packs of primitives visually describing its internals.
    """

    def __init__(self, *args, **kwargs):
        super().__init__()
        # Cached data layers; regenerated by redraw() when needs_redraw() is True.
        self._data_layers = {}

    def pre_render(self, payload: FramePayload, *args, **kwargs) -> None:
        """Per-frame setup hook, for example:
        1. Ray Tracers - ray generation.
        2. Update shader uniforms.
        Default implementation is a no-op.
        """
        pass

    def needs_refresh(self, payload: FramePayload, *args, **kwargs) -> bool:
        """Override to optimize cases when the neural field does not require rendering from scratch."""
        return True

    def render(self, *args, **kwargs) -> RenderBuffer:
        """Produce a RenderBuffer for this object; subclasses must implement."""
        raise NotImplementedError('BottomLevelRenderer subclasses must implement render() logic.')

    def redraw(self):
        """Regenerate cached data layers if the renderer reports a redraw is needed."""
        if self.needs_redraw():
            self._data_layers = self.regenerate_data_layers()

    def post_render(self, *args, **kwargs) -> None:
        """Per-frame teardown hook, for example:
        1. Cleanup of temporary information generated during the frame.
        2. Caching of information relevant for the next frames.
        Default implementation is a no-op.
        """
        pass

    def needs_redraw(self) -> bool:
        """Whether data layers should be regenerated on the next redraw(). Default: always."""
        return True

    def regenerate_data_layers(self) -> Dict[str, PrimitivesPack]:
        """Build fresh data layers; the base implementation exposes none."""
        return {}

    def data_layers(self) -> Dict[str, PrimitivesPack]:
        """Returns layers of information made of primitives, visually describing the renderer internal structures."""
        return self._data_layers

    def device(self) -> torch.device:
        raise NotImplementedError('BottomLevelRenderer subclasses must implement device')

    def dtype(self) -> torch.dtype:
        raise NotImplementedError('BottomLevelRenderer subclasses must implement dtype')

    def acceleration_structure(self) -> Optional[str]:
        """Returns a descriptive name of the acceleration structure used by this object, if applicable."""
        return None

    def features_structure(self) -> Optional[str]:
        """Returns a descriptive name of the feature structure used by this object, if applicable."""
        return None

    def public_properties(self) -> Dict[str, Any]:
        """Wisp modules expose their public properties in a dictionary,
        for the purpose of logging, gui apps, etc."""
        return dict()
def register_rasterizer_type(rasterizer_type: Type, renderer_type: Type[BottomLevelRenderer]):
    """Associate a rasterizer class with a bottom-level renderer class.

    Registered rasterizers can be displayed by the interactive renderer on the canvas.
    """
    # The registry is keyed by the rasterizer's class name.
    key = rasterizer_type.__name__
    _REGISTERED_RENDERABLE_RASTERIZERS[key] = renderer_type
The provided code snippet includes necessary dependencies for implementing the `register_rasterizer` function. Write a Python function `def register_rasterizer(rasterizer_type: Type)` to solve the following problem:
A decorator that registers a rasterizer type with a renderer. By registering the renderer type, the interactive renderer knows what type of renderer to create when dealing with this type of rasterizer. Essentially, this allows displaying custom types of objects on the canvas.
Here is the function:
def register_rasterizer(rasterizer_type: Type):
    """Class decorator which registers a rasterizer type with a renderer.

    By registering the renderer type, the interactive renderer knows what type
    of renderer to create when dealing with this type of rasterizer, allowing
    custom object types to be displayed on the canvas.
    """
    def _decorator(renderer_class: Type[BottomLevelRenderer]):
        register_rasterizer_type(rasterizer_type, renderer_class)
        return renderer_class

    return _decorator
from __future__ import annotations
import copy
import torch
import wisp.framework.state as state
from wisp.renderer.core.control.camera_controller_mode import CameraControlMode
from wisp.renderer.core.control.io import WispMouseButton
def quat_mul(Q1, Q2):
    """Hamilton product Q1 * Q2 of two quaternions stored as (x, y, z, w) tensors."""
    x1, y1, z1, w1 = Q1[0], Q1[1], Q1[2], Q1[3]
    x2, y2, z2, w2 = Q2[0], Q2[1], Q2[2], Q2[3]
    return torch.tensor([w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
                         w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
                         w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
                         w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2])
from __future__ import annotations
import copy
import torch
import wisp.framework.state as state
from wisp.renderer.core.control.camera_controller_mode import CameraControlMode
from wisp.renderer.core.control.io import WispMouseButton
def quat_matrix(q):
    """Convert a quaternion (x, y, z, w) to a homogeneous 4x4 rotation matrix (float64).

    Note: the formula is valid only for unit quaternions; no normalization is performed.
    """
    x, y, z, w = q[0], q[1], q[2], q[3]
    xx, yy, zz, ww = x * x, y * y, z * z, w * w
    xy, xz, yz = x * y, x * z, y * z
    xw, yw, zw = x * w, y * w, z * w
    return torch.tensor([[ww + xx - yy - zz, 2.0 * (xy - zw), 2.0 * (xz + yw), 0.0],
                         [2.0 * (xy + zw), ww - xx + yy - zz, 2.0 * (yz - xw), 0.0],
                         [2.0 * (xz - yw), 2.0 * (yz + xw), ww - xx - yy + zz, 0.0],
                         [0.0, 0.0, 0.0, 1.0]], dtype=torch.float64)
from __future__ import annotations
import abc
import math
import numpy as np
import torch
import copy
from collections import defaultdict
from typing import Dict, List, Iterable, Tuple
from kaolin.render.camera import Camera, PinholeIntrinsics, OrthographicIntrinsics
from wisp.framework import WispState, BottomLevelRendererState
from wisp.core import RenderBuffer, Rays, PrimitivesPack, create_default_channel, ObjectTransform
from wisp.ops.raygen import generate_pinhole_rays, generate_ortho_rays, generate_centered_pixel_coords
from wisp.renderer.core.api import BottomLevelRenderer, RayTracedRenderer, FramePayload, create_neural_field_renderer
from wisp.gfx.datalayers import CameraDatalayers
class RendererCore:
def __init__(self, state: WispState):
    """Initialize the renderer core from the app's wisp state.

    Args:
        state (WispState): The global wisp state; supplies the render device,
            the scene graph and the user-selected camera settings.
    """
    self.state = state
    self.device = state.renderer.device
    # Create a camera for user view
    self.camera = self._setup_camera(state)
    self._camera_layers = CameraDatalayers()
    # Set up the list of available bottom level object renderers, according to scene graph
    self._renderers = None
    self._tlas = None
    self.refresh_bl_renderers(state)
    # Canvas render resolution; initialized to the camera's full resolution below.
    self.res_x, self.res_y = None, None
    self.set_full_resolution()
    # Caches used to detect changes between frames and reuse stale renderbuffers.
    self._last_state = dict()
    self._last_renderbuffer = None
    # Minimal resolution supported by RendererCore
    self.MIN_RES = 64
def _default_camera(self, lens="perspective"):
    """Create a default camera for the user view.

    Args:
        lens (str): Either 'perspective' (pinhole) or 'orthographic'.

    Returns:
        (kaolin.render.camera.Camera): A camera on self.device with default pose and intrinsics.

    Raises:
        ValueError: If an unsupported lens type is requested.
    """
    # TODO: move defaults elsewhere
    if lens == 'perspective':
        return Camera.from_args(
            eye=torch.tensor([6.0, 6.0, 6.0]),
            at=torch.tensor([0.0, 0.0, 0.0]),
            up=torch.tensor([0.0, 1.0, 0.0]),
            fov=40 * np.pi / 180,  # In radians
            x0=0.0, y0=0.0,
            width=900, height=675,
            near=1e-2, far=30,
            dtype=torch.float64,
            device=self.device
        )
    elif lens == 'orthographic':
        return Camera.from_args(
            eye=torch.tensor([6.0, 6.0, 6.0]),
            at=torch.tensor([0.0, 0.0, 0.0]),
            up=torch.tensor([0.0, 1.0, 0.0]),
            width=800, height=800,
            near=-800, far=800,
            fov_distance=1.0,
            dtype=torch.float64,
            device=self.device
        )
    else:
        # Previously an unknown lens silently fell through and returned None, which
        # only crashed later when the "camera" was used; fail fast instead.
        raise ValueError(f'Unknown camera lens type: {lens}')
def change_camera_projection_mode(self, lens: str) -> None:
    """Switch the user camera between perspective and orthographic projection.

    Only the intrinsic component of the camera is replaced; the extrinsics (pose) are kept.

    Args:
        lens (str): 'Perspective' or 'Orthographic'.

    Raises:
        ValueError: If an unknown lens type is given.
    """
    # Need to update only the intrinsic component of the camera
    # TODO (operel): in the future, kaolin should implement a capability to switch lens and approximate params
    #  from one type to another where applicable
    if lens == 'Perspective':
        intrinsics = PinholeIntrinsics.from_fov(
            fov=30 * np.pi / 180,  # In radians
            x0=0.0, y0=0.0,
            width=self.camera.width, height=self.camera.height,
            near=1e-2, far=1e2,
            dtype=self.camera.dtype,
            device=self.camera.device
        )
    elif lens == 'Orthographic':
        # NOTE(review): near/far below are derived from width/height (near=-width, far=height),
        # which is asymmetric for non-square canvases - confirm this is intended
        # (the default orthographic camera uses symmetric near=-800, far=800).
        intrinsics = OrthographicIntrinsics.from_frustum(
            width=self.camera.width, height=self.camera.height,
            near=-self.camera.width, far=self.camera.height,
            fov_distance=1.0,
            dtype=self.camera.dtype,
            device=self.camera.device
        )
    else:
        raise ValueError(f"Unknown lens type: {lens} given to render_core.change_camera_projection_mode")
    self.camera.intrinsics = intrinsics
    del self._last_state['camera']  # The previous camera is now obsolete
def _setup_camera(self, state: WispState):
# Use selected camera to control canvas
camera = state.renderer.selected_camera
if camera is None:
# Create a default camera
lens_type = self.state.renderer.selected_camera_lens
camera = self._default_camera(lens_type)
camera = camera.to(self.device)
return camera
def refresh_bl_renderers(self, state: WispState) -> None:
    """Synchronize the bottom-level renderers with the scene graph.

    Removes renderers of pipelines that no longer exist, lazily constructs
    renderers for 'pending' entries, skips 'ignored' ones, and finally rebuilds
    the top-level acceleration structure.

    Args:
        state (WispState): The wisp state holding the scene graph.
    """
    renderers = dict()
    scene_graph = state.graph
    # Remove obsolete bottom level renderers for pipelines that no longer exist
    for obj_name in list(scene_graph.bl_renderers.keys()):
        if obj_name not in scene_graph.neural_pipelines:
            del scene_graph.bl_renderers[obj_name]
    # Set up a renderer for all neural pipelines in the scene
    for renderer_id, neural_pipeline in scene_graph.neural_pipelines.items():
        # See if a descriptor for the renderer already exists.
        bl_state = scene_graph.bl_renderers.get(renderer_id)
        if bl_state is None:
            # If not, create a default one
            object_transform = ObjectTransform(device=self.device, dtype=self.camera.dtype)
            bl_state = BottomLevelRendererState(status='pending', setup_args=dict(), transform=object_transform)
            scene_graph.bl_renderers[renderer_id] = bl_state
        if bl_state.status == 'loaded':
            # Renderer instance already exists and is ready for use
            assert bl_state.renderer is not None, \
                f'status of renderer {renderer_id} shows it was loaded, but renderer instance is None.'
            renderers[renderer_id] = bl_state.renderer
        elif bl_state.status == 'pending':
            # Lazily construct the renderer from the pipeline and its setup args
            bl_state.renderer = create_neural_field_renderer(neural_object=neural_pipeline, **bl_state.setup_args)
            bl_state.status = 'loaded'
            renderers[renderer_id] = bl_state.renderer
            scene_graph.visible_objects[renderer_id] = True  # Mark as visible when first loaded
        elif bl_state.status == 'ignored':
            pass  # Skip renderer on purpose
        else:
            raise ValueError(f'Invalid bottom level renderer state: {bl_state.status}')
    self._renderers = renderers
    # Refresh TLAS
    self._tlas = self._setup_tlas(state)
def _setup_tlas(self, state: WispState):
    """Build the top-level acceleration structure over the bottom-level renderers.

    Currently a straightforward ordered-list stub (no spatial acceleration).
    """
    return ListTLAS(state, self._renderers)
def set_full_resolution(self):
self.res_x = self.camera.width
self.res_y = self.camera.height
self.interactive_mode = False
def set_low_resolution(self, downscale_factor: int = 4):
self.res_x = self.camera.width // downscale_factor
self.res_y = self.camera.height // downscale_factor
self.interactive_mode = True
def resize_canvas(self, width, height):
self.camera.intrinsics.width = width
self.camera.intrinsics.height = height
self.set_low_resolution()
def _update_scene_graph(self):
    """Update scene graph information about objects and their data layers."""
    # New data layers maybe have been generated, update the scene graph about their existence.
    # Some existing layers may have been regenerated, in which case we copy their previous "toggled on/off" status.
    data_layers = self._bl_renderer_data_layers()
    for object_id, bl_renderer_state in self.state.graph.bl_renderers.items():
        object_layers = data_layers[object_id]
        bl_renderer_state.data_layers = object_layers
        # Rebuild the toggle map; layers default to "off" unless previously toggled on.
        toggled_data_layers = defaultdict(bool)
        for layer_name, layer in object_layers.items():  # Copy over previous toggles if such exist
            toggled_data_layers[layer_name] = bl_renderer_state.toggled_data_layers.get(layer_name, False)
        bl_renderer_state.toggled_data_layers = toggled_data_layers
def redraw(self) -> None:
    """Allow bottom-level renderers to refresh internal information, such as data layers."""
    # Read phase: sync with the scene graph; renderers are created for newly added objects.
    self.refresh_bl_renderers(self.state)
    # Ask every visible renderer to refresh its content
    # (i.e. data layers may get generated here behind the scenes).
    visible = self.state.graph.visible_objects
    for object_id, bl_renderer in self._renderers.items():
        if object_id in visible:
            bl_renderer.redraw()
    # Write phase: push fresh info (e.g. newly generated data layers) back into the scene graph.
    self._update_scene_graph()
def render(self, time_delta=None, force_render=False) -> RenderBuffer:
    """Render a frame.

    Args:
        time_delta (float): Time elapsed since the previous frame; used to adapt
            renderer parameters to the amount of detected lag.
        force_render (bool): If True, always produce a fresh RenderBuffer.
            Otherwise a stale copy of the previous frame may be returned
            when no updates are detected.

    Returns:
        (wisp.core.RenderBuffer): The rendered buffer.
    """
    # Frame pipeline: gather frame metadata, render the payload, then post-process & cache.
    payload = self._prepare_payload(time_delta)
    framebuffer = self._render_payload(payload, force_render)
    return self._post_render(payload, framebuffer)
def _prepare_payload(self, time_delta=None) -> FramePayload:
    """This function will prepare the FramePayload for the current frame.
    The FramePayload contains metadata for the current frame, from which the RenderBuffer will be
    generated from.

    Args:
        time_delta (float): The time delta from the previous frame, used to control renderer parameters
            based on the amount of detected lag.

    Returns:
        (wisp.renderer.core.api.FramePayload): The metadata for the frame.
    """
    # Adjust resolution of all renderers to maintain FPS
    camera = self.camera
    clear_color = self.state.renderer.clear_color_value
    res_x, res_y = self.res_x, self.res_y
    # If the FPS is slow, downscale the resolution for the render.
    if self.interactive_mode and time_delta is not None:
        # target_delta = 1.0 / self.target_interactive_fps
        target_delta = 1.0 / 20.0  # Budget per frame: aim at ~20 FPS in interactive mode
        if 'res_x' in self._last_state:
            num_pixels = self._last_state['res_x'] * self._last_state['res_y']
        else:
            num_pixels = res_x * res_y
        # Estimate how many pixels fit within the frame-time budget, preserving aspect ratio.
        time_per_pixel = time_delta / float(num_pixels)
        target_num_pixels = target_delta / time_per_pixel
        screen_ratio = res_x / res_y
        res = math.sqrt(target_num_pixels / screen_ratio)
        res_x = min(res_x, int(math.floor(res * screen_ratio)))
        res_y = min(res_y, int(math.floor(res)))
        # Never drop below the minimal supported resolution.
        if res_y < self.MIN_RES:
            res_x = int(math.floor(self.MIN_RES * screen_ratio))
            res_y = int(math.floor(self.MIN_RES))
        # Hysteresis: ignore small resolution changes to avoid flickering between frames.
        if 'res_x' in self._last_state:
            if abs(res_x - self._last_state['res_x']) < 10:
                res_x = self._last_state['res_x']
                res_y = self._last_state['res_y']
    # TODO(ttakikawa): Leaving a note here to think about whether this should be the case...
    # The renderer always needs depth, alpha, and rgb
    required_channels = {"rgb", "depth", "alpha"}
    selected_canvas_channel = self.state.renderer.selected_canvas_channel.lower()
    visible_objects = set([k for k, v in self.state.graph.visible_objects.items() if v])
    payload = FramePayload(camera=camera, interactive_mode=self.interactive_mode,
                           render_res_x=res_x, render_res_y=res_y, time_delta=time_delta,
                           visible_objects=visible_objects, clear_color=clear_color,
                           channels={selected_canvas_channel}.union(required_channels))
    # Give visible renderers a chance to run per-frame setup (e.g. ray generation).
    for renderer_id, renderer in self._renderers.items():
        if renderer_id in payload.visible_objects:
            renderer.pre_render(payload)
    return payload
def raygen(self, camera, res_x, res_y):
    """Generate camera rays over a centered (res_x, res_y) pixel grid,
    according to the camera's lens type."""
    pixel_grid = generate_centered_pixel_coords(camera.width, camera.height, res_x, res_y, device=self.device)
    lens = camera.lens_type
    if lens == 'pinhole':
        return generate_pinhole_rays(camera, pixel_grid)
    if lens == 'ortho':
        return generate_ortho_rays(camera, pixel_grid)
    raise ValueError(f'RendererCore failed to raygen on unknown camera lens type: {camera.lens_type}')
def _create_empty_rb(self, height, width, dtype=torch.float32) -> RenderBuffer:
    """Create a cleared RenderBuffer of shape (height, width) on the render device,
    filled with the configured clear color / depth values."""
    clear_color = self.state.renderer.clear_color_value
    clear_depth = self.state.renderer.clear_depth_value
    rgb = torch.tensor(clear_color, dtype=dtype, device=self.device).repeat(height, width, 1)
    alpha = torch.zeros((height, width, 1), dtype=dtype, device=self.device)
    depth = torch.full((height, width, 1), fill_value=clear_depth, dtype=dtype, device=self.device)
    return RenderBuffer(rgb=rgb, alpha=alpha, depth=depth, hit=None)
def _render_payload(self, payload: FramePayload, force_render: bool) -> RenderBuffer:
    """Renders a RenderBuffer using a FramePayload which contains metadata.

    Args:
        payload (wisp.renderer.core.api.FramePayload): Metadata for the frame to be renderered.
        force_render (bool): If True, will always output a fresh new RenderBuffer.
            Otherwise the RenderBuffer can be a stale copy of the the previous frame
            if no updates are detected.

    Returns:
        (wisp.core.RenderBuffer): The rendered buffer.
    """
    camera = payload.camera
    res_x, res_y = payload.render_res_x, payload.render_res_y
    visible_renderers = [r for r_id, r in self._renderers.items() if r_id in payload.visible_objects]
    renderers_to_refresh = list(filter(lambda renderer: renderer.needs_refresh(payload), visible_renderers))
    # Early out: reuse the cached buffer when neither the core nor any renderer changed.
    if not self.needs_refresh() and len(renderers_to_refresh) == 0 and not force_render:
        return self._last_renderbuffer  # No need to regenerate..
    # Generate rays
    rays = self.raygen(camera, res_x, res_y)
    renderers_in_view = self._tlas.traverse(rays, payload)
    rb_dtype = torch.float32
    clear_depth = self.state.renderer.clear_depth_value
    out_rb = self._create_empty_rb(height=camera.height, width=camera.width, dtype=rb_dtype)
    for renderer, hit_rays in renderers_in_view:
        if isinstance(renderer, RayTracedRenderer):
            in_rays = hit_rays.to(device=renderer.device, dtype=renderer.dtype)
            rb = renderer.render(in_rays)
        else:  # RasterizedRenderer
            # TODO (operel): Handle transformed rasterized objects
            in_cam = self.camera.to(device=renderer.device, dtype=renderer.dtype)
            rb = renderer.render(in_cam)
        # Convert the per-object buffer to the canvas device / dtype before compositing.
        rb = rb.to(device=self.device)
        rb.rgb = rb.rgb.to(dtype=rb_dtype)
        rb.alpha = rb.alpha.to(dtype=rb_dtype)
        rb.depth = rb.depth.to(dtype=rb_dtype)
        # TODO (operel): if rb.depth is None -> painters algorithm
        # Normalize ray-traced depth buffer to graphics api range
        img_dims = rb.depth.shape
        # Clip depth values which fall outside of the view frustum
        clip_mask = camera.clip_mask(rb.depth.squeeze(-1))
        rb.alpha[~clip_mask] = 0.0
        # Normalize depth from [0, inf] to NDC space according to camera settings
        # (depends on near / far and NDC space)
        ndc_depth = camera.normalize_depth(rb.depth.reshape(-1, 1))
        rb.depth = ndc_depth.reshape(img_dims)
        # Set depth of missed rays to far clipping plane, as PackedRFTracer initializes depth to 0 and writes values
        # only for hit rays
        alpha_mask = ~rb.hit[..., 0]
        rb.depth[alpha_mask] = clear_depth
        rb.depth = rb.depth.to(rb_dtype)
        # Composite this object's buffer over the accumulated output, in TLAS traversal order.
        out_rb = out_rb.blend(rb, channel_kit=self.state.graph.channels)
    return out_rb
def _post_render(self, payload: FramePayload, rb: RenderBuffer) -> RenderBuffer:
    """Frame teardown: cache results, record state for change detection, let
    renderers run their post-frame hooks, and convert the selected output
    channel to an RGBA buffer for display.

    Args:
        payload (wisp.renderer.core.api.FramePayload): Metadata of the frame just rendered.
        rb (wisp.core.RenderBuffer): The freshly rendered buffer.

    Returns:
        (wisp.core.RenderBuffer): A buffer whose rgb channel holds the currently viewed mode.
    """
    # Update current resolution in case it was decreased to maintain fps
    self.res_x, self.res_y = payload.render_res_x, payload.render_res_y
    # Cache information to accelerate next frames
    self._last_renderbuffer = rb
    # Record last state, to, i.e, calculate if needs to redraw future frames
    self._last_state['camera'] = copy.deepcopy(payload.camera)
    self._last_state['res_x'] = payload.render_res_x
    self._last_state['res_y'] = payload.render_res_y
    # Give visible renderers a chance to run per-frame cleanup / caching.
    for renderer_id, renderer in self._renderers.items():
        if renderer_id in payload.visible_objects:
            renderer.post_render()
    # Create an output renderbuffer to contain the currently viewed mode as rgba channel
    output_rb = self.map_output_channels_to_rgba(rb)
    return output_rb
def needs_refresh(self) -> bool:
    """Return True if the canvas should be re-rendered from scratch.

    Triggers on: first frame (no recorded state), a previous frame rendered
    below full resolution, or any recorded attribute (e.g. the camera) that
    changed since the last frame.
    """
    if len(self._last_state) == 0:
        return True
    # Resolution check: if not full resolution - canvas is dirty
    if self._last_state['res_x'] < self.camera.width or self._last_state['res_y'] < self.camera.height:
        return True
    for att_name, prev_val in self._last_state.items():
        # Skip recorded keys that have no matching attribute on the core.
        if not hasattr(self, att_name):
            continue
        curr_val = self.__getattribute__(att_name)
        if isinstance(curr_val, Camera):
            # NOTE(review): relies on torch.allclose accepting Camera objects
            # (i.e. Camera behaving tensor-like) - confirm against kaolin's Camera API.
            if not torch.allclose(curr_val, prev_val):
                return True
        elif curr_val != prev_val:
            return True
    return False
def _bl_renderer_data_layers(self) -> Dict[str, PrimitivesPack]:
""" Returns the bottom level object data layers"""
layers = dict()
for renderer_id, renderer in self._renderers.items():
layers[renderer_id] = renderer.data_layers()
return layers
def _cameras_data_layers(self) -> Iterable[PrimitivesPack]:
    """Returns the available cameras data layer (all visible cameras layers are packed together)."""
    visibility = self.state.graph.visible_objects
    # Only regenerate layers for cameras currently toggled visible in the scene graph
    cameras_to_redraw = {cam_id: cam
                         for cam_id, cam in self.state.graph.cameras.items()
                         if visibility.get(cam_id, False)}
    regenerated = self._camera_layers.regenerate_data_layers(cameras_to_redraw,
                                                             self.state.renderer.clear_color_value)
    return regenerated.values()
def active_data_layers(self) -> List[PrimitivesPack]:
    """Gather every toggled-on data layer of scene-graph objects, plus layers of visible cameras."""
    active = []
    for obj_state in self.state.graph.bl_renderers.values():
        toggles = obj_state.toggled_data_layers
        for layer_id, layer in obj_state.data_layers.items():
            if not toggles[layer_id]:
                continue
            # Attach object transform reference to data layers, assumes the object maintains this transform ref
            layer.transform = obj_state.transform
            active.append(layer)
    active.extend(self._cameras_data_layers())
    return active
def map_output_channels_to_rgba(self, rb: RenderBuffer):
    """Convert the canvas-selected channel of `rb` into a displayable rgba RenderBuffer.

    Args:
        rb (RenderBuffer): the fully rendered frame buffer.

    Returns:
        (RenderBuffer): rgb holds the normalized selected channel; depth / alpha pass through.

    Raises:
        ValueError: if the selected channel has more than 3 components.
    """
    selected_output_channel = self.state.renderer.selected_canvas_channel.lower()
    rb_channel = rb.get_channel(selected_output_channel)
    if rb_channel is None:
        # Unknown channel type configured to view over the canvas.
        # That can happen if, i.e. no object have traced a RenderBuffer with this channel.
        # Instead of failing, create an empty rb
        height, width = rb.rgb.shape[:2]
        return self._create_empty_rb(height=height, width=width, dtype=rb.rgb.dtype)
    # Normalize channel to [0, 1]
    channels_kit = self.state.graph.channels
    channel_info = channels_kit.get(selected_output_channel, create_default_channel())
    normalized_channel = channel_info.normalize_fn(rb_channel.clone())  # Clone to protect from modifications
    # To RGB (in normalized space)
    # TODO (operel): incorporate color maps
    channel_dim = normalized_channel.shape[-1]
    if channel_dim == 1:
        # Grayscale: replicate the single component across r, g, b
        rgb = torch.cat((normalized_channel, normalized_channel, normalized_channel), dim=-1)
    elif channel_dim == 2:
        # NOTE(review): concatenating three 2-component tensors yields 6 components, not 3;
        # the intent was presumably (c0, c1, 0) — confirm against the original implementation.
        rgb = torch.cat((normalized_channel, normalized_channel, torch.zeros_like(normalized_channel)), dim=-1)
    elif channel_dim == 3:
        rgb = normalized_channel
    else:
        raise ValueError('Cannot display channels with more than 3 dimensions over the canvas.')
    canvas_rb = RenderBuffer(rgb=rgb, depth=rb.depth, alpha=rb.alpha)
    return canvas_rb
# NOTE(review): the accessors below read like @property / @<name>.setter pairs whose decorators
# were lost in extraction (observe the duplicate `camera`, `interactive_mode` and
# `selected_camera_lens` signatures, which would otherwise shadow one another) —
# confirm against the original source file.
def renderers(self) -> Dict[str, BottomLevelRenderer]:
    """ All loaded bottom level renderers currently employed by the renderer core """
    return self._renderers
def camera(self) -> Camera:
    # Getter: the camera the canvas is currently rendered from
    return self.state.renderer.selected_camera
def camera(self, camera: Camera) -> None:
    # Setter: switch the canvas to render from the given camera
    self.state.renderer.selected_camera = camera
def target_fps(self) -> float:
    # Frames-per-second the renderer aims to maintain
    return self.state.renderer.target_fps
def target_interactive_fps(self) -> float:
    # FPS goal while the user is interacting
    return self.state.renderer.target_interactive_fps
def target_static_fps(self) -> float:
    # FPS goal when nothing is changing on screen
    return self.state.renderer.target_static_fps
def interactive_mode(self) -> bool:
    # Getter: whether the renderer is currently in interactive (reduced quality) mode
    return self.state.renderer.interactive_mode
def interactive_mode(self, mode: bool) -> None:
    # Setter for interactive mode
    self.state.renderer.interactive_mode = mode
def selected_camera_lens(self) -> str:
    # Getter: lens / projection type used by the selected camera
    return self.state.renderer.selected_camera_lens
def selected_camera_lens(self, lens: str):
    # Setter for the selected camera lens
    self.state.renderer.selected_camera_lens = lens
The provided code snippet includes necessary dependencies for implementing the `enable_amp` function. Write a Python function `def enable_amp(func)` to solve the following problem:
An extension to @torch.cuda.amp.autocast which queries WispState to check if mixed precision should be enabled.
Here is the function:
def enable_amp(func):
    """ An extension to @torch.cuda.amp.autocast which queries WispState to check if
    mixed precision should be enabled.

    Args:
        func: a RendererCore method; its `self` must expose `state.renderer.enable_amp`.

    Returns:
        The wrapped method; metadata (__name__, __doc__) of `func` is preserved.
    """
    import functools

    # functools.wraps fixes the original implementation hiding `func`'s identity behind
    # the anonymous wrapper (breaks introspection / debugging / repeated decoration)
    @functools.wraps(func)
    def _enable_amp(self: RendererCore, *args, **kwargs):
        with torch.cuda.amp.autocast(enabled=self.state.renderer.enable_amp):
            return func(self, *args, **kwargs)
    return _enable_amp
23,004 | import os
from contextlib import contextmanager
if not os.environ.get('ENABLE_PYCUDA') == '1':
from cuda import cuda
import torch
@contextmanager
def cuda_map_resource(img):
    """Context manager simplifying use of cuda.cuGraphicsMapResources / cuGraphicsSubResourceGetMappedArray.
    Boilerplate code based in part on pytorch-glumpy.

    This is a generator consumed via `with ... as ary:` so it must carry the
    @contextmanager decorator; unmapping is guaranteed on exit via try/finally.

    Args:
        img: CUDA graphics resource handle of a registered GL image.

    Yields:
        The CUarray mapped to sub-resource (0, 0) of the GL resource.

    Raises:
        RuntimeError: if mapping, mapped-array retrieval, or unmapping fails.
    """
    # args: (count, resource, stream)
    mapping_result = cuda.cuGraphicsMapResources(1, img, torch.cuda.default_stream().cuda_stream)
    if mapping_result[0] != cuda.CUresult.CUDA_SUCCESS:
        raise RuntimeError("Failed to map GL graphics resource to be accessed by CUDA.")
    try:
        # args (resource, arrayIndex, mipLevel)
        mapping_array = cuda.cuGraphicsSubResourceGetMappedArray(img, 0, 0)
        if mapping_array[0] != cuda.CUresult.CUDA_SUCCESS:
            raise RuntimeError("Failed to get mapped array from GL resource for CUDA copy.")
        yield mapping_array[1]
    finally:
        # Always unmap, even if the with-body raised, so the resource is not left mapped
        unmapping_result = cuda.cuGraphicsUnmapResources(1, img, torch.cuda.default_stream().cuda_stream)
        if unmapping_result[0] != cuda.CUresult.CUDA_SUCCESS:
            raise RuntimeError("Failed to unmap GL graphics resource from being accessed by CUDA.")
else:
import pycuda
import pycuda.gl as pycuda_gl
window = None
try:
# !!! Should be called when interactive wisp loads, before any torch ops take place !!!
# The following is a hacky workaround due to a cublas error on interfering streams:
# pycuda.gl fails to initialize after torch performs batched matrix multiplication
# (the bugs causes any following torch.dot invocations to fail)..
# The solution is to initialize pycuda.gl early when wisp loads.
# To load pycuda.gl, we require a GL context of some window,
# so here we force glumpy-glfw to create an invisible window, which generates an opengl context.
# Then immediately import pycuda.gl.autoint to let it initialize properly
from glumpy import app
# Tell glumpy to use glfw backend
app.use("glfw_imgui")
# Let glumpy use glfw to create an invisible window
window = app.Window(width=10, height=10, title='dummy', visible=False)
# pycuda initializes the default context with "cuGLCtxCreate", but this call will fail if a GL context
# is not currently set. Therefore import is invoked only after glfw obtains a GL context.
# See: https://documen.tician.de/pycuda/gl.html#module-pycuda.gl.autoinit
import pycuda.gl.autoinit
# Next tell torch to initialize the primary cuda context
import torch
torch.cuda.init()
# pycuda should not create a new context, but retain the torch one
import pycuda.driver as cuda
pycuda_context = cuda.Device(0).retain_primary_context()
except (ModuleNotFoundError, ImportError):
pass # Don't fail if interactive mode is disabled (e.g: glumpy or pycuda are unavailable)
finally:
if window is not None:
window.close()
@contextmanager
def cuda_map_resource(img):
    """Context manager simplifying use of pycuda_gl.RegisteredImage.
    Boilerplate code based in part on pytorch-glumpy.

    This generator is consumed via `with ... as ary:` so it must carry the
    @contextmanager decorator; unmapping is guaranteed on exit via try/finally.

    Args:
        img: a pycuda_gl.RegisteredImage (or any object exposing `.map()`).

    Yields:
        The mapped array for sub-resource (0, 0).
    """
    mapping = img.map()
    try:
        yield mapping.array(0, 0)
    finally:
        # Always unmap, even if the with-body raised
        mapping.unmap()
def cuda_2d_memcpy(resource_handle, shared_tex, img, height):
    """Copy a device tensor's bytes into the GL texture registered as `resource_handle`.

    Args:
        resource_handle: CUDA graphics resource (see cuda_register_gl_image).
        shared_tex: GL-side texture buffer; only its `nbytes` is read here.
        img: tensor providing the source bytes via data_ptr(); assumed contiguous,
             row-major, and the same total size as shared_tex — TODO confirm.
        height (int): number of texture rows.

    Raises:
        RuntimeError: if the 2D memcpy fails.
    """
    cpy = cuda.CUDA_MEMCPY2D()
    with cuda_map_resource(resource_handle) as ary:
        cpy.srcDevice = img.data_ptr()
        cpy.srcMemoryType = cuda.CUmemorytype.CU_MEMORYTYPE_DEVICE
        cpy.dstArray = ary
        cpy.dstMemoryType = cuda.CUmemorytype.CU_MEMORYTYPE_ARRAY
        # Bytes per row; assumes tightly packed rows (pitch == width-in-bytes)
        cpy.WidthInBytes = cpy.srcPitch = cpy.dstPitch = shared_tex.nbytes // height
        cpy.Height = height
        cpy_result = cuda.cuMemcpy2DUnaligned(cpy)
        if cpy_result[0] != cuda.CUresult.CUDA_SUCCESS:
            raise RuntimeError("Failed to memcopy cuda buffer to GL.")
23,005 | import os
from contextlib import contextmanager
def cuda_register_gl_image(image, target):
    """Register a GL texture image as a shared GL / CUDA resource and return its handle.

    Raises:
        RuntimeError: when registration fails.
    """
    # WRITE_DISCARD: CUDA fully overwrites the texture, previous contents need not be preserved
    write_discard = cuda.CUgraphicsRegisterFlags.CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD
    result = cuda.cuGraphicsGLRegisterImage(image=image, target=target, Flags=write_discard)
    if result[0] != cuda.CUresult.CUDA_SUCCESS:
        raise RuntimeError('Failed to register GL texture as a CUDA shared resource.')
    return result[1]
23,006 | import os
from contextlib import contextmanager
def cuda_unregister_resource(handle):
    """Release a CUDA graphics resource previously registered from GL.

    Raises:
        RuntimeError: when unregistration fails.
    """
    status = cuda.cuGraphicsUnregisterResource(handle)
    if status[0] != cuda.CUresult.CUDA_SUCCESS:
        raise RuntimeError('Failed to unregister CUDA resource.')
23,007 | import os
from contextlib import contextmanager
if not os.environ.get('ENABLE_PYCUDA') == '1':
from cuda import cuda
import torch
@contextmanager
def cuda_map_resource(img):
    """Context manager simplifying use of cuda.cuGraphicsMapResources / cuGraphicsSubResourceGetMappedArray.
    Boilerplate code based in part on pytorch-glumpy.

    This is a generator consumed via `with ... as ary:` so it must carry the
    @contextmanager decorator; unmapping is guaranteed on exit via try/finally.

    Args:
        img: CUDA graphics resource handle of a registered GL image.

    Yields:
        The CUarray mapped to sub-resource (0, 0) of the GL resource.

    Raises:
        RuntimeError: if mapping, mapped-array retrieval, or unmapping fails.
    """
    # args: (count, resource, stream)
    mapping_result = cuda.cuGraphicsMapResources(1, img, torch.cuda.default_stream().cuda_stream)
    if mapping_result[0] != cuda.CUresult.CUDA_SUCCESS:
        raise RuntimeError("Failed to map GL graphics resource to be accessed by CUDA.")
    try:
        # args (resource, arrayIndex, mipLevel)
        mapping_array = cuda.cuGraphicsSubResourceGetMappedArray(img, 0, 0)
        if mapping_array[0] != cuda.CUresult.CUDA_SUCCESS:
            raise RuntimeError("Failed to get mapped array from GL resource for CUDA copy.")
        yield mapping_array[1]
    finally:
        # Always unmap, even if the with-body raised, so the resource is not left mapped
        unmapping_result = cuda.cuGraphicsUnmapResources(1, img, torch.cuda.default_stream().cuda_stream)
        if unmapping_result[0] != cuda.CUresult.CUDA_SUCCESS:
            raise RuntimeError("Failed to unmap GL graphics resource from being accessed by CUDA.")
else:
import pycuda
import pycuda.gl as pycuda_gl
window = None
try:
# !!! Should be called when interactive wisp loads, before any torch ops take place !!!
# The following is a hacky workaround due to a cublas error on interfering streams:
# pycuda.gl fails to initialize after torch performs batched matrix multiplication
# (the bugs causes any following torch.dot invocations to fail)..
# The solution is to initialize pycuda.gl early when wisp loads.
# To load pycuda.gl, we require a GL context of some window,
# so here we force glumpy-glfw to create an invisible window, which generates an opengl context.
# Then immediately import pycuda.gl.autoint to let it initialize properly
from glumpy import app
# Tell glumpy to use glfw backend
app.use("glfw_imgui")
# Let glumpy use glfw to create an invisible window
window = app.Window(width=10, height=10, title='dummy', visible=False)
# pycuda initializes the default context with "cuGLCtxCreate", but this call will fail if a GL context
# is not currently set. Therefore import is invoked only after glfw obtains a GL context.
# See: https://documen.tician.de/pycuda/gl.html#module-pycuda.gl.autoinit
import pycuda.gl.autoinit
# Next tell torch to initialize the primary cuda context
import torch
torch.cuda.init()
# pycuda should not create a new context, but retain the torch one
import pycuda.driver as cuda
pycuda_context = cuda.Device(0).retain_primary_context()
except (ModuleNotFoundError, ImportError):
pass # Don't fail if interactive mode is disabled (e.g: glumpy or pycuda are unavailable)
finally:
if window is not None:
window.close()
@contextmanager
def cuda_map_resource(img):
    """Context manager simplifying use of pycuda_gl.RegisteredImage.
    Boilerplate code based in part on pytorch-glumpy.

    This generator is consumed via `with ... as ary:` so it must carry the
    @contextmanager decorator; unmapping is guaranteed on exit via try/finally.

    Args:
        img: a pycuda_gl.RegisteredImage (or any object exposing `.map()`).

    Yields:
        The mapped array for sub-resource (0, 0).
    """
    mapping = img.map()
    try:
        yield mapping.array(0, 0)
    finally:
        # Always unmap, even if the with-body raised
        mapping.unmap()
def cuda_2d_memcpy(resource_handle, shared_tex, img, height):
    """Copy a device tensor's bytes into the GL texture registered as `resource_handle` (pycuda backend).

    Args:
        resource_handle: registered image (see cuda_register_gl_image).
        shared_tex: GL-side texture buffer; only its `nbytes` is read here.
        img: tensor providing the source bytes via data_ptr(); assumed contiguous,
             row-major, and the same total size as shared_tex — TODO confirm.
        height (int): number of texture rows.
    """
    cpy = pycuda.driver.Memcpy2D()
    with cuda_map_resource(resource_handle) as ary:
        cpy.set_src_device(img.data_ptr())
        cpy.set_dst_array(ary)
        # Bytes per row; assumes tightly packed rows (pitch == width-in-bytes)
        cpy.width_in_bytes = cpy.src_pitch = cpy.dst_pitch = shared_tex.nbytes // height
        cpy.height = height
        cpy(aligned=False)
23,008 | import os
from contextlib import contextmanager
def cuda_register_gl_image(image, target):
    """Register a GL texture image as a shared GL / CUDA resource (pycuda backend)."""
    # WRITE_DISCARD: CUDA fully overwrites the texture, previous contents need not be preserved
    return pycuda_gl.RegisteredImage(image, target, pycuda_gl.graphics_map_flags.WRITE_DISCARD)
23,009 | import os
from contextlib import contextmanager
def cuda_unregister_resource(handle):
    """No-op under pycuda: the resource unregisters itself automatically once the
    reference count of its python proxy object drops to zero."""
    return None
23,010 | from __future__ import annotations
import contextlib
import glob
import io
import logging
import math
import os
import queue
import PIL.Image
import re
import threading
import time
import torch
import torchvision
from typing import Literal
from wisp.framework import WispState
from wisp.renderer.core import RendererCore
from wisp.renderer.core.control.io import WispMouseButton
from wisp.renderer.core.control.turntable import TurntableCameraMode
def hold_canvas(*args, **kwargs):
    # Stub standing in for ipycanvas.hold_canvas when ipycanvas is unavailable;
    # __IPYCANVAS_ERR (defined elsewhere in this module) carries the explanatory message.
    # Any attempt to use it raises immediately.
    raise RuntimeError(__IPYCANVAS_ERR)
23,011 | from __future__ import annotations
import contextlib
import glob
import io
import logging
import math
import os
import queue
import PIL.Image
import re
import threading
import time
import torch
import torchvision
from typing import Literal
from wisp.framework import WispState
from wisp.renderer.core import RendererCore
from wisp.renderer.core.control.io import WispMouseButton
from wisp.renderer.core.control.turntable import TurntableCameraMode
@contextlib.contextmanager
def dummy_ctx_manager():
    """A no-op context manager, used where an optional real context manager is unavailable.

    Yields:
        None
    """
    # This generator is used as a `with` target, so it requires the
    # contextlib.contextmanager decorator (apparently lost in extraction).
    yield None
23,012 | from __future__ import annotations
import contextlib
import glob
import io
import logging
import math
import os
import queue
import PIL.Image
import re
import threading
import time
import torch
import torchvision
from typing import Literal
from wisp.framework import WispState
from wisp.renderer.core import RendererCore
from wisp.renderer.core.control.io import WispMouseButton
from wisp.renderer.core.control.turntable import TurntableCameraMode
The provided code snippet includes necessary dependencies for implementing the `make_render_closure` function. Write a Python function `def make_render_closure(render_core: RendererCore, downscale_factor: int = 4)` to solve the following problem:
Makes a render closure over input args, so render can be called without arguments. Args: render_core: the RendererCore to use for rendering downscale_factor: how much to downscale the image when rendering Returns: function() -> torch.Tensor 0..1 float, 4 x H x W, where H, W are determined by render_core.camera and the downscale_factor
Here is the function:
def make_render_closure(render_core: RendererCore, downscale_factor: int = 4):
    """Makes a render closure over input args.

    Args:
        render_core: the RendererCore to use for rendering
        downscale_factor: how much to downscale the image when rendering

    Returns:
        function(td) -> torch.Tensor 0..1 float, 4 x H x W, where H, W are determined by
        render_core.camera and the downscale_factor, and `td` is the time delta forwarded
        to render_core.render().
    """
    def _render(td):
        """ Returns 0..1 float torch tensor of size 4 x H x W """
        rescale = None
        if downscale_factor == 1:
            render_core.set_full_resolution()
        else:
            render_core.set_low_resolution(downscale_factor)
            # Low-res render is upscaled back to the core's reported resolution below
            # TODO(operel): this should not be necessary
            rescale = torchvision.transforms.Resize((render_core.res_y, render_core.res_x))
        renderbuffer = render_core.render(time_delta=td)  # we only request rerender when dirty
        res = renderbuffer.image().rgba.permute(2, 0, 1) / 255  # C x H x W 0..1
        if rescale is not None:
            res = rescale(res)
        return res
    return _render
23,013 | from __future__ import annotations
import contextlib
import glob
import io
import logging
import math
import os
import queue
import PIL.Image
import re
import threading
import time
import torch
import torchvision
from typing import Literal
from wisp.framework import WispState
from wisp.renderer.core import RendererCore
from wisp.renderer.core.control.io import WispMouseButton
from wisp.renderer.core.control.turntable import TurntableCameraMode
# Repository root, resolved as three directory levels above this module's location.
WISP_ROOT_DIR = os.path.realpath(
    os.path.join(os.path.dirname(os.path.realpath(__file__)), os.path.pardir, os.path.pardir, os.path.pardir))
The provided code snippet includes necessary dependencies for implementing the `save_canvas_render` function. Write a Python function `def save_canvas_render(canvas, filename, save_dir=None)` to solve the following problem:
Convenience function to save a rendered frame to a default location, while appending a two-digit counter to the basename. Args: canvas: IpyCanvas canvas object filename: filename and extension where to save; a counter is appended, e.g. `frame.png` --> `frame00.png`, `frame01.png`, ... save_dir: directory where to save file; will use default _results/jupyter_renders if not provided
Here is the function:
def save_canvas_render(canvas, filename, save_dir=None):
    """
    Convenience function to save a rendered frame to a default location, while appending a
    two-digit counter to the basename.

    Args:
        canvas: IpyCanvas canvas object
        filename: filename and extension where to save; a counter is appended,
            e.g. `frame.png` --> `frame00.png`, `frame01.png`, ...
        save_dir: directory where to save file; will use default _results/jupyter_renders if not provided
    """
    if save_dir is None:
        save_dir = os.path.join(WISP_ROOT_DIR, '_results', 'jupyter_renders')
    os.makedirs(save_dir, exist_ok=True)
    basename, extension = os.path.splitext(os.path.basename(filename))
    # re.escape so basenames containing regex metacharacters (e.g. 'img.v2') match literally
    pattern = re.compile(r"%s(\d+)%s" % (re.escape(basename), re.escape(extension)))
    fnames = [
        os.path.basename(x) for x in glob.glob(os.path.join(save_dir, "%s[0-9]*%s" % (basename, extension)))
    ]
    counts = [int(m.group(1)) for m in (pattern.match(f) for f in fnames) if m is not None]
    # Next free index: one past the largest existing counter, or 0 for the first frame
    count = max(counts) + 1 if counts else 0
    filename = os.path.join(save_dir, "%s%02d%s" % (basename, count, extension))
    # Bug fix: the original f-string never interpolated the path ('Saving frame to: (unknown)')
    logging.info('Saving frame to: %s', filename)
    canvas.to_file(filename)
23,014 | from __future__ import annotations
import contextlib
import glob
import io
import logging
import math
import os
import queue
import PIL.Image
import re
import threading
import time
import torch
import torchvision
from typing import Literal
from wisp.framework import WispState
from wisp.renderer.core import RendererCore
from wisp.renderer.core.control.io import WispMouseButton
from wisp.renderer.core.control.turntable import TurntableCameraMode
The provided code snippet includes necessary dependencies for implementing the `np_img_to_compressed_bytes` function. Write a Python function `def np_img_to_compressed_bytes(np_img, format)` to solve the following problem:
Converts numpy array to bytes in the specified image format. Args: np_img: numpy array H x W x C uint8 format: any format supported by Pillow, e.g. 'png' or 'jpeg' (note jpeg does not accept RGBA) Return: bytes
Here is the function:
def np_img_to_compressed_bytes(np_img, format):
    """ Converts numpy array to bytes in the specified image format.
    Args:
        np_img: numpy array H x W x C uint8
        format: any format supported by Pillow, e.g. 'png' or 'jpeg' (note jpeg does not accept RGBA)
    Return:
        bytes
    """
    buffer = io.BytesIO()
    PIL.Image.fromarray(np_img).save(buffer, format=format)
    return buffer.getvalue()
23,015 | from __future__ import annotations
from abc import ABC
import sys
import numpy as np
import torch
from glumpy import app, gloo, gl, ext
import imgui
from typing import Optional, Type, Callable, Dict, List, Tuple
from kaolin.render.camera import Camera
from wisp.framework import WispState, watch
from wisp.renderer.core import RendererCore
from wisp.renderer.core import cuda_register_gl_image, cuda_map_resource, cuda_2d_memcpy, cuda_unregister_resource
from wisp.renderer.core.control import CameraControlMode, WispKey, WispMouseButton
from wisp.renderer.core.control import FirstPersonCameraMode, TrackballCameraMode, TurntableCameraMode
from wisp.renderer.core.api import add_pipeline_to_scene_graph
from wisp.renderer.gizmos import Gizmo, WorldGrid, AxisPainter, PrimitivesPainter
from wisp.renderer.gui import WidgetInteractiveVisualizerProperties, WidgetGPUStats, WidgetSceneGraph, WidgetImgui
class WispApp(ABC):
""" WispApp is a base app implementation which takes care of the entire lifecycle of the rendering loop:
this is the infinite queue of events which includes: handling of IO and OS events, rendering frames and running
backgrounds tasks, i.e. optimizations.
The app is initiated by calling the following functions:
- register_background_task(): Registers a task to run alternately with the render() function per
frame. Background tasks can be, i.e, functions that run a single optimization step for some neural object.
- run(): Initiates the rendering loop. This method blocks the calling thread until the window is closed.
Future custom interactive apps can subclass this base app, and inherit methods to customize the behaviour
of the app:
- init_wisp_state(): A hook for initializing the fields of the shared state object.
The interactive renderer configuration, scene graph and user custom fields can be initialized here.
- update_render_state(): A hook for updating the fields of the shared state object at the beginning of each
frame.
- create_widgets(): Controls which gui components the app uses
- create_gizmos(): Controls which transient canvas drawable components will be used (OpenGL based).
- default_user_mode(): The default camera controller mode (first person, trackball, turntable).
- register_event_handlers(): Registers which methods are invoked in response to app events / wisp state changes.
The rendering loop alternates between the following calls:
- on_idle, which invokes user background tasks, registered via register_background_task before calling run().
- on_draw, which invokes render() when it's time to draw a new frame
Internally, the app uses the RendererCore object to manage the drawing of all objects in the scene graph.
The app may request the RendererCore to switch into "interactive mode" to ensure the FPS remains interactive
(this is done at the expense of rendering quality).
Interactive mode is automatically initiated, i.e, during user interactions.
The render() logic is composed by the following sub-functions:
- update_render_state() - Updates the fields of the shared state object at the beginning of each frame.
- render_gui() - Renders the imgui components over the canvas, fromm scratch (imgui uses immediate mode gui).
- redraw() - A "heavier" function which forces the render-core to refresh its internal state.
Newly created objects may get added to the scene graph, and obsolete objects may get removed.
Vector-primitives data layers may regenerate here.
- render_canvas() - Invokes the render-core to obtain a RenderBuffer of the rendered scene objects.
The lion share of draw logic occurs within this call, in particular the drawing of neural objects.
- _blit_to_gl_renderbuffer - Copies the RenderBuffer results to the screen buffer.
- Gizmos are finally drawn directly to the screen framebuffer (as common OpenGL draw calls), these objects
are considered transient in the sense that they don't belong to the scene graph.
- Timer tick events may also get taken care of during the rendering loop (i.e: adjust velocity of user camera).
Users should rarely override these functions, unless they're sure about what they're doing.
Customizing the app behaviour should always be preferred via the initialization hooks.
"""
# Period of time between user interactions before resetting back to full resolution mode
COOLDOWN_BETWEEN_RESOLUTION_CHANGES = 0.35 # In seconds
def __init__(self, wisp_state, window_name="wisp app"):
    """Build the app: state init, window + GL context, imgui, CUDA, render core, widgets and gizmos.

    Args:
        wisp_state (WispState): Shared state object; init_wisp_state() customizes it first.
        window_name (str): Title of the OS window.
    """
    # Initialize app state instance
    self.wisp_state: WispState = wisp_state
    self.init_wisp_state(wisp_state)
    # Create main app window & initialize GL context
    # glumpy with a specialized glfw backend takes care of that (backend is imgui integration aware)
    # NOTE(review): self.width / self.height are read here but not assigned in this chunk —
    # presumably provided by properties defined elsewhere; confirm.
    window = self._create_window(self.width, self.height, window_name, gl_version=wisp_state.renderer.gl_version)
    self.register_io_mappings()
    # Initialize gui, assumes the window is managed by glumpy with glfw
    imgui.create_context()
    self._is_imgui_focused = False
    self._is_imgui_hovered = False
    self._is_reposition_imgui_menu = True
    self.canvas_dirty = False
    self.redraw_every_frame = False
    # Tell torch to initialize the CUDA context
    torch.cuda.init()
    # Initialize applicative renderer, which independently paints images for the main canvas
    render_core = RendererCore(self.wisp_state)
    self.window = window  # App window with a GL context & oversees event callbacks
    self.render_core = render_core  # Internal renderer, responsible for painting over canvas
    self.render_clock = app.clock.Clock()
    self.render_clock.tick()
    self.interactions_clock = app.clock.Clock()
    self.interactions_clock.tick()
    self._was_interacting_prev_frame = False
    # The initialization of these fields is deferred until "on_resize" is first prompted.
    # There we generate a simple billboard GL program (normally with a shared CUDA resource)
    # Canvas content will be blitted onto it
    self.canvas_program: Optional[gloo.Program] = None  # GL program used to paint a single billboard
    self.cugl_rgb_handle = None  # CUDA buffer, as a shared resource with OpenGL
    self.cugl_depth_handle = None
    try:
        # WSL does not support CUDA-OpenGL interoperability, fallback to device2host2device copy instead
        from platform import uname
        is_wsl = 'microsoft-standard' in uname().release
        self.blitdevice2device = not is_wsl
    except Exception:
        # By default rendering results copy directly from torch/cuda mem to OpenGL Texture
        self.blitdevice2device = True
    self.user_mode: Optional[CameraControlMode] = None  # Camera controller object (first person, trackball or turntable)
    self.widgets = self.create_widgets()  # Create gui widgets for this app
    self.gizmos = self.create_gizmos()  # Create canvas widgets for this app
    self.prim_painter = PrimitivesPainter()
    self.register_event_handlers()
    self.change_user_mode(self.default_user_mode())
    self.redraw()  # Refresh RendererCore
def add_pipeline(self, name, pipeline, transform=None):
    """Register a neural fields pipeline into the scene graph.

    Thin wrapper delegating to add_pipeline_to_scene_graph with this app's wisp_state.

    Args:
        name (str): The name of the pipeline.
        pipeline (wisp.models.Pipeline): The pipeline holding a tracer and nef.
        transform (wisp.core.ObjectTransform): The transform for the pipeline.
    """
    add_pipeline_to_scene_graph(self.wisp_state, name, pipeline, transform=transform)
def add_widget(self, widget, idx=None):
""" Adds a widget to the list of widgets.
By default, the widget is added to the end of the list of widgets, which means that it will be
last on the display order.
Args:
widget (wisp.renderer.gui.imgui.WidgetImgui): The widget to add.
idx (Optional[int]): If specified, will insert the widget at the specific display order.
"""
if idx is not None:
self.widgets.insert(0, widget)
else:
self.widgets.append(widget)
def add_gizmo(self, name, gizmo):
"""Adds a gizmo to the list of gizmos.
Args:
name (str): The name of the gizmo.
gizmo (wisp.renderer.gizmos.Gizmo): The gizmo to add.
"""
self.gizmos[name] = gizmo
def init_wisp_state(self, wisp_state: WispState) -> None:
""" A hook for applications to initialize specific fields inside the wisp state object.
This function is called at the very beginning of WispApp initialization, hence the initialized fields can
be customized to affect the behaviour of the renderer.
"""
# Channels available to view over the canvas
wisp_state.renderer.available_canvas_channels = ["rgb", "depth"]
wisp_state.renderer.selected_canvas_channel = "rgb"
def create_widgets(self) -> List[WidgetImgui]:
    """ Returns which widgets the gui will display, in order.
    Override to define which gui widgets are used by the wisp app.
    """
    # Default toolbox: GPU stats, renderer properties and the scene-graph explorer
    return [WidgetGPUStats(), WidgetInteractiveVisualizerProperties(), WidgetSceneGraph()]
def create_gizmos(self) -> Dict[str, Gizmo]:
    """ Override to define which gizmos are painted on the canvas by the wisp app.
    Gizmos are transient rasterized objects rendered by OpenGL on top of the canvas.
    For example: world grid, axes painter.
    """
    gizmos = dict()
    grid_size = 10.0
    planes = self.wisp_state.renderer.reference_grids
    # Unique axis letters spanned by the requested reference planes (e.g. ['xz'] -> {'x', 'z'})
    axes = set(''.join(planes))
    for plane in planes:
        # Coarse grid: 20x20 squares over the full grid extent
        gizmos[f'world_grid_{plane}'] = WorldGrid(squares_per_axis=20, grid_size=grid_size,
                                                  line_color=(128, 128, 128), line_size=1, plane=plane)
        # Fine grid: 200x200 squares overlaid for sub-square detail
        gizmos[f'world_grid_fine_{plane}'] = WorldGrid(squares_per_axis=200, grid_size=10.0,
                                                       line_color=(128, 128, 128), line_size=2, plane=plane)
    # Axes on top of the reference grid
    gizmos['grid_axis_painter'] = AxisPainter(axes_length=grid_size, line_width=1,
                                              axes=axes, is_bidirectional=False)
    return gizmos
def default_user_mode(self) -> str:
    """ Override to determine the default camera control mode.
    Possible choices: 'First Person View', 'Turntable', 'Trackball'
    """
    # Turntable is the default camera controller used when the app starts
    return "Turntable"
def register_event_handlers(self) -> None:
    """ Override (and call super) to register additional event handlers """
    # Each `watch` subscribes a handler to run when the given WispState renderer field changes
    watch(watched_obj=self.wisp_state.renderer, field="cam_controller", status="changed",
          handler=self.on_cam_controller_changed)
    watch(watched_obj=self.wisp_state.renderer, field="selected_camera", status="changed",
          handler=self.on_selected_camera_changed)
    watch(watched_obj=self.wisp_state.renderer, field="selected_canvas_channel", status="changed",
          handler=self.on_selected_canvas_channel_changed)
    watch(watched_obj=self.wisp_state.renderer, field="selected_camera_lens", status="changed",
          handler=self.on_selected_camera_lens_changed)
    watch(watched_obj=self.wisp_state.renderer, field="clear_color_value", status="changed",
          handler=self.on_clear_color_value_changed)
def on_cam_controller_changed(self, value: Type[CameraControlMode]):
# Stay synced with state change: generate new instance of mode type
mode_type = value
self.user_mode = mode_type(render_core=self.render_core, wisp_state=self.wisp_state)
def on_selected_camera_changed(self, value: Camera):
    """Handler invoked when the user switches the active camera."""
    # Rebuild camera controller to free any cached info from previous camera
    cam_controller_cls = type(self.user_mode)
    self.user_mode = cam_controller_cls(render_core=self.render_core, wisp_state=self.wisp_state)
    # Adjust the width / height according to current state of the renderer window
    self.render_core.resize_canvas(height=self.height, width=self.width)
def on_selected_camera_lens_changed(self, value: str):
    # Propagate the lens selection to the render core, which switches the camera projection mode
    self.render_core.change_camera_projection_mode(value)
def on_selected_canvas_channel_changed(self, value: str):
    # The displayed channel changed (e.g. rgb -> depth), so the canvas must be repainted
    self.canvas_dirty = True  # Request canvas redraw
def on_clear_color_value_changed(self, value: Tuple[float, float, float]):
    # The background clear color changed, so the canvas must be repainted
    self.canvas_dirty = True  # Request canvas redraw
def run(self):
    """ Initiate events message queue, which triggers the rendering loop.
    This call will block the thread until the app window is closed.
    """
    # glumpy.app.Window is using argparse
    # We remove sys.argv temporarily to avoid conflict with Wisp's argparse. argv is restored in on_init()
    self._argv = sys.argv
    sys.argv = [sys.argv[0]]
    try:
        app.run()  # App clock should always run as frequently as possible (background tasks should not be limited)
    finally:
        if hasattr(self, '_argv'):  # Should only take place if an error occurred before on_init was invoked
            sys.argv = self._argv
            del self._argv
def _create_window(self, width, height, window_name, gl_version):
    """ Creates the glumpy app window (glfw + imgui backend) and hooks this app's event handlers.

    Args:
        width (int): Initial window width, in pixels.
        height (int): Initial window height, in pixels.
        window_name (str): Title for the OS window.
        gl_version (str): OpenGL version string, forwarded to glumpy's backend selector.

    Returns:
        (app.Window): The newly created window; the GL context is implicitly made current.
    """
    # glumpy's app.Window uses argparse internally.
    # We remove sys.argv temporarily to avoid conflict with Wisp's argparse
    argv = sys.argv
    try:
        sys.argv = [argv[0]]
        # Currently assume glfw backend due to integration with imgui
        app.use(f"glfw_imgui ({gl_version})")
        win_config = app.configuration.Configuration()
        if self.wisp_state.renderer.antialiasing == 'msaa_4x':
            win_config.samples = 4  # Request a 4x multisampled framebuffer
        # glumpy implicitly sets the GL context as current
        window = app.Window(width=width, height=height, title=window_name, config=win_config)
        # Route all glumpy window events to this app's handlers
        window.on_draw = self.on_draw
        window.on_resize = self.on_resize
        window.on_key_press = self.on_key_press
        window.on_key_release = self.on_key_release
        window.on_mouse_press = self.on_mouse_press
        window.on_mouse_drag = self.on_mouse_drag
        window.on_mouse_release = self.on_mouse_release
        window.on_mouse_scroll = self.on_mouse_scroll
        window.on_mouse_motion = self.on_mouse_motion
    finally:
        # Restore sys.argv for Wisp argparse
        sys.argv = argv
    if self.wisp_state.renderer.antialiasing == 'msaa_4x':
        gl.glEnable(gl.GL_MULTISAMPLE)  # Enable MSAA on the now-current context
    return window
def _create_gl_depth_billboard_program(texture: np.ndarray, depth_texture: np.ndarray):
    """ Compiles a GL program which paints a full-screen quad ("billboard") from the given
    color texture, while also writing per-pixel depth sampled from the depth texture.

    NOTE(review): this function takes no self; presumably a @staticmethod whose decorator
    lies outside the visible excerpt - confirm against the full file.

    Args:
        texture (np.ndarray): Color texture backing the canvas.
        depth_texture (np.ndarray): Depth texture; only its r component is sampled.

    Returns:
        (gloo.Program): Compiled program with quad vertices, texcoords and textures uploaded.
    """
    vertex = """
                uniform float scale;
                attribute vec2 position;
                attribute vec2 texcoord;
                varying vec2 v_texcoord;
                void main()
                {
                    v_texcoord = texcoord;
                    gl_Position = vec4(scale*position, 0.0, 1.0);
                } """
    fragment = """
                uniform sampler2D tex;
                uniform sampler2D depth_tex;
                varying vec2 v_texcoord;
                void main()
                {
                    gl_FragColor = texture2D(tex, v_texcoord);
                    gl_FragDepth = texture2D(depth_tex, v_texcoord).r;
                } """
    # TODO (operel): r component is a waste?
    # Compile GL program
    canvas = gloo.Program(vertex, fragment, count=4)
    # Upload fixed values to GPU
    canvas['position'] = [(-1, -1), (-1, +1), (+1, -1), (+1, +1)]  # Full-screen quad (triangle strip order)
    canvas['texcoord'] = [(0, 0), (0, 1), (1, 0), (1, 1)]
    canvas['scale'] = 1.0
    canvas['tex'] = texture
    canvas['depth_tex'] = depth_texture
    return canvas
def _create_screen_texture(res_h, res_w, channel_depth, dtype=np.uint8):
""" Create and return a Texture2D with gloo and a cuda handle. """
if issubclass(dtype, np.integer):
tex = np.zeros((res_h, res_w, channel_depth), dtype).view(gloo.Texture2D)
elif issubclass(dtype, np.floating):
tex = np.zeros((res_h, res_w, channel_depth), dtype).view(gloo.TextureFloat2D)
else:
raise ValueError(f'_register_cugl_shared_texture invoked with unsupported texture dtype: {dtype}')
# Force gloo to create GL object on GPU
tex.activate()
tex.deactivate()
return tex
def _register_cugl_shared_texture(self, tex):
if self.blitdevice2device:
# Create shared GL / CUDA resource
handle = cuda_register_gl_image(image=int(tex.handle), target=tex.target)
else:
# No shared resource required, as we copy from cuda buffer -> cpu -> GL texture
handle = None
return handle
def _reposition_gui_menu(self, menu_width, main_menu_height):
    """ Pins the gui menu window to the right edge of the app window, directly under the
    main-menu bar, stretched to the remaining window height.

    Args:
        menu_width (int): Desired menu width, in pixels.
        main_menu_height (int): Height of the top main-menu bar, in pixels.
    """
    window_height = self.window.height
    window_width = self.window.width
    # imgui.ALWAYS forces size / position even if the user previously moved the window
    imgui.set_next_window_size(width=menu_width, height=window_height-main_menu_height, condition=imgui.ALWAYS)
    imgui.set_next_window_position(x=window_width-menu_width, y=main_menu_height, condition=imgui.ALWAYS)
    self._is_reposition_imgui_menu = False  # Handled; no reposition needed until the next resize
def render_gui(self, state):
    """ Render the entire gui window per frame (imgui works in immediate mode).
    Internally, the Widgets take care of rendering the actual content.

    Args:
        state (WispState): Shared app state object the widgets read from.
    """
    imgui.new_frame()
    if imgui.begin_main_menu_bar():
        main_menu_height = imgui.get_window_height()
        if imgui.begin_menu("File", True):
            clicked_quit, selected_quit = imgui.menu_item(
                "Quit", 'Cmd+Q', False, True
            )
            if clicked_quit:
                exit(1)
            imgui.end_menu()
        imgui.end_main_menu_bar()
        if self._is_reposition_imgui_menu:
            # A resize occurred; snap the side menu back to the window's right edge
            self._reposition_gui_menu(menu_width=350, main_menu_height=main_menu_height)
    imgui.begin("Scene Information", True)
    for widget in self.widgets:
        widget.paint(state)
    # Track hover / focus state so canvas input handlers can ignore events aimed at the gui
    self._is_imgui_hovered = imgui.core.is_window_hovered(imgui.HOVERED_ANY_WINDOW |
                                                          imgui.HOVERED_ALLOW_WHEN_BLOCKED_BY_POPUP |
                                                          imgui.HOVERED_ALLOW_WHEN_BLOCKED_BY_ACTIVE_ITEM)
    self._is_imgui_hovered = self._is_imgui_hovered or \
                             imgui.core.is_any_item_hovered() or imgui.is_any_item_active()
    imgui.end()
    self._is_imgui_focused = imgui.is_window_focused(imgui.FOCUS_ROOT_WINDOW)
    imgui.end_frame()
    imgui.render()
def render_canvas(self, render_core, time_delta, force_render):
    """ Invoke the render-core to render all neural fields and blend into a single Renderbuffer.
    The rgb and depth channels passed on to the app.

    Args:
        render_core: Render core which produces a RenderBuffer for the scene.
        time_delta (float): Time elapsed since the previous frame.
        force_render (bool): If True, forces a full redraw of the scene.

    Returns:
        (torch.Tensor, torch.Tensor): rgba byte image and 4-channel depth image,
        both flipped to top-down row order and made contiguous for the GL blit.
    """
    # The render core returns a RenderBuffer
    renderbuffer = render_core.render(time_delta, force_render)
    buffer_attachment = renderbuffer.image().rgba
    buffer_attachment = buffer_attachment.flip([0]) # Flip y axis
    img = buffer_attachment.byte().contiguous()
    buffer_attachment_depth = renderbuffer.depth
    buffer_attachment_depth = buffer_attachment_depth.flip([0])
    # Expand single-channel depth to 4 channels, matching the RGBA texture layout of the blit
    depth_img = buffer_attachment_depth.repeat(1,1,4).contiguous()
    return img, depth_img
def _blit_to_gl_renderbuffer(self, img, depth_img, canvas_program, cugl_rgb_handle, cugl_depth_handle, height):
    """ Copies the rendered torch buffers into the GL canvas textures, then draws the
    full-screen quad. Prefers a direct CUDA -> GL device copy; otherwise falls back to a
    device -> host -> device copy through numpy.

    Args:
        img (torch.Tensor): RGBA byte image, already in top-down row order.
        depth_img (torch.Tensor): Depth image expanded to 4 channels.
        canvas_program: gloo.Program holding the 'tex' / 'depth_tex' attachments.
        cugl_rgb_handle: CUDA handle of the shared rgb texture (None on the CPU path).
        cugl_depth_handle: CUDA handle of the shared depth texture (None on the CPU path).
        height (int): Texture height in pixels, used by the 2d memcpy.
    """
    if self.blitdevice2device:
        # Device to device copy: Copy CUDA buffer to GL Texture mem
        shared_tex = canvas_program['tex']
        shared_tex_depth = canvas_program['depth_tex']
        # copy from torch into buffer; sizes must match exactly or the memcpy corrupts memory
        assert shared_tex.nbytes == img.numel() * img.element_size()
        assert shared_tex_depth.nbytes == depth_img.numel() * depth_img.element_size() # TODO: using a 4d tex
        cuda_2d_memcpy(resource_handle=cugl_rgb_handle, shared_tex=shared_tex, img=img, height=height)
        cuda_2d_memcpy(resource_handle=cugl_depth_handle, shared_tex=shared_tex_depth, img=depth_img, height=height)
        torch.cuda.synchronize()  # Ensure copies complete before GL samples the textures
    else:
        # Device to host to device copy: Move torch tensors to cpu and upload as texture data
        canvas_program['tex'] = img.cpu().numpy()
        canvas_program['depth_tex'] = depth_img.cpu().numpy()
    canvas_program.draw(gl.GL_TRIANGLE_STRIP)
def update_renderer_state(self, wisp_state, dt):
    """
    Populate the scene state object with the most recent information about the interactive renderer.
    The scene state, for example, may be used by the GUI widgets to display up to date information.
    This function is invoked in the beginning of the render() function, before the gui and the canvas are drawn.
    :param wisp_state The WispState object holding shared information between components about the wisp app.
    :param dt Amount of time elapsed since the last update.
    """
    wisp_state.renderer.fps = app.clock.get_fps()  # Measured frames-per-second from glumpy's app clock
    wisp_state.renderer.dt = dt
    wisp_state.renderer.cam_controller = type(self.user_mode)  # Expose the controller type, not the instance
def change_user_mode(self, user_mode: str):
    """ Changes the camera controller mode.

    Args:
        user_mode (str): One of 'Trackball', 'First Person View', 'Turntable';
        unrecognized names leave the current controller untouched.
    """
    available_modes = {
        'Trackball': TrackballCameraMode,
        'First Person View': FirstPersonCameraMode,
        'Turntable': TurntableCameraMode,
    }
    selected_mode = available_modes.get(user_mode)
    if selected_mode is not None:
        self.wisp_state.renderer.cam_controller = selected_mode
def redraw(self):
    """ Asks the render core to redraw the scene:
    - The scene graph will be refreshed (new objects added will create their renderers if needed)
    - Data layers will regenerate according to up-to-date state.
    render() may internally invoke redraw() when the canvas is tagged as "dirty".
    A render() call is required to display changes caused by redraw() on the canvas.
    """
    # Refresh the scene graph & data layers inside the core
    self.render_core.redraw()
    # Rebuild GL primitives for whatever data layers are currently active
    self.prim_painter.redraw(self.render_core.active_data_layers())
def render(self):
    """ Renders a single frame: updates the shared state, draws the gui, blits the neural
    canvas, and finally paints OpenGL gizmos on top.
    """
    dt = self.render_clock.tick()  # Tick render clock: dt is now the exact time elapsed since last render
    # Populate the scene state with the most recent information about the interactive renderer.
    # The scene state, for example, may be used by the GUI widgets to display up to date information.
    self.update_renderer_state(self.wisp_state, dt)
    # Clear color / depth buffers before rendering the next frame
    clear_color = (*self.wisp_state.renderer.clear_color_value, 1.0)  # RGBA
    self.window.clear(color=clear_color)
    # imgui renders first
    self.render_gui(self.wisp_state)
    if self.redraw_every_frame:
        self.canvas_dirty = True
    # The app was asked to redraw the scene, inform the render core
    if self.canvas_dirty:
        self.redraw()
    # Invoke the timer tick event, and let the camera controller update the state of any interactions
    # of the user which involve the time elapsed (i.e: velocity, acceleration of movements).
    self.user_mode.handle_timer_tick(dt)
    # Toggle interactive mode on or off if needed to maintain interactive FPS rate
    if self.user_mode.is_interacting():
        self.render_core.set_low_resolution()
    else:
        # Allow a fraction of a second before turning full resolution on.
        # User interactions sometimes behave like a rapid burst of short and quick interactions.
        if self._was_interacting_prev_frame:
            self.interactions_clock.tick()
        time_since_last_interaction = self.interactions_clock.time() - self.interactions_clock.last_ts
        if time_since_last_interaction > self.COOLDOWN_BETWEEN_RESOLUTION_CHANGES:
            self.render_core.set_full_resolution()
    self._was_interacting_prev_frame = self.user_mode.is_interacting()
    # render canvas: core proceeds by invoking internal renderers tracers
    # output is rendered on a Renderbuffer object, backed by torch tensors
    img, depth_img = self.render_canvas(self.render_core, dt, self.canvas_dirty)
    # glumpy code injected within the pyimgui render loop to blit the renderer core output to the actual canvas
    # The torch buffers are copied with cuda, connected as shared resources as 2d GL textures
    self._blit_to_gl_renderbuffer(img, depth_img, self.canvas_program, self.cugl_rgb_handle,
                                  self.cugl_depth_handle, self.height)
    # Finally, render OpenGL gizmos on the canvas.
    # This may include the world grid, or vectorial lines / points belonging to data layers
    camera = self.render_core.camera
    for gizmo in self.gizmos.values():
        gizmo.render(camera)
    self.prim_painter.render(camera)
    self.canvas_dirty = False  # The canvas is now in sync with the scene state
def register_background_task(self, hook: Callable[[], None]) -> None:
    """ Register a new callable function to run in conjunction with the rendering loop.
    The app will alternate between on_idle calls, invoking the background task, and on_draw
    calls, invoking the rendering itself, both occurring on the same thread.
    """
    if hook is None:
        return
    def _on_idle(dt: float):
        # Respect the app-wide pause toggle for background tasks
        if not self.wisp_state.renderer.background_tasks_paused:
            hook()
    self.window.on_idle = _on_idle
def on_draw(self, dt=None):
    """ glumpy's event to draw the next frame. Invokes the render() function if needed.
    dt comes from the app clock; the renderer keeps its own clock, separate from background tasks.
    """
    # Render when interactive mode is on, an interaction just started, or the FPS timer fired
    should_render = self.wisp_state.renderer.interactive_mode or self.user_mode.is_interacting()
    if not should_render:
        should_render = self.is_time_to_render()
    if should_render:
        self.render()  # Render objects uploaded to GPU
def is_time_to_render(self):
    """ Returns True when enough time elapsed since the last render to honor the target FPS.
    A target_fps of None means 'render as often as possible'; a non-positive target disables rendering.
    """
    elapsed = self.render_clock.time() - self.render_clock.last_ts
    target_fps = self.wisp_state.renderer.target_fps
    if target_fps is None:
        return True
    return target_fps > 0 and elapsed >= (1 / target_fps)
def on_init(self):
    """ glumpy event: invoked once app.run() initialization completes.
    Restores sys.argv, which run() temporarily cleared so glumpy's internal argparse
    wouldn't clash with Wisp's.
    """
    saved_argv = getattr(self, '_argv', None)
    if saved_argv is not None:
        sys.argv = saved_argv
        del self._argv
def on_resize(self, width, height):
    """ Invoked when the window is first created, or resized.
    A resize causes internal textures and buffers to regenerate according the window size.

    Args:
        width (int): New window width, in pixels.
        height (int): New window height, in pixels.
    """
    self.width = width
    self.height = height
    # Handle deallocation of shared resources: the old CUDA/GL handles point at textures
    # that are about to be replaced, so unregister them before recreating
    if self.cugl_rgb_handle is not None:
        cuda_unregister_resource(self.cugl_rgb_handle)
        self.cugl_rgb_handle = None
    if self.cugl_depth_handle is not None:
        cuda_unregister_resource(self.cugl_depth_handle)
        self.cugl_depth_handle = None
    tex = self._create_screen_texture(height, width, self.channel_depth, dtype=np.uint8)
    depth_tex = self._create_screen_texture(height, width, 4, dtype=np.float32) # TODO: Single channel
    self.cugl_rgb_handle = self._register_cugl_shared_texture(tex)
    self.cugl_depth_handle = self._register_cugl_shared_texture(depth_tex)
    if self.canvas_program is None:
        # First resize: build the billboard program from scratch
        self.canvas_program = self._create_gl_depth_billboard_program(texture=tex, depth_texture=depth_tex)
    else:
        # Subsequent resizes: free the previous GL textures and swap in the new ones
        if self.canvas_program['tex'] is not None:
            self.canvas_program['tex'].delete()
        if self.canvas_program['depth_tex'] is not None:
            self.canvas_program['depth_tex'].delete()
        self.canvas_program['tex'] = tex
        self.canvas_program['depth_tex'] = depth_tex
    self.render_core.resize_canvas(height=height, width=width)
    self.window.activate()
    gl.glViewport(0, 0, width, height)
    self._is_reposition_imgui_menu = True # Signal menu it needs to shift after resize
def is_canvas_event(self):
    """ Returns True when the current input event belongs to the canvas,
    rather than to one of imgui's windows.
    """
    gui_has_focus = self._is_imgui_focused
    return not gui_has_focus
def on_mouse_press(self, x, y, button):
    """ glumpy event: mouse button pressed; forwarded to the camera controller
    unless imgui currently owns the input.
    """
    if not self.is_canvas_event():
        return
    self.user_mode.handle_mouse_press(x, y, button)
def on_mouse_drag(self, x, y, dx, dy, button):
    """ glumpy event: mouse dragged with a button held; forwarded to the camera controller
    unless imgui currently owns the input.
    """
    if not self.is_canvas_event():
        return
    self.user_mode.handle_mouse_drag(x, y, dx, dy, button)
def on_mouse_release(self, x, y, button):
    """ glumpy event: mouse button released; forwarded to the camera controller
    unless imgui currently owns the input.
    """
    if not self.is_canvas_event():
        return
    self.user_mode.handle_mouse_release(x, y, button)
def on_mouse_scroll(self, x, y, dx, dy):
    """ The mouse wheel was scrolled by (dx,dy); forwarded to the camera controller
    unless imgui currently owns the input.
    """
    if not self.is_canvas_event():
        return
    self.user_mode.handle_mouse_scroll(x, y, dx, dy)
def on_mouse_motion(self, x, y, dx, dy):
    """ The mouse was moved with no buttons held down; forwarded to the camera controller
    unless imgui currently owns the input.
    """
    if not self.is_canvas_event():
        return
    self.user_mode.handle_mouse_motion(x, y, dx, dy)
def width(self):
    """ Returns the canvas width """
    # NOTE(review): reads like a @property getter (paired with a same-named setter in
    # the original file); the decorator is likely present in the full source - confirm.
    return self.wisp_state.renderer.canvas_width
def width(self, value: int):
    """ Sets the canvas width """
    # NOTE(review): reads like a @width.setter (paired with a same-named getter in
    # the original file); the decorator is likely present in the full source - confirm.
    self.wisp_state.renderer.canvas_width = value
def height(self):
    """ Returns the canvas height """
    # NOTE(review): reads like a @property getter (paired with a same-named setter in
    # the original file); the decorator is likely present in the full source - confirm.
    return self.wisp_state.renderer.canvas_height
def height(self, value: int):
    """ Sets the canvas height """
    # NOTE(review): reads like a @height.setter (paired with a same-named getter in
    # the original file); the decorator is likely present in the full source - confirm.
    self.wisp_state.renderer.canvas_height = value
def channel_depth(self):
    """ Returns the number of channels the screenbuffer uses for the color attachment. """
    return 4  # The framebuffer is assumed to store RGBA
def canvas_dirty(self):
    """ Returns if the canvas is dirty,
    that is, the app requires a redraw() to stay in sync with external changes
    """
    # NOTE(review): reads like a @property getter (paired with a same-named setter in
    # the original file); the decorator is likely present in the full source - confirm.
    return self.wisp_state.renderer.canvas_dirty
def canvas_dirty(self, value: bool):
    """ Marks the canvas as dirty,
    implying the app requires a redraw() to stay in sync with external changes
    """
    # NOTE(review): reads like a @canvas_dirty.setter (paired with a same-named getter in
    # the original file); the decorator is likely present in the full source - confirm.
    self.wisp_state.renderer.canvas_dirty = value
def _update_imgui_keys(self, symbol):
    """ Syncs the pressed-state of special keys into imgui's io, working around key
    mappings missing from glumpy's glfw-imgui backend (mainly numpad keys).

    Args:
        symbol (int): The key symbol glumpy reported, or -1 when unknown.

    Returns:
        (int): The input symbol, possibly remapped from a numpad key to its standard equivalent.
    """
    # Normally glfw shouldn't be explicitly imported as glumpy uses it as backend.
    # However, here we are forced to do that to take care of missing key mappings
    import glfw
    keys = [glfw.KEY_BACKSPACE, glfw.KEY_DELETE, glfw.KEY_ENTER, glfw.KEY_HOME, glfw.KEY_END,
            glfw.KEY_LEFT_SHIFT, glfw.KEY_RIGHT_SHIFT,
            glfw.KEY_RIGHT, glfw.KEY_LEFT, glfw.KEY_UP, glfw.KEY_DOWN,
            glfw.KEY_0, glfw.KEY_1, glfw.KEY_2, glfw.KEY_3, glfw.KEY_4, glfw.KEY_5,
            glfw.KEY_6, glfw.KEY_7, glfw.KEY_8, glfw.KEY_9,
            glfw.KEY_KP_0, glfw.KEY_KP_1, glfw.KEY_KP_2, glfw.KEY_KP_3, glfw.KEY_KP_4, glfw.KEY_KP_5,
            glfw.KEY_KP_6, glfw.KEY_KP_7, glfw.KEY_KP_8, glfw.KEY_KP_9,
            glfw.KEY_KP_ENTER, glfw.KEY_KP_ADD, glfw.KEY_KP_SUBTRACT, glfw.KEY_KP_DECIMAL
            ]
    mappings = {    # Missing keys from glumpy glfw-imgui backend
        glfw.KEY_KP_0: glfw.KEY_0,
        glfw.KEY_KP_1: glfw.KEY_1,
        glfw.KEY_KP_2: glfw.KEY_2,
        glfw.KEY_KP_3: glfw.KEY_3,
        glfw.KEY_KP_4: glfw.KEY_4,
        glfw.KEY_KP_5: glfw.KEY_5,
        glfw.KEY_KP_6: glfw.KEY_6,
        glfw.KEY_KP_7: glfw.KEY_7,
        glfw.KEY_KP_8: glfw.KEY_8,
        glfw.KEY_KP_9: glfw.KEY_9,
        glfw.KEY_KP_ENTER: glfw.KEY_ENTER,
        glfw.KEY_KP_SUBTRACT: glfw.KEY_MINUS,
        glfw.KEY_KP_DECIMAL: glfw.KEY_PERIOD
    }
    updated_symbol = symbol
    for key in keys:
        # Poll the live key state straight from glfw and mirror it into imgui
        is_key_on = glfw.get_key(self.window.native_window, key)
        imgui.get_io().keys_down[key] = is_key_on
        # Recover an unknown symbol (-1) from any currently-held mappable numpad key
        if symbol == -1 and is_key_on and key in mappings:
            updated_symbol = mappings[key]
    # TODO: Verify imgui keys have been properly mapped during initialization..
    # imgui.get_io().key_map[imgui.KEY_BACKSPACE] = app.window.key.BACKSPACE
    # imgui.get_io().key_map[imgui.KEY_DELETE] = app.window.key.DELETE
    # imgui.get_io().key_map[imgui.KEY_ENTER] = glfw.KEY_ENTER
    # imgui.get_io().key_map[imgui.KEY_RIGHT_ARROW] = glfw.KEY_RIGHT
    # imgui.get_io().key_map[imgui.KEY_LEFT_ARROW] = glfw.KEY_LEFT
    # imgui.get_io().key_map[imgui.KEY_UP_ARROW] = glfw.KEY_UP
    # imgui.get_io().key_map[imgui.KEY_DOWN_ARROW] = glfw.KEY_DOWN
    return updated_symbol
def on_key_press(self, symbol, modifiers):
    """ glumpy event: keyboard key pressed. Syncs the key state with imgui, forwards the
    event to the camera controller when the canvas has focus, and handles the camera-mode
    hotkeys (T / F / U).
    """
    symbol = self._update_imgui_keys(symbol)
    if symbol > 0:
        imgui.get_io().add_input_character(symbol)
    if self.is_canvas_event():
        self.user_mode.handle_key_press(symbol, modifiers)
        # TODO: Hotkey handling shouldn't live here
        if symbol in (app.window.key.T, ord('T'), ord('t')):
            self.change_user_mode("Trackball")
        elif symbol in (app.window.key.F, ord('F'), ord('f')):
            self.change_user_mode("First Person View")
        elif symbol in (app.window.key.U, ord('U'), ord('u')):
            self.change_user_mode("Turntable")
def on_key_release(self, symbol, modifiers):
    """ glumpy event: keyboard key released. Syncs the key state with imgui, then forwards
    the event to the camera controller when the canvas has focus.
    """
    symbol = self._update_imgui_keys(symbol)
    if not self.is_canvas_event():
        return
    self.user_mode.handle_key_release(symbol, modifiers)
def dump_framebuffer(self, path='./framebuffer'):
    """ Dumps debug images of the GL screen framebuffer (color and depth).
    This framebuffer should reflect the exact content of the window.

    Args:
        path (str): Output prefix; '_color.png' and '_depth.png' are appended.
    """
    # glReadPixels fills `height` rows of `width` pixels, so the numpy buffer must be
    # shaped (rows=height, cols=width * channels). The previous (width, height * 3) shape
    # held the right number of bytes but scrambled the rows for non-square windows.
    framebuffer = np.zeros((self.height, self.width * 3), dtype=np.uint8)
    gl.glReadPixels(0, 0, self.width, self.height,
                    gl.GL_RGB, gl.GL_UNSIGNED_BYTE, framebuffer)
    framebuffer = np.flip(framebuffer, 0)  # GL's origin is bottom-left; images are top-down
    ext.png.from_array(framebuffer, 'RGB').save(path + '_color.png')
    framebuffer = np.zeros((self.height, self.width), dtype=np.float32)
    gl.glReadPixels(0, 0, self.width, self.height,
                    gl.GL_DEPTH_COMPONENT, gl.GL_FLOAT, framebuffer)
    framebuffer = np.flip(framebuffer, 0)
    # NOTE(review): pypng's from_array expects integer samples; float32 depth may need
    # scaling to uint8/uint16 before saving - confirm against the pypng API.
    ext.png.from_array(framebuffer, 'L').save(path + '_depth.png')
def register_io_mappings(self):
    """ Maps Wisp's backend-agnostic mouse / key symbols to glumpy's concrete values,
    so camera controllers can handle input without knowing the windowing backend.
    """
    WispMouseButton.register_symbol(WispMouseButton.LEFT_BUTTON, app.window.mouse.LEFT)
    WispMouseButton.register_symbol(WispMouseButton.MIDDLE_BUTTON, app.window.mouse.MIDDLE)
    WispMouseButton.register_symbol(WispMouseButton.RIGHT_BUTTON, app.window.mouse.RIGHT)
    WispKey.register_symbol(WispKey.LEFT, app.window.key.LEFT)
    WispKey.register_symbol(WispKey.RIGHT, app.window.key.RIGHT)
    WispKey.register_symbol(WispKey.UP, app.window.key.UP)
    WispKey.register_symbol(WispKey.DOWN, app.window.key.DOWN)
    # TODO: Take care of remaining mappings, and verify the event handlers of glumpy were not overriden
The provided code snippet includes necessary dependencies for implementing the `enable_amp` function. Write a Python function `def enable_amp(func)` to solve the following problem:
An extension to @torch.cuda.amp.autocast which queries WispState to check if mixed precision should be enabled.
Here is the function:
def enable_amp(func):
    """ An extension to @torch.cuda.amp.autocast which queries WispState to check if
    mixed precision should be enabled.

    Args:
        func: A WispApp method to wrap; it runs inside an autocast context whose
        enabled flag tracks the app's renderer settings at call time.

    Returns:
        The wrapped method.
    """
    def _enable_amp(self: WispApp, *args, **kwargs):
        # Toggle autocast per call, based on the app's current renderer settings
        with torch.cuda.amp.autocast(enabled=self.wisp_state.renderer.enable_amp):
            return func(self, *args, **kwargs)
    return _enable_amp
23,016 | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Dict, Type, Any
from wisp.framework import WispState
from collections import deque
from wisp.core.colors import colors_generator, white, black, dark_gray, gray
_WIDGETS_REGISTRY: Dict[Type[Any], Type[WidgetImgui]] = dict()
class WidgetImgui(ABC):
    """ Base class for imgui widgets: paintable gui components that visualize some part
    of the shared WispState. Subclasses must implement paint().
    """
    def __init__(self):
        pass

    def paint(self, state: WispState, *args, **kwargs):
        """ Draws the widget contents for the current frame (imgui runs in immediate mode).

        Args:
            state (WispState): Shared state object the widget reads from.
        """
        raise NotImplementedError('imgui widgets must implement the paint method.')
The provided code snippet includes necessary dependencies for implementing the `widget` function. Write a Python function `def widget(wisp_block: Type[Any])` to solve the following problem:
A decorator that registers a gui widget to paint the contents of a given wisp block. By registering a widget, the gui system knows how to load this widget when it traverses the scene graph & properties and encounters the wisp_block type. Users adding new wisp blocks can directly register corresponding widgets using this decorator.
Here is the function:
def widget(wisp_block: Type[Any]):
    """ A decorator that registers a gui widget to paint the contents of a given wisp block.
    By registering a widget, the gui system knows how to load this widget when it traverses the
    scene graph & properties and encounters the wisp_block type.
    Users adding new wisp blocks can directly register corresponding widgets using this decorator.
    """
    def _register(widget_class: Type[WidgetImgui]):
        # Remember which widget paints this block type; return the class unchanged so the
        # decorator is transparent to its target.
        _WIDGETS_REGISTRY[wisp_block] = widget_class
        return widget_class
    return _register
23,017 | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Dict, Type, Any
from wisp.framework import WispState
from collections import deque
from wisp.core.colors import colors_generator, white, black, dark_gray, gray
_WIDGETS_REGISTRY: Dict[Type[Any], Type[WidgetImgui]] = dict()
class WidgetImgui(ABC):
    """ Base class for imgui widgets: paintable gui components that visualize some part
    of the shared WispState. Subclasses must implement paint().
    """
    def __init__(self):
        pass

    def paint(self, state: WispState, *args, **kwargs):
        """ Draws the widget contents for the current frame (imgui runs in immediate mode).

        Args:
            state (WispState): Shared state object the widget reads from.
        """
        raise NotImplementedError('imgui widgets must implement the paint method.')
def _lookup_widget(wisp_block: Any) -> Type[WidgetImgui]:
    """ Searches the class hierarchy of wisp_block for the first base class that has a
    registered widget, using BFS over __bases__.

    Args:
        wisp_block: A type, or an instance (its type is used).

    Returns:
        The registered widget type, or None when no ancestor is registered.
    """
    block_type = wisp_block if isinstance(wisp_block, type) else type(wisp_block)
    # BFS over the base classes, starting from the concrete type itself
    pending = deque([block_type])
    while pending:
        candidate = pending.popleft()
        registered = _WIDGETS_REGISTRY.get(candidate)
        if registered is not None:
            return registered
        pending.extend(candidate.__bases__)
    return None
The provided code snippet includes necessary dependencies for implementing the `get_widget` function. Write a Python function `def get_widget(wisp_block: Any) -> WidgetImgui` to solve the following problem:
Return a widget which matches the given wisp block. A wisp block can be of any type / subtype which was registered with @widget. The lookup logic will first look for a widget registered under the type of wisp_block, and if it cannot find it, it will start looking up the hierarchy. Note that multiple-inheritance may result in undefined behavior in case of more than one match - in such cases users are encouraged to register some widget to the concrete type. Most wisp interfaces should already be registered to some general widget, so concrete types are not required to register a dedicated widget. However, they may register a specialized widget returned to display other forms of information. For example: Args: wisp_block: any object whose type / base types were registered with @widget. Returns: (WidgetImgui) imgui widget which matches the given wisp_block type
Here is the function:
def get_widget(wisp_block: Any) -> WidgetImgui:
    """ Return a widget which matches the given wisp block.
    A wisp block can be of any type / subtype which was registered with @widget.
    The lookup logic will first look for a widget registered under the type of wisp_block, and if it cannot find it,
    it will start looking up the hierarchy.
    Note that multiple-inheritance may result in undefined behavior in case of more than one match - in such cases
    users are encouraged to register some widget to the concrete type.
    Most wisp interfaces should already be registered to some general widget, so concrete types are not required
    to register a dedicated widget.
    However, they may register a specialized widget to display other forms of information.

    Args:
        wisp_block: any object whose type / base types were registered with @widget.

    Returns:
        (WidgetImgui) imgui widget which matches the given wisp_block type

    Raises:
        ValueError: when neither the block's type nor any of its bases has a registered widget.
    """
    block_type = type(wisp_block)
    widget_type = _WIDGETS_REGISTRY.get(block_type)
    if widget_type is None:
        # Cache the hierarchy-lookup result so future calls resolve in a single dict access
        widget_type = _lookup_widget(block_type)
        _WIDGETS_REGISTRY[block_type] = widget_type
    if widget_type is None:
        # Bug fix: the original message interpolated widget_type, which is always None on this path;
        # report the block type the caller actually asked about.
        raise ValueError(f'Gui cannot find a widget for {block_type}. Make sure to register some widget with @widget, '
                         f'or consider if {block_type} should subclass WispModule.')
    widget_instance = widget_type()  # Component widgets are assumed to take no args during construction
    return widget_instance
23,018 | from __future__ import annotations
import torch
from kaolin.render.camera import Camera
from kaolin.render.camera.intrinsics import CameraFOV
from wisp.core import Rays
def generate_default_grid(width, height, device=None):
    """ Generates a dense pixel-coordinate grid of shape (height, width).

    Args:
        width (int): Number of columns (x resolution).
        height (int): Number of rows (y resolution).
        device (torch.device): Optional device for the coordinate tensors.

    Returns:
        (torch.FloatTensor, torch.FloatTensor): pixel_y, pixel_x grids, each of shape (height, width).
    """
    h_coords = torch.arange(height, device=device, dtype=torch.float)
    w_coords = torch.arange(width, device=device, dtype=torch.float)
    # indexing='ij' matches the legacy meshgrid behavior explicitly and silences torch's
    # deprecation warning for calls that omit the indexing argument.
    return torch.meshgrid(h_coords, w_coords, indexing='ij')  # return pixel_y, pixel_x
def generate_centered_pixel_coords(img_width, img_height, res_x=None, res_y=None, device=None):
    """ Generates a (res_y, res_x) pixel grid scaled to span an image of (img_height, img_width),
    with a half-pixel bias so coordinates land on pixel centers.

    Args:
        img_width (int): Full image width the grid should cover.
        img_height (int): Full image height the grid should cover.
        res_x (int): Grid x resolution; None means one sample per image pixel (no scaling).
        res_y (int): Grid y resolution; None means one sample per image pixel (no scaling).
        device (torch.device): Optional device for the coordinate tensors.

    Returns:
        (torch.FloatTensor, torch.FloatTensor): pixel_y, pixel_x grids of shape (res_y, res_x).
    """
    pixel_y, pixel_x = generate_default_grid(res_x, res_y, device)
    # When a resolution is given, stretch the grid so it covers the full image extent
    scale_x = 1.0 if res_x is None else float(img_width) / res_x
    scale_y = 1.0 if res_y is None else float(img_height) / res_y
    # The +0.5 bias centers each sample within its pixel
    return pixel_y * scale_y + 0.5, pixel_x * scale_x + 0.5
23,019 | from __future__ import annotations
import torch
from kaolin.render.camera import Camera
from kaolin.render.camera.intrinsics import CameraFOV
from wisp.core import Rays
def _to_ndc_coords(pixel_x, pixel_y, camera):
pixel_x = 2 * (pixel_x / camera.width) - 1.0
pixel_y = 2 * (pixel_y / camera.height) - 1.0
return pixel_x, pixel_y
The provided code snippet includes necessary dependencies for implementing the `generate_pinhole_rays` function. Write a Python function `def generate_pinhole_rays(camera: Camera, coords_grid: torch.Tensor)` to solve the following problem:
Default ray generation function for pinhole cameras. This function assumes that the principal point (the pinhole location) is specified by a displacement (camera.x0, camera.y0) in pixel coordinates from the center of the image. The Kaolin camera class does not enforce a coordinate space for how the principal point is specified, so users will need to make sure that the correct principal point conventions are followed for the cameras passed into this function. Args: camera (kaolin.render.camera): The camera class. coords_grid (torch.FloatTensor): Grid of coordinates of shape [H, W, 2]. Returns: (wisp.core.Rays): The generated pinhole rays for the camera.
Here is the function:
def generate_pinhole_rays(camera: Camera, coords_grid: torch.Tensor):
    """Default ray generation function for pinhole cameras.

    This function assumes that the principal point (the pinhole location) is specified by a
    displacement (camera.x0, camera.y0) in pixel coordinates from the center of the image.

    The Kaolin camera class does not enforce a coordinate space for how the principal point is specified,
    so users will need to make sure that the correct principal point conventions are followed for
    the cameras passed into this function.

    Args:
        camera (kaolin.render.camera): The camera class.
        coords_grid (torch.FloatTensor): Grid of coordinates of shape [H, W, 2].

    Returns:
        (wisp.core.Rays): The generated pinhole rays for the camera.
    """
    if camera.device != coords_grid[0].device:
        raise Exception(f"Expected camera and coords_grid[0] to be on the same device, but found {camera.device} and {coords_grid[0].device}.")
    if camera.device != coords_grid[1].device:
        raise Exception(f"Expected camera and coords_grid[1] to be on the same device, but found {camera.device} and {coords_grid[1].device}.")
    # coords_grid should remain immutable (a new tensor is implicitly created here)
    pixel_y, pixel_x = coords_grid
    pixel_x = pixel_x.to(camera.device, camera.dtype)
    pixel_y = pixel_y.to(camera.device, camera.dtype)
    # Account for principal point (offsets from the center)
    # NOTE(review): x0 is subtracted while y0 is added - presumably due to the y-down pixel
    # convention vs the camera's y-up axis; confirm against kaolin's intrinsics documentation.
    pixel_x = pixel_x - camera.x0
    pixel_y = pixel_y + camera.y0
    # pixel values are now in range [-1, 1], both tensors are of shape res_y x res_x
    pixel_x, pixel_y = _to_ndc_coords(pixel_x, pixel_y, camera)
    # Scale NDC coordinates by the half-FOV tangents to obtain camera-space directions (z = -1)
    ray_dir = torch.stack((pixel_x * camera.tan_half_fov(CameraFOV.HORIZONTAL),
                           -pixel_y * camera.tan_half_fov(CameraFOV.VERTICAL),
                           -torch.ones_like(pixel_x)), dim=-1)
    ray_dir = ray_dir.reshape(-1, 3)    # Flatten grid rays to 1D array
    ray_orig = torch.zeros_like(ray_dir)
    # Transform from camera to world coordinates
    ray_orig, ray_dir = camera.extrinsics.inv_transform_rays(ray_orig, ray_dir)
    ray_dir /= torch.linalg.norm(ray_dir, dim=-1, keepdim=True)
    ray_orig, ray_dir = ray_orig[0], ray_dir[0]  # Assume a single camera
    return Rays(origins=ray_orig, dirs=ray_dir, dist_min=camera.near, dist_max=camera.far)
23,020 | from __future__ import annotations
import torch
from kaolin.render.camera import Camera
from kaolin.render.camera.intrinsics import CameraFOV
from wisp.core import Rays
def _to_ndc_coords(pixel_x, pixel_y, camera):
pixel_x = 2 * (pixel_x / camera.width) - 1.0
pixel_y = 2 * (pixel_y / camera.height) - 1.0
return pixel_x, pixel_y
def generate_ortho_rays(camera: Camera, coords_grid: torch.Tensor):
    """Ray generation for orthographic cameras: all rays share a direction parallel to the
    view axis, while origins are spread over the image plane (scaled by fov_distance).

    Args:
        camera (kaolin.render.camera): The camera class.
        coords_grid (torch.FloatTensor): Grid of coordinates of shape [H, W, 2].

    Returns:
        (wisp.core.Rays): The generated orthographic rays for the camera.
    """
    # coords_grid should remain immutable (a new tensor is implicitly created here)
    pixel_y, pixel_x = coords_grid
    pixel_y = pixel_y.to(camera.device, camera.dtype)
    pixel_x = pixel_x.to(camera.device, camera.dtype)
    # pixel values are now in range [-1, 1], both tensors are of shape res_y x res_x
    pixel_x, pixel_y = _to_ndc_coords(pixel_x, pixel_y, camera)
    # Rescale according to distance from camera
    aspect_ratio = camera.width / camera.height
    pixel_x *= camera.fov_distance * aspect_ratio
    pixel_y *= camera.fov_distance
    zeros = torch.zeros_like(pixel_x)
    ray_dir = torch.stack((zeros, zeros, -torch.ones_like(pixel_x)), dim=-1)    # Ortho rays are parallel
    ray_orig = torch.stack((pixel_x, -pixel_y, zeros), dim=-1)
    ray_dir = ray_dir.reshape(-1, 3)    # Flatten grid rays to 1D array
    ray_orig = ray_orig.reshape(-1, 3)  # Flatten grid rays to 1D array
    # Transform from camera to world coordinates
    ray_orig, ray_dir = camera.extrinsics.inv_transform_rays(ray_orig, ray_dir)
    ray_dir /= torch.linalg.norm(ray_dir, dim=-1, keepdim=True)
    ray_orig, ray_dir = ray_orig[0], ray_dir[0]  # Assume a single camera
    return Rays(origins=ray_orig, dirs=ray_dir, dist_min=camera.near, dist_max=camera.far)
23,021 | import torch
The provided code snippet includes necessary dependencies for implementing the `autodiff_gradient` function. Write a Python function `def autodiff_gradient(x, f)` to solve the following problem:
Compute gradient using the PyTorch autodiff. Args: x (torch.FloatTensor): Coordinate tensor f (nn.Module): The function to perform autodiff on.
Here is the function:
def autodiff_gradient(x, f):
    """Compute gradient using the PyTorch autodiff.

    Args:
        x (torch.FloatTensor): Coordinate tensor
        f (nn.Module): The function to perform autodiff on.

    Returns:
        (torch.FloatTensor): Gradient of f w.r.t. x, same shape as x.
    """
    # enable_grad so this also works inside torch.no_grad() inference blocks.
    with torch.enable_grad():
        x = x.requires_grad_(True)
        y = f(x)
        ones = torch.ones_like(y)
        # create_graph=True keeps the gradient differentiable (e.g. for eikonal losses).
        grad = torch.autograd.grad(y, x, grad_outputs=ones, create_graph=True)[0]
    return grad
23,022 | import torch
The provided code snippet includes necessary dependencies for implementing the `finitediff_gradient` function. Write a Python function `def finitediff_gradient(x, f, eps=0.005)` to solve the following problem:
Compute 3D gradient using finite difference. Args: x (torch.FloatTensor): Coordinate tensor of shape [..., 3] f (nn.Module): The function to perform autodiff on.
Here is the function:
def finitediff_gradient(x, f, eps=0.005):
    """Compute 3D gradient using finite difference.

    Args:
        x (torch.FloatTensor): Coordinate tensor of shape [..., 3]
        f (nn.Module): The function to perform autodiff on.
        eps (float): Half-width of the central-difference stencil.

    Returns:
        (torch.FloatTensor): Central-difference gradient of shape [..., 3].
    """
    # One axis-aligned offset per coordinate.
    offsets = (
        torch.tensor([eps, 0.0, 0.0], device=x.device),
        torch.tensor([0.0, eps, 0.0], device=x.device),
        torch.tensor([0.0, 0.0, eps], device=x.device),
    )
    # Central difference per axis: (f(x + e) - f(x - e)) / (2 * eps).
    diffs = [f(x + off) - f(x - off) for off in offsets]
    return torch.cat(diffs, dim=-1) / (eps * 2.0)
23,023 | import torch
The provided code snippet includes necessary dependencies for implementing the `tetrahedron_gradient` function. Write a Python function `def tetrahedron_gradient(x, f, eps=0.005)` to solve the following problem:
Compute 3D gradient using finite difference (using tetrahedron method). Args: x (torch.FloatTensor): Coordinate tensor of shape [..., 3] f (nn.Module): The function to perform autodiff on.
Here is the function:
def tetrahedron_gradient(x, f, eps=0.005):
    """Compute 3D gradient using finite difference (using tetrahedron method).

    Uses four probes at the vertices of a tetrahedron instead of six
    axis-aligned probes, trading two function evaluations for a slightly
    different error profile.

    Args:
        x (torch.FloatTensor): Coordinate tensor of shape [..., 3]
        f (nn.Module): The function to perform autodiff on.
        eps (float): Step size of the tetrahedron stencil.

    Returns:
        (torch.FloatTensor): Estimated gradient of shape [..., 3].
    """
    # The four tetrahedron vertices as +/-1 sign patterns; each probe point
    # is x + eps * k, and f(probe) is folded back in with the same signs.
    sign_patterns = (
        (1.0, -1.0, -1.0),
        (-1.0, -1.0, 1.0),
        (-1.0, 1.0, -1.0),
        (1.0, 1.0, 1.0),
    )
    grad = 0.0
    for pattern in sign_patterns:
        k = torch.tensor(pattern, device=x.device, requires_grad=False)
        probe = (x + eps * k).detach()  # cut the graph at the probe points
        grad = grad + k * f(probe)
    return grad / (eps * 4.0)
23,024 | import torch
from kaolin import _C
import wisp._C as wisp_C
import kaolin.ops.spc as spc_ops
PRIMES = [1, 2654435761, 805459861]
The provided code snippet includes necessary dependencies for implementing the `hashgrid_naive` function. Write a Python function `def hashgrid_naive(coords, resolutions, codebook_bitwidth, lod_idx, codebook, codebook_lod_sizes, codebook_lod_first_idx)` to solve the following problem:
A naive PyTorch implementation of the hashgrid. This code exists here mostly as a reference: Do NOT expect a 1-to-1 numerical correspondence to the CUDA accelerated version. This code is comparatively very slow. :) Args: coords (torch.FloatTensor): 3D coordinates of shape [batch, 3] resolutions (torch.LongTensor): the resolution of the grid per level of shape [num_lods] codebook_bitwidth (int): The bitwidth of the codebook. The codebook will have 2^bw entries. lod_idx (int): The LOD to aggregate to. codebook (torch.FloatTensor): A tensor containing the stacked codebooks, each of shape [codebook_size_lod_idx, feature_dim]. codebook_lod_sizes (torch.IntTensor): A tensor containing the codebook size at each level of detail. codebook_lod_first_idx (torch.IntTensor): A tensor containing the first index of each codebook in the stacked codebook tensor. Returns: (torch.FloatTensor): Features of shape [batch*num_samples, feature_dim]
Here is the function:
def hashgrid_naive(coords, resolutions, codebook_bitwidth, lod_idx, codebook, codebook_lod_sizes, codebook_lod_first_idx):
    """
    A naive PyTorch implementation of the hashgrid.
    This code exists here mostly as a reference:
    Do NOT expect a 1-to-1 numerical correspondence to the CUDA accelerated version.
    This code is comparatively very slow. :)
    Args:
        coords (torch.FloatTensor): 3D coordinates of shape [batch, 3]
        resolutions (torch.LongTensor): the resolution of the grid per level of shape [num_lods]
        codebook_bitwidth (int): The bitwidth of the codebook. The codebook will have 2^bw entries.
        lod_idx (int): The LOD to aggregate to.
        codebook (torch.FloatTensor): A tensor containing the stacked codebooks, each of shape [codebook_size_lod_idx, feature_dim].
        codebook_lod_sizes (torch.IntTensor): A tensor containing the codebook size at each level of detail.
        codebook_lod_first_idx (torch.IntTensor): A tensor containing the first index of each codebook in the stacked codebook tensor.
    Returns:
        (torch.FloatTensor): Features of shape [batch*num_samples, feature_dim]
    """
    codebook_size = 2**codebook_bitwidth
    feats = []
    for i, res in enumerate(resolutions[:lod_idx+1]):
        # NOTE(review): the remap below implies input coords are in [-1, 1]
        # (mapped to voxel space [0, res)); the original comment claimed [0, 1].
        tf_coords = torch.clip(((coords + 1.0) / 2.0) * res, 0, res-1-1e-5).reshape(-1, 3)
        cc000 = torch.floor(tf_coords).short()
        # The 8 integer corner points of the voxel containing each coordinate.
        cc = spc_ops.points_to_corners(cc000).long()
        num_pts = res**3
        if num_pts > codebook_size:
            # Dense grid does not fit in the codebook: spatial hash via XOR of
            # per-axis coordinates multiplied by large primes, modulo table size.
            cidx = (
                (cc[...,0] * PRIMES[0]) ^ (cc[...,1] * PRIMES[1]) ^ (cc[...,2] * PRIMES[2])
            ) % codebook_size
        else:
            # Dense grid fits: direct (collision-free) linear corner index.
            cidx = cc[...,0] + cc[...,1] * res + cc[...,2] * res * res
        # cidx: B, 8
        # Slice this LOD's codebook out of the stacked tensor, then gather corner features.
        fs = codebook[codebook_lod_first_idx[i] : codebook_lod_first_idx[i] + codebook_lod_sizes[i]][cidx.reshape(-1)] # B*8, F
        fs = fs.reshape(-1, 8, fs.shape[-1]) # B, 8, F
        coeffs = torch.zeros(coords.size(0), 8, device=coords.device, dtype=coords.dtype) # B, 8
        x = tf_coords - cc000  # fractional position within the voxel, in [0, 1)
        _x = 1.0 - x
        # Trilinear interpolation
        coeffs[...,0] = _x[...,0] * _x[...,1] * _x[...,2]
        coeffs[...,1] = _x[...,0] * _x[...,1] * x[...,2]
        coeffs[...,2] = _x[...,0] * x[...,1] * _x[...,2]
        coeffs[...,3] = _x[...,0] * x[...,1] * x[...,2]
        coeffs[...,4] = x[...,0] * _x[...,1] * _x[...,2]
        coeffs[...,5] = x[...,0] * _x[...,1] * x[...,2]
        coeffs[...,6] = x[...,0] * x[...,1] * _x[...,2]
        coeffs[...,7] = x[...,0] * x[...,1] * x[...,2]
        coeffs = coeffs.reshape(-1, 8, 1) # B, 8, 1
        fs_coeffs = (fs * coeffs).sum(1) # B, F
        feats.append(fs_coeffs)
    # TODO(ttakikawa): This probably does not return according to the num_samples interface
    return torch.cat(feats, -1) # B, F*L
23,025 | import torch
from kaolin import _C
import wisp._C as wisp_C
import kaolin.ops.spc as spc_ops
class HashGridInterpolate(torch.autograd.Function):
    """Autograd wrapper around the CUDA hashgrid lookup + interpolation kernel.

    Provides an analytic backward pass for the codebook and, when required by
    the graph, for the input coordinates as well.
    """
    # TODO(ttakikawa): This class should also support the 2D case... which also means I have to write another kernel!

    @staticmethod
    def forward(ctx, coords, resolutions, codebook_bitwidth, lod_idx, codebook, codebook_first_idx):
        # NOTE: forward/backward must be @staticmethod for torch.autograd.Function;
        # without it, HashGridInterpolate.apply(...) fails at runtime.
        if codebook[0].shape[-1] % 2 == 1:
            raise Exception("The codebook feature dimension needs to be a multiple of 2.")
        assert(coords.shape[-1] in [2, 3])
        if torch.is_autocast_enabled():
            # Match the precision the kernel expects under AMP.
            codebook = codebook.half()
        # TODO(ttakikawa): Make the kernel use the LOD
        feats_out = wisp_C.ops.hashgrid_interpolate_cuda(coords.contiguous(),
                                                         codebook,
                                                         codebook_first_idx,
                                                         resolutions,
                                                         codebook_bitwidth).contiguous()
        ctx.save_for_backward(coords, codebook, codebook_first_idx)
        ctx.resolutions = resolutions
        ctx.num_lods = len(resolutions)
        ctx.codebook_size = 2**codebook_bitwidth
        ctx.codebook_bitwidth = codebook_bitwidth
        ctx.feature_dim = codebook.shape[-1]
        return feats_out

    @staticmethod
    def backward(ctx, grad_output):
        coords, codebook, codebook_first_idx = ctx.saved_tensors
        resolutions = ctx.resolutions
        feature_dim = ctx.feature_dim
        codebook_bitwidth = ctx.codebook_bitwidth
        # Only pay for coordinate gradients when the graph actually needs them.
        coords_requires_grad = ctx.needs_input_grad[0]
        grad_coords, grad_codebook = wisp_C.ops.hashgrid_interpolate_backward_cuda(
            coords.float().contiguous(), grad_output.contiguous(), codebook,
            codebook_first_idx,
            resolutions,
            codebook_bitwidth, feature_dim, coords_requires_grad)
        # Exactly one gradient slot per forward() input (coords, resolutions,
        # codebook_bitwidth, lod_idx, codebook, codebook_first_idx); the original
        # returned a 7th slot, which autograd rejects as an incorrect gradient count.
        if coords_requires_grad:
            return (grad_coords, None, None, None, grad_codebook, None)
        else:
            return (None, None, None, None, grad_codebook, None)
The provided code snippet includes necessary dependencies for implementing the `hashgrid` function. Write a Python function `def hashgrid(coords, codebook_bitwidth, lod_idx, codebook)` to solve the following problem:
A hash-grid query + interpolation function, accelerated with CUDA. Args: coords (torch.FloatTensor): 3D coordinates of shape [batch, 3] codebook_bitwidth (int): The bitwidth of the codebook. The codebook will have 2^bw entries. lod_idx (int): The LOD to aggregate to. codebook (wisp.models.grids.utils.MultiTable): A class that holds multiresolution tables. Returns: (torch.FloatTensor): Features of shape [batch, feature_dim]
Here is the function:
def hashgrid(coords, codebook_bitwidth, lod_idx, codebook):
    """A hash-grid query + interpolation function, accelerated with CUDA.

    Args:
        coords (torch.FloatTensor): 3D coordinates of shape [batch, 3]
        codebook_bitwidth (int): The bitwidth of the codebook. The codebook will have 2^bw entries.
        lod_idx (int): The LOD to aggregate to.
        codebook (wisp.models.grids.utils.MultiTable): A class that holds multiresolution tables.

    Returns:
        (torch.FloatTensor): Features of shape [batch, feature_dim]
    """
    batch, _ = coords.shape
    feats = HashGridInterpolate.apply(coords.contiguous(),
                                      codebook.resolutions,
                                      codebook_bitwidth,
                                      lod_idx,
                                      codebook.feats,
                                      codebook.begin_idxes)
    # Output width: per-table feature dim times the number of resolution levels.
    num_lods = len(codebook.resolutions)
    return feats.reshape(batch, codebook.feats.shape[1] * num_lods)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.