repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_sim/envs/policy/linear.py | sarl_star_ros/CrowdNav/crowd_sim/envs/policy/linear.py | import numpy as np
from crowd_sim.envs.policy.policy import Policy
from crowd_sim.envs.utils.action import ActionXY
class Linear(Policy):
    """Baseline policy that drives straight toward the goal.

    Not trainable. Assumes holonomic kinematics (velocity may point in any
    direction), so the chosen action is simply the unit vector toward the
    goal scaled by the agent's preferred speed.
    """

    def __init__(self):
        super().__init__()
        self.trainable = False
        self.kinematics = 'holonomic'
        self.multiagent_training = True

    def configure(self, config):
        # Nothing to configure for this baseline; kept for interface parity.
        assert True

    def predict(self, state):
        """Return an ActionXY pointing from the robot's position to its goal."""
        me = state.self_state
        heading = np.arctan2(me.gy - me.py, me.gx - me.px)
        speed = me.v_pref
        return ActionXY(np.cos(heading) * speed, np.sin(heading) * speed)
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_sim/envs/policy/policy_factory.py | sarl_star_ros/CrowdNav/crowd_sim/envs/policy/policy_factory.py | from crowd_sim.envs.policy.linear import Linear
from crowd_sim.envs.policy.orca import ORCA
def none_policy():
    """Factory stub: the 'none' policy resolves to no policy object at all."""
    return None
# Registry mapping policy names (as they appear in config files) to the
# callables that construct the corresponding policy objects.
policy_factory = {
    'linear': Linear,
    'orca': ORCA,
    'none': none_policy,
}
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_nav/train.py | sarl_star_ros/CrowdNav/crowd_nav/train.py | # Author: Changan Chen <changanvr@gmail.com>
# Modified by: Keyu Li <kyli@link.cuhk.edu.hk>
from __future__ import division
import sys
import logging
import argparse
import configparser
import os
import shutil
import torch
import gym
import git
from crowd_sim.envs.utils.robot import Robot
from crowd_nav.utils.trainer import Trainer
from crowd_nav.utils.memory import ReplayMemory
from crowd_nav.utils.explorer import Explorer
from crowd_nav.policy.policy_factory import policy_factory
def main():
    """Train a navigation policy: parse args/configs, bootstrap the value
    network with imitation learning (unless resuming or IL weights already
    exist), then run the reinforcement-learning loop."""
    parser = argparse.ArgumentParser('Parse configuration file')
    parser.add_argument('--env_config', type=str, default='configs/env.config')
    parser.add_argument('--policy', type=str, default='cadrl') # --policy sarl
    parser.add_argument('--policy_config', type=str, default='configs/policy.config')
    parser.add_argument('--train_config', type=str, default='configs/train.config')
    parser.add_argument('--output_dir', type=str, default='data/output1')
    parser.add_argument('--weights', type=str)
    parser.add_argument('--resume', default=False, action='store_true')
    parser.add_argument('--gpu', default=False, action='store_true')
    parser.add_argument('--debug', default=False, action='store_true')
    args = parser.parse_args()
    # configure paths
    make_new_dir = True
    if os.path.exists(args.output_dir):
        # NOTE(review): raw_input exists only under Python 2; on Python 3 this
        # raises NameError (the py3 equivalent is the commented line below).
        key = raw_input('Output directory already exists! Overwrite the folder? (y/n)') # python2
        #key = input('Output directory already exists! Overwrite the folder? (y/n)') # python3
        if key == 'y' and not args.resume:
            shutil.rmtree(args.output_dir)
        else:
            # Keep the existing directory and reuse the config copies that
            # were snapshotted into it on the original run.
            make_new_dir = False
            args.env_config = os.path.join(args.output_dir, os.path.basename(args.env_config))
            args.policy_config = os.path.join(args.output_dir, os.path.basename(args.policy_config))
            args.train_config = os.path.join(args.output_dir, os.path.basename(args.train_config))
    if make_new_dir:
        os.makedirs(args.output_dir)
        # Snapshot the configs used for this run alongside its outputs.
        shutil.copy(args.env_config, args.output_dir)
        shutil.copy(args.policy_config, args.output_dir)
        shutil.copy(args.train_config, args.output_dir)
    log_file = os.path.join(args.output_dir, 'output.log')
    il_weight_file = os.path.join(args.output_dir, 'il_model.pth')
    rl_weight_file = os.path.join(args.output_dir, 'rl_model.pth')
    # configure logging
    mode = 'a' if args.resume else 'w'
    file_handler = logging.FileHandler(log_file, mode=mode)
    stdout_handler = logging.StreamHandler(sys.stdout)
    level = logging.INFO if not args.debug else logging.DEBUG
    logging.basicConfig(level=level, handlers=[stdout_handler, file_handler],
                        format='%(asctime)s, %(levelname)s: %(message)s', datefmt="%Y-%m-%d %H:%M:%S")
    repo = git.Repo(search_parent_directories=True)
    logging.info('Current git head hash code: %s', format(repo.head.object.hexsha))
    device = torch.device("cuda:0" if torch.cuda.is_available() and args.gpu else "cpu")
    logging.info('Using device: %s', device)
    # configure policy
    policy = policy_factory[args.policy]()
    if not policy.trainable:
        parser.error('Policy has to be trainable')
    if args.policy_config is None:
        parser.error('Policy config has to be specified for a trainable network')
    policy_config = configparser.RawConfigParser()
    policy_config.read(args.policy_config)
    policy.configure(policy_config)
    policy.set_device(device)
    # configure environment
    env_config = configparser.RawConfigParser()
    env_config.read(args.env_config)
    env = gym.make('CrowdSim-v0')
    env.configure(env_config)
    robot = Robot(env_config, 'robot')
    env.set_robot(robot)
    # read training parameters
    if args.train_config is None:
        parser.error('Train config has to be specified for a trainable network')
    train_config = configparser.RawConfigParser()
    train_config.read(args.train_config)
    rl_learning_rate = train_config.getfloat('train', 'rl_learning_rate')
    train_batches = train_config.getint('train', 'train_batches')
    train_episodes = train_config.getint('train', 'train_episodes')
    sample_episodes = train_config.getint('train', 'sample_episodes')
    target_update_interval = train_config.getint('train', 'target_update_interval')
    evaluation_interval = train_config.getint('train', 'evaluation_interval')
    capacity = train_config.getint('train', 'capacity')
    epsilon_start = train_config.getfloat('train', 'epsilon_start')
    epsilon_end = train_config.getfloat('train', 'epsilon_end')
    epsilon_decay = train_config.getfloat('train', 'epsilon_decay')
    checkpoint_interval = train_config.getint('train', 'checkpoint_interval')
    # configure trainer and explorer
    memory = ReplayMemory(capacity)
    model = policy.get_model()
    batch_size = train_config.getint('trainer', 'batch_size')
    trainer = Trainer(model, memory, device, batch_size)
    explorer = Explorer(env, robot, device, memory, policy.gamma, target_policy=policy) # target policy: sarl
    # imitation learning
    if args.resume:
        if not os.path.exists(rl_weight_file):
            # NOTE(review): this only logs and falls through — torch.load
            # below will then raise; consider raising/exiting here instead.
            logging.error('RL weights does not exist')
        model.load_state_dict(torch.load(rl_weight_file))
        # Save the resumed run's checkpoints under a new name so the
        # original rl_model.pth is preserved.
        rl_weight_file = os.path.join(args.output_dir, 'resumed_rl_model.pth')
        logging.info('Load reinforcement learning trained weights. Resume training')
    elif os.path.exists(il_weight_file):
        model.load_state_dict(torch.load(il_weight_file))
        logging.info('Load imitation learning trained weights.')
    else:
        # No weights yet: bootstrap the value network by imitating a
        # demonstrator policy (e.g. ORCA) before RL starts.
        il_episodes = train_config.getint('imitation_learning', 'il_episodes')
        il_policy = train_config.get('imitation_learning', 'il_policy')
        il_epochs = train_config.getint('imitation_learning', 'il_epochs')
        il_learning_rate = train_config.getfloat('imitation_learning', 'il_learning_rate')
        trainer.set_learning_rate(il_learning_rate)
        # An invisible robot gets an extra safety buffer so the demonstrator
        # keeps distance even though humans do not react to it.
        if robot.visible:
            safety_space = 0
        else:
            safety_space = train_config.getfloat('imitation_learning', 'safety_space')
        il_policy = policy_factory[il_policy]()
        il_policy.multiagent_training = policy.multiagent_training
        il_policy.safety_space = safety_space
        robot.set_policy(il_policy) # robot.policy: orca
        explorer.run_k_episodes(il_episodes, 'train', update_memory=True, imitation_learning=True)
        trainer.optimize_epoch(il_epochs)
        torch.save(model.state_dict(), il_weight_file)
        logging.info('Finish imitation learning. Weights saved.')
        logging.info('Experience set size: %d/%d', len(memory), memory.capacity)
    explorer.update_target_model(model)
    # reinforcement learning
    policy.set_env(env)
    robot.set_policy(policy)
    robot.print_info()
    trainer.set_learning_rate(rl_learning_rate)
    # fill the memory pool with some RL experience
    if args.resume:
        robot.policy.set_epsilon(epsilon_end)
        explorer.run_k_episodes(100, 'train', update_memory=True, episode=0)
        logging.info('Experience set size: %d/%d', len(memory), memory.capacity)
    episode = 0
    while episode < train_episodes:
        # Linearly anneal exploration epsilon from start to end over
        # epsilon_decay episodes; resumed runs stay at the final value.
        if args.resume:
            epsilon = epsilon_end
        else:
            if episode < epsilon_decay:
                epsilon = epsilon_start + (epsilon_end - epsilon_start) / epsilon_decay * episode
            else:
                epsilon = epsilon_end
        robot.policy.set_epsilon(epsilon)
        # evaluate the model
        if episode % evaluation_interval == 0:
            explorer.run_k_episodes(env.case_size['val'], 'val', episode=episode)
        # sample k episodes into memory and optimize over the generated memory
        explorer.run_k_episodes(sample_episodes, 'train', update_memory=True, episode=episode)
        trainer.optimize_batch(train_batches)
        episode += 1
        # Periodically refresh the bootstrap target network and checkpoint.
        if episode % target_update_interval == 0:
            explorer.update_target_model(model)
        if episode != 0 and episode % checkpoint_interval == 0:
            torch.save(model.state_dict(), rl_weight_file)
    # final test
    explorer.run_k_episodes(env.case_size['test'], 'test', episode=episode)
# Script entry point.
if __name__ == '__main__':
    main()
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_nav/__init__.py | sarl_star_ros/CrowdNav/crowd_nav/__init__.py | python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false | |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_nav/test.py | sarl_star_ros/CrowdNav/crowd_nav/test.py | # Author: Changan Chen <changanvr@gmail.com>
# Modified by: Keyu Li <kyli@link.cuhk.edu.hk>
from __future__ import division
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../')
import logging
import argparse
import configparser
import os
import torch
import numpy as np
import gym
from crowd_nav.utils.explorer import Explorer
from crowd_nav.policy.policy_factory import policy_factory
from crowd_sim.envs.utils.robot import Robot
from crowd_sim.envs.policy.orca import ORCA
def main():
    """Evaluate a trained policy in CrowdSim: either visualize one test case
    step by step, or run batch evaluation over every case of a phase."""
    parser = argparse.ArgumentParser('Parse configuration file')
    parser.add_argument('--env_config', type=str, default='configs/env.config')
    parser.add_argument('--policy_config', type=str, default='configs/policy.config')
    parser.add_argument('--policy', type=str, default='sarl')
    parser.add_argument('--model_dir', type=str, default='data/output')
    parser.add_argument('--il', default=False, action='store_true')
    parser.add_argument('--gpu', default=False, action='store_true')
    parser.add_argument('--visualize', default=False, action='store_true')
    parser.add_argument('--phase', type=str, default='test')
    parser.add_argument('--test_case', type=int, default=4)
    parser.add_argument('--square', default=False, action='store_true')
    # NOTE(review): default=True combined with store_true means --circle is
    # effectively always True; the flag cannot be switched off.
    parser.add_argument('--circle', default=True, action='store_true')
    parser.add_argument('--video_file', type=str, default=None)
    # NOTE(review): missing action='store_true' — any value passed on the
    # command line arrives as a (truthy) string, not a bool.
    parser.add_argument('--with_costmap', default=False)
    args = parser.parse_args()
    # Resolve config/weight paths: prefer the copies stored in the model dir.
    if args.model_dir is not None:
        env_config_file = os.path.join(args.model_dir, os.path.basename(args.env_config))
        policy_config_file = os.path.join(args.model_dir, os.path.basename(args.policy_config))
        if args.il:
            model_weights = os.path.join(args.model_dir, 'il_model.pth')
        else:
            # if os.path.exists(os.path.join(args.model_dir, 'resumed_rl_model.pth')):
            #     model_weights = os.path.join(args.model_dir, 'resumed_rl_model.pth')
            # else:
            #     model_weights = os.path.join(args.model_dir, 'rl_model.pth')
            model_weights = os.path.join(args.model_dir, 'rl_model.pth')
    else:
        env_config_file = args.env_config
        policy_config_file = args.policy_config
    # configure logging and device
    logging.basicConfig(level=logging.INFO, format='%(asctime)s, %(levelname)s: %(message)s',
                        datefmt="%Y-%m-%d %H:%M:%S")
    device = torch.device("cuda:0" if torch.cuda.is_available() and args.gpu else "cpu")
    logging.info('Using device: %s', device)
    # configure policy
    policy = policy_factory[args.policy]()
    policy_config = configparser.RawConfigParser()
    policy_config.read(policy_config_file)
    policy.configure(policy_config)
    policy.query_env = False
    policy.with_costmap = args.with_costmap
    logging.info('with_costmap: %s', policy.with_costmap)
    logging.info('query_env: %s', policy.query_env)
    if policy.trainable:
        if args.model_dir is None:
            parser.error('Trainable policy must be specified with a model weights directory')
        policy.get_model().load_state_dict(torch.load(model_weights))
    # configure environment
    env_config = configparser.RawConfigParser()
    env_config.read(env_config_file)
    env = gym.make('CrowdSim-v0')
    env.configure(env_config)
    if args.square:
        env.test_sim = 'square_crossing'
    if args.circle:
        env.test_sim = 'circle_crossing'
    robot = Robot(env_config, 'robot')
    robot.set_policy(policy)
    env.set_robot(robot)
    explorer = Explorer(env, robot, device, gamma=0.9)
    policy.set_phase(args.phase)
    policy.set_device(device)
    # set safety space for ORCA in non-cooperative simulation
    if isinstance(robot.policy, ORCA):
        # NOTE(review): both branches assign 0 — the invisible-robot branch
        # presumably intended a non-zero buffer (cf. train.py); confirm.
        if robot.visible:
            robot.policy.safety_space = 0
        else:
            robot.policy.safety_space = 0
        logging.info('ORCA agent buffer: %f', robot.policy.safety_space)
    policy.set_env(env)
    robot.print_info()
    if args.visualize:
        # Roll out a single test case and render it as a video.
        ob = env.reset(args.phase, args.test_case)
        done = False
        last_pos = np.array(robot.get_position())
        while not done:
            action = robot.act(ob)
            ob, _, done, info = env.step(action)
            current_pos = np.array(robot.get_position())
            logging.debug('Speed: %.2f', np.linalg.norm(current_pos - last_pos) / robot.time_step)
            last_pos = current_pos
        env.render('video', args.video_file)
        logging.info('It takes %.2f seconds to finish. Final status is %s', env.global_time, info)
        if robot.visible and info == 'reach goal':
            human_times = env.get_human_times()
            logging.info('Average time for humans to reach goal: %.2f', sum(human_times) / len(human_times))
    else:
        # Batch evaluation over every case of the requested phase.
        explorer.run_k_episodes(env.case_size[args.phase], args.phase, print_failure=True)
# Script entry point.
if __name__ == '__main__':
    main()
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_nav/utils/memory.py | sarl_star_ros/CrowdNav/crowd_nav/utils/memory.py | from torch.utils.data import Dataset
# memory: list(state,value)
class ReplayMemory(Dataset):
    """Fixed-capacity ring buffer of (state, value) experience pairs.

    Exposed as a torch ``Dataset`` so a ``DataLoader`` can sample batches
    from it. Once full, new items overwrite the oldest ones.
    """

    def __init__(self, capacity):
        """
        :param capacity: maximum number of items kept before overwriting.
        """
        self.capacity = capacity
        self.memory = list()
        # Index where the next item is written; wraps around at capacity.
        self.position = 0

    def push(self, item):
        """Append *item*, replacing the oldest entry once the buffer is full."""
        # replace old experience with new experience
        if len(self.memory) < self.position + 1:
            self.memory.append(item)
        else:
            self.memory[self.position] = item
        self.position = (self.position + 1) % self.capacity

    def is_full(self):
        """Return True when the buffer holds `capacity` items."""
        return len(self.memory) == self.capacity

    def __getitem__(self, item):
        return self.memory[item]

    def __len__(self):
        return len(self.memory)

    def clear(self):
        """Drop all stored experience and rewind the write index.

        Bug fix: the write index must be reset along with the list —
        otherwise the next pushes start mid-ring and the buffer can never
        fill back up to full capacity.
        """
        self.memory = list()
        self.position = 0
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_nav/utils/plot.py | sarl_star_ros/CrowdNav/crowd_nav/utils/plot.py | import re
import argparse
import matplotlib.pyplot as plt
import numpy as np
def running_mean(x, n):
    """Length-`n` moving average of `x` via a cumulative-sum trick.

    Returns an array of len(x) - n + 1 window means.
    """
    padded = np.insert(x, 0, 0)
    totals = np.cumsum(padded)
    window_sums = totals[n:] - totals[:-n]
    return window_sums / float(n)
def main():
    """Parse CrowdNav training logs and plot smoothed TRAIN curves and/or raw
    VAL curves (success rate, collision rate, nav time, reward) per model."""
    parser = argparse.ArgumentParser()
    parser.add_argument('log_files', type=str, nargs='+')
    parser.add_argument('--plot_sr', default=False, action='store_true')
    parser.add_argument('--plot_cr', default=False, action='store_true')
    parser.add_argument('--plot_time', default=False, action='store_true')
    parser.add_argument('--plot_reward', default=True, action='store_true')
    parser.add_argument('--plot_train', default=True, action='store_true')
    parser.add_argument('--plot_val', default=False, action='store_true')
    parser.add_argument('--window_size', type=int, default=200)
    args = parser.parse_args()
    # define the names of the models you want to plot and the longest episodes you want to show
    # NOTE(review): `models` is indexed per log file — it must be at least as
    # long as the number of log_files supplied.
    models = ['LSTM-RL', 'SARL', 'OM-SARL']
    max_episodes = 10000
    # One axes object per metric, created lazily on first use.
    ax1 = ax2 = ax3 = ax4 = None
    ax1_legends = []
    ax2_legends = []
    ax3_legends = []
    ax4_legends = []
    for i, log_file in enumerate(args.log_files):
        with open(log_file, 'r') as file:
            log = file.read()
        # NOTE(review): the '.' in fragments like [0-1].\d+ is an unescaped
        # wildcard, not a literal decimal point — works on well-formed logs
        # but would be stricter written as \.
        val_pattern = r"VAL in episode (?P<episode>\d+) has success rate: (?P<sr>[0-1].\d+), " \
                      r"collision rate: (?P<cr>[0-1].\d+), nav time: (?P<time>\d+.\d+), " \
                      r"total reward: (?P<reward>[-+]?\d+.\d+)"
        val_episode = []
        val_sr = []
        val_cr = []
        val_time = []
        val_reward = []
        for r in re.findall(val_pattern, log):
            val_episode.append(int(r[0]))
            val_sr.append(float(r[1]))
            val_cr.append(float(r[2]))
            val_time.append(float(r[3]))
            val_reward.append(float(r[4]))
        train_pattern = r"TRAIN in episode (?P<episode>\d+) has success rate: (?P<sr>[0-1].\d+), " \
                        r"collision rate: (?P<cr>[0-1].\d+), nav time: (?P<time>\d+.\d+), " \
                        r"total reward: (?P<reward>[-+]?\d+.\d+)"
        train_episode = []
        train_sr = []
        train_cr = []
        train_time = []
        train_reward = []
        for r in re.findall(train_pattern, log):
            train_episode.append(int(r[0]))
            train_sr.append(float(r[1]))
            train_cr.append(float(r[2]))
            train_time.append(float(r[3]))
            train_reward.append(float(r[4]))
        # Truncate the training curves to a common horizon.
        train_episode = train_episode[:max_episodes]
        train_sr = train_sr[:max_episodes]
        train_cr = train_cr[:max_episodes]
        train_time = train_time[:max_episodes]
        train_reward = train_reward[:max_episodes]
        # smooth training plot
        train_sr_smooth = running_mean(train_sr, args.window_size)
        train_cr_smooth = running_mean(train_cr, args.window_size)
        train_time_smooth = running_mean(train_time, args.window_size)
        train_reward_smooth = running_mean(train_reward, args.window_size)
        # plot sr
        if args.plot_sr:
            if ax1 is None:
                _, ax1 = plt.subplots()
            if args.plot_train:
                ax1.plot(range(len(train_sr_smooth)), train_sr_smooth)
                ax1_legends.append(models[i])
            if args.plot_val:
                ax1.plot(val_episode, val_sr)
                ax1_legends.append(models[i])
            ax1.legend(ax1_legends)
            ax1.set_xlabel('Episodes')
            ax1.set_ylabel('Success Rate')
            ax1.set_title('Success rate')
        # plot time
        if args.plot_time:
            if ax2 is None:
                _, ax2 = plt.subplots()
            if args.plot_train:
                ax2.plot(range(len(train_time_smooth)), train_time_smooth)
                ax2_legends.append(models[i])
            if args.plot_val:
                ax2.plot(val_episode, val_time)
                ax2_legends.append(models[i])
            ax2.legend(ax2_legends)
            ax2.set_xlabel('Episodes')
            ax2.set_ylabel('Time(s)')
            ax2.set_title("Robot's Time to Reach Goal")
        # plot cr
        if args.plot_cr:
            if ax3 is None:
                _, ax3 = plt.subplots()
            if args.plot_train:
                ax3.plot(range(len(train_cr_smooth)), train_cr_smooth)
                ax3_legends.append(models[i])
            if args.plot_val:
                ax3.plot(val_episode, val_cr)
                ax3_legends.append(models[i])
            ax3.legend(ax3_legends)
            ax3.set_xlabel('Episodes')
            ax3.set_ylabel('Collision Rate')
            ax3.set_title('Collision Rate')
        # plot reward
        if args.plot_reward:
            if ax4 is None:
                _, ax4 = plt.subplots()
            if args.plot_train:
                ax4.plot(range(len(train_reward_smooth)), train_reward_smooth)
                ax4_legends.append(models[i])
            if args.plot_val:
                ax4.plot(val_episode, val_reward)
                ax4_legends.append(models[i])
            ax4.legend(ax4_legends)
            ax4.set_xlabel('Episodes')
            ax4.set_ylabel('Reward')
            ax4.set_title('Cumulative Discounted Reward')
    plt.show()
# Script entry point.
if __name__ == '__main__':
    main()
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_nav/utils/trainer.py | sarl_star_ros/CrowdNav/crowd_nav/utils/trainer.py | from __future__ import division
import logging
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
class Trainer(object):
    def __init__(self, model, memory, device, batch_size):
        """
        Train the trainable model of a policy
        """
        self.model = model
        self.device = device
        self.criterion = nn.MSELoss().to(device) # mean square error loss
        self.memory = memory
        # DataLoader over `memory`, built lazily on first optimize call.
        self.data_loader = None
        self.batch_size = batch_size
        # Created by set_learning_rate(); optimize_* raises until it is set.
        self.optimizer = None

    def set_learning_rate(self, learning_rate):
        # (Re)build the optimizer so the new rate takes effect — also used to
        # switch between the IL and RL learning rates.
        logging.info('Current learning rate: %f', learning_rate)
        self.optimizer = optim.SGD(self.model.parameters(), lr=learning_rate, momentum=0.9)

    def optimize_epoch(self, num_epochs):
        # Supervised regression over the whole memory, num_epochs times
        # (imitation learning). Returns the final epoch's average loss.
        if self.optimizer is None:
            raise ValueError('Learning rate is not set!')
        if self.data_loader is None:
            # randomly sample a batch of data from memory
            self.data_loader = DataLoader(self.memory, self.batch_size, shuffle=True)
        average_epoch_loss = 0
        for epoch in range(num_epochs):
            epoch_loss = 0
            for data in self.data_loader: # until all the dataset is read
                inputs, values = data
                inputs = Variable(inputs) # inputs: state: batchsize * human_num * (6+7)
                values = Variable(values) # value of the state
                self.optimizer.zero_grad() # clear the gradients
                outputs = self.model(inputs) # forward function in SARL: input state, output value
                loss = self.criterion(outputs, values)
                loss.backward()
                self.optimizer.step()
                epoch_loss += loss.data.item()
            # NOTE(review): divides the summed per-batch losses by the number
            # of samples, not of batches — fine as a relative progress metric.
            average_epoch_loss = epoch_loss / len(self.memory)
            logging.debug('Average loss in epoch %d: %.2E', epoch, average_epoch_loss)
        return average_epoch_loss

    def optimize_batch(self, num_batches):
        # One SGD step per sampled batch, num_batches times (RL phase).
        if self.optimizer is None:
            raise ValueError('Learning rate is not set!')
        if self.data_loader is None:
            self.data_loader = DataLoader(self.memory, self.batch_size, shuffle=True)
        losses = 0
        for _ in range(num_batches):
            # NOTE(review): next(iter(...)) builds a fresh (re-shuffled)
            # iterator each step and takes only its first batch — random
            # sampling, but wasteful for large memories.
            inputs, values = next(iter(self.data_loader))
            inputs = Variable(inputs)
            values = Variable(values)
            self.optimizer.zero_grad()
            outputs = self.model(inputs)
            loss = self.criterion(outputs, values)
            loss.backward()
            self.optimizer.step()
            losses += loss.data.item()
        average_loss = losses / num_batches
        logging.debug('Average loss : %.2E', average_loss)
        return average_loss
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_nav/utils/__init__.py | sarl_star_ros/CrowdNav/crowd_nav/utils/__init__.py | python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false | |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_nav/utils/explorer.py | sarl_star_ros/CrowdNav/crowd_nav/utils/explorer.py | from __future__ import division
import logging
import copy
import torch
import numpy as np
from crowd_sim.envs.utils.info import *
class Explorer(object):
    # Rolls out episodes with the robot's current policy, logs outcome
    # statistics, and optionally converts trajectories into (state, value)
    # training pairs pushed into the replay memory.
    def __init__(self, env, robot, device, memory=None, gamma=None, target_policy=None):
        self.env = env
        self.robot = robot
        # Path lengths of successful episodes, accumulated across calls.
        self.robot_path_length_list = []
        self.device = device
        self.memory = memory
        self.gamma = gamma
        self.target_policy = target_policy
        # Frozen copy of the value network used for bootstrapped targets.
        self.target_model = None

    def update_target_model(self, target_model):
        # Deep copy so continued training of the live model does not move the
        # target used for value bootstrapping.
        self.target_model = copy.deepcopy(target_model)

    # @profile
    def run_k_episodes(self, k, phase, update_memory=False, imitation_learning=False, episode=None,
                       print_failure=False):
        """Run k episodes in `phase` ('train'/'val'/'test'), log per-episode
        outcomes, optionally push experience into memory, then log aggregate
        success/collision/time/reward statistics."""
        self.robot.policy.set_phase(phase)
        success_times = []
        collision_times = []
        timeout_times = []
        success = 0
        collision = 0
        timeout = 0
        too_close = 0
        min_dist = []
        cumulative_rewards = []
        collision_cases = []
        timeout_cases = []
        for i in range(k):
            logging.info("running %s/%s episode" %(i+1,k))
            ob = self.env.reset(phase)
            done = False
            states = []
            actions = []
            rewards = []
            length = 0
            while not done:
                action = self.robot.act(ob)
                # Accumulate path length; 0.25 is presumably the simulation
                # time step in seconds — TODO confirm against env config.
                length = length + 0.25*np.linalg.norm([action.vx,action.vy])
                ob, reward, done, info = self.env.step(action)
                states.append(self.robot.policy.last_state)
                actions.append(action)
                rewards.append(reward)
                if isinstance(info, Danger):
                    too_close += 1
                    min_dist.append(info.min_dist)
            # Classify the terminal outcome reported by the environment.
            if isinstance(info, ReachGoal):
                logging.info("%s/%s episode: Success!" % (i + 1, k))
                self.robot_path_length_list.append(length)
                logging.info("Path length: %s" % length)
                success += 1
                success_times.append(self.env.global_time)
            elif isinstance(info, Collision):
                logging.info("%s/%s episode: Collision!" % (i + 1, k))
                collision += 1
                collision_cases.append(i)
                collision_times.append(self.env.global_time)
            elif isinstance(info, Timeout):
                logging.info("%s/%s episode: Timeout!" % (i + 1, k))
                timeout += 1
                timeout_cases.append(i)
                timeout_times.append(self.env.time_limit)
            else:
                raise ValueError('Invalid end signal from environment')
            if update_memory:
                if isinstance(info, ReachGoal) or isinstance(info, Collision):
                    # only add positive(success) or negative(collision) experience in experience set
                    self.update_memory(states, actions, rewards, imitation_learning)
            # Discounting uses gamma^(t * dt * v_pref), matching update_memory.
            cumulative_rewards.append(sum([pow(self.gamma, t * self.robot.time_step * self.robot.v_pref)
                                           * reward for t, reward in enumerate(rewards)])) # enumerate from 0
        success_rate = success / k
        collision_rate = collision / k
        assert success + collision + timeout == k
        avg_nav_time = sum(success_times) / len(success_times) if success_times else self.env.time_limit
        # NOTE(review): raises ZeroDivisionError if no episode has ever
        # succeeded since this Explorer was created — confirm acceptable.
        avg_path_length = sum(self.robot_path_length_list) / len(self.robot_path_length_list)
        logging.info("The average successful navigation path length: %s" % avg_path_length)
        extra_info = '' if episode is None else 'in episode {} '.format(episode)
        logging.info('{:<5} {}has success rate: {:.2f}, collision rate: {:.2f}, nav time: {:.2f}, total reward: {:.4f}'.
                     format(phase.upper(), extra_info, success_rate, collision_rate, avg_nav_time,
                            average(cumulative_rewards)))
        if phase in ['val', 'test']:
            total_time = sum(success_times + collision_times + timeout_times) * self.robot.time_step
            logging.info('Frequency of being in danger: %.2f and average min separate distance in danger: %.2f',
                         too_close / total_time, average(min_dist))
        if print_failure:
            logging.info('Collision cases: ' + ' '.join([str(x) for x in collision_cases]))
            logging.info('Timeout cases: ' + ' '.join([str(x) for x in timeout_cases]))

    def update_memory(self, states, actions, rewards, imitation_learning=False):
        """Convert one episode's trajectory into (state, value) pairs and push
        them into the replay memory (IL: Monte-Carlo returns; RL: one-step
        bootstrapped targets from the frozen target model)."""
        if self.memory is None or self.gamma is None:
            raise ValueError('Memory or gamma value is not set!')
        for i, state in enumerate(states):
            reward = rewards[i]
            # VALUE UPDATE
            if imitation_learning:
                # define the value of states in IL as cumulative discounted rewards, which is the same in RL
                state = self.target_policy.transform(state)
                # value = pow(self.gamma, (len(states) - 1 - i) * self.robot.time_step * self.robot.v_pref)
                value = sum([pow(self.gamma, max(t - i, 0) * self.robot.time_step * self.robot.v_pref) * reward
                             for t, reward in enumerate(rewards)])
            else:
                if i == len(states) - 1:
                    # terminal state
                    value = reward
                else:
                    # One-step TD target using the frozen target network.
                    next_state = states[i + 1]
                    gamma_bar = pow(self.gamma, self.robot.time_step * self.robot.v_pref)
                    value = reward + gamma_bar * self.target_model(next_state.unsqueeze(0)).data.item()
            value = torch.Tensor([value]).to(self.device)
            # transform state of different human_num into fixed-size tensor
            # NOTE(review): pads with zero rows up to 5 humans — presumably
            # the training-time crowd size; states with >5 humans would make
            # the padding size negative. Confirm against env config.
            if len(state.size()) == 1:
                human_num = 1
                feature_size = state.size()[0]
            else:
                human_num, feature_size = state.size()
            if human_num != 5:
                padding = torch.zeros((5 - human_num, feature_size))
                state = torch.cat([state, padding])
            self.memory.push((state, value))
def average(input_list):
    """Arithmetic mean of `input_list`, or 0 for an empty list."""
    return sum(input_list) / len(input_list) if input_list else 0
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_nav/configs/__init__.py | sarl_star_ros/CrowdNav/crowd_nav/configs/__init__.py | python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false | |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_nav/policy/lstm_rl.py | sarl_star_ros/CrowdNav/crowd_nav/policy/lstm_rl.py | import torch
import torch.nn as nn
import numpy as np
import logging
from crowd_nav.policy.cadrl import mlp
from crowd_nav.policy.multi_human_rl import MultiHumanRL
class ValueNetwork1(nn.Module):
    # LSTM-RL value network WITHOUT a pairwise interaction module: raw joint
    # states are fed straight into an LSTM, whose final hidden state is
    # concatenated with the robot's own state before the value MLP.
    def __init__(self, input_dim, self_state_dim, mlp_dims, lstm_hidden_dim):
        super().__init__()
        self.self_state_dim = self_state_dim
        self.lstm_hidden_dim = lstm_hidden_dim
        self.mlp = mlp(self_state_dim + lstm_hidden_dim, mlp_dims)
        self.lstm = nn.LSTM(input_dim, lstm_hidden_dim, batch_first=True)

    def forward(self, state):
        """
        First transform the world coordinates to self-centric coordinates and then do forward computation
        :param state: tensor of shape (batch_size, # of humans, length of a joint state)
        :return:
        """
        size = state.shape
        # The robot's own state is replicated in every human row; row 0 works.
        self_state = state[:, 0, :self.self_state_dim]
        # human_state = state[:, :, self.self_state_dim:]
        # NOTE(review): h0/c0 are allocated on the default (CPU) device —
        # this would mismatch if model/state live on GPU; confirm usage.
        h0 = torch.zeros(1, size[0], self.lstm_hidden_dim)
        c0 = torch.zeros(1, size[0], self.lstm_hidden_dim)
        output, (hn, cn) = self.lstm(state, (h0, c0))
        # hn: (1, batch, hidden) -> (batch, hidden)
        hn = hn.squeeze(0)
        joint_state = torch.cat([self_state, hn], dim=1)
        value = self.mlp(joint_state)
        return value
class ValueNetwork2(nn.Module):
    # LSTM-RL value network WITH a pairwise interaction module: each human
    # row is first embedded by mlp1, the embeddings run through an LSTM, and
    # the final hidden state plus the robot's own state feed the value MLP.
    def __init__(self, input_dim, self_state_dim, mlp1_dims, mlp_dims, lstm_hidden_dim):
        super().__init__()
        self.self_state_dim = self_state_dim
        self.lstm_hidden_dim = lstm_hidden_dim
        self.mlp1 = mlp(input_dim, mlp1_dims)
        self.mlp = mlp(self_state_dim + lstm_hidden_dim, mlp_dims)
        self.lstm = nn.LSTM(mlp1_dims[-1], lstm_hidden_dim, batch_first=True)

    def forward(self, state):
        """
        First transform the world coordinates to self-centric coordinates and then do forward computation
        :param state: tensor of shape (batch_size, # of humans, length of a joint state)
        :return:
        """
        size = state.shape
        # The robot's own state is replicated in every human row; row 0 works.
        self_state = state[:, 0, :self.self_state_dim]
        # Flatten (batch, humans, feat) -> (batch*humans, feat) for mlp1,
        # then restore the sequence shape for the LSTM.
        state = torch.reshape(state, (-1, size[2]))
        mlp1_output = self.mlp1(state)
        mlp1_output = torch.reshape(mlp1_output, (size[0], size[1], -1))
        # NOTE(review): h0/c0 are allocated on the default (CPU) device —
        # this would mismatch if model/state live on GPU; confirm usage.
        h0 = torch.zeros(1, size[0], self.lstm_hidden_dim)
        c0 = torch.zeros(1, size[0], self.lstm_hidden_dim)
        output, (hn, cn) = self.lstm(mlp1_output, (h0, c0))
        hn = hn.squeeze(0)
        joint_state = torch.cat([self_state, hn], dim=1)
        value = self.mlp(joint_state)
        return value
class LstmRL(MultiHumanRL):
    # LSTM-RL policy wrapper: builds one of the two value networks from the
    # [lstm_rl] config section and sorts humans by distance before predict.
    def __init__(self):
        super().__init__()
        self.name = 'LSTM-RL'
        self.with_interaction_module = None
        self.interaction_module_dims = None

    def configure(self, config):
        # Build the value network according to the [lstm_rl] config section.
        self.set_common_parameters(config)
        mlp_dims = [int(x) for x in config.get('lstm_rl', 'mlp2_dims').split(', ')]
        global_state_dim = config.getint('lstm_rl', 'global_state_dim')
        self.with_om = config.getboolean('lstm_rl', 'with_om')
        with_interaction_module = config.getboolean('lstm_rl', 'with_interaction_module')
        if with_interaction_module:
            mlp1_dims = [int(x) for x in config.get('lstm_rl', 'mlp1_dims').split(', ')]
            self.model = ValueNetwork2(self.input_dim(), self.self_state_dim, mlp1_dims, mlp_dims, global_state_dim)
        else:
            self.model = ValueNetwork1(self.input_dim(), self.self_state_dim, mlp_dims, global_state_dim)
        self.multiagent_training = config.getboolean('lstm_rl', 'multiagent_training')
        logging.info('Policy: {}LSTM-RL {} pairwise interaction module'.format(
            'OM-' if self.with_om else '', 'w/' if with_interaction_module else 'w/o'))

    def predict(self, state):
        """
        Input state is the joint state of robot concatenated with the observable state of other agents
        To predict the best action, agent samples actions and propagates one step to see how good the next state is
        thus the reward function is needed
        """
        def dist(human):
            # sort human order by decreasing distance to the robot
            return np.linalg.norm(np.array(human.position) - np.array(state.self_state.position))
        # Feed humans to the LSTM farthest-first so the final hidden state is
        # dominated by the closest (most relevant) agents.
        state.human_states = sorted(state.human_states, key=dist, reverse=True)
        return super().predict(state)
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_nav/policy/sarl.py | sarl_star_ros/CrowdNav/crowd_nav/policy/sarl.py | # Author: Changan Chen <changanvr@gmail.com>
# Modified by: Keyu Li <kyli@link.cuhk.edu.hk>
from __future__ import division
import numpy as np
import torch
import torch.nn as nn
from torch.nn.functional import softmax
import logging
from crowd_sim.envs.utils.action import ActionRot, ActionXY
from crowd_nav.policy.cadrl import mlp
from crowd_nav.policy.multi_human_rl import MultiHumanRL
class ValueNetwork(nn.Module):
    """SARL value network: per-human pairwise features are embedded (mlp1/mlp2),
    pooled with learned attention weights, concatenated with the robot's own
    state, and mapped to a scalar state value (mlp3).
    """

    def __init__(self, input_dim, self_state_dim, mlp1_dims, mlp2_dims, mlp3_dims, attention_dims, with_global_state,
                 cell_size, cell_num):
        super(ValueNetwork, self).__init__()
        self.self_state_dim = self_state_dim
        # mlp1's last layer width doubles as the dimension of the mean-pooled global state.
        self.global_state_dim = mlp1_dims[-1]
        self.mlp1 = mlp(input_dim, mlp1_dims, last_relu=True)
        self.mlp2 = mlp(mlp1_dims[-1], mlp2_dims)
        self.with_global_state = with_global_state
        if with_global_state:
            # attention scores computed from [per-human embedding; mean embedding]
            self.attention = mlp(mlp1_dims[-1] * 2, attention_dims)
        else:
            self.attention = mlp(mlp1_dims[-1], attention_dims)
        self.cell_size = cell_size
        self.cell_num = cell_num
        mlp3_input_dim = mlp2_dims[-1] + self.self_state_dim
        self.mlp3 = mlp(mlp3_input_dim, mlp3_dims)
        # Softmax weights of the most recent forward pass (first batch element),
        # cached for visualization via SARL.get_attention_weights().
        self.attention_weights = None

    def forward(self, state):
        """
        First transform the world coordinates to self-centric coordinates and then do forward computation
        :param state: tensor of shape (batch_size, # of humans, length of a rotated state)
        :return:
        """
        size = state.shape
        # The robot's own state is replicated in every pairwise row; read it from row 0.
        self_state = state[:, 0, :self.self_state_dim]
        # Flatten (batch, humans) so mlp1/mlp2 process each robot-human pair independently.
        mlp1_output = self.mlp1(state.view((-1, size[2])))
        mlp2_output = self.mlp2(mlp1_output)
        if self.with_global_state:
            # compute attention scores
            global_state = torch.mean(mlp1_output.view(size[0], size[1], -1), 1, keepdim=True)
            global_state = global_state.expand((size[0], size[1], self.global_state_dim)).\
                contiguous().view(-1, self.global_state_dim)
            attention_input = torch.cat([mlp1_output, global_state], dim=1)
        else:
            attention_input = mlp1_output
        scores = self.attention(attention_input).view(size[0], size[1], 1).squeeze(dim=2)

        # masked softmax
        # weights = softmax(scores, dim=1).unsqueeze(2)
        # NOTE(review): rows whose score is exactly 0 are treated as padding and
        # masked out; exp() without subtracting the row max can overflow for large
        # scores — TODO confirm the score range seen in practice.
        scores_exp = torch.exp(scores) * (scores != 0).float()
        weights = (scores_exp / torch.sum(scores_exp, dim=1, keepdim=True)).unsqueeze(2)
        self.attention_weights = weights[0, :, 0].data.cpu().numpy()

        # output feature is a linear combination of input features
        features = mlp2_output.view(size[0], size[1], -1)
        # for converting to onnx
        # expanded_weights = torch.cat([torch.zeros(weights.size()).copy_(weights) for _ in range(50)], dim=2)
        weighted_feature = torch.sum(torch.mul(weights, features), dim=1)

        # concatenate agent's state with global weighted humans' state
        joint_state = torch.cat([self_state, weighted_feature], dim=1)
        value = self.mlp3(joint_state)
        return value
class SARL(MultiHumanRL):
    """Socially attentive RL policy: one-step lookahead over a sampled action
    space, scored by the attention-based ValueNetwork, optionally screened
    against a static-obstacle costmap supplied by the ROS side.
    """

    def __init__(self):
        super(SARL, self).__init__()
        self.name = 'SARL'
        # Costmap fields are populated externally (by the ROS node) when
        # with_costmap is flipped to True; until then gc stays None.
        self.with_costmap = False
        self.gc = None             # flattened global costmap values (row-major)
        self.gc_resolution = None  # meters per costmap cell
        self.gc_width = None       # costmap width in cells
        self.gc_ox = None          # costmap origin x in the world frame
        self.gc_oy = None          # costmap origin y in the world frame

    def configure(self, config):
        """Build the attention value network and read SARL hyper-parameters."""
        self.set_common_parameters(config)
        mlp1_dims = [int(x) for x in config.get('sarl', 'mlp1_dims').split(', ')]
        mlp2_dims = [int(x) for x in config.get('sarl', 'mlp2_dims').split(', ')]
        mlp3_dims = [int(x) for x in config.get('sarl', 'mlp3_dims').split(', ')]
        attention_dims = [int(x) for x in config.get('sarl', 'attention_dims').split(', ')]
        self.with_om = config.getboolean('sarl', 'with_om')
        with_global_state = config.getboolean('sarl', 'with_global_state')
        self.model = ValueNetwork(self.input_dim(), self.self_state_dim, mlp1_dims, mlp2_dims, mlp3_dims,
                                  attention_dims, with_global_state, self.cell_size, self.cell_num)
        self.multiagent_training = config.getboolean('sarl', 'multiagent_training')
        if self.with_om:
            self.name = 'OM-SARL'
        logging.info('Policy: {} {} global state'.format(self.name, 'w/' if with_global_state else 'w/o'))

    # predict the cost that the robot hits the static obstacles in the global map
    def compute_cost(self, state):
        """Return the max costmap value under a +/-0.3 m square footprint
        centered on (state.px, state.py).

        NOTE(review): the 0.3 m half-width presumably matches the robot
        radius — confirm against the deployed platform.
        """
        costs = []
        x = state.px
        y = state.py
        min_x = x - 0.3
        min_y = y - 0.3
        max_x = x + 0.3
        max_y = y + 0.3
        # World coordinates -> costmap cell indices.
        grid_min_x = int(round((min_x - self.gc_ox) / self.gc_resolution))
        grid_min_y = int(round((min_y - self.gc_oy) / self.gc_resolution))
        grid_max_x = int(round((max_x - self.gc_ox) / self.gc_resolution))
        grid_max_y = int(round((max_y - self.gc_oy) / self.gc_resolution))
        for i in range(grid_min_x, grid_max_x+1):
            for j in range(grid_min_y, grid_max_y + 1):
                # Row-major flattened index into the costmap array.
                index = i + self.gc_width * j
                costs.append(self.gc[index])
        max_cost = max(costs)
        return max_cost

    def predict(self, state):
        """
        Takes pairwise joint state as input to value network and output action.
        The input to the value network is always of shape (batch_size, # humans, rotated joint state length).
        If with_costmap is True, the dangerous actions predicted by the value network will be screened out to avoid static obstacles on the map.
        """
        if self.phase is None or self.device is None:
            raise AttributeError('Phase, device attributes have to be set!')
        if self.phase == 'train' and self.epsilon is None:
            raise AttributeError('Epsilon attribute has to be set in training phase')
        if self.reach_destination(state):
            return ActionXY(0, 0) if self.kinematics == 'holonomic' else ActionRot(0, 0)
        if self.action_space is None:
            # Action space depends on the preferred speed, so it is built lazily.
            self.build_action_space(state.self_state.v_pref)

        occupancy_maps = None
        probability = np.random.random()
        if self.phase == 'train' and probability < self.epsilon:
            # Epsilon-greedy exploration: uniformly random action.
            max_action = self.action_space[np.random.choice(len(self.action_space))]
        else:
            self.action_values = list()
            max_value = float('-inf')
            max_action = None
            for action in self.action_space:
                next_self_state = self.propagate(state.self_state, action)
                # Two-step rollout used only for the costmap screen below.
                next_self_state_further = self.propagate_more(state.self_state, action)
                # abort actions which will probably cause collision with static obstacles in the costmap
                if self.with_costmap is True:
                    cost = self.compute_cost(next_self_state_further)
                    if cost > 0:
                        print("********** Abort action:", action, " with cost:", cost, " that will hit the obstacles.")
                        continue
                if self.query_env:
                    next_human_states, reward, done, info = self.env.onestep_lookahead(action)
                else:
                    # Assume humans keep their current velocity for one time step.
                    next_human_states = [self.propagate(human_state, ActionXY(human_state.vx, human_state.vy))
                                         for human_state in state.human_states]
                    reward = self.compute_reward(next_self_state, next_human_states)
                batch_next_states = torch.cat([torch.Tensor([next_self_state + next_human_state]).to(self.device)
                                               for next_human_state in next_human_states], dim=0)
                rotated_batch_input = self.rotate(batch_next_states).unsqueeze(0)
                if self.with_om:
                    # Occupancy maps depend only on human states, so build once per call.
                    if occupancy_maps is None:
                        occupancy_maps = self.build_occupancy_maps(next_human_states).unsqueeze(0)
                    rotated_batch_input = torch.cat([rotated_batch_input, occupancy_maps], dim=2)
                # VALUE UPDATE
                next_state_value = self.model(rotated_batch_input).data.item()
                # Discount is scaled by time step and preferred speed (CADRL convention).
                value = reward + pow(self.gamma, self.time_step * state.self_state.v_pref) * next_state_value
                self.action_values.append(value)
                if value > max_value:
                    max_value = value
                    max_action = action
                # print("********** choose action:", action)
                # print("********** cost:", cost)
            if max_action is None:
                # if the robot is trapped, choose the turning action to escape
                max_action = ActionRot(0, 0.78)
                print("The robot is trapped. Rotate in place to escape......")

        if self.phase == 'train':
            self.last_state = self.transform(state)

        return max_action

    def compute_reward(self, nav, humans):
        """One-step reward: collision penalty, goal bonus, discomfort shaping."""
        # collision detection
        dmin = float('inf')
        collision = False
        if len(humans):
            for i, human in enumerate(humans):
                dist = np.linalg.norm((nav.px - human.px, nav.py - human.py)) - nav.radius - human.radius
                if dist < 0:
                    collision = True
                    break
                if dist < dmin:
                    dmin = dist

        # check if reaching the goal
        reaching_goal = np.linalg.norm((nav.px - nav.gx, nav.py - nav.gy)) < nav.radius
        if collision:
            reward = self.env.collision_penalty
        elif reaching_goal:
            reward = 1
        elif dmin < self.env.discomfort_dist:
            # Penalty grows the closer the robot gets within the discomfort zone.
            reward = (dmin - self.env.discomfort_dist) * self.env.discomfort_penalty_factor * self.env.time_step
        else:
            reward = 0
        return reward

    def get_attention_weights(self):
        """Attention weights cached by the value network's last forward pass."""
        return self.model.attention_weights
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_nav/policy/cadrl.py | sarl_star_ros/CrowdNav/crowd_nav/policy/cadrl.py | # Author: Changan Chen <changanvr@gmail.com>
# Modified by: Keyu Li <kyli@link.cuhk.edu.hk>
from __future__ import division
import torch
import torch.nn as nn
import numpy as np
import itertools
import logging
from crowd_sim.envs.policy.policy import Policy
from crowd_sim.envs.utils.action import ActionRot, ActionXY
from crowd_sim.envs.utils.state import ObservableState, FullState
def mlp(input_dim, mlp_dims, last_relu=False):
layers = []
mlp_dims = [input_dim] + mlp_dims # chuan qi lai
for i in range(len(mlp_dims) - 1):
layers.append(nn.Linear(mlp_dims[i], mlp_dims[i + 1]))
if i != len(mlp_dims) - 2 or last_relu:
layers.append(nn.ReLU())
net = nn.Sequential(*layers)
return net
class ValueNetwork(nn.Module):
    """Plain MLP value head used by CADRL.

    Maps a (batch, input_dim) joint-state tensor to a value estimate via a
    single feed-forward stack built by ``mlp``.
    """

    def __init__(self, input_dim, mlp_dims):
        super(ValueNetwork, self).__init__()
        # The entire network is one sequential MLP; no attention or pooling here.
        self.value_network = mlp(input_dim, mlp_dims)

    def forward(self, state):
        # Delegate straight to the underlying MLP.
        return self.value_network(state)
class CADRL(Policy):
    """Value-network policy over a single robot-human pair (CADRL).

    Provides the action-space construction, state propagation, and the
    coordinate rotation shared by all derived multi-human policies.
    """

    def __init__(self):
        super(CADRL, self).__init__()
        self.name = 'CADRL'
        self.trainable = True
        # Everything below is populated by configure()/setters; None until then.
        self.multiagent_training = None
        self.kinematics = None        # 'holonomic' or 'unicycle'
        self.epsilon = None           # exploration rate used during training
        self.gamma = None             # discount factor
        self.sampling = None
        self.speed_samples = None
        self.rotation_samples = None
        self.query_env = None         # if True, ask the env for one-step lookahead
        self.action_space = None
        self.speeds = None
        self.rotations = None
        self.action_values = None     # per-action values of the last predict() call
        self.with_om = None           # whether occupancy maps are appended to inputs
        self.cell_num = None
        self.cell_size = None
        self.om_channel_size = None
        self.self_state_dim = 6
        self.human_state_dim = 7
        self.joint_state_dim = self.self_state_dim + self.human_state_dim

    def configure(self, config):
        """Build the value network and read CADRL hyper-parameters."""
        self.set_common_parameters(config)
        mlp_dims = [int(x) for x in config.get('cadrl', 'mlp_dims').split(', ')]
        self.model = ValueNetwork(self.joint_state_dim, mlp_dims)
        self.multiagent_training = config.getboolean('cadrl', 'multiagent_training')
        logging.info('Policy: CADRL without occupancy map')

    def set_common_parameters(self, config):
        """Read parameters shared by CADRL and all derived policies."""
        self.gamma = config.getfloat('rl', 'gamma')
        self.kinematics = config.get('action_space', 'kinematics')
        self.sampling = config.get('action_space', 'sampling')
        self.speed_samples = config.getint('action_space', 'speed_samples')
        self.rotation_samples = config.getint('action_space', 'rotation_samples')
        self.query_env = config.getboolean('action_space', 'query_env')
        self.cell_num = config.getint('om', 'cell_num')
        self.cell_size = config.getfloat('om', 'cell_size')
        self.om_channel_size = config.getint('om', 'om_channel_size')

    def set_device(self, device):
        """Move the value network (and future tensors) to the given device."""
        self.device = device
        self.model.to(device)

    def set_epsilon(self, epsilon):
        # Exploration rate for the epsilon-greedy branch of predict().
        self.epsilon = epsilon

    def build_action_space(self, v_pref):
        """
        Action space consists of 25 uniformly sampled actions in permitted range and 25 randomly sampled actions.
        """
        holonomic = True if self.kinematics == 'holonomic' else False
        # Exponentially spaced speeds emphasize the low end of [0, v_pref].
        speeds = [0] + [(np.exp((i + 1) / self.speed_samples) - 1) / (np.e - 1) * v_pref for i in range(self.speed_samples)]
        if holonomic:
            # NOTE(review): the holonomic branch hard-codes 15 headings and
            # ignores rotation_samples — confirm this is intended.
            rotations = list(np.linspace(0, np.pi * 2, num=15, endpoint=False))
        else:
            rotations = list(np.linspace(-np.pi / 4, np.pi / 4, num=self.rotation_samples))

        # Always include the stop action first.
        action_space = [ActionXY(0, 0) if holonomic else ActionRot(0, 0)]
        for rotation, speed in itertools.product(rotations, speeds):
            if holonomic:
                action_space.append(ActionXY(speed * np.cos(rotation), speed * np.sin(rotation)))
            else:
                action_space.append(ActionRot(speed, rotation))

        self.speeds = speeds
        self.rotations = rotations
        self.action_space = action_space

    def propagate(self, state, action):
        """ propogate the state for one step. """
        if isinstance(state, ObservableState):
            # propagate state of humans
            next_px = state.px + action.vx * self.time_step
            next_py = state.py + action.vy * self.time_step
            next_state = ObservableState(next_px, next_py, action.vx, action.vy, state.radius)
        elif isinstance(state, FullState):
            # propagate state of current agent
            # perform action without rotation
            if self.kinematics == 'holonomic':
                next_px = state.px + action.vx * self.time_step
                next_py = state.py + action.vy * self.time_step
                next_state = FullState(next_px, next_py, action.vx, action.vy, state.radius,
                                       state.gx, state.gy, state.v_pref, state.theta)
            else:
                # Unicycle: rotate first, then translate along the new heading.
                next_theta = state.theta + action.r
                next_vx = action.v * np.cos(next_theta)
                next_vy = action.v * np.sin(next_theta)
                next_px = state.px + next_vx * self.time_step
                next_py = state.py + next_vy * self.time_step
                next_state = FullState(next_px, next_py, next_vx, next_vy, state.radius, state.gx, state.gy,
                                       state.v_pref, next_theta)
        else:
            raise ValueError('Type error')

        return next_state

    def propagate_more(self, state, action):
        """ propogate the state for two steps to see further. """
        if isinstance(state, ObservableState):
            # propagate state of humans
            next_px = state.px + action.vx * self.time_step * 2
            next_py = state.py + action.vy * self.time_step * 2
            next_state = ObservableState(next_px, next_py, action.vx, action.vy, state.radius)
        elif isinstance(state, FullState):
            # propagate state of current agent
            # perform action without rotation
            if self.kinematics == 'holonomic':
                next_px = state.px + action.vx * self.time_step * 2
                next_py = state.py + action.vy * self.time_step * 2
                next_state = FullState(next_px, next_py, action.vx, action.vy, state.radius,
                                       state.gx, state.gy, state.v_pref, state.theta)
            else:
                next_theta = state.theta + action.r
                next_vx = action.v * np.cos(next_theta)
                next_vy = action.v * np.sin(next_theta)
                next_px = state.px + next_vx * self.time_step * 2
                next_py = state.py + next_vy * self.time_step * 2
                next_state = FullState(next_px, next_py, next_vx, next_vy, state.radius, state.gx, state.gy,
                                       state.v_pref, next_theta)
        else:
            raise ValueError('Type error')

        return next_state

    def predict(self, state):
        """
        Input state is the joint state of robot concatenated by the observable state of other agents
        To predict the best action, agent samples actions and propagates one step to see how good the next state is
        thus the reward function is needed
        """
        if self.phase is None or self.device is None:
            raise AttributeError('Phase, device attributes have to be set!')
        if self.phase == 'train' and self.epsilon is None:
            raise AttributeError('Epsilon attribute has to be set in training phase')

        if self.reach_destination(state):
            return ActionXY(0, 0) if self.kinematics == 'holonomic' else ActionRot(0, 0)
        if self.action_space is None:
            self.build_action_space(state.self_state.v_pref)

        probability = np.random.random()
        if self.phase == 'train' and probability < self.epsilon:
            # Epsilon-greedy exploration: uniformly random action.
            max_action = self.action_space[np.random.choice(len(self.action_space))]
        else:
            self.action_values = list()
            max_min_value = float('-inf')
            max_action = None
            for action in self.action_space:
                next_self_state = self.propagate(state.self_state, action)
                ob, reward, done, info = self.env.onestep_lookahead(action)
                batch_next_states = torch.cat([torch.Tensor([next_self_state + next_human_state]).to(self.device)
                                               for next_human_state in ob], dim=0)
                # VALUE UPDATE
                # Take the min over humans: a conservative (worst-case) value.
                outputs = self.model(self.rotate(batch_next_states))
                min_output, min_index = torch.min(outputs, 0)
                min_value = reward + pow(self.gamma, self.time_step * state.self_state.v_pref) * min_output.data.item()
                self.action_values.append(min_value)
                if min_value > max_min_value:
                    max_min_value = min_value
                    max_action = action

        if self.phase == 'train':
            self.last_state = self.transform(state)

        return max_action

    def transform(self, state):
        """
        Take the state passed from agent and transform it to tensor for batch training
        :param state:
        :return: tensor of shape (len(state), )
        """
        # CADRL handles exactly one human; derived classes override this.
        assert len(state.human_states) == 1
        state = torch.Tensor(state.self_state + state.human_states[0]).to(self.device)
        state = self.rotate(state.unsqueeze(0)).squeeze(dim=0)
        return state

    def rotate(self, state):
        """
        Transform the coordinate to agent-centric. x axis: position -> goal
        Input state tensor is of size (batch_size, state_length)
        """
        # 'px', 'py', 'vx', 'vy', 'radius', 'gx', 'gy', 'v_pref', 'theta', 'px1', 'py1', 'vx1', 'vy1', 'radius1'
        #  0     1      2     3      4        5     6      7         8       9      10     11     12       13
        batch = state.shape[0]
        dx = (state[:, 5] - state[:, 0]).reshape((batch, -1))  # -1 means calculated automatically
        dy = (state[:, 6] - state[:, 1]).reshape((batch, -1))
        # Heading of the robot-to-goal vector; all quantities are rotated by -rot.
        rot = torch.atan2(state[:, 6] - state[:, 1], state[:, 5] - state[:, 0])

        dg = torch.norm(torch.cat([dx, dy], dim=1), 2, dim=1, keepdim=True)
        v_pref = state[:, 7].reshape((batch, -1))
        vx = (state[:, 2] * torch.cos(rot) + state[:, 3] * torch.sin(rot)).reshape((batch, -1))
        vy = (state[:, 3] * torch.cos(rot) - state[:, 2] * torch.sin(rot)).reshape((batch, -1))
        radius = state[:, 4].reshape((batch, -1))
        if self.kinematics == 'unicycle':
            theta = (state[:, 8] - rot).reshape((batch, -1))
        else:
            # set theta to be zero since it's not used
            theta = torch.zeros_like(v_pref)
        vx1 = (state[:, 11] * torch.cos(rot) + state[:, 12] * torch.sin(rot)).reshape((batch, -1))
        vy1 = (state[:, 12] * torch.cos(rot) - state[:, 11] * torch.sin(rot)).reshape((batch, -1))
        px1 = (state[:, 9] - state[:, 0]) * torch.cos(rot) + (state[:, 10] - state[:, 1]) * torch.sin(rot)
        px1 = px1.reshape((batch, -1))
        py1 = (state[:, 10] - state[:, 1]) * torch.cos(rot) - (state[:, 9] - state[:, 0]) * torch.sin(rot)
        py1 = py1.reshape((batch, -1))
        radius1 = state[:, 13].reshape((batch, -1))
        radius_sum = radius + radius1
        # da: current robot-human center distance (rotation-invariant).
        da = torch.norm(torch.cat([(state[:, 0] - state[:, 9]).reshape((batch, -1)), (state[:, 1] - state[:, 10]).
                                  reshape((batch, -1))], dim=1), 2, dim=1, keepdim=True)
        new_state = torch.cat([dg, v_pref, theta, radius, vx, vy, px1, py1, vx1, vy1, radius1, da, radius_sum], dim=1)
        return new_state
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_nav/policy/__init__.py | sarl_star_ros/CrowdNav/crowd_nav/policy/__init__.py | python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false | |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_nav/policy/multi_human_rl.py | sarl_star_ros/CrowdNav/crowd_nav/policy/multi_human_rl.py | # Author: Changan Chen <changanvr@gmail.com>
# Modified by: Keyu Li <kyli@link.cuhk.edu.hk>
from __future__ import division
import torch
import numpy as np
from crowd_sim.envs.utils.action import ActionRot, ActionXY
from crowd_nav.policy.cadrl import CADRL
class MultiHumanRL(CADRL):
    """Base class for value-based policies that handle multiple humans
    (SARL, LSTM-RL, ...): one-step lookahead over the sampled action space,
    optionally screened against a static-obstacle costmap.
    """

    def __init__(self):
        super(MultiHumanRL, self).__init__()
        # Costmap fields are populated externally (by the ROS node) when
        # static-obstacle avoidance is enabled; until then gc stays None.
        self.with_costmap = False
        self.gc = None             # flattened global costmap values (row-major)
        self.gc_resolution = None  # meters per costmap cell
        self.gc_width = None       # costmap width in cells
        self.gc_ox = None          # costmap origin x in the world frame
        self.gc_oy = None          # costmap origin y in the world frame

    # predict the cost that robot hits the static obstacles in the global map
    def compute_cost(self, state):
        """Return the max costmap value under a +/-0.3 m square footprint
        centered on (state.px, state.py).

        NOTE(review): the 0.3 m half-width presumably matches the robot
        radius — confirm against the deployed platform.
        """
        costs = []
        x = state.px
        y = state.py
        min_x = x - 0.3
        min_y = y - 0.3
        max_x = x + 0.3
        max_y = y + 0.3
        # World coordinates -> costmap cell indices.
        grid_min_x = int(round((min_x - self.gc_ox) / self.gc_resolution))
        grid_min_y = int(round((min_y - self.gc_oy) / self.gc_resolution))
        grid_max_x = int(round((max_x - self.gc_ox) / self.gc_resolution))
        grid_max_y = int(round((max_y - self.gc_oy) / self.gc_resolution))
        for i in range(grid_min_x, grid_max_x + 1):
            for j in range(grid_min_y, grid_max_y + 1):
                # Row-major flattened index into the costmap array.
                index = i + self.gc_width * j
                costs.append(self.gc[index])
        max_cost = max(costs)
        return max_cost

    def predict(self, state):
        """
        Takes pairwise joint state as input to value network and output action.
        The input to the value network is always of shape (batch_size, # humans, rotated joint state length).
        If with_costmap is True, the dangerous actions predicted by the value network will be screened out to avoid static obstacles on the map.
        """
        if self.phase is None or self.device is None:
            raise AttributeError('Phase, device attributes have to be set!')
        if self.phase == 'train' and self.epsilon is None:
            raise AttributeError('Epsilon attribute has to be set in training phase')
        if self.reach_destination(state):
            return ActionXY(0, 0) if self.kinematics == 'holonomic' else ActionRot(0, 0)
        if self.action_space is None:
            # Action space depends on the preferred speed, so it is built lazily.
            self.build_action_space(state.self_state.v_pref)

        occupancy_maps = None
        probability = np.random.random()
        if self.phase == 'train' and probability < self.epsilon:
            # Epsilon-greedy exploration: uniformly random action.
            max_action = self.action_space[np.random.choice(len(self.action_space))]
        else:
            self.action_values = list()
            max_value = float('-inf')
            max_action = None
            for action in self.action_space:
                next_self_state = self.propagate(state.self_state, action)
                # Two-step rollout used only for the costmap screen below.
                next_self_state_further = self.propagate_more(state.self_state, action)
                # abort actions which will probably cause collision with static obstacles in the costmap
                # BUG FIX: guard on with_costmap (as SARL.predict already does).
                # Previously compute_cost was called unconditionally and crashed
                # with self.gc = None, the default set in __init__.
                if self.with_costmap:
                    cost = self.compute_cost(next_self_state_further)
                    if cost > 0:
                        print("********** Abort action:", action, "cost:", cost, "that will hit the obstacles.")
                        continue
                if self.query_env:
                    next_human_states, reward, done, info = self.env.onestep_lookahead(action)
                else:
                    # Assume humans keep their current velocity for one time step.
                    next_human_states = [self.propagate(human_state, ActionXY(human_state.vx, human_state.vy))
                                         for human_state in state.human_states]
                    reward = self.compute_reward(next_self_state, next_human_states)
                batch_next_states = torch.cat([torch.Tensor([next_self_state + next_human_state]).to(self.device)
                                               for next_human_state in next_human_states], dim=0)
                rotated_batch_input = self.rotate(batch_next_states).unsqueeze(0)
                if self.with_om:
                    # Occupancy maps depend only on human states, so build once per call.
                    if occupancy_maps is None:
                        occupancy_maps = self.build_occupancy_maps(next_human_states).unsqueeze(0)
                    rotated_batch_input = torch.cat([rotated_batch_input, occupancy_maps], dim=2)
                # VALUE UPDATE
                next_state_value = self.model(rotated_batch_input).data.item()
                # Discount is scaled by time step and preferred speed (CADRL convention).
                value = reward + pow(self.gamma, self.time_step * state.self_state.v_pref) * next_state_value
                self.action_values.append(value)
                if value > max_value:
                    max_value = value
                    max_action = action
                # print("********** choose action:", action)
                # print("********** cost:", cost)
            if max_action is None:
                # if the robot is trapped, choose the turning action to escape
                max_action = ActionRot(0, 0.78)
                print("The robot is trapped. Rotate in place to escape......")

        if self.phase == 'train':
            self.last_state = self.transform(state)

        return max_action

    def compute_reward(self, nav, humans):
        """One-step reward: collision penalty, goal bonus, discomfort shaping."""
        # collision detection
        dmin = float('inf')
        collision = False
        if len(humans):
            for i, human in enumerate(humans):
                dist = np.linalg.norm((nav.px - human.px, nav.py - human.py)) - nav.radius - human.radius
                if dist < 0:
                    collision = True
                    break
                if dist < dmin:
                    dmin = dist

        # check if reaching the goal
        reaching_goal = np.linalg.norm((nav.px - nav.gx, nav.py - nav.gy)) < nav.radius
        if collision:
            reward = self.env.collision_penalty
        elif reaching_goal:
            reward = 1
        elif dmin < self.env.discomfort_dist:
            # Penalty grows the closer the robot gets within the discomfort zone.
            reward = (dmin - self.env.discomfort_dist) * self.env.discomfort_penalty_factor * self.env.time_step
        else:
            reward = 0
        return reward

    def transform(self, state):
        """
        Take the state passed from agent and transform it to the input of value network
        :param state:
        :return: tensor of shape (# of humans, len(state))
        """
        state_tensor = torch.cat([torch.Tensor([state.self_state + human_state]).to(self.device)
                                  for human_state in state.human_states], dim=0)
        if self.with_om:
            occupancy_maps = self.build_occupancy_maps(state.human_states)
            state_tensor = torch.cat([self.rotate(state_tensor), occupancy_maps], dim=1)
        else:
            state_tensor = self.rotate(state_tensor)
        return state_tensor

    def input_dim(self):
        """Width of one rotated pairwise row, plus the occupancy map if enabled."""
        return self.joint_state_dim + (self.cell_num ** 2 * self.om_channel_size if self.with_om else 0)
        # a**b means a^b
        # if not with_om, input_dim = joint_state_dim

    def build_occupancy_maps(self, human_states):
        """
        :param human_states:
        :return: tensor of shape (# human - 1, self.cell_num ** 2)
        """
        # NOTE(review): assumes at least two humans — with a single human the
        # np.concatenate below receives an empty list. Confirm callers guarantee this.
        occupancy_maps = []
        for human in human_states:
            other_humans = np.concatenate([np.array([(other_human.px, other_human.py, other_human.vx, other_human.vy)])
                                           for other_human in human_states if other_human != human], axis=0)
            other_px = other_humans[:, 0] - human.px
            other_py = other_humans[:, 1] - human.py
            # new x-axis is in the direction of human's velocity
            human_velocity_angle = np.arctan2(human.vy, human.vx)
            other_human_orientation = np.arctan2(other_py, other_px)
            rotation = other_human_orientation - human_velocity_angle
            distance = np.linalg.norm([other_px, other_py], axis=0)
            other_px = np.cos(rotation) * distance
            other_py = np.sin(rotation) * distance

            # compute indices of humans in the grid
            other_x_index = np.floor(other_px / self.cell_size + self.cell_num / 2)
            other_y_index = np.floor(other_py / self.cell_size + self.cell_num / 2)
            # Humans outside the grid get an index that np.isin can never match.
            other_x_index[other_x_index < 0] = float('-inf')
            other_x_index[other_x_index >= self.cell_num] = float('-inf')
            other_y_index[other_y_index < 0] = float('-inf')
            other_y_index[other_y_index >= self.cell_num] = float('-inf')
            grid_indices = self.cell_num * other_y_index + other_x_index
            occupancy_map = np.isin(range(self.cell_num ** 2), grid_indices)
            if self.om_channel_size == 1:
                occupancy_maps.append([occupancy_map.astype(int)])
            else:
                # calculate relative velocity for other agents
                other_human_velocity_angles = np.arctan2(other_humans[:, 3], other_humans[:, 2])
                rotation = other_human_velocity_angles - human_velocity_angle
                speed = np.linalg.norm(other_humans[:, 2:4], axis=1)
                other_vx = np.cos(rotation) * speed
                other_vy = np.sin(rotation) * speed
                dm = [list() for _ in range(self.cell_num ** 2 * self.om_channel_size)]
                for i, index in np.ndenumerate(grid_indices):
                    if index in range(self.cell_num ** 2):
                        if self.om_channel_size == 2:
                            dm[2 * int(index)].append(other_vx[i])
                            dm[2 * int(index) + 1].append(other_vy[i])
                        elif self.om_channel_size == 3:
                            dm[2 * int(index)].append(1)
                            dm[2 * int(index) + 1].append(other_vx[i])
                            dm[2 * int(index) + 2].append(other_vy[i])
                        else:
                            raise NotImplementedError
                # Average the contributions that fell into each cell.
                for i, cell in enumerate(dm):
                    dm[i] = sum(dm[i]) / len(dm[i]) if len(dm[i]) != 0 else 0
                occupancy_maps.append([dm])

        return torch.from_numpy(np.concatenate(occupancy_maps, axis=0)).float()
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
LeeKeyu/sarl_star | https://github.com/LeeKeyu/sarl_star/blob/179e314db9a447c64219f8f108a9aae8074b555f/sarl_star_ros/CrowdNav/crowd_nav/policy/policy_factory.py | sarl_star_ros/CrowdNav/crowd_nav/policy/policy_factory.py | from crowd_sim.envs.policy.policy_factory import policy_factory
from crowd_nav.policy.cadrl import CADRL
from crowd_nav.policy.lstm_rl import LstmRL
from crowd_nav.policy.sarl import SARL
# Register the learnable policies under their config-file names; this extends
# the base factory (linear/orca/none) imported from crowd_sim above.
policy_factory['cadrl'] = CADRL
policy_factory['lstm_rl'] = LstmRL
policy_factory['sarl'] = SARL
| python | MIT | 179e314db9a447c64219f8f108a9aae8074b555f | 2026-01-05T07:13:38.970356Z | false |
DreamInvoker/GAIN | https://github.com/DreamInvoker/GAIN/blob/178344cf00789c7ba05cfe4dca90df4b17c2caa9/code/train.py | code/train.py | import time
import matplotlib
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch import optim
from config import *
from data import DGLREDataset, DGLREDataloader, BERTDGLREDataset
from models.GAIN import GAIN_GloVe, GAIN_BERT
from test import test
from utils import Accuracy, get_cuda, logging, print_params
# Non-interactive backend so figures can be saved on headless training servers.
matplotlib.use('Agg')

# for ablation
# from models.GAIN_nomention import GAIN_GloVe, GAIN_BERT
def train(opt):
    """Train GAIN on DocRED-style data, evaluating on dev every few epochs and
    checkpointing the model with the best Ign-F1.

    :param opt: argparse namespace from config.get_opt() (plus data_word_vec);
                selects model family via opt.use_model ('bert' or 'bilstm').
    :raises ValueError: if opt.use_model is not one of the supported models.
    """
    if opt.use_model == 'bert':
        # datasets
        train_set = BERTDGLREDataset(opt.train_set, opt.train_set_save, word2id, ner2id, rel2id, dataset_type='train',
                                     opt=opt)
        dev_set = BERTDGLREDataset(opt.dev_set, opt.dev_set_save, word2id, ner2id, rel2id, dataset_type='dev',
                                   instance_in_train=train_set.instance_in_train, opt=opt)

        # dataloaders
        train_loader = DGLREDataloader(train_set, batch_size=opt.batch_size, shuffle=True,
                                       negativa_alpha=opt.negativa_alpha)
        dev_loader = DGLREDataloader(dev_set, batch_size=opt.test_batch_size, dataset_type='dev')

        model = GAIN_BERT(opt)

    elif opt.use_model == 'bilstm':
        # datasets
        train_set = DGLREDataset(opt.train_set, opt.train_set_save, word2id, ner2id, rel2id, dataset_type='train',
                                 opt=opt)
        dev_set = DGLREDataset(opt.dev_set, opt.dev_set_save, word2id, ner2id, rel2id, dataset_type='dev',
                               instance_in_train=train_set.instance_in_train, opt=opt)

        # dataloaders
        train_loader = DGLREDataloader(train_set, batch_size=opt.batch_size, shuffle=True,
                                       negativa_alpha=opt.negativa_alpha)
        dev_loader = DGLREDataloader(dev_set, batch_size=opt.test_batch_size, dataset_type='dev')

        model = GAIN_GloVe(opt)
    else:
        # Fail fast on an unknown model name. (Was `assert 1 == 2, ...`, which is
        # stripped under `python -O` and would surface later as a NameError on
        # `model`; an explicit exception is always raised.)
        raise ValueError('please choose a model from [bert, bilstm].')

    print(model.parameters)
    print_params(model)

    start_epoch = 1
    pretrain_model = opt.pretrain_model
    lr = opt.lr
    model_name = opt.model_name

    # Optionally resume from a checkpoint (epoch counter and lr included).
    if pretrain_model != '':
        chkpt = torch.load(pretrain_model, map_location=torch.device('cpu'))
        model.load_state_dict(chkpt['checkpoint'])
        logging('load model from {}'.format(pretrain_model))
        start_epoch = chkpt['epoch'] + 1
        lr = chkpt['lr']
        logging('resume from epoch {} with lr {}'.format(start_epoch, lr))
    else:
        logging('training from scratch with lr {}'.format(lr))

    model = get_cuda(model)

    if opt.use_model == 'bert':
        # Fine-tune BERT with a 100x smaller lr than the task-specific layers.
        bert_param_ids = list(map(id, model.bert.parameters()))
        base_params = filter(lambda p: p.requires_grad and id(p) not in bert_param_ids, model.parameters())

        optimizer = optim.AdamW([
            {'params': model.bert.parameters(), 'lr': lr * 0.01},
            {'params': base_params, 'weight_decay': opt.weight_decay}
        ], lr=lr)
    else:
        optimizer = optim.AdamW(filter(lambda p: p.requires_grad, model.parameters()), lr=lr,
                                weight_decay=opt.weight_decay)

    # Per-element BCE; masking and normalization are applied manually below.
    BCE = nn.BCEWithLogitsLoss(reduction='none')

    if opt.coslr:
        scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=(opt.epoch // 4) + 1)

    checkpoint_dir = opt.checkpoint_dir
    if not os.path.exists(checkpoint_dir):
        os.mkdir(checkpoint_dir)
    fig_result_dir = opt.fig_result_dir
    if not os.path.exists(fig_result_dir):
        os.mkdir(fig_result_dir)

    best_ign_auc = 0.0
    best_ign_f1 = 0.0
    best_epoch = 0

    model.train()

    global_step = 0
    total_loss = 0

    # One shared PR-curve figure; a new curve is added after each evaluation.
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.ylim(0.0, 1.0)
    plt.xlim(0.0, 1.0)
    plt.title('Precision-Recall')
    plt.grid(True)

    acc_NA, acc_not_NA, acc_total = Accuracy(), Accuracy(), Accuracy()
    logging('begin..')

    for epoch in range(start_epoch, opt.epoch + 1):
        start_time = time.time()
        for acc in [acc_NA, acc_not_NA, acc_total]:
            acc.clear()

        for ii, d in enumerate(train_loader):
            relation_multi_label = d['relation_multi_label']
            relation_mask = d['relation_mask']
            relation_label = d['relation_label']

            predictions = model(words=d['context_idxs'],
                                src_lengths=d['context_word_length'],
                                mask=d['context_word_mask'],
                                entity_type=d['context_ner'],
                                entity_id=d['context_pos'],
                                mention_id=d['context_mention'],
                                distance=None,
                                entity2mention_table=d['entity2mention_table'],
                                graphs=d['graphs'],
                                h_t_pairs=d['h_t_pairs'],
                                relation_mask=relation_mask,
                                path_table=d['path_table'],
                                entity_graphs=d['entity_graphs'],
                                ht_pair_distance=d['ht_pair_distance']
                                )
            # Mask out padded entity pairs and normalize by the number of valid
            # (pair, relation) slots.
            loss = torch.sum(BCE(predictions, relation_multi_label) * relation_mask.unsqueeze(2)) / (
                    opt.relation_nums * torch.sum(relation_mask))

            optimizer.zero_grad()
            loss.backward()

            if opt.clip != -1:
                nn.utils.clip_grad_value_(model.parameters(), opt.clip)
            optimizer.step()
            if opt.coslr:
                # NOTE(review): passing `epoch` to step() is deprecated in newer
                # PyTorch; called per batch with the same epoch it keeps the lr
                # constant within an epoch — confirm that is the intent.
                scheduler.step(epoch)

            # Track argmax accuracy separately for NA (label 0) and non-NA pairs.
            output = torch.argmax(predictions, dim=-1)
            output = output.data.cpu().numpy()
            relation_label = relation_label.data.cpu().numpy()

            for i in range(output.shape[0]):
                for j in range(output.shape[1]):
                    label = relation_label[i][j]
                    if label < 0:
                        # Negative labels pad the pair dimension; stop at the first one.
                        break

                    is_correct = (output[i][j] == label)
                    if label == 0:
                        acc_NA.add(is_correct)
                    else:
                        acc_not_NA.add(is_correct)

                    acc_total.add(is_correct)

            global_step += 1
            total_loss += loss.item()

            log_step = opt.log_step
            if global_step % log_step == 0:
                cur_loss = total_loss / log_step
                elapsed = time.time() - start_time
                logging(
                    '| epoch {:2d} | step {:4d} |  ms/b {:5.2f} | train loss {:5.3f} | NA acc: {:4.2f} | not NA acc: {:4.2f}  | tot acc: {:4.2f} '.format(
                        epoch, global_step, elapsed * 1000 / log_step, cur_loss * 1000, acc_NA.get(), acc_not_NA.get(),
                        acc_total.get()))
                total_loss = 0
                start_time = time.time()

        if epoch % opt.test_epoch == 0:
            logging('-' * 89)
            eval_start_time = time.time()
            model.eval()
            ign_f1, ign_auc, pr_x, pr_y = test(model, dev_loader, model_name, id2rel=id2rel)
            model.train()
            logging('| epoch {:3d} | time: {:5.2f}s'.format(epoch, time.time() - eval_start_time))
            logging('-' * 89)

            if ign_f1 > best_ign_f1:
                best_ign_f1 = ign_f1
                best_ign_auc = ign_auc
                best_epoch = epoch
                path = os.path.join(checkpoint_dir, model_name + '_best.pt')
                torch.save({
                    'epoch': epoch,
                    'checkpoint': model.state_dict(),
                    'lr': lr,
                    'best_ign_f1': ign_f1,
                    'best_ign_auc': ign_auc,
                    'best_epoch': epoch
                }, path)

                plt.plot(pr_x, pr_y, lw=2, label=str(epoch))
                plt.legend(loc="upper right")
                plt.savefig(os.path.join(fig_result_dir, model_name))

        if epoch % opt.save_model_freq == 0:
            path = os.path.join(checkpoint_dir, model_name + '_{}.pt'.format(epoch))
            torch.save({
                'epoch': epoch,
                'lr': lr,
                'checkpoint': model.state_dict()
            }, path)

    print("Finish training")
    print("Best epoch = %d | Best Ign F1 = %f" % (best_epoch, best_ign_f1))
    print("Storing best result...")
    print("Finish storing")
if __name__ == '__main__':
    # Log the process ids so runs can be matched to OS-level monitoring.
    print('processId:', os.getpid())
    # Fixed typo in the original message ('prarent' -> 'parent').
    print('parent processId:', os.getppid())
    opt = get_opt()
    # Dump the full configuration for reproducibility.
    print(json.dumps(opt.__dict__, indent=4))
    # Attach the pretrained word-embedding matrix loaded in config.py.
    opt.data_word_vec = word2vec
    train(opt)
| python | MIT | 178344cf00789c7ba05cfe4dca90df4b17c2caa9 | 2026-01-05T07:13:40.516065Z | false |
DreamInvoker/GAIN | https://github.com/DreamInvoker/GAIN/blob/178344cf00789c7ba05cfe4dca90df4b17c2caa9/code/utils.py | code/utils.py | from datetime import datetime
import numpy as np
import torch
def get_cuda(tensor):
    """Move *tensor* to the GPU when CUDA is available; otherwise return it unchanged."""
    return tensor.cuda() if torch.cuda.is_available() else tensor
def logging(s):
    """Print *s* prefixed with the current wall-clock timestamp."""
    timestamp = datetime.now()
    print(timestamp, s)
class Accuracy(object):
    """Running accuracy counter: tracks how many recorded predictions were correct."""

    def __init__(self):
        self.correct = 0
        self.total = 0

    def add(self, is_correct):
        """Record one prediction; *is_correct* is truthy for a hit."""
        self.total = self.total + 1
        if is_correct:
            self.correct = self.correct + 1

    def get(self):
        """Return correct/total as a float; 0.0 before anything was recorded."""
        if not self.total:
            return 0.0
        return self.correct / float(self.total)

    def clear(self):
        """Reset both counters to zero."""
        self.correct = 0
        self.total = 0
def print_params(model):
    """Print the total number of trainable (requires_grad) parameters in *model*."""
    sizes = [np.prod(list(p.size())) for p in model.parameters() if p.requires_grad]
    print('total parameters:', sum(sizes))
| python | MIT | 178344cf00789c7ba05cfe4dca90df4b17c2caa9 | 2026-01-05T07:13:40.516065Z | false |
DreamInvoker/GAIN | https://github.com/DreamInvoker/GAIN/blob/178344cf00789c7ba05cfe4dca90df4b17c2caa9/code/config.py | code/config.py | import argparse
import json
import os
import numpy as np
# Module-level dataset resources, resolved relative to the repo's ../data/
# directory and loaded once at import time.
data_dir = '../data/'
prepro_dir = os.path.join(data_dir, 'prepro_data/')
# Create the cache directory for preprocessed pickles on first run.
if not os.path.exists(prepro_dir):
    os.mkdir(prepro_dir)
# relation name -> integer id, plus the inverse mapping
rel2id = json.load(open(os.path.join(data_dir, 'rel2id.json'), "r"))
id2rel = {v: k for k, v in rel2id.items()}
# token -> id and NER tag -> id vocabularies
word2id = json.load(open(os.path.join(data_dir, 'word2id.json'), "r"))
ner2id = json.load(open(os.path.join(data_dir, 'ner2id.json'), "r"))
# pretrained word-embedding matrix (presumably row-aligned with word2id — confirm)
word2vec = np.load(os.path.join(data_dir, 'vec.npy'))
def get_opt():
    """Build and parse the command-line options for training/evaluation.

    Returns:
        argparse.Namespace holding all hyper-parameters and paths. Note that
        ``--data_word_vec`` is declared here but actually populated by the
        caller (the scripts assign the loaded embedding matrix onto it).
    """
    parser = argparse.ArgumentParser()
    # datasets path
    parser.add_argument('--train_set', type=str, default=os.path.join(data_dir, 'train_annotated.json'))
    parser.add_argument('--dev_set', type=str, default=os.path.join(data_dir, 'dev.json'))
    parser.add_argument('--test_set', type=str, default=os.path.join(data_dir, 'test.json'))
    # save path of preprocessed datasets
    parser.add_argument('--train_set_save', type=str, default=os.path.join(prepro_dir, 'train.pkl'))
    parser.add_argument('--dev_set_save', type=str, default=os.path.join(prepro_dir, 'dev.pkl'))
    parser.add_argument('--test_set_save', type=str, default=os.path.join(prepro_dir, 'test.pkl'))
    # checkpoints
    parser.add_argument('--checkpoint_dir', type=str, default='checkpoint')
    parser.add_argument('--fig_result_dir', type=str, default='fig_result')
    parser.add_argument('--model_name', type=str, default='train_model')
    parser.add_argument('--pretrain_model', type=str, default='')
    # task/Dataset-related
    parser.add_argument('--vocabulary_size', type=int, default=200000)
    parser.add_argument('--relation_nums', type=int, default=97)
    parser.add_argument('--entity_type_num', type=int, default=7)
    parser.add_argument('--max_entity_num', type=int, default=80)
    # padding
    parser.add_argument('--word_pad', type=int, default=0)
    parser.add_argument('--entity_type_pad', type=int, default=0)
    parser.add_argument('--entity_id_pad', type=int, default=0)
    # word embedding
    parser.add_argument('--word_emb_size', type=int, default=10)
    parser.add_argument('--pre_train_word', action='store_true')
    parser.add_argument('--data_word_vec', type=str)
    parser.add_argument('--finetune_word', action='store_true')
    # entity type embedding
    parser.add_argument('--use_entity_type', action='store_true')
    parser.add_argument('--entity_type_size', type=int, default=20)
    # entity id embedding, i.e., coreference embedding in DocRED original paper
    parser.add_argument('--use_entity_id', action='store_true')
    parser.add_argument('--entity_id_size', type=int, default=20)
    # BiLSTM
    parser.add_argument('--nlayers', type=int, default=1)
    parser.add_argument('--lstm_hidden_size', type=int, default=32)
    parser.add_argument('--lstm_dropout', type=float, default=0.1)
    # training settings
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--batch_size', type=int, default=1)
    parser.add_argument('--test_batch_size', type=int, default=1)
    parser.add_argument('--epoch', type=int, default=10)
    parser.add_argument('--test_epoch', type=int, default=1)
    parser.add_argument('--weight_decay', type=float, default=0.0001)
    # NOTE(review): 'negativa_alpha' spelling is kept — callers read opt.negativa_alpha.
    parser.add_argument('--negativa_alpha', type=float, default=0.0)  # negative example nums v.s positive example num
    parser.add_argument('--log_step', type=int, default=50)
    parser.add_argument('--save_model_freq', type=int, default=1)
    # gcn
    parser.add_argument('--mention_drop', action='store_true')
    parser.add_argument('--gcn_layers', type=int, default=2)
    parser.add_argument('--gcn_dim', type=int, default=808)
    parser.add_argument('--dropout', type=float, default=0.6)
    parser.add_argument('--activation', type=str, default="relu")
    # BERT
    parser.add_argument('--bert_hid_size', type=int, default=768)
    parser.add_argument('--bert_path', type=str, default="")
    parser.add_argument('--bert_fix', action='store_true')
    parser.add_argument('--coslr', action='store_true')
    parser.add_argument('--clip', type=float, default=-1)
    parser.add_argument('--k_fold', type=str, default="none")
    # use BiLSTM / BERT encoder, default: BiLSTM encoder
    parser.add_argument('--use_model', type=str, default="bilstm", choices=['bilstm', 'bert'],
                        help='you should choose between bert and bilstm')
    # binary classification threshold, automatically find optimal threshold when -1
    parser.add_argument('--input_theta', type=float, default=-1)
    return parser.parse_args()
| python | MIT | 178344cf00789c7ba05cfe4dca90df4b17c2caa9 | 2026-01-05T07:13:40.516065Z | false |
DreamInvoker/GAIN | https://github.com/DreamInvoker/GAIN/blob/178344cf00789c7ba05cfe4dca90df4b17c2caa9/code/test.py | code/test.py | import sklearn.metrics
import torch
from config import *
from data import DGLREDataset, DGLREDataloader, BERTDGLREDataset
from models.GAIN import GAIN_GloVe, GAIN_BERT
from utils import get_cuda, logging, print_params
# for ablation
# from models.GCNRE_nomention import GAIN_GloVe, GAIN_BERT
def test(model, dataloader, modelname, id2rel, input_theta=-1, output=False, is_test=False, test_prefix='dev',
         relation_num=97, ours=False):
    """Score every ordered entity pair in *dataloader*, sweep thresholds, log P/R/F1/AUC.

    Args:
        model: scoring network (project type); called with the batch tensors and
            expected to return per-pair, per-relation logits.
        dataloader: yields batched documents (see DGLREDataloader.__iter__).
        modelname: not used inside this function; kept for the caller's signature.
        id2rel: relation id -> relation name, written into the dumped results.
        input_theta: decision threshold; -1 means pick the F1-maximising one.
        output: when True, dump predictions above the threshold to
            '<test_prefix>_index.json'.
        is_test: selects which summary line is logged for the first curve.
        test_prefix: filename prefix for the JSON dump.
        relation_num: number of relation classes; relation 0 (NA) is skipped.
        ours: restrict evaluation to pairs in each doc's 'overlap' list
            (inter-sentence F1 in LSR, per the original comment).

    Returns:
        (f1, auc, pr_x, pr_y) of the second, train-overlap-ignoring curve.
    """
    # ours: inter-sentence F1 in LSR
    total_recall_ignore = 0

    test_result = []
    total_recall = 0
    total_steps = len(dataloader)
    for cur_i, d in enumerate(dataloader):
        print('step: {}/{}'.format(cur_i, total_steps))

        with torch.no_grad():
            labels = d['labels']
            L_vertex = d['L_vertex']
            titles = d['titles']
            indexes = d['indexes']
            overlaps = d['overlaps']

            predictions = model(words=d['context_idxs'],
                                src_lengths=d['context_word_length'],
                                mask=d['context_word_mask'],
                                entity_type=d['context_ner'],
                                entity_id=d['context_pos'],
                                mention_id=d['context_mention'],
                                distance=None,
                                entity2mention_table=d['entity2mention_table'],
                                graphs=d['graphs'],
                                h_t_pairs=d['h_t_pairs'],
                                relation_mask=None,
                                path_table=d['path_table'],
                                entity_graphs=d['entity_graphs'],
                                ht_pair_distance=d['ht_pair_distance']
                                )
            predict_re = torch.sigmoid(predictions)

        predict_re = predict_re.data.cpu().numpy()

        for i in range(len(labels)):
            label = labels[i]
            L = L_vertex[i]
            title = titles[i]
            index = indexes[i]
            overlap = overlaps[i]
            total_recall += len(label)

            # count gold triples that are NOT seen in the training set
            for l in label.values():
                if not l:
                    total_recall_ignore += 1

            # j walks pairs in the same h/t order the dataloader used to build
            # the prediction tensor, so predict_re[i, j, r] matches (h_idx, t_idx).
            j = 0
            for h_idx in range(L):
                for t_idx in range(L):
                    if h_idx != t_idx:
                        for r in range(1, relation_num):
                            rel_ins = (h_idx, t_idx, r)
                            intrain = label.get(rel_ins, False)
                            # tuple layout: (is_gold, score, in_train, title, rel_name,
                            #                doc_index, h_idx, t_idx, r)
                            if (ours and (h_idx, t_idx) in overlap) or not ours:
                                test_result.append((rel_ins in label, float(predict_re[i, j, r]), intrain,
                                                    title, id2rel[r], index, h_idx, t_idx, r))
                        j += 1

    # sort by confidence, highest first, so a prefix = "everything above a threshold"
    test_result.sort(key=lambda x: x[1], reverse=True)

    if ours:
        # recompute recall denominator over the restricted (overlap-only) pair set
        total_recall = 0
        for item in test_result:
            if item[0]:
                total_recall += 1

    pr_x = []
    pr_y = []
    correct = 0
    w = 0  # index of the last prediction whose score exceeds input_theta
    if total_recall == 0:
        total_recall = 1  # avoid division by zero on an empty gold set

    for i, item in enumerate(test_result):
        correct += item[0]
        pr_y.append(float(correct) / (i + 1))  # Precision
        pr_x.append(float(correct) / total_recall)  # Recall
        if item[1] > input_theta:
            w = i

    pr_x = np.asarray(pr_x, dtype='float32')
    pr_y = np.asarray(pr_y, dtype='float32')
    f1_arr = (2 * pr_x * pr_y / (pr_x + pr_y + 1e-20))
    f1 = f1_arr.max()
    f1_pos = f1_arr.argmax()
    theta = test_result[f1_pos][1]

    # with no explicit threshold, adopt the F1-maximising score as the cutoff
    if input_theta == -1:
        w = f1_pos
        input_theta = theta

    auc = sklearn.metrics.auc(x=pr_x, y=pr_y)
    if not is_test:
        logging('ALL  : Theta {:3.4f} | F1 {:3.4f} | AUC {:3.4f}'.format(theta, f1, auc))
    else:
        logging(
            'ma_f1 {:3.4f} | input_theta {:3.4f} test_result P {:3.4f} test_result R {:3.4f} test_result F1 {:3.4f} | AUC {:3.4f}' \
                .format(f1, input_theta, pr_y[w], pr_x[w], f1_arr[w], auc))

    if output:
        # output = [x[-4:] for x in test_result[:w+1]]
        output = [{'index': x[-4], 'h_idx': x[-3], 't_idx': x[-2], 'r_idx': x[-1],
                   'score': x[1], 'intrain': x[2],
                   'r': x[-5], 'title': x[-6]} for x in test_result[:w + 1]]
        json.dump(output, open(test_prefix + "_index.json", "w"))

    # Second curve: "Ign" metrics — precision computed only over predictions
    # whose gold triple was NOT seen (as a mention triple) in the training set.
    pr_x = []
    pr_y = []
    correct = correct_in_train = 0
    w = 0

    # https://github.com/thunlp/DocRED/issues/47
    for i, item in enumerate(test_result):
        correct += item[0]
        if item[0] & item[2]:
            correct_in_train += 1
        if correct_in_train == correct:
            p = 0
        else:
            p = float(correct - correct_in_train) / (i + 1 - correct_in_train)
        pr_y.append(p)
        pr_x.append(float(correct) / total_recall)
        if item[1] > input_theta:
            w = i

    pr_x = np.asarray(pr_x, dtype='float32')
    pr_y = np.asarray(pr_y, dtype='float32')
    f1_arr = (2 * pr_x * pr_y / (pr_x + pr_y + 1e-20))
    f1 = f1_arr.max()

    auc = sklearn.metrics.auc(x=pr_x, y=pr_y)

    # NOTE(review): 'inhput_theta' typo below is preserved — it is a runtime log string.
    logging(
        'Ignore ma_f1 {:3.4f} | inhput_theta {:3.4f} test_result P {:3.4f} test_result R {:3.4f} test_result F1 {:3.4f} | AUC {:3.4f}' \
            .format(f1, input_theta, pr_y[w], pr_x[w], f1_arr[w], auc))

    return f1, auc, pr_x, pr_y
# Evaluation entry point: build datasets + model per --use_model, load the
# mandatory checkpoint, and run test() on the test split.
if __name__ == '__main__':
    print('processId:', os.getpid())
    print('prarent processId:', os.getppid())  # NOTE(review): 'prarent' typo kept — runtime string
    opt = get_opt()
    print(json.dumps(opt.__dict__, indent=4))
    opt.data_word_vec = word2vec

    if opt.use_model == 'bert':
        # datasets
        # train_set is built only so its instance_in_train set can mark
        # train-overlapping triples in the test split.
        train_set = BERTDGLREDataset(opt.train_set, opt.train_set_save, word2id, ner2id, rel2id, dataset_type='train',
                                     opt=opt)
        test_set = BERTDGLREDataset(opt.test_set, opt.test_set_save, word2id, ner2id, rel2id, dataset_type='test',
                                    instance_in_train=train_set.instance_in_train, opt=opt)

        test_loader = DGLREDataloader(test_set, batch_size=opt.test_batch_size, dataset_type='test')

        model = GAIN_BERT(opt)

    elif opt.use_model == 'bilstm':
        # datasets
        train_set = DGLREDataset(opt.train_set, opt.train_set_save, word2id, ner2id, rel2id, dataset_type='train',
                                 opt=opt)
        test_set = DGLREDataset(opt.test_set, opt.test_set_save, word2id, ner2id, rel2id, dataset_type='test',
                                instance_in_train=train_set.instance_in_train, opt=opt)

        test_loader = DGLREDataloader(test_set, batch_size=opt.test_batch_size, dataset_type='test')

        model = GAIN_GloVe(opt)
    else:
        assert 1 == 2, 'please choose a model from [bert, bilstm].'

    # train_set served its purpose above; free its memory before evaluation.
    import gc

    del train_set
    gc.collect()

    # print(model.parameters)
    print_params(model)

    start_epoch = 1
    pretrain_model = opt.pretrain_model
    lr = opt.lr
    model_name = opt.model_name

    # A checkpoint is required for evaluation; refuse to run without one.
    if pretrain_model != '':
        chkpt = torch.load(pretrain_model, map_location=torch.device('cpu'))
        model.load_state_dict(chkpt['checkpoint'])
        logging('load checkpoint from {}'.format(pretrain_model))
    else:
        assert 1 == 2, 'please provide checkpoint to evaluate.'

    model = get_cuda(model)
    model.eval()

    f1, auc, pr_x, pr_y = test(model, test_loader, model_name, id2rel=id2rel,
                               input_theta=opt.input_theta, output=True, test_prefix='test', is_test=True, ours=False)
    print('finished')
| python | MIT | 178344cf00789c7ba05cfe4dca90df4b17c2caa9 | 2026-01-05T07:13:40.516065Z | false |
DreamInvoker/GAIN | https://github.com/DreamInvoker/GAIN/blob/178344cf00789c7ba05cfe4dca90df4b17c2caa9/code/data.py | code/data.py | import json
import math
import os
import pickle
import random
from collections import defaultdict
import dgl
import numpy as np
import torch
from torch.utils.data import IterableDataset, DataLoader
from transformers import *
from models.GAIN import Bert
from utils import get_cuda
# Sentinel label for padded h-t pairs (see DGLREDataloader.__iter__, which
# fills relation_label with it). Presumably chosen to match PyTorch's
# CrossEntropyLoss default ignore_index — confirm in the loss setup.
IGNORE_INDEX = -100
class DGLREDataset(IterableDataset):
    """DocRED-style dataset for the GloVe/BiLSTM pipeline.

    Parses the raw JSON (or loads a pickle cache), builds per-document id
    arrays (word/entity/NER/mention), a heterogeneous mention-level DGL graph,
    an entity-level graph with 2-hop paths, and positive/negative h-t pairs.

    Fix vs. original: the optional k-fold split computed float slice bounds
    and then sliced a list with them, which raises TypeError in Python 3;
    the bounds are now truncated to int.
    """

    def __init__(self, src_file, save_file, word2id, ner2id, rel2id,
                 dataset_type='train', instance_in_train=None, opt=None):
        super(DGLREDataset, self).__init__()

        # record training set mention triples
        self.instance_in_train = set([]) if instance_in_train is None else instance_in_train
        self.data = None
        self.document_max_length = 512
        self.INTRA_EDGE = 0
        self.INTER_EDGE = 1
        self.LOOP_EDGE = 2
        self.count = 0

        print('Reading data from {}.'.format(src_file))
        if os.path.exists(save_file):
            # fast path: load the preprocessed pickle cache
            with open(file=save_file, mode='rb') as fr:
                info = pickle.load(fr)
                self.data = info['data']
                self.instance_in_train = info['intrain_set']
            print('load preprocessed data from {}.'.format(save_file))
        else:
            with open(file=src_file, mode='r', encoding='utf-8') as fr:
                ori_data = json.load(fr)
            print('loading..')
            self.data = []

            for i, doc in enumerate(ori_data):
                title, entity_list, labels, sentences = \
                    doc['title'], doc['vertexSet'], doc.get('labels', []), doc['sents']

                # Ls[k] = document-level offset of sentence k's first token
                Ls = [0]
                L = 0
                for x in sentences:
                    L += len(x)
                    Ls.append(L)
                # convert each mention's sentence-local span to a document-level span
                for j in range(len(entity_list)):
                    for k in range(len(entity_list[j])):
                        sent_id = int(entity_list[j][k]['sent_id'])
                        entity_list[j][k]['sent_id'] = sent_id
                        dl = Ls[sent_id]
                        pos0, pos1 = entity_list[j][k]['pos']
                        entity_list[j][k]['global_pos'] = (pos0 + dl, pos1 + dl)

                # generate positive examples
                train_triple = []
                new_labels = []
                for label in labels:
                    head, tail, relation, evidence = label['h'], label['t'], label['r'], label['evidence']
                    assert (relation in rel2id), 'no such relation {} in rel2id'.format(relation)
                    label['r'] = rel2id[relation]

                    train_triple.append((head, tail))
                    label['in_train'] = False

                    # record training set mention triples and mark it for dev and test set
                    for n1 in entity_list[head]:
                        for n2 in entity_list[tail]:
                            mention_triple = (n1['name'], n2['name'], relation)
                            if dataset_type == 'train':
                                self.instance_in_train.add(mention_triple)
                            else:
                                if mention_triple in self.instance_in_train:
                                    label['in_train'] = True
                                    break

                    new_labels.append(label)

                # generate negative examples: all ordered pairs without a gold relation
                na_triple = []
                for j in range(len(entity_list)):
                    for k in range(len(entity_list)):
                        if j != k and (j, k) not in train_triple:
                            na_triple.append((j, k))

                # generate document ids (flatten and truncate to max length)
                words = []
                for sentence in sentences:
                    for word in sentence:
                        words.append(word)
                if len(words) > self.document_max_length:
                    words = words[:self.document_max_length]

                word_id = np.zeros((self.document_max_length,), dtype=np.int32)
                pos_id = np.zeros((self.document_max_length,), dtype=np.int32)
                ner_id = np.zeros((self.document_max_length,), dtype=np.int32)
                mention_id = np.zeros((self.document_max_length,), dtype=np.int32)

                for iii, w in enumerate(words):
                    word = word2id.get(w.lower(), word2id['UNK'])
                    word_id[iii] = word

                # entity ids and mention ids are 1-based; 0 means "no entity here"
                entity2mention = defaultdict(list)
                mention_idx = 1
                already_exist = set()  # dealing with NER overlapping problem
                for idx, vertex in enumerate(entity_list, 1):
                    for v in vertex:
                        sent_id, (pos0, pos1), ner_type = v['sent_id'], v['global_pos'], v['type']
                        if (pos0, pos1) in already_exist:
                            continue
                        pos_id[pos0:pos1] = idx
                        ner_id[pos0:pos1] = ner2id[ner_type]
                        mention_id[pos0:pos1] = mention_idx
                        entity2mention[idx].append(mention_idx)
                        mention_idx += 1
                        already_exist.add((pos0, pos1))

                # construct graph
                graph = self.create_graph(Ls, mention_id, pos_id, entity2mention)

                # construct entity graph & path
                entity_graph, path = self.create_entity_graph(Ls, pos_id, entity2mention)

                assert pos_id.max() == len(entity_list)
                assert mention_id.max() == graph.number_of_nodes() - 1

                overlap = doc.get('overlap_entity_pair', [])
                new_overlap = [tuple(item) for item in overlap]

                self.data.append({
                    'title': title,
                    'entities': entity_list,
                    'labels': new_labels,
                    'na_triple': na_triple,
                    'word_id': word_id,
                    'pos_id': pos_id,
                    'ner_id': ner_id,
                    'mention_id': mention_id,
                    'entity2mention': entity2mention,
                    'graph': graph,
                    'entity_graph': entity_graph,
                    'path': path,
                    'overlap': new_overlap
                })

            # save data
            with open(file=save_file, mode='wb') as fw:
                pickle.dump({'data': self.data, 'intrain_set': self.instance_in_train}, fw)
            print('finish reading {} and save preprocessed data to {}.'.format(src_file, save_file))

        if opt.k_fold != "none":
            # Hold out fold k of `total` folds: drop data[a:b], keep the rest.
            k_fold = opt.k_fold.split(',')
            k, total = float(k_fold[0]), float(k_fold[1])
            # BUGFIX: slice bounds must be ints — the original used float
            # expressions directly, which raises TypeError on list slicing.
            a = int((k - 1) / total * len(self.data))
            b = int(k / total * len(self.data))
            self.data = self.data[:a] + self.data[b:]

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]

    def __iter__(self):
        return iter(self.data)

    def create_graph(self, Ls, mention_id, entity_id, entity2mention):
        """Build the heterogeneous mention graph (node 0 is the global document node).

        Edge types: 'intra' links mentions of the same entity, 'inter' links
        mentions of different entities co-occurring in a sentence, 'global'
        links every mention to node 0. Empty edge types get a placeholder
        edge so dgl.heterograph sees all three relations.
        """
        d = defaultdict(list)

        # add intra-entity edges
        for _, mentions in entity2mention.items():
            for i in range(len(mentions)):
                for j in range(i + 1, len(mentions)):
                    d[('node', 'intra', 'node')].append((mentions[i], mentions[j]))
                    d[('node', 'intra', 'node')].append((mentions[j], mentions[i]))

        if d[('node', 'intra', 'node')] == []:
            d[('node', 'intra', 'node')].append((entity2mention[1][0], 0))

        for i in range(1, len(Ls)):
            # collect the mentions (and their entities) inside sentence i-1
            tmp = dict()
            for j in range(Ls[i - 1], Ls[i]):
                if mention_id[j] != 0:
                    tmp[mention_id[j]] = entity_id[j]
            mention_entity_info = [(k, v) for k, v in tmp.items()]

            # add self-loop & to-globle-node edges
            for m in range(len(mention_entity_info)):
                # self-loop
                # d[('node', 'loop', 'node')].append((mention_entity_info[m][0], mention_entity_info[m][0]))

                # to global node
                d[('node', 'global', 'node')].append((mention_entity_info[m][0], 0))
                d[('node', 'global', 'node')].append((0, mention_entity_info[m][0]))

            # add inter edges (mentions of different entities in the same sentence)
            for m in range(len(mention_entity_info)):
                for n in range(m + 1, len(mention_entity_info)):
                    if mention_entity_info[m][1] != mention_entity_info[n][1]:
                        # inter edge
                        d[('node', 'inter', 'node')].append((mention_entity_info[m][0], mention_entity_info[n][0]))
                        d[('node', 'inter', 'node')].append((mention_entity_info[n][0], mention_entity_info[m][0]))

        # add self-loop for global node
        # d[('node', 'loop', 'node')].append((0, 0))

        if d[('node', 'inter', 'node')] == []:
            d[('node', 'inter', 'node')].append((entity2mention[1][0], 0))

        graph = dgl.heterograph(d)

        return graph

    def create_entity_graph(self, Ls, entity_id, entity2mention):
        """Build the entity-level graph (0-based nodes) and 2-hop path table.

        Entities are connected when they co-occur in a sentence. path maps
        each 1-based entity pair (i, j), i < j, to the list of 1-based
        entities adjacent to both (the intermediate hop candidates).
        """
        graph = dgl.DGLGraph()
        graph.add_nodes(entity_id.max())

        d = defaultdict(set)
        for i in range(1, len(Ls)):
            tmp = set()
            for j in range(Ls[i - 1], Ls[i]):
                if entity_id[j] != 0:
                    tmp.add(entity_id[j])
            tmp = list(tmp)
            for ii in range(len(tmp)):
                for jj in range(ii + 1, len(tmp)):
                    # entity ids are 1-based in the arrays, nodes are 0-based
                    d[tmp[ii] - 1].add(tmp[jj] - 1)
                    d[tmp[jj] - 1].add(tmp[ii] - 1)
        a = []
        b = []
        for k, v in d.items():
            for vv in v:
                a.append(k)
                b.append(vv)
        graph.add_edges(a, b)

        path = dict()
        for i in range(0, graph.number_of_nodes()):
            for j in range(i + 1, graph.number_of_nodes()):
                a = set(graph.successors(i).numpy())
                b = set(graph.successors(j).numpy())
                c = [val + 1 for val in list(a & b)]
                path[(i + 1, j + 1)] = c

        return graph, path
class BERTDGLREDataset(IterableDataset):
    """DocRED-style dataset for the BERT pipeline.

    Same preprocessing as DGLREDataset but token positions are remapped to
    BERT subword offsets via ``bert.subword_tokenize_to_ids``; the id arrays
    are indexed in subword space.
    """

    def __init__(self, src_file, save_file, word2id, ner2id, rel2id,
                 dataset_type='train', instance_in_train=None, opt=None):
        super(BERTDGLREDataset, self).__init__()

        # record training set mention triples
        self.instance_in_train = set([]) if instance_in_train is None else instance_in_train
        self.data = None
        self.document_max_length = 512
        # NOTE(review): 'INFRA_EDGE' looks like a typo for INTRA_EDGE (cf. the
        # GloVe dataset class); kept as-is since external code may read it.
        self.INFRA_EDGE = 0
        self.INTER_EDGE = 1
        self.LOOP_EDGE = 2
        self.count = 0

        print('Reading data from {}.'.format(src_file))
        if os.path.exists(save_file):
            # fast path: load the preprocessed pickle cache
            with open(file=save_file, mode='rb') as fr:
                info = pickle.load(fr)
                self.data = info['data']
                self.instance_in_train = info['intrain_set']
            print('load preprocessed data from {}.'.format(save_file))
        else:
            bert = Bert(BertModel, 'bert-base-uncased', opt.bert_path)

            with open(file=src_file, mode='r', encoding='utf-8') as fr:
                ori_data = json.load(fr)
            print('loading..')
            self.data = []

            for i, doc in enumerate(ori_data):
                title, entity_list, labels, sentences = \
                    doc['title'], doc['vertexSet'], doc.get('labels', []), doc['sents']

                # Ls[k] = word-level offset of sentence k's first token
                Ls = [0]
                L = 0
                for x in sentences:
                    L += len(x)
                    Ls.append(L)
                # convert each mention's sentence-local span to a document-level span
                for j in range(len(entity_list)):
                    for k in range(len(entity_list[j])):
                        sent_id = int(entity_list[j][k]['sent_id'])
                        entity_list[j][k]['sent_id'] = sent_id
                        dl = Ls[sent_id]
                        pos0, pos1 = entity_list[j][k]['pos']
                        entity_list[j][k]['global_pos'] = (pos0 + dl, pos1 + dl)

                # generate positive examples
                train_triple = []
                new_labels = []
                for label in labels:
                    head, tail, relation, evidence = label['h'], label['t'], label['r'], label['evidence']
                    assert (relation in rel2id), 'no such relation {} in rel2id'.format(relation)
                    label['r'] = rel2id[relation]

                    train_triple.append((head, tail))
                    label['in_train'] = False

                    # record training set mention triples and mark it for dev and test set
                    for n1 in entity_list[head]:
                        for n2 in entity_list[tail]:
                            mention_triple = (n1['name'], n2['name'], relation)
                            if dataset_type == 'train':
                                self.instance_in_train.add(mention_triple)
                            else:
                                if mention_triple in self.instance_in_train:
                                    label['in_train'] = True
                                    break

                    new_labels.append(label)

                # generate negative examples: all ordered pairs without a gold relation
                na_triple = []
                for j in range(len(entity_list)):
                    for k in range(len(entity_list)):
                        if j != k and (j, k) not in train_triple:
                            na_triple.append((j, k))

                # generate document ids
                words = []
                for sentence in sentences:
                    for word in sentence:
                        words.append(word)

                # bert_starts maps word index -> first-subword index
                bert_token, bert_starts, bert_subwords = bert.subword_tokenize_to_ids(words)

                word_id = np.zeros((self.document_max_length,), dtype=np.int32)
                pos_id = np.zeros((self.document_max_length,), dtype=np.int32)
                ner_id = np.zeros((self.document_max_length,), dtype=np.int32)
                mention_id = np.zeros((self.document_max_length,), dtype=np.int32)
                word_id[:] = bert_token[0]

                # entity ids and mention ids are 1-based; 0 means "no entity here"
                entity2mention = defaultdict(list)
                mention_idx = 1
                already_exist = set()
                for idx, vertex in enumerate(entity_list, 1):
                    for v in vertex:
                        sent_id, (pos0, pos1), ner_type = v['sent_id'], v['global_pos'], v['type']
                        # remap word offsets to subword offsets; spans past the
                        # tokenized length get an out-of-range end (1024)
                        pos0 = bert_starts[pos0]
                        pos1 = bert_starts[pos1] if pos1 < len(bert_starts) else 1024
                        if (pos0, pos1) in already_exist:
                            continue
                        # mention truncated away entirely — skip it
                        if pos0 >= len(pos_id):
                            continue
                        pos_id[pos0:pos1] = idx
                        ner_id[pos0:pos1] = ner2id[ner_type]
                        mention_id[pos0:pos1] = mention_idx
                        entity2mention[idx].append(mention_idx)
                        mention_idx += 1
                        already_exist.add((pos0, pos1))

                # If the last entity lost all mentions to truncation, fabricate
                # one at the first free position so downstream asserts hold.
                replace_i = 0
                idx = len(entity_list)
                if entity2mention[idx] == []:
                    entity2mention[idx].append(mention_idx)
                    while mention_id[replace_i] != 0:
                        replace_i += 1
                    mention_id[replace_i] = mention_idx
                    pos_id[replace_i] = idx
                    # NOTE(review): 'vertex' leaks from the loop above, i.e. the
                    # last entity's mention list — presumably intentional since
                    # idx is also the last entity; confirm.
                    ner_id[replace_i] = ner2id[vertex[0]['type']]
                    mention_idx += 1

                # remap sentence offsets into subword space as well
                new_Ls = [0]
                for ii in range(1, len(Ls)):
                    new_Ls.append(bert_starts[Ls[ii]] if Ls[ii] < len(bert_starts) else len(bert_subwords))
                Ls = new_Ls

                # construct graph
                graph = self.create_graph(Ls, mention_id, pos_id, entity2mention)

                # construct entity graph & path
                entity_graph, path = self.create_entity_graph(Ls, pos_id, entity2mention)

                assert pos_id.max() == len(entity_list)
                assert mention_id.max() == graph.number_of_nodes() - 1

                overlap = doc.get('overlap_entity_pair', [])
                new_overlap = [tuple(item) for item in overlap]

                self.data.append({
                    'title': title,
                    'entities': entity_list,
                    'labels': new_labels,
                    'na_triple': na_triple,
                    'word_id': word_id,
                    'pos_id': pos_id,
                    'ner_id': ner_id,
                    'mention_id': mention_id,
                    'entity2mention': entity2mention,
                    'graph': graph,
                    'entity_graph': entity_graph,
                    'path': path,
                    'overlap': new_overlap
                })

            # save data
            with open(file=save_file, mode='wb') as fw:
                pickle.dump({'data': self.data, 'intrain_set': self.instance_in_train}, fw)
            print('finish reading {} and save preprocessed data to {}.'.format(src_file, save_file))

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]

    def __iter__(self):
        return iter(self.data)

    def create_graph(self, Ls, mention_id, entity_id, entity2mention):
        """Build the heterogeneous mention graph; node 0 is the global node.

        'intra' links mentions of one entity, 'inter' links mentions of
        different entities within a sentence, 'global' links mentions to
        node 0. Empty edge types get a placeholder edge so all three
        relations exist in the heterograph.
        """
        d = defaultdict(list)

        # add intra edges
        for _, mentions in entity2mention.items():
            for i in range(len(mentions)):
                for j in range(i + 1, len(mentions)):
                    d[('node', 'intra', 'node')].append((mentions[i], mentions[j]))
                    d[('node', 'intra', 'node')].append((mentions[j], mentions[i]))

        if d[('node', 'intra', 'node')] == []:
            d[('node', 'intra', 'node')].append((entity2mention[1][0], 0))

        for i in range(1, len(Ls)):
            # collect the mentions (and their entities) inside sentence i-1
            tmp = dict()
            for j in range(Ls[i - 1], Ls[i]):
                if mention_id[j] != 0:
                    tmp[mention_id[j]] = entity_id[j]
            mention_entity_info = [(k, v) for k, v in tmp.items()]

            # add self-loop & to-globle-node edges
            for m in range(len(mention_entity_info)):
                # self-loop
                # d[('node', 'loop', 'node')].append((mention_entity_info[m][0], mention_entity_info[m][0]))

                # to global node
                d[('node', 'global', 'node')].append((mention_entity_info[m][0], 0))
                d[('node', 'global', 'node')].append((0, mention_entity_info[m][0]))

            # add inter edges
            for m in range(len(mention_entity_info)):
                for n in range(m + 1, len(mention_entity_info)):
                    if mention_entity_info[m][1] != mention_entity_info[n][1]:
                        # inter edge
                        d[('node', 'inter', 'node')].append((mention_entity_info[m][0], mention_entity_info[n][0]))
                        d[('node', 'inter', 'node')].append((mention_entity_info[n][0], mention_entity_info[m][0]))

        # add self-loop for global node
        # d[('node', 'loop', 'node')].append((0, 0))

        if d[('node', 'inter', 'node')] == []:
            d[('node', 'inter', 'node')].append((entity2mention[1][0], 0))

        graph = dgl.heterograph(d)

        return graph

    def create_entity_graph(self, Ls, entity_id, entity2mention):
        """Build the entity-level graph (0-based nodes) and 2-hop path table.

        Entities co-occurring in a sentence are connected; path maps each
        1-based pair (i, j), i < j, to the 1-based entities adjacent to both.
        """
        graph = dgl.DGLGraph()
        graph.add_nodes(entity_id.max())

        d = defaultdict(set)
        for i in range(1, len(Ls)):
            tmp = set()
            for j in range(Ls[i - 1], Ls[i]):
                if entity_id[j] != 0:
                    tmp.add(entity_id[j])
            tmp = list(tmp)
            for ii in range(len(tmp)):
                for jj in range(ii + 1, len(tmp)):
                    # entity ids are 1-based in the arrays, nodes are 0-based
                    d[tmp[ii] - 1].add(tmp[jj] - 1)
                    d[tmp[jj] - 1].add(tmp[ii] - 1)
        a = []
        b = []
        for k, v in d.items():
            for vv in v:
                a.append(k)
                b.append(vv)
        graph.add_edges(a, b)

        path = dict()
        for i in range(0, graph.number_of_nodes()):
            for j in range(i + 1, graph.number_of_nodes()):
                a = set(graph.successors(i).numpy())
                b = set(graph.successors(j).numpy())
                c = [val + 1 for val in list(a & b)]
                path[(i + 1, j + 1)] = c

        return graph, path
class DGLREDataloader(DataLoader):
    """Batching iterator over a DGLREDataset/BERTDGLREDataset.

    Pads per-document id arrays into fixed-size tensors, builds h-t pair
    tensors (gold pairs plus sampled negatives for training, or all ordered
    pairs otherwise), bucketized head-tail distances, and yields one dict
    per minibatch.
    """

    def __init__(self, dataset, batch_size, shuffle=False, h_t_limit_per_batch=300, h_t_limit=1722, relation_num=97,
                 max_length=512, negativa_alpha=0.0, dataset_type='train'):
        # NOTE(review): 'negativa_alpha' spelling kept — part of the public signature.
        super(DGLREDataloader, self).__init__(dataset, batch_size=batch_size)
        self.shuffle = shuffle
        self.length = len(self.dataset)
        self.max_length = max_length
        self.negativa_alpha = negativa_alpha
        self.dataset_type = dataset_type

        self.h_t_limit_per_batch = h_t_limit_per_batch
        self.h_t_limit = h_t_limit  # max number of h-t pairs per document
        self.relation_num = relation_num
        # dis2idx buckets |head-tail| token distance logarithmically into 0..9
        self.dis2idx = np.zeros((512), dtype='int64')
        self.dis2idx[1] = 1
        self.dis2idx[2:] = 2
        self.dis2idx[4:] = 3
        self.dis2idx[8:] = 4
        self.dis2idx[16:] = 5
        self.dis2idx[32:] = 6
        self.dis2idx[64:] = 7
        self.dis2idx[128:] = 8
        self.dis2idx[256:] = 9
        self.dis_size = 20

        self.order = list(range(self.length))

    def __iter__(self):
        # shuffle
        if self.shuffle:
            random.shuffle(self.order)
            self.data = [self.dataset[idx] for idx in self.order]
        else:
            self.data = self.dataset
        batch_num = math.ceil(self.length / self.batch_size)
        self.batches = [self.data[idx * self.batch_size: min(self.length, (idx + 1) * self.batch_size)]
                        for idx in range(0, batch_num)]
        # batches_order keeps original dataset indices for reporting
        self.batches_order = [self.order[idx * self.batch_size: min(self.length, (idx + 1) * self.batch_size)]
                              for idx in range(0, batch_num)]

        # begin — reusable padded buffers, zeroed per minibatch
        context_word_ids = torch.LongTensor(self.batch_size, self.max_length).cpu()
        context_pos_ids = torch.LongTensor(self.batch_size, self.max_length).cpu()
        context_ner_ids = torch.LongTensor(self.batch_size, self.max_length).cpu()
        context_mention_ids = torch.LongTensor(self.batch_size, self.max_length).cpu()
        context_word_mask = torch.LongTensor(self.batch_size, self.max_length).cpu()
        context_word_length = torch.LongTensor(self.batch_size).cpu()
        ht_pairs = torch.LongTensor(self.batch_size, self.h_t_limit, 2).cpu()
        relation_multi_label = torch.Tensor(self.batch_size, self.h_t_limit, self.relation_num).cpu()
        relation_mask = torch.Tensor(self.batch_size, self.h_t_limit).cpu()
        relation_label = torch.LongTensor(self.batch_size, self.h_t_limit).cpu()
        ht_pair_distance = torch.LongTensor(self.batch_size, self.h_t_limit).cpu()

        for idx, minibatch in enumerate(self.batches):
            cur_bsz = len(minibatch)

            for mapping in [context_word_ids, context_pos_ids, context_ner_ids, context_mention_ids,
                            context_word_mask, context_word_length,
                            ht_pairs, ht_pair_distance, relation_multi_label, relation_mask, relation_label]:
                if mapping is not None:
                    mapping.zero_()

            # unfilled pair slots keep IGNORE_INDEX so they are skipped by the loss
            relation_label.fill_(IGNORE_INDEX)

            max_h_t_cnt = 0

            label_list = []
            L_vertex = []
            titles = []
            indexes = []

            graph_list = []
            entity_graph_list = []
            entity2mention_table = []
            path_table = []
            overlaps = []

            for i, example in enumerate(minibatch):
                title, entities, labels, na_triple, word_id, pos_id, ner_id, mention_id, entity2mention, graph, entity_graph, path = \
                    example['title'], example['entities'], example['labels'], example['na_triple'], \
                    example['word_id'], example['pos_id'], example['ner_id'], example['mention_id'], example[
                        'entity2mention'], example['graph'], example['entity_graph'], example['path']
                graph_list.append(graph)
                entity_graph_list.append(entity_graph)
                path_table.append(path)
                overlaps.append(example['overlap'])

                # one-hot entity -> mention incidence matrix (1-based ids, row/col 0 unused)
                entity2mention_t = get_cuda(torch.zeros((pos_id.max() + 1, mention_id.max() + 1)))
                for e, ms in entity2mention.items():
                    for m in ms:
                        entity2mention_t[e, m] = 1
                entity2mention_table.append(entity2mention_t)

                L = len(entities)
                word_num = word_id.shape[0]

                context_word_ids[i, :word_num].copy_(torch.from_numpy(word_id))
                context_pos_ids[i, :word_num].copy_(torch.from_numpy(pos_id))
                context_ner_ids[i, :word_num].copy_(torch.from_numpy(ner_id))
                context_mention_ids[i, :word_num].copy_(torch.from_numpy(mention_id))

                idx2label = defaultdict(list)
                label_set = {}
                for label in labels:
                    head, tail, relation, intrain, evidence = \
                        label['h'], label['t'], label['r'], label['in_train'], label['evidence']
                    idx2label[(head, tail)].append(relation)
                    label_set[(head, tail, relation)] = intrain
                label_list.append(label_set)

                if self.dataset_type == 'train':
                    # gold pairs first, then sampled NA (negative) pairs
                    train_tripe = list(idx2label.keys())
                    for j, (h_idx, t_idx) in enumerate(train_tripe):
                        hlist, tlist = entities[h_idx], entities[t_idx]
                        # entity indices are stored 1-based in ht_pairs
                        ht_pairs[i, j, :] = torch.Tensor([h_idx + 1, t_idx + 1])
                        label = idx2label[(h_idx, t_idx)]

                        # bucketized signed distance between first mentions,
                        # shifted by dis_size // 2 to stay non-negative
                        delta_dis = hlist[0]['global_pos'][0] - tlist[0]['global_pos'][0]
                        if delta_dis < 0:
                            ht_pair_distance[i, j] = -int(self.dis2idx[-delta_dis]) + self.dis_size // 2
                        else:
                            ht_pair_distance[i, j] = int(self.dis2idx[delta_dis]) + self.dis_size // 2

                        for r in label:
                            relation_multi_label[i, j, r] = 1

                        relation_mask[i, j] = 1
                        # single-label target: pick one gold relation at random
                        rt = np.random.randint(len(label))
                        relation_label[i, j] = label[rt]

                    lower_bound = len(na_triple)
                    if self.negativa_alpha > 0.0:
                        # down-sample negatives to alpha * positives (min 20)
                        random.shuffle(na_triple)
                        lower_bound = int(max(20, len(train_tripe) * self.negativa_alpha))

                    for j, (h_idx, t_idx) in enumerate(na_triple[:lower_bound], len(train_tripe)):
                        hlist, tlist = entities[h_idx], entities[t_idx]
                        ht_pairs[i, j, :] = torch.Tensor([h_idx + 1, t_idx + 1])

                        delta_dis = hlist[0]['global_pos'][0] - tlist[0]['global_pos'][0]
                        if delta_dis < 0:
                            ht_pair_distance[i, j] = -int(self.dis2idx[-delta_dis]) + self.dis_size // 2
                        else:
                            ht_pair_distance[i, j] = int(self.dis2idx[delta_dis]) + self.dis_size // 2
                        relation_multi_label[i, j, 0] = 1  # class 0 = NA
                        relation_label[i, j] = 0
                        relation_mask[i, j] = 1

                    max_h_t_cnt = max(max_h_t_cnt, len(train_tripe) + lower_bound)
                else:
                    # eval mode: enumerate every ordered entity pair
                    j = 0
                    for h_idx in range(L):
                        for t_idx in range(L):
                            if h_idx != t_idx:
                                hlist, tlist = entities[h_idx], entities[t_idx]
                                ht_pairs[i, j, :] = torch.Tensor([h_idx + 1, t_idx + 1])
                                relation_mask[i, j] = 1

                                delta_dis = hlist[0]['global_pos'][0] - tlist[0]['global_pos'][0]
                                if delta_dis < 0:
                                    ht_pair_distance[i, j] = -int(self.dis2idx[-delta_dis]) + self.dis_size // 2
                                else:
                                    ht_pair_distance[i, j] = int(self.dis2idx[delta_dis]) + self.dis_size // 2
                                j += 1

                    max_h_t_cnt = max(max_h_t_cnt, j)

                L_vertex.append(L)
                titles.append(title)
                indexes.append(self.batches_order[idx][i])

            # word id 0 is padding, so >0 gives the valid-token mask
            context_word_mask = context_word_ids > 0
            context_word_length = context_word_mask.sum(1)
            batch_max_length = context_word_length.max()

            # trim padded buffers to the actual batch size and max length
            yield {'context_idxs': get_cuda(context_word_ids[:cur_bsz, :batch_max_length].contiguous()),
                   'context_pos': get_cuda(context_pos_ids[:cur_bsz, :batch_max_length].contiguous()),
                   'context_ner': get_cuda(context_ner_ids[:cur_bsz, :batch_max_length].contiguous()),
                   'context_mention': get_cuda(context_mention_ids[:cur_bsz, :batch_max_length].contiguous()),
                   'context_word_mask': get_cuda(context_word_mask[:cur_bsz, :batch_max_length].contiguous()),
                   'context_word_length': get_cuda(context_word_length[:cur_bsz].contiguous()),
                   'h_t_pairs': get_cuda(ht_pairs[:cur_bsz, :max_h_t_cnt, :2]),
                   'relation_label': get_cuda(relation_label[:cur_bsz, :max_h_t_cnt]).contiguous(),
                   'relation_multi_label': get_cuda(relation_multi_label[:cur_bsz, :max_h_t_cnt]),
                   'relation_mask': get_cuda(relation_mask[:cur_bsz, :max_h_t_cnt]),
                   'ht_pair_distance': get_cuda(ht_pair_distance[:cur_bsz, :max_h_t_cnt]),
                   'labels': label_list,
                   'L_vertex': L_vertex,
                   'titles': titles,
                   'indexes': indexes,
                   'graphs': graph_list,
                   'entity2mention_table': entity2mention_table,
                   'entity_graphs': entity_graph_list,
                   'path_table': path_table,
                   'overlaps': overlaps
                   }
| python | MIT | 178344cf00789c7ba05cfe4dca90df4b17c2caa9 | 2026-01-05T07:13:40.516065Z | false |
DreamInvoker/GAIN | https://github.com/DreamInvoker/GAIN/blob/178344cf00789c7ba05cfe4dca90df4b17c2caa9/code/models/GAIN.py | code/models/GAIN.py | import dgl
import dgl.nn.pytorch as dglnn
import numpy as np
import torch
import torch.nn as nn
from transformers import *
from utils import get_cuda
class GAIN_GloVe(nn.Module):
    """GAIN relation-extraction model with a GloVe + BiLSTM encoder.

    Per batch the forward pass:
      1. embeds words (optionally concatenated with entity-type and
         entity-id embeddings) and encodes them with a BiLSTM;
      2. averages word states into mention nodes, runs relational GCN
         layers over the heterogeneous mention graph, then averages
         mention representations into entity representations;
      3. computes edge features on the entity graph, attention-pools
         two-hop path evidence for each head/tail pair, and scores every
         pair with an MLP over [h; t; |h-t|; h*t; global; path] features.
    """

    def __init__(self, config):
        super(GAIN_GloVe, self).__init__()
        self.config = config
        word_emb_size = config.word_emb_size
        vocabulary_size = config.vocabulary_size
        encoder_input_size = word_emb_size
        self.activation = nn.Tanh() if config.activation == 'tanh' else nn.ReLU()
        self.word_emb = nn.Embedding(vocabulary_size, word_emb_size, padding_idx=config.word_pad)
        if config.pre_train_word:
            # Swap in pre-trained vectors; only the first word_emb_size
            # columns of the loaded matrix are used.
            self.word_emb = nn.Embedding(config.data_word_vec.shape[0], word_emb_size, padding_idx=config.word_pad)
            self.word_emb.weight.data.copy_(torch.from_numpy(config.data_word_vec[:, :word_emb_size]))
        self.word_emb.weight.requires_grad = config.finetune_word
        if config.use_entity_type:
            encoder_input_size += config.entity_type_size
            self.entity_type_emb = nn.Embedding(config.entity_type_num, config.entity_type_size,
                                                padding_idx=config.entity_type_pad)
        if config.use_entity_id:
            encoder_input_size += config.entity_id_size
            self.entity_id_emb = nn.Embedding(config.max_entity_num + 1, config.entity_id_size,
                                              padding_idx=config.entity_id_pad)
        self.encoder = BiLSTM(encoder_input_size, config)
        self.gcn_dim = config.gcn_dim
        assert self.gcn_dim == 2 * config.lstm_hidden_size, 'gcn dim should be the lstm hidden dim * 2'
        rel_name_lists = ['intra', 'inter', 'global']
        self.GCN_layers = nn.ModuleList([RelGraphConvLayer(self.gcn_dim, self.gcn_dim, rel_name_lists,
                                                           num_bases=len(rel_name_lists), activation=self.activation,
                                                           self_loop=True, dropout=self.config.dropout)
                                         for i in range(config.gcn_layers)])
        # Node representations are the concatenation of the GCN input and
        # every GCN layer's output ("bank").
        self.bank_size = self.config.gcn_dim * (self.config.gcn_layers + 1)
        self.dropout = nn.Dropout(self.config.dropout)
        self.predict = nn.Sequential(
            nn.Linear(self.bank_size * 5 + self.gcn_dim * 4, self.bank_size * 2),  #
            self.activation,
            self.dropout,
            nn.Linear(self.bank_size * 2, config.relation_nums),
        )
        self.edge_layer = RelEdgeLayer(node_feat=self.gcn_dim, edge_feat=self.gcn_dim,
                                       activation=self.activation, dropout=config.dropout)
        self.path_info_mapping = nn.Linear(self.gcn_dim * 4, self.gcn_dim * 4)
        self.attention = Attention(self.bank_size * 2, self.gcn_dim * 4)

    def forward(self, **params):
        '''
        words: [batch_size, max_length]
        src_lengths: [batchs_size]
        mask: [batch_size, max_length]
        entity_type: [batch_size, max_length]
        entity_id: [batch_size, max_length]
        mention_id: [batch_size, max_length]
        distance: [batch_size, max_length]
        entity2mention_table: list of [local_entity_num, local_mention_num]
        graphs: list of DGLHeteroGraph
        h_t_pairs: [batch_size, h_t_limit, 2]
        '''
        src = self.word_emb(params['words'])
        mask = params['mask']
        bsz, slen, _ = src.size()
        if self.config.use_entity_type:
            src = torch.cat([src, self.entity_type_emb(params['entity_type'])], dim=-1)
        if self.config.use_entity_id:
            src = torch.cat([src, self.entity_id_emb(params['entity_id'])], dim=-1)
        # src: [batch_size, slen, encoder_input_size]
        # src_lengths: [batchs_size]
        encoder_outputs, (output_h_t, _) = self.encoder(src, params['src_lengths'])
        encoder_outputs[mask == 0] = 0  # zero out padded positions
        # encoder_outputs: [batch_size, slen, 2*encoder_hid_size]
        # output_h_t: [batch_size, 2*encoder_hid_size]
        graphs = params['graphs']
        mention_id = params['mention_id']
        features = None
        # Build mention-node features: for each document, average the
        # encoder states of each mention's words. Node 0 of every graph is
        # the document node, initialised from the BiLSTM final state.
        for i in range(len(graphs)):
            encoder_output = encoder_outputs[i]  # [slen, 2*encoder_hid_size]
            mention_num = torch.max(mention_id[i])
            mention_index = get_cuda(
                (torch.arange(mention_num) + 1).unsqueeze(1).expand(-1, slen))  # [mention_num, slen]
            mentions = mention_id[i].unsqueeze(0).expand(mention_num, -1)  # [mention_num, slen]
            select_metrix = (mention_index == mentions).float()  # [mention_num, slen]
            # average word -> mention
            word_total_numbers = torch.sum(select_metrix, dim=-1).unsqueeze(-1).expand(-1, slen)  # [mention_num, slen]
            select_metrix = torch.where(word_total_numbers > 0, select_metrix / word_total_numbers, select_metrix)
            x = torch.mm(select_metrix, encoder_output)  # [mention_num, 2*encoder_hid_size]
            x = torch.cat((output_h_t[i].unsqueeze(0), x), dim=0)
            if features is None:
                features = x
            else:
                features = torch.cat((features, x), dim=0)
        graph_big = dgl.batch_hetero(graphs)
        output_features = [features]
        # Run the R-GCN stack; keep each intermediate representation so the
        # final node bank is the concatenation of all layer outputs.
        for GCN_layer in self.GCN_layers:
            features = GCN_layer(graph_big, {"node": features})["node"]  # [total_mention_nums, gcn_dim]
            output_features.append(features)
        output_feature = torch.cat(output_features, dim=-1)
        graphs = dgl.unbatch_hetero(graph_big)
        # mention -> entity
        entity2mention_table = params['entity2mention_table']  # list of [entity_num, mention_num]
        entity_num = torch.max(params['entity_id'])
        entity_bank = get_cuda(torch.Tensor(bsz, entity_num, self.bank_size))
        global_info = get_cuda(torch.Tensor(bsz, self.bank_size))
        cur_idx = 0
        entity_graph_feature = None
        for i in range(len(graphs)):
            # average mention -> entity
            select_metrix = entity2mention_table[i].float()  # [local_entity_num, mention_num]
            select_metrix[0][0] = 1  # keep the document-node row non-empty
            mention_nums = torch.sum(select_metrix, dim=-1).unsqueeze(-1).expand(-1, select_metrix.size(1))
            select_metrix = torch.where(mention_nums > 0, select_metrix / mention_nums, select_metrix)
            node_num = graphs[i].number_of_nodes('node')
            entity_representation = torch.mm(select_metrix, output_feature[cur_idx:cur_idx + node_num])
            entity_bank[i, :select_metrix.size(0) - 1] = entity_representation[1:]
            global_info[i] = output_feature[cur_idx]  # document node -> global context
            cur_idx += node_num
            # Entity-graph nodes are seeded with only the last gcn_dim slice
            # (the final GCN layer output) of each entity representation.
            if entity_graph_feature is None:
                entity_graph_feature = entity_representation[1:, -self.config.gcn_dim:]
            else:
                entity_graph_feature = torch.cat(
                    (entity_graph_feature, entity_representation[1:, -self.config.gcn_dim:]), dim=0)
        h_t_pairs = params['h_t_pairs']
        # Shift 1-based entity ids to 0-based indices (padding 0 stays 0).
        h_t_pairs = h_t_pairs + (h_t_pairs == 0).long() - 1  # [batch_size, h_t_limit, 2]
        h_t_limit = h_t_pairs.size(1)
        # [batch_size, h_t_limit, bank_size]
        h_entity_index = h_t_pairs[:, :, 0].unsqueeze(-1).expand(-1, -1, self.bank_size)
        t_entity_index = h_t_pairs[:, :, 1].unsqueeze(-1).expand(-1, -1, self.bank_size)
        # [batch_size, h_t_limit, bank_size]
        h_entity = torch.gather(input=entity_bank, dim=1, index=h_entity_index)
        t_entity = torch.gather(input=entity_bank, dim=1, index=t_entity_index)
        global_info = global_info.unsqueeze(1).expand(-1, h_t_limit, -1)
        entity_graphs = params['entity_graphs']
        entity_graph_big = dgl.batch(entity_graphs)
        self.edge_layer(entity_graph_big, entity_graph_feature)  # writes edata['h']
        entity_graphs = dgl.unbatch(entity_graph_big)
        path_info = get_cuda(torch.zeros((bsz, h_t_limit, self.gcn_dim * 4)))
        relation_mask = params['relation_mask']
        path_table = params['path_table']
        # For every (h, t) pair, gather edge features along each two-hop
        # path h -> v -> t (and its reverse) and attention-pool them.
        for i in range(len(entity_graphs)):
            path_t = path_table[i]
            for j in range(h_t_limit):
                if relation_mask is not None and relation_mask[i, j].item() == 0:
                    break
                h = h_t_pairs[i, j, 0].item()
                t = h_t_pairs[i, j, 1].item()
                # for evaluate
                if relation_mask is None and h == 0 and t == 0:
                    continue
                if (h + 1, t + 1) in path_t:
                    v = [val - 1 for val in path_t[(h + 1, t + 1)]]
                elif (t + 1, h + 1) in path_t:
                    v = [val - 1 for val in path_t[(t + 1, h + 1)]]
                else:
                    # NOTE(review): `v` is unbound on this branch, so this
                    # print raises UnboundLocalError before the assert fires.
                    print(h, t, v)
                    print(entity_graphs[i].all_edges())
                    print(h_t_pairs)
                    print(relation_mask)
                    assert 1 == 2
                middle_node_num = len(v)
                if middle_node_num == 0:
                    continue
                # forward
                edge_ids = get_cuda(entity_graphs[i].edge_ids([h for _ in range(middle_node_num)], v))
                forward_first = torch.index_select(entity_graphs[i].edata['h'], dim=0, index=edge_ids)
                edge_ids = get_cuda(entity_graphs[i].edge_ids(v, [t for _ in range(middle_node_num)]))
                forward_second = torch.index_select(entity_graphs[i].edata['h'], dim=0, index=edge_ids)
                # backward
                edge_ids = get_cuda(entity_graphs[i].edge_ids([t for _ in range(middle_node_num)], v))
                backward_first = torch.index_select(entity_graphs[i].edata['h'], dim=0, index=edge_ids)
                edge_ids = get_cuda(entity_graphs[i].edge_ids(v, [h for _ in range(middle_node_num)]))
                backward_second = torch.index_select(entity_graphs[i].edata['h'], dim=0, index=edge_ids)
                tmp_path_info = torch.cat((forward_first, forward_second, backward_first, backward_second), dim=-1)
                _, attn_value = self.attention(torch.cat((h_entity[i, j], t_entity[i, j]), dim=-1), tmp_path_info)
                path_info[i, j] = attn_value
            entity_graphs[i].edata.pop('h')
        path_info = self.dropout(
            self.activation(
                self.path_info_mapping(path_info)
            )
        )
        predictions = self.predict(torch.cat(
            (h_entity, t_entity, torch.abs(h_entity - t_entity), torch.mul(h_entity, t_entity), global_info, path_info),
            dim=-1))
        return predictions
class GAIN_BERT(nn.Module):
    """GAIN relation-extraction model with a BERT encoder.

    Same pipeline as GAIN_GloVe, except word states come from BERT
    (optionally frozen via ``config.bert_fix``) and the document node is
    initialised from BERT's pooled [CLS] output instead of a BiLSTM final
    state.
    """

    def __init__(self, config):
        super(GAIN_BERT, self).__init__()
        self.config = config
        if config.activation == 'tanh':
            self.activation = nn.Tanh()
        elif config.activation == 'relu':
            self.activation = nn.ReLU()
        else:
            assert 1 == 2, "you should provide activation function."
        if config.use_entity_type:
            self.entity_type_emb = nn.Embedding(config.entity_type_num, config.entity_type_size,
                                                padding_idx=config.entity_type_pad)
        if config.use_entity_id:
            self.entity_id_emb = nn.Embedding(config.max_entity_num + 1, config.entity_id_size,
                                              padding_idx=config.entity_id_pad)
        self.bert = BertModel.from_pretrained(config.bert_path)
        if config.bert_fix:
            # Freeze BERT entirely when fine-tuning is disabled.
            for p in self.bert.parameters():
                p.requires_grad = False
        self.gcn_dim = config.gcn_dim
        assert self.gcn_dim == config.bert_hid_size + config.entity_id_size + config.entity_type_size
        rel_name_lists = ['intra', 'inter', 'global']
        self.GCN_layers = nn.ModuleList([RelGraphConvLayer(self.gcn_dim, self.gcn_dim, rel_name_lists,
                                                           num_bases=len(rel_name_lists), activation=self.activation,
                                                           self_loop=True, dropout=self.config.dropout)
                                         for i in range(config.gcn_layers)])
        # Node bank = GCN input concatenated with every GCN layer output.
        self.bank_size = self.gcn_dim * (self.config.gcn_layers + 1)
        self.dropout = nn.Dropout(self.config.dropout)
        self.predict = nn.Sequential(
            nn.Linear(self.bank_size * 5 + self.gcn_dim * 4, self.bank_size * 2),
            self.activation,
            self.dropout,
            nn.Linear(self.bank_size * 2, config.relation_nums),
        )
        self.edge_layer = RelEdgeLayer(node_feat=self.gcn_dim, edge_feat=self.gcn_dim,
                                       activation=self.activation, dropout=config.dropout)
        self.path_info_mapping = nn.Linear(self.gcn_dim * 4, self.gcn_dim * 4)
        self.attention = Attention(self.bank_size * 2, self.gcn_dim * 4)

    def forward(self, **params):
        '''
        words: [batch_size, max_length]
        src_lengths: [batchs_size]
        mask: [batch_size, max_length]
        entity_type: [batch_size, max_length]
        entity_id: [batch_size, max_length]
        mention_id: [batch_size, max_length]
        distance: [batch_size, max_length]
        entity2mention_table: list of [local_entity_num, local_mention_num]
        graphs: list of DGLHeteroGraph
        h_t_pairs: [batch_size, h_t_limit, 2]
        ht_pair_distance: [batch_size, h_t_limit]
        '''
        words = params['words']
        mask = params['mask']
        bsz, slen = words.size()
        # NOTE(review): relies on the tuple return (sequence_output,
        # pooled_output) of older `transformers` versions — confirm against
        # the pinned transformers release.
        encoder_outputs, sentence_cls = self.bert(input_ids=words, attention_mask=mask)
        # encoder_outputs[mask == 0] = 0
        if self.config.use_entity_type:
            encoder_outputs = torch.cat([encoder_outputs, self.entity_type_emb(params['entity_type'])], dim=-1)
        if self.config.use_entity_id:
            encoder_outputs = torch.cat([encoder_outputs, self.entity_id_emb(params['entity_id'])], dim=-1)
        # Zero-pad the pooled [CLS] vector so it matches the widened word
        # states (type/id embedding columns).
        sentence_cls = torch.cat(
            (sentence_cls, get_cuda(torch.zeros((bsz, self.config.entity_type_size + self.config.entity_id_size)))),
            dim=-1)
        # encoder_outputs: [batch_size, slen, bert_hid+type_size+id_size]
        # sentence_cls: [batch_size, bert_hid+type_size+id_size]
        graphs = params['graphs']
        mention_id = params['mention_id']
        features = None
        # Build mention-node features by averaging word states per mention;
        # node 0 of each graph is the document node seeded with [CLS].
        for i in range(len(graphs)):
            encoder_output = encoder_outputs[i]  # [slen, bert_hid]
            mention_num = torch.max(mention_id[i])
            mention_index = get_cuda(
                (torch.arange(mention_num) + 1).unsqueeze(1).expand(-1, slen))  # [mention_num, slen]
            mentions = mention_id[i].unsqueeze(0).expand(mention_num, -1)  # [mention_num, slen]
            select_metrix = (mention_index == mentions).float()  # [mention_num, slen]
            # average word -> mention
            word_total_numbers = torch.sum(select_metrix, dim=-1).unsqueeze(-1).expand(-1, slen)  # [mention_num, slen]
            select_metrix = torch.where(word_total_numbers > 0, select_metrix / word_total_numbers, select_metrix)
            x = torch.mm(select_metrix, encoder_output)  # [mention_num, bert_hid]
            x = torch.cat((sentence_cls[i].unsqueeze(0), x), dim=0)
            if features is None:
                features = x
            else:
                features = torch.cat((features, x), dim=0)
        graph_big = dgl.batch_hetero(graphs)
        output_features = [features]
        # R-GCN stack; keep every intermediate so the node bank is the
        # concatenation of all layer outputs.
        for GCN_layer in self.GCN_layers:
            features = GCN_layer(graph_big, {"node": features})["node"]  # [total_mention_nums, gcn_dim]
            output_features.append(features)
        output_feature = torch.cat(output_features, dim=-1)
        graphs = dgl.unbatch_hetero(graph_big)
        # mention -> entity
        entity2mention_table = params['entity2mention_table']  # list of [entity_num, mention_num]
        entity_num = torch.max(params['entity_id'])
        entity_bank = get_cuda(torch.Tensor(bsz, entity_num, self.bank_size))
        global_info = get_cuda(torch.Tensor(bsz, self.bank_size))
        cur_idx = 0
        entity_graph_feature = None
        for i in range(len(graphs)):
            # average mention -> entity
            select_metrix = entity2mention_table[i].float()  # [local_entity_num, mention_num]
            select_metrix[0][0] = 1  # keep the document-node row non-empty
            mention_nums = torch.sum(select_metrix, dim=-1).unsqueeze(-1).expand(-1, select_metrix.size(1))
            select_metrix = torch.where(mention_nums > 0, select_metrix / mention_nums, select_metrix)
            node_num = graphs[i].number_of_nodes('node')
            entity_representation = torch.mm(select_metrix, output_feature[cur_idx:cur_idx + node_num])
            entity_bank[i, :select_metrix.size(0) - 1] = entity_representation[1:]
            global_info[i] = output_feature[cur_idx]  # document node -> global context
            cur_idx += node_num
            # Entity-graph nodes are seeded with the final GCN layer slice.
            if entity_graph_feature is None:
                entity_graph_feature = entity_representation[1:, -self.gcn_dim:]
            else:
                entity_graph_feature = torch.cat((entity_graph_feature, entity_representation[1:, -self.gcn_dim:]),
                                                 dim=0)
        h_t_pairs = params['h_t_pairs']
        # Shift 1-based entity ids to 0-based indices (padding 0 stays 0).
        h_t_pairs = h_t_pairs + (h_t_pairs == 0).long() - 1  # [batch_size, h_t_limit, 2]
        h_t_limit = h_t_pairs.size(1)
        # [batch_size, h_t_limit, bank_size]
        h_entity_index = h_t_pairs[:, :, 0].unsqueeze(-1).expand(-1, -1, self.bank_size)
        t_entity_index = h_t_pairs[:, :, 1].unsqueeze(-1).expand(-1, -1, self.bank_size)
        # [batch_size, h_t_limit, bank_size]
        h_entity = torch.gather(input=entity_bank, dim=1, index=h_entity_index)
        t_entity = torch.gather(input=entity_bank, dim=1, index=t_entity_index)
        global_info = global_info.unsqueeze(1).expand(-1, h_t_limit, -1)
        entity_graphs = params['entity_graphs']
        entity_graph_big = dgl.batch(entity_graphs)
        self.edge_layer(entity_graph_big, entity_graph_feature)  # writes edata['h']
        entity_graphs = dgl.unbatch(entity_graph_big)
        path_info = get_cuda(torch.zeros((bsz, h_t_limit, self.gcn_dim * 4)))
        relation_mask = params['relation_mask']
        path_table = params['path_table']
        # Attention-pool two-hop path edge features for every (h, t) pair.
        for i in range(len(entity_graphs)):
            path_t = path_table[i]
            for j in range(h_t_limit):
                if relation_mask is not None and relation_mask[i, j].item() == 0:
                    break
                h = h_t_pairs[i, j, 0].item()
                t = h_t_pairs[i, j, 1].item()
                # for evaluate
                if relation_mask is None and h == 0 and t == 0:
                    continue
                if (h + 1, t + 1) in path_t:
                    v = [val - 1 for val in path_t[(h + 1, t + 1)]]
                elif (t + 1, h + 1) in path_t:
                    v = [val - 1 for val in path_t[(t + 1, h + 1)]]
                else:
                    # NOTE(review): `v` is unbound on this branch, so this
                    # print raises UnboundLocalError before the assert fires.
                    print(h, t, v)
                    print(entity_graphs[i].number_of_nodes())
                    print(entity_graphs[i].all_edges())
                    print(path_table)
                    print(h_t_pairs)
                    print(relation_mask)
                    assert 1 == 2
                middle_node_num = len(v)
                if middle_node_num == 0:
                    continue
                # forward
                edge_ids = get_cuda(entity_graphs[i].edge_ids([h for _ in range(middle_node_num)], v))
                forward_first = torch.index_select(entity_graphs[i].edata['h'], dim=0, index=edge_ids)
                edge_ids = get_cuda(entity_graphs[i].edge_ids(v, [t for _ in range(middle_node_num)]))
                forward_second = torch.index_select(entity_graphs[i].edata['h'], dim=0, index=edge_ids)
                # backward
                edge_ids = get_cuda(entity_graphs[i].edge_ids([t for _ in range(middle_node_num)], v))
                backward_first = torch.index_select(entity_graphs[i].edata['h'], dim=0, index=edge_ids)
                edge_ids = get_cuda(entity_graphs[i].edge_ids(v, [h for _ in range(middle_node_num)]))
                backward_second = torch.index_select(entity_graphs[i].edata['h'], dim=0, index=edge_ids)
                tmp_path_info = torch.cat((forward_first, forward_second, backward_first, backward_second), dim=-1)
                _, attn_value = self.attention(torch.cat((h_entity[i, j], t_entity[i, j]), dim=-1), tmp_path_info)
                path_info[i, j] = attn_value
            entity_graphs[i].edata.pop('h')
        path_info = self.dropout(
            self.activation(
                self.path_info_mapping(path_info)
            )
        )
        predictions = self.predict(torch.cat(
            (h_entity, t_entity, torch.abs(h_entity - t_entity), torch.mul(h_entity, t_entity), global_info, path_info),
            dim=-1))
        # predictions = self.predict(torch.cat((h_entity, t_entity, torch.abs(h_entity-t_entity), torch.mul(h_entity, t_entity), global_info), dim=-1))
        return predictions
class Attention(nn.Module):
    """Bilinear attention pooling over a set of path representations.

    Scores each target row against a single source (query) vector with a
    bilinear form, normalises the scores across the rows, and returns the
    weighted combination of the target rows.
    """

    def __init__(self, src_size, trg_size):
        super().__init__()
        self.W = nn.Bilinear(src_size, trg_size, 1)
        # Normalise over the row (middle-node) dimension. The raw scores have
        # shape [middle_node, 1]; a softmax over dim=-1 would normalise each
        # size-1 row to exactly 1 and degenerate attention into a plain sum
        # of the target rows — that was the original bug.
        self.softmax = nn.Softmax(dim=0)

    def forward(self, src, trg, attention_mask=None):
        '''
        src: [src_size] query vector
        trg: [middle_node, trg_size] candidate rows
        Returns (score, value): attention weights over the rows (summing to
        1) and the attention-weighted combination of trg rows, [trg_size].
        '''
        # Broadcast the query against every target row: [middle_node, 1].
        score = self.W(src.unsqueeze(0).expand(trg.size(0), -1), trg)
        score = self.softmax(score)
        # [1, middle_node] x [middle_node, trg_size] -> [1, trg_size]
        value = torch.mm(score.permute(1, 0), trg)
        return score.squeeze(0), value.squeeze(0)
class BiLSTM(nn.Module):
    """Bidirectional LSTM encoder over padded, batch-first sequences.

    Sorts the batch by length for packing, runs the LSTM, and restores the
    caller's original batch order before returning. Dropout is applied to
    the input and to every returned tensor.
    """

    def __init__(self, input_size, config):
        super().__init__()
        self.config = config
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=config.lstm_hidden_size,
                            num_layers=config.nlayers, batch_first=True,
                            bidirectional=True)
        self.in_dropout = nn.Dropout(config.dropout)
        self.out_dropout = nn.Dropout(config.dropout)

    def forward(self, src, src_lengths):
        """Encode *src* [bsz, slen, input_size] given *src_lengths* [bsz].

        Returns (outputs, (h_t, c_t)): per-step states
        [bsz, slen', 2*hidden] and the last layer's concatenated
        forward/backward final states, each [bsz, 2*hidden].
        """
        self.lstm.flatten_parameters()
        bsz, slen, _ = src.size()
        src = self.in_dropout(src)
        # Sort descending by length so pack_padded_sequence accepts the batch.
        sorted_lengths, order = torch.sort(src_lengths, dim=-1, descending=True)
        packed = nn.utils.rnn.pack_padded_sequence(
            torch.index_select(src, 0, order), sorted_lengths,
            batch_first=True, enforce_sorted=True)
        packed_out, (h_n, c_n) = self.lstm(packed)
        padded_out, _ = nn.utils.rnn.pad_packed_sequence(
            packed_out, batch_first=True, padding_value=self.config.word_pad)
        # Undo the sort so results line up with the caller's batch order.
        restore = torch.argsort(order)
        outputs = torch.index_select(padded_out, 0, restore)
        # Separate the (layer, direction) axes to grab the last layer's
        # forward/backward final states and concatenate them per example.
        h_n = h_n.view(self.config.nlayers, 2, bsz, self.config.lstm_hidden_size)
        c_n = c_n.view(self.config.nlayers, 2, bsz, self.config.lstm_hidden_size)
        final_h = torch.index_select(torch.cat((h_n[-1, 0], h_n[-1, 1]), dim=-1), 0, restore)
        final_c = torch.index_select(torch.cat((c_n[-1, 0], c_n[-1, 1]), dim=-1), 0, restore)
        return (self.out_dropout(outputs),
                (self.out_dropout(final_h), self.out_dropout(final_c)))
class RelGraphConvLayer(nn.Module):
    r"""Relational graph convolution layer.
    Parameters
    ----------
    in_feat : int
        Input feature size.
    out_feat : int
        Output feature size.
    rel_names : list[str]
        Relation names.
    num_bases : int, optional
        Number of bases. If is none, use number of relations. Default: None.
    weight : bool, optional
        True if a linear layer is applied after message passing. Default: True
    bias : bool, optional
        True if bias is added. Default: True
    activation : callable, optional
        Activation function. Default: None
    self_loop : bool, optional
        True to include self loop message. Default: False
    dropout : float, optional
        Dropout rate. Default: 0.0
    """

    def __init__(self,
                 in_feat,
                 out_feat,
                 rel_names,
                 num_bases,
                 *,
                 weight=True,
                 bias=True,
                 activation=None,
                 self_loop=False,
                 dropout=0.0):
        super(RelGraphConvLayer, self).__init__()
        self.in_feat = in_feat
        self.out_feat = out_feat
        self.rel_names = rel_names
        self.num_bases = num_bases
        self.bias = bias
        self.activation = activation
        self.self_loop = self_loop
        # One GraphConv per relation; weight=False because the per-relation
        # weights are supplied externally (possibly basis-shared) in forward.
        self.conv = dglnn.HeteroGraphConv({
            rel: dglnn.GraphConv(in_feat, out_feat, norm='right', weight=False, bias=False)
            for rel in rel_names
        })
        self.use_weight = weight
        # Basis decomposition only pays off when there are fewer bases than
        # relations.
        self.use_basis = num_bases < len(self.rel_names) and weight
        if self.use_weight:
            if self.use_basis:
                self.basis = dglnn.WeightBasis((in_feat, out_feat), num_bases, len(self.rel_names))
            else:
                self.weight = nn.Parameter(torch.Tensor(len(self.rel_names), in_feat, out_feat))
                nn.init.xavier_uniform_(self.weight, gain=nn.init.calculate_gain('relu'))
        # bias
        if bias:
            self.h_bias = nn.Parameter(torch.Tensor(out_feat))
            nn.init.zeros_(self.h_bias)
        # weight for self loop
        if self.self_loop:
            self.loop_weight = nn.Parameter(torch.Tensor(in_feat, out_feat))
            nn.init.xavier_uniform_(self.loop_weight,
                                    gain=nn.init.calculate_gain('relu'))
        self.dropout = nn.Dropout(dropout)

    def forward(self, g, inputs):
        """Forward computation
        Parameters
        ----------
        g : DGLHeteroGraph
            Input graph.
        inputs : dict[str, torch.Tensor]
            Node feature for each node type.
        Returns
        -------
        dict[str, torch.Tensor]
            New node features for each node type.
        """
        g = g.local_var()
        if self.use_weight:
            # Materialise the per-relation weight matrices (from the shared
            # basis if enabled) and hand one [in_feat, out_feat] matrix to
            # each relation's GraphConv.
            weight = self.basis() if self.use_basis else self.weight
            wdict = {self.rel_names[i]: {'weight': w.squeeze(0)}
                     for i, w in enumerate(torch.split(weight, 1, dim=0))}
        else:
            wdict = {}
        hs = self.conv(g, inputs, mod_kwargs=wdict)

        def _apply(ntype, h):
            # Post-process each node type: self-loop message, bias,
            # activation, dropout — in that order.
            if self.self_loop:
                h = h + torch.matmul(inputs[ntype], self.loop_weight)
            if self.bias:
                h = h + self.h_bias
            if self.activation:
                h = self.activation(h)
            return self.dropout(h)
        return {ntype: _apply(ntype, h) for ntype, h in hs.items()}
class RelEdgeLayer(nn.Module):
    """Computes an edge representation for every edge of a DGL graph from
    the features of its two endpoint nodes.

    The result is written in place to ``g.edata['h']``; forward returns
    nothing.
    """

    def __init__(self,
                 node_feat,
                 edge_feat,
                 activation,
                 dropout=0.0):
        super(RelEdgeLayer, self).__init__()
        self.node_feat = node_feat
        self.edge_feat = edge_feat
        self.activation = activation
        self.dropout = nn.Dropout(dropout)
        self.mapping = nn.Linear(node_feat * 2, edge_feat)

    def forward(self, g, inputs):
        def _edge_udf(edges):
            # Concatenate source/destination node features, project, then
            # apply activation and dropout.
            pair = torch.cat((edges.src['h'], edges.dst['h']), dim=-1)
            return {'h': self.dropout(self.activation(self.mapping(pair)))}

        # Stash node features, compute edge features in place, then remove
        # the temporary node field; results remain in g.edata['h'].
        g.ndata['h'] = inputs  # [total_mention_num, node_feat]
        g.apply_edges(_edge_udf)
        g.ndata.pop('h')
class Bert():
    """Convenience wrapper around a HuggingFace ``BertTokenizer``.

    Handles [CLS]/[SEP] framing, id conversion with fixed-length padding
    (``max_len`` = 512), and subword tokenization that records which
    subword starts each original token.
    """

    MASK = '[MASK]'
    CLS = "[CLS]"
    SEP = "[SEP]"

    def __init__(self, model_class, model_name, model_path=None):
        super().__init__()
        self.model_name = model_name
        print(model_path)
        self.tokenizer = BertTokenizer.from_pretrained(model_path)
        self.max_len = 512

    def tokenize(self, text, masked_idxs=None):
        """Wordpiece-tokenize *text*, optionally masking the given
        positions, and frame the result with [CLS]/[SEP]."""
        pieces = self.tokenizer.tokenize(text)
        if masked_idxs is not None:
            for idx in masked_idxs:
                pieces[idx] = self.MASK
        # prepend [CLS] and append [SEP]
        # see https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/examples/run_classifier.py#L195 # NOQA
        return [self.CLS] + pieces + [self.SEP]

    def tokenize_to_ids(self, text, masked_idxs=None, pad=True):
        """Tokenize and convert to ids in one call; returns (tokens, ids)."""
        tokens = self.tokenize(text, masked_idxs)
        return tokens, self.convert_tokens_to_ids(tokens, pad=pad)

    def convert_tokens_to_ids(self, tokens, pad=True):
        """Convert token strings to a [1, len] id tensor, truncated to
        ``max_len``.

        With ``pad=True`` returns (padded_ids, mask), each [1, max_len];
        otherwise returns the unpadded ids.
        """
        ids = torch.tensor([self.tokenizer.convert_tokens_to_ids(tokens)])
        # assert ids.size(1) < self.max_len
        ids = ids[:, :self.max_len]  # https://github.com/DreamInvoker/GAIN/issues/4
        if not pad:
            return ids
        padded_ids = torch.zeros(1, self.max_len).to(ids)
        padded_ids[0, :ids.size(1)] = ids
        mask = torch.zeros(1, self.max_len).to(ids)
        mask[0, :ids.size(1)] = 1
        return padded_ids, mask

    def flatten(self, list_of_lists):
        """Lazily yield the items of each sub-list in order."""
        for sublist in list_of_lists:
            yield from sublist

    def subword_tokenize(self, tokens):
        """Segment each token into subwords while keeping track of token
        boundaries.

        Returns (subwords, token_start_idxs): the flattened subword list
        framed by [CLS]/[SEP] (interior truncated to 509 subwords), and the
        index of the first subword of each input token; starts that fall
        beyond position 509 are mapped to 512.
        """
        per_token = [self.tokenizer.tokenize(t) for t in tokens]
        lengths = [len(p) for p in per_token]
        interior = list(self.flatten(per_token))[:509]
        subwords = [self.CLS] + interior + [self.SEP]
        token_start_idxs = 1 + np.cumsum([0] + lengths[:-1])
        token_start_idxs[token_start_idxs > 509] = 512
        return subwords, token_start_idxs

    def subword_tokenize_to_ids(self, tokens):
        """Subword-tokenize and convert to padded ids.

        Returns (subword_ids ndarray, token_start_idxs, subwords).
        """
        subwords, token_start_idxs = self.subword_tokenize(tokens)
        subword_ids, mask = self.convert_tokens_to_ids(subwords)
        return subword_ids.numpy(), token_start_idxs, subwords

    def segment_ids(self, segment1_len, segment2_len):
        """Segment-id vector: zeros for the first segment, ones for the
        second; shape [1, segment1_len + segment2_len]."""
        return torch.tensor([[0] * segment1_len + [1] * segment2_len])
| python | MIT | 178344cf00789c7ba05cfe4dca90df4b17c2caa9 | 2026-01-05T07:13:40.516065Z | false |
DreamInvoker/GAIN | https://github.com/DreamInvoker/GAIN/blob/178344cf00789c7ba05cfe4dca90df4b17c2caa9/code/models/GAIN_nomention.py | code/models/GAIN_nomention.py | import dgl
import dgl.nn.pytorch as dglnn
import torch
import torch.nn as nn
from transformers import *
from utils import get_cuda
# Ablation-study variant of GAIN with the mention-level graph module removed.
class GAIN_GloVe(nn.Module):
    """Mention-module ablation of GAIN with a GloVe + BiLSTM encoder.

    Unlike the full model, mention features are averaged directly into
    entity representations (no R-GCN over the mention graph); plain
    GraphConv layers are then run over the entity graph only.
    """

    def __init__(self, config):
        super(GAIN_GloVe, self).__init__()
        self.config = config
        word_emb_size = config.word_emb_size
        vocabulary_size = config.vocabulary_size
        encoder_input_size = word_emb_size
        self.activation = nn.Tanh() if config.activation == 'tanh' else nn.ReLU()
        self.word_emb = nn.Embedding(vocabulary_size, word_emb_size, padding_idx=config.word_pad)
        if config.pre_train_word:
            # Swap in pre-trained vectors; only the first word_emb_size
            # columns of the loaded matrix are used.
            self.word_emb = nn.Embedding(config.data_word_vec.shape[0], word_emb_size, padding_idx=config.word_pad)
            self.word_emb.weight.data.copy_(torch.from_numpy(config.data_word_vec[:, :word_emb_size]))
        self.word_emb.weight.requires_grad = config.finetune_word
        if config.use_entity_type:
            encoder_input_size += config.entity_type_size
            self.entity_type_emb = nn.Embedding(config.entity_type_num, config.entity_type_size,
                                                padding_idx=config.entity_type_pad)
        if config.use_entity_id:
            encoder_input_size += config.entity_id_size
            self.entity_id_emb = nn.Embedding(config.max_entity_num + 1, config.entity_id_size,
                                              padding_idx=config.entity_id_pad)
        self.encoder = BiLSTM(encoder_input_size, config)
        self.gcn_dim = config.gcn_dim
        assert self.gcn_dim == 2 * config.lstm_hidden_size, 'gcn dim should be the lstm hidden dim * 2'
        # NOTE(review): unused in this ablation — the layers below are plain
        # (non-relational) GraphConv.
        rel_name_lists = ['intra', 'inter', 'global']
        self.GCN_layers = nn.ModuleList([dglnn.GraphConv(self.gcn_dim, self.gcn_dim, norm='right', weight=True,
                                                         bias=True, activation=self.activation)
                                         for i in range(config.gcn_layers)])
        # Entity bank = GCN input concatenated with every GCN layer output.
        self.bank_size = self.config.gcn_dim * (self.config.gcn_layers + 1)
        self.dropout = nn.Dropout(self.config.dropout)
        self.predict = nn.Sequential(
            nn.Linear(self.bank_size * 4 + self.gcn_dim * 5, self.bank_size * 2),
            self.activation,
            self.dropout,
            nn.Linear(self.bank_size * 2, config.relation_nums),
        )
        self.edge_layer = RelEdgeLayer(node_feat=self.gcn_dim, edge_feat=self.gcn_dim,
                                       activation=self.activation, dropout=config.dropout)
        self.path_info_mapping = nn.Linear(self.gcn_dim * 4, self.gcn_dim * 4)
        self.attention = Attention(self.bank_size * 2, self.gcn_dim * 4)

    def forward(self, **params):
        '''
        words: [batch_size, max_length]
        src_lengths: [batchs_size]
        mask: [batch_size, max_length]
        entity_type: [batch_size, max_length]
        entity_id: [batch_size, max_length]
        mention_id: [batch_size, max_length]
        distance: [batch_size, max_length]
        entity2mention_table: list of [local_entity_num, local_mention_num]
        graphs: list of DGLHeteroGraph
        h_t_pairs: [batch_size, h_t_limit, 2]
        '''
        src = self.word_emb(params['words'])
        mask = params['mask']
        bsz, slen, _ = src.size()
        if self.config.use_entity_type:
            src = torch.cat([src, self.entity_type_emb(params['entity_type'])], dim=-1)
        if self.config.use_entity_id:
            src = torch.cat([src, self.entity_id_emb(params['entity_id'])], dim=-1)
        # src: [batch_size, slen, encoder_input_size]
        # src_lengths: [batchs_size]
        encoder_outputs, (output_h_t, _) = self.encoder(src, params['src_lengths'])
        encoder_outputs[mask == 0] = 0  # zero out padded positions
        # encoder_outputs: [batch_size, slen, 2*encoder_hid_size]
        # output_h_t: [batch_size, 2*encoder_hid_size]
        graphs = params['graphs']
        mention_id = params['mention_id']
        features = None
        # Build mention features by averaging word states per mention; the
        # first row of each document block is the BiLSTM final state.
        for i in range(len(graphs)):
            encoder_output = encoder_outputs[i]  # [slen, 2*encoder_hid_size]
            mention_num = torch.max(mention_id[i])
            mention_index = get_cuda(
                (torch.arange(mention_num) + 1).unsqueeze(1).expand(-1, slen))  # [mention_num, slen]
            mentions = mention_id[i].unsqueeze(0).expand(mention_num, -1)  # [mention_num, slen]
            select_metrix = (mention_index == mentions).float()  # [mention_num, slen]
            # average word -> mention
            word_total_numbers = torch.sum(select_metrix, dim=-1).unsqueeze(-1).expand(-1, slen)  # [mention_num, slen]
            select_metrix = torch.where(word_total_numbers > 0, select_metrix / word_total_numbers, select_metrix)
            x = torch.mm(select_metrix, encoder_output)  # [mention_num, 2*encoder_hid_size]
            x = torch.cat((output_h_t[i].unsqueeze(0), x), dim=0)
            # x = torch.cat((torch.max(encoder_output, dim=0)[0].unsqueeze(0), x), dim=0)
            if features is None:
                features = x
            else:
                features = torch.cat((features, x), dim=0)
        # mention -> entity
        entity2mention_table = params['entity2mention_table']  # list of [entity_num, mention_num]
        entity_num = torch.max(params['entity_id'])
        global_info = get_cuda(torch.Tensor(bsz, self.gcn_dim))
        cur_idx = 0
        entity_graph_feature = None
        for i in range(len(graphs)):
            # average mention -> entity
            select_metrix = entity2mention_table[i].float()  # [local_entity_num, mention_num]
            select_metrix[0][0] = 1  # keep the document-node row non-empty
            mention_nums = torch.sum(select_metrix, dim=-1).unsqueeze(-1).expand(-1, select_metrix.size(1))
            select_metrix = torch.where(mention_nums > 0, select_metrix / mention_nums, select_metrix)
            node_num = graphs[i].number_of_nodes('node')
            entity_representation = torch.mm(select_metrix, features[cur_idx:cur_idx + node_num])
            global_info[i] = features[cur_idx]  # document row -> global context
            cur_idx += node_num
            if entity_graph_feature is None:
                entity_graph_feature = entity_representation[1:]
            else:
                entity_graph_feature = torch.cat((entity_graph_feature, entity_representation[1:]), dim=0)
        entity_graphs = params['entity_graphs']
        entity_graph_big = dgl.batch(entity_graphs)
        output_features = [entity_graph_feature]
        # GCN over the entity graph only (the mention-graph module is the
        # part removed by this ablation); keep every layer output.
        for GCN_layer in self.GCN_layers:
            entity_graph_feature = GCN_layer(entity_graph_big, entity_graph_feature)
            output_features.append(entity_graph_feature)
        output_features = torch.cat(output_features, dim=-1)
        self.edge_layer(entity_graph_big, entity_graph_feature)  # writes edata['h']
        entity_bank = get_cuda(torch.Tensor(bsz, entity_num, self.bank_size))
        entity_graphs = dgl.unbatch(entity_graph_big)
        cur_idx = 0
        # Scatter the flat per-node features back into a padded per-document
        # entity bank.
        for i in range(len(entity_graphs)):
            node_num = entity_graphs[i].number_of_nodes()
            entity_bank[i, :node_num] = output_features[cur_idx:cur_idx + node_num]
            cur_idx += node_num
        h_t_pairs = params['h_t_pairs']
        # Shift 1-based entity ids to 0-based indices (padding 0 stays 0).
        h_t_pairs = h_t_pairs + (h_t_pairs == 0).long() - 1  # [batch_size, h_t_limit, 2]
        h_t_limit = h_t_pairs.size(1)
        # [batch_size, h_t_limit, bank_size]
        h_entity_index = h_t_pairs[:, :, 0].unsqueeze(-1).expand(-1, -1, self.bank_size)
        t_entity_index = h_t_pairs[:, :, 1].unsqueeze(-1).expand(-1, -1, self.bank_size)
        # [batch_size, h_t_limit, bank_size]
        h_entity = torch.gather(input=entity_bank, dim=1, index=h_entity_index)
        t_entity = torch.gather(input=entity_bank, dim=1, index=t_entity_index)
        global_info = global_info.unsqueeze(1).expand(-1, h_t_limit, -1)
        path_info = get_cuda(torch.zeros((bsz, h_t_limit, self.gcn_dim * 4)))
        relation_mask = params['relation_mask']
        path_table = params['path_table']
        # Attention-pool two-hop path edge features for every (h, t) pair.
        for i in range(len(entity_graphs)):
            path_t = path_table[i]
            for j in range(h_t_limit):
                if relation_mask is not None and relation_mask[i, j].item() == 0:
                    break
                h = h_t_pairs[i, j, 0].item()
                t = h_t_pairs[i, j, 1].item()
                # for evaluate
                if relation_mask is None and h == 0 and t == 0:
                    continue
                if (h + 1, t + 1) in path_t:
                    v = [val - 1 for val in path_t[(h + 1, t + 1)]]
                elif (t + 1, h + 1) in path_t:
                    v = [val - 1 for val in path_t[(t + 1, h + 1)]]
                else:
                    # NOTE(review): `v` is unbound on this branch, so this
                    # print raises UnboundLocalError before the assert fires.
                    print(h, t, v)
                    print(entity_graphs[i].all_edges())
                    print(h_t_pairs)
                    print(relation_mask)
                    assert 1 == 2
                middle_node_num = len(v)
                if middle_node_num == 0:
                    continue
                # forward
                edge_ids = get_cuda(entity_graphs[i].edge_ids([h for _ in range(middle_node_num)], v))
                forward_first = torch.index_select(entity_graphs[i].edata['h'], dim=0, index=edge_ids)
                edge_ids = get_cuda(entity_graphs[i].edge_ids(v, [t for _ in range(middle_node_num)]))
                forward_second = torch.index_select(entity_graphs[i].edata['h'], dim=0, index=edge_ids)
                # backward
                edge_ids = get_cuda(entity_graphs[i].edge_ids([t for _ in range(middle_node_num)], v))
                backward_first = torch.index_select(entity_graphs[i].edata['h'], dim=0, index=edge_ids)
                edge_ids = get_cuda(entity_graphs[i].edge_ids(v, [h for _ in range(middle_node_num)]))
                backward_second = torch.index_select(entity_graphs[i].edata['h'], dim=0, index=edge_ids)
                tmp_path_info = torch.cat((forward_first, forward_second, backward_first, backward_second), dim=-1)
                _, attn_value = self.attention(torch.cat((h_entity[i, j], t_entity[i, j]), dim=-1), tmp_path_info)
                path_info[i, j] = attn_value
            entity_graphs[i].edata.pop('h')
        path_info = self.dropout(
            self.activation(
                self.path_info_mapping(path_info)
            )
        )
        predictions = self.predict(torch.cat(
            (h_entity, t_entity, torch.abs(h_entity - t_entity), torch.mul(h_entity, t_entity), global_info, path_info),
            dim=-1))
        return predictions
class GAIN_BERT(nn.Module):
    """GAIN document-level relation-extraction model with a BERT encoder.

    Pipeline: encode tokens with BERT, pool tokens into mention nodes and
    mentions into entity nodes, run GCN layers over a batched entity graph,
    then score every head/tail entity pair, additionally attending over the
    edge features of two-hop paths between the pair (if any).
    """

    def __init__(self, config):
        super(GAIN_BERT, self).__init__()
        self.config = config
        self.activation = nn.Tanh() if config.activation == 'tanh' else nn.ReLU()
        if config.use_entity_type:
            self.entity_type_emb = nn.Embedding(config.entity_type_num, config.entity_type_size,
                                                padding_idx=config.entity_type_pad)
        if config.use_entity_id:
            self.entity_id_emb = nn.Embedding(config.max_entity_num + 1, config.entity_id_size,
                                              padding_idx=config.entity_id_pad)
        self.bert = BertModel.from_pretrained(config.bert_path)
        if config.bert_fix:
            # Freeze the encoder when fine-tuning of BERT is disabled.
            for p in self.bert.parameters():
                p.requires_grad = False
        self.gcn_dim = config.gcn_dim
        # Node features are [BERT hidden | entity id emb | entity type emb].
        assert self.gcn_dim == config.bert_hid_size + config.entity_id_size + config.entity_type_size
        self.GCN_layers = nn.ModuleList([dglnn.GraphConv(self.gcn_dim, self.gcn_dim, norm='right', weight=True,
                                                         bias=True, activation=self.activation)
                                         for i in range(config.gcn_layers)])
        # Each entity keeps its input features plus one slice per GCN layer.
        self.bank_size = self.gcn_dim * (self.config.gcn_layers + 1)
        self.dropout = nn.Dropout(self.config.dropout)
        self.predict = nn.Sequential(
            nn.Linear(self.bank_size * 4 + self.gcn_dim * 5, self.bank_size * 2),
            self.activation,
            self.dropout,
            nn.Linear(self.bank_size * 2, config.relation_nums),
        )
        self.edge_layer = RelEdgeLayer(node_feat=self.gcn_dim, edge_feat=self.gcn_dim,
                                       activation=self.activation, dropout=config.dropout)
        self.path_info_mapping = nn.Linear(self.gcn_dim * 4, self.gcn_dim * 4)
        self.attention = Attention(self.bank_size * 2, self.gcn_dim * 4)

    def forward(self, **params):
        """Score all head/tail entity pairs of each document in the batch.

        params keys:
            words: [batch_size, max_length]
            src_lengths: [batch_size]
            mask: [batch_size, max_length]
            entity_type: [batch_size, max_length]
            entity_id: [batch_size, max_length]
            mention_id: [batch_size, max_length]
            distance: [batch_size, max_length]
            entity2mention_table: list of [local_entity_num, local_mention_num]
            graphs: list of DGLHeteroGraph
            h_t_pairs: [batch_size, h_t_limit, 2]
            ht_pair_distance: [batch_size, h_t_limit]

        Returns:
            [batch_size, h_t_limit, relation_nums] relation logits.
        """
        words = params['words']
        mask = params['mask']
        bsz, slen = words.size()
        # NOTE(review): tuple unpacking assumes an older transformers API where
        # BertModel.forward returns (sequence_output, pooled_output) -- confirm
        # the installed transformers version.
        encoder_outputs, sentence_cls = self.bert(input_ids=words, attention_mask=mask)
        if self.config.use_entity_type:
            encoder_outputs = torch.cat([encoder_outputs, self.entity_type_emb(params['entity_type'])], dim=-1)
        if self.config.use_entity_id:
            encoder_outputs = torch.cat([encoder_outputs, self.entity_id_emb(params['entity_id'])], dim=-1)
        # Pad the [CLS] vector with zeros so it matches the widened node width.
        sentence_cls = torch.cat(
            (sentence_cls, get_cuda(torch.zeros((bsz, self.config.entity_type_size + self.config.entity_id_size)))),
            dim=-1)
        # encoder_outputs: [batch_size, slen, bert_hid+type_size+id_size]
        # sentence_cls: [batch_size, bert_hid+type_size+id_size]
        graphs = params['graphs']
        mention_id = params['mention_id']
        features = None
        for i in range(len(graphs)):
            encoder_output = encoder_outputs[i]  # [slen, bert_hid]
            mention_num = torch.max(mention_id[i])
            mention_index = get_cuda(
                (torch.arange(mention_num) + 1).unsqueeze(1).expand(-1, slen))  # [mention_num, slen]
            mentions = mention_id[i].unsqueeze(0).expand(mention_num, -1)  # [mention_num, slen]
            select_metrix = (mention_index == mentions).float()  # [mention_num, slen]
            # Average word -> mention: row-normalize the selection matrix.
            word_total_numbers = torch.sum(select_metrix, dim=-1).unsqueeze(-1).expand(-1, slen)  # [mention_num, slen]
            select_metrix = torch.where(word_total_numbers > 0, select_metrix / word_total_numbers, select_metrix)
            x = torch.mm(select_metrix, encoder_output)  # [mention_num, bert_hid]
            # Prepend the document node ([CLS] pooled vector) as mention 0.
            x = torch.cat((sentence_cls[i].unsqueeze(0), x), dim=0)
            if features is None:
                features = x
            else:
                features = torch.cat((features, x), dim=0)
        # mention -> entity
        entity2mention_table = params['entity2mention_table']  # list of [entity_num, mention_num]
        entity_num = torch.max(params['entity_id'])
        global_info = get_cuda(torch.Tensor(bsz, self.gcn_dim))
        cur_idx = 0
        entity_graph_feature = None
        for i in range(len(graphs)):
            # Average mention -> entity.
            select_metrix = entity2mention_table[i].float()  # [local_entity_num, mention_num]
            select_metrix[0][0] = 1
            mention_nums = torch.sum(select_metrix, dim=-1).unsqueeze(-1).expand(-1, select_metrix.size(1))
            select_metrix = torch.where(mention_nums > 0, select_metrix / mention_nums, select_metrix)
            node_num = graphs[i].number_of_nodes('node')
            entity_representation = torch.mm(select_metrix, features[cur_idx:cur_idx + node_num])
            # Row 0 is the document node; it serves as the per-doc global vector.
            global_info[i] = features[cur_idx]
            cur_idx += node_num
            if entity_graph_feature is None:
                entity_graph_feature = entity_representation[1:]
            else:
                entity_graph_feature = torch.cat((entity_graph_feature, entity_representation[1:]), dim=0)
        entity_graphs = params['entity_graphs']
        entity_graph_big = dgl.batch(entity_graphs)
        output_features = [entity_graph_feature]
        for GCN_layer in self.GCN_layers:
            entity_graph_feature = GCN_layer(entity_graph_big, entity_graph_feature)
            output_features.append(entity_graph_feature)
        # Concatenate input + every layer's output into the entity "bank".
        output_features = torch.cat(output_features, dim=-1)
        # Side effect: writes edge feature 'h' onto entity_graph_big.
        self.edge_layer(entity_graph_big, entity_graph_feature)
        entity_bank = get_cuda(torch.Tensor(bsz, entity_num, self.bank_size))
        entity_graphs = dgl.unbatch(entity_graph_big)
        cur_idx = 0
        for i in range(len(entity_graphs)):
            node_num = entity_graphs[i].number_of_nodes()
            entity_bank[i, :node_num] = output_features[cur_idx:cur_idx + node_num]
            cur_idx += node_num
        h_t_pairs = params['h_t_pairs']
        # Shift 1-based entity ids to 0-based indices; keep 0 at 0 for padding.
        h_t_pairs = h_t_pairs + (h_t_pairs == 0).long() - 1  # [batch_size, h_t_limit, 2]
        h_t_limit = h_t_pairs.size(1)
        # [batch_size, h_t_limit, bank_size]
        h_entity_index = h_t_pairs[:, :, 0].unsqueeze(-1).expand(-1, -1, self.bank_size)
        t_entity_index = h_t_pairs[:, :, 1].unsqueeze(-1).expand(-1, -1, self.bank_size)
        # [batch_size, h_t_limit, bank_size]
        h_entity = torch.gather(input=entity_bank, dim=1, index=h_entity_index)
        t_entity = torch.gather(input=entity_bank, dim=1, index=t_entity_index)
        global_info = global_info.unsqueeze(1).expand(-1, h_t_limit, -1)
        path_info = get_cuda(torch.zeros((bsz, h_t_limit, self.gcn_dim * 4)))
        relation_mask = params['relation_mask']
        path_table = params['path_table']
        for i in range(len(entity_graphs)):
            path_t = path_table[i]
            for j in range(h_t_limit):
                if relation_mask is not None and relation_mask[i, j].item() == 0:
                    break
                h = h_t_pairs[i, j, 0].item()
                t = h_t_pairs[i, j, 1].item()
                # for evaluate
                if relation_mask is None and h == 0 and t == 0:
                    continue
                if (h + 1, t + 1) in path_t:
                    v = [val - 1 for val in path_t[(h + 1, t + 1)]]
                elif (t + 1, h + 1) in path_t:
                    v = [val - 1 for val in path_t[(t + 1, h + 1)]]
                else:
                    # BUG FIX: the original printed `v` here, but `v` is unbound
                    # on this branch, so the diagnostic itself raised NameError.
                    print(h, t)
                    print(entity_graphs[i].number_of_nodes())
                    print(entity_graphs[i].all_edges())
                    print(path_table)
                    print(h_t_pairs)
                    print(relation_mask)
                    # Explicit raise (instead of `assert 1 == 2`) keeps the same
                    # exception type but survives `python -O`.
                    raise AssertionError('entity pair (%d, %d) missing from path_table' % (h, t))
                middle_node_num = len(v)
                if middle_node_num == 0:
                    continue
                # forward direction: h -> v and v -> t edge features
                edge_ids = get_cuda(entity_graphs[i].edge_ids([h for _ in range(middle_node_num)], v))
                forward_first = torch.index_select(entity_graphs[i].edata['h'], dim=0, index=edge_ids)
                edge_ids = get_cuda(entity_graphs[i].edge_ids(v, [t for _ in range(middle_node_num)]))
                forward_second = torch.index_select(entity_graphs[i].edata['h'], dim=0, index=edge_ids)
                # backward direction: t -> v and v -> h edge features
                edge_ids = get_cuda(entity_graphs[i].edge_ids([t for _ in range(middle_node_num)], v))
                backward_first = torch.index_select(entity_graphs[i].edata['h'], dim=0, index=edge_ids)
                edge_ids = get_cuda(entity_graphs[i].edge_ids(v, [h for _ in range(middle_node_num)]))
                backward_second = torch.index_select(entity_graphs[i].edata['h'], dim=0, index=edge_ids)
                tmp_path_info = torch.cat((forward_first, forward_second, backward_first, backward_second), dim=-1)
                # Attend over candidate two-hop paths, conditioned on the pair.
                _, attn_value = self.attention(torch.cat((h_entity[i, j], t_entity[i, j]), dim=-1), tmp_path_info)
                path_info[i, j] = attn_value
            entity_graphs[i].edata.pop('h')
        path_info = self.dropout(
            self.activation(
                self.path_info_mapping(path_info)
            )
        )
        predictions = self.predict(torch.cat(
            (h_entity, t_entity, torch.abs(h_entity - t_entity), torch.mul(h_entity, t_entity), global_info, path_info),
            dim=-1))
        return predictions
class Attention(nn.Module):
    """Bilinear attention pooling of target rows against one source vector."""

    def __init__(self, src_size, trg_size):
        super().__init__()
        # Attribute names kept stable so state_dict keys do not change.
        self.W = nn.Bilinear(src_size, trg_size, 1)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, src, trg, attention_mask=None):
        """
        src: [src_size] single query vector
        trg: [middle_node, trg_size] candidate rows
        Returns (weights, pooled) with a leading dimension squeezed away.
        """
        num_rows = trg.size(0)
        tiled_src = src.unsqueeze(0).expand(num_rows, -1)
        raw_score = self.W(tiled_src, trg)
        # NOTE(review): raw_score has shape [middle_node, 1]; softmax over
        # dim=-1 normalises each length-1 row to 1.0, so the pooled value
        # degenerates to a plain sum over trg rows -- confirm this is the
        # intended behaviour before changing the dim.
        weights = self.softmax(raw_score)
        pooled = torch.mm(weights.t(), trg)
        return weights.squeeze(0), pooled.squeeze(0)
class BiLSTM(nn.Module):
    """Bidirectional LSTM encoder over padded, batch-first sequences."""

    def __init__(self, input_size, config):
        super().__init__()
        self.config = config
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=config.lstm_hidden_size,
                            num_layers=config.nlayers, batch_first=True,
                            bidirectional=True)
        self.in_dropout = nn.Dropout(config.dropout)
        self.out_dropout = nn.Dropout(config.dropout)

    def forward(self, src, src_lengths):
        '''
        src: [batch_size, slen, input_size]
        src_lengths: [batch_size]
        Returns (outputs, (h_t, c_t)): outputs is [batch_size, slen, 2*hid];
        h_t / c_t concatenate the last layer's forward and backward final
        states, restored to the caller's original batch order.
        '''
        self.lstm.flatten_parameters()
        bsz, slen, input_size = src.size()
        src = self.in_dropout(src)
        # Sort by length (descending), as required by enforce_sorted=True.
        new_src_lengths, sort_index = torch.sort(src_lengths, dim=-1, descending=True)
        new_src = torch.index_select(src, dim=0, index=sort_index)
        # NOTE(review): newer torch versions require the lengths tensor on CPU
        # here -- confirm against the pinned torch version.
        packed_src = nn.utils.rnn.pack_padded_sequence(new_src, new_src_lengths, batch_first=True, enforce_sorted=True)
        packed_outputs, (src_h_t, src_c_t) = self.lstm(packed_src)
        outputs, _ = nn.utils.rnn.pad_packed_sequence(packed_outputs, batch_first=True,
                                                      padding_value=self.config.word_pad)
        # Undo the length sort so rows line up with the input batch order.
        unsort_index = torch.argsort(sort_index)
        outputs = torch.index_select(outputs, dim=0, index=unsort_index)
        # [nlayers * 2, bsz, hid] -> [nlayers, directions, bsz, hid]
        src_h_t = src_h_t.view(self.config.nlayers, 2, bsz, self.config.lstm_hidden_size)
        src_c_t = src_c_t.view(self.config.nlayers, 2, bsz, self.config.lstm_hidden_size)
        # Concatenate the top layer's forward (index 0) and backward (index 1) states.
        output_h_t = torch.cat((src_h_t[-1, 0], src_h_t[-1, 1]), dim=-1)
        output_c_t = torch.cat((src_c_t[-1, 0], src_c_t[-1, 1]), dim=-1)
        output_h_t = torch.index_select(output_h_t, dim=0, index=unsort_index)
        output_c_t = torch.index_select(output_c_t, dim=0, index=unsort_index)
        outputs = self.out_dropout(outputs)
        output_h_t = self.out_dropout(output_h_t)
        output_c_t = self.out_dropout(output_c_t)
        return outputs, (output_h_t, output_c_t)
class RelGraphConvLayer(nn.Module):
    r"""Relational graph convolution layer.
    Parameters
    ----------
    in_feat : int
        Input feature size.
    out_feat : int
        Output feature size.
    rel_names : list[str]
        Relation names.
    num_bases : int, optional
        Number of bases. If is none, use number of relations. Default: None.
    weight : bool, optional
        True if a linear layer is applied after message passing. Default: True
    bias : bool, optional
        True if bias is added. Default: True
    activation : callable, optional
        Activation function. Default: None
    self_loop : bool, optional
        True to include self loop message. Default: False
    dropout : float, optional
        Dropout rate. Default: 0.0
    """
    def __init__(self,
                 in_feat,
                 out_feat,
                 rel_names,
                 num_bases,
                 *,
                 weight=True,
                 bias=True,
                 activation=None,
                 self_loop=False,
                 dropout=0.0):
        super(RelGraphConvLayer, self).__init__()
        self.in_feat = in_feat
        self.out_feat = out_feat
        self.rel_names = rel_names
        self.num_bases = num_bases
        self.bias = bias
        self.activation = activation
        self.self_loop = self_loop
        # One GraphConv per relation type; the per-relation weight matrices are
        # injected at call time through mod_kwargs (hence weight=False here).
        self.conv = dglnn.HeteroGraphConv({
            rel: dglnn.GraphConv(in_feat, out_feat, norm='right', weight=False, bias=False)
            for rel in rel_names
        })
        self.use_weight = weight
        # Basis decomposition is only useful when there are fewer bases than relations.
        self.use_basis = num_bases < len(self.rel_names) and weight
        if self.use_weight:
            if self.use_basis:
                self.basis = dglnn.WeightBasis((in_feat, out_feat), num_bases, len(self.rel_names))
            else:
                self.weight = nn.Parameter(torch.Tensor(len(self.rel_names), in_feat, out_feat))
                nn.init.xavier_uniform_(self.weight, gain=nn.init.calculate_gain('relu'))
        # bias
        if bias:
            self.h_bias = nn.Parameter(torch.Tensor(out_feat))
            nn.init.zeros_(self.h_bias)
        # weight for self loop
        if self.self_loop:
            self.loop_weight = nn.Parameter(torch.Tensor(in_feat, out_feat))
            nn.init.xavier_uniform_(self.loop_weight,
                                    gain=nn.init.calculate_gain('relu'))
        self.dropout = nn.Dropout(dropout)

    def forward(self, g, inputs):
        """Forward computation
        Parameters
        ----------
        g : DGLHeteroGraph
            Input graph.
        inputs : dict[str, torch.Tensor]
            Node feature for each node type.
        Returns
        -------
        dict[str, torch.Tensor]
            New node features for each node type.
        """
        g = g.local_var()
        if self.use_weight:
            weight = self.basis() if self.use_basis else self.weight
            # Split the stacked [num_rels, in, out] weight into one matrix per relation.
            wdict = {self.rel_names[i]: {'weight': w.squeeze(0)}
                     for i, w in enumerate(torch.split(weight, 1, dim=0))}
        else:
            wdict = {}
        hs = self.conv(g, inputs, mod_kwargs=wdict)

        def _apply(ntype, h):
            # Post-aggregation: optional self-loop, bias, activation, then dropout.
            if self.self_loop:
                h = h + torch.matmul(inputs[ntype], self.loop_weight)
            if self.bias:
                h = h + self.h_bias
            if self.activation:
                h = self.activation(h)
            return self.dropout(h)
        return {ntype: _apply(ntype, h) for ntype, h in hs.items()}
class RelEdgeLayer(nn.Module):
    """Derive an edge feature from the concatenated endpoint node features."""

    def __init__(self,
                 node_feat,
                 edge_feat,
                 activation,
                 dropout=0.0):
        super(RelEdgeLayer, self).__init__()
        # Attribute names kept identical so checkpoints keep loading.
        self.node_feat = node_feat
        self.edge_feat = edge_feat
        self.activation = activation
        self.dropout = nn.Dropout(dropout)
        self.mapping = nn.Linear(node_feat * 2, edge_feat)

    def forward(self, g, inputs):
        """Write an 'h' feature onto every edge of g (in place); returns None."""
        g.ndata['h'] = inputs  # [total_mention_num, node_feat]

        def _edge_feature(edges):
            endpoints = torch.cat((edges.src['h'], edges.dst['h']), dim=-1)
            return {'h': self.dropout(self.activation(self.mapping(endpoints)))}

        g.apply_edges(_edge_feature)
        # Node features were only needed transiently to build edge features.
        g.ndata.pop('h')
| python | MIT | 178344cf00789c7ba05cfe4dca90df4b17c2caa9 | 2026-01-05T07:13:40.516065Z | false |
milsto/robust-kalman | https://github.com/milsto/robust-kalman/blob/0b241f7c5648efa5b8cba8d71623a0ff0b6ae3a0/robust_kalman/utils.py | robust_kalman/utils.py | """
Utilities for robust Kalman implementation and testing.
"""
import numpy as np
class HuberScore:
    """Robust Huber score function: quadratic near zero, linear beyond delta."""

    def __init__(self, delta=1.5):
        # Threshold where the score switches from quadratic to linear growth.
        self._delta = delta

    def evaluate(self, z):
        """Return the Huber score of residual z (scalar)."""
        if abs(z) >= self._delta:
            # Linear tail, shifted so the two pieces join continuously.
            return self._delta * abs(z) - pow(self._delta, 2) / 2.0
        else:
            return pow(z, 2) / 2.0

    def derivative(self, z):
        """Not implemented; optimization uses derivative-free Nelder-Mead."""
        # BUG FIX: the original `raise NotImplemented` raised a TypeError
        # (NotImplemented is a constant, not an exception class).
        raise NotImplementedError
class VariablesHistory:
    """Accumulate named per-variable time series for later plotting."""

    def __init__(self):
        # Attribute name kept identical to the original implementation.
        self._variables_history = dict()

    def __getitem__(self, item):
        """Return the recorded list for *item* (KeyError if never updated)."""
        return self._variables_history[item]

    def update(self, variable_name, value):
        """Append *value* to the series of *variable_name*, creating it on first use."""
        self._variables_history.setdefault(variable_name, []).append(value)
class WindowStatisticsEstimator:
    """Sliding-window estimates of the mean and variance of a scalar signal."""

    def __init__(self, win_size=25):
        self._win_size = win_size
        # Ring buffer; unfilled slots contribute zeros to the statistics.
        self._buffer = np.zeros((win_size,), np.float32)
        self._head = 0

    def update(self, value):
        """Insert a new sample, evicting the oldest one (circular buffer)."""
        slot = self._head
        self._buffer[slot] = value
        self._head = (slot + 1) % self._win_size

    def mean(self):
        """Mean over the whole window (zeros count until the window fills)."""
        return self._buffer.mean()

    def variance(self):
        """Population variance over the whole window."""
        return self._buffer.var()
milsto/robust-kalman | https://github.com/milsto/robust-kalman/blob/0b241f7c5648efa5b8cba8d71623a0ff0b6ae3a0/robust_kalman/__init__.py | robust_kalman/__init__.py | from .robust_kalman import RobustKalman
from . import utils
| python | MIT | 0b241f7c5648efa5b8cba8d71623a0ff0b6ae3a0 | 2026-01-05T07:13:41.231759Z | false |
milsto/robust-kalman | https://github.com/milsto/robust-kalman/blob/0b241f7c5648efa5b8cba8d71623a0ff0b6ae3a0/robust_kalman/robust_kalman.py | robust_kalman/robust_kalman.py | """
Robust Kalman filter implementation.
Author: Milos Stojanovic (github: milsto)
"""
import numpy as np
from scipy.optimize import minimize
from .utils import HuberScore
class RobustKalman():
    """Robust Kalman filter for estimation immune to outliers.
    The implementation is based on rewriting the classical linear recursive Kalman approach as a linear regression
    problem. The linear regression representation is equivalent to the original problem when it is solved as a least
    square minimization problem. To implement robust Kalman estimation, instead of the least square criterion, some
    other robust score function is used. The robust score function is responsible for suppressing outliers during error
    calculations by having a less steep derivative when the error is too large (it is assumed that in that case an
    outlier is observed).
    Usage of robust estimation is controlled by the use_robust_estimation flag. When it is turned off the estimator
    behaves as a classical recursive Kalman filter. Estimation of the state covariance matrix P is always done by the
    classical Kalman approach and is a (good) approximation in the cases when a robust score function is used. The
    robust estimation approach is slower than the standard one and to solve the nonlinear minimization problem the
    iterative Nelder-Mead algorithm is used.
    A prototype of adaptive measurement variance estimation is also available with use_adaptive_statistics. The method
    is based on estimating the variance from a history of the noise samples. Be aware that in this case the Kalman
    procedure is not purely recursive anymore but uses memory to store previous samples.
    """
    # NOTE(review): the default robust_score instance below is created once at
    # definition time and shared by all instances; HuberScore is stateless, so
    # this is harmless today, but a stateful score would need a None default.
    def __init__(self, F, B, H, x0, P0, Q0, R0, use_robust_estimation=False, use_adaptive_statistics=False, robust_score=HuberScore(delta=1.5)):
        """Initialize robust Kalman. All input matrices are copied.
        Args:
            F: State transition matrix
            B: Input transition matrix (may be None if model has no inputs)
            H: Observation matrix
            x0: Initial state vector
            P0: Initial state covariance matrix
            Q0: (Initial) state noise covariance
            R0: (Initial) observation noise covariance
            use_robust_estimation: True if robust estimation procedure should be used
            use_adaptive_statistics: True if adaptive robust estimation of noise variance should be used
            robust_score: Score function for robust estimation. (1.5)-Huber is the default.
        """
        self.F = F.copy()
        self.B = B.copy() if B is not None else None
        self.H = H.copy()
        self.x = x0.copy()
        self.P = P0.copy()
        self.Q = Q0.copy()
        self.R = R0.copy()
        self.use_robust_estimation = use_robust_estimation
        self.use_adaptive_statistics = use_adaptive_statistics
        # Used for adaptive noise estimation
        self.history_inovation = list()
        self.r_mean_est = 0.0
        self.r_var_est = 0.0
        self.robust_score = robust_score

    def time_update(self, inputs=None):
        """
        Time propagation of the system model.
        Args:
            inputs: Model inputs if any.
        """
        if inputs is None:
            self.x = np.matmul(self.F, self.x)
        else:
            self.x = np.matmul(self.F, self.x) + np.matmul(self.B, inputs)
        # A priori covariance: P = F P F^T + Q
        self.P = np.matmul(np.matmul(self.F, self.P), self.F.T) + self.Q

    def measurement_update(self, measurements):
        """
        Measurement update. Note that time_update must precede the measurement
        update for valid estimation results.
        Args:
            measurements: Observations of measured quantities.
        """
        # Residual or innovation
        self.inovation = measurements - np.matmul(self.H, self.x)
        # Innovation covariance matrix
        Pinov = np.matmul(np.matmul(self.H, self.P), self.H.T) + self.R
        # Kalman gain K = Pxy * Pinov^-1
        K = np.matmul(np.matmul(self.P, self.H.T), np.linalg.inv(Pinov))
        if self.use_robust_estimation:
            # Represent Kalman filter as linear regression problem
            # Y = X * x_est + zeta
            # This is achieved by stacking system and measurement update equations in a matrix equation
            # and then manipulating linear algebra to get the linear regression form (see reference papers for details).
            # If we use a simple square function as robust_score this update is
            # equivalent to standard recursive linear Kalman (it is equivalent to least square minimization).
            # But this approach is a bit slower so the standard implementation is used in the other case.
            # Create block matrix representing covariance of error in linear regression representation of Kalman
            epsilon_covariance = np.bmat([[self.P, np.zeros((self.P.shape[0], self.R.shape[1]))],
                                          [np.zeros((self.R.shape[0], self.P.shape[1])), self.R]])
            # Factorize covariance to S * S^T form with Cholesky decomposition.
            S = np.linalg.cholesky(epsilon_covariance)
            Sinv = np.linalg.inv(S)
            # self.x <=> F(k-1) * x_est(k-1|k-1)
            Y = np.matmul(Sinv, np.vstack((self.x, measurements)))
            #        |I|
            # S^-1 * |H|
            X = np.matmul(Sinv, np.vstack((np.eye(self.x.shape[0]), self.H)))
            # Exact solution to non-robust Kalman linear regression problem (for debugging)
            # exact = np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T, X)), X.T), Y)
            # TODO: Expose the possibility of using other optimization methods in interface.
            res = minimize(lambda xx: self._m_estimate_criterion(xx, Y, X), self.x, method='nelder-mead')
            self.x = res.x[np.newaxis].T
        else:
            # Linear state update
            self.x = self.x + np.matmul(K, self.inovation)
        # State prediction covariance update
        # This covariance update is used for the robust estimator also, and in that
        # case it is an approximation.
        self.P = self.P - np.matmul(np.matmul(K, self.H), self.P)
        if self.use_adaptive_statistics:
            assert self.R.shape == (1, 1), 'Current implementation for robust variance estimation tested only for ' \
                                           'models with one observable.'
            self.history_inovation.append(self.inovation)
            if len(self.history_inovation) < 6:
                # Too few samples for a meaningful robust estimate; keep defaults.
                self.r_mean_est = 0.0
                self.r_var_est = self.R[0, 0]
            else:
                # Adaptive estimate of R
                r_arr = np.array(self.history_inovation, dtype=np.float32)
                # Robust scale via median absolute deviation (0.6745 normalizes to sigma for Gaussians).
                d = np.median(np.fabs(r_arr - np.median(r_arr)) / 0.6745)
                self.r_mean_est = minimize(lambda xx: self._m_estimate_r_criterion(xx, r_arr, d), self.history_inovation[-1], method='nelder-mead').x
                self.r_var_est = d**2 - np.matmul(np.matmul(self.H, self.P), self.H.T)
                self.R[0, 0] = self.r_var_est

    @property
    def current_estimate(self):
        # Current a posteriori state estimate.
        return self.x

    @property
    def current_estimate_covariance(self):
        # Current state estimate covariance.
        return self.P

    @property
    def current_inovation(self):
        # Innovation of the most recent measurement update.
        return self.inovation

    def _m_estimate_criterion(self, x, Y, X):
        """Criterion for robust state estimation"""
        crit = 0.0
        for i in range(Y.shape[0]):
            crit += self.robust_score.evaluate(Y[i, :] - np.matmul(X[i, :], x))
            #crit += (Y[i, :] - np.matmul(X[i, :], x))**2
        return crit

    def _m_estimate_r_criterion(self, x, r_est_arr, d):
        """Criterion for robust variance estimation in adaptive procedure."""
        crit = 0.0
        for i in range(len(r_est_arr)):
            crit += self.robust_score.evaluate((r_est_arr[i] - x) / d)
        return crit
| python | MIT | 0b241f7c5648efa5b8cba8d71623a0ff0b6ae3a0 | 2026-01-05T07:13:41.231759Z | false |
milsto/robust-kalman | https://github.com/milsto/robust-kalman/blob/0b241f7c5648efa5b8cba8d71623a0ff0b6ae3a0/examples/example_advanced.py | examples/example_advanced.py | """
Script to evaluate the robust and adaptive Kalman estimator.
Primarily used to draw conclusions and produce plots for the paper written
for Stochastic System Theory (MSc) course at University of Belgrade, School of Electrical Engineering.
Author: Milos Stojanovic (github: milsto)
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import sys
sys.path.insert(0, '..')
from robust_kalman import RobustKalman
from robust_kalman.utils import HuberScore, VariablesHistory, WindowStatisticsEstimator
# Fixed seed so experiment runs (and paper plots) are reproducible.
np.random.seed(256)

# Matplotlib/LaTeX styling used for the paper figures.
params = {
    'axes.labelsize': 10,
    'font.size': 10,
    'legend.fontsize': 10,
    'xtick.labelsize': 10,
    'ytick.labelsize': 10,
    'text.usetex': True,
    'font.family': 'Times'
}
matplotlib.rcParams.update(params)

# Plot robust score function
# t = np.linspace(-50, 50, 1000)
# h = HuberScore()
# hv = np.vectorize(h.evaluate)
# plt.plot(t, hv(t))
# plt.show()

# Example 1
# dt = 0.01
# end_time = 1.0
# F = np.array([[1, dt, dt**2 / 2], [0, 1, dt], [0, 0, 1]], np.float32)
# G = np.array([[0, 0, 1]], np.float32).T
# H = np.array([[1, 0, 0]], np.float32)
# x0 = np.array([[0.1, 0.1, 0.1]], np.float32).T
# P0 = np.eye(3, dtype=np.float32) * 0.01
# sigma_process = 10.0
# sigma_measure = 1.0
# x0_kalman = np.array([[0, 0, 0]], np.float32).T

# Example 2: constant-velocity model driven by acceleration noise,
# position-only measurements.
dt = 0.01
end_time = 1.0
F = np.array([[1, dt], [0, 1]], np.float32)
G = np.array([[0.5 * dt**2, dt]], np.float32).T
H = np.array([[1, 0]], np.float32)
x0 = np.array([[0.01, 0.01]], np.float32).T
P0 = np.ones((2, 2), np.float32) * 0.001
sigma_process = 10.0
sigma_measure = 0.1
x0_kalman = np.array([[0, 0]], np.float32).T

# Spike outliers vs. Gaussian-mixture outliers; toggle for the two experiments.
IS_SPIKE_EXPERIMENT = True
PLOT_ADAPTIVE_CEE = True

Q0 = np.matmul(G, G.T) * sigma_process**2
R0 = np.eye(1, dtype=np.float32) * sigma_measure**2

# Three estimators compared: classical, robust, robust + adaptive R.
kalman_linear = RobustKalman(F, None, H, x0_kalman, P0, Q0, R0, use_robust_estimation=False, use_adaptive_statistics=False)
kalman_robust = RobustKalman(F, None, H, x0_kalman, P0, Q0, R0, use_robust_estimation=True, use_adaptive_statistics=False)
kalman_robust_stat = RobustKalman(F, None, H, x0_kalman, P0, Q0, R0, use_robust_estimation=True, use_adaptive_statistics=True)
wstat_q = WindowStatisticsEstimator(win_size=25)
wstat_r = WindowStatisticsEstimator(win_size=25)

x = x0
z = np.matmul(H, x0)
# Cumulative estimation error (CEE) accumulators for each estimator.
cee_x = 0.0
cee_xres = 0.0
cee_xres_stat = 0.0
step = 2
t_axis = np.arange(0, end_time, dt)
history = VariablesHistory()

for t in t_axis:
    history.update('x', x)
    history.update('z', z)
    history.update('x_kalman', kalman_linear.current_estimate)
    history.update('x_kalman_robust', kalman_robust.current_estimate)
    history.update('x_kalman_robust_stat', kalman_robust_stat.current_estimate)
    # Relative error per step; small epsilon guards against division by zero.
    cee_x += (np.linalg.norm(kalman_linear.current_estimate - x) / (np.linalg.norm(x) + 0.0001)) / step
    cee_xres += (np.linalg.norm(kalman_robust.current_estimate - x) / (np.linalg.norm(x) + 0.0001)) / step
    cee_xres_stat += (np.linalg.norm(kalman_robust_stat.current_estimate - x) / (np.linalg.norm(x) + 0.0001)) / step
    history.update('cee_x_history', cee_x)
    history.update('cee_xres_history', cee_xres)
    history.update('cee_xres_stat_history', cee_xres_stat)
    history.update('r_mean_est', kalman_robust_stat.r_mean_est)
    history.update('r_var_est', kalman_robust_stat.r_var_est)
    q = np.random.normal(0.0, sigma_process, size=(1, 1))
    if not IS_SPIKE_EXPERIMENT:
        # Gaussian mixture: mostly nominal noise, 15% wide-variance outliers.
        r = 0.85 * np.random.normal(0.0, sigma_measure, size=(1, 1)) + 0.15 * np.random.normal(0.0, 5.0, size=(1, 1))
    else:
        # Spikes: ~10% of samples get a large random offset of random sign.
        rare_event = 1 if np.random.uniform(0, 1.0) > 0.9 else 0
        r = np.random.normal(0.0, sigma_measure, size=(1, 1)) + np.random.choice([-1.0, 1.0]) * np.random.uniform(1.0, 1.5) * rare_event #+ 0.15 * np.random.normal(0.0, 5.0, size=(1, 1))
    wstat_q.update(q)
    wstat_r.update(r)
    history.update('wstat_r_mean', wstat_r.mean())
    history.update('wstat_r_var', wstat_r.variance())
    # Propagate the true system and produce the noisy measurement.
    x = np.matmul(F, x) + np.matmul(G, q)
    z = np.matmul(H, x) + r
    kalman_linear.time_update()
    kalman_linear.measurement_update(z)
    kalman_robust.time_update()
    kalman_robust.measurement_update(z)
    kalman_robust_stat.time_update()
    kalman_robust_stat.measurement_update(z)
    history.update('inov', kalman_robust.current_inovation)
    step += 1

# Figure: position (x0) truth, measurements and estimates.
plt.figure(figsize=[15/2.54, 10/2.54])
plt.plot(t_axis, [x[0, 0] for x in history['x']], 'g', label='$x_0\ (true\ state)$')
plt.plot(t_axis, [z[0, 0] for z in history['z']], 'b', linewidth=0.5, label='$z_0\ (measurement)$')
plt.plot(t_axis, [k[0, 0] for k in history['x_kalman']], 'm', label='$\hat{x}^{Kalman}_0$')
plt.plot(t_axis, [k[0, 0] for k in history['x_kalman_robust']], 'r', label=r'$\hat{x}^\mathbf{robust\ Kalman}_0$')
plt.xlabel(r'$t [\mathrm{s}]$')
plt.ylabel(r'$x_0 [\mathrm{m}]$')
plt.axis('tight')
ax = plt.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
plt.legend()
if IS_SPIKE_EXPERIMENT:
    plt.savefig('x0_spike_outliers.pdf')
else:
    plt.savefig('x0_normal_outliers.pdf')

# Figure: velocity (x1) truth and estimates.
plt.figure(figsize=[15/2.54, 10/2.54])
plt.plot(t_axis, [x[1, 0] for x in history['x']], 'g', label='$x_1$')
plt.plot(t_axis, [k[1, 0] for k in history['x_kalman']], 'm', label='$\hat{x}^{Kalman}_1$')
plt.plot(t_axis, [k[1, 0] for k in history['x_kalman_robust']], 'r', label='$\hat{x}^{robust\ Kalman}_1$')
plt.xlabel(r'$t [\mathrm{s}]$')
plt.ylabel(r'$x_1 [\mathrm{m/s}]$')
plt.axis('tight')
ax = plt.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
plt.legend()
if IS_SPIKE_EXPERIMENT:
    plt.savefig('x1_spike_outliers.pdf')
else:
    plt.savefig('x1_normal_outliers.pdf')

# Figure: cumulative estimation error, averaged over elapsed steps.
plt.figure(figsize=[15/2.54, 10/2.54])
plt.plot(t_axis, history['cee_x_history'] / np.arange(1, len(history['cee_x_history']) + 1, 1), 'm', label='$\mathrm{CEE}_{Kalman}$')
plt.plot(t_axis, history['cee_xres_history'] / np.arange(1, len(history['cee_xres_history']) + 1, 1), 'r', label='$\mathrm{CEE}_{robust\ Kalman}$')
if PLOT_ADAPTIVE_CEE:
    plt.plot(t_axis, history['cee_xres_stat_history'] / np.arange(1, len(history['cee_xres_stat_history']) + 1, 1), 'b', label='$\mathrm{CEE}_{robust\ adaptive\ Kalman}$')
plt.xlabel(r'$t [\mathrm{s}]$')
plt.ylabel(r'$\mathrm{CEE}$')
plt.axis('tight')
ax = plt.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
plt.legend()
if IS_SPIKE_EXPERIMENT:
    plt.savefig('cee_spike_outliers.pdf')
else:
    plt.savefig('cee_normal_outliers.pdf')

# Figure: innovation sequence of the robust estimator (diagnostic only).
plt.figure(figsize=[15/2.54, 10/2.54])
plt.plot(t_axis, [k[0, 0] for k in history['inov']], 'k')
plt.title('inovation')

# Figure: windowed vs adaptive estimate of the measurement-noise mean.
plt.figure(figsize=[15/2.54, 10/2.54])
plt.plot(t_axis, history['wstat_r_mean'], 'k', label=r'$\mathrm{E}\left\{r_{windowed}\right\}$')
plt.plot(t_axis, history['r_mean_est'], 'b', label=r'$\mathrm{E}\left\{\hat{r}_{est}\right\}$')
plt.xlabel(r'$t [\mathrm{s}]$')
plt.ylabel(r'$\mathrm{E}\left\{r\right\} [\mathrm{m}]$')
plt.axis('tight')
ax = plt.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
plt.legend()
if IS_SPIKE_EXPERIMENT:
    plt.savefig('r_mean_spike_outliers.pdf')
else:
    plt.savefig('r_mean_normal_outliers.pdf')

# Figure: windowed vs adaptive estimate of the measurement-noise variance.
plt.figure(figsize=[15/2.54, 10/2.54])
plt.plot(t_axis, history['wstat_r_var'], 'k', label=r'$\mathrm{Var}\left\{r_{windowed}\right\}$')
plt.plot(t_axis, history['r_var_est'], 'b', label=r'$\mathrm{Var}\left\{\hat{r}_{est}\right\}$')
plt.xlabel(r'$t [\mathrm{s}]$')
plt.ylabel(r'$\mathrm{Var}\left\{r\right\} [\mathrm{m^2}]$')
plt.axis('tight')
ax = plt.gca()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
plt.legend()
if IS_SPIKE_EXPERIMENT:
    plt.savefig('r_variance_spike_outliers.pdf')
else:
    plt.savefig('r_variance_normal_outliers.pdf')

plt.show()
| python | MIT | 0b241f7c5648efa5b8cba8d71623a0ff0b6ae3a0 | 2026-01-05T07:13:41.231759Z | false |
milsto/robust-kalman | https://github.com/milsto/robust-kalman/blob/0b241f7c5648efa5b8cba8d71623a0ff0b6ae3a0/examples/example_simple.py | examples/example_simple.py | """
Simple but fully functional example for usage of the RobustKalman implementation.
The system model is defined, system evaluation and estimation loop is implemented and results are plotted.
Author: Milos Stojanovic (github: milsto)
"""
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.insert(0, '..')
from robust_kalman import RobustKalman
from robust_kalman.utils import HuberScore, VariablesHistory, WindowStatisticsEstimator
# Define a linear state space model: constant-velocity kinematics driven by
# acceleration noise, with position-only measurements.
dt = 0.01
end_time = 1.0
F = np.array([[1, dt], [0, 1]], np.float32)
G = np.array([[0.5 * dt**2, dt]], np.float32).T
H = np.array([[1, 0]], np.float32)
x0 = np.array([[0.01, 0.01]], np.float32).T
P0 = np.ones((2, 2), np.float32) * 0.001
sigma_process = 10.0
sigma_measure = 0.1
x0_kalman = np.array([[0, 0]], np.float32).T
Q0 = np.matmul(G, G.T) * sigma_process**2
R0 = np.eye(1, dtype=np.float32) * sigma_measure**2

# Create instances of the robust Kalman filter (classical vs robust variant)
kalman_linear = RobustKalman(F, None, H, x0_kalman, P0, Q0, R0, use_robust_estimation=False)
kalman_robust = RobustKalman(F, None, H, x0_kalman, P0, Q0, R0, use_robust_estimation=True)

# Initialize
x = x0
z = np.matmul(H, x0)
t_axis = np.arange(0, end_time, dt)

# Use this utility to track variables over time for plotting
history = VariablesHistory()

for t in t_axis:
    history.update('x', x)
    history.update('z', z)
    history.update('x_kalman', kalman_linear.current_estimate)
    history.update('x_kalman_robust', kalman_robust.current_estimate)
    q = np.random.normal(0.0, sigma_process, size=(1, 1))
    # ~10% of the measurements are corrupted by a large spike of random sign.
    rare_event = 1 if np.random.uniform(0, 1.0) > 0.9 else 0
    r = np.random.normal(0.0, sigma_measure, size=(1, 1)) + np.random.choice([-1.0, 1.0]) * np.random.uniform(1.0, 1.5) * rare_event
    # Propagate the true system and produce the noisy measurement.
    x = np.matmul(F, x) + np.matmul(G, q)
    z = np.matmul(H, x) + r
    kalman_linear.time_update()
    kalman_linear.measurement_update(z)
    kalman_robust.time_update()
    kalman_robust.measurement_update(z)

# Plot truth, measurements and both estimates for the position component.
plt.plot(t_axis, [x[0, 0] for x in history['x']], 'g', label='$x_0\ (true\ state)$')
plt.plot(t_axis, [z[0, 0] for z in history['z']], 'b', linewidth=0.5, label='$z_0\ (measurement)$')
plt.plot(t_axis, [k[0, 0] for k in history['x_kalman']], 'm', label='$\hat{x}^{Kalman}_0$')
plt.plot(t_axis, [k[0, 0] for k in history['x_kalman_robust']], 'r', label='$\hat{x}^{robust\ Kalman}_0$')
plt.show()
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/tools/train.py | tools/train.py | """
Main Training Script
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import sys
import os
import torch
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
from pointcept.engines.defaults import (
default_argument_parser,
default_config_parser,
default_setup,
)
from pointcept.engines.train import TRAINERS
from pointcept.engines.launch import launch
def main_worker(cfg):
    """Per-process worker: finalize the config, then build and run the trainer."""
    cfg = default_setup(cfg)
    TRAINERS.build(dict(type=cfg.train.type, cfg=cfg)).train()
def main():
    """Configure and launch distributed ScanNet training for CDSegNet/PTv3_CNF."""
    dataset = "scannet"  # {scannet, scannet200, nuscenes}
    config = "CDSegNet"  # {CDSegNet, PTv3_CNF}
    num_gpus = 2
    config_file = f"../configs/{dataset}/{config}.py"
    # the path of saving results
    options = {'save_path': f'../exp/{dataset}/{config}'}
    args = default_argument_parser().parse_args()
    args.config_file = config_file
    args.num_gpus = num_gpus
    args.options = options
    cfg = default_config_parser(args.config_file, args.options)
    # the number of GPUs
    cfg.num_gpus = num_gpus
    # checkpoint path: resume from the last saved model of a previous run
    weight = f"../exp/{dataset}/{config}/model/model_last.pth"
    cfg.weight = weight
    cfg.resume = True
    # After {save_freq_threshold} epochs, the checkpoint is saved every {save_freq} epochs.
    save_freq = 1
    save_freq_threshold = 70
    cfg.save_freq = save_freq
    cfg.hooks[4].save_freq = save_freq  # hooks[4] presumably the checkpoint-saver hook — TODO confirm
    cfg.save_freq_threshold = save_freq_threshold
    # Debug data roots run a single short epoch.
    # Fixed idiom: membership test with ``in`` instead of calling ``__contains__``.
    if "scannet_debug" in cfg.data_root:
        cfg.eval_epoch = cfg.epoch = 1
        cfg.data.train.loop = 1
    launch(
        main_worker,
        num_gpus_per_machine=args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        cfg=(cfg,),
    )
if __name__ == "__main__":
    # Expose only the GPUs used by this run before CUDA is initialized.
    os.environ["CUDA_VISIBLE_DEVICES"]="0,1"
    main()
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/tools/test_time.py | tools/test_time.py | """
Main Testing Script
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import sys
import os
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
from pointcept.engines.defaults import (
default_argument_parser,
default_config_parser,
default_setup,
)
from pointcept.engines.test import TESTERS
from pointcept.engines.launch import launch
def main_worker(cfg):
    """Per-process worker: finalize the config, then build and run the tester."""
    cfg = default_setup(cfg)
    TESTERS.build(dict(type=cfg.test.type, cfg=cfg)).test()
def main():
    """Configure and launch single-GPU timing evaluation of a trained model."""
    dataset = "scannet"  # {scannet, scannet200, nuscenes}
    config = "CDSegNet"  # {CDSegNet, PTv3_CNF}
    weight = f"/root/models/models/{dataset}/{config}/best_model.pth"
    num_gpus = 1
    args = default_argument_parser().parse_args()
    args.config_file = f"../configs/{dataset}/{config}_time.py"
    args.num_gpus = num_gpus
    args.options = {'save_path': f'../exp/{dataset}_test/{config}_time'}
    cfg = default_config_parser(args.config_file, args.options)
    cfg.weight = weight
    cfg.num_gpus = num_gpus
    # nG ~ N(nG;0,\tau*I), the input c' = c + nG; None disables the perturbation
    cfg.noise_level = None
    # Inference mode:
    #   SSI : Single-Step Inference  -- labels generated by CN from one NN iteration.
    #   MSAI: Multi-Step Average Inference -- T NN iterations, CN outputs averaged.
    #   MSFI: Multi-Step Final Inference   -- output of CN's final iteration.
    cfg.inference_mode = "SSI"
    cfg.step = 1
    launch(
        main_worker,
        num_gpus_per_machine=args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        cfg=(cfg,),
    )
if __name__ == "__main__":
    # Expose only the GPU used by this run before CUDA is initialized.
    os.environ["CUDA_VISIBLE_DEVICES"]="0"
    print(f"GPU : {os.environ['CUDA_VISIBLE_DEVICES']}")
    main()
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/tools/test_CDSegNet_ScanNet200.py | tools/test_CDSegNet_ScanNet200.py | """
Main Testing Script
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import sys
import os
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
from pointcept.engines.defaults import (
default_argument_parser,
default_config_parser,
default_setup,
)
from pointcept.engines.test import TESTERS
from pointcept.engines.launch import launch
def main_worker(cfg):
    """Per-process worker: finalize the config, then build and run the tester."""
    cfg = default_setup(cfg)
    TESTERS.build(dict(type=cfg.test.type, cfg=cfg)).test()
def main():
    """Configure and launch distributed ScanNet200 testing of a trained model."""
    dataset = "scannet200"  # {scannet, scannet200, nuscenes}
    config = "CDSegNet"  # {CDSegNet, PTv3_CNF}
    weight = f"/root/models/models/{dataset}/{config}/best_model.pth"
    num_gpus = 2
    args = default_argument_parser().parse_args()
    args.config_file = f"../configs/{dataset}/{config}.py"
    args.num_gpus = num_gpus
    args.options = {'save_path': f'../exp/{dataset}_test/{config}'}
    cfg = default_config_parser(args.config_file, args.options)
    cfg.weight = weight
    cfg.num_gpus = num_gpus
    # nG ~ N(nG;0,\tau*I), the input c' = c + nG; None disables the perturbation
    cfg.noise_level = None
    # Inference mode:
    #   SSI : Single-Step Inference  -- labels generated by CN from one NN iteration.
    #   MSAI: Multi-Step Average Inference -- T NN iterations, CN outputs averaged.
    #   MSFI: Multi-Step Final Inference   -- output of CN's final iteration.
    cfg.inference_mode = "SSI"
    cfg.step = 1
    launch(
        main_worker,
        num_gpus_per_machine=args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        cfg=(cfg,),
    )
if __name__ == "__main__":
    # Expose only the GPUs used by this run before CUDA is initialized.
    os.environ["CUDA_VISIBLE_DEVICES"]="0,1"
    print(f"GPU : {os.environ['CUDA_VISIBLE_DEVICES']}")
    main()
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/tools/train_CDSegNet_nuScenes.py | tools/train_CDSegNet_nuScenes.py | """
Main Training Script
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import sys
import os
import torch
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
from pointcept.engines.defaults import (
default_argument_parser,
default_config_parser,
default_setup,
)
from pointcept.engines.train import TRAINERS
from pointcept.engines.launch import launch
def main_worker(cfg):
    """Per-process worker: finalize the config, then build and run the trainer."""
    cfg = default_setup(cfg)
    TRAINERS.build(dict(type=cfg.train.type, cfg=cfg)).train()
def main():
    """Configure and launch distributed nuScenes training for CDSegNet/PTv3_CNF."""
    dataset = "nuscenes"  # {scannet, scannet200, nuscenes}
    config = "CDSegNet"  # {CDSegNet, PTv3_CNF}
    num_gpus = 2
    config_file = f"../configs/{dataset}/{config}.py"
    # the path of saving results
    options = {'save_path': f'../exp/{dataset}/{config}'}
    args = default_argument_parser().parse_args()
    args.config_file = config_file
    args.num_gpus = num_gpus
    args.options = options
    cfg = default_config_parser(args.config_file, args.options)
    # the number of GPUs
    cfg.num_gpus = num_gpus
    # checkpoint path: resume from the last saved model of a previous run
    weight = f"../exp/{dataset}/{config}/model/model_last.pth"
    cfg.weight = weight
    cfg.resume = True
    # After {save_freq_threshold} epochs, the checkpoint is saved every {save_freq} epochs.
    save_freq = 1
    save_freq_threshold = 70
    cfg.save_freq = save_freq
    cfg.hooks[4].save_freq = save_freq  # hooks[4] presumably the checkpoint-saver hook — TODO confirm
    cfg.save_freq_threshold = save_freq_threshold
    # Debug data roots run a single short epoch.
    # Fixed idiom: membership test with ``in`` instead of calling ``__contains__``.
    if "scannet_debug" in cfg.data_root:
        cfg.eval_epoch = cfg.epoch = 1
        cfg.data.train.loop = 1
    launch(
        main_worker,
        num_gpus_per_machine=args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        cfg=(cfg,),
    )
if __name__ == "__main__":
    # Expose only the GPUs used by this run before CUDA is initialized.
    os.environ["CUDA_VISIBLE_DEVICES"]="0,1"
    main()
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/tools/train_CDSegNet_ScanNet.py | tools/train_CDSegNet_ScanNet.py | """
Main Training Script
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import sys
import os
import torch
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
from pointcept.engines.defaults import (
default_argument_parser,
default_config_parser,
default_setup,
)
from pointcept.engines.train import TRAINERS
from pointcept.engines.launch import launch
def main_worker(cfg):
    """Per-process worker: finalize the config, then build and run the trainer."""
    cfg = default_setup(cfg)
    TRAINERS.build(dict(type=cfg.train.type, cfg=cfg)).train()
def main():
    """Configure and launch distributed ScanNet training for CDSegNet/PTv3_CNF."""
    dataset = "scannet"  # {scannet, scannet200, nuscenes}
    config = "CDSegNet"  # {CDSegNet, PTv3_CNF}
    num_gpus = 2
    config_file = f"../configs/{dataset}/{config}.py"
    # the path of saving results
    options = {'save_path': f'../exp/{dataset}/{config}'}
    args = default_argument_parser().parse_args()
    args.config_file = config_file
    args.num_gpus = num_gpus
    args.options = options
    cfg = default_config_parser(args.config_file, args.options)
    # the number of GPUs
    cfg.num_gpus = num_gpus
    # checkpoint path: resume from the last saved model of a previous run
    weight = f"../exp/{dataset}/{config}/model/model_last.pth"
    cfg.weight = weight
    cfg.resume = True
    # After {save_freq_threshold} epochs, the checkpoint is saved every {save_freq} epochs.
    save_freq = 1
    save_freq_threshold = 70
    cfg.save_freq = save_freq
    cfg.hooks[4].save_freq = save_freq  # hooks[4] presumably the checkpoint-saver hook — TODO confirm
    cfg.save_freq_threshold = save_freq_threshold
    # Debug data roots run a single short epoch.
    # Fixed idiom: membership test with ``in`` instead of calling ``__contains__``.
    if "scannet_debug" in cfg.data_root:
        cfg.eval_epoch = cfg.epoch = 1
        cfg.data.train.loop = 1
    launch(
        main_worker,
        num_gpus_per_machine=args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        cfg=(cfg,),
    )
if __name__ == "__main__":
    # Expose only the GPUs used by this run before CUDA is initialized.
    os.environ["CUDA_VISIBLE_DEVICES"]="0,1"
    main()
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/tools/train_CDSegNet_ScanNet200.py | tools/train_CDSegNet_ScanNet200.py | """
Main Training Script
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import sys
import os
import torch
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
from pointcept.engines.defaults import (
default_argument_parser,
default_config_parser,
default_setup,
)
from pointcept.engines.train import TRAINERS
from pointcept.engines.launch import launch
def main_worker(cfg):
    """Per-process worker: finalize the config, then build and run the trainer."""
    cfg = default_setup(cfg)
    TRAINERS.build(dict(type=cfg.train.type, cfg=cfg)).train()
def main():
    """Configure and launch distributed ScanNet200 training for CDSegNet/PTv3_CNF."""
    dataset = "scannet200"  # {scannet, scannet200, nuscenes}
    config = "CDSegNet"  # {CDSegNet, PTv3_CNF}
    num_gpus = 2
    config_file = f"../configs/{dataset}/{config}.py"
    # the path of saving results
    options = {'save_path': f'../exp/{dataset}/{config}'}
    args = default_argument_parser().parse_args()
    args.config_file = config_file
    args.num_gpus = num_gpus
    args.options = options
    cfg = default_config_parser(args.config_file, args.options)
    # the number of GPUs
    cfg.num_gpus = num_gpus
    # checkpoint path: resume from the last saved model of a previous run
    weight = f"../exp/{dataset}/{config}/model/model_last.pth"
    cfg.weight = weight
    cfg.resume = True
    # After {save_freq_threshold} epochs, the checkpoint is saved every {save_freq} epochs.
    save_freq = 1
    save_freq_threshold = 70
    cfg.save_freq = save_freq
    cfg.hooks[4].save_freq = save_freq  # hooks[4] presumably the checkpoint-saver hook — TODO confirm
    cfg.save_freq_threshold = save_freq_threshold
    # Debug data roots run a single short epoch.
    # Fixed idiom: membership test with ``in`` instead of calling ``__contains__``.
    if "scannet_debug" in cfg.data_root:
        cfg.eval_epoch = cfg.epoch = 1
        cfg.data.train.loop = 1
    launch(
        main_worker,
        num_gpus_per_machine=args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        cfg=(cfg,),
    )
if __name__ == "__main__":
    # Expose only the GPUs used by this run before CUDA is initialized.
    os.environ["CUDA_VISIBLE_DEVICES"]="0,1"
    main()
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/tools/test_CDSegNet_ScanNet.py | tools/test_CDSegNet_ScanNet.py | """
Main Testing Script
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import sys
import os
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
from pointcept.engines.defaults import (
default_argument_parser,
default_config_parser,
default_setup,
)
from pointcept.engines.test import TESTERS
from pointcept.engines.launch import launch
def main_worker(cfg):
    """Per-process worker: finalize the config, then build and run the tester."""
    cfg = default_setup(cfg)
    TESTERS.build(dict(type=cfg.test.type, cfg=cfg)).test()
def main():
    """Configure and launch distributed ScanNet testing of a trained model."""
    dataset = "scannet"  # {scannet, scannet200, nuscenes}
    config = "CDSegNet"  # {CDSegNet, PTv3_CNF}
    weight = f"/root/models/models/{dataset}/{config}/best_model.pth"
    num_gpus = 2
    args = default_argument_parser().parse_args()
    args.config_file = f"../configs/{dataset}/{config}.py"
    args.num_gpus = num_gpus
    args.options = {'save_path': f'../exp/{dataset}_test/{config}'}
    cfg = default_config_parser(args.config_file, args.options)
    cfg.weight = weight
    cfg.num_gpus = num_gpus
    # nG ~ N(nG;0,\tau*I), the input c' = c + nG; None disables the perturbation
    cfg.noise_level = None
    # Inference mode:
    #   SSI : Single-Step Inference  -- labels generated by CN from one NN iteration.
    #   MSAI: Multi-Step Average Inference -- T NN iterations, CN outputs averaged.
    #   MSFI: Multi-Step Final Inference   -- output of CN's final iteration.
    cfg.inference_mode = "SSI"
    cfg.step = 1
    launch(
        main_worker,
        num_gpus_per_machine=args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        cfg=(cfg,),
    )
if __name__ == "__main__":
    # Expose only the GPUs used by this run before CUDA is initialized.
    os.environ["CUDA_VISIBLE_DEVICES"]="0,1"
    print(f"GPU : {os.environ['CUDA_VISIBLE_DEVICES']}")
    main()
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/tools/test_CDSegNet_nuScenes.py | tools/test_CDSegNet_nuScenes.py | """
Main Testing Script
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import sys
import os
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
from pointcept.engines.defaults import (
default_argument_parser,
default_config_parser,
default_setup,
)
from pointcept.engines.test import TESTERS
from pointcept.engines.launch import launch
def main_worker(cfg):
    """Per-process worker: finalize the config, then build and run the tester."""
    cfg = default_setup(cfg)
    TESTERS.build(dict(type=cfg.test.type, cfg=cfg)).test()
def main():
    """Configure and launch distributed nuScenes testing of a trained model."""
    dataset = "nuscenes"  # {scannet, scannet200, nuscenes}
    config = "CDSegNet"  # {CDSegNet, PTv3_CNF}
    weight = f"/root/models/models/{dataset}/{config}/best_model.pth"
    num_gpus = 2
    args = default_argument_parser().parse_args()
    args.config_file = f"../configs/{dataset}/{config}.py"
    args.num_gpus = num_gpus
    args.options = {'save_path': f'../exp/{dataset}_test/{config}'}
    cfg = default_config_parser(args.config_file, args.options)
    cfg.weight = weight
    cfg.num_gpus = num_gpus
    # nG ~ N(nG;0,\tau*I), the input c' = c + nG; None disables the perturbation
    cfg.noise_level = None
    # Inference mode:
    #   SSI : Single-Step Inference  -- labels generated by CN from one NN iteration.
    #   MSAI: Multi-Step Average Inference -- T NN iterations, CN outputs averaged.
    #   MSFI: Multi-Step Final Inference   -- output of CN's final iteration.
    cfg.inference_mode = "SSI"
    cfg.step = 1
    launch(
        main_worker,
        num_gpus_per_machine=args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        cfg=(cfg,),
    )
if __name__ == "__main__":
    # Expose only the GPUs used by this run before CUDA is initialized.
    os.environ["CUDA_VISIBLE_DEVICES"]="0,1"
    print(f"GPU : {os.environ['CUDA_VISIBLE_DEVICES']}")
    main()
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/tools/test.py | tools/test.py | """
Main Testing Script
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import sys
import os
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
from pointcept.engines.defaults import (
default_argument_parser,
default_config_parser,
default_setup,
)
from pointcept.engines.test import TESTERS
from pointcept.engines.launch import launch
def main_worker(cfg):
    """Per-process worker: finalize the config, then build and run the tester."""
    cfg = default_setup(cfg)
    TESTERS.build(dict(type=cfg.test.type, cfg=cfg)).test()
def main():
    """Configure and launch distributed testing of a trained model (nuScenes)."""
    dataset = "nuscenes"  # {scannet, scannet200, nuscenes}
    config = "CDSegNet"  # {CDSegNet, PTv3_CNF}
    weight = f"/root/models/models/{dataset}/{config}/best_model.pth"
    num_gpus = 2
    args = default_argument_parser().parse_args()
    args.config_file = f"../configs/{dataset}/{config}.py"
    args.num_gpus = num_gpus
    args.options = {'save_path': f'../exp/{dataset}_test/{config}'}
    cfg = default_config_parser(args.config_file, args.options)
    cfg.weight = weight
    cfg.num_gpus = num_gpus
    # nG ~ N(nG;0,\tau*I), the input c' = c + nG; None disables the perturbation
    cfg.noise_level = None
    # Inference mode:
    #   SSI : Single-Step Inference  -- labels generated by CN from one NN iteration.
    #   MSAI: Multi-Step Average Inference -- T NN iterations, CN outputs averaged.
    #   MSFI: Multi-Step Final Inference   -- output of CN's final iteration.
    cfg.inference_mode = "SSI"
    cfg.step = 1
    launch(
        main_worker,
        num_gpus_per_machine=args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        cfg=(cfg,),
    )
if __name__ == "__main__":
    # Expose only the GPUs used by this run before CUDA is initialized.
    os.environ["CUDA_VISIBLE_DEVICES"]="0,1"
    print(f"GPU : {os.environ['CUDA_VISIBLE_DEVICES']}")
    main()
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/__init__.py | pointcept/__init__.py | python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false | |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/defaults.py | pointcept/datasets/defaults.py | """
Default Datasets
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import os
import glob
import numpy as np
import torch
from copy import deepcopy
from torch.utils.data import Dataset
from collections.abc import Sequence
from pointcept.utils.logger import get_root_logger
from .builder import DATASETS, build_dataset
from .transform import Compose, TRANSFORMS
@DATASETS.register_module()
class DefaultDataset(Dataset):
    """Generic segmentation dataset backed by per-scene ``.pth`` files.

    Each file under ``data_root/<split>/`` is expected to be a dict holding
    ``coord``, ``color`` and ``normal`` arrays, and optionally ``semantic_gt``
    labels. In ``test_mode`` the dataset produces per-scene fragment lists
    (augmented, voxelized and cropped views) for test-time ensembling.
    """
    def __init__(
        self,
        split="train",
        data_root="data/dataset",
        transform=None,
        test_mode=False,
        test_cfg=None,
        loop=1,
    ):
        super(DefaultDataset, self).__init__()
        self.data_root = data_root
        self.split = split
        self.transform = Compose(transform)
        self.loop = (
            loop if not test_mode else 1
        )  # force make loop = 1 while in test mode
        self.test_mode = test_mode
        self.test_cfg = test_cfg if test_mode else None
        if test_mode:
            # Optional voxelization/cropping of each augmented view; either may
            # be disabled by setting the corresponding test_cfg entry to None.
            self.test_voxelize = (
                TRANSFORMS.build(self.test_cfg.voxelize)
                if self.test_cfg.voxelize is not None
                else None
            )
            self.test_crop = (
                TRANSFORMS.build(self.test_cfg.crop)
                if self.test_cfg.crop is not None
                else None
            )
            self.post_transform = Compose(self.test_cfg.post_transform)
            self.aug_transform = [Compose(aug) for aug in self.test_cfg.aug_transform]
        self.data_list = self.get_data_list()
        # if(split == "train"):
        #     self.data_list = self.data_list[:10]
        logger = get_root_logger()
        logger.info(
            "Totally {} x {} samples in {} set.".format(
                len(self.data_list), self.loop, split
            )
        )
    def get_data_list(self):
        """Collect every ``*.pth`` scene path for the configured split(s)."""
        if isinstance(self.split, str):
            data_list = glob.glob(os.path.join(self.data_root, self.split, "*.pth"))
        elif isinstance(self.split, Sequence):
            data_list = []
            for split in self.split:
                data_list += glob.glob(os.path.join(self.data_root, split, "*.pth"))
        else:
            raise NotImplementedError
        return data_list
    def get_data(self, idx):
        """Load one scene; ``idx`` wraps modulo the dataset size (for loop > 1)."""
        data = torch.load(self.data_list[idx % len(self.data_list)])
        coord = data["coord"]
        color = data["color"]
        normal = data["normal"]
        if "semantic_gt" in data.keys():
            segment = data["semantic_gt"].reshape([-1])
        else:
            # Scenes without labels get an all -1 (ignore) segment.
            segment = np.ones(coord.shape[0]) * -1
        data_dict = dict(coord=coord, normal=normal, color=color, segment=segment)
        return data_dict
    def get_data_name(self, idx):
        # Scene name = file basename without the ".pth" extension.
        return os.path.basename(self.data_list[idx % len(self.data_list)]).split(".")[0]
    def prepare_train_data(self, idx):
        # load data
        data_dict = self.get_data(idx)
        data_dict = self.transform(data_dict)
        return data_dict
    def prepare_test_data(self, idx):
        """Build the test-time dict: labels, name, coords and fragment list."""
        # load data
        data_dict = self.get_data(idx)
        #coord = np.copy(data_dict["coord"])
        data_dict = self.transform(data_dict)
        # Ground truth and identifying info are popped out before augmentation
        # so every fragment shares the same reference labels.
        result_dict = dict(
            segment=data_dict.pop("segment"), name=self.get_data_name(idx), coord=data_dict["coord"]
        )
        if "origin_segment" in data_dict:
            assert "inverse" in data_dict
            result_dict["origin_segment"] = data_dict.pop("origin_segment")
            result_dict["inverse"] = data_dict.pop("inverse")
        # One augmented copy of the scene per configured TTA pipeline.
        data_dict_list = []
        for aug in self.aug_transform:
            data_dict_list.append(aug(deepcopy(data_dict)))
        fragment_list = []
        for data in data_dict_list:
            if self.test_voxelize is not None:
                data_part_list = self.test_voxelize(data)
            else:
                # No voxelization: keep the full scene as a single part and
                # record a trivial point index mapping.
                data["index"] = np.arange(data["coord"].shape[0])
                data_part_list = [data]
            for data_part in data_part_list:
                if self.test_crop is not None:
                    data_part = self.test_crop(data_part)
                else:
                    data_part = [data_part]
                fragment_list += data_part
        for i in range(len(fragment_list)):
            fragment_list[i] = self.post_transform(fragment_list[i])
        result_dict["fragment_list"] = fragment_list
        return result_dict
    def __getitem__(self, idx):
        if self.test_mode:
            return self.prepare_test_data(idx)
        else:
            return self.prepare_train_data(idx)
    def __len__(self):
        # loop repeats the data list to lengthen an epoch.
        return len(self.data_list) * self.loop
@DATASETS.register_module()
class ConcatDataset(Dataset):
    """Concatenation of several datasets behind a single ``Dataset`` interface.

    ``data_list`` holds ``(dataset_idx, sample_idx)`` pairs covering every
    sample of every child dataset; ``loop`` repeats the combined list.
    """
    def __init__(self, datasets, loop=1):
        super(ConcatDataset, self).__init__()
        self.datasets = [build_dataset(dataset) for dataset in datasets]
        self.loop = loop
        self.data_list = self.get_data_list()
        logger = get_root_logger()
        logger.info(
            "Totally {} x {} samples in the concat set.".format(
                len(self.data_list), self.loop
            )
        )
    def get_data_list(self):
        """Build integer ``(dataset_idx, sample_idx)`` pairs for all children.

        Fixed: the previous implementation produced the dataset index via
        ``np.ones(n) * i`` (``np.float64`` values); indexing the Python list
        ``self.datasets`` with a NumPy float raises TypeError in ``get_data``.
        Plain ``int`` indices are used instead.
        """
        data_list = []
        for i in range(len(self.datasets)):
            data_list.extend((i, j) for j in range(len(self.datasets[i])))
        return data_list
    def get_data(self, idx):
        # idx wraps modulo the combined size (for loop > 1).
        dataset_idx, data_idx = self.data_list[idx % len(self.data_list)]
        return self.datasets[dataset_idx][data_idx]
    def get_data_name(self, idx):
        dataset_idx, data_idx = self.data_list[idx % len(self.data_list)]
        return self.datasets[dataset_idx].get_data_name(data_idx)
    def __getitem__(self, idx):
        return self.get_data(idx)
    def __len__(self):
        return len(self.data_list) * self.loop
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/arkitscenes.py | pointcept/datasets/arkitscenes.py | """
ArkitScenes Dataset
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import os
import glob
import numpy as np
import torch
from copy import deepcopy
from torch.utils.data import Dataset
from pointcept.utils.logger import get_root_logger
from .builder import DATASETS
from .transform import Compose, TRANSFORMS
from .preprocessing.scannet.meta_data.scannet200_constants import VALID_CLASS_IDS_200
@DATASETS.register_module()
class ArkitScenesDataset(Dataset):
    """ARKitScenes mesh dataset of per-scene ``.pth`` files.

    Each file is expected to hold ``coord``, ``color`` and ``normal`` arrays;
    no semantic labels are available, so ``segment`` is all zeros.
    """
    def __init__(
        self,
        split="Training",
        data_root="data/ARKitScenesMesh",
        transform=None,
        test_mode=False,
        test_cfg=None,
        loop=1,
    ):
        super(ArkitScenesDataset, self).__init__()
        self.data_root = data_root
        self.split = split
        self.transform = Compose(transform)
        self.loop = (
            loop if not test_mode else 1
        )  # force make loop = 1 while in test mode
        self.test_mode = test_mode
        self.test_cfg = test_cfg if test_mode else None
        # ScanNet200 valid class ids, used for label mapping downstream.
        self.class2id = np.array(VALID_CLASS_IDS_200)
        if test_mode:
            self.test_voxelize = TRANSFORMS.build(self.test_cfg.voxelize)
            self.test_crop = TRANSFORMS.build(self.test_cfg.crop)
            self.post_transform = Compose(self.test_cfg.post_transform)
            self.aug_transform = [Compose(aug) for aug in self.test_cfg.aug_transform]
        self.data_list = self.get_data_list()
        logger = get_root_logger()
        logger.info(
            "Totally {} x {} samples in {} set.".format(
                len(self.data_list), self.loop, split
            )
        )
    def get_data_list(self):
        """Collect every ``*.pth`` scene path for the configured split(s)."""
        if isinstance(self.split, str):
            data_list = glob.glob(os.path.join(self.data_root, self.split, "*.pth"))
        elif isinstance(self.split, list):
            data_list = []
            for split in self.split:
                data_list += glob.glob(os.path.join(self.data_root, split, "*.pth"))
        else:
            raise NotImplementedError
        return data_list
    def get_data(self, idx):
        """Load one scene; ``idx`` wraps modulo the dataset size (for loop > 1)."""
        data = torch.load(self.data_list[idx % len(self.data_list)])
        coord = data["coord"]
        color = data["color"]
        normal = data["normal"]
        # Unlabeled dataset: all points get class 0.
        segment = np.zeros(coord.shape[0])
        data_dict = dict(coord=coord, normal=normal, color=color, segment=segment)
        return data_dict
    def get_data_name(self, idx):
        """Return the scene name (file basename without extension).

        Fixed: the previous implementation indexed ``self.data_idx``, an
        attribute that is never created anywhere in the class, so every call
        raised AttributeError. It now mirrors DefaultDataset.get_data_name.
        """
        return os.path.basename(self.data_list[idx % len(self.data_list)]).split(".")[0]
    def prepare_train_data(self, idx):
        # load data
        data_dict = self.get_data(idx)
        data_dict = self.transform(data_dict)
        return data_dict
    def prepare_test_data(self, idx):
        """Return (fragment list, ground-truth segment) for test-time ensembling."""
        # load data
        data_dict = self.get_data(idx)
        segment = data_dict.pop("segment")
        data_dict = self.transform(data_dict)
        data_dict_list = []
        for aug in self.aug_transform:
            data_dict_list.append(aug(deepcopy(data_dict)))
        input_dict_list = []
        for data in data_dict_list:
            data_part_list = self.test_voxelize(data)
            for data_part in data_part_list:
                data_part_list = self.test_crop(data_part)
                input_dict_list += data_part_list
        for i in range(len(input_dict_list)):
            input_dict_list[i] = self.post_transform(input_dict_list[i])
        return input_dict_list, segment
    def __getitem__(self, idx):
        if self.test_mode:
            return self.prepare_test_data(idx)
        else:
            return self.prepare_train_data(idx)
    def __len__(self):
        return len(self.data_list) * self.loop
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/modelnet.py | pointcept/datasets/modelnet.py | """
ModelNet40 Dataset
get sampled point clouds of ModelNet40 (XYZ and normal from mesh, 10k points per shape)
at "https://shapenet.cs.stanford.edu/media/modelnet40_normal_resampled.zip"
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import os
import numpy as np
from torch.utils.data import Dataset
from pointcept.utils.logger import get_root_logger
from .builder import DATASETS
from .transform import Compose
@DATASETS.register_module()
class ModelNetDataset(Dataset):
    """ModelNet40 classification dataset (resampled point clouds with normals).

    Each sample ``<class>_<id>.txt`` under ``data_root/<class>/`` holds
    comma-separated ``x,y,z,nx,ny,nz`` rows; the split files
    ``modelnet40_<split>.txt`` list the sample names.
    """
    def __init__(
        self,
        split="train",
        data_root="data/modelnet40_normal_resampled",
        class_names=None,
        transform=None,
        test_mode=False,
        test_cfg=None,
        cache_data=False,
        loop=1,
    ):
        super(ModelNetDataset, self).__init__()
        self.data_root = data_root
        # class name -> integer label
        self.class_names = dict(zip(class_names, range(len(class_names))))
        self.split = split
        self.transform = Compose(transform)
        self.loop = (
            loop if not test_mode else 1
        )  # force make loop = 1 while in test mode
        self.cache_data = cache_data
        self.test_mode = test_mode
        self.test_cfg = test_cfg if test_mode else None
        self.cache = {}
        if test_mode:
            # TODO: Optimize
            pass
        self.data_list = self.get_data_list()
        logger = get_root_logger()
        logger.info(
            "Totally {} x {} samples in {} set.".format(
                len(self.data_list), self.loop, split
            )
        )
    def get_data_list(self):
        """Read the official split file listing sample names."""
        assert isinstance(self.split, str)
        split_path = os.path.join(
            self.data_root, "modelnet40_{}.txt".format(self.split)
        )
        data_list = np.loadtxt(split_path, dtype="str")
        return data_list
    def get_data(self, idx):
        """Load (and optionally cache) one sample.

        Fixed: the original unconditionally read ``self.cache[data_idx]``
        whenever ``cache_data`` was True, raising KeyError on the first access,
        while the cache-write branch sat inside the non-caching path and was
        unreachable. The cache is now consulted first and filled after a miss.
        """
        data_idx = idx % len(self.data_list)
        if data_idx in self.cache:
            coord, normal, category = self.cache[data_idx]
        else:
            # "<class>_<id>" -> class directory name (class names may contain "_").
            data_shape = "_".join(self.data_list[data_idx].split("_")[0:-1])
            data_path = os.path.join(
                self.data_root, data_shape, self.data_list[data_idx] + ".txt"
            )
            data = np.loadtxt(data_path, delimiter=",").astype(np.float32)
            coord, normal = data[:, 0:3], data[:, 3:6]
            category = np.array([self.class_names[data_shape]])
            if self.cache_data:
                self.cache[data_idx] = (coord, normal, category)
        data_dict = dict(coord=coord, normal=normal, category=category)
        return data_dict
    def prepare_train_data(self, idx):
        data_dict = self.get_data(idx)
        data_dict = self.transform(data_dict)
        return data_dict
    def prepare_test_data(self, idx):
        assert idx < len(self.data_list)
        data_dict = self.get_data(idx)
        data_dict = self.transform(data_dict)
        return data_dict
    def get_data_name(self, idx):
        data_idx = idx % len(self.data_list)
        return self.data_list[data_idx]
    def __getitem__(self, idx):
        if self.test_mode:
            return self.prepare_test_data(idx)
        else:
            return self.prepare_train_data(idx)
    def __len__(self):
        return len(self.data_list) * self.loop
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/nuscenes.py | pointcept/datasets/nuscenes.py | """
nuScenes Dataset
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com), Zheng Zhang
Please cite our work if the code is helpful to you.
"""
import os
import numpy as np
from collections.abc import Sequence
import pickle
from .builder import DATASETS
from .defaults import DefaultDataset
@DATASETS.register_module()
class NuScenesDataset(DefaultDataset):
    """nuScenes lidar segmentation dataset.

    Samples are described by pickled info files
    (``nuscenes_infos_<sweeps>sweeps_<split>.pkl``); raw lidar points and
    lidarseg labels are read from binary files under ``data_root/raw``.
    Raw label ids are remapped to 16 training classes via ``learning_map``.
    """
    def __init__(
        self,
        split="train",
        data_root="data/nuscenes",
        sweeps=10,
        transform=None,
        test_mode=False,
        test_cfg=None,
        loop=1,
        ignore_index=-1,
    ):
        self.sweeps = sweeps
        self.ignore_index = ignore_index
        # raw class id -> train id (or ignore_index) lookup table
        self.learning_map = self.get_learning_map(ignore_index)
        super().__init__(
            split=split,
            data_root=data_root,
            transform=transform,
            test_mode=test_mode,
            test_cfg=test_cfg,
            loop=loop,
        )
    def get_info_path(self, split):
        """Return the pickle info-file path for one split name."""
        assert split in ["train", "val", "test"]
        if split == "train":
            return os.path.join(
                self.data_root, "info", f"nuscenes_infos_{self.sweeps}sweeps_train.pkl"
            )
        elif split == "val":
            return os.path.join(
                self.data_root, "info", f"nuscenes_infos_{self.sweeps}sweeps_val.pkl"
            )
        elif split == "test":
            return os.path.join(
                self.data_root, "info", f"nuscenes_infos_{self.sweeps}sweeps_test.pkl"
            )
        else:
            raise NotImplementedError
    def get_data_list(self):
        """Load and concatenate the per-split info records."""
        if isinstance(self.split, str):
            info_paths = [self.get_info_path(self.split)]
        elif isinstance(self.split, Sequence):
            info_paths = [self.get_info_path(s) for s in self.split]
        else:
            raise NotImplementedError
        data_list = []
        for info_path in info_paths:
            with open(info_path, "rb") as f:
                info = pickle.load(f)
                data_list.extend(info)
        return data_list
    def get_data(self, idx):
        """Load one sweep: coords, normalized intensity, remapped labels."""
        data = self.data_list[idx % len(self.data_list)]
        lidar_path = os.path.join(self.data_root, "raw", data["lidar_path"])
        # nuScenes lidar binaries store 5 float32 values per point:
        # x, y, z, intensity, ring index.
        points = np.fromfile(str(lidar_path), dtype=np.float32, count=-1).reshape(
            [-1, 5]
        )
        coord = points[:, :3]
        strength = points[:, 3].reshape([-1, 1]) / 255  # scale strength to [0, 1]
        if "gt_segment_path" in data.keys():
            gt_segment_path = os.path.join(
                self.data_root, "raw", data["gt_segment_path"]
            )
            # lidarseg labels are stored as one uint8 per point.
            segment = np.fromfile(
                str(gt_segment_path), dtype=np.uint8, count=-1
            ).reshape([-1])
            # Remap raw ids to train ids through the lookup table.
            segment = np.vectorize(self.learning_map.__getitem__)(segment).astype(
                np.int64
            )
        else:
            # No ground truth (e.g. test split): mark everything ignored.
            segment = np.ones((points.shape[0],), dtype=np.int64) * self.ignore_index
        data_dict = dict(coord=coord, strength=strength, segment=segment)
        return data_dict
    def get_data_name(self, idx):
        # return data name for lidar seg, optimize the code when need to support detection
        return self.data_list[idx % len(self.data_list)]["lidar_token"]
    @staticmethod
    def get_learning_map(ignore_index):
        """Return the raw-id -> train-id mapping (unused raw ids -> ignore_index)."""
        learning_map = {
            0: ignore_index,
            1: ignore_index,
            2: 6,
            3: 6,
            4: 6,
            5: ignore_index,
            6: 6,
            7: ignore_index,
            8: ignore_index,
            9: 0,
            10: ignore_index,
            11: ignore_index,
            12: 7,
            13: ignore_index,
            14: 1,
            15: 2,
            16: 2,
            17: 3,
            18: 4,
            19: ignore_index,
            20: ignore_index,
            21: 5,
            22: 8,
            23: 9,
            24: 10,
            25: 11,
            26: 12,
            27: 13,
            28: 14,
            29: ignore_index,
            30: 15,
            31: ignore_index,
        }
        return learning_map
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/scannet.py | pointcept/datasets/scannet.py | """
ScanNet20 / ScanNet200 / ScanNet Data Efficient Dataset
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import os
import glob
import numpy as np
import torch
from copy import deepcopy
from torch.utils.data import Dataset
from collections.abc import Sequence
from pointcept.utils.logger import get_root_logger
from pointcept.utils.cache import shared_dict
from .builder import DATASETS
from .transform import Compose, TRANSFORMS
from .preprocessing.scannet.meta_data.scannet200_constants import (
VALID_CLASS_IDS_20,
VALID_CLASS_IDS_200,
)
@DATASETS.register_module()
class ScanNetDataset(Dataset):
    """ScanNet v2 semantic/instance segmentation dataset.

    Scenes live under ``<data_root>/<split>/`` either as a single
    ``<scene>.pth`` dict or as a directory of per-array ``.npy`` files.
    Supports the data-efficient protocols: ``lr_file`` restricts the scene
    list (limited reconstructions) and ``la_file`` restricts which points
    keep labels (limited annotations).
    """

    # mapping from the 0..19 train ids back to raw ScanNet class ids
    class2id = np.array(VALID_CLASS_IDS_20)

    def __init__(
        self,
        split="train",
        data_root="data/scannet",
        transform=None,
        lr_file=None,
        la_file=None,
        ignore_index=-1,
        test_mode=False,
        test_cfg=None,
        cache=False,
        loop=1,
    ):
        """
        Args:
            split: split folder name (or a sequence of names).
            transform: transform config list, built into a Compose pipeline.
            lr_file: optional txt listing of train scene names (limited recon).
            la_file: optional torch file mapping scene name -> labeled indices.
            ignore_index: label value used for unannotated points.
            test_mode: enables the fragment-based test pipeline from test_cfg.
            cache: read scenes through the shared-memory cache.
            loop: virtual repetition factor (forced to 1 in test mode).
        """
        super(ScanNetDataset, self).__init__()
        self.data_root = data_root
        self.split = split
        self.transform = Compose(transform)
        self.cache = cache
        self.loop = (
            loop if not test_mode else 1
        )  # force make loop = 1 while in test mode
        self.test_mode = test_mode
        self.test_cfg = test_cfg if test_mode else None

        if test_mode:
            self.test_voxelize = TRANSFORMS.build(self.test_cfg.voxelize)
            self.test_crop = (
                TRANSFORMS.build(self.test_cfg.crop) if self.test_cfg.crop else None
            )
            self.post_transform = Compose(self.test_cfg.post_transform)
            self.aug_transform = [Compose(aug) for aug in self.test_cfg.aug_transform]

        if lr_file:
            # limited-reconstruction protocol: only the listed train scenes
            self.data_list = [
                os.path.join(data_root, "train", name + ".pth")
                for name in np.loadtxt(lr_file, dtype=str)
            ]
        else:
            self.data_list = self.get_data_list()
        self.la = torch.load(la_file) if la_file else None
        self.ignore_index = ignore_index
        logger = get_root_logger()
        logger.info(
            "Totally {} x {} samples in {} set.".format(
                len(self.data_list), self.loop, split
            )
        )

    def get_data_list(self):
        """Collect scene paths for one split or a sequence of splits."""
        if isinstance(self.split, str):
            data_list = glob.glob(os.path.join(self.data_root, self.split, "*"))
        elif isinstance(self.split, Sequence):
            data_list = []
            for split in self.split:
                data_list += glob.glob(os.path.join(self.data_root, split, "*"))
        else:
            raise NotImplementedError
        # strip ".pth" so each entry names the scene, not a particular file
        data_list = [data.replace(".pth", "") for data in data_list]
        return data_list

    def get_data(self, idx):
        """Load one scene as dict(coord, normal, color, segment, instance, scene_id)."""
        data_path = self.data_list[idx % len(self.data_list)]
        if os.path.exists(f"{data_path}.pth"):
            data_path = f"{data_path}.pth"
            if not self.cache:
                data = torch.load(data_path)
            else:
                data_name = data_path.replace(os.path.dirname(self.data_root), "").split(
                    "."
                )[0]
                cache_name = "pointcept" + data_name.replace(os.path.sep, "-")
                data = shared_dict(cache_name)
        else:
            # directory layout: one .npy file per array
            data = {}
            data["coord"] = np.load(os.path.join(data_path, "coord.npy"))
            data["color"] = np.load(os.path.join(data_path, "color.npy"))
            data["normal"] = np.load(os.path.join(data_path, "normal.npy"))
            data["semantic_gt20"] = np.load(os.path.join(data_path, "segment20.npy"))
            data["semantic_gt200"] = np.load(os.path.join(data_path, "segment200.npy"))
            data["instance"] = np.load(os.path.join(data_path, "instance.npy"))
            data["scene_id"] = data_path.split("/")[-1]
        coord = data["coord"]
        color = data["color"]
        normal = data["normal"]
        scene_id = data["scene_id"]
        if "semantic_gt20" in data.keys():
            segment = data["semantic_gt20"].reshape([-1])
        else:
            segment = np.ones(coord.shape[0]) * -1
        if "instance_gt" in data.keys():
            instance = data["instance_gt"].reshape([-1])
        else:
            instance = np.ones(coord.shape[0]) * -1
        data_dict = dict(
            coord=coord,
            normal=normal,
            color=color,
            segment=segment,
            instance=instance,
            scene_id=scene_id,
        )
        if self.la:
            # limited-annotation protocol: ignore every point NOT in the
            # sampled (labeled) index set
            sampled_index = self.la[self.get_data_name(idx)]
            # FIX: np.bool alias was removed in NumPy 1.24 — use builtin bool
            mask = np.ones_like(segment).astype(bool)
            mask[sampled_index] = False
            segment[mask] = self.ignore_index
            data_dict["segment"] = segment
            data_dict["sampled_index"] = sampled_index
        return data_dict

    def get_data_name(self, idx):
        """Scene name: file basename without extension."""
        return os.path.basename(self.data_list[idx % len(self.data_list)]).split(".")[0]

    def prepare_train_data(self, idx):
        # load data, then run the train-time transform pipeline
        data_dict = self.get_data(idx)
        data_dict = self.transform(data_dict)
        return data_dict

    def prepare_test_data(self, idx):
        """Test pipeline: per augmentation, voxelize into fragments (optionally cropped)."""
        # load data; ground truth is held aside for evaluation
        data_dict = self.get_data(idx)
        segment = data_dict.pop("segment")
        data_dict = self.transform(data_dict)
        data_dict_list = []
        for aug in self.aug_transform:
            data_dict_list.append(aug(deepcopy(data_dict)))

        input_dict_list = []
        for data in data_dict_list:
            data_part_list = self.test_voxelize(data)
            for data_part in data_part_list:
                if self.test_crop:
                    data_part = self.test_crop(data_part)
                else:
                    data_part = [data_part]
                input_dict_list += data_part

        for i in range(len(input_dict_list)):
            input_dict_list[i] = self.post_transform(input_dict_list[i])
        data_dict = dict(
            fragment_list=input_dict_list, segment=segment, name=self.get_data_name(idx)
        )
        return data_dict

    def __getitem__(self, idx):
        if self.test_mode:
            return self.prepare_test_data(idx)
        else:
            return self.prepare_train_data(idx)

    def __len__(self):
        # virtual length: raw scene count repeated ``loop`` times
        return len(self.data_list) * self.loop
@DATASETS.register_module()
class ScanNet200Dataset(ScanNetDataset):
    """ScanNet200 variant: same loading as ScanNetDataset, 200-class labels."""

    # mapping from the 0..199 train ids back to raw ScanNet class ids
    class2id = np.array(VALID_CLASS_IDS_200)

    def get_data(self, idx):
        """Load one scene, preferring a single ``.pth`` file over per-array ``.npy``s."""
        data_path = self.data_list[idx % len(self.data_list)]
        if(os.path.exists(f"{data_path}.pth")):
            data_path = f"{data_path}.pth"
            data = torch.load(data_path)
        else:
            data = {}
            data["coord"] = np.load(os.path.join(data_path, "coord.npy"))
            data["color"] = np.load(os.path.join(data_path, "color.npy"))
            data["normal"] = np.load(os.path.join(data_path, "normal.npy"))
            data["semantic_gt20"] = np.load(os.path.join(data_path, "segment20.npy"))
            data["semantic_gt200"] = np.load(os.path.join(data_path, "segment200.npy"))
            data["instance"] = np.load(os.path.join(data_path, "instance.npy"))
            data["scene_id"] = data_path.split("/")[-1]
        coord = data["coord"]
        color = data["color"]
        normal = data["normal"]
        scene_id = data["scene_id"]
        if "semantic_gt200" in data.keys():
            segment = data["semantic_gt200"].reshape([-1])
        else:
            segment = np.ones(coord.shape[0]) * -1
        if "instance_gt" in data.keys():
            instance = data["instance_gt"].reshape([-1])
        else:
            instance = np.ones(coord.shape[0]) * -1
        data_dict = dict(
            coord=coord,
            normal=normal,
            color=color,
            segment=segment,
            instance=instance,
            scene_id=scene_id,
        )
        if self.la:
            # Limited-annotation mode.
            # NOTE(review): this marks the *sampled* indices as ignore_index,
            # which is the inverse of ScanNetDataset.get_data (there every
            # point EXCEPT the sampled ones is ignored) — verify which
            # convention is intended before relying on this path.
            sampled_index = self.la[self.get_data_name(idx)]
            segment[sampled_index] = self.ignore_index
            data_dict["segment"] = segment
            data_dict["sampled_index"] = sampled_index
        return data_dict
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/transform.py | pointcept/datasets/transform.py | """
3D Point Cloud Augmentation
Inspirited by chrischoy/SpatioTemporalSegmentation
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import random
import numbers
import scipy
import scipy.ndimage
import scipy.interpolate
import scipy.stats
import numpy as np
import torch
import copy
from collections.abc import Sequence, Mapping
from pointcept.utils.registry import Registry
# Global registry of point-cloud transforms; the classes below register
# themselves via the @TRANSFORMS.register_module() decorator.
TRANSFORMS = Registry("transforms")
@TRANSFORMS.register_module()
class Collect(object):
    """Assemble a training sample from selected entries of ``data_dict``.

    ``keys`` are copied through verbatim, each entry of ``offset_keys_dict``
    becomes a 1-element tensor holding the point count of the named source
    array, and every ``*_keys`` kwarg is concatenated feature-wise,
    e.g. ``Collect(keys=[coord], feat_keys=[coord, color])``.
    """

    def __init__(self, keys, offset_keys_dict=None, **kwargs):
        self.keys = keys
        self.offset_keys = (
            dict(offset="coord") if offset_keys_dict is None else offset_keys_dict
        )
        self.kwargs = kwargs

    def __call__(self, data_dict):
        if isinstance(self.keys, str):
            self.keys = [self.keys]
        collected = {key: data_dict[key] for key in self.keys}
        for out_key, src_key in self.offset_keys.items():
            collected[out_key] = torch.tensor([data_dict[src_key].shape[0]])
        for raw_name, feat_keys in self.kwargs.items():
            assert isinstance(feat_keys, Sequence)
            feats = [data_dict[k].float() for k in feat_keys]
            collected[raw_name.replace("_keys", "")] = torch.cat(feats, dim=1)
        return collected
@TRANSFORMS.register_module()
class Copy(object):
    """Duplicate selected entries of ``data_dict`` under new keys.

    Default mapping backs up ``coord``/``segment`` as ``origin_coord`` /
    ``origin_segment``.  numpy arrays are copied, tensors are detached
    clones, everything else is deep-copied.
    """

    def __init__(self, keys_dict=None):
        self.keys_dict = (
            dict(coord="origin_coord", segment="origin_segment")
            if keys_dict is None
            else keys_dict
        )

    def __call__(self, data_dict):
        for src, dst in self.keys_dict.items():
            value = data_dict[src]
            if isinstance(value, np.ndarray):
                data_dict[dst] = value.copy()
            elif isinstance(value, torch.Tensor):
                data_dict[dst] = value.clone().detach()
            else:
                data_dict[dst] = copy.deepcopy(value)
        return data_dict
@TRANSFORMS.register_module()
class ToTensor(object):
    """Recursively convert numbers, numpy arrays, mappings and sequences to tensors.

    Strings and tensors pass through unchanged; ints/floats become 1-element
    LongTensor/FloatTensor; numpy arrays keep bool, become long for integer
    dtypes and float for floating dtypes.
    """

    def __call__(self, data):
        if isinstance(data, torch.Tensor):
            return data
        # str is also a Sequence, so it must be handled before that branch
        if isinstance(data, str):
            return data
        if isinstance(data, int):
            return torch.LongTensor([data])
        if isinstance(data, float):
            return torch.FloatTensor([data])
        if isinstance(data, np.ndarray):
            if np.issubdtype(data.dtype, bool):
                return torch.from_numpy(data)
            if np.issubdtype(data.dtype, np.integer):
                return torch.from_numpy(data).long()
            if np.issubdtype(data.dtype, np.floating):
                return torch.from_numpy(data).float()
            raise TypeError(f"type {type(data)} cannot be converted to tensor.")
        if isinstance(data, Mapping):
            return {sub_key: self(item) for sub_key, item in data.items()}
        if isinstance(data, Sequence):
            return [self(item) for item in data]
        raise TypeError(f"type {type(data)} cannot be converted to tensor.")
@TRANSFORMS.register_module()
class Add(object):
    """Insert fixed key/value pairs into ``data_dict``."""

    def __init__(self, keys_dict=None):
        self.keys_dict = {} if keys_dict is None else keys_dict

    def __call__(self, data_dict):
        data_dict.update(self.keys_dict)
        return data_dict
@TRANSFORMS.register_module()
class NormalizeColor(object):
    """Rescale 8-bit RGB values from [0, 255] into [-1, 1]."""

    def __call__(self, data_dict):
        if "color" in data_dict:
            # x / 127.5 - 1 maps 0 -> -1 and 255 -> +1
            rescaled = data_dict["color"] / 127.5 - 1
            data_dict["color"] = rescaled
        return data_dict
@TRANSFORMS.register_module()
class NormalizeCoord(object):
    """Center the cloud at its centroid and scale it into the unit sphere."""

    def __call__(self, data_dict):
        if "coord" in data_dict:
            # modified from pointnet2
            centroid = np.mean(data_dict["coord"], axis=0)
            data_dict["coord"] -= centroid  # in-place, like the original
            radius = np.max(np.sqrt(np.sum(data_dict["coord"] ** 2, axis=1)))
            data_dict["coord"] = data_dict["coord"] / radius
        return data_dict
@TRANSFORMS.register_module()
class PositiveShift(object):
    """Translate the cloud so every coordinate becomes non-negative."""

    def __call__(self, data_dict):
        if "coord" in data_dict:
            # in-place shift by the per-axis minimum
            data_dict["coord"] -= np.min(data_dict["coord"], 0)
        return data_dict
@TRANSFORMS.register_module()
class CenterShift(object):
    """Center the cloud on the XY bounding-box midpoint.

    With ``apply_z`` the cloud is also dropped onto z = 0 by subtracting the
    minimum z; otherwise the z axis is left untouched.
    """

    def __init__(self, apply_z=True):
        self.apply_z = apply_z

    def __call__(self, data_dict):
        if "coord" in data_dict:
            low = data_dict["coord"].min(axis=0)
            high = data_dict["coord"].max(axis=0)
            z_shift = low[2] if self.apply_z else 0
            shift = [(low[0] + high[0]) / 2, (low[1] + high[1]) / 2, z_shift]
            data_dict["coord"] -= shift
        return data_dict
@TRANSFORMS.register_module()
class RandomShift(object):
    """Translate the cloud by a uniform random offset drawn per axis."""

    def __init__(self, shift=((-0.2, 0.2), (-0.2, 0.2), (0, 0))):
        self.shift = shift

    def __call__(self, data_dict):
        if "coord" in data_dict:
            # one uniform draw per axis, x/y/z order (keeps RNG stream identical)
            offsets = [np.random.uniform(low, high) for low, high in self.shift]
            data_dict["coord"] += offsets
        return data_dict
@TRANSFORMS.register_module()
class PointClip(object):
    """Clamp coordinates into an axis-aligned box (xmin, ymin, zmin, xmax, ymax, zmax)."""

    def __init__(self, point_cloud_range=(-80, -80, -3, 80, 80, 1)):
        self.point_cloud_range = point_cloud_range

    def __call__(self, data_dict):
        if "coord" in data_dict:
            lower = self.point_cloud_range[:3]
            upper = self.point_cloud_range[3:]
            data_dict["coord"] = np.clip(data_dict["coord"], a_min=lower, a_max=upper)
        return data_dict
@TRANSFORMS.register_module()
class RandomDropout(object):
    """Randomly drop a fraction of points.

    With probability ``dropout_application_ratio``, keep a random subset of
    ``1 - dropout_ratio`` of the points and apply the same index selection to
    every per-point array present in ``data_dict``.
    """

    def __init__(self, dropout_ratio=0.2, dropout_application_ratio=0.5):
        self.dropout_ratio = dropout_ratio
        self.dropout_application_ratio = dropout_application_ratio

    def __call__(self, data_dict):
        if random.random() >= self.dropout_application_ratio:
            return data_dict
        total = len(data_dict["coord"])
        idx = np.random.choice(total, int(total * (1 - self.dropout_ratio)), replace=False)
        if "sampled_index" in data_dict:
            # ScanNet data-efficient mode: labeled points must survive the dropout.
            idx = np.unique(np.append(idx, data_dict["sampled_index"]))
            mask = np.zeros_like(data_dict["segment"]).astype(bool)
            mask[data_dict["sampled_index"]] = True
            data_dict["sampled_index"] = np.where(mask[idx])[0]
        for key in ("coord", "color", "normal", "strength", "segment", "instance"):
            if key in data_dict:
                data_dict[key] = data_dict[key][idx]
        return data_dict
@TRANSFORMS.register_module()
class RandomRotate(object):
    """Rotate the cloud (and normals) by a random angle about one axis.

    ``angle`` is a [low, high] range in units of pi.  The rotation is applied
    around ``center`` (bounding-box center when None) with probability ``p``.
    """

    def __init__(self, angle=None, center=None, axis="z", always_apply=False, p=0.5):
        self.angle = [-1, 1] if angle is None else angle
        self.axis = axis
        self.always_apply = always_apply
        self.p = p if not self.always_apply else 1
        self.center = center

    @staticmethod
    def _rotation_matrix(axis, angle):
        """3x3 rotation matrix for ``angle`` radians about ``axis``."""
        c, s = np.cos(angle), np.sin(angle)
        if axis == "x":
            return np.array([[1, 0, 0], [0, c, -s], [0, s, c]])
        if axis == "y":
            return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
        if axis == "z":
            return np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])
        raise NotImplementedError

    def __call__(self, data_dict):
        if random.random() > self.p:
            return data_dict
        angle = np.random.uniform(self.angle[0], self.angle[1]) * np.pi
        rot_t = self._rotation_matrix(self.axis, angle)
        if "coord" in data_dict:
            if self.center is None:
                lo = data_dict["coord"].min(axis=0)
                hi = data_dict["coord"].max(axis=0)
                center = [(lo[0] + hi[0]) / 2, (lo[1] + hi[1]) / 2, (lo[2] + hi[2]) / 2]
            else:
                center = self.center
            data_dict["coord"] -= center
            data_dict["coord"] = np.dot(data_dict["coord"], np.transpose(rot_t))
            data_dict["coord"] += center
        if "normal" in data_dict:
            data_dict["normal"] = np.dot(data_dict["normal"], np.transpose(rot_t))
        return data_dict
@TRANSFORMS.register_module()
class RandomRotateTargetAngle(object):
    """Rotate by an angle chosen from a discrete set (in units of pi)."""

    def __init__(
        self, angle=(1 / 2, 1, 3 / 2), center=None, axis="z", always_apply=False, p=0.75
    ):
        self.angle = angle
        self.axis = axis
        self.always_apply = always_apply
        self.p = p if not self.always_apply else 1
        self.center = center

    def __call__(self, data_dict):
        if random.random() > self.p:
            return data_dict
        angle = np.random.choice(self.angle) * np.pi
        c, s = np.cos(angle), np.sin(angle)
        if self.axis == "x":
            rot_t = np.array([[1, 0, 0], [0, c, -s], [0, s, c]])
        elif self.axis == "y":
            rot_t = np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
        elif self.axis == "z":
            rot_t = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])
        else:
            raise NotImplementedError
        if "coord" in data_dict:
            if self.center is not None:
                center = self.center
            else:
                lo = data_dict["coord"].min(axis=0)
                hi = data_dict["coord"].max(axis=0)
                center = [(lo[0] + hi[0]) / 2, (lo[1] + hi[1]) / 2, (lo[2] + hi[2]) / 2]
            data_dict["coord"] -= center
            data_dict["coord"] = np.dot(data_dict["coord"], np.transpose(rot_t))
            data_dict["coord"] += center
        if "normal" in data_dict:
            data_dict["normal"] = np.dot(data_dict["normal"], np.transpose(rot_t))
        return data_dict
@TRANSFORMS.register_module()
class RandomScale(object):
    """Scale coordinates by a random factor (per-axis when anisotropic)."""

    def __init__(self, scale=None, anisotropic=False):
        self.scale = scale if scale is not None else [0.95, 1.05]
        self.anisotropic = anisotropic

    def __call__(self, data_dict):
        if "coord" in data_dict:
            size = 3 if self.anisotropic else 1
            factor = np.random.uniform(self.scale[0], self.scale[1], size)
            data_dict["coord"] *= factor  # in-place, as in the original
        return data_dict
@TRANSFORMS.register_module()
class RandomFlip(object):
    """Mirror the cloud across the x and/or y axis, each with probability ``p``."""

    def __init__(self, p=0.5):
        self.p = p

    def __call__(self, data_dict):
        # two independent draws: x axis first, then y (keeps RNG stream identical)
        for axis in (0, 1):
            if np.random.rand() < self.p:
                if "coord" in data_dict:
                    data_dict["coord"][:, axis] = -data_dict["coord"][:, axis]
                if "normal" in data_dict:
                    data_dict["normal"][:, axis] = -data_dict["normal"][:, axis]
        return data_dict
@TRANSFORMS.register_module()
class RandomJitter(object):
    """Add clipped Gaussian noise to every coordinate."""

    def __init__(self, sigma=0.01, clip=0.05):
        assert clip > 0
        self.sigma = sigma
        self.clip = clip

    def __call__(self, data_dict):
        if "coord" in data_dict:
            n = data_dict["coord"].shape[0]
            noise = self.sigma * np.random.randn(n, 3)
            data_dict["coord"] += np.clip(noise, -self.clip, self.clip)
        return data_dict
# @TRANSFORMS.register_module()
# class ClipGaussianJitter(object):
# def __init__(self, scalar=0.02, store_jitter=False):
# self.scalar = scalar
# self.mean = np.mean(3)
# self.cov = np.identity(3)
# self.quantile = 1.96
# self.store_jitter = store_jitter
#
# def __call__(self, data_dict):
# if "coord" in data_dict.keys():
# jitter = np.random.multivariate_normal(
# self.mean, self.cov, data_dict["coord"].shape[0]
# )
# jitter = self.scalar * np.clip(jitter / 1.96, -1, 1)
# data_dict["coord"] += jitter
# if self.store_jitter:
# data_dict["jitter"] = jitter
# return data_dict
@TRANSFORMS.register_module()
class ClipGaussianJitter(object):
    """Add i.i.d. Gaussian noise (``mean``/``std``) to every coordinate."""

    def __init__(self, mean=0, std=1):
        self.mean = mean
        self.std = std

    def __call__(self, data_dict):
        if "coord" in data_dict:
            shape = data_dict["coord"].shape
            data_dict["coord"] += np.random.normal(self.mean, self.std, shape)
        return data_dict
@TRANSFORMS.register_module()
class ChromaticAutoContrast(object):
    """Randomly stretch colors to full [0, 255] contrast and blend with the original.

    ``blend_factor`` controls the mix (1.0 = fully stretched); drawn uniformly
    at random when None.
    """

    def __init__(self, p=0.2, blend_factor=None):
        self.p = p
        self.blend_factor = blend_factor

    def __call__(self, data_dict):
        if "color" in data_dict.keys() and np.random.rand() < self.p:
            color = data_dict["color"]
            lo = np.min(color, 0, keepdims=True)
            hi = np.max(color, 0, keepdims=True)
            stretched = (color[:, :3] - lo) * (255 / (hi - lo))
            blend = np.random.rand() if self.blend_factor is None else self.blend_factor
            data_dict["color"][:, :3] = (1 - blend) * color[:, :3] + blend * stretched
        return data_dict
@TRANSFORMS.register_module()
class ChromaticTranslation(object):
    """Shift all colors by one random RGB offset in ±255*ratio, clipped to [0, 255]."""

    def __init__(self, p=0.95, ratio=0.05):
        self.p = p
        self.ratio = ratio

    def __call__(self, data_dict):
        if "color" in data_dict.keys() and np.random.rand() < self.p:
            offset = (np.random.rand(1, 3) - 0.5) * 255 * 2 * self.ratio
            data_dict["color"][:, :3] = np.clip(offset + data_dict["color"][:, :3], 0, 255)
        return data_dict
@TRANSFORMS.register_module()
class ChromaticJitter(object):
    """Add per-point Gaussian color noise (std given in [0, 1] units, scaled by 255)."""

    def __init__(self, p=0.95, std=0.005):
        self.p = p
        self.std = std

    def __call__(self, data_dict):
        if "color" in data_dict.keys() and np.random.rand() < self.p:
            noise = np.random.randn(data_dict["color"].shape[0], 3) * (self.std * 255)
            data_dict["color"][:, :3] = np.clip(data_dict["color"][:, :3] + noise, 0, 255)
        return data_dict
@TRANSFORMS.register_module()
class RandomColorGrayScale(object):
    """With probability ``p``, replace RGB with its (3-channel) grayscale version."""

    def __init__(self, p):
        self.p = p

    @staticmethod
    def rgb_to_grayscale(color, num_output_channels=1):
        """Luma conversion (BT.601 weights); output has 1 or 3 channels."""
        if color.shape[-1] < 3:
            raise TypeError(
                "Input color should have at least 3 dimensions, but found {}".format(
                    color.shape[-1]
                )
            )
        if num_output_channels not in (1, 3):
            raise ValueError("num_output_channels should be either 1 or 3")
        r, g, b = color[..., 0], color[..., 1], color[..., 2]
        luma = (0.2989 * r + 0.587 * g + 0.114 * b).astype(color.dtype)
        gray = np.expand_dims(luma, axis=-1)
        if num_output_channels == 3:
            gray = np.broadcast_to(gray, color.shape)
        return gray

    def __call__(self, data_dict):
        if np.random.rand() < self.p:
            data_dict["color"] = self.rgb_to_grayscale(data_dict["color"], 3)
        return data_dict
@TRANSFORMS.register_module()
class RandomColorJitter(object):
    """
    Random Color Jitter for 3D point cloud (refer torchvision)
    """

    def __init__(self, brightness=0, contrast=0, saturation=0, hue=0, p=0.95):
        # Each strength is normalized into a sampling range (None = disabled).
        self.brightness = self._check_input(brightness, "brightness")
        self.contrast = self._check_input(contrast, "contrast")
        self.saturation = self._check_input(saturation, "saturation")
        self.hue = self._check_input(
            hue, "hue", center=0, bound=(-0.5, 0.5), clip_first_on_zero=False
        )
        self.p = p  # per-component application probability

    @staticmethod
    def _check_input(
        value, name, center=1, bound=(0, float("inf")), clip_first_on_zero=True
    ):
        """Normalize a scalar strength into a [lo, hi] range or validate a pair."""
        if isinstance(value, numbers.Number):
            if value < 0:
                raise ValueError(
                    "If {} is a single number, it must be non negative.".format(name)
                )
            value = [center - float(value), center + float(value)]
            if clip_first_on_zero:
                value[0] = max(value[0], 0.0)
        elif isinstance(value, (tuple, list)) and len(value) == 2:
            if not bound[0] <= value[0] <= value[1] <= bound[1]:
                raise ValueError("{} values should be between {}".format(name, bound))
        else:
            raise TypeError(
                "{} should be a single number or a list/tuple with length 2.".format(
                    name
                )
            )

        # if value is 0 or (1., 1.) for brightness/contrast/saturation
        # or (0., 0.) for hue, do nothing
        if value[0] == value[1] == center:
            value = None
        return value

    @staticmethod
    def blend(color1, color2, ratio):
        """Linear blend ``ratio*color1 + (1-ratio)*color2``, clipped to 8-bit range."""
        ratio = float(ratio)
        bound = 255.0
        return (
            (ratio * color1 + (1.0 - ratio) * color2)
            .clip(0, bound)
            .astype(color1.dtype)
        )

    @staticmethod
    def rgb2hsv(rgb):
        """Vectorized RGB -> HSV for values in [0, 1] (mirrors colorsys)."""
        r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2]
        maxc = np.max(rgb, axis=-1)
        minc = np.min(rgb, axis=-1)
        eqc = maxc == minc
        cr = maxc - minc
        # the eqc terms guard against division by zero for gray pixels
        s = cr / (np.ones_like(maxc) * eqc + maxc * (1 - eqc))
        cr_divisor = np.ones_like(maxc) * eqc + cr * (1 - eqc)
        rc = (maxc - r) / cr_divisor
        gc = (maxc - g) / cr_divisor
        bc = (maxc - b) / cr_divisor
        hr = (maxc == r) * (bc - gc)
        hg = ((maxc == g) & (maxc != r)) * (2.0 + rc - bc)
        hb = ((maxc != g) & (maxc != r)) * (4.0 + gc - rc)
        h = hr + hg + hb
        h = (h / 6.0 + 1.0) % 1.0
        return np.stack((h, s, maxc), axis=-1)

    @staticmethod
    def hsv2rgb(hsv):
        """Vectorized HSV -> RGB, inverse of :meth:`rgb2hsv`."""
        h, s, v = hsv[..., 0], hsv[..., 1], hsv[..., 2]
        i = np.floor(h * 6.0)
        f = (h * 6.0) - i
        i = i.astype(np.int32)
        p = np.clip((v * (1.0 - s)), 0.0, 1.0)
        q = np.clip((v * (1.0 - s * f)), 0.0, 1.0)
        t = np.clip((v * (1.0 - s * (1.0 - f))), 0.0, 1.0)
        i = i % 6
        # one-hot selection among the six hue sextants
        mask = np.expand_dims(i, axis=-1) == np.arange(6)
        a1 = np.stack((v, q, p, p, t, v), axis=-1)
        a2 = np.stack((t, v, v, q, p, p), axis=-1)
        a3 = np.stack((p, p, t, v, v, q), axis=-1)
        a4 = np.stack((a1, a2, a3), axis=-1)
        return np.einsum("...na, ...nab -> ...nb", mask.astype(hsv.dtype), a4)

    def adjust_brightness(self, color, brightness_factor):
        """Blend with black; factor < 1 darkens, > 1 brightens (then clips)."""
        if brightness_factor < 0:
            raise ValueError(
                "brightness_factor ({}) is not non-negative.".format(brightness_factor)
            )
        return self.blend(color, np.zeros_like(color), brightness_factor)

    def adjust_contrast(self, color, contrast_factor):
        """Blend with the mean gray level of the cloud."""
        if contrast_factor < 0:
            raise ValueError(
                "contrast_factor ({}) is not non-negative.".format(contrast_factor)
            )
        mean = np.mean(RandomColorGrayScale.rgb_to_grayscale(color))
        return self.blend(color, mean, contrast_factor)

    def adjust_saturation(self, color, saturation_factor):
        """Blend with the per-point grayscale version."""
        if saturation_factor < 0:
            raise ValueError(
                "saturation_factor ({}) is not non-negative.".format(saturation_factor)
            )
        gray = RandomColorGrayScale.rgb_to_grayscale(color)
        return self.blend(color, gray, saturation_factor)

    def adjust_hue(self, color, hue_factor):
        """Rotate hue by ``hue_factor`` turns (range [-0.5, 0.5]) via HSV round-trip."""
        if not (-0.5 <= hue_factor <= 0.5):
            raise ValueError(
                "hue_factor ({}) is not in [-0.5, 0.5].".format(hue_factor)
            )
        orig_dtype = color.dtype
        hsv = self.rgb2hsv(color / 255.0)
        h, s, v = hsv[..., 0], hsv[..., 1], hsv[..., 2]
        h = (h + hue_factor) % 1.0
        hsv = np.stack((h, s, v), axis=-1)
        color_hue_adj = (self.hsv2rgb(hsv) * 255.0).astype(orig_dtype)
        return color_hue_adj

    @staticmethod
    def get_params(brightness, contrast, saturation, hue):
        """Sample per-component factors and a random application order (randperm)."""
        fn_idx = torch.randperm(4)
        b = (
            None
            if brightness is None
            else np.random.uniform(brightness[0], brightness[1])
        )
        c = None if contrast is None else np.random.uniform(contrast[0], contrast[1])
        s = (
            None
            if saturation is None
            else np.random.uniform(saturation[0], saturation[1])
        )
        h = None if hue is None else np.random.uniform(hue[0], hue[1])
        return fn_idx, b, c, s, h

    def __call__(self, data_dict):
        # Apply the enabled adjustments in the random order from get_params,
        # each gated independently by probability self.p.
        (
            fn_idx,
            brightness_factor,
            contrast_factor,
            saturation_factor,
            hue_factor,
        ) = self.get_params(self.brightness, self.contrast, self.saturation, self.hue)
        for fn_id in fn_idx:
            if (
                fn_id == 0
                and brightness_factor is not None
                and np.random.rand() < self.p
            ):
                data_dict["color"] = self.adjust_brightness(
                    data_dict["color"], brightness_factor
                )
            elif (
                fn_id == 1 and contrast_factor is not None and np.random.rand() < self.p
            ):
                data_dict["color"] = self.adjust_contrast(
                    data_dict["color"], contrast_factor
                )
            elif (
                fn_id == 2
                and saturation_factor is not None
                and np.random.rand() < self.p
            ):
                data_dict["color"] = self.adjust_saturation(
                    data_dict["color"], saturation_factor
                )
            elif fn_id == 3 and hue_factor is not None and np.random.rand() < self.p:
                data_dict["color"] = self.adjust_hue(data_dict["color"], hue_factor)
        return data_dict
@TRANSFORMS.register_module()
class HueSaturationTranslation(object):
    """Randomly shift hue and scale saturation of the first three color channels."""

    @staticmethod
    def rgb_to_hsv(rgb):
        # Translated from source of colorsys.rgb_to_hsv
        # r,g,b should be a numpy arrays with values between 0 and 255
        # rgb_to_hsv returns an array of floats between 0.0 and 1.0.
        rgb = rgb.astype("float")
        hsv = np.zeros_like(rgb)
        # in case an RGBA array was passed, just copy the A channel
        hsv[..., 3:] = rgb[..., 3:]
        r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2]
        maxc = np.max(rgb[..., :3], axis=-1)
        minc = np.min(rgb[..., :3], axis=-1)
        hsv[..., 2] = maxc
        # saturation is only defined where the pixel is not pure gray
        mask = maxc != minc
        hsv[mask, 1] = (maxc - minc)[mask] / maxc[mask]
        rc = np.zeros_like(r)
        gc = np.zeros_like(g)
        bc = np.zeros_like(b)
        rc[mask] = (maxc - r)[mask] / (maxc - minc)[mask]
        gc[mask] = (maxc - g)[mask] / (maxc - minc)[mask]
        bc[mask] = (maxc - b)[mask] / (maxc - minc)[mask]
        # pick the hue formula according to which channel is the maximum
        hsv[..., 0] = np.select(
            [r == maxc, g == maxc], [bc - gc, 2.0 + rc - bc], default=4.0 + gc - rc
        )
        hsv[..., 0] = (hsv[..., 0] / 6.0) % 1.0
        return hsv

    @staticmethod
    def hsv_to_rgb(hsv):
        # Translated from source of colorsys.hsv_to_rgb
        # h,s should be a numpy arrays with values between 0.0 and 1.0
        # v should be a numpy array with values between 0.0 and 255.0
        # hsv_to_rgb returns an array of uints between 0 and 255.
        rgb = np.empty_like(hsv)
        rgb[..., 3:] = hsv[..., 3:]
        h, s, v = hsv[..., 0], hsv[..., 1], hsv[..., 2]
        i = (h * 6.0).astype("uint8")
        f = (h * 6.0) - i
        p = v * (1.0 - s)
        q = v * (1.0 - s * f)
        t = v * (1.0 - s * (1.0 - f))
        i = i % 6
        # select the RGB formula for each of the six hue sextants
        conditions = [s == 0.0, i == 1, i == 2, i == 3, i == 4, i == 5]
        rgb[..., 0] = np.select(conditions, [v, q, p, p, t, v], default=v)
        rgb[..., 1] = np.select(conditions, [v, v, v, q, p, p], default=t)
        rgb[..., 2] = np.select(conditions, [v, p, t, v, v, q], default=p)
        return rgb.astype("uint8")

    def __init__(self, hue_max=0.5, saturation_max=0.2):
        # hue_max: max absolute hue shift (in turns);
        # saturation_max: max relative saturation change.
        self.hue_max = hue_max
        self.saturation_max = saturation_max

    def __call__(self, data_dict):
        if "color" in data_dict.keys():
            # Assume color[:, :3] is rgb
            hsv = HueSaturationTranslation.rgb_to_hsv(data_dict["color"][:, :3])
            hue_val = (np.random.rand() - 0.5) * 2 * self.hue_max
            sat_ratio = 1 + (np.random.rand() - 0.5) * 2 * self.saturation_max
            hsv[..., 0] = np.remainder(hue_val + hsv[..., 0] + 1, 1)
            hsv[..., 1] = np.clip(sat_ratio * hsv[..., 1], 0, 1)
            data_dict["color"][:, :3] = np.clip(
                HueSaturationTranslation.hsv_to_rgb(hsv), 0, 255
            )
        return data_dict
@TRANSFORMS.register_module()
class RandomColorDrop(object):
    """With probability ``p``, scale colors by ``color_augment`` (0.0 drops color)."""

    def __init__(self, p=0.2, color_augment=0.0):
        self.p = p
        self.color_augment = color_augment

    def __call__(self, data_dict):
        if "color" in data_dict.keys() and np.random.rand() < self.p:
            data_dict["color"] *= self.color_augment  # in-place scaling
        return data_dict

    def __repr__(self):
        return f"RandomColorDrop(color_augment: {self.color_augment}, p: {self.p})"
@TRANSFORMS.register_module()
class ElasticDistortion(object):
    """Apply multi-scale elastic distortion to point coordinates.

    For each ``(granularity, magnitude)`` pair, a random displacement grid is
    smoothed and trilinearly interpolated at every point; the distortion is
    applied with probability 0.95 when enabled.
    """

    def __init__(self, distortion_params=None):
        # default: one coarse and one fine distortion scale
        self.distortion_params = (
            [[0.2, 0.4], [0.8, 1.6]] if distortion_params is None else distortion_params
        )

    @staticmethod
    def elastic_distortion(coords, granularity, magnitude):
        """
        Apply elastic distortion on sparse coordinate space.

        pointcloud: numpy array of (number of points, at least 3 spatial dims)
        granularity: size of the noise grid (in same scale[m/cm] as the voxel grid)
        magnitude: noise multiplier
        """
        blurx = np.ones((3, 1, 1, 1)).astype("float32") / 3
        blury = np.ones((1, 3, 1, 1)).astype("float32") / 3
        blurz = np.ones((1, 1, 3, 1)).astype("float32") / 3
        coords_min = coords.min(0)

        # Create Gaussian noise tensor of the size given by granularity.
        noise_dim = ((coords - coords_min).max(0) // granularity).astype(int) + 3
        noise = np.random.randn(*noise_dim, 3).astype(np.float32)

        # Smoothing: three separable box blurs, applied twice.
        # FIX: call scipy.ndimage.convolve directly — the scipy.ndimage.filters
        # alias namespace is deprecated and removed in SciPy >= 1.15.
        for _ in range(2):
            noise = scipy.ndimage.convolve(noise, blurx, mode="constant", cval=0)
            noise = scipy.ndimage.convolve(noise, blury, mode="constant", cval=0)
            noise = scipy.ndimage.convolve(noise, blurz, mode="constant", cval=0)

        # Trilinear interpolate noise filters for each spatial dimensions.
        ax = [
            np.linspace(d_min, d_max, d)
            for d_min, d_max, d in zip(
                coords_min - granularity,
                coords_min + granularity * (noise_dim - 2),
                noise_dim,
            )
        ]
        interp = scipy.interpolate.RegularGridInterpolator(
            ax, noise, bounds_error=False, fill_value=0
        )
        coords += interp(coords) * magnitude  # in-place displacement
        return coords

    def __call__(self, data_dict):
        if "coord" in data_dict.keys() and self.distortion_params is not None:
            if random.random() < 0.95:
                for granularity, magnitude in self.distortion_params:
                    data_dict["coord"] = self.elastic_distortion(
                        data_dict["coord"], granularity, magnitude
                    )
        return data_dict
@TRANSFORMS.register_module()
class GridSample(object):
def __init__(
self,
grid_size=0.05,
hash_type="fnv",
mode="train",
keys=("coord", "color", "normal", "segment"),
return_inverse=False,
return_grid_coord=False,
return_min_coord=False,
return_displacement=False,
project_displacement=False,
):
self.grid_size = grid_size
self.hash = self.fnv_hash_vec if hash_type == "fnv" else self.ravel_hash_vec
assert mode in ["train", "test"]
self.mode = mode
self.keys = keys
self.return_inverse = return_inverse
self.return_grid_coord = return_grid_coord
self.return_min_coord = return_min_coord
self.return_displacement = return_displacement
self.project_displacement = project_displacement
def __call__(self, data_dict):
assert "coord" in data_dict.keys()
scaled_coord = data_dict["coord"] / np.array(self.grid_size)
grid_coord = np.floor(scaled_coord).astype(int)
min_coord = grid_coord.min(0)
grid_coord -= min_coord
scaled_coord -= min_coord
min_coord = min_coord * np.array(self.grid_size)
key = self.hash(grid_coord)
idx_sort = np.argsort(key)
key_sort = key[idx_sort]
_, inverse, count = np.unique(key_sort, return_inverse=True, return_counts=True)
if self.mode == "train": # train mode
idx_select = (
np.cumsum(np.insert(count, 0, 0)[0:-1])
+ np.random.randint(0, count.max(), count.size) % count
)
idx_unique = idx_sort[idx_select]
if "sampled_index" in data_dict:
# for ScanNet data efficient, we need to make sure labeled point is sampled.
idx_unique = np.unique(
np.append(idx_unique, data_dict["sampled_index"])
)
mask = np.zeros_like(data_dict["segment"]).astype(bool)
mask[data_dict["sampled_index"]] = True
data_dict["sampled_index"] = np.where(mask[idx_unique])[0]
if self.return_inverse:
data_dict["inverse"] = np.zeros_like(inverse)
data_dict["inverse"][idx_sort] = inverse
if self.return_grid_coord:
data_dict["grid_coord"] = grid_coord[idx_unique]
if self.return_min_coord:
data_dict["min_coord"] = min_coord.reshape([1, 3])
if self.return_displacement:
displacement = (
scaled_coord - grid_coord - 0.5
) # [0, 1] -> [-0.5, 0.5] displacement to center
if self.project_displacement:
displacement = np.sum(
displacement * data_dict["normal"], axis=-1, keepdims=True
)
data_dict["displacement"] = displacement[idx_unique]
for key in self.keys:
data_dict[key] = data_dict[key][idx_unique]
return data_dict
elif self.mode == "test": # test mode
data_part_list = []
for i in range(count.max()):
idx_select = np.cumsum(np.insert(count, 0, 0)[0:-1]) + i % count
idx_part = idx_sort[idx_select]
data_part = dict(index=idx_part)
if self.return_inverse:
data_dict["inverse"] = np.zeros_like(inverse)
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | true |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/structure3d.py | pointcept/datasets/structure3d.py | """
Structured3D Datasets
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import os
import glob
from collections.abc import Sequence
from .defaults import DefaultDataset
from .builder import DATASETS
@DATASETS.register_module()
class Structured3DDataset(DefaultDataset):
    """Structured3D dataset; rooms are stored as ``<split>/<scene>/<room>.pth``."""

    def get_data_list(self):
        """Collect every room file under the configured split(s)."""
        if isinstance(self.split, str):
            splits = [self.split]
        elif isinstance(self.split, Sequence):
            splits = self.split
        else:
            raise NotImplementedError
        data_list = []
        for split in splits:
            data_list += glob.glob(os.path.join(self.data_root, split, "*/*.pth"))
        return data_list

    def get_data_name(self, idx):
        """Return a unique sample name of the form ``<scene>_<room>``."""
        file_path = self.data_list[idx % len(self.data_list)]
        scene_name = os.path.basename(os.path.dirname(file_path))
        room_name = os.path.splitext(os.path.basename(file_path))[0]
        return f"{scene_name}_{room_name}"
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/semantic_kitti.py | pointcept/datasets/semantic_kitti.py | """
Semantic KITTI dataset
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import os
import numpy as np
from .builder import DATASETS
from .defaults import DefaultDataset
@DATASETS.register_module()
class SemanticKITTIDataset(DefaultDataset):
    """SemanticKITTI LiDAR segmentation dataset.

    Reads raw ``.bin`` velodyne scans (x, y, z, remission) and the matching
    ``.label`` files, remapping the raw label ids to the 19 training classes
    via :meth:`get_learning_map`.
    """

    def __init__(
        self,
        split="train",
        data_root="data/semantic_kitti",
        transform=None,
        test_mode=False,
        test_cfg=None,
        loop=1,
        ignore_index=-1,
    ):
        """Build the label remapping tables, then defer to DefaultDataset."""
        self.ignore_index = ignore_index
        # raw label id -> train id, and its inverse (train id -> raw id).
        self.learning_map = self.get_learning_map(ignore_index)
        self.learning_map_inv = self.get_learning_map_inv(ignore_index)
        super().__init__(
            split=split,
            data_root=data_root,
            transform=transform,
            test_mode=test_mode,
            test_cfg=test_cfg,
            loop=loop,
        )

    def get_data_list(self):
        """List every velodyne scan in the sequences of the chosen split(s)."""
        # Standard SemanticKITTI sequence split.
        split2seq = dict(
            train=[0, 1, 2, 3, 4, 5, 6, 7, 9, 10],
            val=[8],
            test=[11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21],
        )
        if isinstance(self.split, str):
            seq_list = split2seq[self.split]
        elif isinstance(self.split, list):
            seq_list = []
            for split in self.split:
                seq_list += split2seq[split]
        else:
            raise NotImplementedError
        data_list = []
        for seq in seq_list:
            seq = str(seq).zfill(2)
            seq_folder = os.path.join(self.data_root, "dataset", "sequences", seq)
            seq_files = sorted(os.listdir(os.path.join(seq_folder, "velodyne")))
            data_list += [
                os.path.join(seq_folder, "velodyne", file) for file in seq_files
            ]
        return data_list

    def get_data(self, idx):
        """Load one scan as dict(coord, strength, segment).

        ``segment`` is all zeros when no label file exists (test sequences).
        """
        data_path = self.data_list[idx % len(self.data_list)]
        with open(data_path, "rb") as b:
            # Each point is (x, y, z, remission) as float32.
            scan = np.fromfile(b, dtype=np.float32).reshape(-1, 4)
        coord = scan[:, :3]
        strength = scan[:, -1].reshape([-1, 1])
        label_file = data_path.replace("velodyne", "labels").replace(".bin", ".label")
        if os.path.exists(label_file):
            with open(label_file, "rb") as a:
                segment = np.fromfile(a, dtype=np.int32).reshape(-1)
                # Keep the lower 16 bits only (the upper bits presumably carry
                # the instance id, per the SemanticKITTI label format), then
                # remap raw ids to train ids.
                segment = np.vectorize(self.learning_map.__getitem__)(
                    segment & 0xFFFF
                ).astype(np.int32)
        else:
            segment = np.zeros(scan.shape[0]).astype(np.int32)
        data_dict = dict(coord=coord, strength=strength, segment=segment)
        return data_dict

    def get_data_name(self, idx):
        """Return a unique sample name of the form ``<sequence>_<frame>``."""
        file_path = self.data_list[idx % len(self.data_list)]
        dir_path, file_name = os.path.split(file_path)
        sequence_name = os.path.basename(os.path.dirname(dir_path))
        frame_name = os.path.splitext(file_name)[0]
        data_name = f"{sequence_name}_{frame_name}"
        return data_name

    @staticmethod
    def get_learning_map(ignore_index):
        """Map raw SemanticKITTI ids to 19 train classes (moving classes merged)."""
        learning_map = {
            0: ignore_index,  # "unlabeled"
            1: ignore_index,  # "outlier" mapped to "unlabeled" --------------------------mapped
            10: 0,  # "car"
            11: 1,  # "bicycle"
            13: 4,  # "bus" mapped to "other-vehicle" --------------------------mapped
            15: 2,  # "motorcycle"
            16: 4,  # "on-rails" mapped to "other-vehicle" ---------------------mapped
            18: 3,  # "truck"
            20: 4,  # "other-vehicle"
            30: 5,  # "person"
            31: 6,  # "bicyclist"
            32: 7,  # "motorcyclist"
            40: 8,  # "road"
            44: 9,  # "parking"
            48: 10,  # "sidewalk"
            49: 11,  # "other-ground"
            50: 12,  # "building"
            51: 13,  # "fence"
            52: ignore_index,  # "other-structure" mapped to "unlabeled" ------------------mapped
            60: 8,  # "lane-marking" to "road" ---------------------------------mapped
            70: 14,  # "vegetation"
            71: 15,  # "trunk"
            72: 16,  # "terrain"
            80: 17,  # "pole"
            81: 18,  # "traffic-sign"
            99: ignore_index,  # "other-object" to "unlabeled" ----------------------------mapped
            252: 0,  # "moving-car" to "car" ------------------------------------mapped
            253: 6,  # "moving-bicyclist" to "bicyclist" ------------------------mapped
            254: 5,  # "moving-person" to "person" ------------------------------mapped
            255: 7,  # "moving-motorcyclist" to "motorcyclist" ------------------mapped
            256: 4,  # "moving-on-rails" mapped to "other-vehicle" --------------mapped
            257: 4,  # "moving-bus" mapped to "other-vehicle" -------------------mapped
            258: 3,  # "moving-truck" to "truck" --------------------------------mapped
            259: 4,  # "moving-other"-vehicle to "other-vehicle" ----------------mapped
        }
        return learning_map

    @staticmethod
    def get_learning_map_inv(ignore_index):
        """Map train ids back to a representative raw SemanticKITTI id."""
        learning_map_inv = {
            ignore_index: ignore_index,  # "unlabeled"
            0: 10,  # "car"
            1: 11,  # "bicycle"
            2: 15,  # "motorcycle"
            3: 18,  # "truck"
            4: 20,  # "other-vehicle"
            5: 30,  # "person"
            6: 31,  # "bicyclist"
            7: 32,  # "motorcyclist"
            8: 40,  # "road"
            9: 44,  # "parking"
            10: 48,  # "sidewalk"
            11: 49,  # "other-ground"
            12: 50,  # "building"
            13: 51,  # "fence"
            14: 70,  # "vegetation"
            15: 71,  # "trunk"
            16: 72,  # "terrain"
            17: 80,  # "pole"
            18: 81,  # "traffic-sign"
        }
        return learning_map_inv
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/utils.py | pointcept/datasets/utils.py | """
Utils for Datasets
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import random
from collections.abc import Mapping, Sequence
import numpy as np
import torch
from torch.utils.data.dataloader import default_collate
def collate_fn(batch):
    """
    collate function for point cloud which support dict and list,
    'coord' is necessary to determine 'offset'
    """
    if not isinstance(batch, Sequence):
        # Fixed: f"{batch.dtype}" raised AttributeError for inputs without a
        # `dtype` attribute (e.g. dict), masking the intended TypeError.
        raise TypeError(f"{type(batch)} is not supported.")
    if isinstance(batch[0], torch.Tensor):
        # Tensors are concatenated along dim 0 (point clouds, not stacked).
        return torch.cat(list(batch))
    elif isinstance(batch[0], str):
        # str is also a kind of Sequence, judgement should before Sequence
        return list(batch)
    elif isinstance(batch[0], Sequence):
        # List-style samples: append each sample's point count, collate each
        # field recursively, then turn the counts into cumulative offsets.
        for data in batch:
            data.append(torch.tensor([data[0].shape[0]]))
        batch = [collate_fn(samples) for samples in zip(*batch)]
        batch[-1] = torch.cumsum(batch[-1], dim=0).int()
        return batch
    elif isinstance(batch[0], Mapping):
        batch = {key: collate_fn([d[key] for d in batch]) for key in batch[0]}
        for key in batch.keys():
            if "offset" in key:
                # Per-sample sizes become running offsets into the merged cloud.
                batch[key] = torch.cumsum(batch[key], dim=0)
        return batch
    else:
        return default_collate(batch)
def point_collate_fn(batch, mix_prob=0):
    """Collate a batch of point-cloud dicts, optionally applying Mix3D.

    Mix3D (https://arxiv.org/pdf/2110.02210.pdf) fuses consecutive scenes by
    dropping every other intermediate offset with probability ``mix_prob``.
    """
    # Only dict-style samples are supported (not list-style).
    assert isinstance(batch[0], Mapping)
    collated = collate_fn(batch)
    if "offset" in collated and random.random() < mix_prob:
        collated["offset"] = torch.cat(
            [collated["offset"][1:-1:2], collated["offset"][-1].unsqueeze(0)], dim=0
        )
    return collated
def gaussian_kernel(dist2: np.ndarray, a: float = 1, c: float = 5):
    """Evaluate an unnormalized Gaussian on squared distances.

    Fixed: the annotation used ``np.array`` (a factory function), not the
    actual array type ``np.ndarray``.

    Args:
        dist2: squared distance(s); scalar or ndarray.
        a: peak amplitude at ``dist2 == 0``.
        c: bandwidth (standard deviation) of the Gaussian.

    Returns:
        ``a * exp(-dist2 / (2 * c**2))`` with the same shape as ``dist2``.
    """
    return a * np.exp(-dist2 / (2 * c**2))
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/waymo.py | pointcept/datasets/waymo.py | """
Waymo dataset
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import os
import numpy as np
import glob
from .builder import DATASETS
from .defaults import DefaultDataset
@DATASETS.register_module()
class WaymoDataset(DefaultDataset):
    """Waymo LiDAR segmentation dataset (``.bin`` scans + optional labels)."""

    def __init__(
        self,
        split="training",
        data_root="data/waymo",
        transform=None,
        test_mode=False,
        test_cfg=None,
        loop=1,
        ignore_index=-1,
    ):
        self.ignore_index = ignore_index
        super().__init__(
            split=split,
            data_root=data_root,
            transform=transform,
            test_mode=test_mode,
            test_cfg=test_cfg,
            loop=loop,
        )

    def get_data_list(self):
        """List every velodyne scan in the configured split(s)."""
        # Normalize a single split name to a list (kept on self, as before).
        if isinstance(self.split, str):
            self.split = [self.split]
        paths = []
        for split in self.split:
            pattern = os.path.join(self.data_root, split, "*", "velodyne", "*.bin")
            paths.extend(glob.glob(pattern))
        return paths

    def get_data(self, idx):
        """Load one scan as dict(coord, strength, segment)."""
        data_path = self.data_list[idx % len(self.data_list)]
        with open(data_path, "rb") as fh:
            scan = np.fromfile(fh, dtype=np.float32).reshape(-1, 4)
        coord = scan[:, :3]
        # Squash the raw intensity channel into (-1, 1) with tanh.
        strength = np.tanh(scan[:, -1].reshape([-1, 1]))
        label_file = data_path.replace("velodyne", "labels").replace(".bin", ".label")
        if os.path.exists(label_file):
            with open(label_file, "rb") as fh:
                raw = np.fromfile(fh, dtype=np.int32).reshape(-1, 2)
            # Shift labels down by one so raw label 0 becomes ignore (-1).
            segment = raw[:, 1] - 1
        else:
            segment = np.zeros(scan.shape[0]).astype(np.int32)
        return dict(coord=coord, strength=strength, segment=segment)

    def get_data_name(self, idx):
        """Return a unique sample name of the form ``<sequence>_<frame>``."""
        file_path = self.data_list[idx % len(self.data_list)]
        sequence_name = os.path.basename(os.path.dirname(os.path.dirname(file_path)))
        frame_name = os.path.splitext(os.path.basename(file_path))[0]
        return f"{sequence_name}_{frame_name}"
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/__init__.py | pointcept/datasets/__init__.py | from .defaults import DefaultDataset, ConcatDataset
from .builder import build_dataset
from .utils import point_collate_fn, collate_fn
# indoor scene
from .s3dis import S3DISDataset
from .scannet import ScanNetDataset, ScanNet200Dataset
from .scannet_pair import ScanNetPairDataset
from .arkitscenes import ArkitScenesDataset
from .structure3d import Structured3DDataset
# outdoor scene
from .semantic_kitti import SemanticKITTIDataset
from .nuscenes import NuScenesDataset
from .waymo import WaymoDataset
# object
from .modelnet import ModelNetDataset
from .shapenet_part import ShapeNetPartDataset
# dataloader
from .dataloader import MultiDatasetDataloader
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/dataloader.py | pointcept/datasets/dataloader.py | from functools import partial
import weakref
import torch
import torch.utils.data
import pointcept.utils.comm as comm
from pointcept.datasets.utils import point_collate_fn
from pointcept.datasets import ConcatDataset
from pointcept.utils.env import set_seed
class MultiDatasetDummySampler:
    """Placeholder sampler that fans ``set_epoch`` out to every sub-dataloader."""

    def __init__(self):
        # Filled in by MultiDatasetDataloader with a weak proxy to itself.
        self.dataloader = None

    def set_epoch(self, epoch):
        """Forward the epoch to each distributed sampler (multi-GPU only)."""
        if comm.get_world_size() > 1:
            for loader in self.dataloader.dataloaders:
                loader.sampler.set_epoch(epoch)
class MultiDatasetDataloader:
    """
    Multiple Datasets Dataloader, batch data from a same dataset and mix up ratio determined by loop of each sub dataset.
    The overall length is determined by the main dataset (first) and loop of concat dataset.
    """

    def __init__(
        self,
        concat_dataset: ConcatDataset,
        batch_size_per_gpu: int,
        num_worker_per_gpu: int,
        mix_prob=0,
        seed=None,
    ):
        """Wrap each sub-dataset of ``concat_dataset`` in its own DataLoader.

        Each sub-dataset's original ``loop`` becomes its mixing ratio; the
        first (main) dataset keeps ``concat_dataset.loop`` as the epoch length
        multiplier.
        """
        self.datasets = concat_dataset.datasets
        self.ratios = [dataset.loop for dataset in self.datasets]
        # reset data loop, original loop serve as ratios
        for dataset in self.datasets:
            dataset.loop = 1
        # determine union training epoch by main dataset
        self.datasets[0].loop = concat_dataset.loop
        # build sub-dataloaders
        # NOTE(review): this divided worker count is only used for worker
        # seeding below, while each DataLoader is created with the full
        # num_worker_per_gpu — confirm which value is intended.
        num_workers = num_worker_per_gpu // len(self.datasets)
        self.dataloaders = []
        for dataset_id, dataset in enumerate(self.datasets):
            if comm.get_world_size() > 1:
                sampler = torch.utils.data.distributed.DistributedSampler(dataset)
            else:
                sampler = None

            # Deterministic per-worker seeding, only when a seed is given.
            init_fn = (
                partial(
                    self._worker_init_fn,
                    dataset_id=dataset_id,
                    num_workers=num_workers,
                    num_datasets=len(self.datasets),
                    rank=comm.get_rank(),
                    seed=seed,
                )
                if seed is not None
                else None
            )
            self.dataloaders.append(
                torch.utils.data.DataLoader(
                    dataset,
                    batch_size=batch_size_per_gpu,
                    shuffle=(sampler is None),
                    num_workers=num_worker_per_gpu,
                    sampler=sampler,
                    collate_fn=partial(point_collate_fn, mix_prob=mix_prob),
                    pin_memory=True,
                    worker_init_fn=init_fn,
                    drop_last=True,
                    persistent_workers=True,
                )
            )
        # Weak proxy avoids a reference cycle between loader and sampler.
        self.sampler = MultiDatasetDummySampler()
        self.sampler.dataloader = weakref.proxy(self)

    def __iter__(self):
        """Yield batches round-robin: ``ratios[i]`` consecutive batches from
        loader ``i``. Iteration ends when the main (first) loader is
        exhausted; the other loaders are restarted so their ratios hold."""
        iterator = [iter(dataloader) for dataloader in self.dataloaders]
        while True:
            for i in range(len(self.ratios)):
                for _ in range(self.ratios[i]):
                    try:
                        batch = next(iterator[i])
                    except StopIteration:
                        if i == 0:
                            # Main dataset defines the epoch boundary.
                            return
                        else:
                            iterator[i] = iter(self.dataloaders[i])
                            batch = next(iterator[i])
                    yield batch

    def __len__(self):
        """Total batches per epoch: one full round of all ratios for every
        ``ratios[0]``-sized chunk of the main loader, plus its remainder."""
        main_data_loader_length = len(self.dataloaders[0])
        return (
            main_data_loader_length // self.ratios[0] * sum(self.ratios)
            + main_data_loader_length % self.ratios[0]
        )

    @staticmethod
    def _worker_init_fn(worker_id, num_workers, dataset_id, num_datasets, rank, seed):
        """Seed each worker uniquely from (rank, dataset_id, worker_id, seed)."""
        worker_seed = (
            num_workers * num_datasets * rank
            + num_workers * dataset_id
            + worker_id
            + seed
        )
        set_seed(worker_seed)
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/shapenet_part.py | pointcept/datasets/shapenet_part.py | """
ShapeNet Part Dataset (Unmaintained)
get processed shapenet part dataset
at "https://shapenet.cs.stanford.edu/media/shapenetcore_partanno_segmentation_benchmark_v0_normal.zip"
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import os
import json
import torch
import numpy as np
from copy import deepcopy
from torch.utils.data import Dataset
from pointcept.utils.logger import get_root_logger
from .builder import DATASETS
from .transform import Compose
@DATASETS.register_module()
class ShapeNetPartDataset(Dataset):
    """ShapeNet Part segmentation dataset (unmaintained).

    Loads the processed ShapeNetPart text files (xyz + normal + part label)
    and serves transformed train samples, or lists of augmented fragments
    (plus the raw ``segment`` labels) in test mode.

    Fixed: the original referenced ``self.data_idx``, which was never
    assigned (only ``self.data_list`` exists), so ``__len__``,
    ``get_data_name`` and ``prepare_test_data`` raised AttributeError.
    All indexing now goes through ``self.data_list``.
    """

    def __init__(
        self,
        split="train",
        data_root="data/shapenetcore_partanno_segmentation_benchmark_v0_normal",
        transform=None,
        test_mode=False,
        test_cfg=None,
        loop=1,
    ):
        super(ShapeNetPartDataset, self).__init__()
        self.data_root = data_root
        self.split = split
        self.transform = Compose(transform)
        # Force loop = 1 in test mode so every sample is visited exactly once.
        self.loop = loop if not test_mode else 1
        self.test_mode = test_mode
        self.test_cfg = test_cfg if test_mode else None
        # In-memory cache: data index -> (coord, norm, segment, cls_token).
        self.cache = {}

        # load categories file
        self.categories = []
        # Fixed global part-label ranges per category.
        self.category2part = {
            "Airplane": [0, 1, 2, 3],
            "Bag": [4, 5],
            "Cap": [6, 7],
            "Car": [8, 9, 10, 11],
            "Chair": [12, 13, 14, 15],
            "Earphone": [16, 17, 18],
            "Guitar": [19, 20, 21],
            "Knife": [22, 23],
            "Lamp": [24, 25, 26, 27],
            "Laptop": [28, 29],
            "Motorbike": [30, 31, 32, 33, 34, 35],
            "Mug": [36, 37],
            "Pistol": [38, 39, 40],
            "Rocket": [41, 42, 43],
            "Skateboard": [44, 45, 46],
            "Table": [47, 48, 49],
        }
        # Map each synset token (directory name) to a category index.
        self.token2category = {}
        with open(os.path.join(self.data_root, "synsetoffset2category.txt"), "r") as f:
            for line in f:
                ls = line.strip().split()
                self.token2category[ls[1]] = len(self.categories)
                self.categories.append(ls[0])

        if test_mode:
            self.post_transform = Compose(self.test_cfg.post_transform)
            self.aug_transform = [Compose(aug) for aug in self.test_cfg.aug_transform]

        # load data list
        if isinstance(self.split, str):
            self.data_list = self.load_data_list(self.split)
        elif isinstance(self.split, list):
            self.data_list = []
            for s in self.split:
                self.data_list += self.load_data_list(s)
        else:
            raise NotImplementedError

        logger = get_root_logger()
        logger.info(
            "Totally {} x {} samples in {} set.".format(
                len(self.data_list), self.loop, split
            )
        )

    def load_data_list(self, split):
        """Resolve the official shuffled split file to absolute sample paths."""
        split_file = os.path.join(
            self.data_root,
            "train_test_split",
            "shuffled_{}_file_list.json".format(split),
        )
        if not os.path.isfile(split_file):
            raise (RuntimeError("Split file do not exist: " + split_file + "\n"))
        with open(split_file, "r") as f:
            # drop "shape_data/" and append ".txt"
            data_list = [
                os.path.join(self.data_root, data[11:] + ".txt")
                for data in json.load(f)
            ]
        return data_list

    def prepare_train_data(self, idx):
        """Load (with caching) and transform one training sample."""
        data_idx = idx % len(self.data_list)
        if data_idx in self.cache:
            coord, norm, segment, cls_token = self.cache[data_idx]
        else:
            data = np.loadtxt(self.data_list[data_idx]).astype(np.float32)
            cls_token = self.token2category[
                os.path.basename(os.path.dirname(self.data_list[data_idx]))
            ]
            coord, norm, segment = (
                data[:, :3],
                data[:, 3:6],
                data[:, 6].astype(np.int32),
            )
            self.cache[data_idx] = (coord, norm, segment, cls_token)

        data_dict = dict(coord=coord, norm=norm, segment=segment, cls_token=cls_token)
        data_dict = self.transform(data_dict)
        return data_dict

    def prepare_test_data(self, idx):
        """Load one sample and expand it into post-transformed augmented views."""
        data_idx = idx % len(self.data_list)
        data = np.loadtxt(self.data_list[data_idx]).astype(np.float32)
        cls_token = self.token2category[
            os.path.basename(os.path.dirname(self.data_list[data_idx]))
        ]
        coord, norm, segment = data[:, :3], data[:, 3:6], data[:, 6].astype(np.int32)

        data_dict = dict(coord=coord, norm=norm, cls_token=cls_token)
        data_dict = self.transform(data_dict)
        data_dict_list = []
        for aug in self.aug_transform:
            data_dict_list.append(self.post_transform(aug(deepcopy(data_dict))))
        data_dict = dict(
            fragment_list=data_dict_list, segment=segment, name=self.get_data_name(idx)
        )
        return data_dict

    def get_data_name(self, idx):
        """Return the sample file name without its extension."""
        data_idx = idx % len(self.data_list)
        return os.path.basename(self.data_list[data_idx]).split(".")[0]

    def __getitem__(self, idx):
        if self.test_mode:
            return self.prepare_test_data(idx)
        else:
            return self.prepare_train_data(idx)

    def __len__(self):
        return len(self.data_list) * self.loop
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/s3dis.py | pointcept/datasets/s3dis.py | """
S3DIS Dataset
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import os
import glob
import numpy as np
import torch
from copy import deepcopy
from torch.utils.data import Dataset
from collections.abc import Sequence
from pointcept.utils.logger import get_root_logger
from pointcept.utils.cache import shared_dict
from .builder import DATASETS
from .transform import Compose, TRANSFORMS
@DATASETS.register_module()
class S3DISDataset(Dataset):
    """S3DIS semantic/instance segmentation dataset.

    Loads preprocessed per-room ``.pth`` files (coord/color and, when
    available, semantic_gt/instance_gt/normal). In test mode each room is
    expanded into voxelized (optionally cropped) fragments plus the raw
    ``segment`` labels for evaluation.
    """

    def __init__(
        self,
        split=("Area_1", "Area_2", "Area_3", "Area_4", "Area_6"),
        data_root="data/s3dis",
        transform=None,
        test_mode=False,
        test_cfg=None,
        cache=False,
        loop=1,
    ):
        """Args:
            split: area name or sequence of area names (default: all but Area_5).
            data_root: root directory holding one folder per area.
            transform: train/val transform pipeline config.
            test_mode: enable fragment-based test-time pipeline.
            test_cfg: config with voxelize/crop/post_transform/aug_transform.
            cache: read samples from a shared in-memory dict instead of disk.
            loop: dataset repetition factor (training only).
        """
        super(S3DISDataset, self).__init__()
        self.data_root = data_root
        self.split = split
        self.transform = Compose(transform)
        self.cache = cache
        self.loop = (
            loop if not test_mode else 1
        )  # force make loop = 1 while in test mode
        self.test_mode = test_mode
        self.test_cfg = test_cfg if test_mode else None

        if test_mode:
            self.test_voxelize = TRANSFORMS.build(self.test_cfg.voxelize)
            self.test_crop = (
                TRANSFORMS.build(self.test_cfg.crop) if self.test_cfg.crop else None
            )
            self.post_transform = Compose(self.test_cfg.post_transform)
            self.aug_transform = [Compose(aug) for aug in self.test_cfg.aug_transform]

        self.data_list = self.get_data_list()
        logger = get_root_logger()
        logger.info(
            "Totally {} x {} samples in {} set.".format(
                len(self.data_list), self.loop, split
            )
        )

    def get_data_list(self):
        """Collect every room file under the configured area(s)."""
        if isinstance(self.split, str):
            data_list = glob.glob(os.path.join(self.data_root, self.split, "*.pth"))
        elif isinstance(self.split, Sequence):
            data_list = []
            for split in self.split:
                data_list += glob.glob(os.path.join(self.data_root, split, "*.pth"))
        else:
            raise NotImplementedError
        return data_list

    def get_data(self, idx):
        """Load one room as a dict; missing labels are filled with -1."""
        data_path = self.data_list[idx % len(self.data_list)]
        if not self.cache:
            data = torch.load(data_path)
        else:
            # Key a process-shared cache by the path relative to the data root.
            data_name = data_path.replace(os.path.dirname(self.data_root), "").split(
                "."
            )[0]
            cache_name = "pointcept" + data_name.replace(os.path.sep, "-")
            data = shared_dict(cache_name)
        # NOTE(review): presumably turns a camel-case room tag into words,
        # e.g. "conferenceRoom_1.pth" -> "conference room" — confirm intent.
        name = (
            os.path.basename(self.data_list[idx % len(self.data_list)])
            .split("_")[0]
            .replace("R", " r")
        )
        coord = data["coord"]
        color = data["color"]
        scene_id = data_path
        if "semantic_gt" in data.keys():
            segment = data["semantic_gt"].reshape([-1])
        else:
            segment = np.ones(coord.shape[0]) * -1
        if "instance_gt" in data.keys():
            instance = data["instance_gt"].reshape([-1])
        else:
            instance = np.ones(coord.shape[0]) * -1
        data_dict = dict(
            name=name,
            coord=coord,
            color=color,
            segment=segment,
            instance=instance,
            scene_id=scene_id,
        )
        if "normal" in data.keys():
            data_dict["normal"] = data["normal"]
        return data_dict

    def get_data_name(self, idx):
        """Return the room file name without its extension."""
        return os.path.basename(self.data_list[idx % len(self.data_list)]).split(".")[0]

    def prepare_train_data(self, idx):
        """Load and transform one training sample."""
        # load data
        data_dict = self.get_data(idx)
        data_dict = self.transform(data_dict)
        return data_dict

    def prepare_test_data(self, idx):
        """Expand one room into post-transformed voxelized (cropped) fragments.

        Labels are popped before transforms and returned separately so that
        predictions over fragments can be re-assembled and scored.
        """
        # load data
        data_dict = self.get_data(idx)
        segment = data_dict.pop("segment")
        data_dict = self.transform(data_dict)
        data_dict_list = []
        for aug in self.aug_transform:
            data_dict_list.append(aug(deepcopy(data_dict)))

        input_dict_list = []
        for data in data_dict_list:
            data_part_list = self.test_voxelize(data)
            for data_part in data_part_list:
                if self.test_crop:
                    data_part = self.test_crop(data_part)
                else:
                    data_part = [data_part]
                input_dict_list += data_part

        for i in range(len(input_dict_list)):
            input_dict_list[i] = self.post_transform(input_dict_list[i])
        data_dict = dict(
            fragment_list=input_dict_list, segment=segment, name=self.get_data_name(idx)
        )
        return data_dict

    def __getitem__(self, idx):
        if self.test_mode:
            return self.prepare_test_data(idx)
        else:
            return self.prepare_train_data(idx)

    def __len__(self):
        return len(self.data_list) * self.loop
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/scannet_pair.py | pointcept/datasets/scannet_pair.py | """
ScanNet Pair Dataset (Frame-level contrastive view)
Refer PointContrast
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import os
import glob
import numpy as np
import torch
from copy import deepcopy
from torch.utils.data import Dataset
from pointcept.utils.logger import get_root_logger
from .builder import DATASETS
from .transform import Compose, TRANSFORMS
@DATASETS.register_module()
class ScanNetPairDataset(Dataset):
    """Frame-pair dataset for contrastive pretraining (PointContrast-style).

    Each sample is a pair of partial scans of the same scene whose overlap
    ratio exceeds ``overlap_threshold``; the two views are transformed
    independently and merged into one dict with "view1_"/"view2_" prefixes.
    """

    def __init__(
        self,
        data_root="data/scannet_pair",
        overlap_threshold=0.3,
        view1_transform=None,
        view2_transform=None,
        loop=1,
        **kwargs
    ):
        super(ScanNetPairDataset, self).__init__()
        self.data_root = data_root
        self.overlap_threshold = overlap_threshold
        self.view1_transform = Compose(view1_transform)
        self.view2_transform = Compose(view2_transform)
        self.loop = loop
        self.data_list = self.get_data_list()
        logger = get_root_logger()
        logger.info("Totally {} x {} samples.".format(len(self.data_list), self.loop))

    def get_data_list(self):
        """Parse every scene's overlap.txt into [path1, path2] pairs.

        Each line is "path1 path2 overlap"; only pairs above the overlap
        threshold are kept.
        """
        data_list = []
        overlap_list = glob.glob(
            os.path.join(self.data_root, "*", "pcd", "overlap.txt")
        )
        for overlap_file in overlap_list:
            with open(overlap_file) as f:
                overlap = f.readlines()
            overlap = [pair.strip().split() for pair in overlap]
            data_list.extend(
                [
                    pair[:2]
                    for pair in overlap
                    if float(pair[2]) > self.overlap_threshold
                ]
            )
        return data_list

    def get_data(self, idx):
        """Load both views of the idx-th pair.

        NOTE(review): plain string concatenation relies on the stored paths
        starting with a path separator; os.path.join would be safer —
        confirm the overlap.txt path format.
        """
        pair = self.data_list[idx % len(self.data_list)]
        view1_dict = torch.load(self.data_root + pair[0])
        view2_dict = torch.load(self.data_root + pair[1])
        return view1_dict, view2_dict

    def get_data_name(self, idx):
        # NOTE(review): data_list entries are [path1, path2] lists, so
        # os.path.basename on one would raise TypeError — confirm whether
        # this method is ever called.
        return os.path.basename(self.data_list[idx % len(self.data_list)]).split(".")[0]

    def prepare_train_data(self, idx):
        """Transform each view independently and merge with key prefixes."""
        # load data
        view1_dict, view2_dict = self.get_data(idx)
        view1_dict = self.view1_transform(view1_dict)
        view2_dict = self.view2_transform(view2_dict)
        data_dict = dict()
        for key, value in view1_dict.items():
            data_dict["view1_" + key] = value
        for key, value in view2_dict.items():
            data_dict["view2_" + key] = value
        return data_dict

    def prepare_test_data(self, idx):
        # Test-time pipeline is intentionally unsupported for pair pretraining.
        raise NotImplementedError

    def __getitem__(self, idx):
        return self.prepare_train_data(idx)

    def __len__(self):
        return len(self.data_list) * self.loop
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/builder.py | pointcept/datasets/builder.py | """
Dataset Builder
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
from pointcept.utils.registry import Registry
# Global registry mapping dataset type names to their classes.
DATASETS = Registry("datasets")


def build_dataset(cfg):
    """Build a dataset instance from its registry config dict."""
    return DATASETS.build(cfg)
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/preprocessing/s3dis/preprocess_s3dis_voxelized.py | pointcept/datasets/preprocessing/s3dis/preprocess_s3dis_voxelized.py | """
Preprocessing Script for S3DIS
Parsing normal vectors has a large consumption of memory. Please reduce max_workers if memory is limited.
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import os
import argparse
import glob
import torch
import numpy as np
import multiprocessing as mp
from concurrent.futures import ProcessPoolExecutor
from itertools import repeat
from pointcept.datasets.transform import GridSample
def voxelize_parser(data_path, dataset_root, output_root, voxel_size):
    """Voxel-subsample one ``.pth`` sample and save it under the output root.

    The output keeps the same relative path as the input; intermediate
    directories are created on demand.
    """
    print(f"Parsing data: {data_path}")
    out_path = data_path.replace(dataset_root, output_root)
    os.makedirs(os.path.dirname(out_path), exist_ok=True)
    sample = torch.load(data_path)
    sampler = GridSample(
        grid_size=voxel_size, hash_type="fnv", mode="train", keys=sample.keys()
    )
    torch.save(sampler(sample), out_path)
def main_process():
    """Voxelize every processed S3DIS sample in parallel.

    CLI args:
        --dataset_root: path to the processed S3DIS dataset.
        --output_root: destination for the voxelized copies.
        --voxel_size: grid size passed to GridSample (default 0.01).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dataset_root", required=True, help="Path to processed S3DIS dataset"
    )
    parser.add_argument(
        "--output_root",
        required=True,
        help="Output path where area folders will be located",
    )
    parser.add_argument(
        "--voxel_size", default=0.01, type=float, help="Voxel size for voxelization"
    )
    args = parser.parse_args()

    data_list = glob.glob(os.path.join(args.dataset_root, "*/*.pth"))
    # Preprocess data.
    print("Processing scenes...")
    # Context manager guarantees the worker pool is shut down even if a
    # task raises (the original executor was never closed).
    with ProcessPoolExecutor(max_workers=mp.cpu_count()) as pool:
        _ = list(
            pool.map(
                voxelize_parser,
                data_list,
                repeat(args.dataset_root),
                repeat(args.output_root),
                repeat(args.voxel_size),
            )
        )


if __name__ == "__main__":
    main_process()
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/preprocessing/s3dis/preprocess_s3dis.py | pointcept/datasets/preprocessing/s3dis/preprocess_s3dis.py | """
Preprocessing Script for S3DIS
Parsing normal vectors has a large consumption of memory. Please reduce max_workers if memory is limited.
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import os
import argparse
import glob
import torch
import numpy as np
import multiprocessing as mp
try:
import open3d
except ImportError:
import warnings
warnings.warn("Please install open3d for parsing normal")
try:
import trimesh
except ImportError:
import warnings
warnings.warn("Please install trimesh for parsing normal")
from concurrent.futures import ProcessPoolExecutor
from itertools import repeat
# Cache of per-area raw meshes, keyed by "Area_{i}". Populated in
# main_process() when --parse_normal is set and read by parse_room()
# (NOTE: workers presumably inherit it via process fork — confirm on
# platforms that spawn instead of fork).
area_mesh_dict = {}
def parse_room(
    room, angle, dataset_root, output_root, align_angle=True, parse_normal=False
):
    """Convert one annotated S3DIS room into a single ``.pth`` sample.

    Concatenates the room's per-object annotation files into coord/color
    arrays with semantic and instance labels, optionally estimates per-point
    normals from the raw area mesh, optionally rotates the room back by its
    alignment angle, and saves the result under ``output_root``.

    Args:
        room: relative room path, e.g. "Area_1/office_1".
        angle: room alignment angle in degrees (from *_alignmentAngle.txt).
        dataset_root: root of Stanford3dDataset_v1.2.
        output_root: destination root for the ``.pth`` files.
        align_angle: rotate coordinates (and normals) by -angle if True.
        parse_normal: sample normals from the raw mesh in ``area_mesh_dict``.
    """
    print("Parsing: {}".format(room))
    classes = [
        "ceiling",
        "floor",
        "wall",
        "beam",
        "column",
        "window",
        "door",
        "table",
        "chair",
        "sofa",
        "bookcase",
        "board",
        "clutter",
    ]
    class2label = {cls: i for i, cls in enumerate(classes)}
    source_dir = os.path.join(dataset_root, room)
    save_path = os.path.join(output_root, room) + ".pth"
    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    object_path_list = sorted(glob.glob(os.path.join(source_dir, "Annotations/*.txt")))

    room_coords = []
    room_colors = []
    room_normals = []
    room_semantic_gt = []
    room_instance_gt = []

    for object_id, object_path in enumerate(object_path_list):
        object_name = os.path.basename(object_path).split("_")[0]
        obj = np.loadtxt(object_path)
        coords = obj[:, :3]
        colors = obj[:, 3:6]
        # note: in some room there is 'stairs' class
        class_name = object_name if object_name in classes else "clutter"
        semantic_gt = np.repeat(class2label[class_name], coords.shape[0])
        semantic_gt = semantic_gt.reshape([-1, 1])
        # Instance id is simply the object's index within the room.
        instance_gt = np.repeat(object_id, coords.shape[0])
        instance_gt = instance_gt.reshape([-1, 1])

        room_coords.append(coords)
        room_colors.append(colors)
        room_semantic_gt.append(semantic_gt)
        room_instance_gt.append(instance_gt)

    room_coords = np.ascontiguousarray(np.vstack(room_coords))

    if parse_normal:
        # NOTE(review): min/max are unpacked as (x, z, y) and z is negated —
        # presumably converting between the point coordinate frame and the
        # raw mesh frame (see the transform matrix below); confirm.
        x_min, z_max, y_min = np.min(room_coords, axis=0)
        x_max, z_min, y_max = np.max(room_coords, axis=0)
        z_max = -z_max
        z_min = -z_min
        max_bound = np.array([x_max, y_max, z_max]) + 0.1
        min_bound = np.array([x_min, y_min, z_min]) - 0.1
        bbox = open3d.geometry.AxisAlignedBoundingBox(
            min_bound=min_bound, max_bound=max_bound
        )
        # crop room
        room_mesh = (
            area_mesh_dict[os.path.dirname(room)]
            .crop(bbox)
            .transform(
                np.array([[1, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
            )
        )
        vertices = np.array(room_mesh.vertices)
        faces = np.array(room_mesh.triangles)
        vertex_normals = np.array(room_mesh.vertex_normals)
        room_mesh = trimesh.Trimesh(
            vertices=vertices, faces=faces, vertex_normals=vertex_normals
        )
        # Take each point's normal from the nearest mesh face.
        (closest_points, distances, face_id) = room_mesh.nearest.on_surface(room_coords)
        room_normals = room_mesh.face_normals[face_id]

    if align_angle:
        # (2 - angle/180) * pi == 2*pi - radians(angle): rotate by -angle
        # around the room center (z axis).
        angle = (2 - angle / 180) * np.pi
        rot_cos, rot_sin = np.cos(angle), np.sin(angle)
        rot_t = np.array([[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]])
        room_center = (np.max(room_coords, axis=0) + np.min(room_coords, axis=0)) / 2
        room_coords = (room_coords - room_center) @ np.transpose(rot_t) + room_center
        if parse_normal:
            room_normals = room_normals @ np.transpose(rot_t)

    room_colors = np.ascontiguousarray(np.vstack(room_colors))
    room_semantic_gt = np.ascontiguousarray(np.vstack(room_semantic_gt))
    room_instance_gt = np.ascontiguousarray(np.vstack(room_instance_gt))
    save_dict = dict(
        coord=room_coords,
        color=room_colors,
        semantic_gt=room_semantic_gt,
        instance_gt=room_instance_gt,
    )
    if parse_normal:
        save_dict["normal"] = room_normals
    torch.save(save_dict, save_path)
def main_process():
    """Entry point for S3DIS preprocessing.

    Parses CLI arguments, loads every area's room list and alignment angle,
    optionally preloads the raw area meshes (for normal estimation), then
    preprocesses all rooms in parallel via ``parse_room``.

    Side effects:
        Populates the module-level ``area_mesh_dict`` (defined elsewhere in
        this file) before the worker pool is created, so workers inherit the
        loaded meshes. NOTE(review): this sharing relies on the 'fork'
        multiprocessing start method — confirm on platforms defaulting to
        'spawn'.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dataset_root", required=True, help="Path to Stanford3dDataset_v1.2 dataset"
    )
    parser.add_argument(
        "--output_root",
        required=True,
        help="Output path where area folders will be located",
    )
    parser.add_argument(
        "--raw_root",
        default=None,
        help="Path to Stanford2d3dDataset_noXYZ dataset (optional)",
    )
    parser.add_argument(
        "--align_angle", action="store_true", help="Whether align room angles"
    )
    parser.add_argument(
        "--parse_normal", action="store_true", help="Whether process normal"
    )
    args = parser.parse_args()
    # Normal estimation needs the raw meshes, so --raw_root becomes mandatory.
    if args.parse_normal:
        assert args.raw_root is not None
    room_list = []
    angle_list = []
    # Load room information
    print("Loading room information ...")
    # Each of the six areas ships an alignment file listing (room, angle) rows.
    for i in range(1, 7):
        area_info = np.loadtxt(
            os.path.join(
                args.dataset_root,
                "Area_{}".format(i),
                "Area_{}_alignmentAngle.txt".format(i),
            ),
            dtype=str,
        )
        room_list += [
            os.path.join("Area_{}".format(i), room_info[0]) for room_info in area_info
        ]
        angle_list += [int(room_info[1]) for room_info in area_info]
    if args.parse_normal:
        # load raw mesh file to extract normal
        print("Loading raw mesh file ...")
        for i in range(1, 7):
            if i != 5:
                mesh_dir = os.path.join(
                    args.raw_root, "area_{}".format(i), "3d", "rgb.obj"
                )
                mesh = open3d.io.read_triangle_mesh(mesh_dir)
                mesh.triangle_uvs.clear()
            else:
                # Area 5 is split into two sub-areas (5a/5b); 5b is rigidly
                # transformed into 5a's frame before merging.
                mesh_a_dir = os.path.join(
                    args.raw_root, "area_{}a".format(i), "3d", "rgb.obj"
                )
                mesh_b_dir = os.path.join(
                    args.raw_root, "area_{}b".format(i), "3d", "rgb.obj"
                )
                mesh_a = open3d.io.read_triangle_mesh(mesh_a_dir)
                mesh_a.triangle_uvs.clear()
                mesh_b = open3d.io.read_triangle_mesh(mesh_b_dir)
                mesh_b.triangle_uvs.clear()
                mesh_b = mesh_b.transform(
                    np.array(
                        [
                            [0, 0, -1, -4.09703582],
                            [0, 1, 0, 0],
                            [1, 0, 0, -6.22617759],
                            [0, 0, 0, 1],
                        ]
                    )
                )
                mesh = mesh_a + mesh_b
            area_mesh_dict["Area_{}".format(i)] = mesh
            print("Area_{} mesh is loaded".format(i))
    # Preprocess data.
    print("Processing scenes...")
    # pool = ProcessPoolExecutor(max_workers=mp.cpu_count())
    pool = ProcessPoolExecutor(max_workers=8)  # peak 110G memory when parsing normal.
    _ = list(
        pool.map(
            parse_room,
            room_list,
            angle_list,
            repeat(args.dataset_root),
            repeat(args.output_root),
            repeat(args.align_angle),
            repeat(args.parse_normal),
        )
    )
# Script entry point (guarded so worker processes importing this module do
# not re-trigger preprocessing).
if __name__ == "__main__":
    main_process()
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/preprocessing/structured3d/preprocess_structured3d.py | pointcept/datasets/preprocessing/structured3d/preprocess_structured3d.py | """
Preprocessing Script for Structured3D
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import argparse
import io
import sys
import os
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
import PIL
from PIL import Image
import cv2
import zipfile
import numpy as np
import torch
import multiprocessing as mp
from concurrent.futures import ProcessPoolExecutor
from itertools import repeat
from pointcept.datasets.transform import GridSample
# Raw semantic ids kept for the Structured3D 25-class benchmark.
# Index-aligned with CLASS_LABELS_25 below: position i maps raw id -> name.
VALID_CLASS_IDS_25 = (
    1,
    2,
    3,
    4,
    5,
    6,
    7,
    8,
    9,
    11,
    14,
    15,
    16,
    17,
    18,
    19,
    22,
    24,
    25,
    32,
    34,
    35,
    38,
    39,
    40,
)
# Human-readable class names, same order as VALID_CLASS_IDS_25.
CLASS_LABELS_25 = (
    "wall",
    "floor",
    "cabinet",
    "bed",
    "chair",
    "sofa",
    "table",
    "door",
    "window",
    "picture",
    "desk",
    "shelves",
    "curtain",
    "dresser",
    "pillow",
    "mirror",
    "ceiling",
    "refrigerator",
    "television",
    "nightstand",
    "sink",
    "lamp",
    "otherstructure",
    "otherfurniture",
    "otherprop",
)
def normal_from_cross_product(points_2d: np.ndarray) -> np.ndarray:
    """Estimate per-pixel surface normals for an organized (H, W, 3) grid.

    A symmetric one-pixel pad is added at the bottom/right edge, forward
    differences along rows and columns give two tangent vectors per pixel,
    and their cross product is the (unnormalized) normal. Pixels whose
    normal has zero length are returned as all-zero vectors.
    """
    padded = np.pad(points_2d, ((0, 1), (0, 1), (0, 0)), mode="symmetric")
    tangent_col = (padded[:, :-1, :] - padded[:, 1:, :])[:-1, :, :]
    tangent_row = (padded[:-1, :, :] - padded[1:, :, :])[:, :-1, :]
    normals = np.cross(tangent_row, tangent_col)
    lengths = np.linalg.norm(normals, axis=-1, keepdims=True)
    return np.divide(
        normals, lengths, out=np.zeros_like(normals), where=lengths != 0
    )
class Structured3DReader:
    """Reads Structured3D assets that are spread across several zip archives.

    A name -> archive-index map is built once at construction; later archives
    win when the same name appears in more than one zip.
    """

    def __init__(self, files):
        super().__init__()
        if isinstance(files, str):
            files = [files]
        self.readers = [zipfile.ZipFile(f, "r") for f in files]
        # Map every archived name to the index of the archive holding it.
        self.names_mapper = dict()
        for idx, reader in enumerate(self.readers):
            self.names_mapper.update((name, idx) for name in reader.namelist())

    def filelist(self):
        """All file names known across every archive."""
        return list(self.names_mapper)

    def listdir(self, dir_name):
        """Immediate children (files or sub-dirs) of ``dir_name``."""
        dir_name = dir_name.lstrip(os.path.sep).rstrip(os.path.sep)
        prefix = dir_name + os.path.sep
        children = [
            f.replace(prefix, "", 1).split(os.path.sep)[0]
            for f in self.filelist()
            if f.startswith(prefix)
        ]
        file_list = list(np.unique(children))
        if "" in file_list:
            file_list.remove("")
        return file_list

    def read(self, file_name):
        """Raw bytes of ``file_name`` from whichever archive contains it."""
        archive_idx = self.names_mapper[file_name]
        return self.readers[archive_idx].read(file_name)

    def read_camera(self, camera_path):
        """Parse a camera file into (rotation, translation, focal-or-None)."""
        # Axis permutation taking the raw ordering to the y-top convention.
        perm = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]], dtype=np.float32)
        raw = np.fromstring(self.read(camera_path), dtype=np.float32, sep=" ")
        cam_t = np.matmul(perm, raw[:3] / 1000)  # millimetres -> metres
        if raw.shape[0] > 3:
            # Perspective camera: front/up vectors plus two focal entries.
            front, up = raw[3:6], raw[6:9]
            side = np.cross(front, up)
            cam_r = np.stack((front, up, side), axis=1).astype(np.float32)
            cam_r = np.matmul(perm, cam_r)
            cam_f = raw[9:11]
        else:
            # Panorama camera files only store a position.
            cam_r = np.eye(3, dtype=np.float32)
            cam_f = None
        return cam_r, cam_t, cam_f

    def read_depth(self, depth_path):
        """Decode a depth map to (H, W, 1); zero (missing) depth -> 65535."""
        raw = np.frombuffer(self.read(depth_path), np.uint8)
        depth = cv2.imdecode(raw, cv2.IMREAD_UNCHANGED)[..., np.newaxis]
        depth[depth == 0] = 65535
        return depth

    def read_color(self, color_path):
        """Decode an image and return its RGB channels (cv2 yields BGR)."""
        raw = np.frombuffer(self.read(color_path), np.uint8)
        decoded = cv2.imdecode(raw, cv2.IMREAD_UNCHANGED)
        return decoded[..., :3][..., ::-1]

    def read_segment(self, segment_path):
        """Decode a semantic-label image into an (H, W, 1) array."""
        image = PIL.Image.open(io.BytesIO(self.read(segment_path)))
        return np.array(image)[..., np.newaxis]
def parse_scene(
    scene,
    dataset_root,
    output_root,
    ignore_index=-1,
    grid_size=None,
    fuse_prsp=True,
    fuse_pano=True,
    vis=False,
):
    """Fuse one Structured3D scene's rendered views into per-room point clouds.

    For every room, back-projects the perspective frames and/or the panorama
    into 3D, filters unreliable points, maps raw semantic ids onto the
    25-class benchmark, optionally grid-samples, and saves one
    ``room_<id>.pth`` dict (coord/color/normal/semantic_gt) under
    ``<output_root>/<split>/<scene>``.

    Args:
        scene: Scene folder name, e.g. ``scene_00000``; its trailing number
            selects the split (0-2999 train, 3000-3249 val, rest test).
        dataset_root: Directory containing the Structured3D ``*.zip`` files.
        output_root: Root directory for preprocessed output.
        ignore_index: Label written for raw ids outside the 25-class set.
        grid_size: Voxel size for GridSample; ``None`` disables sampling.
        fuse_prsp: Include perspective views.
        fuse_pano: Include the panorama view.
        vis: Additionally dump debug point clouds under ``./vis``.
    """
    # At least one of the two sources must be enabled.
    assert fuse_prsp or fuse_pano
    reader = Structured3DReader(
        [
            os.path.join(dataset_root, f)
            for f in os.listdir(dataset_root)
            if f.endswith(".zip")
        ]
    )
    scene_id = int(os.path.basename(scene).split("_")[-1])
    if scene_id < 3000:
        split = "train"
    elif 3000 <= scene_id < 3250:
        split = "val"
    else:
        split = "test"
    print(f"Processing: {scene} in {split}")
    scene_output_path = os.path.join(output_root, split, os.path.basename(scene))
    os.makedirs(scene_output_path, exist_ok=True)
    rooms = reader.listdir(os.path.join("Structured3D", scene, "2D_rendering"))
    for room in rooms:
        room_path = os.path.join("Structured3D", scene, "2D_rendering", room)
        coord_list = list()
        color_list = list()
        normal_list = list()
        segment_list = list()
        if fuse_prsp:
            prsp_path = os.path.join(room_path, "perspective", "full")
            frames = reader.listdir(prsp_path)
            for frame in frames:
                # NOTE(review): bare except also hides programming errors;
                # consider narrowing to the expected I/O/decoding exceptions.
                try:
                    cam_r, cam_t, cam_f = reader.read_camera(
                        os.path.join(prsp_path, frame, "camera_pose.txt")
                    )
                    depth = reader.read_depth(
                        os.path.join(prsp_path, frame, "depth.png")
                    )
                    color = reader.read_color(
                        os.path.join(prsp_path, frame, "rgb_rawlight.png")
                    )
                    segment = reader.read_segment(
                        os.path.join(prsp_path, frame, "semantic.png")
                    )
                except:
                    print(
                        f"Skipping {scene}_room{room}_frame{frame} perspective view due to loading error"
                    )
                else:
                    # Build pinhole intrinsics; fx/fy appear to be half
                    # field-of-view angles in radians (focal = c / tan(fov)) —
                    # TODO confirm against the dataset spec.
                    fx, fy = cam_f
                    height, width = depth.shape[0], depth.shape[1]
                    pixel = np.transpose(np.indices((width, height)), (2, 1, 0))
                    pixel = pixel.reshape((-1, 2))
                    pixel = np.hstack((pixel, np.ones((pixel.shape[0], 1))))
                    k = np.diag([1.0, 1.0, 1.0])
                    k[0, 2] = width / 2
                    k[1, 2] = height / 2
                    k[0, 0] = k[0, 2] / np.tan(fx)
                    k[1, 1] = k[1, 2] / np.tan(fy)
                    # Back-project every pixel into the camera frame.
                    coord = (
                        depth.reshape((-1, 1)) * (np.linalg.inv(k) @ pixel.T).T
                    ).reshape(height, width, 3)
                    coord = coord @ np.array([[0, 0, 1], [0, -1, 0], [1, 0, 0]])
                    normal = normal_from_cross_product(coord)
                    # Filtering invalid points
                    # Drop grazing-angle points (|cos| <= 0.15), invalid depth
                    # (65535, see read_depth) and zero semantic labels.
                    view_dist = np.maximum(
                        np.linalg.norm(coord, axis=-1, keepdims=True), float(10e-5)
                    )
                    cosine_dist = np.sum(
                        (coord * normal / view_dist), axis=-1, keepdims=True
                    )
                    cosine_dist = np.abs(cosine_dist)
                    mask = ((cosine_dist > 0.15) & (depth < 65535) & (segment > 0))[
                        ..., 0
                    ].reshape(-1)
                    # mm -> m, then move points into the world frame; normals
                    # are recomputed on the transformed grid.
                    coord = np.matmul(coord / 1000, cam_r.T) + cam_t
                    normal = normal_from_cross_product(coord)
                    if sum(mask) > 0:
                        coord_list.append(coord.reshape(-1, 3)[mask])
                        color_list.append(color.reshape(-1, 3)[mask])
                        normal_list.append(normal.reshape(-1, 3)[mask])
                        segment_list.append(segment.reshape(-1, 1)[mask])
                    else:
                        print(
                            f"Skipping {scene}_room{room}_frame{frame} perspective view due to all points are filtered out"
                        )
        if fuse_pano:
            pano_path = os.path.join(room_path, "panorama")
            try:
                _, cam_t, _ = reader.read_camera(
                    os.path.join(pano_path, "camera_xyz.txt")
                )
                depth = reader.read_depth(os.path.join(pano_path, "full", "depth.png"))
                color = reader.read_color(
                    os.path.join(pano_path, "full", "rgb_rawlight.png")
                )
                segment = reader.read_segment(
                    os.path.join(pano_path, "full", "semantic.png")
                )
            except:
                print(f"Skipping {scene}_room{room} panorama view due to loading error")
            else:
                # Equirectangular back-projection: p_a is the azimuth in
                # [-pi, pi), p_b the elevation in (-pi/2, pi/2].
                p_h, p_w = depth.shape[:2]
                p_a = np.arange(p_w, dtype=np.float32) / p_w * 2 * np.pi - np.pi
                p_b = np.arange(p_h, dtype=np.float32) / p_h * np.pi * -1 + np.pi / 2
                p_a = np.tile(p_a[None], [p_h, 1])[..., np.newaxis]
                p_b = np.tile(p_b[:, None], [1, p_w])[..., np.newaxis]
                p_a_sin, p_a_cos, p_b_sin, p_b_cos = (
                    np.sin(p_a),
                    np.cos(p_a),
                    np.sin(p_b),
                    np.cos(p_b),
                )
                x = depth * p_a_cos * p_b_cos
                y = depth * p_b_sin
                z = depth * p_a_sin * p_b_cos
                coord = np.concatenate([x, y, z], axis=-1) / 1000  # mm -> m
                normal = normal_from_cross_product(coord)
                # Filtering invalid points
                view_dist = np.maximum(
                    np.linalg.norm(coord, axis=-1, keepdims=True), float(10e-5)
                )
                cosine_dist = np.sum(
                    (coord * normal / view_dist), axis=-1, keepdims=True
                )
                cosine_dist = np.abs(cosine_dist)
                mask = ((cosine_dist > 0.15) & (depth < 65535) & (segment > 0))[
                    ..., 0
                ].reshape(-1)
                coord = coord + cam_t
                if sum(mask) > 0:
                    coord_list.append(coord.reshape(-1, 3)[mask])
                    color_list.append(color.reshape(-1, 3)[mask])
                    normal_list.append(normal.reshape(-1, 3)[mask])
                    segment_list.append(segment.reshape(-1, 1)[mask])
                else:
                    print(
                        f"Skipping {scene}_room{room} panorama view due to all points are filtered out"
                    )
        if len(coord_list) > 0:
            coord = np.concatenate(coord_list, axis=0)
            # Swap the y and z columns of coords and normals before saving.
            coord = coord @ np.array([[1, 0, 0], [0, 0, 1], [0, 1, 0]])
            color = np.concatenate(color_list, axis=0)
            normal = np.concatenate(normal_list, axis=0)
            normal = normal @ np.array([[1, 0, 0], [0, 0, 1], [0, 1, 0]])
            segment = np.concatenate(segment_list, axis=0)
            # Remap raw ids to contiguous 0..24 labels; others -> ignore_index.
            segment25 = np.ones_like(segment, dtype=np.int64) * ignore_index
            for idx, value in enumerate(VALID_CLASS_IDS_25):
                mask = np.all(segment == value, axis=-1)
                segment25[mask] = idx
            data_dict = dict(
                coord=coord.astype("float32"),
                color=color.astype("uint8"),
                normal=normal.astype("float32"),
                semantic_gt=segment25.astype("int16"),
            )
            # Grid sampling data
            if grid_size is not None:
                sampler = GridSample(
                    grid_size=grid_size,
                    keys=("coord", "color", "normal", "semantic_gt"),
                )
                data_dict = sampler(data_dict)
            torch.save(data_dict, os.path.join(scene_output_path, f"room_{room}.pth"))
            if vis:
                from pointcept.utils.visualization import save_point_cloud
                os.makedirs("./vis", exist_ok=True)
                save_point_cloud(
                    coord, color / 255, f"./vis/{scene}_room{room}_color.ply"
                )
                save_point_cloud(
                    coord, (normal + 1) / 2, f"./vis/{scene}_room{room}_normal.ply"
                )
        else:
            print(f"Skipping {scene}_room{room} due to no valid points")
if __name__ == "__main__":
    # Default configuration; every value can be overridden on the command line.
    dataset_root = "/data/qwt/dataset/Structured3D/datasets--Pointcept--structured3d-compressed/snapshots/b2de6e97591c13123c7f2927c2e94c3dc6e9cda8"
    output_root = "/data/qwt/dataset/Structured3D/process"
    num_workers = 4
    grid_size = 0.01
    fuse_prsp = True
    fuse_pano = True

    def _str2bool(value):
        """Parse a command-line boolean ('true'/'false', case-insensitive)."""
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ("true", "t", "yes", "y", "1"):
            return True
        if lowered in ("false", "f", "no", "n", "0"):
            return False
        raise argparse.ArgumentTypeError("Expected a boolean, got %r" % value)

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dataset_root",
        default=dataset_root,
        # Fix: help text previously said "ScanNet" (copy-paste error).
        help="Path to the Structured3D zip archives.",
    )
    parser.add_argument(
        "--output_root",
        default=output_root,
        help="Output path where train/val folders will be located.",
    )
    parser.add_argument(
        "--num_workers",
        default=num_workers,
        type=int,
        help="Num workers for preprocessing.",
    )
    parser.add_argument(
        "--grid_size", default=grid_size, type=float, help="Grid size for grid sampling."
    )
    # Fix: ignore_index is an integer label (it is multiplied into an int64
    # array inside parse_scene), so parse it as int rather than float.
    parser.add_argument("--ignore_index", default=-1, type=int, help="Ignore index.")
    # Fix: the previous flags had default=True with no type, so ANY passed
    # string (including "False") was truthy; _str2bool makes
    # "--fuse_prsp False" actually disable the perspective views.
    parser.add_argument(
        "--fuse_prsp",
        default=fuse_prsp,
        type=_str2bool,
        help="Whether fuse perspective view.",
    )
    parser.add_argument(
        "--fuse_pano",
        default=fuse_pano,
        type=_str2bool,
        help="Whether fuse panorama view.",
    )
    config = parser.parse_args()

    # Discover scenes from the zip archives.
    reader = Structured3DReader(
        [
            os.path.join(config.dataset_root, f)
            for f in os.listdir(config.dataset_root)
            if f.endswith(".zip")
        ]
    )
    scenes_list = sorted(reader.listdir("Structured3D"))

    # Create all split folders up front.
    for split in ("train", "val", "test"):
        os.makedirs(os.path.join(config.output_root, split), exist_ok=True)

    # Preprocess data.
    print("Processing scenes...")
    pool = ProcessPoolExecutor(max_workers=config.num_workers)
    _ = list(
        pool.map(
            parse_scene,
            scenes_list,
            repeat(config.dataset_root),
            repeat(config.output_root),
            repeat(config.ignore_index),
            repeat(config.grid_size),
            repeat(config.fuse_prsp),
            repeat(config.fuse_pano),
        )
    )
    pool.shutdown()
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/preprocessing/scannet/preprocess_scannet.py | pointcept/datasets/preprocessing/scannet/preprocess_scannet.py | """
Preprocessing Script for ScanNet 20/200
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import warnings
import torch
warnings.filterwarnings("ignore", category=DeprecationWarning)
import os
import argparse
import glob
import json
import plyfile
import numpy as np
import pandas as pd
import multiprocessing as mp
from concurrent.futures import ProcessPoolExecutor
from itertools import repeat
# Load external constants
from meta_data.scannet200_constants import VALID_CLASS_IDS_200, VALID_CLASS_IDS_20
# Suffixes of the per-scene files shipped with ScanNet v2.
CLOUD_FILE_PFIX = "_vh_clean_2"  # cleaned reconstruction mesh (.ply)
SEGMENTS_FILE_PFIX = ".0.010000.segs.json"  # over-segmentation of the mesh
AGGREGATIONS_FILE_PFIX = ".aggregation.json"  # instance/label aggregation
# Short aliases for the benchmark id tuples imported above.
CLASS_IDS200 = VALID_CLASS_IDS_200
CLASS_IDS20 = VALID_CLASS_IDS_20
# Label written for points outside the benchmark class sets.
IGNORE_INDEX = -1
def read_plymesh(filepath):
    """Load a PLY mesh as ``(vertices, faces)``; returns None if it is empty.

    ``vertices`` is the per-vertex property table as a numpy array and
    ``faces`` the stacked ``vertex_indices`` of every face.
    """
    with open(filepath, "rb") as handle:
        mesh = plyfile.PlyData.read(handle)
    if not mesh.elements:
        return None
    vertices = pd.DataFrame(mesh["vertex"].data).values
    faces = np.stack(mesh["face"].data["vertex_indices"], axis=0)
    return vertices, faces
# Map the raw category id of an aggregation group to the point cloud.
def point_indices_from_group(seg_indices, group, labels_pd):
    """Resolve one aggregation group to point indices and benchmark labels.

    Returns ``(point_idx, label_id20, label_id200)`` where the labels are
    contiguous benchmark indices or IGNORE_INDEX when the raw category is
    not part of the respective benchmark.
    """
    raw_label = group["label"]

    def _raw_id(column):
        # Look up the raw category name in the label map; 0 when missing.
        matches = labels_pd[labels_pd["raw_category"] == raw_label][column]
        return int(matches.iloc[0]) if len(matches) > 0 else 0

    label_id20 = _raw_id("nyu40id")
    label_id200 = _raw_id("id")

    # Remap raw ids onto the contiguous benchmark index space.
    label_id20 = (
        CLASS_IDS20.index(label_id20) if label_id20 in CLASS_IDS20 else IGNORE_INDEX
    )
    label_id200 = (
        CLASS_IDS200.index(label_id200)
        if label_id200 in CLASS_IDS200
        else IGNORE_INDEX
    )

    # Points whose segment id belongs to this group's segment list.
    group_segments = np.array(group["segments"])
    point_idx = np.where(np.isin(seg_indices, group_segments))[0]
    return point_idx, label_id20, label_id200
def face_normal(vertex, face):
    """Per-face unit normals and areas for a triangle mesh.

    Returns ``(nf, area)``: ``nf`` has shape (F, 3), ``area`` shape (F, 1).
    A small epsilon keeps degenerate faces from dividing by zero.
    """
    edge_a = vertex[face[:, 1]] - vertex[face[:, 0]]
    edge_b = vertex[face[:, 2]] - vertex[face[:, 0]]
    raw = np.cross(edge_a, edge_b)
    length = np.sqrt(np.sum(raw**2, axis=1, keepdims=True)) + 1.0e-8
    unit = raw / length
    # Triangle area is half the cross-product magnitude.
    area = length * 0.5
    return unit, area
def vertex_normal(vertex, face):
    """Area-weighted vertex normals accumulated from per-face normals."""
    unit, area = face_normal(vertex, face)
    weighted = unit * area
    accum = np.zeros_like(vertex)
    # Scatter each face's weighted normal onto its three vertices.
    for tri in range(face.shape[0]):
        accum[face[tri]] += weighted[tri]
    length = np.sqrt(np.sum(accum**2, axis=1, keepdims=True)) + 1.0e-8
    return accum / length
def handle_process(
    scene_path, output_path, labels_pd, train_scenes, val_scenes, parse_normals=True
):
    """Preprocess one ScanNet v2 scene folder into a ``.pth`` dict.

    Loads the cleaned mesh, optionally computes vertex normals, and (for
    train/val scenes) converts the over-segmentation + aggregation files into
    per-point semantic (20- and 200-class) and instance labels. The result is
    saved to ``<output_path>/<split>/<scene_id>.pth``.

    Args:
        scene_path: Path to one ``scene*`` folder.
        output_path: Root of the preprocessed dataset.
        labels_pd: Label map DataFrame (scannetv2-labels.combined.tsv).
        train_scenes / val_scenes: Scene-id lists deciding the split;
            anything in neither list is treated as test.
        parse_normals: Whether to add a ``normal`` entry.
    """
    scene_id = os.path.basename(scene_path)
    mesh_path = os.path.join(scene_path, f"{scene_id}{CLOUD_FILE_PFIX}.ply")
    segments_file = os.path.join(
        scene_path, f"{scene_id}{CLOUD_FILE_PFIX}{SEGMENTS_FILE_PFIX}"
    )
    aggregations_file = os.path.join(scene_path, f"{scene_id}{AGGREGATIONS_FILE_PFIX}")
    info_file = os.path.join(scene_path, f"{scene_id}.txt")
    # Split membership determines the output sub-folder.
    if scene_id in train_scenes:
        output_file = os.path.join(output_path, "train", f"{scene_id}.pth")
        split_name = "train"
    elif scene_id in val_scenes:
        output_file = os.path.join(output_path, "val", f"{scene_id}.pth")
        split_name = "val"
    else:
        output_file = os.path.join(output_path, "test", f"{scene_id}.pth")
        split_name = "test"
    print(f"Processing: {scene_id} in {split_name}")
    vertices, faces = read_plymesh(mesh_path)
    # Vertex table layout: columns 0-2 are xyz, 3-5 are RGB — per the
    # _vh_clean_2 PLY property order.
    coords = vertices[:, :3]
    colors = vertices[:, 3:6]
    save_dict = dict(coord=coords, color=colors, scene_id=scene_id)
    # # Rotating the mesh to axis aligned
    # info_dict = {}
    # with open(info_file) as f:
    #     for line in f:
    #         (key, val) = line.split(" = ")
    #         info_dict[key] = np.fromstring(val, sep=' ')
    #
    # if 'axisAlignment' not in info_dict:
    #     rot_matrix = np.identity(4)
    # else:
    #     rot_matrix = info_dict['axisAlignment'].reshape(4, 4)
    # r_coords = coords.transpose()
    # r_coords = np.append(r_coords, np.ones((1, r_coords.shape[1])), axis=0)
    # r_coords = np.dot(rot_matrix, r_coords)
    # coords = r_coords
    # Parse Normals
    if parse_normals:
        save_dict["normal"] = vertex_normal(coords, faces)
    # Load segments file
    # Test scenes ship no annotations, so labels exist only for train/val.
    if split_name != "test":
        with open(segments_file) as f:
            segments = json.load(f)
        seg_indices = np.array(segments["segIndices"])
        # Load Aggregations file
        with open(aggregations_file) as f:
            aggregation = json.load(f)
        seg_groups = np.array(aggregation["segGroups"])
        # Generate new labels
        semantic_gt20 = np.ones((vertices.shape[0])) * IGNORE_INDEX
        semantic_gt200 = np.ones((vertices.shape[0])) * IGNORE_INDEX
        instance_ids = np.ones((vertices.shape[0])) * IGNORE_INDEX
        for group in seg_groups:
            point_idx, label_id20, label_id200 = point_indices_from_group(
                seg_indices, group, labels_pd
            )
            semantic_gt20[point_idx] = label_id20
            semantic_gt200[point_idx] = label_id200
            instance_ids[point_idx] = group["id"]
        semantic_gt20 = semantic_gt20.astype(int)
        semantic_gt200 = semantic_gt200.astype(int)
        instance_ids = instance_ids.astype(int)
        save_dict["semantic_gt20"] = semantic_gt20
        save_dict["semantic_gt200"] = semantic_gt200
        save_dict["instance_gt"] = instance_ids
        # Concatenate with original cloud
        # NOTE(review): after astype(int) these arrays cannot hold NaN, so
        # this sanity check looks vestigial — confirm before removing.
        processed_vertices = np.hstack((semantic_gt200, instance_ids))
        if np.any(np.isnan(processed_vertices)) or not np.all(
            np.isfinite(processed_vertices)
        ):
            raise ValueError(f"Find NaN in Scene: {scene_id}")
    # Save processed data
    torch.save(save_dict, output_file)
if __name__ == "__main__":
    def _str2bool(value):
        """Parse a command-line boolean ('true'/'false', case-insensitive)."""
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ("true", "t", "yes", "y", "1"):
            return True
        if lowered in ("false", "f", "no", "n", "0"):
            return False
        raise argparse.ArgumentTypeError("Expected a boolean, got %r" % value)

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dataset_root",
        required=True,
        help="Path to the ScanNet dataset containing scene folders",
    )
    parser.add_argument(
        "--output_root",
        required=True,
        help="Output path where train/val folders will be located",
    )
    # Fix: `type=bool` treats every non-empty string as True, so
    # `--parse_normals False` silently kept normals enabled.
    parser.add_argument(
        "--parse_normals", default=True, type=_str2bool, help="Whether parse point normals"
    )
    config = parser.parse_args()

    # Load label map
    labels_pd = pd.read_csv(
        "pointcept/datasets/preprocessing/scannet/meta_data/scannetv2-labels.combined.tsv",
        sep="\t",
        header=0,
    )

    # Load train/val splits
    with open(
        "pointcept/datasets/preprocessing/scannet/meta_data/scannetv2_train.txt"
    ) as train_file:
        train_scenes = train_file.read().splitlines()
    with open(
        "pointcept/datasets/preprocessing/scannet/meta_data/scannetv2_val.txt"
    ) as val_file:
        val_scenes = val_file.read().splitlines()

    # Create all split output directories up front.
    for split in ("train", "val", "test"):
        os.makedirs(os.path.join(config.output_root, split), exist_ok=True)

    # Load scene paths
    scene_paths = sorted(glob.glob(config.dataset_root + "/scans*/scene*"))

    # Preprocess data.
    print("Processing scenes...")
    pool = ProcessPoolExecutor(max_workers=mp.cpu_count())
    _ = list(
        pool.map(
            handle_process,
            scene_paths,
            repeat(config.output_root),
            repeat(labels_pd),
            repeat(train_scenes),
            repeat(val_scenes),
            repeat(config.parse_normals),
        )
    )
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/preprocessing/scannet/meta_data/scannet200_splits.py | pointcept/datasets/preprocessing/scannet/meta_data/scannet200_splits.py | # This file contains the HEAD - COMMON - TAIL split category ids for ScanNet 200
HEAD_CATS_SCANNET_200 = [
"tv stand",
"curtain",
"blinds",
"shower curtain",
"bookshelf",
"tv",
"kitchen cabinet",
"pillow",
"lamp",
"dresser",
"monitor",
"object",
"ceiling",
"board",
"stove",
"closet wall",
"couch",
"office chair",
"kitchen counter",
"shower",
"closet",
"doorframe",
"sofa chair",
"mailbox",
"nightstand",
"washing machine",
"picture",
"book",
"sink",
"recycling bin",
"table",
"backpack",
"shower wall",
"toilet",
"copier",
"counter",
"stool",
"refrigerator",
"window",
"file cabinet",
"chair",
"wall",
"plant",
"coffee table",
"stairs",
"armchair",
"cabinet",
"bathroom vanity",
"bathroom stall",
"mirror",
"blackboard",
"trash can",
"stair rail",
"box",
"towel",
"door",
"clothes",
"whiteboard",
"bed",
"floor",
"bathtub",
"desk",
"wardrobe",
"clothes dryer",
"radiator",
"shelf",
]
COMMON_CATS_SCANNET_200 = [
"cushion",
"end table",
"dining table",
"keyboard",
"bag",
"toilet paper",
"printer",
"blanket",
"microwave",
"shoe",
"computer tower",
"bottle",
"bin",
"ottoman",
"bench",
"basket",
"fan",
"laptop",
"person",
"paper towel dispenser",
"oven",
"rack",
"piano",
"suitcase",
"rail",
"container",
"telephone",
"stand",
"light",
"laundry basket",
"pipe",
"seat",
"column",
"bicycle",
"ladder",
"jacket",
"storage bin",
"coffee maker",
"dishwasher",
"machine",
"mat",
"windowsill",
"bulletin board",
"fireplace",
"mini fridge",
"water cooler",
"shower door",
"pillar",
"ledge",
"furniture",
"cart",
"decoration",
"closet door",
"vacuum cleaner",
"dish rack",
"range hood",
"projector screen",
"divider",
"bathroom counter",
"laundry hamper",
"bathroom stall door",
"ceiling light",
"trash bin",
"bathroom cabinet",
"structure",
"storage organizer",
"potted plant",
"mattress",
]
TAIL_CATS_SCANNET_200 = [
"paper",
"plate",
"soap dispenser",
"bucket",
"clock",
"guitar",
"toilet paper holder",
"speaker",
"cup",
"paper towel roll",
"bar",
"toaster",
"ironing board",
"soap dish",
"toilet paper dispenser",
"fire extinguisher",
"ball",
"hat",
"shower curtain rod",
"paper cutter",
"tray",
"toaster oven",
"mouse",
"toilet seat cover dispenser",
"storage container",
"scale",
"tissue box",
"light switch",
"crate",
"power outlet",
"sign",
"projector",
"candle",
"plunger",
"stuffed animal",
"headphones",
"broom",
"guitar case",
"dustpan",
"hair dryer",
"water bottle",
"handicap bar",
"purse",
"vent",
"shower floor",
"water pitcher",
"bowl",
"paper bag",
"alarm clock",
"music stand",
"laundry detergent",
"dumbbell",
"tube",
"cd case",
"closet rod",
"coffee kettle",
"shower head",
"keyboard piano",
"case of water bottles",
"coat rack",
"folded chair",
"fire alarm",
"power strip",
"calendar",
"poster",
"luggage",
]
# Given the different sizes of the official train and val sets, not all ScanNet200 categories are present in the validation set.
# Here we list the categories (labels and IDs) present in both the train and validation sets, followed by the remaining categories that are present in train but not in val.
# We don't evaluate on unseen validation categories in this benchmark.
# NOTE(review): the *_VALIDATION / *_TRAIN_ONLY constants below appear to have
# swapped names relative to the convention used elsewhere in this package
# (VALID_CLASS_IDS_* holds numeric IDs, CLASS_LABELS_* holds name strings);
# confirm against callers before renaming.
VALID_CLASS_IDS_200_VALIDATION = (
"wall",
"chair",
"floor",
"table",
"door",
"couch",
"cabinet",
"shelf",
"desk",
"office chair",
"bed",
"pillow",
"sink",
"picture",
"window",
"toilet",
"bookshelf",
"monitor",
"curtain",
"book",
"armchair",
"coffee table",
"box",
"refrigerator",
"lamp",
"kitchen cabinet",
"towel",
"clothes",
"tv",
"nightstand",
"counter",
"dresser",
"stool",
"cushion",
"plant",
"ceiling",
"bathtub",
"end table",
"dining table",
"keyboard",
"bag",
"backpack",
"toilet paper",
"printer",
"tv stand",
"whiteboard",
"blanket",
"shower curtain",
"trash can",
"closet",
"stairs",
"microwave",
"stove",
"shoe",
"computer tower",
"bottle",
"bin",
"ottoman",
"bench",
"board",
"washing machine",
"mirror",
"copier",
"basket",
"sofa chair",
"file cabinet",
"fan",
"laptop",
"shower",
"paper",
"person",
"paper towel dispenser",
"oven",
"blinds",
"rack",
"plate",
"blackboard",
"piano",
"suitcase",
"rail",
"radiator",
"recycling bin",
"container",
"wardrobe",
"soap dispenser",
"telephone",
"bucket",
"clock",
"stand",
"light",
"laundry basket",
"pipe",
"clothes dryer",
"guitar",
"toilet paper holder",
"seat",
"speaker",
"column",
"ladder",
"bathroom stall",
"shower wall",
"cup",
"jacket",
"storage bin",
"coffee maker",
"dishwasher",
"paper towel roll",
"machine",
"mat",
"windowsill",
"bar",
"toaster",
"bulletin board",
"ironing board",
"fireplace",
"soap dish",
"kitchen counter",
"doorframe",
"toilet paper dispenser",
"mini fridge",
"fire extinguisher",
"ball",
"hat",
"shower curtain rod",
"water cooler",
"paper cutter",
"tray",
"shower door",
"pillar",
"ledge",
"toaster oven",
"mouse",
"toilet seat cover dispenser",
"furniture",
"cart",
"scale",
"tissue box",
"light switch",
"crate",
"power outlet",
"decoration",
"sign",
"projector",
"closet door",
"vacuum cleaner",
"plunger",
"stuffed animal",
"headphones",
"dish rack",
"broom",
"range hood",
"dustpan",
"hair dryer",
"water bottle",
"handicap bar",
"vent",
"shower floor",
"water pitcher",
"mailbox",
"bowl",
"paper bag",
"projector screen",
"divider",
"laundry detergent",
"bathroom counter",
"object",
"bathroom vanity",
"closet wall",
"laundry hamper",
"bathroom stall door",
"ceiling light",
"trash bin",
"dumbbell",
"stair rail",
"tube",
"bathroom cabinet",
"closet rod",
"coffee kettle",
"shower head",
"keyboard piano",
"case of water bottles",
"coat rack",
"folded chair",
"fire alarm",
"power strip",
"calendar",
"poster",
"potted plant",
"mattress",
)
CLASS_LABELS_200_VALIDATION = (
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
13,
14,
15,
16,
17,
18,
19,
21,
22,
23,
24,
26,
27,
28,
29,
31,
32,
33,
34,
35,
36,
38,
39,
40,
41,
42,
44,
45,
46,
47,
48,
49,
50,
51,
52,
54,
55,
56,
57,
58,
59,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71,
72,
73,
74,
75,
76,
77,
78,
79,
80,
82,
84,
86,
87,
88,
89,
90,
93,
95,
96,
97,
98,
99,
100,
101,
102,
103,
104,
105,
106,
107,
110,
112,
115,
116,
118,
120,
122,
125,
128,
130,
131,
132,
134,
136,
138,
139,
140,
141,
145,
148,
154,
155,
156,
157,
159,
161,
163,
165,
166,
168,
169,
170,
177,
180,
185,
188,
191,
193,
195,
202,
208,
213,
214,
229,
230,
232,
233,
242,
250,
261,
264,
276,
283,
300,
304,
312,
323,
325,
342,
356,
370,
392,
395,
408,
417,
488,
540,
562,
570,
609,
748,
776,
1156,
1163,
1164,
1165,
1166,
1167,
1168,
1169,
1170,
1171,
1172,
1173,
1175,
1176,
1179,
1180,
1181,
1182,
1184,
1185,
1186,
1187,
1188,
1189,
1191,
)
VALID_CLASS_IDS_200_TRAIN_ONLY = (
"bicycle",
"storage container",
"candle",
"guitar case",
"purse",
"alarm clock",
"music stand",
"cd case",
"structure",
"storage organizer",
"luggage",
)
CLASS_LABELS_200_TRAIN_ONLY = (
121,
221,
286,
331,
399,
572,
581,
1174,
1178,
1183,
1190,
)
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/preprocessing/scannet/meta_data/scannet200_constants.py | pointcept/datasets/preprocessing/scannet/meta_data/scannet200_constants.py | # ScanNet Benchmark constants
VALID_CLASS_IDS_20 = (
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
14,
16,
24,
28,
33,
34,
36,
39,
)
CLASS_LABELS_20 = (
"wall",
"floor",
"cabinet",
"bed",
"chair",
"sofa",
"table",
"door",
"window",
"bookshelf",
"picture",
"counter",
"desk",
"curtain",
"refrigerator",
"shower curtain",
"toilet",
"sink",
"bathtub",
"otherfurniture",
)
SCANNET_COLOR_MAP_20 = {
0: (0.0, 0.0, 0.0),
1: (174.0, 199.0, 232.0),
2: (152.0, 223.0, 138.0),
3: (31.0, 119.0, 180.0),
4: (255.0, 187.0, 120.0),
5: (188.0, 189.0, 34.0),
6: (140.0, 86.0, 75.0),
7: (255.0, 152.0, 150.0),
8: (214.0, 39.0, 40.0),
9: (197.0, 176.0, 213.0),
10: (148.0, 103.0, 189.0),
11: (196.0, 156.0, 148.0),
12: (23.0, 190.0, 207.0),
14: (247.0, 182.0, 210.0),
15: (66.0, 188.0, 102.0),
16: (219.0, 219.0, 141.0),
17: (140.0, 57.0, 197.0),
18: (202.0, 185.0, 52.0),
19: (51.0, 176.0, 203.0),
20: (200.0, 54.0, 131.0),
21: (92.0, 193.0, 61.0),
22: (78.0, 71.0, 183.0),
23: (172.0, 114.0, 82.0),
24: (255.0, 127.0, 14.0),
25: (91.0, 163.0, 138.0),
26: (153.0, 98.0, 156.0),
27: (140.0, 153.0, 101.0),
28: (158.0, 218.0, 229.0),
29: (100.0, 125.0, 154.0),
30: (178.0, 127.0, 135.0),
32: (146.0, 111.0, 194.0),
33: (44.0, 160.0, 44.0),
34: (112.0, 128.0, 144.0),
35: (96.0, 207.0, 209.0),
36: (227.0, 119.0, 194.0),
37: (213.0, 92.0, 176.0),
38: (94.0, 106.0, 211.0),
39: (82.0, 84.0, 163.0),
40: (100.0, 85.0, 144.0),
}
# ScanNet200 Benchmark constants
VALID_CLASS_IDS_200 = (
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
13,
14,
15,
16,
17,
18,
19,
21,
22,
23,
24,
26,
27,
28,
29,
31,
32,
33,
34,
35,
36,
38,
39,
40,
41,
42,
44,
45,
46,
47,
48,
49,
50,
51,
52,
54,
55,
56,
57,
58,
59,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71,
72,
73,
74,
75,
76,
77,
78,
79,
80,
82,
84,
86,
87,
88,
89,
90,
93,
95,
96,
97,
98,
99,
100,
101,
102,
103,
104,
105,
106,
107,
110,
112,
115,
116,
118,
120,
121,
122,
125,
128,
130,
131,
132,
134,
136,
138,
139,
140,
141,
145,
148,
154,
155,
156,
157,
159,
161,
163,
165,
166,
168,
169,
170,
177,
180,
185,
188,
191,
193,
195,
202,
208,
213,
214,
221,
229,
230,
232,
233,
242,
250,
261,
264,
276,
283,
286,
300,
304,
312,
323,
325,
331,
342,
356,
370,
392,
395,
399,
408,
417,
488,
540,
562,
570,
572,
581,
609,
748,
776,
1156,
1163,
1164,
1165,
1166,
1167,
1168,
1169,
1170,
1171,
1172,
1173,
1174,
1175,
1176,
1178,
1179,
1180,
1181,
1182,
1183,
1184,
1185,
1186,
1187,
1188,
1189,
1190,
1191,
)
# Human-readable names of the 200 ScanNet200 benchmark categories.
# NOTE(review): presumably index-aligned with the valid class-id tuple
# defined above — confirm against that list before relying on ordering.
CLASS_LABELS_200 = (
    "wall",
    "chair",
    "floor",
    "table",
    "door",
    "couch",
    "cabinet",
    "shelf",
    "desk",
    "office chair",
    "bed",
    "pillow",
    "sink",
    "picture",
    "window",
    "toilet",
    "bookshelf",
    "monitor",
    "curtain",
    "book",
    "armchair",
    "coffee table",
    "box",
    "refrigerator",
    "lamp",
    "kitchen cabinet",
    "towel",
    "clothes",
    "tv",
    "nightstand",
    "counter",
    "dresser",
    "stool",
    "cushion",
    "plant",
    "ceiling",
    "bathtub",
    "end table",
    "dining table",
    "keyboard",
    "bag",
    "backpack",
    "toilet paper",
    "printer",
    "tv stand",
    "whiteboard",
    "blanket",
    "shower curtain",
    "trash can",
    "closet",
    "stairs",
    "microwave",
    "stove",
    "shoe",
    "computer tower",
    "bottle",
    "bin",
    "ottoman",
    "bench",
    "board",
    "washing machine",
    "mirror",
    "copier",
    "basket",
    "sofa chair",
    "file cabinet",
    "fan",
    "laptop",
    "shower",
    "paper",
    "person",
    "paper towel dispenser",
    "oven",
    "blinds",
    "rack",
    "plate",
    "blackboard",
    "piano",
    "suitcase",
    "rail",
    "radiator",
    "recycling bin",
    "container",
    "wardrobe",
    "soap dispenser",
    "telephone",
    "bucket",
    "clock",
    "stand",
    "light",
    "laundry basket",
    "pipe",
    "clothes dryer",
    "guitar",
    "toilet paper holder",
    "seat",
    "speaker",
    "column",
    "bicycle",
    "ladder",
    "bathroom stall",
    "shower wall",
    "cup",
    "jacket",
    "storage bin",
    "coffee maker",
    "dishwasher",
    "paper towel roll",
    "machine",
    "mat",
    "windowsill",
    "bar",
    "toaster",
    "bulletin board",
    "ironing board",
    "fireplace",
    "soap dish",
    "kitchen counter",
    "doorframe",
    "toilet paper dispenser",
    "mini fridge",
    "fire extinguisher",
    "ball",
    "hat",
    "shower curtain rod",
    "water cooler",
    "paper cutter",
    "tray",
    "shower door",
    "pillar",
    "ledge",
    "toaster oven",
    "mouse",
    "toilet seat cover dispenser",
    "furniture",
    "cart",
    "storage container",
    "scale",
    "tissue box",
    "light switch",
    "crate",
    "power outlet",
    "decoration",
    "sign",
    "projector",
    "closet door",
    "vacuum cleaner",
    "candle",
    "plunger",
    "stuffed animal",
    "headphones",
    "dish rack",
    "broom",
    "guitar case",
    "range hood",
    "dustpan",
    "hair dryer",
    "water bottle",
    "handicap bar",
    "purse",
    "vent",
    "shower floor",
    "water pitcher",
    "mailbox",
    "bowl",
    "paper bag",
    "alarm clock",
    "music stand",
    "projector screen",
    "divider",
    "laundry detergent",
    "bathroom counter",
    "object",
    "bathroom vanity",
    "closet wall",
    "laundry hamper",
    "bathroom stall door",
    "ceiling light",
    "trash bin",
    "dumbbell",
    "stair rail",
    "tube",
    "bathroom cabinet",
    "cd case",
    "closet rod",
    "coffee kettle",
    "structure",
    "shower head",
    "keyboard piano",
    "case of water bottles",
    "coat rack",
    "storage organizer",
    "folded chair",
    "fire alarm",
    "power strip",
    "calendar",
    "poster",
    "potted plant",
    "luggage",
    "mattress",
)
# Per-class visualization colors for ScanNet200, keyed by raw class id.
# Values are presumably (R, G, B) triples in the 0-255 range stored as
# floats — confirm channel order against the consuming visualization code.
SCANNET_COLOR_MAP_200 = {
    0: (0.0, 0.0, 0.0),
    1: (174.0, 199.0, 232.0),
    2: (188.0, 189.0, 34.0),
    3: (152.0, 223.0, 138.0),
    4: (255.0, 152.0, 150.0),
    5: (214.0, 39.0, 40.0),
    6: (91.0, 135.0, 229.0),
    7: (31.0, 119.0, 180.0),
    8: (229.0, 91.0, 104.0),
    9: (247.0, 182.0, 210.0),
    10: (91.0, 229.0, 110.0),
    11: (255.0, 187.0, 120.0),
    13: (141.0, 91.0, 229.0),
    14: (112.0, 128.0, 144.0),
    15: (196.0, 156.0, 148.0),
    16: (197.0, 176.0, 213.0),
    17: (44.0, 160.0, 44.0),
    18: (148.0, 103.0, 189.0),
    19: (229.0, 91.0, 223.0),
    21: (219.0, 219.0, 141.0),
    22: (192.0, 229.0, 91.0),
    23: (88.0, 218.0, 137.0),
    24: (58.0, 98.0, 137.0),
    26: (177.0, 82.0, 239.0),
    27: (255.0, 127.0, 14.0),
    28: (237.0, 204.0, 37.0),
    29: (41.0, 206.0, 32.0),
    31: (62.0, 143.0, 148.0),
    32: (34.0, 14.0, 130.0),
    33: (143.0, 45.0, 115.0),
    34: (137.0, 63.0, 14.0),
    35: (23.0, 190.0, 207.0),
    36: (16.0, 212.0, 139.0),
    38: (90.0, 119.0, 201.0),
    39: (125.0, 30.0, 141.0),
    40: (150.0, 53.0, 56.0),
    41: (186.0, 197.0, 62.0),
    42: (227.0, 119.0, 194.0),
    44: (38.0, 100.0, 128.0),
    45: (120.0, 31.0, 243.0),
    46: (154.0, 59.0, 103.0),
    47: (169.0, 137.0, 78.0),
    48: (143.0, 245.0, 111.0),
    49: (37.0, 230.0, 205.0),
    50: (14.0, 16.0, 155.0),
    51: (196.0, 51.0, 182.0),
    52: (237.0, 80.0, 38.0),
    54: (138.0, 175.0, 62.0),
    55: (158.0, 218.0, 229.0),
    56: (38.0, 96.0, 167.0),
    57: (190.0, 77.0, 246.0),
    58: (208.0, 49.0, 84.0),
    59: (208.0, 193.0, 72.0),
    62: (55.0, 220.0, 57.0),
    63: (10.0, 125.0, 140.0),
    64: (76.0, 38.0, 202.0),
    65: (191.0, 28.0, 135.0),
    66: (211.0, 120.0, 42.0),
    67: (118.0, 174.0, 76.0),
    68: (17.0, 242.0, 171.0),
    69: (20.0, 65.0, 247.0),
    70: (208.0, 61.0, 222.0),
    71: (162.0, 62.0, 60.0),
    72: (210.0, 235.0, 62.0),
    73: (45.0, 152.0, 72.0),
    74: (35.0, 107.0, 149.0),
    75: (160.0, 89.0, 237.0),
    76: (227.0, 56.0, 125.0),
    77: (169.0, 143.0, 81.0),
    78: (42.0, 143.0, 20.0),
    79: (25.0, 160.0, 151.0),
    80: (82.0, 75.0, 227.0),
    82: (253.0, 59.0, 222.0),
    84: (240.0, 130.0, 89.0),
    86: (123.0, 172.0, 47.0),
    87: (71.0, 194.0, 133.0),
    88: (24.0, 94.0, 205.0),
    89: (134.0, 16.0, 179.0),
    90: (159.0, 32.0, 52.0),
    93: (213.0, 208.0, 88.0),
    95: (64.0, 158.0, 70.0),
    96: (18.0, 163.0, 194.0),
    97: (65.0, 29.0, 153.0),
    98: (177.0, 10.0, 109.0),
    99: (152.0, 83.0, 7.0),
    100: (83.0, 175.0, 30.0),
    101: (18.0, 199.0, 153.0),
    102: (61.0, 81.0, 208.0),
    103: (213.0, 85.0, 216.0),
    104: (170.0, 53.0, 42.0),
    105: (161.0, 192.0, 38.0),
    106: (23.0, 241.0, 91.0),
    107: (12.0, 103.0, 170.0),
    110: (151.0, 41.0, 245.0),
    112: (133.0, 51.0, 80.0),
    115: (184.0, 162.0, 91.0),
    116: (50.0, 138.0, 38.0),
    118: (31.0, 237.0, 236.0),
    120: (39.0, 19.0, 208.0),
    121: (223.0, 27.0, 180.0),
    122: (254.0, 141.0, 85.0),
    125: (97.0, 144.0, 39.0),
    128: (106.0, 231.0, 176.0),
    130: (12.0, 61.0, 162.0),
    131: (124.0, 66.0, 140.0),
    132: (137.0, 66.0, 73.0),
    134: (250.0, 253.0, 26.0),
    136: (55.0, 191.0, 73.0),
    138: (60.0, 126.0, 146.0),
    139: (153.0, 108.0, 234.0),
    140: (184.0, 58.0, 125.0),
    141: (135.0, 84.0, 14.0),
    145: (139.0, 248.0, 91.0),
    148: (53.0, 200.0, 172.0),
    154: (63.0, 69.0, 134.0),
    155: (190.0, 75.0, 186.0),
    156: (127.0, 63.0, 52.0),
    157: (141.0, 182.0, 25.0),
    159: (56.0, 144.0, 89.0),
    161: (64.0, 160.0, 250.0),
    163: (182.0, 86.0, 245.0),
    165: (139.0, 18.0, 53.0),
    166: (134.0, 120.0, 54.0),
    168: (49.0, 165.0, 42.0),
    169: (51.0, 128.0, 133.0),
    170: (44.0, 21.0, 163.0),
    177: (232.0, 93.0, 193.0),
    180: (176.0, 102.0, 54.0),
    185: (116.0, 217.0, 17.0),
    188: (54.0, 209.0, 150.0),
    191: (60.0, 99.0, 204.0),
    193: (129.0, 43.0, 144.0),
    195: (252.0, 100.0, 106.0),
    202: (187.0, 196.0, 73.0),
    208: (13.0, 158.0, 40.0),
    213: (52.0, 122.0, 152.0),
    214: (128.0, 76.0, 202.0),
    221: (187.0, 50.0, 115.0),
    229: (180.0, 141.0, 71.0),
    230: (77.0, 208.0, 35.0),
    232: (72.0, 183.0, 168.0),
    233: (97.0, 99.0, 203.0),
    242: (172.0, 22.0, 158.0),
    250: (155.0, 64.0, 40.0),
    261: (118.0, 159.0, 30.0),
    264: (69.0, 252.0, 148.0),
    276: (45.0, 103.0, 173.0),
    283: (111.0, 38.0, 149.0),
    286: (184.0, 9.0, 49.0),
    300: (188.0, 174.0, 67.0),
    304: (53.0, 206.0, 53.0),
    312: (97.0, 235.0, 252.0),
    323: (66.0, 32.0, 182.0),
    325: (236.0, 114.0, 195.0),
    331: (241.0, 154.0, 83.0),
    342: (133.0, 240.0, 52.0),
    356: (16.0, 205.0, 144.0),
    370: (75.0, 101.0, 198.0),
    392: (237.0, 95.0, 251.0),
    395: (191.0, 52.0, 49.0),
    399: (227.0, 254.0, 54.0),
    408: (49.0, 206.0, 87.0),
    417: (48.0, 113.0, 150.0),
    488: (125.0, 73.0, 182.0),
    540: (229.0, 32.0, 114.0),
    562: (158.0, 119.0, 28.0),
    570: (60.0, 205.0, 27.0),
    572: (18.0, 215.0, 201.0),
    581: (79.0, 76.0, 153.0),
    609: (134.0, 13.0, 116.0),
    748: (192.0, 97.0, 63.0),
    776: (108.0, 163.0, 18.0),
    1156: (95.0, 220.0, 156.0),
    1163: (98.0, 141.0, 208.0),
    1164: (144.0, 19.0, 193.0),
    1165: (166.0, 36.0, 57.0),
    1166: (212.0, 202.0, 34.0),
    1167: (23.0, 206.0, 34.0),
    1168: (91.0, 211.0, 236.0),
    1169: (79.0, 55.0, 137.0),
    1170: (182.0, 19.0, 117.0),
    1171: (134.0, 76.0, 14.0),
    1172: (87.0, 185.0, 28.0),
    1173: (82.0, 224.0, 187.0),
    1174: (92.0, 110.0, 214.0),
    1175: (168.0, 80.0, 171.0),
    1176: (197.0, 63.0, 51.0),
    1178: (175.0, 199.0, 77.0),
    1179: (62.0, 180.0, 98.0),
    1180: (8.0, 91.0, 150.0),
    1181: (77.0, 15.0, 130.0),
    1182: (154.0, 65.0, 96.0),
    1183: (197.0, 152.0, 11.0),
    1184: (59.0, 155.0, 45.0),
    1185: (12.0, 147.0, 145.0),
    1186: (54.0, 35.0, 219.0),
    1187: (210.0, 73.0, 181.0),
    1188: (221.0, 124.0, 77.0),
    1189: (149.0, 214.0, 66.0),
    1190: (72.0, 185.0, 134.0),
    1191: (42.0, 94.0, 198.0),
}
# For instance segmentation the non-object ("stuff") categories.
# Ids 1 and 3 presumably correspond to "wall" and "floor" in the id list
# above, matching CLASS_LABELS_PANOPTIC — confirm against that list.
VALID_PANOPTIC_IDS = (1, 3)
CLASS_LABELS_PANOPTIC = ("wall", "floor")
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/preprocessing/scannet/scannet_pair/compute_full_overlapping.py | pointcept/datasets/preprocessing/scannet/scannet_pair/compute_full_overlapping.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import torch
import numpy as np
import math
import glob, os
import argparse
import open3d as o3d
def make_open3d_point_cloud(xyz, color=None, voxel_size=None):
    """Build an Open3D PointCloud from an (N, >=3) coordinate array.

    Returns None when *xyz* contains any NaN coordinate.  Colors are
    attached when *color* is given, and the cloud is voxel-downsampled
    when *voxel_size* is given.
    """
    # Reject clouds with undefined coordinates up front.
    if np.isnan(xyz).any():
        return None
    cloud = o3d.geometry.PointCloud()
    cloud.points = o3d.utility.Vector3dVector(xyz[:, :3])
    if color is not None:
        cloud.colors = o3d.utility.Vector3dVector(color)
    return cloud if voxel_size is None else cloud.voxel_down_sample(voxel_size)
def compute_overlap_ratio(pcd0, pcd1, voxel_size):
    """Return the larger of the two directed overlap ratios between clouds.

    Both clouds are voxel-downsampled first; a directed ratio is the
    fraction of points in one cloud with a neighbor in the other within
    1.5 * voxel_size.
    """
    down0 = pcd0.voxel_down_sample(voxel_size)
    down1 = pcd1.voxel_down_sample(voxel_size)
    radius = voxel_size * 1.5
    ratio01 = float(len(get_matching_indices(down0, down1, radius, 1))) / float(
        len(down0.points)
    )
    ratio10 = float(len(get_matching_indices(down1, down0, radius, 1))) / float(
        len(down1.points)
    )
    return max(ratio01, ratio10)
def get_matching_indices(source, pcd_tree, search_voxel_size, K=None):
match_inds = []
for i, point in enumerate(source.points):
[_, idx, _] = pcd_tree.search_radius_vector_3d(point, search_voxel_size)
if K is not None:
idx = idx[:K]
for j in idx:
match_inds.append((i, j))
return match_inds
def compute_full_overlapping(data_root, scene_id, voxel_size=0.05):
    """Compute pairwise overlap ratios for all point-cloud fragments of a scene.

    Loads every ``<data_root>/<scene_id>/pcd/*.pth`` fragment (each is a
    torch-saved dict with a "coord" array — assumed (N, >=3); TODO confirm),
    builds voxel-downsampled Open3D clouds, computes the directed matching
    ratio for every ordered pair, and writes the symmetrized (max of both
    directions) overlap of each unordered pair to ``pcd/overlap.txt``.
    """
    # Load every fragment; make_open3d_point_cloud returns None for clouds
    # containing NaNs, which are filtered out below.
    _points = [
        (
            pcd_name,
            make_open3d_point_cloud(
                torch.load(pcd_name)["coord"], voxel_size=voxel_size
            ),
        )
        for pcd_name in glob.glob(os.path.join(data_root, scene_id, "pcd", "*.pth"))
    ]
    points = [(pcd_name, pcd) for (pcd_name, pcd) in _points if pcd is not None]
    print(
        "load {} point clouds ({} invalid has been filtered), computing matching/overlapping".format(
            len(points), len(_points) - len(points)
        )
    )
    # matching_matrix[i, j] = fraction of cloud j's points that have a
    # neighbor in cloud i within 1.5 * voxel_size (directed, not symmetric).
    matching_matrix = np.zeros((len(points), len(points)))
    for i, (pcd0_name, pcd0) in enumerate(points):
        print("matching to...{}".format(pcd0_name))
        # deepcopy: KDTreeFlann construction should not mutate the cloud
        # being indexed — NOTE(review): presumably defensive; confirm.
        pcd0_tree = o3d.geometry.KDTreeFlann(copy.deepcopy(pcd0))
        for j, (pcd1_name, pcd1) in enumerate(points):
            if i == j:
                continue
            matching_matrix[i, j] = float(
                len(get_matching_indices(pcd1, pcd0_tree, 1.5 * voxel_size, 1))
            ) / float(len(pcd1.points))
    # write to file: one line per unordered pair (i < j) with the
    # symmetrized overlap, paths made relative by stripping data_root.
    with open(os.path.join(data_root, scene_id, "pcd", "overlap.txt"), "w") as f:
        for i, (pcd0_name, pcd0) in enumerate(points):
            for j, (pcd1_name, pcd1) in enumerate(points):
                if i < j:
                    overlap = max(matching_matrix[i, j], matching_matrix[j, i])
                    f.write(
                        "{} {} {}\n".format(
                            pcd0_name.replace(data_root, ""),
                            pcd1_name.replace(data_root, ""),
                            overlap,
                        )
                    )
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/preprocessing/scannet/scannet_pair/SensorData.py | pointcept/datasets/preprocessing/scannet/scannet_pair/SensorData.py | import os, struct
import numpy as np
import zlib
import imageio
import cv2
# Integer codes stored in the .sens binary header, mapped to the names of
# the color / depth compression schemes used for the frame payloads.
COMPRESSION_TYPE_COLOR = {-1: "unknown", 0: "raw", 1: "png", 2: "jpeg"}
COMPRESSION_TYPE_DEPTH = {
    -1: "unknown",
    0: "raw_ushort",
    1: "zlib_ushort",
    2: "occi_ushort",
}
class RGBDFrame:
    """A single RGB-D frame parsed from a ScanNet .sens stream.

    Holds the 4x4 camera-to-world pose, the color/depth timestamps and the
    raw (still compressed) color and depth byte payloads.
    """

    def load(self, file_handle):
        """Read one frame record from *file_handle* (open binary stream).

        Record layout: 16 float32 (row-major 4x4 camera-to-world pose),
        two uint64 timestamps, two uint64 payload sizes, then the color
        and depth payloads.

        Raises struct.error on a truncated record.
        """
        self.camera_to_world = np.asarray(
            struct.unpack("f" * 16, file_handle.read(16 * 4)), dtype=np.float32
        ).reshape(4, 4)
        self.timestamp_color = struct.unpack("Q", file_handle.read(8))[0]
        self.timestamp_depth = struct.unpack("Q", file_handle.read(8))[0]
        self.color_size_bytes = struct.unpack("Q", file_handle.read(8))[0]
        self.depth_size_bytes = struct.unpack("Q", file_handle.read(8))[0]
        # Read payloads directly instead of unpacking N single chars with a
        # "c" * N format string and re-joining them — same bytes, but avoids
        # building an N-character format string and an N-tuple per frame.
        # Explicit length checks keep the original struct.error on short reads.
        self.color_data = file_handle.read(self.color_size_bytes)
        if len(self.color_data) != self.color_size_bytes:
            raise struct.error("unexpected end of file in color payload")
        self.depth_data = file_handle.read(self.depth_size_bytes)
        if len(self.depth_data) != self.depth_size_bytes:
            raise struct.error("unexpected end of file in depth payload")

    def decompress_depth(self, compression_type):
        """Return the raw little-endian uint16 depth bytes.

        Only "zlib_ushort" is supported; anything else raises RuntimeError.
        """
        if compression_type == "zlib_ushort":
            return self.decompress_depth_zlib()
        # The original used a bare `raise` with no active exception, which
        # produced an unhelpful "No active exception to re-raise"
        # RuntimeError; raise a descriptive RuntimeError instead.
        raise RuntimeError(
            "unsupported depth compression type: %r" % (compression_type,)
        )

    def decompress_depth_zlib(self):
        return zlib.decompress(self.depth_data)

    def decompress_color(self, compression_type):
        """Decode the color payload to an image array; only JPEG is supported."""
        if compression_type == "jpeg":
            return self.decompress_color_jpeg()
        raise RuntimeError(
            "unsupported color compression type: %r" % (compression_type,)
        )

    def decompress_color_jpeg(self):
        return imageio.imread(self.color_data)
class SensorData:
    """Parser/exporter for ScanNet .sens files (version 4).

    Reads the header (sensor name, intrinsics/extrinsics, compression
    types, image dimensions, depth shift) and all RGBDFrame records, and
    can export depth/color images, camera poses and intrinsics to disk.
    """

    def __init__(self, filename):
        # Only file-format version 4 is supported (asserted in load()).
        self.version = 4
        self.load(filename)

    def load(self, filename):
        """Parse the .sens header and every frame record from *filename*."""
        with open(filename, "rb") as f:
            version = struct.unpack("I", f.read(4))[0]
            assert self.version == version
            strlen = struct.unpack("Q", f.read(8))[0]
            self.sensor_name = b"".join(struct.unpack("c" * strlen, f.read(strlen)))
            self.intrinsic_color = np.asarray(
                struct.unpack("f" * 16, f.read(16 * 4)), dtype=np.float32
            ).reshape(4, 4)
            self.extrinsic_color = np.asarray(
                struct.unpack("f" * 16, f.read(16 * 4)), dtype=np.float32
            ).reshape(4, 4)
            self.intrinsic_depth = np.asarray(
                struct.unpack("f" * 16, f.read(16 * 4)), dtype=np.float32
            ).reshape(4, 4)
            self.extrinsic_depth = np.asarray(
                struct.unpack("f" * 16, f.read(16 * 4)), dtype=np.float32
            ).reshape(4, 4)
            self.color_compression_type = COMPRESSION_TYPE_COLOR[
                struct.unpack("i", f.read(4))[0]
            ]
            self.depth_compression_type = COMPRESSION_TYPE_DEPTH[
                struct.unpack("i", f.read(4))[0]
            ]
            self.color_width = struct.unpack("I", f.read(4))[0]
            self.color_height = struct.unpack("I", f.read(4))[0]
            self.depth_width = struct.unpack("I", f.read(4))[0]
            self.depth_height = struct.unpack("I", f.read(4))[0]
            # Scale factor from stored uint16 depth values to meters
            # (presumably — confirm against the dataset documentation).
            self.depth_shift = struct.unpack("f", f.read(4))[0]
            num_frames = struct.unpack("Q", f.read(8))[0]
            self.frames = []
            for i in range(num_frames):
                frame = RGBDFrame()
                frame.load(f)
                self.frames.append(frame)

    def export_depth_images(self, output_path, image_size=None, frame_skip=1):
        """Write every frame_skip-th depth frame as a 16-bit PNG."""
        if not os.path.exists(output_path):
            os.makedirs(output_path)
        print(
            "exporting", len(self.frames) // frame_skip, " depth frames to", output_path
        )
        for f in range(0, len(self.frames), frame_skip):
            # Skip frames that were already exported by a previous run.
            if os.path.exists((os.path.join(output_path, str(f) + ".png"))):
                continue
            if f % 100 == 0:
                print(
                    "exporting",
                    f,
                    "th depth frames to",
                    os.path.join(output_path, str(f) + ".png"),
                )
            depth_data = self.frames[f].decompress_depth(self.depth_compression_type)
            # np.fromstring is deprecated (removed in NumPy 2.0) for binary
            # data; np.frombuffer is the supported zero-copy equivalent.
            depth = np.frombuffer(depth_data, dtype=np.uint16).reshape(
                self.depth_height, self.depth_width
            )
            if image_size is not None:
                # NEAREST keeps depth values exact (no interpolation of
                # invalid/zero depths with valid neighbors).
                depth = cv2.resize(
                    depth,
                    (image_size[1], image_size[0]),
                    interpolation=cv2.INTER_NEAREST,
                )
            imageio.imwrite(os.path.join(output_path, str(f) + ".png"), depth)

    def export_color_images(self, output_path, image_size=None, frame_skip=1):
        """Write every frame_skip-th decoded color frame as a PNG."""
        if not os.path.exists(output_path):
            os.makedirs(output_path)
        print(
            "exporting", len(self.frames) // frame_skip, "color frames to", output_path
        )
        for f in range(0, len(self.frames), frame_skip):
            if os.path.exists((os.path.join(output_path, str(f) + ".png"))):
                continue
            if f % 100 == 0:
                print(
                    "exporting",
                    f,
                    "th color frames to",
                    os.path.join(output_path, str(f) + ".png"),
                )
            color = self.frames[f].decompress_color(self.color_compression_type)
            if image_size is not None:
                color = cv2.resize(
                    color,
                    (image_size[1], image_size[0]),
                    interpolation=cv2.INTER_NEAREST,
                )
            # imageio.imwrite(os.path.join(output_path, str(f) + '.jpg'), color)
            imageio.imwrite(os.path.join(output_path, str(f) + ".png"), color)

    def save_mat_to_file(self, matrix, filename):
        """Write *matrix* to *filename* as whitespace-separated text rows."""
        with open(filename, "w") as f:
            for line in matrix:
                np.savetxt(f, line[np.newaxis], fmt="%f")

    def export_poses(self, output_path, frame_skip=1):
        """Write every frame_skip-th camera-to-world pose as a 4x4 text file."""
        if not os.path.exists(output_path):
            os.makedirs(output_path)
        print(
            "exporting", len(self.frames) // frame_skip, "camera poses to", output_path
        )
        for f in range(0, len(self.frames), frame_skip):
            self.save_mat_to_file(
                self.frames[f].camera_to_world,
                os.path.join(output_path, str(f) + ".txt"),
            )

    def export_intrinsics(self, output_path):
        """Write the four intrinsic/extrinsic matrices as text files."""
        if not os.path.exists(output_path):
            os.makedirs(output_path)
        print("exporting camera intrinsics to", output_path)
        self.save_mat_to_file(
            self.intrinsic_color, os.path.join(output_path, "intrinsic_color.txt")
        )
        self.save_mat_to_file(
            self.extrinsic_color, os.path.join(output_path, "extrinsic_color.txt")
        )
        self.save_mat_to_file(
            self.intrinsic_depth, os.path.join(output_path, "intrinsic_depth.txt")
        )
        self.save_mat_to_file(
            self.extrinsic_depth, os.path.join(output_path, "extrinsic_depth.txt")
        )
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/preprocessing/scannet/scannet_pair/reader.py | pointcept/datasets/preprocessing/scannet/scannet_pair/reader.py | import argparse
import os, sys
from SensorData import SensorData
def reader(
    filename,
    output_path,
    frame_skip,
    export_color_images=False,
    export_depth_images=False,
    export_poses=False,
    export_intrinsics=False,
):
    """Load a .sens file and export the requested streams under *output_path*.

    Depth and color frames honor *frame_skip*; poses and intrinsics are
    written as text files into their own subdirectories.
    """
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    # load the data
    print("loading %s..." % filename)
    sens = SensorData(filename)
    if export_depth_images:
        sens.export_depth_images(
            os.path.join(output_path, "depth"), frame_skip=frame_skip
        )
    if export_color_images:
        sens.export_color_images(
            os.path.join(output_path, "color"), frame_skip=frame_skip
        )
    if export_poses:
        sens.export_poses(os.path.join(output_path, "pose"), frame_skip=frame_skip)
    if export_intrinsics:
        sens.export_intrinsics(os.path.join(output_path, "intrinsic"))
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/preprocessing/scannet/scannet_pair/generage_list.py | pointcept/datasets/preprocessing/scannet/scannet_pair/generage_list.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import glob, os, sys
from SensorData import SensorData
# params
# NOTE: arguments are parsed at import time, so importing this module
# without --target_dir on the command line will exit — this file is meant
# to be run as a script only.
parser = argparse.ArgumentParser()
# data paths
parser.add_argument("--target_dir", required=True, help="path to the target dir")
opt = parser.parse_args()
print(opt)
def main():
    """Merge all per-scene overlap files, keeping pairs with >= 30% overlap."""
    overlap_files = glob.glob(os.path.join(opt.target_dir, "*/pcd/overlap.txt"))
    out_path = os.path.join(opt.target_dir, "overlap30.txt")
    with open(out_path, "w") as out:
        for overlap_file in overlap_files:
            for record in open(overlap_file):
                name0, name1, ratio = record.strip().split()
                if float(ratio) >= 0.3:
                    print("{} {} {}".format(name0, name1, ratio), file=out)
    print("done")


if __name__ == "__main__":
    main()
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/preprocessing/scannet/scannet_pair/plyfile.py | pointcept/datasets/preprocessing/scannet/scannet_pair/plyfile.py | # Copyright 2014 Darsh Ranjan
#
# This file is part of python-plyfile.
#
# python-plyfile is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# python-plyfile is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-plyfile. If not, see
# <http://www.gnu.org/licenses/>.
from itertools import islice as _islice
import numpy as _np
from sys import byteorder as _byteorder
try:
_range = xrange
except NameError:
_range = range
# Many-many relation between PLY type spellings and numpy dtype codes.
# dict(_data_type_relation) keeps the LAST entry per key, while the
# reverse map keeps the last entry per dtype code.
_data_type_relation = [
    ("int8", "i1"),
    ("char", "i1"),
    ("uint8", "u1"),
    # NOTE(review): "b1" is the numpy bool code — this entry looks like a
    # typo for "u1".  The "u1" entry below wins in _data_types, but
    # _data_type_reverse still maps "b1" -> "uchar".  Confirm intent.
    ("uchar", "b1"),
    ("uchar", "u1"),
    ("int16", "i2"),
    ("short", "i2"),
    ("uint16", "u2"),
    ("ushort", "u2"),
    ("int32", "i4"),
    ("int", "i4"),
    ("uint32", "u4"),
    ("uint", "u4"),
    ("float32", "f4"),
    ("float", "f4"),
    ("float64", "f8"),
    ("double", "f8"),
]
_data_types = dict(_data_type_relation)
_data_type_reverse = dict((b, a) for (a, b) in _data_type_relation)
# Ordered list (and membership set) of every known type token, used in
# error messages by _lookup_type.
_types_list = []
_types_set = set()
for _a, _b in _data_type_relation:
    if _a not in _types_set:
        _types_list.append(_a)
        _types_set.add(_a)
    if _b not in _types_set:
        _types_list.append(_b)
        _types_set.add(_b)
# PLY "format" keyword <-> struct/numpy byte-order character.
_byte_order_map = {"ascii": "=", "binary_little_endian": "<", "binary_big_endian": ">"}
_byte_order_reverse = {"<": "binary_little_endian", ">": "binary_big_endian"}
_native_byte_order = {"little": "<", "big": ">"}[_byteorder]
def _lookup_type(type_str):
    """Normalize *type_str* to the canonical PLY type name.

    Accepts either a PLY spelling ("float32", "uchar", ...) or a numpy
    dtype code ("f4", "u1", ...); raises ValueError for unknown types.
    """
    if type_str in _data_type_reverse:
        return _data_type_reverse[type_str]
    try:
        code = _data_types[type_str]
    except KeyError:
        raise ValueError("field type %r not in %r" % (type_str, _types_list))
    return _data_type_reverse[code]
def _split_line(line, n):
    """Split *line* on whitespace into exactly n + 1 fields.

    The final field carries the unsplit remainder of the line, or "" when
    the line contained exactly n whitespace-separated fields.
    """
    parts = line.split(None, n)
    if len(parts) == n:
        parts.append("")
    assert len(parts) == n + 1
    return parts
def make2d(array, cols=None, dtype=None):
"""
Make a 2D array from an array of arrays. The `cols' and `dtype'
arguments can be omitted if the array is not empty.
"""
if (cols is None or dtype is None) and not len(array):
raise RuntimeError("cols and dtype must be specified for empty " "array")
if cols is None:
cols = len(array[0])
if dtype is None:
dtype = array[0].dtype
return _np.fromiter(array, [("_", dtype, (cols,))], count=len(array))["_"]
class PlyParseError(Exception):
    """
    Raised when a PLY file cannot be parsed.

    The attributes `element', `row', `property', and `message' give
    additional information.
    """

    def __init__(self, message, element=None, row=None, prop=None):
        self.message = message
        self.element = element
        self.row = row
        self.prop = prop

        # Build a human-readable prefix from whatever context is available.
        s = ""
        if self.element:
            s += "element %r: " % self.element.name
        if self.row is not None:
            s += "row %d: " % self.row
        if self.prop:
            s += "property %r: " % self.prop.name
        s += self.message

        Exception.__init__(self, s)

    def __repr__(self):
        # Bug fix: the original applied "%" to self.message only and then
        # returned a 4-tuple (format-string, element, row, prop) instead of
        # a string, so calling repr() raised TypeError.  Format all four
        # fields into the intended single string.
        return "PlyParseError(%r, element=%r, row=%r, prop=%r)" % (
            self.message,
            self.element,
            self.row,
            self.prop,
        )
class PlyData(object):
    """
    PLY file header and data.

    A PlyData instance is created in one of two ways: by the static
    method PlyData.read (to read a PLY file), or directly from __init__
    given a sequence of elements (which can then be written to a PLY
    file).
    """
    def __init__(
        self, elements=[], text=False, byte_order="=", comments=[], obj_info=[]
    ):
        """
        elements: sequence of PlyElement instances.
        text: whether the resulting PLY file will be text (True) or
        binary (False).
        byte_order: '<' for little-endian, '>' for big-endian, or '='
        for native.  This is only relevant if `text' is False.
        comments: sequence of strings that will be placed in the header
        between the 'ply' and 'format ...' lines.
        obj_info: like comments, but will be placed in the header with
        "obj_info ..." instead of "comment ...".

        NOTE(review): the mutable default arguments are only read, never
        mutated, so they are harmless here — still an anti-pattern.
        """
        # Resolve '=' to the machine's concrete endianness for binary
        # output, so the written header can name a specific format.
        if byte_order == "=" and not text:
            byte_order = _native_byte_order
        self.byte_order = byte_order
        self.text = text
        self.comments = list(comments)
        self.obj_info = list(obj_info)
        self.elements = elements
    def _get_elements(self):
        return self._elements
    def _set_elements(self, elements):
        # Freeze to a tuple and rebuild the name -> element lookup.
        self._elements = tuple(elements)
        self._index()
    elements = property(_get_elements, _set_elements)
    def _get_byte_order(self):
        return self._byte_order
    def _set_byte_order(self, byte_order):
        if byte_order not in ["<", ">", "="]:
            raise ValueError("byte order must be '<', '>', or '='")
        self._byte_order = byte_order
    byte_order = property(_get_byte_order, _set_byte_order)
    def _index(self):
        # Name -> PlyElement map used by __contains__/__getitem__.
        self._element_lookup = dict((elt.name, elt) for elt in self._elements)
        if len(self._element_lookup) != len(self._elements):
            raise ValueError("two elements with same name")
    @staticmethod
    def _parse_header(stream):
        """
        Parse a PLY header from a readable file-like stream.
        """
        lines = []
        comments = {"comment": [], "obj_info": []}
        # Read header lines up to (and consuming) "end_header"; comment
        # and obj_info lines keep their payload unsplit.
        while True:
            line = stream.readline().decode("ascii").strip()
            fields = _split_line(line, 1)
            if fields[0] == "end_header":
                break
            elif fields[0] in comments.keys():
                lines.append(fields)
            else:
                lines.append(line.split())
        a = 0
        if lines[a] != ["ply"]:
            raise PlyParseError("expected 'ply'")
        a += 1
        # Comments may appear between 'ply' and 'format'.
        while lines[a][0] in comments.keys():
            comments[lines[a][0]].append(lines[a][1])
            a += 1
        if lines[a][0] != "format":
            raise PlyParseError("expected 'format'")
        if lines[a][2] != "1.0":
            raise PlyParseError("expected version '1.0'")
        if len(lines[a]) != 3:
            raise PlyParseError("too many fields after 'format'")
        fmt = lines[a][1]
        if fmt not in _byte_order_map:
            raise PlyParseError("don't understand format %r" % fmt)
        byte_order = _byte_order_map[fmt]
        text = fmt == "ascii"
        a += 1
        # Comments may also follow the 'format' line.
        while a < len(lines) and lines[a][0] in comments.keys():
            comments[lines[a][0]].append(lines[a][1])
            a += 1
        return PlyData(
            PlyElement._parse_multi(lines[a:]),
            text,
            byte_order,
            comments["comment"],
            comments["obj_info"],
        )
    @staticmethod
    def read(stream):
        """
        Read PLY data from a readable file-like object or filename.
        """
        # must_close is True only when we opened the stream ourselves.
        (must_close, stream) = _open_stream(stream, "read")
        try:
            data = PlyData._parse_header(stream)
            for elt in data:
                elt._read(stream, data.text, data.byte_order)
        finally:
            if must_close:
                stream.close()
        return data
    def write(self, stream):
        """
        Write PLY data to a writeable file-like object or filename.
        """
        (must_close, stream) = _open_stream(stream, "write")
        try:
            stream.write(self.header.encode("ascii"))
            stream.write(b"\r\n")
            for elt in self:
                elt._write(stream, self.text, self.byte_order)
        finally:
            if must_close:
                stream.close()
    @property
    def header(self):
        """
        Provide PLY-formatted metadata for the instance.
        """
        lines = ["ply"]
        if self.text:
            lines.append("format ascii 1.0")
        else:
            lines.append("format " + _byte_order_reverse[self.byte_order] + " 1.0")
        # Some information is lost here, since all comments are placed
        # between the 'format' line and the first element.
        for c in self.comments:
            lines.append("comment " + c)
        for c in self.obj_info:
            lines.append("obj_info " + c)
        lines.extend(elt.header for elt in self.elements)
        lines.append("end_header")
        return "\r\n".join(lines)
    def __iter__(self):
        return iter(self.elements)
    def __len__(self):
        return len(self.elements)
    def __contains__(self, name):
        return name in self._element_lookup
    def __getitem__(self, name):
        return self._element_lookup[name]
    def __str__(self):
        return self.header
    def __repr__(self):
        return "PlyData(%r, text=%r, byte_order=%r, " "comments=%r, obj_info=%r)" % (
            self.elements,
            self.text,
            self.byte_order,
            self.comments,
            self.obj_info,
        )
def _open_stream(stream, read_or_write):
    """Return (must_close, stream), opening *stream* if it is a filename.

    Objects that already expose the needed method ("read"/"write") pass
    through unchanged with must_close False; anything else is treated as a
    filename and opened in binary mode with must_close True.  Raises
    RuntimeError when *stream* is neither.
    """
    if hasattr(stream, read_or_write):
        return (False, stream)
    mode = read_or_write[0] + "b"
    try:
        return (True, open(stream, mode))
    except TypeError:
        raise RuntimeError("expected open file or filename")
class PlyElement(object):
"""
PLY file element.
A client of this library doesn't normally need to instantiate this
directly, so the following is only for the sake of documenting the
internals.
Creating a PlyElement instance is generally done in one of two ways:
as a byproduct of PlyData.read (when reading a PLY file) and by
PlyElement.describe (before writing a PLY file).
"""
def __init__(self, name, properties, count, comments=[]):
"""
This is not part of the public interface. The preferred methods
of obtaining PlyElement instances are PlyData.read (to read from
a file) and PlyElement.describe (to construct from a numpy
array).
"""
self._name = str(name)
self._check_name()
self._count = count
self._properties = tuple(properties)
self._index()
self.comments = list(comments)
self._have_list = any(isinstance(p, PlyListProperty) for p in self.properties)
@property
def count(self):
return self._count
def _get_data(self):
return self._data
def _set_data(self, data):
self._data = data
self._count = len(data)
self._check_sanity()
data = property(_get_data, _set_data)
def _check_sanity(self):
for prop in self.properties:
if prop.name not in self._data.dtype.fields:
raise ValueError("dangling property %r" % prop.name)
def _get_properties(self):
return self._properties
def _set_properties(self, properties):
self._properties = tuple(properties)
self._check_sanity()
self._index()
properties = property(_get_properties, _set_properties)
def _index(self):
self._property_lookup = dict((prop.name, prop) for prop in self._properties)
if len(self._property_lookup) != len(self._properties):
raise ValueError("two properties with same name")
def ply_property(self, name):
return self._property_lookup[name]
@property
def name(self):
return self._name
def _check_name(self):
if any(c.isspace() for c in self._name):
msg = "element name %r contains spaces" % self._name
raise ValueError(msg)
def dtype(self, byte_order="="):
"""
Return the numpy dtype of the in-memory representation of the
data. (If there are no list properties, and the PLY format is
binary, then this also accurately describes the on-disk
representation of the element.)
"""
return [(prop.name, prop.dtype(byte_order)) for prop in self.properties]
@staticmethod
def _parse_multi(header_lines):
"""
Parse a list of PLY element definitions.
"""
elements = []
while header_lines:
(elt, header_lines) = PlyElement._parse_one(header_lines)
elements.append(elt)
return elements
@staticmethod
def _parse_one(lines):
"""
Consume one element definition. The unconsumed input is
returned along with a PlyElement instance.
"""
a = 0
line = lines[a]
if line[0] != "element":
raise PlyParseError("expected 'element'")
if len(line) > 3:
raise PlyParseError("too many fields after 'element'")
if len(line) < 3:
raise PlyParseError("too few fields after 'element'")
(name, count) = (line[1], int(line[2]))
comments = []
properties = []
while True:
a += 1
if a >= len(lines):
break
if lines[a][0] == "comment":
comments.append(lines[a][1])
elif lines[a][0] == "property":
properties.append(PlyProperty._parse_one(lines[a]))
else:
break
return (PlyElement(name, properties, count, comments), lines[a:])
@staticmethod
def describe(data, name, len_types={}, val_types={}, comments=[]):
"""
Construct a PlyElement from an array's metadata.
len_types and val_types can be given as mappings from list
property names to type strings (like 'u1', 'f4', etc., or
'int8', 'float32', etc.). These can be used to define the length
and value types of list properties. List property lengths
always default to type 'u1' (8-bit unsigned integer), and value
types default to 'i4' (32-bit integer).
"""
if not isinstance(data, _np.ndarray):
raise TypeError("only numpy arrays are supported")
if len(data.shape) != 1:
raise ValueError("only one-dimensional arrays are " "supported")
count = len(data)
properties = []
descr = data.dtype.descr
for t in descr:
if not isinstance(t[1], str):
raise ValueError("nested records not supported")
if not t[0]:
raise ValueError("field with empty name")
if len(t) != 2 or t[1][1] == "O":
# non-scalar field, which corresponds to a list
# property in PLY.
if t[1][1] == "O":
if len(t) != 2:
raise ValueError("non-scalar object fields not " "supported")
len_str = _data_type_reverse[len_types.get(t[0], "u1")]
if t[1][1] == "O":
val_type = val_types.get(t[0], "i4")
val_str = _lookup_type(val_type)
else:
val_str = _lookup_type(t[1][1:])
prop = PlyListProperty(t[0], len_str, val_str)
else:
val_str = _lookup_type(t[1][1:])
prop = PlyProperty(t[0], val_str)
properties.append(prop)
elt = PlyElement(name, properties, count, comments)
elt.data = data
return elt
def _read(self, stream, text, byte_order):
"""
Read the actual data from a PLY file.
"""
if text:
self._read_txt(stream)
else:
if self._have_list:
# There are list properties, so a simple load is
# impossible.
self._read_bin(stream, byte_order)
else:
# There are no list properties, so loading the data is
# much more straightforward.
self._data = _np.fromfile(stream, self.dtype(byte_order), self.count)
if len(self._data) < self.count:
k = len(self._data)
del self._data
raise PlyParseError("early end-of-file", self, k)
self._check_sanity()
def _write(self, stream, text, byte_order):
"""
Write the data to a PLY file.
"""
if text:
self._write_txt(stream)
else:
if self._have_list:
# There are list properties, so serialization is
# slightly complicated.
self._write_bin(stream, byte_order)
else:
# no list properties, so serialization is
# straightforward.
self.data.astype(self.dtype(byte_order), copy=False).tofile(stream)
def _read_txt(self, stream):
"""
Load a PLY element from an ASCII-format PLY file. The element
may contain list properties.
"""
self._data = _np.empty(self.count, dtype=self.dtype())
k = 0
for line in _islice(iter(stream.readline, b""), self.count):
fields = iter(line.strip().split())
for prop in self.properties:
try:
self._data[prop.name][k] = prop._from_fields(fields)
except StopIteration:
raise PlyParseError("early end-of-line", self, k, prop)
except ValueError:
raise PlyParseError("malformed input", self, k, prop)
try:
next(fields)
except StopIteration:
pass
else:
raise PlyParseError("expected end-of-line", self, k)
k += 1
if k < self.count:
del self._data
raise PlyParseError("early end-of-file", self, k)
def _write_txt(self, stream):
"""
Save a PLY element to an ASCII-format PLY file. The element may
contain list properties.
"""
for rec in self.data:
fields = []
for prop in self.properties:
fields.extend(prop._to_fields(rec[prop.name]))
_np.savetxt(stream, [fields], "%.18g", newline="\r\n")
    def _read_bin(self, stream, byte_order):
        """
        Load a PLY element from a binary PLY file. The element may
        contain list properties.

        Raises PlyParseError if the stream ends before all records are
        read (properties report truncation via StopIteration).
        """
        self._data = _np.empty(self.count, dtype=self.dtype(byte_order))
        for k in _range(self.count):
            for prop in self.properties:
                try:
                    self._data[prop.name][k] = prop._read_bin(stream, byte_order)
                except StopIteration:
                    raise PlyParseError("early end-of-file", self, k, prop)
    def _write_bin(self, stream, byte_order):
        """
        Save a PLY element to a binary PLY file. The element may
        contain list properties.
        """
        # Per-record, per-property writes are required because list
        # properties have variable length.
        for rec in self.data:
            for prop in self.properties:
                prop._write_bin(rec[prop.name], stream, byte_order)
    @property
    def header(self):
        """
        Format this element's metadata as it would appear in a PLY
        header.

        Returns a CRLF-joined string: the "element <name> <count>" line,
        any comments, then one line per property.
        """
        lines = ["element %s %d" % (self.name, self.count)]
        # Some information is lost here, since all comments are placed
        # between the 'element' line and the first property definition.
        for c in self.comments:
            lines.append("comment " + c)
        lines.extend(list(map(str, self.properties)))
        return "\r\n".join(lines)
    def __getitem__(self, key):
        # Column access by property name, delegated to the record array.
        return self.data[key]
    def __setitem__(self, key, value):
        # Column assignment by property name.
        self.data[key] = value
    def __str__(self):
        # An element prints as its PLY header section.
        return self.header
    def __repr__(self):
        return "PlyElement(%r, %r, count=%d, comments=%r)" % (
            self.name,
            self.properties,
            self.count,
            self.comments,
        )
class PlyProperty(object):
    """
    PLY property description. This class is pure metadata; the data
    itself is contained in PlyElement instances.

    A scalar property has a single value type (`val_dtype`); list
    properties are handled by the PlyListProperty subclass.
    """
    def __init__(self, name, val_dtype):
        self._name = str(name)
        self._check_name()
        self.val_dtype = val_dtype
    def _get_val_dtype(self):
        return self._val_dtype
    def _set_val_dtype(self, val_dtype):
        # Normalize any accepted type spelling to the canonical form via
        # the module-level lookup tables.
        self._val_dtype = _data_types[_lookup_type(val_dtype)]
    val_dtype = property(_get_val_dtype, _set_val_dtype)
    @property
    def name(self):
        return self._name
    def _check_name(self):
        # Header fields are whitespace-delimited, so whitespace inside a
        # property name would corrupt the header.
        if any(c.isspace() for c in self._name):
            msg = "Error: property name %r contains spaces" % self._name
            raise RuntimeError(msg)
    @staticmethod
    def _parse_one(line):
        """Parse one tokenized "property ..." header line into a
        PlyProperty or PlyListProperty instance."""
        assert line[0] == "property"
        if line[1] == "list":
            if len(line) > 5:
                raise PlyParseError("too many fields after " "'property list'")
            if len(line) < 5:
                raise PlyParseError("too few fields after " "'property list'")
            # line == ["property", "list", len_type, val_type, name]
            return PlyListProperty(line[4], line[2], line[3])
        else:
            if len(line) > 3:
                raise PlyParseError("too many fields after " "'property'")
            if len(line) < 3:
                raise PlyParseError("too few fields after " "'property'")
            return PlyProperty(line[2], line[1])
    def dtype(self, byte_order="="):
        """
        Return the numpy dtype description for this property (as a tuple
        of strings).
        """
        return byte_order + self.val_dtype
    def _from_fields(self, fields):
        """
        Parse from generator. Raise StopIteration if the property could
        not be read.
        """
        return _np.dtype(self.dtype()).type(next(fields))
    def _to_fields(self, data):
        """
        Return generator over one item.
        """
        yield _np.dtype(self.dtype()).type(data)
    def _read_bin(self, stream, byte_order):
        """
        Read data from a binary stream. Raise StopIteration if the
        property could not be read.
        """
        try:
            return _np.fromfile(stream, self.dtype(byte_order), 1)[0]
        except IndexError:
            # Empty read at EOF -> report truncation to the caller.
            raise StopIteration
    def _write_bin(self, data, stream, byte_order):
        """
        Write data to a binary stream.
        """
        _np.dtype(self.dtype(byte_order)).type(data).tofile(stream)
    def __str__(self):
        val_str = _data_type_reverse[self.val_dtype]
        return "property %s %s" % (val_str, self.name)
    def __repr__(self):
        return "PlyProperty(%r, %r)" % (self.name, _lookup_type(self.val_dtype))
class PlyListProperty(PlyProperty):
    """
    PLY list property description.

    A list property stores a variable-length sequence per element row:
    a count field of type `len_dtype` followed by that many values of
    type `val_dtype`.
    """
    def __init__(self, name, len_dtype, val_dtype):
        PlyProperty.__init__(self, name, val_dtype)
        self.len_dtype = len_dtype
    def _get_len_dtype(self):
        return self._len_dtype
    def _set_len_dtype(self, len_dtype):
        # Normalize to the canonical type string via the module tables.
        self._len_dtype = _data_types[_lookup_type(len_dtype)]
    len_dtype = property(_get_len_dtype, _set_len_dtype)
    def dtype(self, byte_order="="):
        """
        List properties always have a numpy dtype of "object".

        (Each cell holds a variable-length array, so `byte_order` is
        irrelevant here; see `list_dtype` for the per-value types.)
        """
        return "|O"
    def list_dtype(self, byte_order="="):
        """
        Return the pair (len_dtype, val_dtype) (both numpy-friendly
        strings).
        """
        return (byte_order + self.len_dtype, byte_order + self.val_dtype)
    def _from_fields(self, fields):
        # Text parsing: read the count, then exactly that many values.
        (len_t, val_t) = self.list_dtype()
        n = int(_np.dtype(len_t).type(next(fields)))
        data = _np.loadtxt(list(_islice(fields, n)), val_t, ndmin=1)
        if len(data) < n:
            # Short read is reported as StopIteration so the element
            # reader can turn it into a parse error.
            raise StopIteration
        return data
    def _to_fields(self, data):
        """
        Return generator over the (numerical) PLY representation of the
        list data (length followed by actual data).
        """
        (len_t, val_t) = self.list_dtype()
        data = _np.asarray(data, dtype=val_t).ravel()
        yield _np.dtype(len_t).type(data.size)
        for x in data:
            yield x
    def _read_bin(self, stream, byte_order):
        # Binary parsing: fixed-size count, then n values.
        (len_t, val_t) = self.list_dtype(byte_order)
        try:
            n = _np.fromfile(stream, len_t, 1)[0]
        except IndexError:
            raise StopIteration
        data = _np.fromfile(stream, val_t, n)
        if len(data) < n:
            raise StopIteration
        return data
    def _write_bin(self, data, stream, byte_order):
        """
        Write data to a binary stream.
        """
        (len_t, val_t) = self.list_dtype(byte_order)
        data = _np.asarray(data, dtype=val_t).ravel()
        # Count first, then the values themselves.
        _np.array(data.size, dtype=len_t).tofile(stream)
        data.tofile(stream)
    def __str__(self):
        len_str = _data_type_reverse[self.len_dtype]
        val_str = _data_type_reverse[self.val_dtype]
        return "property list %s %s %s" % (len_str, val_str, self.name)
    def __repr__(self):
        return "PlyListProperty(%r, %r, %r)" % (
            self.name,
            _lookup_type(self.len_dtype),
            _lookup_type(self.val_dtype),
        )
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/preprocessing/scannet/scannet_pair/point_cloud_extractor.py | pointcept/datasets/preprocessing/scannet/scannet_pair/point_cloud_extractor.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import glob, os
import numpy as np
import cv2
import torch
def extractor(input_path, output_path):
    """Convert per-frame depth/color/pose data into world-space point clouds.

    For every frame under ``input_path`` (expects ``intrinsic/``, ``pose/``,
    ``depth/`` and ``color/`` sub-directories with matching numeric frame
    names), back-projects the depth map through the depth intrinsics,
    transforms the points by the camera pose, and saves
    ``dict(coord=..., color=...)`` to ``output_path/<frame>.pth``.
    Frames that fail to load or convert are skipped with a message.
    """
    os.makedirs(output_path, exist_ok=True)

    # Load depth camera intrinsics (includes shift terms bx/by in column 3).
    depth_intrinsic = np.loadtxt(input_path + "/intrinsic/intrinsic_depth.txt")
    print("Depth intrinsic: ")
    print(depth_intrinsic)

    # Frames are matched across modalities by their numeric basename.
    def frame_key(path):
        return int(os.path.basename(path).split(".")[0])

    poses = sorted(glob.glob(input_path + "/pose/*.txt"), key=frame_key)
    depths = sorted(glob.glob(input_path + "/depth/*.png"), key=frame_key)
    colors = sorted(glob.glob(input_path + "/color/*.png"), key=frame_key)

    # Get aligned point clouds.
    for pose_path, depth_path, color_path in zip(poses, depths, colors):
        name = os.path.basename(pose_path).split(".")[0]
        # Bug fix: the original tested for "<name>.npz" (the old save
        # format) while saving "<name>.pth", so finished frames were
        # always reprocessed on resume.
        if os.path.exists(output_path + "/{}.pth".format(name)):
            continue
        try:
            print("=" * 50, ": {}".format(pose_path))
            depth_img = cv2.imread(depth_path, -1)  # -1: keep 16-bit depth
            mask = depth_img != 0  # zero depth marks invalid pixels
            color_image = cv2.imread(color_path)
            color_image = cv2.resize(color_image, (640, 480))
            color_image = np.reshape(color_image[mask], [-1, 3])
            # OpenCV loads BGR; flip channels to RGB. (Renamed from
            # "colors", which shadowed the list of color file paths.)
            rgb = color_image[:, ::-1].copy()

            pose = np.loadtxt(pose_path)
            print("Camera pose: ")
            print(pose)

            depth_shift = 1000.0  # depth stored in millimeters
            x, y = np.meshgrid(
                np.linspace(0, depth_img.shape[1] - 1, depth_img.shape[1]),
                np.linspace(0, depth_img.shape[0] - 1, depth_img.shape[0]),
            )
            uv_depth = np.zeros((depth_img.shape[0], depth_img.shape[1], 3))
            uv_depth[:, :, 0] = x
            uv_depth[:, :, 1] = y
            uv_depth[:, :, 2] = depth_img / depth_shift
            uv_depth = np.reshape(uv_depth, [-1, 3])
            uv_depth = uv_depth[np.where(uv_depth[:, 2] != 0), :].squeeze()

            fx = depth_intrinsic[0, 0]
            fy = depth_intrinsic[1, 1]
            cx = depth_intrinsic[0, 2]
            cy = depth_intrinsic[1, 2]
            bx = depth_intrinsic[0, 3]
            by = depth_intrinsic[1, 3]

            # Back-project pixels to camera space (homogeneous coords).
            n = uv_depth.shape[0]
            points = np.ones((n, 4))
            points[:, 0] = (uv_depth[:, 0] - cx) * uv_depth[:, 2] / fx + bx
            points[:, 1] = (uv_depth[:, 1] - cy) * uv_depth[:, 2] / fy + by
            points[:, 2] = uv_depth[:, 2]

            # Camera space -> world space via the per-frame pose.
            points_world = np.dot(points, np.transpose(pose))
            print(points_world.shape)

            pcd = dict(coord=points_world[:, :3], color=rgb)
            torch.save(pcd, output_path + "/{}.pth".format(name))
        except Exception as exc:
            # Best-effort per-frame processing: skip corrupt/missing
            # frames but say why (the original bare "except" silently
            # swallowed everything, including KeyboardInterrupt).
            print("Skipping frame {}: {}".format(name, exc))
            continue
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/preprocessing/scannet/scannet_pair/preprocess.py | pointcept/datasets/preprocessing/scannet/scannet_pair/preprocess.py | import os
import argparse
import glob
import multiprocessing as mp
from concurrent.futures import ProcessPoolExecutor
from itertools import repeat
from reader import reader
from point_cloud_extractor import extractor
from compute_full_overlapping import compute_full_overlapping
# Keep every 25th frame when exporting RGB-D data from a .sens archive.
frame_skip = 25


def parse_sens(sens_dir, output_dir):
    """Run the full preprocessing pipeline for one ScanNet ``.sens`` file.

    Exports color/depth frames, poses and intrinsics every ``frame_skip``
    frames, converts them to point clouds, then computes frame-pair
    overlaps for the scene.

    Args:
        sens_dir: path of the form ``.../<scene_id>/<name>.sens``.
        output_dir: root directory receiving ``<scene_id>/...`` outputs.
    """
    scene_id = os.path.basename(os.path.dirname(sens_dir))
    # Fix: the original message lacked a separating space ("data<path>").
    print(f"Parsing sens data {sens_dir}")
    reader(
        sens_dir,
        os.path.join(output_dir, scene_id),
        frame_skip,
        export_color_images=True,
        export_depth_images=True,
        export_poses=True,
        export_intrinsics=True,
    )
    extractor(
        os.path.join(output_dir, scene_id), os.path.join(output_dir, scene_id, "pcd")
    )
    compute_full_overlapping(output_dir, scene_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dataset_root",
        required=True,
        help="Path to the ScanNet dataset containing scene folders",
    )
    parser.add_argument(
        "--output_root",
        required=True,
        help="Output path where train/val folders will be located",
    )
    opt = parser.parse_args()
    sens_list = sorted(glob.glob(os.path.join(opt.dataset_root, "scans/scene*/*.sens")))
    # Preprocess data.
    pool = ProcessPoolExecutor(max_workers=mp.cpu_count())
    # pool = ProcessPoolExecutor(max_workers=1)
    print("Processing scenes...")
    # list(...) forces iteration so worker exceptions propagate here.
    _ = list(pool.map(parse_sens, sens_list, repeat(opt.output_root)))
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/preprocessing/arkitscenes/preprocess_arkitscenes_mesh.py | pointcept/datasets/preprocessing/arkitscenes/preprocess_arkitscenes_mesh.py | """
Preprocessing ArkitScenes
"""
import os
import argparse
import glob
import plyfile
import numpy as np
import pandas as pd
import multiprocessing as mp
from concurrent.futures import ProcessPoolExecutor
from itertools import repeat
import torch
def read_plymesh(filepath):
    """Read ply file and return it as numpy array. Returns None if empty.

    Returns:
        (vertices, faces): ``vertices`` is an (N, C) array of all vertex
        columns; ``faces`` stacks each face's vertex-index list.
        NOTE(review): when the file has no elements the function falls
        through and implicitly returns ``None`` (not a tuple), which
        would break tuple-unpacking callers -- confirm whether empty
        meshes can actually occur here.
    """
    with open(filepath, "rb") as f:
        plydata = plyfile.PlyData.read(f)
    if plydata.elements:
        vertices = pd.DataFrame(plydata["vertex"].data).values
        faces = np.stack(plydata["face"].data["vertex_indices"], axis=0)
        return vertices, faces
def face_normal(vertex, face):
    """Compute per-face unit normals and face areas.

    Args:
        vertex: (N, 3) array of vertex positions.
        face: (F, K) integer array of vertex indices (first three used).

    Returns:
        (normals, areas): ``normals`` is (F, 3) unit vectors, ``areas``
        is (F, 1) — half the cross-product magnitude (a 1e-8 epsilon is
        folded in to guard against zero-area faces).
    """
    edge_a = vertex[face[:, 1]] - vertex[face[:, 0]]
    edge_b = vertex[face[:, 2]] - vertex[face[:, 0]]
    cross = np.cross(edge_a, edge_b)
    # Epsilon keeps degenerate (zero-area) faces from dividing by zero.
    magnitude = np.sqrt((cross**2).sum(axis=1, keepdims=True)) + 1.0e-8
    unit = cross / magnitude
    return unit, magnitude * 0.5
def vertex_normal(vertex, face):
    """Compute area-weighted vertex normals.

    Args:
        vertex: (N, 3) array of vertex positions.
        face: (F, K) integer array of vertex indices per face.

    Returns:
        (N, 3) array of unit vertex normals: each vertex's normal is the
        normalized sum of the area-weighted normals of its incident faces.
    """
    nf, area = face_normal(vertex, face)
    nf = nf * area  # weight each face normal by its area
    nv = np.zeros_like(vertex)
    # Vectorized scatter-add replaces the original per-face Python loop
    # (O(F) interpreter overhead on large meshes). np.add.at accumulates
    # over repeated indices; note that a vertex repeated *within* one
    # degenerate face now contributes once per occurrence, whereas the
    # old fancy-index "+=" counted it only once.
    np.add.at(nv, face.reshape(-1), np.repeat(nf, face.shape[1], axis=0))
    length = np.sqrt(np.sum(nv**2, axis=1, keepdims=True)) + 1.0e-8
    nv = nv / length
    return nv
def parse_scene(scene_path, output_dir):
    """Convert one ``*_mesh.ply`` scene into ``<split>/<scene_id>.pth``
    holding coord/color/normal arrays plus the scene id."""
    print(f"Parsing scene {scene_path}")
    # Path layout is .../<split>/<scene_id>/<name>_mesh.ply
    split = os.path.basename(os.path.dirname(os.path.dirname(scene_path)))
    scene_id = os.path.basename(os.path.dirname(scene_path))
    vertices, faces = read_plymesh(scene_path)
    coords = vertices[:, :3]
    # Presumably columns 3:6 are RGB -- confirm against the ply schema.
    colors = vertices[:, 3:6]
    data_dict = dict(coord=coords, color=colors, scene_id=scene_id)
    data_dict["normal"] = vertex_normal(coords, faces)
    torch.save(data_dict, os.path.join(output_dir, split, f"{scene_id}.pth"))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dataset_root",
        required=True,
        help="Path to the ScanNet dataset containing scene folders",
    )
    parser.add_argument(
        "--output_root",
        required=True,
        help="Output path where train/val folders will be located",
    )
    opt = parser.parse_args()
    # Create output directories
    # (names must match the split directory names parse_scene derives
    # from the input paths: "Training" / "Validation").
    train_output_dir = os.path.join(opt.output_root, "Training")
    os.makedirs(train_output_dir, exist_ok=True)
    val_output_dir = os.path.join(opt.output_root, "Validation")
    os.makedirs(val_output_dir, exist_ok=True)
    # Load scene paths
    scene_paths = sorted(glob.glob(opt.dataset_root + "/3dod/*/*/*_mesh.ply"))
    # Preprocess data.
    pool = ProcessPoolExecutor(max_workers=mp.cpu_count())
    # pool = ProcessPoolExecutor(max_workers=1)
    print("Processing scenes...")
    # list(...) forces iteration so worker exceptions propagate here.
    _ = list(pool.map(parse_scene, scene_paths, repeat(opt.output_root)))
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/preprocessing/nuscenes/preprocess_nuscenes_info.py | pointcept/datasets/preprocessing/nuscenes/preprocess_nuscenes_info.py | """
Preprocessing Script for nuScenes Informantion
modified from OpenPCDet (https://github.com/open-mmlab/OpenPCDet)
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import os
from pathlib import Path
import numpy as np
import argparse
import tqdm
import pickle
from functools import reduce
from pyquaternion import Quaternion
from nuscenes.nuscenes import NuScenes
from nuscenes.utils import splits
from nuscenes.utils.geometry_utils import transform_matrix
# Collapses the raw nuScenes annotation taxonomy onto the detection
# classes used downstream; "ignore" entries are filtered from the GT.
map_name_from_general_to_detection = {
    "human.pedestrian.adult": "pedestrian",
    "human.pedestrian.child": "pedestrian",
    "human.pedestrian.wheelchair": "ignore",
    "human.pedestrian.stroller": "ignore",
    "human.pedestrian.personal_mobility": "ignore",
    "human.pedestrian.police_officer": "pedestrian",
    "human.pedestrian.construction_worker": "pedestrian",
    "animal": "ignore",
    "vehicle.car": "car",
    "vehicle.motorcycle": "motorcycle",
    "vehicle.bicycle": "bicycle",
    "vehicle.bus.bendy": "bus",
    "vehicle.bus.rigid": "bus",
    "vehicle.truck": "truck",
    "vehicle.construction": "construction_vehicle",
    "vehicle.emergency.ambulance": "ignore",
    "vehicle.emergency.police": "ignore",
    "vehicle.trailer": "trailer",
    "movable_object.barrier": "barrier",
    "movable_object.trafficcone": "traffic_cone",
    "movable_object.pushable_pullable": "ignore",
    "movable_object.debris": "ignore",
    "static_object.bicycle_rack": "ignore",
}
cls_attr_dist = {
"barrier": {
"cycle.with_rider": 0,
"cycle.without_rider": 0,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 0,
"vehicle.parked": 0,
"vehicle.stopped": 0,
},
"bicycle": {
"cycle.with_rider": 2791,
"cycle.without_rider": 8946,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 0,
"vehicle.parked": 0,
"vehicle.stopped": 0,
},
"bus": {
"cycle.with_rider": 0,
"cycle.without_rider": 0,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 9092,
"vehicle.parked": 3294,
"vehicle.stopped": 3881,
},
"car": {
"cycle.with_rider": 0,
"cycle.without_rider": 0,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 114304,
"vehicle.parked": 330133,
"vehicle.stopped": 46898,
},
"construction_vehicle": {
"cycle.with_rider": 0,
"cycle.without_rider": 0,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 882,
"vehicle.parked": 11549,
"vehicle.stopped": 2102,
},
"ignore": {
"cycle.with_rider": 307,
"cycle.without_rider": 73,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 165,
"vehicle.parked": 400,
"vehicle.stopped": 102,
},
"motorcycle": {
"cycle.with_rider": 4233,
"cycle.without_rider": 8326,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 0,
"vehicle.parked": 0,
"vehicle.stopped": 0,
},
"pedestrian": {
"cycle.with_rider": 0,
"cycle.without_rider": 0,
"pedestrian.moving": 157444,
"pedestrian.sitting_lying_down": 13939,
"pedestrian.standing": 46530,
"vehicle.moving": 0,
"vehicle.parked": 0,
"vehicle.stopped": 0,
},
"traffic_cone": {
"cycle.with_rider": 0,
"cycle.without_rider": 0,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 0,
"vehicle.parked": 0,
"vehicle.stopped": 0,
},
"trailer": {
"cycle.with_rider": 0,
"cycle.without_rider": 0,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 3421,
"vehicle.parked": 19224,
"vehicle.stopped": 1895,
},
"truck": {
"cycle.with_rider": 0,
"cycle.without_rider": 0,
"pedestrian.moving": 0,
"pedestrian.sitting_lying_down": 0,
"pedestrian.standing": 0,
"vehicle.moving": 21339,
"vehicle.parked": 55626,
"vehicle.stopped": 11097,
},
}
def get_available_scenes(nusc):
    """Return the subset of ``nusc.scene`` whose first LIDAR_TOP frame
    exists on disk.

    Only the first sample of each scene is probed (the original loop
    always terminated after one iteration); a missing lidar file marks
    the whole scene as unavailable, e.g. for partial downloads.
    """
    available_scenes = []
    for scene in nusc.scene:
        scene_rec = nusc.get("scene", scene["token"])
        first_sample = nusc.get("sample", scene_rec["first_sample_token"])
        lidar_rec = nusc.get("sample_data", first_sample["data"]["LIDAR_TOP"])
        lidar_path = nusc.get_sample_data(lidar_rec["token"])[0]
        if Path(lidar_path).exists():
            available_scenes.append(scene)
    return available_scenes
def get_sample_data(nusc, sample_data_token, selected_anntokens=None):
    """
    Returns the data path as well as all annotations related to that sample_data.
    Note that the boxes are transformed into the current sensor"s coordinate frame.
    Args:
        nusc: NuScenes dataset handle.
        sample_data_token: Sample_data token.
        selected_anntokens: If provided only return the selected annotation.
    Returns:
        (data_path, box_list, cam_intrinsic): file path of the raw data,
        annotation boxes moved into the sensor frame, and the camera
        intrinsic matrix (None for non-camera sensors).
    """
    # Retrieve sensor & pose records
    sd_record = nusc.get("sample_data", sample_data_token)
    cs_record = nusc.get("calibrated_sensor", sd_record["calibrated_sensor_token"])
    sensor_record = nusc.get("sensor", cs_record["sensor_token"])
    pose_record = nusc.get("ego_pose", sd_record["ego_pose_token"])
    data_path = nusc.get_sample_data_path(sample_data_token)
    if sensor_record["modality"] == "camera":
        cam_intrinsic = np.array(cs_record["camera_intrinsic"])
    else:
        cam_intrinsic = None
    # Retrieve all sample annotations and map to sensor coordinate system.
    if selected_anntokens is not None:
        boxes = list(map(nusc.get_box, selected_anntokens))
    else:
        boxes = nusc.get_boxes(sample_data_token)
    # Make list of Box objects including coord system transforms.
    box_list = []
    for box in boxes:
        box.velocity = nusc.box_velocity(box.token)
        # Move box to ego vehicle coord system
        box.translate(-np.array(pose_record["translation"]))
        box.rotate(Quaternion(pose_record["rotation"]).inverse)
        # Move box to sensor coord system
        box.translate(-np.array(cs_record["translation"]))
        box.rotate(Quaternion(cs_record["rotation"]).inverse)
        box_list.append(box)
    return data_path, box_list, cam_intrinsic
def quaternion_yaw(q: Quaternion) -> float:
    """
    Calculate the yaw angle from a quaternion.
    Note that this only works for a quaternion that represents a box in lidar or global coordinate frame.
    It does not work for a box in the camera frame.
    :param q: Quaternion of interest.
    :return: Yaw angle in radians.
    """
    # Rotate the unit x-axis by q, then measure its heading in the
    # xy plane with arctan.
    heading = q.rotation_matrix @ np.array([1, 0, 0])
    return np.arctan2(heading[1], heading[0])
def obtain_sensor2top(
    nusc, sensor_token, l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, sensor_type="lidar"
):
    """Obtain the info with RT matric from general sensor to Top LiDAR.
    Args:
        nusc (class): Dataset class in the nuScenes dataset.
        sensor_token (str): Sample data token corresponding to the
            specific sensor type.
        l2e_t (np.ndarray): Translation from lidar to ego in shape (1, 3).
        l2e_r_mat (np.ndarray): Rotation matrix from lidar to ego
            in shape (3, 3).
        e2g_t (np.ndarray): Translation from ego to global in shape (1, 3).
        e2g_r_mat (np.ndarray): Rotation matrix from ego to global
            in shape (3, 3).
        sensor_type (str): Sensor to calibrate. Default: "lidar".
    Returns:
        sweep (dict): Sweep information after transformation.
    """
    sd_rec = nusc.get("sample_data", sensor_token)
    cs_record = nusc.get("calibrated_sensor", sd_rec["calibrated_sensor_token"])
    pose_record = nusc.get("ego_pose", sd_rec["ego_pose_token"])
    data_path = str(nusc.get_sample_data_path(sd_rec["token"]))
    # if os.getcwd() in data_path: # path from lyftdataset is absolute path
    # data_path = data_path.split(f"{os.getcwd()}/")[-1] # relative path
    sweep = {
        "data_path": data_path,
        "type": sensor_type,
        "sample_data_token": sd_rec["token"],
        "sensor2ego_translation": cs_record["translation"],
        "sensor2ego_rotation": cs_record["rotation"],
        "ego2global_translation": pose_record["translation"],
        "ego2global_rotation": pose_record["rotation"],
        "timestamp": sd_rec["timestamp"],
    }
    l2e_r_s = sweep["sensor2ego_rotation"]
    l2e_t_s = sweep["sensor2ego_translation"]
    e2g_r_s = sweep["ego2global_rotation"]
    e2g_t_s = sweep["ego2global_translation"]
    # obtain the RT from sensor to Top LiDAR
    # sweep->ego->global->ego'->lidar
    l2e_r_s_mat = Quaternion(l2e_r_s).rotation_matrix
    e2g_r_s_mat = Quaternion(e2g_r_s).rotation_matrix
    R = (l2e_r_s_mat.T @ e2g_r_s_mat.T) @ (
        np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T
    )
    T = (l2e_t_s @ e2g_r_s_mat.T + e2g_t_s) @ (
        np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T
    )
    # NOTE(review): the caller passes l2e_t as a 1-tuple wrapping the
    # translation list, so the product below is shape (1, 3); the
    # squeeze(0) relies on that -- confirm before changing either side.
    T -= (
        e2g_t @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T)
        + l2e_t @ np.linalg.inv(l2e_r_mat).T
    ).squeeze(0)
    sweep["sensor2lidar_rotation"] = R.T  # points @ R.T + T
    sweep["sensor2lidar_translation"] = T
    return sweep
def fill_trainval_infos(
    data_path, nusc, train_scenes, test=False, max_sweeps=10, with_camera=False
):
    """Build one info dict per sample (lidar path, sweeps, GT boxes, seg).

    Args:
        data_path: dataset root; stored file paths are made relative to it.
        nusc: NuScenes handle whose samples are iterated.
        train_scenes: set of scene tokens routed into the first list.
        test: when True, skip ground-truth boxes and segmentation.
        max_sweeps: total sweep budget (current frame + max_sweeps-1 past).
        with_camera: when True, also collect the six cameras' calibration.

    Returns:
        (train_nusc_infos, val_nusc_infos): samples whose scene token is
        in ``train_scenes`` versus everything else.
    """
    train_nusc_infos = []
    val_nusc_infos = []
    progress_bar = tqdm.tqdm(
        total=len(nusc.sample), desc="create_info", dynamic_ncols=True
    )
    ref_chan = "LIDAR_TOP"  # The radar channel from which we track back n sweeps to aggregate the point cloud.
    chan = "LIDAR_TOP"  # The reference channel of the current sample_rec that the point clouds are mapped to.
    for index, sample in enumerate(nusc.sample):
        progress_bar.update()
        ref_sd_token = sample["data"][ref_chan]
        ref_sd_rec = nusc.get("sample_data", ref_sd_token)
        ref_cs_rec = nusc.get(
            "calibrated_sensor", ref_sd_rec["calibrated_sensor_token"]
        )
        ref_pose_rec = nusc.get("ego_pose", ref_sd_rec["ego_pose_token"])
        # Timestamps are in microseconds; convert to seconds.
        ref_time = 1e-6 * ref_sd_rec["timestamp"]
        ref_lidar_path, ref_boxes, _ = get_sample_data(nusc, ref_sd_token)
        ref_cam_front_token = sample["data"]["CAM_FRONT"]
        ref_cam_path, _, ref_cam_intrinsic = nusc.get_sample_data(ref_cam_front_token)
        # Homogeneous transform from ego car frame to reference frame
        ref_from_car = transform_matrix(
            ref_cs_rec["translation"], Quaternion(ref_cs_rec["rotation"]), inverse=True
        )
        # Homogeneous transformation matrix from global to _current_ ego car frame
        car_from_global = transform_matrix(
            ref_pose_rec["translation"],
            Quaternion(ref_pose_rec["rotation"]),
            inverse=True,
        )
        info = {
            "lidar_path": Path(ref_lidar_path).relative_to(data_path).__str__(),
            "lidar_token": ref_sd_token,
            "cam_front_path": Path(ref_cam_path).relative_to(data_path).__str__(),
            "cam_intrinsic": ref_cam_intrinsic,
            "token": sample["token"],
            "sweeps": [],
            "ref_from_car": ref_from_car,
            "car_from_global": car_from_global,
            "timestamp": ref_time,
        }
        if with_camera:
            info["cams"] = dict()
            l2e_r = ref_cs_rec["rotation"]
            # NOTE(review): the trailing comma makes l2e_t a 1-tuple
            # wrapping the translation; obtain_sensor2top's squeeze(0)
            # relies on the resulting (1, 3) shape -- confirm intended.
            l2e_t = (ref_cs_rec["translation"],)
            e2g_r = ref_pose_rec["rotation"]
            e2g_t = ref_pose_rec["translation"]
            l2e_r_mat = Quaternion(l2e_r).rotation_matrix
            e2g_r_mat = Quaternion(e2g_r).rotation_matrix
            # obtain 6 image's information per frame
            camera_types = [
                "CAM_FRONT",
                "CAM_FRONT_RIGHT",
                "CAM_FRONT_LEFT",
                "CAM_BACK",
                "CAM_BACK_LEFT",
                "CAM_BACK_RIGHT",
            ]
            for cam in camera_types:
                cam_token = sample["data"][cam]
                cam_path, _, camera_intrinsics = nusc.get_sample_data(cam_token)
                cam_info = obtain_sensor2top(
                    nusc, cam_token, l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, cam
                )
                cam_info["data_path"] = (
                    Path(cam_info["data_path"]).relative_to(data_path).__str__()
                )
                cam_info.update(camera_intrinsics=camera_intrinsics)
                info["cams"].update({cam: cam_info})
        sample_data_token = sample["data"][chan]
        curr_sd_rec = nusc.get("sample_data", sample_data_token)
        sweeps = []
        while len(sweeps) < max_sweeps - 1:
            if curr_sd_rec["prev"] == "":
                # No earlier frame: seed with the reference frame, then
                # pad by duplicating the last collected sweep.
                if len(sweeps) == 0:
                    sweep = {
                        "lidar_path": Path(ref_lidar_path)
                        .relative_to(data_path)
                        .__str__(),
                        "sample_data_token": curr_sd_rec["token"],
                        "transform_matrix": None,
                        "time_lag": curr_sd_rec["timestamp"] * 0,
                    }
                    sweeps.append(sweep)
                else:
                    sweeps.append(sweeps[-1])
            else:
                curr_sd_rec = nusc.get("sample_data", curr_sd_rec["prev"])
                # Get past pose
                current_pose_rec = nusc.get("ego_pose", curr_sd_rec["ego_pose_token"])
                global_from_car = transform_matrix(
                    current_pose_rec["translation"],
                    Quaternion(current_pose_rec["rotation"]),
                    inverse=False,
                )
                # Homogeneous transformation matrix from sensor coordinate frame to ego car frame.
                current_cs_rec = nusc.get(
                    "calibrated_sensor", curr_sd_rec["calibrated_sensor_token"]
                )
                car_from_current = transform_matrix(
                    current_cs_rec["translation"],
                    Quaternion(current_cs_rec["rotation"]),
                    inverse=False,
                )
                # Chain sweep sensor -> ego(t) -> global -> ego(ref) -> ref sensor.
                tm = reduce(
                    np.dot,
                    [ref_from_car, car_from_global, global_from_car, car_from_current],
                )
                lidar_path = nusc.get_sample_data_path(curr_sd_rec["token"])
                time_lag = ref_time - 1e-6 * curr_sd_rec["timestamp"]
                sweep = {
                    "lidar_path": Path(lidar_path).relative_to(data_path).__str__(),
                    "sample_data_token": curr_sd_rec["token"],
                    "transform_matrix": tm,
                    "global_from_car": global_from_car,
                    "car_from_current": car_from_current,
                    "time_lag": time_lag,
                }
                sweeps.append(sweep)
        info["sweeps"] = sweeps
        assert len(info["sweeps"]) == max_sweeps - 1, (
            f"sweep {curr_sd_rec['token']} only has {len(info['sweeps'])} sweeps, "
            f"you should duplicate to sweep num {max_sweeps - 1}"
        )
        if not test:
            # processing gt bbox
            annotations = [
                nusc.get("sample_annotation", token) for token in sample["anns"]
            ]
            # the filtering gives 0.5~1 map improvement
            num_lidar_pts = np.array([anno["num_lidar_pts"] for anno in annotations])
            num_radar_pts = np.array([anno["num_radar_pts"] for anno in annotations])
            mask = num_lidar_pts + num_radar_pts > 0
            locs = np.array([b.center for b in ref_boxes]).reshape(-1, 3)
            dims = np.array([b.wlh for b in ref_boxes]).reshape(-1, 3)[
                :, [1, 0, 2]
            ]  # wlh == > dxdydz (lwh)
            velocity = np.array([b.velocity for b in ref_boxes]).reshape(-1, 3)
            rots = np.array([quaternion_yaw(b.orientation) for b in ref_boxes]).reshape(
                -1, 1
            )
            names = np.array([b.name for b in ref_boxes])
            tokens = np.array([b.token for b in ref_boxes])
            gt_boxes = np.concatenate([locs, dims, rots, velocity[:, :2]], axis=1)
            assert len(annotations) == len(gt_boxes) == len(velocity)
            info["gt_boxes"] = gt_boxes[mask, :]
            info["gt_boxes_velocity"] = velocity[mask, :]
            info["gt_names"] = np.array(
                [map_name_from_general_to_detection[name] for name in names]
            )[mask]
            info["gt_boxes_token"] = tokens[mask]
            info["num_lidar_pts"] = num_lidar_pts[mask]
            info["num_radar_pts"] = num_radar_pts[mask]
            # processing gt segment
            segment_path = nusc.get("lidarseg", ref_sd_token)["filename"]
            info["gt_segment_path"] = segment_path
        if sample["scene_token"] in train_scenes:
            train_nusc_infos.append(info)
        else:
            val_nusc_infos.append(info)
    progress_bar.close()
    return train_nusc_infos, val_nusc_infos
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dataset_root", required=True, help="Path to the nuScenes dataset."
    )
    parser.add_argument(
        "--output_root",
        required=True,
        help="Output path where processed information located.",
    )
    parser.add_argument(
        "--max_sweeps", default=10, type=int, help="Max number of sweeps. Default: 10."
    )
    parser.add_argument(
        "--with_camera",
        action="store_true",
        default=False,
        help="Whether use camera or not.",
    )
    config = parser.parse_args()
    print(f"Loading nuScenes tables for version v1.0-trainval...")
    nusc_trainval = NuScenes(
        version="v1.0-trainval", dataroot=config.dataset_root, verbose=False
    )
    available_scenes_trainval = get_available_scenes(nusc_trainval)
    available_scene_names_trainval = [s["name"] for s in available_scenes_trainval]
    print("total scene num:", len(nusc_trainval.scene))
    print("exist scene num:", len(available_scenes_trainval))
    # The full trainval split ships 850 scenes; abort on partial downloads.
    assert len(available_scenes_trainval) == len(nusc_trainval.scene) == 850
    print(f"Loading nuScenes tables for version v1.0-test...")
    nusc_test = NuScenes(
        version="v1.0-test", dataroot=config.dataset_root, verbose=False
    )
    available_scenes_test = get_available_scenes(nusc_test)
    available_scene_names_test = [s["name"] for s in available_scenes_test]
    print("total scene num:", len(nusc_test.scene))
    print("exist scene num:", len(available_scenes_test))
    # The test split ships 150 scenes.
    assert len(available_scenes_test) == len(nusc_test.scene) == 150
    # Map the official split scene names to scene tokens.
    train_scenes = splits.train
    train_scenes = set(
        [
            available_scenes_trainval[available_scene_names_trainval.index(s)]["token"]
            for s in train_scenes
        ]
    )
    test_scenes = splits.test
    test_scenes = set(
        [
            available_scenes_test[available_scene_names_test.index(s)]["token"]
            for s in test_scenes
        ]
    )
    print(f"Filling trainval information...")
    train_nusc_infos, val_nusc_infos = fill_trainval_infos(
        config.dataset_root,
        nusc_trainval,
        train_scenes,
        test=False,
        max_sweeps=config.max_sweeps,
        with_camera=config.with_camera,
    )
    print(f"Filling test information...")
    test_nusc_infos, _ = fill_trainval_infos(
        config.dataset_root,
        nusc_test,
        test_scenes,
        test=True,
        max_sweeps=config.max_sweeps,
        with_camera=config.with_camera,
    )
    print(f"Saving nuScenes information...")
    os.makedirs(os.path.join(config.output_root, "info"), exist_ok=True)
    print(
        f"train sample: {len(train_nusc_infos)}, val sample: {len(val_nusc_infos)}, test sample: {len(test_nusc_infos)}"
    )
    # Pickle one info list per split under <output_root>/info/.
    with open(
        os.path.join(
            config.output_root,
            "info",
            f"nuscenes_infos_{config.max_sweeps}sweeps_train.pkl",
        ),
        "wb",
    ) as f:
        pickle.dump(train_nusc_infos, f)
    with open(
        os.path.join(
            config.output_root,
            "info",
            f"nuscenes_infos_{config.max_sweeps}sweeps_val.pkl",
        ),
        "wb",
    ) as f:
        pickle.dump(val_nusc_infos, f)
    with open(
        os.path.join(
            config.output_root,
            "info",
            f"nuscenes_infos_{config.max_sweeps}sweeps_test.pkl",
        ),
        "wb",
    ) as f:
        pickle.dump(test_nusc_infos, f)
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/datasets/preprocessing/waymo/preprocess_waymo.py | pointcept/datasets/preprocessing/waymo/preprocess_waymo.py | """
Preprocessing Script for ScanNet 20/200
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import argparse
import numpy as np
import tensorflow.compat.v1 as tf
from waymo_open_dataset.utils import frame_utils
from waymo_open_dataset import dataset_pb2 as open_dataset
import glob
import multiprocessing as mp
from concurrent.futures import ProcessPoolExecutor
from itertools import repeat
def create_lidar(frame):
    """Extract the merged lidar point cloud of a frame as (x, y, z, intensity).

    Both lidar returns (ri_index 0 and 1) are converted to point clouds and
    concatenated, then flattened to a 1-D float array of ``num_points * 4``
    values (the KITTI-style ``velodyne`` .bin layout).

    Args:
        frame (:obj:`Frame`): Open dataset frame proto.

    Returns:
        np.ndarray: flattened ``(N * 4,)`` array of x, y, z, intensity.
    """
    (
        range_images,
        camera_projections,
        segmentation_labels,
        range_image_top_pose,
    ) = frame_utils.parse_range_image_and_camera_projection(frame)
    # First return. With keep_polar_features=True each point row carries
    # (range, intensity, elongation, x, y, z).
    points, cp_points = frame_utils.convert_range_image_to_point_cloud(
        frame,
        range_images,
        camera_projections,
        range_image_top_pose,
        keep_polar_features=True,
    )
    # Second return (ri_index=1) of the same lasers.
    points_ri2, cp_points_ri2 = frame_utils.convert_range_image_to_point_cloud(
        frame,
        range_images,
        camera_projections,
        range_image_top_pose,
        ri_index=1,
        keep_polar_features=True,
    )
    # 3d points in vehicle frame.
    points_all = np.concatenate(points, axis=0)
    points_all_ri2 = np.concatenate(points_ri2, axis=0)
    # point labels.
    points_all = np.concatenate([points_all, points_all_ri2], axis=0)
    # Columns 3:6 are x, y, z; column 1 is intensity (polar-feature order).
    velodyne = np.c_[points_all[:, 3:6], points_all[:, 1]]
    velodyne = velodyne.reshape((velodyne.shape[0] * velodyne.shape[1]))
    return velodyne
def create_label(frame):
    """Extract per-point segmentation labels for both lidar returns.

    The two returns are concatenated in the same order as :func:`create_lidar`
    so labels align with the saved point cloud.

    Args:
        frame (:obj:`Frame`): Open dataset frame proto.

    Returns:
        np.ndarray: ``(N, 2)`` int array — the two Waymo label channels per
        point; zeros for points that are not labeled.
    """
    (
        range_images,
        camera_projections,
        segmentation_labels,
        range_image_top_pose,
    ) = frame_utils.parse_range_image_and_camera_projection(frame)
    point_labels = convert_range_image_to_point_cloud_labels(
        frame, range_images, segmentation_labels
    )
    point_labels_ri2 = convert_range_image_to_point_cloud_labels(
        frame, range_images, segmentation_labels, ri_index=1
    )
    # point labels: first return, then second return.
    point_labels_all = np.concatenate(point_labels, axis=0)
    point_labels_all_ri2 = np.concatenate(point_labels_ri2, axis=0)
    point_labels_all = np.concatenate([point_labels_all, point_labels_all_ri2], axis=0)
    labels = point_labels_all
    return labels
def convert_range_image_to_point_cloud_labels(
    frame, range_images, segmentation_labels, ri_index=0
):
    """Convert segmentation labels from range images to point clouds.

    Args:
        frame: open dataset frame
        range_images: A dict of {laser_name, [range_image_first_return,
            range_image_second_return]}.
        segmentation_labels: A dict of {laser_name, [range_image_first_return,
            range_image_second_return]}.
        ri_index: 0 for the first return, 1 for the second return.

    Returns:
        point_labels: {[N, 2]} list of 3d lidar points' segmentation labels.
            0 for points that are not labeled.
    """
    # Iterate lasers in a fixed (name-sorted) order so label order matches the
    # point order produced by frame_utils.convert_range_image_to_point_cloud.
    calibrations = sorted(frame.context.laser_calibrations, key=lambda c: c.name)
    point_labels = []
    for c in calibrations:
        range_image = range_images[c.name][ri_index]
        range_image_tensor = tf.reshape(
            tf.convert_to_tensor(range_image.data), range_image.shape.dims
        )
        # Valid lidar returns have a positive range value (channel 0).
        range_image_mask = range_image_tensor[..., 0] > 0
        if c.name in segmentation_labels:
            sl = segmentation_labels[c.name][ri_index]
            sl_tensor = tf.reshape(tf.convert_to_tensor(sl.data), sl.shape.dims)
            sl_points_tensor = tf.gather_nd(sl_tensor, tf.where(range_image_mask))
        else:
            # Lasers without segmentation labels contribute all-zero labels.
            num_valid_point = tf.math.reduce_sum(tf.cast(range_image_mask, tf.int32))
            sl_points_tensor = tf.zeros([num_valid_point, 2], dtype=tf.int32)
        point_labels.append(sl_points_tensor.numpy())
    return point_labels
def handle_process(file_path, output_root):
    """Convert one Waymo ``.tfrecord`` segment into per-frame binary files.

    For every frame that carries 3D segmentation labels this writes
    ``<output_root>/<split>/<segment>/velodyne/XXXXXX.bin`` (float32 x, y, z,
    intensity from :func:`create_lidar`) and, for non-test splits,
    ``labels/XXXXXX.label`` from :func:`create_label`.

    Args:
        file_path: path to ``<split>/<name>.tfrecord``.
        output_root: root directory of the converted dataset.
    """
    file = os.path.basename(file_path)
    split = os.path.basename(os.path.dirname(file_path))
    print(f"Parsing {split}/{file}")
    save_path = os.path.join(output_root, split, file.split(".")[0])
    os.makedirs(os.path.join(save_path, "velodyne"), exist_ok=True)
    if split != "testing":
        # The test split ships no labels, so no labels/ directory is created.
        os.makedirs(os.path.join(save_path, "labels"), exist_ok=True)
    data_group = tf.data.TFRecordDataset(file_path, compression_type="")
    count = 0
    for data in data_group:
        frame = open_dataset.Frame()
        frame.ParseFromString(bytearray(data.numpy()))
        if frame.lasers[0].ri_return1.segmentation_label_compressed:
            # Zero-padded six-digit frame index (idiomatic f-string formatting
            # replacing the manual "0" * (6 - len(...)) padding).
            file_idx = f"{count:06d}"
            point_cloud = create_lidar(frame)
            point_cloud.astype(np.float32).tofile(
                os.path.join(save_path, "velodyne", f"{file_idx}.bin")
            )
            if split != "testing":
                label = create_label(frame)
                label.tofile(os.path.join(save_path, "labels", f"{file_idx}.label"))
        # NOTE(review): the index advances for every frame, labeled or not, so
        # saved files keep the dataset frame numbering — confirm intended.
        count += 1
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dataset_root",
        required=True,
        help="Path to the Waymo dataset containing split folders",
    )
    parser.add_argument(
        "--output_root",
        required=True,
        help="Output path where converted split folders will be located",
    )
    parser.add_argument(
        "--splits",
        required=True,
        nargs="+",
        choices=["training", "validation", "testing"],
        help="Splits need to process ([training, validation, testing]).",
    )
    parser.add_argument(
        "--num_workers",
        default=mp.cpu_count(),
        type=int,
        help="Num workers for preprocessing.",
    )
    config = parser.parse_args()
    # load file list
    file_list = glob.glob(
        os.path.join(os.path.abspath(config.dataset_root), "*", "*.tfrecord")
    )
    # Sanity check: the Waymo perception dataset ships exactly 1150 segments
    # (798 training + 202 validation + 150 testing).
    assert len(file_list) == 1150
    # Create output directories
    for split in config.splits:
        os.makedirs(os.path.join(config.output_root, split), exist_ok=True)
    # Keep only the segments belonging to the requested splits.
    file_list = [
        file
        for file in file_list
        if os.path.basename(os.path.dirname(file)) in config.splits
    ]
    # Preprocess data.
    print("Processing scenes...")
    # Context manager joins and cleans up worker processes even on error
    # (the original never shut the executor down).
    with ProcessPoolExecutor(max_workers=config.num_workers) as pool:
        _ = list(pool.map(handle_process, file_list, repeat(config.output_root)))
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/default.py | pointcept/models/default.py | import torch.nn as nn
import torch
import numpy as np
import math
from scipy import special
from pointcept.utils.comm import calc_t_emb
from pointcept.models.losses import build_criteria
from pointcept.models.utils.structure import Point
from .builder import MODELS, build_model
### ---------------------------- ① ------------------------------- ###
@MODELS.register_module()
class DefaultSegmentorV2(nn.Module):
'''
①GD + CN(CNF) : Gaussion(Continous) Diffusion + Conditional Network
'''
    def __init__(
        self,
        backbone=None,
        criteria=None,
        loss_type="EW",
        task_num=2,
        num_classes=20,
        T=1000,
        beta_start=0.0001,
        beta_end=0.02,
        noise_schedule="linear",
        T_dim=128,
        dm=False,
        dm_input="xt",
        dm_target="noise",
        dm_min_snr=None,
        condition=False,
        c_in_channels=6
    ):
        """Segmentor pairing a conditional network with a Gaussian diffusion branch.

        Args:
            backbone: backbone config passed to ``build_model``.
            criteria: loss config passed to ``build_criteria``.
            loss_type: multi-task loss weighting scheme (e.g. "EW").
            task_num: number of tasks for the loss builder.
            num_classes: number of semantic classes.
            T: number of diffusion steps.
            beta_start / beta_end / noise_schedule: beta schedule parameters.
            T_dim: timestep-embedding width; -1 disables the embedding.
            dm: enable the diffusion branch (precomputes schedule tensors).
            dm_input: "xt" starts inference from pure noise at step T-1.
            dm_target: "noise" (epsilon-prediction) or "x0".
            dm_min_snr: optional cap applied to the SNR loss weights.
            condition: enable the two-branch (conditional) forward path.
            c_in_channels: channel count of the conditional-branch input;
                used to pick between "feat" and "coord" at run time.
        """
        super().__init__()
        self.backbone = build_model(backbone)
        self.criteria = build_criteria(cfg=criteria,loss_type=loss_type,task_num=task_num)
        self.num_classes = num_classes
        self.T = T
        self.beta_start = beta_start
        self.beta_end = beta_end
        self.noise_schedule = noise_schedule
        self.T_dim = T_dim
        self.condition = condition
        self.dm = dm
        self.dm_input = dm_input
        self.dm_target = dm_target
        self.dm_min_snr = dm_min_snr
        self.c_in_channels = c_in_channels
        if(self.dm):
            # ---- diffusion params ----
            self.eps = 1e-6
            self.Beta, self.Alpha ,self.Alpha_bar, self.Sigma, self.SNR= self.get_diffusion_hyperparams(
                noise_schedule=noise_schedule,
                T=self.T,
                beta_start=self.beta_start,
                beta_end=self.beta_end,
            )
            # ---- diffusion params ----
            # Schedule tensors are cast to float32 and moved to the GPU; SNR is
            # optionally capped for min-SNR loss weighting.
            self.Beta = self.Beta.float().cuda()
            self.Alpha = self.Alpha.float().cuda()
            self.Alpha_bar = self.Alpha_bar.float().cuda()
            self.Sigma = self.Sigma.float().cuda()
            self.SNR = self.SNR.float().cuda() if dm_min_snr is None else torch.clamp(self.SNR.float().cuda(),max=dm_min_snr)
def get_diffusion_hyperparams(
self,
noise_schedule,
beta_start,
beta_end,
T
):
"""
Compute diffusion process hyperparameters
Parameters:
T (int): number of diffusion steps
beta_0 and beta_T (float): beta schedule start/end value,
where any beta_t in the middle is linearly interpolated
Returns:
a dictionary of diffusion hyperparameters including:
T (int), Beta/Alpha/Alpha_bar/Sigma (torch.tensor on cpu, shape=(T, ))
These cpu tensors are changed to cuda tensors on each individual gpu
"""
# Beta = torch.linspace(noise_schedule,beta_start, beta_end, T)
Beta = self.get_diffusion_betas(
type=noise_schedule,
start=beta_start,
stop=beta_end,
T=T
)
# at = 1 - bt
Alpha = 1 - Beta
# at_
Alpha_bar = Alpha + 0
# 方差
Beta_tilde = Beta + 0
for t in range(1, T):
# \bar{\alpha}_t = \prod_{s=1}^t \alpha_s
Alpha_bar[t] *= Alpha_bar[t - 1]
# \tilde{\beta}_t = (1-\bar{\alpha}_{t-1}) / (1-\bar{\alpha}_t) * \beta_t
Beta_tilde[t] *= (1-Alpha_bar[t-1]) / (1-Alpha_bar[t])
# 标准差
Sigma = torch.sqrt(Beta_tilde) # \sigma_t^2 = \tilde{\beta}_t
Sigma[0] = 0.0
'''
SNR = at ** 2 / sigma ** 2
at = sqrt(at_), sigma = sqrt(1 - at_)
q(xt|x0) = sqrt(at_) * x0 + sqrt(1 - at_) * noise
'''
SNR = Alpha_bar / (1 - Alpha_bar)
return Beta, Alpha, Alpha_bar, Sigma, SNR
def get_diffusion_betas(self, type='linear', start=0.0001, stop=0.02, T=1000):
"""Get betas from the hyperparameters."""
if type == 'linear':
# Used by Ho et al. for DDPM, https://arxiv.org/abs/2006.11239.
# To be used with Gaussian diffusion models in continuous and discrete
# state spaces.
# To be used with transition_mat_type = 'gaussian'
scale = 1000 / T
beta_start = scale * start
beta_end = scale * stop
return torch.linspace(beta_start, beta_end, T, dtype=torch.float64)
elif type == 'cosine':
# Schedule proposed by Hoogeboom et al. https://arxiv.org/abs/2102.05379
# To be used with transition_mat_type = 'uniform'.
steps = T + 1
s = 0.008
# t = torch.linspace(0, T, steps, dtype=torch.float64) / T
t = torch.linspace(start, stop, steps, dtype=torch.float64) / T
alphas_cumprod = torch.cos((t + s) / (1 + s) * math.pi * 0.5) ** 2
alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
return torch.clip(betas, 0, 0.999)
elif type == 'sigmoid': # 1/T, 1/(T-1), 1/(T-2), ..., 1
# Proposed by Sohl-Dickstein et al., https://arxiv.org/abs/1503.03585
# To be used with absorbing state models.
# ensures that the probability of decaying to the absorbing state
# increases linearly over time, and is 1 for t = T-1 (the final time).
# To be used with transition_mat_type = 'absorbing'
start = -3
end = 3
tau = 1
steps = T + 1
t = torch.linspace(0, T, steps, dtype=torch.float64) / T
v_start = torch.tensor(start / tau).sigmoid()
v_end = torch.tensor(end / tau).sigmoid()
alphas_cumprod = (-((t * (end - start) + start) / tau).sigmoid() + v_end) / (v_end - v_start)
alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
return torch.clip(betas, 0, 0.999)
elif type == "laplace":
mu = 0.0
b = 0.5
lmb = lambda t: mu - b * torch.sign(0.5 - t) * torch.log(1 - 2 * torch.abs(0.5 - t))
snr_func = lambda t: torch.exp(lmb(t))
alpha_func = lambda t: torch.sqrt(snr_func(t) / (1 + snr_func(t)))
# sigma_func = lambda t: torch.sqrt(1 / (1 + snr_func(t)))
timesteps = torch.linspace(0, 1, 1002)[1:-1]
alphas_cumprod = []
for t in timesteps:
a = alpha_func(t) ** 2
alphas_cumprod.append(a)
alphas_cumprod = torch.cat(alphas_cumprod,dim=0)
alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
return torch.clip(betas, 0, 0.999)
else:
raise NotImplementedError(type)
    def continuous_p_ddim_sample(self, x_t, t, noise):
        """One deterministic DDIM reverse step x_t -> x_{t-1}.

        Args:
            x_t: current noisy sample.
            t: per-point timestep index tensor into ``Alpha_bar``; all entries
               are assumed to share one step (only ``t[0]`` is tested).
            noise: network output — predicted epsilon when
               ``dm_target == "noise"``, predicted x_0 when ``dm_target == "x0"``.

        Returns:
            The x_{t-1} estimate, or the x_0 estimate when t == 0.
        """
        if(self.dm_target == "noise"):
            # x0 = (xt - sqrt(1-at_) * noise) / sqrt(at_)
            c_x0 = (x_t - torch.sqrt(1 - self.Alpha_bar[t]) * noise) / torch.sqrt(self.Alpha_bar[t])
        elif(self.dm_target == "x0"):
            c_x0 = noise
            # noise = (xt - sqrt(1-at_) * x0) / sqrt(1-at_)
            noise = (x_t - torch.sqrt(self.Alpha_bar[t]) * c_x0) / torch.sqrt(1 - self.Alpha_bar[t])
        if(t[0] == 0):
            return c_x0
        # sqrt(at-1_) * (xt - sqrt(1-at_) * noise) / sqrt(at_)
        c_xt_1_1 = torch.sqrt(self.Alpha_bar[t-1]) * c_x0
        # sqrt(1 - at-1_) * noise
        c_xt_1_2 = torch.sqrt(1 - self.Alpha_bar[t-1]) * noise
        # xt-1 = sqrt(at-1_) * (xt - sqrt(1-at_) * noise) / sqrt(at_) + sqrt(1 - at-1_) * noise
        c_xt_1 = c_xt_1_1 + c_xt_1_2
        return c_xt_1
def continuous_q_sample(self,x_0, t, noise=None):
if(noise is None):
# sampling from Gaussian distribution
noise = torch.normal(0, 1, size=x_0.shape, dtype=torch.float32).cuda()
# xt = sqrt(at_) * x0 + sqrt(1-at_) * noise
x_t = torch.sqrt(self.Alpha_bar[t]) * x_0 + torch.sqrt(1 - self.Alpha_bar[t]) * noise
return x_t
def get_time_schedule(self, T=1000, step=5):
times = np.linspace(-1, T - 1, num = step + 1, dtype=int)[::-1]
return times
    def add_gaussian_noise(self, pts, sigma=0.1, clamp=0.03):
        """Jitter ``pts`` with zero-mean Gaussian noise scaled by ``sigma``.

        ``clamp`` is validated but currently unused — the clamped variant is
        kept commented out below.
        """
        # input: (b, 3, n)
        assert (clamp > 0)
        # jittered_data = torch.clamp(sigma * torch.randn_like(pts), -1 * clamp, clamp)
        jittered_data = sigma * torch.randn_like(pts).cuda()
        jittered_data = jittered_data + pts
        return jittered_data
    def add_random_noise(self, pts, sigma=0.1, clamp=0.03):
        """Jitter ``pts`` with uniform [0, 1) noise scaled by ``sigma``.

        NOTE(review): ``torch.rand_like`` is non-negative, so on average this
        shifts points by a positive bias (unlike the Gaussian variant) —
        confirm that is intended. ``clamp`` is validated but unused.
        """
        # input: (b, 3, n)
        assert (clamp > 0)
        # jittered_data = torch.clamp(sigma * torch.rand_like(pts), -1 * clamp, clamp).cuda()
        jittered_data = sigma * torch.rand_like(pts).cuda()
        jittered_data = jittered_data + pts
        return jittered_data
    def add_laplace_noise(self, pts, sigma=0.1, clamp=0.03, loc=0.0, scale=1.0):
        """Jitter ``pts`` with Laplace(loc, scale) noise scaled by ``sigma``.

        ``clamp`` is validated but currently unused.
        """
        # input: (b, 3, n)
        assert (clamp > 0)
        laplace_distribution = torch.distributions.Laplace(loc=loc, scale=scale)
        jittered_data = sigma * laplace_distribution.sample(pts.shape).cuda()
        # jittered_data = torch.clamp(sigma * laplace_distribution.sample(pts.shape), -1 * clamp, clamp).cuda()
        jittered_data = jittered_data + pts
        return jittered_data
    def add_possion_noise(self, pts, sigma=0.1, clamp=0.03, rate=3.0):
        """Jitter ``pts`` with Poisson(rate) noise scaled by ``sigma``.

        NOTE(review): the name keeps the original "possion" spelling for
        interface compatibility. Poisson samples are non-negative, so this
        adds a positive bias. ``clamp`` is validated but unused.
        """
        # input: (b, 3, n)
        assert (clamp > 0)
        poisson_distribution = torch.distributions.Poisson(rate)
        jittered_data = sigma * poisson_distribution.sample(pts.shape).cuda()
        # jittered_data = torch.clamp(sigma * poisson_distribution.sample(pts.shape), -1 * clamp, clamp).cuda()
        jittered_data = jittered_data + pts
        return jittered_data
def init_feature(self, input_dict):
point = {}
point["coord"] = input_dict["coord"]
point["grid_coord"] = input_dict["grid_coord"]
point["offset"] = input_dict["offset"]
return point
    def inference_ddim(self, input_dict, T=1000, step=1, report=10, eval=True, mode="avg", noise_level=None):
        """Multi-step DDIM inference over the conditional branch.

        Starting from pure Gaussian noise on the conditional input, runs
        ``step`` reverse-diffusion iterations; the segmentation logits from
        each iteration are either averaged ("avg") or the last ones kept
        ("final"). Falls back to a plain backbone pass when ``condition`` is
        off.

        Args:
            input_dict: batch with "coord", "grid_coord", "offset", "feat"
                and (when ``eval``) "segment".
            T: total diffusion steps of the trained schedule.
            step: number of DDIM sampling steps.
            report: progress-print interval (iterations).
            eval: also compute the evaluation loss.
            mode: "avg" or "final" logits aggregation.
            noise_level: optional sigma for Gaussian feature corruption.

        Returns:
            dict with "seg_logits" (and "loss" when ``eval``).
        """
        if(noise_level is not None):
            input_dict["feat"] = self.add_gaussian_noise(input_dict["feat"],sigma=noise_level)
            #input_dict["feat"] = self.add_random_noise(input_dict["feat"],sigma=noise_level)
            #input_dict["feat"] = self.add_laplace_noise(input_dict["feat"],sigma=noise_level)
        if(self.condition):
            ### ---- PT V3 + DM ---- ###
            c_point = self.init_feature(input_dict)
            n_point = self.init_feature(input_dict)
            # ---- initial input ---- #
            n_point["feat"] = input_dict["feat"]
            # c_target only fixes the shape here; the conditional feature is
            # immediately replaced by pure noise below.
            if(self.c_in_channels == n_point["feat"].shape[-1]):
                c_point['feat'] = c_target = input_dict["feat"]
            else:
                c_point['feat'] = c_target = input_dict["coord"]
            c_point['feat'] = torch.normal(0, 1, size=c_target.shape, dtype=torch.float32).cuda()
            # ---- initial input ---- #
            N = len(c_target)
            n_pred = torch.zeros(size=(N, self.num_classes), dtype=torch.float32).cuda()
            time_schedule = self.get_time_schedule(T, step)
            time_is = reversed(range(len(time_schedule)))
            for i, t in zip(time_is, time_schedule):
                if ((i + 1) % report == 0 or t <= 0):
                    print(f" ---- current : [{i + 1 if t > 0 else 0}/{step}] steps ----")
                # ---- T steps ---- #
                # NOTE(review): the final schedule entry is t == -1, so ts is
                # negative and Alpha_bar[t] wraps to the last index — confirm
                # this terminal step is intended.
                ts = t * torch.ones((N, 1), dtype=torch.int64).cuda()
                if (self.T_dim != -1):
                    c_point['t_emb'] = calc_t_emb(ts, t_emb_dim=self.T_dim).cuda()
                # ---- T steps ---- #
                # ---- c_xt ---- #
                c_xt = c_point["feat"]
                # ---- c_xt ---- #
                # ---- pred c_epsilon and n_x0 ---- #
                c_point, n_point = self.backbone(c_point, n_point)
                # ---- pred c_epsilon and n_x0 ---- #
                # ---- c_xs ---- #
                c_epslon_ = c_point["feat"]
                c_xs = self.continuous_p_ddim_sample(
                    c_xt,
                    ts,
                    c_epslon_,
                ).float()
                c_point = self.init_feature(input_dict)
                c_point["feat"] = c_xs
                # ---- c_xs ---- #
                # ---- n_pred ---- #
                if(mode == "avg"):
                    n_pred += n_point["feat"]
                elif(mode == "final"):
                    n_pred = n_point["feat"]
                # ---- n_pred ---- #
                # ---- n_feature ---- #
                # Reset the noise-branch input to the clean features each step.
                n_point = self.init_feature(input_dict)
                n_point["feat"] = input_dict["feat"]
                # ---- n_feature ---- #
                if (t <= 0):
                    break
            # Aggregate logits over the step + 1 iterations.
            if(mode == "avg"):
                n_point["feat"] = n_pred / len(time_schedule)
            elif(mode == "final"):
                n_point["feat"] = n_pred
            ### ---- PT V3 + DM ---- ###
        else:
            ### ---- PT V3 ---- ###
            n_point = self.backbone(n_point=input_dict)
            ### ---- PT V3 ---- ###
        if(eval):
            point = {}
            point['n_pred'] = n_point["feat"]
            point['n_target'] = input_dict['segment']
            point['loss_mode'] = "eval"
            loss = self.criteria(point)
            return dict(loss=loss, seg_logits=n_point["feat"])
        else:
            return dict(seg_logits=n_point["feat"])
    def inference(self, input_dict, eval=True, noise_level=None):
        """Single-pass inference (one network evaluation, no DDIM loop).

        When the diffusion branch is active with ``dm_input == "xt"`` the
        conditional input is replaced by pure Gaussian noise at step T-1;
        otherwise step 0 with the clean conditional input is used.

        Args:
            input_dict: batch with "coord", "grid_coord", "offset", "feat"
                and (when ``eval``) "segment".
            eval: also compute the evaluation loss.
            noise_level: optional sigma for Gaussian feature corruption.

        Returns:
            dict with "seg_logits" (and "loss" when ``eval``).
        """
        if(noise_level is not None):
            input_dict["feat"] = self.add_gaussian_noise(input_dict["feat"],sigma=noise_level)
            #input_dict["feat"] = self.add_random_noise(input_dict["feat"],sigma=noise_level)
            #input_dict["feat"] = self.add_laplace_noise(input_dict["feat"],sigma=noise_level)
        if(self.condition):
            ### ---- PT V3 + DM ---- ###
            c_point = self.init_feature(input_dict)
            n_point = self.init_feature(input_dict)
            # ---- initial input ---- #
            n_point["feat"] = input_dict["feat"]
            # Pick the conditional input whose channel count matches c_in_channels.
            if(self.c_in_channels == n_point["feat"].shape[-1]):
                c_point['feat'] = c_target = input_dict["feat"]
            else:
                c_point['feat'] = c_target = input_dict["coord"]
            t = 0
            if(self.dm and self.dm_input == "xt"):
                c_point['feat'] = torch.normal(0, 1, size=c_target.shape, dtype=torch.float32).cuda()
                t = self.T - 1
            # ---- initial input ---- #
            N = len(c_target)
            # ---- T steps ---- #
            ts = t * torch.ones((N, 1), dtype=torch.int64).cuda()
            if (self.T_dim != -1):
                c_point['t_emb'] = calc_t_emb(ts, t_emb_dim=self.T_dim).cuda()
            # ---- T steps ---- #
            # ---- pred c_epsilon and n_x0 ---- #
            c_point, n_point = self.backbone(c_point, n_point)
            # ---- pred c_epsilon and n_x0 ---- #
            ### ---- PT V3 + DM ---- ###
        else:
            ### ---- PT V3 ---- ###
            n_point = self.backbone(n_point=input_dict)
            ### ---- PT V3 ---- ###
        if(eval):
            point = {}
            point['n_pred'] = n_point["feat"]
            point['n_target'] = input_dict['segment']
            point['loss_mode'] = "eval"
            loss = self.criteria(point)
            return dict(loss=loss, seg_logits=n_point["feat"])
        else:
            return dict(seg_logits=n_point["feat"])
    def forward(self, input_dict):
        """Training forward pass.

        With ``condition`` enabled, runs the two-branch backbone: the
        conditional branch receives a diffused version of its input (when
        ``dm``) plus a per-sample timestep embedding; the diffusion regression
        target and the segmentation logits both feed the multi-task loss.

        Returns:
            dict with the scalar "loss".
        """
        point = {}
        if(self.condition):
            ### ---- PT V3 + DM ---- ###
            c_point = self.init_feature(input_dict)
            n_point = self.init_feature(input_dict)
            c_point = Point(c_point)
            n_point = Point(n_point)
            batch = n_point["batch"]
            B = len(torch.unique(batch))
            # ---- initial input ---- #
            n_point["feat"] = input_dict["feat"]
            if(self.c_in_channels == n_point["feat"].shape[-1]):
                c_point['feat'] = c_target = input_dict["feat"]
            else:
                c_point['feat'] = c_target = input_dict["coord"]
            # ---- initial input ---- #
            # ---- continuous diffusion ---- #
            if(self.dm):
                # --- T_embeding ---- #
                # One random timestep per batch element, broadcast to its points.
                ts = torch.randint(0, self.T, size=(B, 1), dtype=torch.int64).cuda()
                if (self.T_dim != -1):
                    c_point["t_emb"] = calc_t_emb(ts, self.T_dim)[batch, :]
                ts = ts[batch, :]
                # --- T_embeding ---- #
                # ---- add noise ---- #
                c_x0 = c_target
                c_noise = torch.normal(0, 1, size=c_x0.shape,dtype=torch.float32).cuda()
                c_xt = self.continuous_q_sample(c_x0, ts, c_noise)
                c_point['feat'] = c_xt
                # ---- add noise ---- #
                # ---- diffusion target ---- #
                # Epsilon-prediction regresses the sampled noise instead of x_0.
                if(self.dm_target == "noise"):
                    c_target = c_noise
                # ---- diffusion target ---- #
                # ---- SNR Loss Weight ----
                if (self.dm_min_snr is not None):
                    point["snr_loss_weight"] = self.SNR[ts]
                # ---- SNR Loss Weight ----
            # ---- continuous diffusion ---- #
            # ---- output ---- #
            c_point, n_point = self.backbone(c_point, n_point)
            # ---- output ---- #
            point['c_pred'] = c_point["feat"]
            point['c_target'] = c_target
            ### ---- PT V3 + DM ---- ###
        else:
            ### ---- PT V3 ---- ###
            n_point = Point(input_dict)
            n_point = self.backbone(n_point=n_point)
            ### ---- PT V3 ---- ###
        point['n_pred'] = n_point['feat']
        point['n_target'] = input_dict['segment']
        point['loss_mode'] = "train"
        loss = self.criteria(point)
        return dict(loss=loss)
### ---------------------------- ① ------------------------------- ###
### ---------------------------- ③ ------------------------------- ###
@MODELS.register_module()
class ContinuousDMSegmentor(nn.Module):
'''
③CN + GD(NCF) : Conditional(No Dffusion Process) Network (CN) + Gaussion(Continous) Diffusion (CD)
'''
    def __init__(
        self,
        backbone=None,
        criteria=None,
        loss_type="EW",
        task_num=2,
        num_classes=20,
        T=1000,
        beta_start=0.0001,
        beta_end=0.02,
        noise_schedule="linear",
        T_dim=128,
        dm=False,
        dm_input="xt",
        dm_target="noise",
        dm_min_snr=None,
        condition=False,
        c_in_channels=6
    ):
        """Segmentor diffusing the label/noise branch with a clean conditional branch.

        Parameters mirror ``DefaultSegmentorV2.__init__``: backbone/criteria
        configs, diffusion schedule (T, beta_start/end, noise_schedule),
        timestep-embedding width ``T_dim`` (-1 disables it), diffusion switches
        (``dm``, ``dm_input``, ``dm_target``, ``dm_min_snr``), the conditional
        two-branch switch ``condition`` and the conditional input channel
        count ``c_in_channels``.
        """
        super().__init__()
        self.backbone = build_model(backbone)
        self.criteria = build_criteria(cfg=criteria,loss_type=loss_type,task_num=task_num)
        self.num_classes = num_classes
        self.T = T
        self.beta_start = beta_start
        self.beta_end = beta_end
        self.noise_schedule = noise_schedule
        self.T_dim = T_dim
        self.condition = condition
        self.dm = dm
        self.dm_input = dm_input
        self.dm_target = dm_target
        self.dm_min_snr = dm_min_snr
        self.c_in_channels = c_in_channels
        if(self.dm):
            # ---- diffusion params ----
            # NOTE(review): self.eps is only defined when dm is enabled, but
            # inference_ddim reads it whenever condition and dm_target=="noise"
            # — confirm dm is always on in that configuration.
            self.eps = 1e-6
            self.Beta, self.Alpha ,self.Alpha_bar, self.Sigma, self.SNR= self.get_diffusion_hyperparams(
                noise_schedule=noise_schedule,
                T=self.T,
                beta_start=self.beta_start,
                beta_end=self.beta_end,
            )
            # ---- diffusion params ----
            # Schedule tensors are cast to float32 and moved to the GPU; SNR is
            # optionally capped for min-SNR loss weighting.
            self.Beta = self.Beta.float().cuda()
            self.Alpha = self.Alpha.float().cuda()
            self.Alpha_bar = self.Alpha_bar.float().cuda()
            self.Sigma = self.Sigma.float().cuda()
            self.SNR = self.SNR.float().cuda() if dm_min_snr is None else torch.clamp(self.SNR.float().cuda(),max=dm_min_snr)
    def get_diffusion_hyperparams(
        self,
        noise_schedule,
        beta_start,
        beta_end,
        T
    ):
        """
        Compute diffusion process hyperparameters

        Parameters:
            T (int): number of diffusion steps
            beta_0 and beta_T (float): beta schedule start/end value,
                where any beta_t in the middle is linearly interpolated

        Returns:
            a tuple of tensors (Beta, Alpha, Alpha_bar, Sigma, SNR), each of
            shape (T,), computed on CPU; callers move them to GPU as needed.
        """
        # Beta = torch.linspace(noise_schedule,beta_start, beta_end, T)
        Beta = self.get_diffusion_betas(
            type=noise_schedule,
            start=beta_start,
            stop=beta_end,
            T=T
        )
        # at = 1 - bt
        Alpha = 1 - Beta
        # at_ ("+ 0" makes an independent copy to accumulate the product into)
        Alpha_bar = Alpha + 0
        # posterior variance accumulator
        Beta_tilde = Beta + 0
        for t in range(1, T):
            # \bar{\alpha}_t = \prod_{s=1}^t \alpha_s
            Alpha_bar[t] *= Alpha_bar[t - 1]
            # \tilde{\beta}_t = (1-\bar{\alpha}_{t-1}) / (1-\bar{\alpha}_t) * \beta_t
            Beta_tilde[t] *= (1-Alpha_bar[t-1]) / (1-Alpha_bar[t])
        # standard deviation; the first reverse step is deterministic
        Sigma = torch.sqrt(Beta_tilde) # \sigma_t^2 = \tilde{\beta}_t
        Sigma[0] = 0.0
        '''
        SNR = at ** 2 / sigma ** 2
        at = sqrt(at_), sigma = sqrt(1 - at_)
        q(xt|x0) = sqrt(at_) * x0 + sqrt(1 - at_) * noise
        '''
        SNR = Alpha_bar / (1 - Alpha_bar)
        return Beta, Alpha, Alpha_bar, Sigma, SNR
def get_diffusion_betas(self, type='linear', start=0.0001, stop=0.02, T=1000):
"""Get betas from the hyperparameters."""
if type == 'linear':
# Used by Ho et al. for DDPM, https://arxiv.org/abs/2006.11239.
# To be used with Gaussian diffusion models in continuous and discrete
# state spaces.
# To be used with transition_mat_type = 'gaussian'
scale = 1000 / T
beta_start = scale * start
beta_end = scale * stop
return torch.linspace(beta_start, beta_end, T, dtype=torch.float64)
elif type == 'cosine':
# Schedule proposed by Hoogeboom et al. https://arxiv.org/abs/2102.05379
# To be used with transition_mat_type = 'uniform'.
steps = T + 1
s = 0.008
# t = torch.linspace(0, T, steps, dtype=torch.float64) / T
t = torch.linspace(start, stop, steps, dtype=torch.float64) / T
alphas_cumprod = torch.cos((t + s) / (1 + s) * math.pi * 0.5) ** 2
alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
return torch.clip(betas, 0, 0.999)
elif type == 'sigmoid': # 1/T, 1/(T-1), 1/(T-2), ..., 1
# Proposed by Sohl-Dickstein et al., https://arxiv.org/abs/1503.03585
# To be used with absorbing state models.
# ensures that the probability of decaying to the absorbing state
# increases linearly over time, and is 1 for t = T-1 (the final time).
# To be used with transition_mat_type = 'absorbing'
start = -3
end = 3
tau = 1
steps = T + 1
t = torch.linspace(0, T, steps, dtype=torch.float64) / T
v_start = torch.tensor(start / tau).sigmoid()
v_end = torch.tensor(end / tau).sigmoid()
alphas_cumprod = (-((t * (end - start) + start) / tau).sigmoid() + v_end) / (v_end - v_start)
alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
return torch.clip(betas, 0, 0.999)
elif type == "laplace":
mu = 0.0
b = 0.5
lmb = lambda t: mu - b * torch.sign(0.5 - t) * torch.log(1 - 2 * torch.abs(0.5 - t))
snr_func = lambda t: torch.exp(lmb(t))
alpha_func = lambda t: torch.sqrt(snr_func(t) / (1 + snr_func(t)))
# sigma_func = lambda t: torch.sqrt(1 / (1 + snr_func(t)))
timesteps = torch.linspace(0, 1, 1002)[1:-1]
alphas_cumprod = []
for t in timesteps:
a = alpha_func(t) ** 2
alphas_cumprod.append(a)
alphas_cumprod = torch.cat(alphas_cumprod,dim=0)
alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
return torch.clip(betas, 0, 0.999)
else:
raise NotImplementedError(type)
    def continuous_p_ddim_sample(self, x_t, t, noise):
        """One deterministic DDIM reverse step x_t -> x_{t-1}.

        ``noise`` is the network output: predicted epsilon when
        ``dm_target == "noise"``, otherwise the predicted x_0. Returns the
        x_0 estimate when t == 0 (only ``t[0]`` is tested).
        """
        if(self.dm_target == "noise"):
            # x0 = (xt - sqrt(1-at_) * noise) / sqrt(at_)
            x0 = (x_t - torch.sqrt(1 - self.Alpha_bar[t]) * noise) / torch.sqrt(self.Alpha_bar[t])
        else:
            x0 = noise
            # noise = (xt - sqrt(1-at_) * x0) / sqrt(1-at_)
            noise = (x_t - torch.sqrt(self.Alpha_bar[t]) * x0) / torch.sqrt(1 - self.Alpha_bar[t])
        if(t[0] == 0):
            return x0
        # sqrt(at-1_) * (xt - sqrt(1-at_) * noise) / sqrt(at_)
        xs_1 = torch.sqrt(self.Alpha_bar[t-1]) * x0
        # sqrt(1 - at-1_) * noise
        xs_2 = torch.sqrt(1 - self.Alpha_bar[t-1]) * noise
        # xt-1 = sqrt(at-1_) * (xt - sqrt(1-at_) * noise) / sqrt(at_) + sqrt(1 - at-1_) * noise
        xs = xs_1 + xs_2
        return xs
    def continuous_q_sample(self,x_0, t, noise=None):
        """Forward-diffuse x_0 to x_t: q(x_t|x_0) = sqrt(a_bar_t) x_0 + sqrt(1-a_bar_t) eps."""
        if(noise is None):
            # sampling from Gaussian distribution (GPU tensor, as in training)
            noise = torch.normal(0, 1, size=x_0.shape, dtype=torch.float32).cuda()
        # xt = sqrt(at_) * x0 + sqrt(1-at_) * noise
        x_t = torch.sqrt(self.Alpha_bar[t]) * x_0 + torch.sqrt(1 - self.Alpha_bar[t]) * noise
        return x_t
    def get_time_schedule(self, T=1000, step=5):
        """Descending DDIM schedule of step + 1 points from T-1 down to -1 (inclusive)."""
        times = np.linspace(-1, T - 1, num = step + 1, dtype=int)[::-1]
        return times
    def add_gaussian_noise(self, pts, sigma=0.1, clamp=0.03):
        """Jitter ``pts`` with zero-mean Gaussian noise scaled by ``sigma``.

        ``clamp`` is validated but currently unused (clamped variant commented out).
        """
        # input: (b, 3, n)
        assert (clamp > 0)
        # jittered_data = torch.clamp(sigma * torch.randn_like(pts), -1 * clamp, clamp)
        jittered_data = sigma * torch.randn_like(pts).cuda()
        jittered_data = jittered_data + pts
        return jittered_data
def feature_init(self, input_dict):
point = {}
point["coord"] = input_dict["coord"]
point["grid_coord"] = input_dict["grid_coord"]
point["offset"] = input_dict["offset"]
return point
    def inference_ddim(self, input_dict, T=1000, step=1, report=20, eval=True, noise_level=None):
        """Multi-step DDIM inference where the noise branch itself is diffused.

        Unlike ``DefaultSegmentorV2``, here the (N, num_classes) logits tensor
        starts as pure Gaussian noise and is denoised over ``step`` reverse
        iterations while the conditional branch keeps the clean features.

        Args:
            input_dict: batch with "coord", "grid_coord", "offset", "feat",
                optionally "valid", and (when ``eval``) "segment".
            T: total diffusion steps of the trained schedule.
            step: number of DDIM sampling steps.
            report: progress-print interval (iterations).
            eval: also compute the evaluation loss.
            noise_level: optional sigma for Gaussian feature corruption.

        Returns:
            dict with "seg_logits" (and "loss" when ``eval``).
        """
        if(noise_level is not None):
            input_dict["feat"] = self.add_gaussian_noise(input_dict["feat"],sigma=noise_level)
        if(self.condition):
            N = len(input_dict["feat"])
            c_point = self.feature_init(input_dict)
            n_point = self.feature_init(input_dict)
            # ---- initial input ---- #
            if (self.c_in_channels == 6):
                c_point['feat'] = c_feat = input_dict["feat"]
            else:
                c_point['feat'] = c_feat = input_dict["coord"]
            # The diffused quantity is the logits tensor, initialized to noise.
            n_point['feat'] = torch.normal(0, 1, size=(N, self.num_classes), dtype=torch.float32).cuda()
            # ---- initial input ---- #
            time_schedule = self.get_time_schedule(T, step)
            time_is = reversed(range(len(time_schedule)))
            for i, t in zip(time_is, time_schedule):
                if ((i + 1) % report == 0 or t <= 0):
                    print(f" ---- current : [{i + 1 if t > 0 else 0}/{step}] steps ----")
                # ---- T steps ---- #
                # Clamp the trailing -1 schedule entry to timestep 0.
                t = t if t >= 0 else 0
                ts = t * torch.ones((N, 1), dtype=torch.int64).cuda()
                if (self.T_dim != -1):
                    n_point['t_emb'] = calc_t_emb(ts, t_emb_dim=self.T_dim).cuda()
                # ---- T steps ---- #
                # ---- n_xt ---- #
                n_xt = n_point["feat"]
                # ---- n_xt ---- #
                # ---- pred c_x0 and n_x0 ---- #
                c_point, n_point = self.backbone(c_point, n_point)
                # ---- pred c_x0 and n_x0 ---- #
                # ---- n_xs ---- #
                n_epslon_ = n_point["feat"]
                n_xs = self.continuous_p_ddim_sample(
                    n_xt,
                    ts,
                    n_epslon_,
                ).float()
                n_point = self.feature_init(input_dict)
                n_point["feat"] = n_xs
                # ---- n_xs ---- #
                if (t <= 0):
                    break
                # ---- c_feat ---- #
                # Restore the clean conditional features for the next step.
                c_point = self.feature_init(input_dict)
                c_point["feat"] = c_feat
                # ---- c_feat ---- #
        else:
            n_point = self.backbone(n_point=input_dict)
        if(eval):
            n_target = input_dict["segment"]
            # Epsilon-target models are evaluated against log-one-hot labels.
            # NOTE(review): self.eps is only set in __init__ when dm is True —
            # confirm dm is always enabled alongside dm_target == "noise".
            if(self.condition and self.dm_target == "noise"):
                n_target = torch.log(torch.nn.functional.one_hot(n_target, self.num_classes) + self.eps)
            if("valid" in input_dict.keys()):
                # Restrict predictions and targets to the valid-point mask.
                n_point["feat"] = n_point["feat"][input_dict["valid"]]
                n_target = n_target[input_dict["valid"]]
                input_dict["segment"] = input_dict["segment"][input_dict["valid"]]
            point = {}
            point['n_pred'] = n_point["feat"]
            point['n_target'] = n_target
            point['loss_mode'] = "eval"
            loss = self.criteria(point)
            return dict(loss=loss, seg_logits=n_point["feat"])
        else:
            return dict(seg_logits=n_point["feat"])
    def inference(self, input_dict, eval=True, noise_level=None):
        """Single-pass inference; the conditional decoder is skipped (c_decoder=False).

        Args:
            input_dict: batch with "coord", "grid_coord", "offset", "feat"
                and (when ``eval``) "segment".
            eval: also compute the evaluation loss.
            noise_level: optional sigma for Gaussian feature corruption.

        Returns:
            dict with "seg_logits" (and "loss" when ``eval``).
        """
        if(noise_level is not None):
            input_dict["feat"] = self.add_gaussian_noise(input_dict["feat"],sigma=noise_level)
        if(self.condition):
            ### ---- PT V3 + DM ---- ###
            c_point = {}
            c_point["coord"] = input_dict["coord"]
            c_point["grid_coord"] = input_dict["grid_coord"]
            c_point["offset"] = input_dict["offset"]
            n_point = {}
            n_point["coord"] = input_dict["coord"]
            n_point["grid_coord"] = input_dict["grid_coord"]
            n_point["offset"] = input_dict["offset"]
            # ---- initial input ---- #
            n_point["feat"] = input_dict["feat"]
            # NOTE(review): c_target is only bound for c_in_channels in {3, 6};
            # any other value raises NameError below — confirm the config space.
            if (self.c_in_channels == 3):
                c_point['feat'] = c_target = input_dict["coord"]
            elif (self.c_in_channels == 6):
                c_point['feat'] = c_target = input_dict["feat"]
            t = 0
            if(self.dm and self.dm_input == "xt"):
                c_point['feat'] = torch.normal(0, 1, size=c_target.shape, dtype=torch.float32).cuda()
                t = self.T - 1
            # ---- initial input ---- #
            N = len(c_target)
            # ---- T steps ---- #
            ts = t * torch.ones((N, 1), dtype=torch.int64).cuda()
            if (self.T_dim != -1):
                c_point['t_emb'] = calc_t_emb(ts, t_emb_dim=self.T_dim).cuda()
            # ---- T steps ---- #
            # ---- pred c_epsilon and n_x0 ---- #
            c_point, n_point = self.backbone(c_point, n_point, c_decoder=False)
            # ---- pred c_epsilon and n_x0 ---- #
            ### ---- PT V3 + DM ---- ###
        else:
            ### ---- PT V3 ---- ###
            n_point = self.backbone(n_point=input_dict)
            ### ---- PT V3 ---- ###
        if(eval):
            point = {}
            point['n_pred'] = n_point["feat"]
            point['n_target'] = input_dict['segment']
            point['loss_mode'] = "eval"
            loss = self.criteria(point)
            return dict(loss=loss, seg_logits=n_point["feat"])
        else:
            return dict(seg_logits=n_point["feat"])
def forward(self, input_dict):
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | true |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/modules.py | pointcept/models/modules.py | import sys
import torch.nn as nn
import spconv.pytorch as spconv
from collections import OrderedDict
from pointcept.models.utils.structure import Point
class PointModule(nn.Module):
    r"""Marker base class for Point-aware modules.

    Any subclass of ``PointModule`` is treated by ``PointSequential`` as a
    module that consumes and returns a ``Point`` structure (rather than a raw
    tensor or a sparse-conv tensor).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
class PointSequential(PointModule):
    r"""A sequential container.
    Modules will be added to it in the order they are passed in the constructor.
    Alternatively, an ordered dict of modules can also be passed in.
    """

    def __init__(self, *args, **kwargs):
        super().__init__()
        if len(args) == 1 and isinstance(args[0], OrderedDict):
            # Single OrderedDict argument: keep the caller's names.
            for key, module in args[0].items():
                self.add_module(key, module)
        else:
            # Positional modules are auto-named by index.
            for idx, module in enumerate(args):
                self.add_module(str(idx), module)
        # Keyword modules are appended after the positional ones.
        for name, module in kwargs.items():
            if sys.version_info < (3, 6):
                raise ValueError("kwargs only supported in py36+")
            if name in self._modules:
                raise ValueError("name exists.")
            self.add_module(name, module)

    def __getitem__(self, idx):
        # Integer indexing with list-style negative-index support.
        if not (-len(self) <= idx < len(self)):
            raise IndexError("index {} is out of range".format(idx))
        if idx < 0:
            idx += len(self)
        it = iter(self._modules.values())
        for i in range(idx):
            next(it)
        return next(it)

    def __len__(self):
        return len(self._modules)

    def add(self, module, name=None):
        # Append a module; auto-name with the current length when unnamed.
        if name is None:
            name = str(len(self._modules))
        if name in self._modules:
            raise KeyError("name exists")
        self.add_module(name, module)

    def forward(self, input):
        """Thread ``input`` through children, dispatching on module/input type."""
        for k, module in self._modules.items():
            # Point module
            if isinstance(module, PointModule):
                input = module(input)
            # Spconv module
            elif spconv.modules.is_spconv_module(module):
                if isinstance(input, Point):
                    # Run sparse conv on the attached sparse tensor and mirror
                    # the new features back onto the Point structure.
                    input.sparse_conv_feat = module(input.sparse_conv_feat)
                    input.feat = input.sparse_conv_feat.features
                else:
                    input = module(input)
            # PyTorch module
            else:
                if isinstance(input, Point):
                    input.feat = module(input.feat)
                    if "sparse_conv_feat" in input.keys():
                        # Keep the sparse tensor's features in sync with feat.
                        input.sparse_conv_feat = input.sparse_conv_feat.replace_feature(
                            input.feat
                        )
                elif isinstance(input, spconv.SparseConvTensor):
                    # Skip empty sparse tensors to avoid shape errors.
                    if input.indices.shape[0] != 0:
                        input = input.replace_feature(module(input.features))
                else:
                    input = module(input)
        return input
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/__init__.py | pointcept/models/__init__.py | from .builder import build_model
from .default import DefaultSegmentor, DefaultClassifier
# Backbones
from .sparse_unet import *
from .point_transformer import *
from .point_transformer_v2 import *
from .point_transformer_v3 import *
from .stratified_transformer import *
from .spvcnn import *
from .octformer import *
from .oacnns import *
# from .swin3d import *
# Semantic Segmentation
from .context_aware_classifier import *
# Instance Segmentation
from .point_group import *
# Pretraining
from .masked_scene_contrast import *
from .point_prompt_training import *
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/builder.py | pointcept/models/builder.py | """
Model Builder
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
from pointcept.utils.registry import Registry
MODELS = Registry("models")
MODULES = Registry("modules")
def build_model(cfg):
    """Build models."""
    # Delegates to the MODELS registry; presumably cfg["type"] selects the
    # registered class and the remaining entries are its kwargs — verify
    # against pointcept.utils.registry.Registry.build.
    return MODELS.build(cfg)
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/oacnns/__init__.py | pointcept/models/oacnns/__init__.py | from .oacnns_v1m1_base import OACNNs
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/oacnns/oacnns_v1m1_base.py | pointcept/models/oacnns/oacnns_v1m1_base.py | from functools import partial
import torch
import torch.nn as nn
from einops import rearrange
import spconv.pytorch as spconv
from timm.models.layers import trunc_normal_
from ..builder import MODELS
from ..utils import offset2batch
from torch_geometric.nn.pool import voxel_grid
from torch_geometric.utils import scatter
class BasicBlock(nn.Module):
    """OACNNs basic block: multi-scale cluster attention fused with a
    submanifold sparse-conv residual branch.

    ``forward`` expects a spconv SparseConvTensor plus one cluster index
    tensor per grid scale (len(clusters) == depth - 1); each cluster branch
    pools features with a per-cluster softmax, the adaptive head mixes the
    branches, and the result is refined by two SubMConv3d layers.
    """

    def __init__(
        self,
        in_channels,
        embed_channels,
        norm_fn=None,
        indice_key=None,
        depth=4,
        groups=None,
        grid_size=None,
        bias=False,
    ):
        super().__init__()
        assert embed_channels % groups == 0
        self.groups = groups
        self.embed_channels = embed_channels
        self.proj = nn.ModuleList()
        self.grid_size = grid_size
        self.weight = nn.ModuleList()
        self.l_w = nn.ModuleList()
        # proj[-1] (appended first here) is the identity-path projection used
        # in forward(); the (depth-1) later entries pair with l_w/weight.
        self.proj.append(
            nn.Sequential(
                nn.Linear(embed_channels, embed_channels, bias=False),
                norm_fn(embed_channels),
                nn.ReLU(),
            )
        )
        for _ in range(depth - 1):
            self.proj.append(
                nn.Sequential(
                    nn.Linear(embed_channels, embed_channels, bias=False),
                    norm_fn(embed_channels),
                    nn.ReLU(),
                )
            )
            self.l_w.append(
                nn.Sequential(
                    nn.Linear(embed_channels, embed_channels, bias=False),
                    norm_fn(embed_channels),
                    nn.ReLU(),
                )
            )
            self.weight.append(nn.Linear(embed_channels, embed_channels, bias=False))
        # Produces one mixing logit per cluster branch.
        self.adaptive = nn.Linear(embed_channels, depth - 1, bias=False)
        self.fuse = nn.Sequential(
            nn.Linear(embed_channels * 2, embed_channels, bias=False),
            norm_fn(embed_channels),
            nn.ReLU(),
        )
        self.voxel_block = spconv.SparseSequential(
            spconv.SubMConv3d(
                embed_channels,
                embed_channels,
                kernel_size=3,
                stride=1,
                padding=1,
                indice_key=indice_key,
                bias=bias,
            ),
            norm_fn(embed_channels),
            nn.ReLU(),
            spconv.SubMConv3d(
                embed_channels,
                embed_channels,
                kernel_size=3,
                stride=1,
                padding=1,
                indice_key=indice_key,
                bias=bias,
            ),
            norm_fn(embed_channels),
        )
        self.act = nn.ReLU()

    def forward(self, x, clusters):
        feat = x.features
        feats = []
        for i, cluster in enumerate(clusters):
            # Center features within each cluster, then build per-cluster
            # softmax attention weights (exp/sum with max-shift for stability).
            pw = self.l_w[i](feat)
            pw = pw - scatter(pw, cluster, reduce="mean")[cluster]
            pw = self.weight[i](pw)
            pw = torch.exp(pw - pw.max())
            pw = pw / (scatter(pw, cluster, reduce="sum", dim=0)[cluster] + 1e-6)
            # Attention-weighted cluster pooling, broadcast back to points.
            pfeat = self.proj[i](feat) * pw
            pfeat = scatter(pfeat, cluster, reduce="sum")[cluster]
            feats.append(pfeat)
        # Adaptive per-point mixing of the scale branches.
        adp = self.adaptive(feat)
        adp = torch.softmax(adp, dim=1)
        feats = torch.stack(feats, dim=1)
        feats = torch.einsum("l n, l n c -> l c", adp, feats)
        feat = self.proj[-1](feat)
        feat = torch.cat([feat, feats], dim=1)
        # First residual: fused features + block input.
        feat = self.fuse(feat) + x.features
        res = feat
        x = x.replace_feature(feat)
        x = self.voxel_block(x)
        # Second residual around the sparse-conv refinement.
        x = x.replace_feature(self.act(x.features + res))
        return x
class DonwBlock(nn.Module):
    """Encoder stage: strided sparse-conv downsample followed by ``depth``
    BasicBlocks sharing precomputed multi-scale voxel clusters.

    NOTE(review): class name "DonwBlock" is a typo for "DownBlock" [sic];
    kept because external configs/imports reference this name.
    """

    def __init__(
        self,
        in_channels,
        embed_channels,
        depth,
        sp_indice_key,
        point_grid_size,
        num_ref=16,
        groups=None,
        norm_fn=None,
        sub_indice_key=None,
    ):
        super().__init__()
        self.num_ref = num_ref
        self.depth = depth
        self.point_grid_size = point_grid_size
        # 2x strided sparse conv halves spatial resolution.
        self.down = spconv.SparseSequential(
            spconv.SparseConv3d(
                in_channels,
                embed_channels,
                kernel_size=2,
                stride=2,
                indice_key=sp_indice_key,
                bias=False,
            ),
            norm_fn(embed_channels),
            nn.ReLU(),
        )
        self.blocks = nn.ModuleList()
        for _ in range(depth):
            self.blocks.append(
                BasicBlock(
                    in_channels=embed_channels,
                    embed_channels=embed_channels,
                    # one branch per grid scale + the identity projection
                    depth=len(point_grid_size) + 1,
                    groups=groups,
                    grid_size=point_grid_size,
                    norm_fn=norm_fn,
                    indice_key=sub_indice_key,
                )
            )

    def forward(self, x):
        x = self.down(x)
        coord = x.indices[:, 1:].float()
        batch = x.indices[:, 0]
        clusters = []
        # Build one cluster assignment per grid scale; unique(return_inverse)
        # compacts cluster ids so scatter outputs are densely indexed.
        for grid_size in self.point_grid_size:
            cluster = voxel_grid(pos=coord, size=grid_size, batch=batch)
            _, cluster = torch.unique(cluster, return_inverse=True)
            clusters.append(cluster)
        for block in self.blocks:
            x = block(x, clusters)
        return x
class UpBlock(nn.Module):
    """Decoder stage: inverse sparse conv upsample + skip-connection fusion.

    NOTE(review): ``self.blocks`` is created but never populated or used,
    and ``depth`` is only asserted — confirm whether refinement blocks were
    intended here.
    """

    def __init__(
        self,
        in_channels,
        skip_channels,
        embed_channels,
        depth,
        sp_indice_key,
        norm_fn=None,
        down_ratio=2,
        sub_indice_key=None,
    ):
        super().__init__()
        assert depth > 0
        # Inverse conv restores the indices recorded under sp_indice_key by
        # the matching encoder SparseConv3d.
        self.up = spconv.SparseSequential(
            spconv.SparseInverseConv3d(
                in_channels,
                embed_channels,
                kernel_size=down_ratio,
                indice_key=sp_indice_key,
                bias=False,
            ),
            norm_fn(embed_channels),
            nn.ReLU(),
        )
        self.blocks = nn.ModuleList()
        # Two-layer MLP fusing upsampled and skip features.
        self.fuse = nn.Sequential(
            nn.Linear(skip_channels + embed_channels, embed_channels),
            norm_fn(embed_channels),
            nn.ReLU(),
            nn.Linear(embed_channels, embed_channels),
            norm_fn(embed_channels),
            nn.ReLU(),
        )

    def forward(self, x, skip_x):
        x = self.up(x)
        # Residual fusion: MLP(cat(up, skip)) + up.
        x = x.replace_feature(
            self.fuse(torch.cat([x.features, skip_x.features], dim=1)) + x.features
        )
        return x
@MODELS.register_module()
class OACNNs(nn.Module):
    """Omni-Adaptive sparse CNN segmentation backbone (U-Net layout).

    Consumes a dict with "grid_coord" (int voxel coords), "feat" (per-point
    features), and "offset" (cumulative batch offsets); returns per-point
    class logits.

    NOTE(review): the list defaults below are mutable default arguments;
    they are read-only here, but confirm no caller mutates them.
    """

    def __init__(
        self,
        in_channels,
        num_classes,
        embed_channels=64,
        enc_num_ref=[16, 16, 16, 16],
        enc_channels=[64, 64, 128, 256],
        groups=[2, 4, 8, 16],
        enc_depth=[2, 3, 6, 4],
        down_ratio=[2, 2, 2, 2],
        dec_channels=[96, 96, 128, 256],
        point_grid_size=[[16, 32, 64], [8, 16, 24], [4, 8, 12], [2, 4, 6]],
        dec_depth=[2, 2, 2, 2],
    ):
        super().__init__()
        self.in_channels = in_channels
        self.num_classes = num_classes
        self.num_stages = len(enc_channels)
        self.embed_channels = embed_channels
        norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)

        # Three submanifold convs sharing indice_key "stem".
        self.stem = spconv.SparseSequential(
            spconv.SubMConv3d(
                in_channels,
                embed_channels,
                kernel_size=3,
                padding=1,
                indice_key="stem",
                bias=False,
            ),
            norm_fn(embed_channels),
            nn.ReLU(),
            spconv.SubMConv3d(
                embed_channels,
                embed_channels,
                kernel_size=3,
                padding=1,
                indice_key="stem",
                bias=False,
            ),
            norm_fn(embed_channels),
            nn.ReLU(),
            spconv.SubMConv3d(
                embed_channels,
                embed_channels,
                kernel_size=3,
                padding=1,
                indice_key="stem",
                bias=False,
            ),
            norm_fn(embed_channels),
            nn.ReLU(),
        )

        self.enc = nn.ModuleList()
        self.dec = nn.ModuleList()
        # enc[i] and dec[i] share sp_indice_key so the decoder's inverse
        # convs reuse the encoder's downsampling indices.
        for i in range(self.num_stages):
            self.enc.append(
                DonwBlock(
                    in_channels=embed_channels if i == 0 else enc_channels[i - 1],
                    embed_channels=enc_channels[i],
                    depth=enc_depth[i],
                    norm_fn=norm_fn,
                    groups=groups[i],
                    point_grid_size=point_grid_size[i],
                    num_ref=enc_num_ref[i],
                    sp_indice_key=f"spconv{i}",
                    sub_indice_key=f"subm{i + 1}",
                )
            )
            self.dec.append(
                UpBlock(
                    in_channels=(
                        enc_channels[-1]
                        if i == self.num_stages - 1
                        else dec_channels[i + 1]
                    ),
                    skip_channels=embed_channels if i == 0 else enc_channels[i - 1],
                    embed_channels=dec_channels[i],
                    depth=dec_depth[i],
                    norm_fn=norm_fn,
                    sp_indice_key=f"spconv{i}",
                    sub_indice_key=f"subm{i}",
                )
            )

        # 1x1 sparse conv producing per-point class logits.
        self.final = spconv.SubMConv3d(dec_channels[0], num_classes, kernel_size=1)
        self.apply(self._init_weights)

    def forward(self, input_dict):
        discrete_coord = input_dict["grid_coord"]
        feat = input_dict["feat"]
        offset = input_dict["offset"]
        batch = offset2batch(offset)
        # Pack points into a sparse tensor; indices are (batch, x, y, z).
        x = spconv.SparseConvTensor(
            features=feat,
            indices=torch.cat([batch.unsqueeze(-1), discrete_coord], dim=1)
            .int()
            .contiguous(),
            spatial_shape=torch.add(
                torch.max(discrete_coord, dim=0).values, 1
            ).tolist(),
            batch_size=batch[-1].tolist() + 1,
        )
        x = self.stem(x)
        skips = [x]
        for i in range(self.num_stages):
            x = self.enc[i](x)
            skips.append(x)
        x = skips.pop(-1)
        # Decode deepest-first, consuming skips in reverse order.
        for i in reversed(range(self.num_stages)):
            skip = skips.pop(-1)
            x = self.dec[i](x, skip)
        x = self.final(x)
        return x.features

    @staticmethod
    def _init_weights(m):
        # Truncated-normal init for linear/submanifold-conv weights,
        # unit-scale/zero-shift for BatchNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, spconv.SubMConv3d):
            trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm1d):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/losses/misc.py | pointcept/models/losses/misc.py | """
Misc Losses
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from .builder import LOSSES
def ignore_label(scores, labels, ignore=None):
    """Drop (score, label) pairs whose label equals ``ignore``.

    (The previous docstring — "Flattens predictions in the batch" — was
    copied from a flattening helper; this function does not flatten.)

    Args:
        scores: prediction tensor whose first dimension aligns with ``labels``.
        labels: ground-truth label tensor.
        ignore: label value to filter out; when None, inputs pass through.

    Returns:
        (scores, labels) restricted to entries where ``labels != ignore``.
    """
    if ignore is None:
        return scores, labels
    keep = labels != ignore
    return scores[keep], labels[keep]
@LOSSES.register_module()
class MSELoss(nn.Module):
    """MSE loss computed from prediction/target entries of a point dict.

    Reads ``point[pred]`` and ``point[target]``; optionally subsamples
    ``batch_sample_point`` points per batch item (batch boundaries taken
    from ``point["offset"]``) and drops entries whose segment label equals
    ``ignore_index``. ``reduction`` defaults to "none" so per-element losses
    can be re-weighted by ``snr_loss_weight`` before the final mean.
    """

    def __init__(
        self,
        pred="c_pred",
        target="c_target",
        segment_target="n_target",
        batch_sample_point=8192,
        size_average=None,
        reduce=None,
        reduction="none",
        loss_weight=1.0,
        ignore_index=None
    ):
        super(MSELoss, self).__init__()
        self.loss_weight = loss_weight
        self.ignore_index = ignore_index
        self.batch_sample_point = batch_sample_point
        self.pred = pred
        self.target = target
        self.segment_target = segment_target
        self.loss = nn.MSELoss(
            size_average=size_average,
            reduce=reduce,
            reduction=reduction
        )

    def forward(self, point):
        if self.pred not in point.keys() or self.target not in point.keys():
            return 0.0
        pred, target = point[self.pred], point[self.target]
        if self.batch_sample_point > 0:
            # Randomly subsample up to batch_sample_point points per batch
            # item. NOTE(review): randint samples WITH replacement — confirm.
            preds = []
            targets = []
            start = 0
            for end in point["offset"]:
                num = end - start
                p = pred[start:end]
                t = target[start:end]
                if self.batch_sample_point < num:
                    choices = torch.randint(low=0, high=num, size=(self.batch_sample_point,))
                    p, t = p[choices], t[choices]
                preds.append(p)
                targets.append(t)
                start = end
            pred = torch.cat(preds, dim=0)
            target = torch.cat(targets, dim=0)
        # Bug fix: explicit None test so ignore_index=0 is honored (the old
        # truthiness check skipped filtering when the ignore label was 0).
        if self.ignore_index is not None:
            # NOTE(review): valid is sized to the full segment target while
            # pred/target may have been subsampled above — confirm these two
            # options are never enabled together.
            n_target = point[self.segment_target]
            valid = n_target != self.ignore_index
            pred = pred[valid]
            target = target[valid]
            # NOTE(review): hasattr() on a plain dict is always False; this
            # path only fires for attribute-style dicts (e.g. addict) — verify.
            if hasattr(point, "snr_loss_weight"):
                point["snr_loss_weight"] = point["snr_loss_weight"][valid]
        loss = self.loss(pred, target)
        if hasattr(point, "snr_loss_weight"):
            loss = loss * point["snr_loss_weight"]
        loss = loss.mean() * self.loss_weight
        return loss
@LOSSES.register_module()
class CrossEntropyLoss(nn.Module):
    """Cross-entropy loss over prediction/target entries of a point dict.

    Entries whose label equals ``ignore_index`` are filtered out via
    ``ignore_label`` before the criterion is applied.
    """

    def __init__(
        self,
        pred="n_pred",
        target="n_target",
        weight=None,
        size_average=None,
        reduce=None,
        reduction="mean",
        label_smoothing=0.0,
        loss_weight=1.0,
        ignore_index=-1,
    ):
        super(CrossEntropyLoss, self).__init__()
        # NOTE(review): class weights are moved to CUDA unconditionally;
        # this requires a GPU whenever `weight` is set — confirm intended.
        weight = torch.tensor(weight).cuda() if weight is not None else None
        self.loss_weight = loss_weight
        self.ignore_index = ignore_index
        self.pred = pred
        self.target = target
        self.loss = nn.CrossEntropyLoss(
            weight=weight,
            size_average=size_average,
            reduce=reduce,
            reduction=reduction,
            label_smoothing=label_smoothing,
        )

    def forward(self, point):
        if self.pred not in point.keys() or self.target not in point.keys():
            return 0.0
        pred, target = point[self.pred], point[self.target]
        # Bug fix: explicit None test so ignore_index=0 still filters (the
        # old truthiness check treated label 0 as "no ignore index").
        if self.ignore_index is not None:
            pred, target = ignore_label(pred, target, self.ignore_index)
        loss = self.loss(pred, target) * self.loss_weight
        return loss
@LOSSES.register_module()
class SmoothCELoss(nn.Module):
    """Cross entropy with uniform label smoothing.

    The true class keeps probability ``1 - smoothing_ratio``; the remainder
    is spread evenly over the other ``n_class - 1`` classes.
    """

    def __init__(self, smoothing_ratio=0.1):
        super(SmoothCELoss, self).__init__()
        self.smoothing_ratio = smoothing_ratio

    def forward(self, pred, target):
        eps = self.smoothing_ratio
        n_class = pred.size(1)
        one_hot = torch.zeros_like(pred).scatter(1, target.view(-1, 1), 1)
        one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
        log_prb = F.log_softmax(pred, dim=1)
        # Bug fix: Tensor has no .total(); reduce over the class dim with .sum.
        loss = -(one_hot * log_prb).sum(dim=1)
        # Average only over finite per-sample losses.
        loss = loss[torch.isfinite(loss)].mean()
        return loss
@LOSSES.register_module()
class BinaryFocalLoss(nn.Module):
    def __init__(self, gamma=2.0, alpha=0.5, logits=True, reduce=True, loss_weight=1.0):
        """Binary Focal Loss
        <https://arxiv.org/abs/1708.02002>`
        """
        super(BinaryFocalLoss, self).__init__()
        assert 0 < alpha < 1
        self.gamma = gamma
        self.alpha = alpha
        self.logits = logits
        self.reduce = reduce
        self.loss_weight = loss_weight

    def forward(self, pred, target, **kwargs):
        """Compute the binary focal loss.

        Args:
            pred (torch.Tensor): predictions, shape (N); raw logits when
                ``self.logits`` else probabilities.
            target (torch.Tensor): binary ground truth in [0, 1], shape (N).

        Returns:
            torch.Tensor: the (optionally mean-reduced) weighted loss.
        """
        if self.logits:
            ce = F.binary_cross_entropy_with_logits(pred, target, reduction="none")
        else:
            ce = F.binary_cross_entropy(pred, target, reduction="none")
        # p_t: model's probability of the true class, recovered from the CE.
        prob_true = torch.exp(-ce)
        alpha_t = self.alpha * target + (1 - self.alpha) * (1 - target)
        focal = alpha_t * (1 - prob_true) ** self.gamma * ce
        if self.reduce:
            focal = torch.mean(focal)
        return focal * self.loss_weight
@LOSSES.register_module()
class FocalLoss(nn.Module):
    def __init__(
        self, gamma=2.0, alpha=0.5, reduction="mean", loss_weight=1.0, ignore_index=-1
    ):
        """Multi-class Focal Loss (per-class sigmoid formulation)
        <https://arxiv.org/abs/1708.02002>`
        """
        super(FocalLoss, self).__init__()
        assert reduction in (
            "mean",
            "sum",
        ), "AssertionError: reduction should be 'mean' or 'sum'"
        assert isinstance(
            alpha, (float, list)
        ), "AssertionError: alpha should be of type float"
        assert isinstance(gamma, float), "AssertionError: gamma should be of type float"
        assert isinstance(
            loss_weight, float
        ), "AssertionError: loss_weight should be of type float"
        assert isinstance(ignore_index, int), "ignore_index must be of type int"
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.ignore_index = ignore_index

    def forward(self, pred, target, **kwargs):
        """Forward function.

        Args:
            pred (torch.Tensor): The prediction with shape (N, C) where C = number of classes.
            target (torch.Tensor): The ground truth. If containing class
                indices, shape (N) where each value is 0≤targets[i]≤C−1.

        Returns:
            torch.Tensor: The calculated loss
        """
        # Flatten any trailing spatial dims: [B, C, d1, ...] -> [N, C].
        pred = pred.transpose(0, 1)
        pred = pred.reshape(pred.size(0), -1)
        pred = pred.transpose(0, 1).contiguous()
        target = target.view(-1).contiguous()
        assert pred.size(0) == target.size(
            0
        ), "The shape of pred doesn't match the shape of target"
        # Drop ignored positions before the one-hot encoding below.
        valid_mask = target != self.ignore_index
        target = target[valid_mask]
        pred = pred[valid_mask]

        if len(target) == 0:
            return 0.0

        num_classes = pred.size(1)
        target = F.one_hot(target, num_classes=num_classes)

        alpha = self.alpha
        if isinstance(alpha, list):
            alpha = pred.new_tensor(alpha)
        pred_sigmoid = pred.sigmoid()
        target = target.type_as(pred)
        # (1 - p_t): low for confident-correct predictions, high otherwise.
        one_minus_pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
        focal_weight = (alpha * target + (1 - alpha) * (1 - target)) * one_minus_pt.pow(
            self.gamma
        )

        loss = (
            F.binary_cross_entropy_with_logits(pred, target, reduction="none")
            * focal_weight
        )
        if self.reduction == "mean":
            loss = loss.mean()
        elif self.reduction == "sum":
            # Bug fix: Tensor has no .total(); use .sum() for the reduction.
            loss = loss.sum()
        return self.loss_weight * loss
@LOSSES.register_module()
class DiceLoss(nn.Module):
    def __init__(self, smooth=1, exponent=2, loss_weight=1.0, ignore_index=-1):
        """DiceLoss.
        This loss is proposed in `V-Net: Fully Convolutional Neural Networks for
        Volumetric Medical Image Segmentation <https://arxiv.org/abs/1606.04797>`_.
        """
        super(DiceLoss, self).__init__()
        self.smooth = smooth
        self.exponent = exponent
        self.loss_weight = loss_weight
        self.ignore_index = ignore_index

    def forward(self, pred, target, **kwargs):
        # Flatten [B, C, d1, d2, ...] predictions to [N, C].
        pred = pred.transpose(0, 1)
        pred = pred.reshape(pred.size(0), -1)
        pred = pred.transpose(0, 1).contiguous()
        # (B, d_1, d_2, ..., d_k) --> (B * d_1 * d_2 * ... * d_k,)
        target = target.view(-1).contiguous()
        assert pred.size(0) == target.size(
            0
        ), "The shape of pred doesn't match the shape of target"
        valid_mask = target != self.ignore_index
        target = target[valid_mask]
        pred = pred[valid_mask]

        pred = F.softmax(pred, dim=1)
        num_classes = pred.shape[1]
        target = F.one_hot(
            torch.clamp(target.long(), 0, num_classes - 1), num_classes=num_classes
        )

        total_loss = 0
        # Per-class soft Dice, summed then averaged over num_classes.
        # NOTE(review): with the default ignore_index=-1 the `i != ignore_index`
        # test never skips a class (i ranges over 0..C-1), and the average
        # divides by num_classes even when classes are skipped — confirm.
        for i in range(num_classes):
            if i != self.ignore_index:
                num = torch.sum(torch.mul(pred[:, i], target[:, i])) * 2 + self.smooth
                den = (
                    torch.sum(
                        pred[:, i].pow(self.exponent) + target[:, i].pow(self.exponent)
                    )
                    + self.smooth
                )
                dice_loss = 1 - num / den
                total_loss += dice_loss
        loss = total_loss / num_classes
        return self.loss_weight * loss
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/losses/__init__.py | pointcept/models/losses/__init__.py | from .builder import build_criteria
from .misc import CrossEntropyLoss, SmoothCELoss, DiceLoss, FocalLoss, BinaryFocalLoss
from .lovasz import LovaszLoss
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/losses/builder.py | pointcept/models/losses/builder.py | """
Criteria Builder
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from pointcept.utils.registry import Registry
LOSSES = Registry("losses")
class Criteria(object):
    """Aggregates several registered losses over a point dict.

    loss_type "EW" sums the individual losses (equal weighting); "GLS"
    (geometric loss strategy, train mode only) multiplies the grouped task
    losses and takes the task_num-th root.
    """

    def __init__(self, cfg=None, loss_type="EW", task_num=2):
        self.cfg = cfg if cfg is not None else []
        self.criteria = []
        for loss_cfg in self.cfg:
            self.criteria.append(LOSSES.build(cfg=loss_cfg))
        self.loss_type = loss_type
        self.task_num = task_num

    def __call__(self, points):
        if len(self.criteria) == 0:
            # loss computation occur in model
            return points
        loss = 0.0
        loss_mode = points["loss_mode"]
        # Eval always uses plain summation regardless of loss_type.
        if(loss_mode == "eval" or self.loss_type == "EW"):
            for c in self.criteria:
                l = c(points)
                loss += l
        elif(loss_mode == "train" and self.loss_type == "GLS"):
            loss = []
            for c in self.criteria:
                l = c(points)
                loss.append(l)
            # NOTE(review): with task_num == 1 `loss` becomes a scalar here,
            # yet `loss[0] * loss[1]` below indexes it — this branch looks
            # unreachable/broken for task_num=1; confirm intended configs.
            if(self.task_num == 1):
                loss = loss[0] + loss[1]
            elif (self.task_num == 2 and self.task_num != len(loss)):
                # Group the two segmentation losses into one task term.
                loss = [loss[0], loss[1] + loss[2]] # MSE, Cross Entropy + Lovaz
            # Geometric mean of the task losses.
            loss = loss[0] * loss[1]
            loss = torch.pow(loss, 1. / self.task_num)
        return loss
def build_criteria(cfg, loss_type="EW", task_num=2):
    """Construct a Criteria aggregator from a list of loss configs."""
    return Criteria(cfg, loss_type=loss_type, task_num=task_num)
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/losses/lovasz.py | pointcept/models/losses/lovasz.py | """
Lovasz Loss
refer https://arxiv.org/abs/1705.08790
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
from typing import Optional
from itertools import filterfalse
import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from .builder import LOSSES
BINARY_MODE: str = "binary"
MULTICLASS_MODE: str = "multiclass"
MULTILABEL_MODE: str = "multilabel"
def _lovasz_grad(gt_sorted):
"""Compute gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts - gt_sorted.float().cumsum(0)
union = gts + (1 - gt_sorted).float().cumsum(0)
jaccard = 1.0 - intersection / union
if p > 1: # cover 1-pixel case
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
def _lovasz_hinge(logits, labels, per_image=True, ignore=None):
    """
    Binary Lovasz hinge loss
      logits: [B, H, W] logits at each pixel (between -infinity and +infinity)
      labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
      per_image: compute the loss per image instead of per batch
      ignore: void class id
    """
    if not per_image:
        return _lovasz_hinge_flat(*_flatten_binary_scores(logits, labels, ignore))
    per_image_losses = (
        _lovasz_hinge_flat(
            *_flatten_binary_scores(lg.unsqueeze(0), lb.unsqueeze(0), ignore)
        )
        for lg, lb in zip(logits, labels)
    )
    return mean(per_image_losses)
def _lovasz_hinge_flat(logits, labels):
    """Binary Lovasz hinge loss on flattened inputs.

    Args:
        logits: [P] logits at each prediction (between -infinity and +infinity)
        labels: [P] Tensor, binary ground truth labels (0 or 1)
    """
    if len(labels) == 0:
        # only void pixels — keep the graph alive but contribute zero
        return logits.sum() * 0.0
    signs = 2.0 * labels.float() - 1.0
    margins = 1.0 - logits * signs
    margins_sorted, order = torch.sort(margins, dim=0, descending=True)
    grad = _lovasz_grad(labels[order.data])
    return torch.dot(F.relu(margins_sorted), grad)
def _flatten_binary_scores(scores, labels, ignore=None):
"""Flattens predictions in the batch (binary case)
Remove labels equal to 'ignore'
"""
scores = scores.view(-1)
labels = labels.view(-1)
if ignore is None:
return scores, labels
valid = labels != ignore
vscores = scores[valid]
vlabels = labels[valid]
return vscores, vlabels
def _lovasz_softmax(
    probas, labels, classes="present", class_seen=None, per_image=False, ignore=None
):
    """Multi-class Lovasz-Softmax loss.

    Args:
        probas: [B, C, H, W] class probabilities (between 0 and 1); a [B, H, W]
            input is interpreted as binary (sigmoid) output.
        labels: [B, H, W] ground truth labels (between 0 and C - 1).
        classes: 'all', 'present' (classes present in labels), or an explicit
            list of classes to average over.
        per_image: compute the loss per image instead of per batch.
        ignore: void class label.
    """
    if not per_image:
        return _lovasz_softmax_flat(
            *_flatten_probas(probas, labels, ignore),
            classes=classes,
            class_seen=class_seen
        )
    # NOTE: as in the batch path's historical behavior, class_seen is not
    # forwarded when averaging per image.
    per_image_losses = (
        _lovasz_softmax_flat(
            *_flatten_probas(pr.unsqueeze(0), lb.unsqueeze(0), ignore),
            classes=classes
        )
        for pr, lb in zip(probas, labels)
    )
    return mean(per_image_losses)
def _lovasz_softmax_flat(probas, labels, classes="present", class_seen=None):
    """Multi-class Lovasz-Softmax loss on flattened inputs.

    Args:
        probas: [P, C] class probabilities at each prediction (between 0 and 1).
        labels: [P] ground truth labels (between 0 and C - 1).
        classes: 'all'/'present' or a list of classes to average.
        class_seen: optional whitelist of class ids; others are skipped.

    The two former branch bodies (class_seen None / not None) were byte-for-
    byte duplicates; they are merged into one loop with a guard, and the
    unused ``class_to_sum`` local is removed.
    """
    if probas.numel() == 0:
        # only void pixels, the gradients should be 0
        return probas * 0.0
    C = probas.size(1)
    losses = []
    # NOTE(review): iterates labels.unique() rather than the `classes`
    # argument, so an explicit class list passed via `classes` is ignored
    # here — confirm that is intended.
    for c in labels.unique():
        if class_seen is not None and c not in class_seen:
            continue
        fg = (labels == c).type_as(probas)  # foreground mask for class c
        if classes == "present" and fg.sum() == 0:
            continue
        if C == 1:
            if len(classes) > 1:
                raise ValueError("Sigmoid output possible only with 1 class")
            class_pred = probas[:, 0]
        else:
            class_pred = probas[:, c]
        errors = (fg - class_pred).abs()
        errors_sorted, perm = torch.sort(errors, 0, descending=True)
        fg_sorted = fg[perm.data]
        losses.append(torch.dot(errors_sorted, _lovasz_grad(fg_sorted)))
    return mean(losses)
def _flatten_probas(probas, labels, ignore=None):
"""Flattens predictions in the batch"""
if probas.dim() == 3:
# assumes output of a sigmoid layer
B, H, W = probas.size()
probas = probas.view(B, 1, H, W)
C = probas.size(1)
probas = torch.movedim(probas, 1, -1) # [B, C, Di, Dj, ...] -> [B, Di, Dj, ..., C]
probas = probas.contiguous().view(-1, C) # [P, C]
labels = labels.view(-1)
if ignore is None:
return probas, labels
valid = labels != ignore
vprobas = probas[valid]
vlabels = labels[valid]
return vprobas, vlabels
def isnan(x):
    # NaN is the only value that compares unequal to itself; this form works
    # for both Python floats and (elementwise) torch tensors.
    return x != x
def mean(values, ignore_nan=False, empty=0):
"""Nan-mean compatible with generators."""
values = iter(values)
if ignore_nan:
values = filterfalse(isnan, values)
try:
n = 1
acc = next(values)
except StopIteration:
if empty == "raise":
raise ValueError("Empty mean")
return empty
for n, v in enumerate(values, 2):
acc += v
if n == 1:
return acc
return acc / n
@LOSSES.register_module()
class LovaszLoss(_Loss):
def __init__(
self,
mode: str,
pred="n_pred",
target="n_target",
class_seen: Optional[int] = None,
per_image: bool = False,
ignore_index: Optional[int] = None,
loss_weight: float = 1.0,
):
"""Lovasz loss for segmentation task.
It supports binary, multiclass and multilabel cases
Args:
mode: Loss mode 'binary', 'multiclass' or 'multilabel'
ignore_index: Label that indicates ignored pixels (does not contribute to loss)
per_image: If True loss computed per each image and then averaged, else computed per whole batch
Shape
- **y_pred** - torch.Tensor of shape (N, C, H, W)
- **y_true** - torch.Tensor of shape (N, H, W) or (N, C, H, W)
Reference
https://github.com/BloodAxe/pytorch-toolbelt
"""
assert mode in {BINARY_MODE, MULTILABEL_MODE, MULTICLASS_MODE}
super().__init__()
self.mode = mode
self.pred = pred
self.target = target
self.ignore_index = ignore_index
self.per_image = per_image
self.class_seen = class_seen
self.loss_weight = loss_weight
def forward(self, point):
if(self.pred not in point.keys() or self.target not in point.keys()):
return 0.0
pred, target = point[self.pred], point[self.target]
if self.mode in {BINARY_MODE, MULTILABEL_MODE}:
loss = _lovasz_hinge(
pred, target, per_image=self.per_image, ignore=self.ignore_index
)
elif self.mode == MULTICLASS_MODE:
pred = pred.softmax(dim=1)
loss = _lovasz_softmax(
pred,
target,
class_seen=self.class_seen,
per_image=self.per_image,
ignore=self.ignore_index,
)
else:
raise ValueError("Wrong mode {}.".format(self.mode))
return loss * self.loss_weight
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/sparse_unet/mink_unet.py | pointcept/models/sparse_unet/mink_unet.py | """
SparseUNet Driven by MinkowskiEngine
Modified from chrischoy/SpatioTemporalSegmentation
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import torch
import torch.nn as nn
try:
import MinkowskiEngine as ME
except ImportError:
ME = None
from pointcept.models.builder import MODELS
def offset2batch(offset):
return (
torch.cat(
[
(
torch.tensor([i] * (o - offset[i - 1]))
if i > 0
else torch.tensor([i] * o)
)
for i, o in enumerate(offset)
],
dim=0,
)
.long()
.to(offset.device)
)
class BasicBlock(nn.Module):
expansion = 1
def __init__(
self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
bn_momentum=0.1,
dimension=-1,
):
super(BasicBlock, self).__init__()
assert dimension > 0
self.conv1 = ME.MinkowskiConvolution(
inplanes,
planes,
kernel_size=3,
stride=stride,
dilation=dilation,
dimension=dimension,
)
self.norm1 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)
self.conv2 = ME.MinkowskiConvolution(
planes,
planes,
kernel_size=3,
stride=1,
dilation=dilation,
dimension=dimension,
)
self.norm2 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)
self.relu = ME.MinkowskiReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(
self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
bn_momentum=0.1,
dimension=-1,
):
super(Bottleneck, self).__init__()
assert dimension > 0
self.conv1 = ME.MinkowskiConvolution(
inplanes, planes, kernel_size=1, dimension=dimension
)
self.norm1 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)
self.conv2 = ME.MinkowskiConvolution(
planes,
planes,
kernel_size=3,
stride=stride,
dilation=dilation,
dimension=dimension,
)
self.norm2 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)
self.conv3 = ME.MinkowskiConvolution(
planes, planes * self.expansion, kernel_size=1, dimension=dimension
)
self.norm3 = ME.MinkowskiBatchNorm(
planes * self.expansion, momentum=bn_momentum
)
self.relu = ME.MinkowskiReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.norm3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class MinkUNetBase(nn.Module):
BLOCK = None
PLANES = None
DILATIONS = (1, 1, 1, 1, 1, 1, 1, 1)
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
PLANES = (32, 64, 128, 256, 256, 128, 96, 96)
INIT_DIM = 32
OUT_TENSOR_STRIDE = 1
def __init__(self, in_channels, out_channels, dimension=3):
super().__init__()
assert ME is not None, "Please follow `README.md` to install MinkowskiEngine.`"
self.D = dimension
assert self.BLOCK is not None
# Output of the first conv concated to conv6
self.inplanes = self.INIT_DIM
self.conv0p1s1 = ME.MinkowskiConvolution(
in_channels, self.inplanes, kernel_size=5, dimension=self.D
)
self.bn0 = ME.MinkowskiBatchNorm(self.inplanes)
self.conv1p1s2 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=self.D
)
self.bn1 = ME.MinkowskiBatchNorm(self.inplanes)
self.block1 = self._make_layer(self.BLOCK, self.PLANES[0], self.LAYERS[0])
self.conv2p2s2 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=self.D
)
self.bn2 = ME.MinkowskiBatchNorm(self.inplanes)
self.block2 = self._make_layer(self.BLOCK, self.PLANES[1], self.LAYERS[1])
self.conv3p4s2 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=self.D
)
self.bn3 = ME.MinkowskiBatchNorm(self.inplanes)
self.block3 = self._make_layer(self.BLOCK, self.PLANES[2], self.LAYERS[2])
self.conv4p8s2 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=self.D
)
self.bn4 = ME.MinkowskiBatchNorm(self.inplanes)
self.block4 = self._make_layer(self.BLOCK, self.PLANES[3], self.LAYERS[3])
self.convtr4p16s2 = ME.MinkowskiConvolutionTranspose(
self.inplanes, self.PLANES[4], kernel_size=2, stride=2, dimension=self.D
)
self.bntr4 = ME.MinkowskiBatchNorm(self.PLANES[4])
self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
self.block5 = self._make_layer(self.BLOCK, self.PLANES[4], self.LAYERS[4])
self.convtr5p8s2 = ME.MinkowskiConvolutionTranspose(
self.inplanes, self.PLANES[5], kernel_size=2, stride=2, dimension=self.D
)
self.bntr5 = ME.MinkowskiBatchNorm(self.PLANES[5])
self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
self.block6 = self._make_layer(self.BLOCK, self.PLANES[5], self.LAYERS[5])
self.convtr6p4s2 = ME.MinkowskiConvolutionTranspose(
self.inplanes, self.PLANES[6], kernel_size=2, stride=2, dimension=self.D
)
self.bntr6 = ME.MinkowskiBatchNorm(self.PLANES[6])
self.inplanes = self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion
self.block7 = self._make_layer(self.BLOCK, self.PLANES[6], self.LAYERS[6])
self.convtr7p2s2 = ME.MinkowskiConvolutionTranspose(
self.inplanes, self.PLANES[7], kernel_size=2, stride=2, dimension=self.D
)
self.bntr7 = ME.MinkowskiBatchNorm(self.PLANES[7])
self.inplanes = self.PLANES[7] + self.INIT_DIM
self.block8 = self._make_layer(self.BLOCK, self.PLANES[7], self.LAYERS[7])
self.final = ME.MinkowskiConvolution(
self.PLANES[7] * self.BLOCK.expansion,
out_channels,
kernel_size=1,
bias=True,
dimension=self.D,
)
self.relu = ME.MinkowskiReLU(inplace=True)
self.weight_initialization()
def weight_initialization(self):
for m in self.modules():
if isinstance(m, ME.MinkowskiConvolution):
ME.utils.kaiming_normal_(m.kernel, mode="fan_out", nonlinearity="relu")
if isinstance(m, ME.MinkowskiBatchNorm):
nn.init.constant_(m.bn.weight, 1)
nn.init.constant_(m.bn.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilation=1, bn_momentum=0.1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
ME.MinkowskiConvolution(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
dimension=self.D,
),
ME.MinkowskiBatchNorm(planes * block.expansion),
)
layers = []
layers.append(
block(
self.inplanes,
planes,
stride=stride,
dilation=dilation,
downsample=downsample,
dimension=self.D,
)
)
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
self.inplanes, planes, stride=1, dilation=dilation, dimension=self.D
)
)
return nn.Sequential(*layers)
def forward(self, data_dict):
grid_coord = data_dict["grid_coord"]
feat = data_dict["feat"]
offset = data_dict["offset"]
batch = offset2batch(offset)
in_field = ME.TensorField(
feat,
coordinates=torch.cat([batch.unsqueeze(-1).int(), grid_coord.int()], dim=1),
quantization_mode=ME.SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
minkowski_algorithm=ME.MinkowskiAlgorithm.SPEED_OPTIMIZED,
device=feat.device,
)
x = in_field.sparse()
out = self.conv0p1s1(x)
out = self.bn0(out)
out_p1 = self.relu(out)
out = self.conv1p1s2(out_p1)
out = self.bn1(out)
out = self.relu(out)
out_b1p2 = self.block1(out)
out = self.conv2p2s2(out_b1p2)
out = self.bn2(out)
out = self.relu(out)
out_b2p4 = self.block2(out)
out = self.conv3p4s2(out_b2p4)
out = self.bn3(out)
out = self.relu(out)
out_b3p8 = self.block3(out)
# tensor_stride=16
out = self.conv4p8s2(out_b3p8)
out = self.bn4(out)
out = self.relu(out)
out = self.block4(out)
# tensor_stride=8
out = self.convtr4p16s2(out)
out = self.bntr4(out)
out = self.relu(out)
out = ME.cat(out, out_b3p8)
out = self.block5(out)
# tensor_stride=4
out = self.convtr5p8s2(out)
out = self.bntr5(out)
out = self.relu(out)
out = ME.cat(out, out_b2p4)
out = self.block6(out)
# tensor_stride=2
out = self.convtr6p4s2(out)
out = self.bntr6(out)
out = self.relu(out)
out = ME.cat(out, out_b1p2)
out = self.block7(out)
# tensor_stride=1
out = self.convtr7p2s2(out)
out = self.bntr7(out)
out = self.relu(out)
out = ME.cat(out, out_p1)
out = self.block8(out)
return self.final(out).slice(in_field).F
@MODELS.register_module()
class MinkUNet14(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (1, 1, 1, 1, 1, 1, 1, 1)
@MODELS.register_module()
class MinkUNet18(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
@MODELS.register_module()
class MinkUNet34(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
@MODELS.register_module()
class MinkUNet50(MinkUNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
@MODELS.register_module()
class MinkUNet101(MinkUNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 23, 2, 2, 2, 2)
@MODELS.register_module()
class MinkUNet14A(MinkUNet14):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
@MODELS.register_module()
class MinkUNet14B(MinkUNet14):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
@MODELS.register_module()
class MinkUNet14C(MinkUNet14):
PLANES = (32, 64, 128, 256, 192, 192, 128, 128)
@MODELS.register_module()
class MinkUNet14D(MinkUNet14):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
@MODELS.register_module()
class MinkUNet18A(MinkUNet18):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
@MODELS.register_module()
class MinkUNet18B(MinkUNet18):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
@MODELS.register_module()
class MinkUNet18D(MinkUNet18):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
@MODELS.register_module()
class MinkUNet34A(MinkUNet34):
PLANES = (32, 64, 128, 256, 256, 128, 96, 96)
@MODELS.register_module()
class MinkUNet34B(MinkUNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 32)
@MODELS.register_module()
class MinkUNet34C(MinkUNet34):
PLANES = (32, 64, 128, 256, 256, 128, 96, 96)
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/sparse_unet/spconv_unet_v1m3_pdnorm.py | pointcept/models/sparse_unet/spconv_unet_v1m3_pdnorm.py | """
SparseUNet V1M3
Enable Prompt-Driven Normalization for Point Prompt Training
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
from functools import partial
from collections import OrderedDict
import torch
import torch.nn as nn
import spconv.pytorch as spconv
from torch_geometric.utils import scatter
from timm.models.layers import trunc_normal_
from pointcept.models.builder import MODELS
from pointcept.models.utils import offset2batch
class PDBatchNorm(torch.nn.Module):
def __init__(
self,
num_features,
context_channels=256,
eps=1e-3,
momentum=0.01,
conditions=("ScanNet", "S3DIS", "Structured3D"),
decouple=True,
adaptive=False,
affine=True,
):
super().__init__()
self.conditions = conditions
self.decouple = decouple
self.adaptive = adaptive
self.affine = affine
if self.decouple:
self.bns = nn.ModuleList(
[
nn.BatchNorm1d(
num_features=num_features,
eps=eps,
momentum=momentum,
affine=affine,
)
for _ in conditions
]
)
else:
self.bn = nn.BatchNorm1d(
num_features=num_features, eps=eps, momentum=momentum, affine=affine
)
if self.adaptive:
self.modulation = nn.Sequential(
nn.SiLU(), nn.Linear(context_channels, 2 * num_features, bias=True)
)
def forward(self, feat, condition=None, context=None):
if self.decouple:
assert condition in self.conditions
bn = self.bns[self.conditions.index(condition)]
else:
bn = self.bn
feat = bn(feat)
if self.adaptive:
assert context is not None
shift, scale = self.modulation(context).chunk(2, dim=1)
feat = feat * (1.0 + scale) + shift
return feat
class BasicBlock(spconv.SparseModule):
expansion = 1
def __init__(
self,
in_channels,
embed_channels,
stride=1,
norm_fn=None,
indice_key=None,
bias=False,
):
super().__init__()
assert norm_fn is not None
self.in_channels = in_channels
self.embed_channels = embed_channels
if in_channels == embed_channels:
self.proj = spconv.SparseSequential(nn.Identity())
else:
# TODO remove norm after project
self.proj_conv = spconv.SubMConv3d(
in_channels, embed_channels, kernel_size=1, bias=False
)
self.proj_norm = norm_fn(embed_channels)
self.conv1 = spconv.SubMConv3d(
in_channels,
embed_channels,
kernel_size=3,
stride=stride,
padding=1,
bias=bias,
indice_key=indice_key,
)
self.bn1 = norm_fn(embed_channels)
self.relu = nn.ReLU()
self.conv2 = spconv.SubMConv3d(
embed_channels,
embed_channels,
kernel_size=3,
stride=stride,
padding=1,
bias=bias,
indice_key=indice_key,
)
self.bn2 = norm_fn(embed_channels)
self.stride = stride
def forward(self, x):
x, condition, context = x
residual = x
out = self.conv1(x)
out = out.replace_feature(self.bn1(out.features, condition, context))
out = out.replace_feature(self.relu(out.features))
out = self.conv2(out)
out = out.replace_feature(self.bn2(out.features, condition, context))
if self.in_channels == self.embed_channels:
residual = self.proj(residual)
else:
residual = residual.replace_feature(
self.proj_norm(self.proj_conv(residual).features, condition, context)
)
out = out.replace_feature(out.features + residual.features)
out = out.replace_feature(self.relu(out.features))
return out, condition, context
class SPConvDown(nn.Module):
def __init__(
self,
in_channels,
out_channels,
indice_key,
kernel_size=2,
bias=False,
norm_fn=None,
):
super().__init__()
self.conv = spconv.SparseConv3d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=kernel_size,
bias=bias,
indice_key=indice_key,
)
self.bn = norm_fn(out_channels)
self.relu = nn.ReLU()
def forward(self, x):
x, condition, context = x
out = self.conv(x)
out = out.replace_feature(self.bn(out.features, condition, context))
out = out.replace_feature(self.relu(out.features))
return out
class SPConvUp(nn.Module):
def __init__(
self,
in_channels,
out_channels,
indice_key,
kernel_size=2,
bias=False,
norm_fn=None,
):
super().__init__()
self.conv = spconv.SparseInverseConv3d(
in_channels,
out_channels,
kernel_size=kernel_size,
bias=bias,
indice_key=indice_key,
)
self.bn = norm_fn(out_channels)
self.relu = nn.ReLU()
def forward(self, x):
x, condition, context = x
out = self.conv(x)
out = out.replace_feature(self.bn(out.features, condition, context))
out = out.replace_feature(self.relu(out.features))
return out
class SPConvPatchEmbedding(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=5, norm_fn=None):
super().__init__()
self.conv = spconv.SubMConv3d(
in_channels,
out_channels,
kernel_size=kernel_size,
padding=1,
bias=False,
indice_key="stem",
)
self.bn = norm_fn(out_channels)
self.relu = nn.ReLU()
def forward(self, x):
x, condition, context = x
out = self.conv(x)
out = out.replace_feature(self.bn(out.features, condition, context))
out = out.replace_feature(self.relu(out.features))
return out
@MODELS.register_module("SpUNet-v1m3")
class SpUNetBase(nn.Module):
def __init__(
self,
in_channels,
num_classes=0,
base_channels=32,
context_channels=256,
channels=(32, 64, 128, 256, 256, 128, 96, 96),
layers=(2, 3, 4, 6, 2, 2, 2, 2),
cls_mode=False,
conditions=("ScanNet", "S3DIS", "Structured3D"),
zero_init=True,
norm_decouple=True,
norm_adaptive=True,
norm_affine=False,
):
super().__init__()
assert len(layers) % 2 == 0
assert len(layers) == len(channels)
self.in_channels = in_channels
self.num_classes = num_classes
self.base_channels = base_channels
self.channels = channels
self.layers = layers
self.num_stages = len(layers) // 2
self.cls_mode = cls_mode
self.conditions = conditions
self.zero_init = zero_init
norm_fn = partial(
PDBatchNorm,
eps=1e-3,
momentum=0.01,
conditions=conditions,
context_channels=context_channels,
decouple=norm_decouple,
adaptive=norm_adaptive,
affine=norm_affine,
)
block = BasicBlock
self.conv_input = SPConvPatchEmbedding(
in_channels, base_channels, kernel_size=5, norm_fn=norm_fn
)
enc_channels = base_channels
dec_channels = channels[-1]
self.down = nn.ModuleList()
self.up = nn.ModuleList()
self.enc = nn.ModuleList()
self.dec = nn.ModuleList() if not self.cls_mode else None
for s in range(self.num_stages):
# encode num_stages
self.down.append(
SPConvDown(
enc_channels,
channels[s],
kernel_size=2,
bias=False,
indice_key=f"spconv{s + 1}",
norm_fn=norm_fn,
)
)
self.enc.append(
spconv.SparseSequential(
OrderedDict(
[
# (f"block{i}", block(enc_channels, channels[s], norm_fn=norm_fn, indice_key=f"subm{s + 1}"))
# if i == 0 else
(
f"block{i}",
block(
channels[s],
channels[s],
norm_fn=norm_fn,
indice_key=f"subm{s + 1}",
),
)
for i in range(layers[s])
]
)
)
)
if not self.cls_mode:
# decode num_stages
self.up.append(
SPConvUp(
channels[len(channels) - s - 2],
dec_channels,
kernel_size=2,
bias=False,
indice_key=f"spconv{s + 1}",
norm_fn=norm_fn,
)
)
self.dec.append(
spconv.SparseSequential(
OrderedDict(
[
(
(
f"block{i}",
block(
dec_channels + enc_channels,
dec_channels,
norm_fn=norm_fn,
indice_key=f"subm{s}",
),
)
if i == 0
else (
f"block{i}",
block(
dec_channels,
dec_channels,
norm_fn=norm_fn,
indice_key=f"subm{s}",
),
)
)
for i in range(layers[len(channels) - s - 1])
]
)
)
)
enc_channels = channels[s]
dec_channels = channels[len(channels) - s - 2]
final_in_channels = (
channels[-1] if not self.cls_mode else channels[self.num_stages - 1]
)
self.final = (
spconv.SubMConv3d(
final_in_channels, num_classes, kernel_size=1, padding=1, bias=True
)
if num_classes > 0
else spconv.Identity()
)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, spconv.SubMConv3d):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
if m.affine:
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, PDBatchNorm):
if self.zero_init:
nn.init.constant_(m.modulation[-1].weight, 0)
nn.init.constant_(m.modulation[-1].bias, 0)
def forward(self, input_dict):
grid_coord = input_dict["grid_coord"]
feat = input_dict["feat"]
offset = input_dict["offset"]
condition = input_dict["condition"][0]
context = input_dict["context"] if "context" in input_dict.keys() else None
batch = offset2batch(offset)
sparse_shape = torch.add(torch.max(grid_coord, dim=0).values, 96).tolist()
x = spconv.SparseConvTensor(
features=feat,
indices=torch.cat(
[batch.unsqueeze(-1).int(), grid_coord.int()], dim=1
).contiguous(),
spatial_shape=sparse_shape,
batch_size=batch[-1].tolist() + 1,
)
x = self.conv_input([x, condition, context])
skips = [x]
# enc forward
for s in range(self.num_stages):
x = self.down[s]([x, condition, context])
x, _, _ = self.enc[s]([x, condition, context])
skips.append(x)
x = skips.pop(-1)
if not self.cls_mode:
# dec forward
for s in reversed(range(self.num_stages)):
x = self.up[s]([x, condition, context])
skip = skips.pop(-1)
x = x.replace_feature(torch.cat((x.features, skip.features), dim=1))
x, _, _ = self.dec[s]([x, condition, context])
x = self.final(x)
if self.cls_mode:
x = x.replace_feature(
scatter(x.features, x.indices[:, 0].long(), reduce="mean", dim=0)
)
return x.features
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/sparse_unet/__init__.py | pointcept/models/sparse_unet/__init__.py | from .mink_unet import *
from .spconv_unet_v1m1_base import *
from .spconv_unet_v1m2_bn_momentum import *
from .spconv_unet_v1m3_pdnorm import *
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/sparse_unet/spconv_unet_v1m1_base.py | pointcept/models/sparse_unet/spconv_unet_v1m1_base.py | """
SparseUNet Driven by SpConv (recommend)
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
from functools import partial
from collections import OrderedDict
import torch
import torch.nn as nn
import spconv.pytorch as spconv
from torch_geometric.utils import scatter
from timm.models.layers import trunc_normal_
from pointcept.models.builder import MODELS
from pointcept.models.utils import offset2batch
class BasicBlock(spconv.SparseModule):
expansion = 1
def __init__(
self,
in_channels,
embed_channels,
stride=1,
norm_fn=None,
indice_key=None,
bias=False,
):
super().__init__()
assert norm_fn is not None
if in_channels == embed_channels:
self.proj = spconv.SparseSequential(nn.Identity())
else:
self.proj = spconv.SparseSequential(
spconv.SubMConv3d(
in_channels, embed_channels, kernel_size=1, bias=False
),
norm_fn(embed_channels),
)
self.conv1 = spconv.SubMConv3d(
in_channels,
embed_channels,
kernel_size=3,
stride=stride,
padding=1,
bias=bias,
indice_key=indice_key,
)
self.bn1 = norm_fn(embed_channels)
self.relu = nn.ReLU()
self.conv2 = spconv.SubMConv3d(
embed_channels,
embed_channels,
kernel_size=3,
stride=stride,
padding=1,
bias=bias,
indice_key=indice_key,
)
self.bn2 = norm_fn(embed_channels)
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = out.replace_feature(self.bn1(out.features))
out = out.replace_feature(self.relu(out.features))
out = self.conv2(out)
out = out.replace_feature(self.bn2(out.features))
out = out.replace_feature(out.features + self.proj(residual).features)
out = out.replace_feature(self.relu(out.features))
return out
@MODELS.register_module("SpUNet-v1m1")
class SpUNetBase(nn.Module):
def __init__(
self,
in_channels,
num_classes,
base_channels=32,
channels=(32, 64, 128, 256, 256, 128, 96, 96),
layers=(2, 3, 4, 6, 2, 2, 2, 2),
cls_mode=False,
):
super().__init__()
assert len(layers) % 2 == 0
assert len(layers) == len(channels)
self.in_channels = in_channels
self.num_classes = num_classes
self.base_channels = base_channels
self.channels = channels
self.layers = layers
self.num_stages = len(layers) // 2
self.cls_mode = cls_mode
norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
block = BasicBlock
self.conv_input = spconv.SparseSequential(
spconv.SubMConv3d(
in_channels,
base_channels,
kernel_size=5,
padding=1,
bias=False,
indice_key="stem",
),
norm_fn(base_channels),
nn.ReLU(),
)
enc_channels = base_channels
dec_channels = channels[-1]
self.down = nn.ModuleList()
self.up = nn.ModuleList()
self.enc = nn.ModuleList()
self.dec = nn.ModuleList() if not self.cls_mode else None
for s in range(self.num_stages):
# encode num_stages
self.down.append(
spconv.SparseSequential(
spconv.SparseConv3d(
enc_channels,
channels[s],
kernel_size=2,
stride=2,
bias=False,
indice_key=f"spconv{s + 1}",
),
norm_fn(channels[s]),
nn.ReLU(),
)
)
self.enc.append(
spconv.SparseSequential(
OrderedDict(
[
# (f"block{i}", block(enc_channels, channels[s], norm_fn=norm_fn, indice_key=f"subm{s + 1}"))
# if i == 0 else
(
f"block{i}",
block(
channels[s],
channels[s],
norm_fn=norm_fn,
indice_key=f"subm{s + 1}",
),
)
for i in range(layers[s])
]
)
)
)
if not self.cls_mode:
# decode num_stages
self.up.append(
spconv.SparseSequential(
spconv.SparseInverseConv3d(
channels[len(channels) - s - 2],
dec_channels,
kernel_size=2,
bias=False,
indice_key=f"spconv{s + 1}",
),
norm_fn(dec_channels),
nn.ReLU(),
)
)
self.dec.append(
spconv.SparseSequential(
OrderedDict(
[
(
(
f"block{i}",
block(
dec_channels + enc_channels,
dec_channels,
norm_fn=norm_fn,
indice_key=f"subm{s}",
),
)
if i == 0
else (
f"block{i}",
block(
dec_channels,
dec_channels,
norm_fn=norm_fn,
indice_key=f"subm{s}",
),
)
)
for i in range(layers[len(channels) - s - 1])
]
)
)
)
enc_channels = channels[s]
dec_channels = channels[len(channels) - s - 2]
final_in_channels = (
channels[-1] if not self.cls_mode else channels[self.num_stages - 1]
)
self.final = (
spconv.SubMConv3d(
final_in_channels, num_classes, kernel_size=1, padding=1, bias=True
)
if num_classes > 0
else spconv.Identity()
)
self.apply(self._init_weights)
@staticmethod
def _init_weights(m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, spconv.SubMConv3d):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def forward(self, input_dict):
grid_coord = input_dict["grid_coord"]
feat = input_dict["feat"]
offset = input_dict["offset"]
batch = offset2batch(offset)
sparse_shape = torch.add(torch.max(grid_coord, dim=0).values, 96).tolist()
x = spconv.SparseConvTensor(
features=feat,
indices=torch.cat(
[batch.unsqueeze(-1).int(), grid_coord.int()], dim=1
).contiguous(),
spatial_shape=sparse_shape,
batch_size=batch[-1].tolist() + 1,
)
x = self.conv_input(x)
skips = [x]
# enc forward
for s in range(self.num_stages):
x = self.down[s](x)
x = self.enc[s](x)
skips.append(x)
x = skips.pop(-1)
if not self.cls_mode:
# dec forward
for s in reversed(range(self.num_stages)):
x = self.up[s](x)
skip = skips.pop(-1)
x = x.replace_feature(torch.cat((x.features, skip.features), dim=1))
x = self.dec[s](x)
x = self.final(x)
if self.cls_mode:
x = x.replace_feature(
scatter(x.features, x.indices[:, 0].long(), reduce="mean", dim=0)
)
return x.features
@MODELS.register_module()
class SpUNetNoSkipBase(nn.Module):
def __init__(
self,
in_channels,
out_channels,
base_channels=32,
channels=(32, 64, 128, 256, 256, 128, 96, 96),
layers=(2, 3, 4, 6, 2, 2, 2, 2),
):
super().__init__()
assert len(layers) % 2 == 0
assert len(layers) == len(channels)
self.in_channels = in_channels
self.out_channels = out_channels
self.base_channels = base_channels
self.channels = channels
self.layers = layers
self.num_stages = len(layers) // 2
norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
block = BasicBlock
self.conv_input = spconv.SparseSequential(
spconv.SubMConv3d(
in_channels,
base_channels,
kernel_size=5,
padding=1,
bias=False,
indice_key="stem",
),
norm_fn(base_channels),
nn.ReLU(),
)
enc_channels = base_channels
dec_channels = channels[-1]
self.down = nn.ModuleList()
self.up = nn.ModuleList()
self.enc = nn.ModuleList()
self.dec = nn.ModuleList()
for s in range(self.num_stages):
# encode num_stages
self.down.append(
spconv.SparseSequential(
spconv.SparseConv3d(
enc_channels,
channels[s],
kernel_size=2,
stride=2,
bias=False,
indice_key=f"spconv{s + 1}",
),
norm_fn(channels[s]),
nn.ReLU(),
)
)
self.enc.append(
spconv.SparseSequential(
OrderedDict(
[
# (f"block{i}", block(enc_channels, channels[s], norm_fn=norm_fn, indice_key=f"subm{s + 1}"))
# if i == 0 else
(
f"block{i}",
block(
channels[s],
channels[s],
norm_fn=norm_fn,
indice_key=f"subm{s + 1}",
),
)
for i in range(layers[s])
]
)
)
)
# decode num_stages
self.up.append(
spconv.SparseSequential(
spconv.SparseInverseConv3d(
channels[len(channels) - s - 2],
dec_channels,
kernel_size=2,
bias=False,
indice_key=f"spconv{s + 1}",
),
norm_fn(dec_channels),
nn.ReLU(),
)
)
self.dec.append(
spconv.SparseSequential(
OrderedDict(
[
(
(
f"block{i}",
block(
dec_channels,
dec_channels,
norm_fn=norm_fn,
indice_key=f"subm{s}",
),
)
if i == 0
else (
f"block{i}",
block(
dec_channels,
dec_channels,
norm_fn=norm_fn,
indice_key=f"subm{s}",
),
)
)
for i in range(layers[len(channels) - s - 1])
]
)
)
)
enc_channels = channels[s]
dec_channels = channels[len(channels) - s - 2]
self.final = (
spconv.SubMConv3d(
channels[-1], out_channels, kernel_size=1, padding=1, bias=True
)
if out_channels > 0
else spconv.Identity()
)
self.apply(self._init_weights)
@staticmethod
def _init_weights(m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, spconv.SubMConv3d):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def forward(self, data_dict):
grid_coord = data_dict["grid_coord"]
feat = data_dict["feat"]
offset = data_dict["offset"]
batch = offset2batch(offset)
sparse_shape = torch.add(torch.max(grid_coord, dim=0).values, 1).tolist()
x = spconv.SparseConvTensor(
features=feat,
indices=torch.cat(
[batch.unsqueeze(-1).int(), grid_coord.int()], dim=1
).contiguous(),
spatial_shape=sparse_shape,
batch_size=batch[-1].tolist() + 1,
)
x = self.conv_input(x)
skips = [x]
# enc forward
for s in range(self.num_stages):
x = self.down[s](x)
x = self.enc[s](x)
skips.append(x)
x = skips.pop(-1)
# dec forward
for s in reversed(range(self.num_stages)):
x = self.up[s](x)
# skip = skips.pop(-1)
# x = x.replace_feature(torch.cat((x.features, skip.features), dim=1))
x = self.dec[s](x)
x = self.final(x)
return x.features
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/sparse_unet/spconv_unet_v1m2_bn_momentum.py | pointcept/models/sparse_unet/spconv_unet_v1m2_bn_momentum.py | """
SparseUNet Driven by SpConv (recommend)
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
from functools import partial
from collections import OrderedDict
import torch
import torch.nn as nn
try:
import spconv.pytorch as spconv
except ImportError:
import warnings
warnings.warn("Please follow `README.md` to install spconv2.`")
from timm.models.layers import trunc_normal_
from pointcept.models.builder import MODELS
def offset2batch(offset):
return (
torch.cat(
[
(
torch.tensor([i] * (o - offset[i - 1]))
if i > 0
else torch.tensor([i] * o)
)
for i, o in enumerate(offset)
],
dim=0,
)
.long()
.to(offset.device)
)
class BasicBlock(spconv.SparseModule):
expansion = 1
def __init__(
self,
in_channels,
embed_channels,
stride=1,
norm_fn=None,
indice_key=None,
bias=False,
):
super().__init__()
assert norm_fn is not None
if in_channels == embed_channels:
self.proj = spconv.SparseSequential(nn.Identity())
else:
self.proj = spconv.SparseSequential(
spconv.SubMConv3d(
in_channels, embed_channels, kernel_size=1, bias=False
),
norm_fn(embed_channels, momentum=0.02),
)
self.conv1 = spconv.SubMConv3d(
in_channels,
embed_channels,
kernel_size=3,
stride=stride,
padding=1,
bias=bias,
indice_key=indice_key,
)
self.bn1 = norm_fn(embed_channels)
self.relu = nn.ReLU()
self.conv2 = spconv.SubMConv3d(
embed_channels,
embed_channels,
kernel_size=3,
stride=stride,
padding=1,
bias=bias,
indice_key=indice_key,
)
self.bn2 = norm_fn(embed_channels)
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = out.replace_feature(self.bn1(out.features))
out = out.replace_feature(self.relu(out.features))
out = self.conv2(out)
out = out.replace_feature(self.bn2(out.features))
out = out.replace_feature(out.features + self.proj(residual).features)
out = out.replace_feature(self.relu(out.features))
return out
@MODELS.register_module("SpUNet-v1m2")
class SpUNetBase(nn.Module):
def __init__(
self,
in_channels,
num_classes,
base_channels=32,
channels=(32, 64, 128, 256, 256, 128, 96, 96),
layers=(2, 3, 4, 6, 2, 2, 2, 2),
bn_momentum=0.1,
):
super().__init__()
assert len(layers) % 2 == 0
assert len(layers) == len(channels)
self.in_channels = in_channels
self.num_classes = num_classes
self.base_channels = base_channels
self.channels = channels
self.layers = layers
self.num_stages = len(layers) // 2
norm_fn = partial(nn.BatchNorm1d, eps=1e-5, momentum=bn_momentum)
block = BasicBlock
self.conv_input = spconv.SparseSequential(
spconv.SubMConv3d(
in_channels,
base_channels,
kernel_size=5,
padding=1,
bias=False,
indice_key="stem",
),
norm_fn(base_channels, momentum=0.02),
nn.ReLU(),
)
enc_channels = base_channels
dec_channels = channels[-1]
self.down = nn.ModuleList()
self.up = nn.ModuleList()
self.enc = nn.ModuleList()
self.dec = nn.ModuleList()
for s in range(self.num_stages):
# encode num_stages
self.down.append(
spconv.SparseSequential(
spconv.SparseConv3d(
enc_channels,
channels[s],
kernel_size=2,
stride=2,
bias=False,
indice_key=f"spconv{s + 1}",
),
norm_fn(channels[s], momentum=0.02),
nn.ReLU(),
)
)
self.enc.append(
spconv.SparseSequential(
OrderedDict(
[
# (f"block{i}", block(enc_channels, channels[s], norm_fn=norm_fn, indice_key=f"subm{s + 1}"))
# if i == 0 else
(
f"block{i}",
block(
channels[s],
channels[s],
norm_fn=norm_fn,
indice_key=f"subm{s + 1}",
),
)
for i in range(layers[s])
]
)
)
)
# decode num_stages
self.up.append(
spconv.SparseSequential(
spconv.SparseInverseConv3d(
channels[len(channels) - s - 2],
dec_channels,
kernel_size=2,
bias=False,
indice_key=f"spconv{s + 1}",
),
norm_fn(dec_channels, momentum=0.02),
nn.ReLU(),
)
)
self.dec.append(
spconv.SparseSequential(
OrderedDict(
[
(
(
f"block{i}",
block(
dec_channels + enc_channels,
dec_channels,
norm_fn=norm_fn,
indice_key=f"subm{s}",
),
)
if i == 0
else (
f"block{i}",
block(
dec_channels,
dec_channels,
norm_fn=norm_fn,
indice_key=f"subm{s}",
),
)
)
for i in range(layers[len(channels) - s - 1])
]
)
)
)
enc_channels = channels[s]
dec_channels = channels[len(channels) - s - 2]
self.final = (
spconv.SubMConv3d(
channels[-1], num_classes, kernel_size=1, padding=1, bias=True
)
if num_classes > 0
else spconv.Identity()
)
self.apply(self._init_weights)
@staticmethod
def _init_weights(m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, spconv.SubMConv3d):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def forward(self, data_dict):
grid_coord = data_dict["grid_coord"]
feat = data_dict["feat"]
offset = data_dict["offset"]
batch = offset2batch(offset)
sparse_shape = torch.add(torch.max(grid_coord, dim=0).values, 1).tolist()
x = spconv.SparseConvTensor(
features=feat,
indices=torch.cat(
[batch.unsqueeze(-1).int(), grid_coord.int()], dim=1
).contiguous(),
spatial_shape=sparse_shape,
batch_size=batch[-1].tolist() + 1,
)
x = self.conv_input(x)
skips = [x]
# enc forward
for s in range(self.num_stages):
x = self.down[s](x)
x = self.enc[s](x)
skips.append(x)
x = skips.pop(-1)
# dec forward
for s in reversed(range(self.num_stages)):
x = self.up[s](x)
skip = skips.pop(-1)
x = x.replace_feature(torch.cat((x.features, skip.features), dim=1))
x = self.dec[s](x)
x = self.final(x)
return x.features
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/point_prompt_training/prompt_driven_normalization.py | pointcept/models/point_prompt_training/prompt_driven_normalization.py | import torch.nn as nn
from pointcept.models.modules import PointModule, PointSequential
from pointcept.models.builder import MODULES
@MODULES.register_module()
class PDNorm(PointModule):
    """Prompt-Driven Normalization.

    Wraps a normalization layer so it adapts to the dataset ("condition") a
    point cloud comes from: optionally one decoupled norm module per dataset,
    and optionally a FiLM-style scale/shift predicted from a context vector.
    """

    def __init__(
        self,
        num_features,
        norm_layer,
        context_channels=256,
        conditions=("ScanNet", "S3DIS", "Structured3D"),
        decouple=True,
        adaptive=False,
    ):
        # num_features: channel width of the features to be normalized
        # norm_layer: factory building a norm module for `num_features` channels
        # decouple: keep an independent norm module per condition
        # adaptive: modulate normalized features from `point.context`
        super().__init__()
        self.conditions = conditions
        self.decouple = decouple
        self.adaptive = adaptive
        if self.decouple:
            self.norm = nn.ModuleList([norm_layer(num_features) for _ in conditions])
        else:
            self.norm = norm_layer
        if self.adaptive:
            # context -> (shift, scale), one pair of values per channel
            self.modulation = nn.Sequential(
                nn.SiLU(), nn.Linear(context_channels, 2 * num_features, bias=True)
            )

    def forward(self, point):
        assert {"feat", "condition"}.issubset(point.keys())
        # condition may be a plain string or a per-sample sequence; the first
        # entry stands for the whole batch
        if isinstance(point.condition, str):
            condition = point.condition
        else:
            condition = point.condition[0]
        if self.decouple:
            assert condition in self.conditions
            norm = self.norm[self.conditions.index(condition)]
        else:
            norm = self.norm
        point.feat = norm(point.feat)
        if self.adaptive:
            assert "context" in point.keys()
            # FiLM-style modulation: feat * (1 + scale) + shift
            shift, scale = self.modulation(point.context).chunk(2, dim=1)
            point.feat = point.feat * (1.0 + scale) + shift
        return point
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/point_prompt_training/point_prompt_training_v1m2_decoupled.py | pointcept/models/point_prompt_training/point_prompt_training_v1m2_decoupled.py | """
Point Prompt Training with decoupled segmentation head
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
from functools import partial
from collections import OrderedDict
import torch
import torch.nn as nn
from pointcept.models.utils.structure import Point
from pointcept.models.builder import MODELS
from pointcept.models.losses import build_criteria
@MODELS.register_module("PPT-v1m2")
class PointPromptTraining(nn.Module):
    """
    PointPromptTraining v1m2 provides Data-driven Context and enables multi-dataset training with
    Decoupled Segmentation Head. PDNorm is supported by SpUNet-v1m3 to adapt the
    backbone to a specific dataset with a given dataset condition and context.
    """

    def __init__(
        self,
        backbone=None,
        criteria=None,
        backbone_out_channels=96,
        context_channels=256,
        conditions=("Structured3D", "ScanNet", "S3DIS"),
        num_classes=(25, 20, 13),
        backbone_mode=False,
    ):
        # conditions[i] pairs with num_classes[i]: one decoupled head per dataset
        super().__init__()
        assert len(conditions) == len(num_classes)
        # only these backbones understand the injected "condition"/"context" keys
        assert backbone.type in ["SpUNet-v1m3", "PT-v2m3", "PT-v3m1"]
        self.backbone = MODELS.build(backbone)
        self.criteria = build_criteria(criteria)
        self.conditions = conditions
        # learned per-dataset context vector consumed by the backbone (PDNorm)
        self.embedding_table = nn.Embedding(len(conditions), context_channels)
        self.backbone_mode = backbone_mode
        # one linear segmentation head per dataset
        self.seg_heads = nn.ModuleList(
            [nn.Linear(backbone_out_channels, num_cls) for num_cls in num_classes]
        )

    def forward(self, data_dict):
        # the whole batch shares a single dataset condition
        condition = data_dict["condition"][0]
        assert condition in self.conditions
        context = self.embedding_table(
            torch.tensor(
                [self.conditions.index(condition)], device=data_dict["coord"].device
            )
        )
        data_dict["context"] = context
        point = self.backbone(data_dict)
        # Backbone added after v1.5.0 return Point instead of feat and use DefaultSegmentorV2
        # TODO: remove this part after make all backbone return Point only.
        if isinstance(point, Point):
            feat = point.feat
        else:
            feat = point
        if self.backbone_mode:
            # PPT serve as a multi-dataset backbone when enable backbone mode
            return feat
        # pick the head matching the current dataset
        seg_head = self.seg_heads[self.conditions.index(condition)]
        seg_logits = seg_head(feat)
        # train
        if self.training:
            loss = self.criteria(seg_logits, data_dict["segment"])
            return dict(loss=loss)
        # eval
        elif "segment" in data_dict.keys():
            loss = self.criteria(seg_logits, data_dict["segment"])
            return dict(loss=loss, seg_logits=seg_logits)
        # test
        else:
            return dict(seg_logits=seg_logits)
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/point_prompt_training/point_prompt_training_v1m1_language_guided.py | pointcept/models/point_prompt_training/point_prompt_training_v1m1_language_guided.py | """
Point Prompt Training
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
from functools import partial
from collections import OrderedDict
import numpy as np
import math
import torch
import torch.nn as nn
from pointcept.utils.comm import calc_t_emb
from pointcept.models.utils.structure import Point
from pointcept.models.builder import MODELS
from pointcept.models.losses import build_criteria
# @MODELS.register_module("PPT-v1m1")
# class PointPromptTraining(nn.Module):
# """
# PointPromptTraining provides Data-driven Context and enables multi-dataset training with
# Language-driven Categorical Alignment. PDNorm is supported by SpUNet-v1m3 to adapt the
# backbone to a specific dataset with a given dataset condition and context.
# """
#
# def __init__(
# self,
# backbone=None,
# criteria=None,
# backbone_out_channels=96,
# context_channels=256,
# conditions=("Structured3D", "ScanNet", "S3DIS"),
# template="[x]",
# clip_model="ViT-B/16",
# # fmt: off
# class_name=(
# "wall", "floor", "cabinet", "bed", "chair", "sofa", "table", "door",
# "window", "bookshelf", "bookcase", "picture", "counter", "desk", "shelves", "curtain",
# "dresser", "pillow", "mirror", "ceiling", "refrigerator", "television", "shower curtain", "nightstand",
# "toilet", "sink", "lamp", "bathtub", "garbagebin", "board", "beam", "column",
# "clutter", "otherstructure", "otherfurniture", "otherprop",
# ),
# valid_index=(
# (0, 1, 2, 3, 4, 5, 6, 7, 8, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 25, 26, 33, 34, 35),
# (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 15, 20, 22, 24, 25, 27, 34),
# (0, 1, 4, 5, 6, 7, 8, 10, 19, 29, 30, 31, 32),
# ),
# # fmt: on
# backbone_mode=False,
# ):
# super().__init__()
# assert len(conditions) == len(valid_index)
# assert backbone.type in ["SpUNet-v1m3", "PT-v2m3", "PT-v3m1"]
# self.backbone = MODELS.build(backbone)
# self.criteria = build_criteria(criteria)
# self.conditions = conditions
# self.valid_index = valid_index
# self.embedding_table = nn.Embedding(len(conditions), context_channels)
# self.backbone_mode = backbone_mode
# if not self.backbone_mode:
# import clip
#
# clip_model, _ = clip.load(
# clip_model, device="cpu", download_root="./.cache/clip"
# )
# clip_model.requires_grad_(False)
# class_prompt = [template.replace("[x]", name) for name in class_name]
# class_token = clip.tokenize(class_prompt)
# class_embedding = clip_model.encode_text(class_token)
# class_embedding = class_embedding / class_embedding.norm(
# dim=-1, keepdim=True
# )
# self.register_buffer("class_embedding", class_embedding)
# self.proj_head = nn.Linear(
# backbone_out_channels, clip_model.text_projection.shape[1]
# )
# self.logit_scale = clip_model.logit_scale
#
# def forward(self, data_dict):
# condition = data_dict["condition"][0]
# assert condition in self.conditions
# context = self.embedding_table(
# torch.tensor(
# [self.conditions.index(condition)], device=data_dict["coord"].device
# )
# )
# data_dict["context"] = context
# point = self.backbone(data_dict)
# # Backbone added after v1.5.0 return Point instead of feat and use DefaultSegmentorV2
# # TODO: remove this part after make all backbone return Point only.
# if isinstance(point, Point):
# feat = point.feat
# else:
# feat = point
# if self.backbone_mode:
# # PPT serve as a multi-dataset backbone when enable backbone mode
# return feat
# feat = self.proj_head(feat)
# feat = feat / feat.norm(dim=-1, keepdim=True)
# sim = (
# feat
# @ self.class_embedding[
# self.valid_index[self.conditions.index(condition)], :
# ].t()
# )
#         logit_scale = self.logit_scale.exp()
# seg_logits = logit_scale * sim
# # train
# if self.training:
# loss = self.criteria(seg_logits, data_dict["segment"])
# return dict(loss=loss)
# # eval
# elif "segment" in data_dict.keys():
# loss = self.criteria(seg_logits, data_dict["segment"])
# return dict(loss=loss, seg_logits=seg_logits)
# # test
# else:
# return dict(seg_logits=seg_logits)
@MODELS.register_module("PPT-v1m1")
class PointPromptTraining(nn.Module):
"""
PointPromptTraining provides Data-driven Context and enables multi-dataset training with
Language-driven Categorical Alignment. PDNorm is supported by SpUNet-v1m3 to adapt the
backbone to a specific dataset with a given dataset condition and context.
"""
def __init__(
    self,
    backbone=None,
    criteria=None,
    context_channels=256,
    conditions=("Structured3D", "ScanNet", "S3DIS"),
    template="[x]",
    clip_model="ViT-B/16",
    # fmt: off
    class_name=(
        "wall", "floor", "cabinet", "bed", "chair", "sofa", "table", "door",
        "window", "bookshelf", "bookcase", "picture", "counter", "desk", "shelves", "curtain",
        "dresser", "pillow", "mirror", "ceiling", "refrigerator", "television", "shower curtain", "nightstand",
        "toilet", "sink", "lamp", "bathtub", "garbagebin", "board", "beam", "column",
        "clutter", "otherstructure", "otherfurniture", "otherprop",
    ),
    valid_index=(
        (0, 1, 2, 3, 4, 5, 6, 7, 8, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 25, 26, 33, 34, 35),
        (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 15, 20, 22, 24, 25, 27, 34),
        (0, 1, 4, 5, 6, 7, 8, 10, 19, 29, 30, 31, 32),
    ),
    # fmt: on
    backbone_mode=False,
    loss_type="EW",
    task_num=2,
    num_classes=20,
    T=1000,
    beta_start=0.0001,
    beta_end=0.02,
    noise_schedule="linear",
    T_dim=128,
    dm=False,
    dm_input="xt",
    dm_target="noise",
    dm_min_snr=None,
    condition=False,
    c_in_channels=6
):
    """Build the PPT model with optional diffusion (dm) components.

    valid_index[i] selects, per dataset in `conditions`, which entries of
    `class_name` are valid; T/beta_*/noise_schedule parameterize the
    diffusion schedule; c_in_channels picks coord (3) or feat (6) as the
    conditional-branch input.
    """
    super().__init__()
    assert len(conditions) == len(valid_index)
    assert backbone.type in ["SpUNet-v1m3", "PT-v2m3", "PT-v3m1"]
    self.backbone = MODELS.build(backbone)
    self.criteria = build_criteria(cfg=criteria,loss_type=loss_type,task_num=task_num)
    self.conditions = conditions
    self.valid_index = valid_index
    # learned per-dataset context vector fed to the backbone
    self.embedding_table = nn.Embedding(len(conditions), context_channels)
    self.backbone_mode = backbone_mode
    if not self.backbone_mode:
        import clip
        # frozen CLIP text encoder produces the (normalized) class embeddings
        clip_model, _ = clip.load(
            clip_model, device="cpu", download_root="./.cache/clip"
        )
        clip_model.requires_grad_(False)
        class_prompt = [template.replace("[x]", name) for name in class_name]
        class_token = clip.tokenize(class_prompt)
        class_embedding = clip_model.encode_text(class_token)
        class_embedding = class_embedding / class_embedding.norm(
            dim=-1, keepdim=True
        )
        self.register_buffer("class_embedding", class_embedding)
        self.logit_scale = clip_model.logit_scale
    self.num_classes = num_classes
    self.T = T
    self.beta_start = beta_start
    self.beta_end = beta_end
    self.noise_schedule = noise_schedule
    self.T_dim = T_dim
    self.condition = condition
    self.dm = dm
    self.dm_input = dm_input
    self.dm_target = dm_target
    self.dm_min_snr = dm_min_snr
    self.c_in_channels = c_in_channels
    if(self.dm):
        # ---- diffusion params ----
        self.eps = 1e-6
        self.Beta, self.Alpha ,self.Alpha_bar, self.Sigma, self.SNR= self.get_diffusion_hyperparams(
            noise_schedule=noise_schedule,
            T=self.T,
            beta_start=self.beta_start,
            beta_end=self.beta_end,
        )
        # ---- diffusion params ----
        # move schedule tensors to GPU once; optionally clamp SNR loss weights
        self.Beta = self.Beta.float().cuda()
        self.Alpha = self.Alpha.float().cuda()
        self.Alpha_bar = self.Alpha_bar.float().cuda()
        self.Sigma = self.Sigma.float().cuda()
        self.SNR = self.SNR.float().cuda() if dm_min_snr is None else torch.clamp(self.SNR.float().cuda(),max=dm_min_snr)
def get_diffusion_hyperparams(
    self,
    noise_schedule,
    beta_start,
    beta_end,
    T
):
    """
    Compute diffusion process hyperparameters

    Parameters:
    T (int):    number of diffusion steps
    beta_start and beta_end (float): beta schedule start/end value,
                interpolated according to `noise_schedule`

    Returns:
    Beta, Alpha, Alpha_bar, Sigma, SNR — torch tensors on cpu, shape=(T,).
    These cpu tensors are moved to cuda by the caller.
    """
    # Beta = torch.linspace(noise_schedule,beta_start, beta_end, T)
    Beta = self.get_diffusion_betas(
        type=noise_schedule,
        start=beta_start,
        stop=beta_end,
        T=T
    )
    # at = 1 - bt
    Alpha = 1 - Beta
    # at_  (cumulative product, built in place below)
    Alpha_bar = Alpha + 0
    # posterior variance (built in place below)
    Beta_tilde = Beta + 0
    for t in range(1, T):
        # \bar{\alpha}_t = \prod_{s=1}^t \alpha_s
        Alpha_bar[t] *= Alpha_bar[t - 1]
        # \tilde{\beta}_t = (1-\bar{\alpha}_{t-1}) / (1-\bar{\alpha}_t) * \beta_t
        Beta_tilde[t] *= (1-Alpha_bar[t-1]) / (1-Alpha_bar[t])
    # standard deviation
    Sigma = torch.sqrt(Beta_tilde)  # \sigma_t^2 = \tilde{\beta}_t
    # no noise is added at t = 0
    Sigma[0] = 0.0
    '''
        SNR = at ** 2 / sigma ** 2
        at = sqrt(at_), sigma = sqrt(1 - at_)
        q(xt|x0) = sqrt(at_) * x0 + sqrt(1 - at_) * noise
    '''
    SNR = Alpha_bar / (1 - Alpha_bar)
    return Beta, Alpha, Alpha_bar, Sigma, SNR
def get_diffusion_betas(self, type='linear', start=0.0001, stop=0.02, T=1000):
    """Build the beta (noise) schedule for the diffusion process.

    Args:
        type: one of 'linear', 'cosine', 'sigmoid', 'laplace'.
        start, stop: schedule endpoints (meaning depends on `type`; unused
            by 'cosine'/'sigmoid'/'laplace', which have fixed parameters).
        T: number of diffusion steps.

    Returns:
        1-D float tensor of betas.

    Raises:
        NotImplementedError: for an unknown schedule `type`.
    """
    if type == 'linear':
        # Used by Ho et al. for DDPM, https://arxiv.org/abs/2006.11239.
        # To be used with Gaussian diffusion models in continuous and discrete
        # state spaces.
        # Endpoints are rescaled so schedules stay comparable across T.
        scale = 1000 / T
        beta_start = scale * start
        beta_end = scale * stop
        return torch.linspace(beta_start, beta_end, T, dtype=torch.float64)
    elif type == 'cosine':
        # Schedule proposed by Hoogeboom et al. https://arxiv.org/abs/2102.05379
        # betas derived from a cosine-shaped alpha_bar curve.
        steps = T + 1
        s = 0.008
        t = torch.linspace(0, T, steps, dtype=torch.float64) / T
        alphas_cumprod = torch.cos((t + s) / (1 + s) * math.pi * 0.5) ** 2
        alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
        betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
        return torch.clip(betas, 0, 0.999)
    elif type == 'sigmoid':
        # Proposed by Sohl-Dickstein et al., https://arxiv.org/abs/1503.03585
        # alpha_bar follows a (shifted, normalized) sigmoid over t in [0, 1].
        start = -3
        end = 3
        tau = 1
        steps = T + 1
        t = torch.linspace(0, T, steps, dtype=torch.float64) / T
        v_start = torch.tensor(start / tau).sigmoid()
        v_end = torch.tensor(end / tau).sigmoid()
        alphas_cumprod = (-((t * (end - start) + start) / tau).sigmoid() + v_end) / (v_end - v_start)
        alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
        betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
        return torch.clip(betas, 0, 0.999)
    elif type == "laplace":
        # alpha_bar derived from a Laplace-shaped log-SNR curve lambda(t).
        mu = 0.0
        b = 0.5
        lmb = lambda t: mu - b * torch.sign(0.5 - t) * torch.log(1 - 2 * torch.abs(0.5 - t))
        snr_func = lambda t: torch.exp(lmb(t))
        alpha_func = lambda t: torch.sqrt(snr_func(t) / (1 + snr_func(t)))
        # 1000 interior timesteps in (0, 1)
        timesteps = torch.linspace(0, 1, 1002)[1:-1]
        # alpha_bar(t) = alpha(t)^2; each element is a 0-dim tensor
        alphas_cumprod = [alpha_func(t) ** 2 for t in timesteps]
        # FIX: torch.cat rejects zero-dimensional tensors, so the original
        # `torch.cat(alphas_cumprod, dim=0)` raised at runtime; torch.stack
        # assembles the 1-D schedule correctly.
        alphas_cumprod = torch.stack(alphas_cumprod, dim=0)
        # NOTE(review): this branch ignores T and always yields 999 betas —
        # confirm that is intended before using it with T != 1000.
        alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
        betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
        return torch.clip(betas, 0, 0.999)
    else:
        raise NotImplementedError(type)
def continuous_p_ddim_sample(self, x_t, t, noise):
    """One deterministic DDIM reverse step: (x_t, t, model output) -> x_{t-1}.

    `noise` is the network prediction; its meaning depends on
    `self.dm_target` ("noise" = predicted epsilon, "x0" = predicted clean
    sample). At t == 0 the reconstructed x0 is returned directly.
    """
    if(self.dm_target == "noise"):
        # x0 = (xt - sqrt(1-at_) * noise) / sqrt(at_)
        c_x0 = (x_t - torch.sqrt(1 - self.Alpha_bar[t]) * noise) / torch.sqrt(self.Alpha_bar[t])
    elif(self.dm_target == "x0"):
        c_x0 = noise
        # noise = (xt - sqrt(at_) * x0) / sqrt(1-at_)
        noise = (x_t - torch.sqrt(self.Alpha_bar[t]) * c_x0) / torch.sqrt(1 - self.Alpha_bar[t])
    if(t[0] == 0):
        # final step: no further noise to remove
        return c_x0
    # sqrt(at-1_) * (xt - sqrt(1-at_) * noise) / sqrt(at_)
    c_xt_1_1 = torch.sqrt(self.Alpha_bar[t-1]) * c_x0
    # sqrt(1 - at-1_) * noise
    c_xt_1_2 = torch.sqrt(1 - self.Alpha_bar[t-1]) * noise
    # xt-1 = sqrt(at-1_) * x0 + sqrt(1 - at-1_) * noise   (eta = 0, DDIM)
    c_xt_1 = c_xt_1_1 + c_xt_1_2
    return c_xt_1
def continuous_q_sample(self,x_0, t, noise=None):
    """Forward diffusion q(x_t | x_0): add Gaussian noise at timestep t.

    If `noise` is not supplied, standard-normal noise is sampled on CUDA.
    """
    if(noise is None):
        # sampling from Gaussian distribution
        noise = torch.normal(0, 1, size=x_0.shape, dtype=torch.float32).cuda()
    # xt = sqrt(at_) * x0 + sqrt(1-at_) * noise
    x_t = torch.sqrt(self.Alpha_bar[t]) * x_0 + torch.sqrt(1 - self.Alpha_bar[t]) * noise
    return x_t
def get_time_schedule(self, T=1000, step=5):
    """Return `step + 1` evenly spaced integer timesteps from T - 1 down to -1."""
    schedule = np.linspace(-1, T - 1, num=step + 1, dtype=int)
    return schedule[::-1]
def add_gaussian_noise(self, pts, sigma=0.1, clamp=0.03):
    """Jitter `pts` with Gaussian noise of std `sigma` (robustness probing).

    NOTE: `clamp` is only asserted positive; the clamping itself is
    commented out, so the noise is currently unbounded.
    """
    # input: (b, 3, n)
    assert (clamp > 0)
    # jittered_data = torch.clamp(sigma * torch.randn_like(pts), -1 * clamp, clamp)
    jittered_data = sigma * torch.randn_like(pts).cuda()
    jittered_data = jittered_data + pts
    return jittered_data
def inference(self, input_dict, eval=True, noise_level=None):
    """Run the model in inference mode.

    With `self.condition` enabled, builds a conditional branch (c_point)
    and a noise/segmentation branch (n_point) and runs the dual-branch
    backbone; otherwise runs the plain PT-v3 path. Returns seg_logits and,
    when `eval` is True, the evaluation loss.

    `noise_level`: optional Gaussian-noise std injected into the input
    features before inference (robustness evaluation).
    """
    if(noise_level is not None):
        input_dict["feat"] = self.add_gaussian_noise(input_dict["feat"],sigma=noise_level)
    # the whole batch shares one dataset condition
    condition = input_dict["condition"][0]
    assert condition in self.conditions
    context = self.embedding_table(
        torch.tensor(
            [self.conditions.index(condition)], device=input_dict["coord"].device
        )
    )
    if(self.condition):
        ### ---- PT V3 + DM ---- ###
        c_point = {}
        c_point["coord"] = input_dict["coord"]
        c_point["grid_coord"] = input_dict["grid_coord"]
        c_point["offset"] = input_dict["offset"]
        c_point["condition"] = input_dict["condition"]
        c_point["context"] = context
        n_point = {}
        n_point["coord"] = input_dict["coord"]
        n_point["grid_coord"] = input_dict["grid_coord"]
        n_point["offset"] = input_dict["offset"]
        n_point["condition"] = input_dict["condition"]
        n_point["context"] = context
        # ---- initial input ---- #
        n_point["feat"] = input_dict["feat"]
        # conditional branch input: coords only (3) or full features (6)
        if (self.c_in_channels == 3):
            c_point['feat'] = c_target = input_dict["coord"]
        elif (self.c_in_channels == 6):
            c_point['feat'] = c_target = input_dict["feat"]
        t = 0
        if(self.dm and self.dm_input == "xt"):
            # start from pure noise at the last timestep
            c_point['feat'] = torch.normal(0, 1, size=c_target.shape, dtype=torch.float32).cuda()
            t = self.T - 1
        # ---- initial input ---- #
        N = len(c_target)
        # ---- T steps ---- #
        # one (shared) timestep per point
        ts = t * torch.ones((N, 1), dtype=torch.int64).cuda()
        if (self.T_dim != -1):
            c_point['t_emb'] = calc_t_emb(ts, t_emb_dim=self.T_dim).cuda()
        # ---- T steps ---- #
        # ---- pred c_epsilon and n_x0 ---- #
        c_point, n_point = self.backbone(c_point, n_point)
        # ---- pred c_epsilon and n_x0 ---- #
        ### ---- PT V3 + DM ---- ###
    else:
        ### ---- PT V3 ---- ###
        input_dict["context"] = context
        n_point = self.backbone(n_point=input_dict)
        ### ---- PT V3 ---- ###
    # Backbone added after v1.5.0 return Point instead of feat and use DefaultSegmentorV2
    # TODO: remove this part after make all backbone return Point only.
    if isinstance(n_point, Point):
        feat = n_point.feat
    else:
        feat = n_point
    if self.backbone_mode:
        # PPT serve as a multi-dataset backbone when enable backbone mode
        return feat
    # cosine similarity against the (frozen) CLIP class embeddings,
    # restricted to the classes valid for this dataset
    feat = feat / feat.norm(dim=-1, keepdim=True)
    sim = (
        feat
        @ self.class_embedding[
            self.valid_index[self.conditions.index(condition)], :
        ].t()
    )
    logit_scale = self.logit_scale.exp()
    seg_logits = logit_scale * sim
    if(eval):
        point = {}
        point['n_pred'] = seg_logits
        point['n_target'] = input_dict['segment']
        point['loss_mode'] = "eval"
        loss = self.criteria(point)
        return dict(loss=loss, seg_logits=seg_logits)
    else:
        return dict(seg_logits=seg_logits)
def forward(self, input_dict):
    """Training forward pass.

    With `self.condition` enabled, trains the dual-branch backbone: the
    conditional branch receives a diffused version of its input (when
    `self.dm`) and is supervised toward `c_target`; the other branch
    predicts segmentation logits via CLIP class embeddings. Returns
    dict(loss=...).
    """
    point = {}
    condition = input_dict["condition"][0]
    assert condition in self.conditions
    context = self.embedding_table(
        torch.tensor(
            [self.conditions.index(condition)], device=input_dict["coord"].device
        )
    )
    if (self.condition):
        c_point = {}
        c_point["coord"] = input_dict["coord"]
        c_point["grid_coord"] = input_dict["grid_coord"]
        c_point["offset"] = input_dict["offset"]
        c_point["condition"] = input_dict["condition"]
        c_point["context"] = context
        n_point = {}
        n_point["coord"] = input_dict["coord"]
        n_point["grid_coord"] = input_dict["grid_coord"]
        n_point["offset"] = input_dict["offset"]
        n_point["condition"] = input_dict["condition"]
        n_point["context"] = context
        # Point(...) derives "batch" from "offset"
        c_point = Point(c_point)
        n_point = Point(n_point)
        batch = n_point["batch"]
        B = len(torch.unique(batch))
        # ---- initial input ---- #
        n_point["feat"] = input_dict["feat"]
        # conditional branch input: coords only (3) or full features (6)
        if(self.c_in_channels == 3):
            c_point['feat'] = c_target = input_dict["coord"]
        elif(self.c_in_channels == 6):
            c_point['feat'] = c_target = input_dict["feat"]
        # ---- initial input ---- #
        # ---- continuous diffusion ---- #
        if(self.dm):
            # --- T_embeding ---- #
            # one random timestep per sample, broadcast to its points
            ts = torch.randint(0, self.T, size=(B, 1), dtype=torch.int64).cuda()
            if (self.T_dim != -1):
                c_point["t_emb"] = calc_t_emb(ts, self.T_dim)[batch, :]
            ts = ts[batch, :]
            # --- T_embeding ---- #
            # ---- add noise ---- #
            c_x0 = c_target
            c_noise = torch.normal(0, 1, size=c_x0.shape,dtype=torch.float32).cuda()
            c_xt = self.continuous_q_sample(c_x0, ts, c_noise)
            c_point['feat'] = c_xt
            # ---- add noise ---- #
            # ---- diffusion target ---- #
            # epsilon-prediction: supervise against the sampled noise
            if(self.dm_target == "noise"):
                c_target = c_noise
            # ---- diffusion target ---- #
            # ---- SNR Loss Weight ----
            if (self.dm_min_snr is not None):
                point["snr_loss_weight"] = self.SNR[ts]
            # ---- SNR Loss Weight ----
        # ---- continuous diffusion ---- #
        # ---- output ---- #
        c_point, n_point = self.backbone(c_point, n_point)
        # ---- output ---- #
        point['c_pred'] = c_point["feat"]
        point['c_target'] = c_target
    else:
        ### ---- PT V3 ---- ###
        input_dict["context"] = context
        n_point = self.backbone(n_point=input_dict)
        ### ---- PT V3 ---- ###
    # Backbone added after v1.5.0 return Point instead of feat and use DefaultSegmentorV2
    # TODO: remove this part after make all backbone return Point only.
    if isinstance(n_point, Point):
        feat = n_point.feat
    else:
        feat = n_point
    if self.backbone_mode:
        # PPT serve as a multi-dataset backbone when enable backbone mode
        return feat
    # cosine similarity against CLIP class embeddings, restricted to the
    # classes valid for this dataset
    feat = feat / feat.norm(dim=-1, keepdim=True)
    sim = (
        feat
        @ self.class_embedding[
            self.valid_index[self.conditions.index(condition)], :
        ].t()
    )
    logit_scale = self.logit_scale.exp()
    seg_logits = logit_scale * sim
    point['n_pred'] = seg_logits
    point['n_target'] = input_dict['segment']
    point['loss_mode'] = "train"
    loss = self.criteria(point)
    return dict(loss=loss)
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/point_prompt_training/__init__.py | pointcept/models/point_prompt_training/__init__.py | from .point_prompt_training_v1m1_language_guided import *
from .point_prompt_training_v1m2_decoupled import *
from .prompt_driven_normalization import PDNorm
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/utils/structure.py | pointcept/models/utils/structure.py | import torch
import spconv.pytorch as spconv
try:
import ocnn
except ImportError:
ocnn = None
from addict import Dict
from pointcept.models.utils.serialization import encode, decode
from pointcept.models.utils import offset2batch, batch2offset
class Point(Dict):
"""
Point Structure of Pointcept
A Point (point cloud) in Pointcept is a dictionary that contains various properties of
a batched point cloud. The property with the following names have a specific definition
as follows:
- "coord": original coordinate of point cloud;
- "grid_coord": grid coordinate for specific grid size (related to GridSampling);
Point also support the following optional attributes:
- "offset": if not exist, initialized as batch size is 1;
- "batch": if not exist, initialized as batch size is 1;
- "feat": feature of point cloud, default input of model;
- "grid_size": Grid size of point cloud (related to GridSampling);
(related to Serialization)
- "serialized_depth": depth of serialization, 2 ** depth * grid_size describe the maximum of point cloud range;
- "serialized_code": a list of serialization codes;
- "serialized_order": a list of serialization order determined by code;
- "serialized_inverse": a list of inverse mapping determined by code;
(related to Sparsify: SpConv)
- "sparse_shape": Sparse shape for Sparse Conv Tensor;
- "sparse_conv_feat": SparseConvTensor init with information provide by Point;
"""
def __init__(self, *args, **kwargs):
    """Initialize the Point dict, deriving "batch"/"offset" from each other."""
    super().__init__(*args, **kwargs)
    # Whichever of "batch"/"offset" is absent is derived from the one present.
    has_batch = "batch" in self.keys()
    has_offset = "offset" in self.keys()
    if has_offset and not has_batch:
        self["batch"] = offset2batch(self.offset)
    elif has_batch and not has_offset:
        self["offset"] = batch2offset(self.batch)
def serialization(self, order="z", depth=None, shuffle_orders=False):
    """
    Point Cloud Serialization

    relay on ["grid_coord" or "coord" + "grid_size", "batch", "feat"]

    For each order in `order`, stores a space-filling-curve code per point
    plus the sort permutation and its inverse under "serialized_code",
    "serialized_order" and "serialized_inverse" (each shaped (k, n)).
    """
    assert "batch" in self.keys()
    if "grid_coord" not in self.keys():
        # if you don't want to operate GridSampling in data augmentation,
        # please add the following augmentation into your pipline:
        # dict(type="Copy", keys_dict={"grid_size": 0.01}),
        # (adjust `grid_size` to what your want)
        assert {"grid_size", "coord"}.issubset(self.keys())
        self["grid_coord"] = torch.div(
            self.coord - self.coord.min(0)[0], self.grid_size, rounding_mode="trunc"
        ).int()
    if depth is None:
        # Adaptive measure the depth of serialization cube (length = 2 ^ depth)
        depth = int(self.grid_coord.max()).bit_length()
    self["serialized_depth"] = depth
    # Maximum bit length for serialization code is 63 (int64):
    # 3 * depth bits for the coordinate plus enough bits for the batch index
    assert depth * 3 + len(self.offset).bit_length() <= 63
    # Here we follow OCNN and set the depth limitation to 16 (48bit) for the point position.
    # Although depth is limited to less than 16, we can encode a 655.36^3 (2^16 * 0.01) meter^3
    # cube with a grid size of 0.01 meter. We consider it is enough for the current stage.
    # We can unlock the limitation by optimizing the z-order encoding function if necessary.
    assert depth <= 16
    # The serialization codes are arranged as following structures:
    # [Order1 ([n]),
    #  Order2 ([n]),
    #   ...
    #  OrderN ([n])] (k, n)
    code = [
        encode(self.grid_coord, self.batch, depth, order=order_) for order_ in order
    ]
    code = torch.stack(code)
    order = torch.argsort(code)
    # inverse[i][order[i][j]] = j, i.e. scatter positions back to points
    inverse = torch.zeros_like(order).scatter_(
        dim=1,
        index=order,
        src=torch.arange(0, code.shape[1], device=order.device).repeat(
            code.shape[0], 1
        ),
    )
    if shuffle_orders:
        # randomly permute which serialization order comes first
        perm = torch.randperm(code.shape[0])
        code = code[perm]
        order = order[perm]
        inverse = inverse[perm]
    self["serialized_code"] = code
    self["serialized_order"] = order
    self["serialized_inverse"] = inverse
def sparsify(self, pad=96):
    """
    Point Cloud Serialization

    Point cloud is sparse, here we use "sparsify" to specifically refer to
    preparing "spconv.SparseConvTensor" for SpConv.

    relay on ["grid_coord" or "coord" + "grid_size", "batch", "feat"]

    pad: padding sparse for sparse shape.

    Stores the result under "sparse_conv_feat" and the (possibly padded)
    spatial extent under "sparse_shape".
    """
    assert {"feat", "batch"}.issubset(self.keys())
    if "grid_coord" not in self.keys():
        # if you don't want to operate GridSampling in data augmentation,
        # please add the following augmentation into your pipline:
        # dict(type="Copy", keys_dict={"grid_size": 0.01}),
        # (adjust `grid_size` to what your want)
        assert {"grid_size", "coord"}.issubset(self.keys())
        self["grid_coord"] = torch.div(
            self.coord - self.coord.min(0)[0], self.grid_size, rounding_mode="trunc"
        ).int()
    if "sparse_shape" in self.keys():
        sparse_shape = self.sparse_shape
    else:
        # spatial extent = max coord + pad along each axis
        sparse_shape = torch.add(
            torch.max(self.grid_coord, dim=0).values, pad
        ).tolist()
    # spconv expects indices as (batch_idx, x, y, z) int32 rows
    sparse_conv_feat = spconv.SparseConvTensor(
        features=self.feat,
        indices=torch.cat(
            [self.batch.unsqueeze(-1).int(), self.grid_coord.int()], dim=1
        ).contiguous(),
        spatial_shape=sparse_shape,
        batch_size=self.batch[-1].tolist() + 1,
    )
    self["sparse_shape"] = sparse_shape
    self["sparse_conv_feat"] = sparse_conv_feat
def octreetization(self, depth=None, full_depth=None):
    """
    Point Cloud Octreelization

    Generate octree with OCNN

    relay on ["grid_coord", "batch", "feat"]

    Stores the built octree under "octree" and the used depth under "depth".
    """
    assert (
        ocnn is not None
    ), "Please follow https://github.com/octree-nn/ocnn-pytorch install ocnn."
    assert {"grid_coord", "feat", "batch"}.issubset(self.keys())
    # add 1 to make grid space support shift order
    if depth is None:
        if "depth" in self.keys():
            depth = self.depth
        else:
            depth = int(self.grid_coord.max() + 1).bit_length()
    if full_depth is None:
        full_depth = 2
    self["depth"] = depth
    assert depth <= 16  # maximum in ocnn
    # normalize coords into the range ocnn expects:
    # [0, 2**depth] -> [0, 2] -> [-1, 1]
    coord = self.grid_coord / 2 ** (self.depth - 1) - 1.0
    point = ocnn.octree.Points(
        points=coord,
        features=self.feat,
        batch_id=self.batch.unsqueeze(-1),
        batch_size=self.batch[-1] + 1,
    )
    octree = ocnn.octree.Octree(
        depth=depth,
        full_depth=full_depth,
        batch_size=self.batch[-1] + 1,
        device=coord.device,
    )
    octree.build_octree(point)
    octree.construct_all_neigh()
    self["octree"] = octree
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/utils/checkpoint.py | pointcept/models/utils/checkpoint.py | """
Checkpoint Utils for Models
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import torch
class CheckpointFunction(torch.autograd.Function):
    """Custom autograd function implementing gradient checkpointing.

    Forward runs `run_function` under no_grad (activations are not stored);
    backward re-runs it under enable_grad to recompute activations, then
    differentiates through them — trading compute for memory.
    """

    @staticmethod
    def forward(ctx, run_function, length, *args):
        # args = input tensors (first `length`) followed by parameters that
        # `run_function` depends on but does not take as arguments
        ctx.run_function = run_function
        ctx.input_tensors = list(args[:length])
        ctx.input_params = list(args[length:])
        with torch.no_grad():
            output_tensors = ctx.run_function(*ctx.input_tensors)
        return output_tensors

    @staticmethod
    def backward(ctx, *output_grads):
        # detach + requires_grad so the recomputation builds a fresh graph
        ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
        with torch.enable_grad():
            # Fixes a bug where the first op in run_function modifies the
            # Tensor storage in place, which is not allowed for detach()'d
            # Tensors.
            shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
            output_tensors = ctx.run_function(*shallow_copies)
        # allow_unused: some inputs/params may not influence the outputs
        input_grads = torch.autograd.grad(
            output_tensors,
            ctx.input_tensors + ctx.input_params,
            output_grads,
            allow_unused=True,
        )
        # release references so recomputed activations can be freed promptly
        del ctx.input_tensors
        del ctx.input_params
        del output_tensors
        # (None, None) matches the non-tensor forward args (run_function, length)
        return (None, None) + input_grads
def checkpoint(func, inputs, params, flag):
    """
    Evaluate a function without caching intermediate activations, allowing for
    reduced memory at the expense of extra compute in the backward pass.

    :param func: the function to evaluate.
    :param inputs: the argument sequence to pass to `func`.
    :param params: a sequence of parameters `func` depends on but does not
                   explicitly take as arguments.
    :param flag: if False, disable gradient checkpointing.
    """
    if not flag:
        # checkpointing disabled: plain call
        return func(*inputs)
    packed = tuple(inputs) + tuple(params)
    return CheckpointFunction.apply(func, len(inputs), *packed)
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/utils/misc.py | pointcept/models/utils/misc.py | """
General Utils for Models
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
import torch
@torch.inference_mode()
def offset2bincount(offset):
    """Convert cumulative offsets into per-sample point counts."""
    zero = torch.zeros(1, device=offset.device, dtype=torch.long)
    padded = torch.cat([zero, offset])
    # counts[i] = offset[i] - offset[i - 1], with offset[-1] treated as 0
    return padded[1:] - padded[:-1]
@torch.inference_mode()
def offset2batch(offset):
    """Expand cumulative offsets into a per-point batch-index tensor."""
    counts = offset2bincount(offset)
    sample_ids = torch.arange(len(counts), device=offset.device, dtype=torch.long)
    # repeat each sample id once per point it owns
    return sample_ids.repeat_interleave(counts)
@torch.inference_mode()
def batch2offset(batch):
    """Convert per-point batch indices back into cumulative offsets."""
    counts = batch.bincount()
    return counts.cumsum(dim=0).long()
def off_diagonal(x):
    """Return the off-diagonal elements of a square matrix as a flat tensor.

    Elements are returned in row-major order, diagonal entries excluded.
    """
    n, m = x.shape
    assert n == m
    # boolean mask selecting everything except the main diagonal
    keep = ~torch.eye(n, dtype=torch.bool, device=x.device)
    return x[keep]
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/utils/__init__.py | pointcept/models/utils/__init__.py | from .misc import offset2batch, offset2bincount, batch2offset, off_diagonal
from .checkpoint import checkpoint
from .serialization import encode, decode
from .structure import Point
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/utils/serialization/default.py | pointcept/models/utils/serialization/default.py | import torch
from .z_order import xyz2key as z_order_encode_
from .z_order import key2xyz as z_order_decode_
from .hilbert import encode as hilbert_encode_
from .hilbert import decode as hilbert_decode_
@torch.inference_mode()
def encode(grid_coord, batch=None, depth=16, order="z"):
    """Serialize integer grid coordinates into 1-D space-filling-curve codes.

    The "-trans" variants swap the x and y axes before encoding, giving a
    second ordering of the same point cloud.  When ``batch`` is given, the
    batch index is packed into the bits above the 3 * depth coordinate bits.
    """
    assert order in {"z", "z-trans", "hilbert", "hilbert-trans"}
    if order.endswith("-trans"):
        grid_coord = grid_coord[:, [1, 0, 2]]
    if order.startswith("z"):
        code = z_order_encode(grid_coord, depth=depth)
    elif order.startswith("hilbert"):
        code = hilbert_encode(grid_coord, depth=depth)
    else:  # unreachable: the assert above pins the accepted orders
        raise NotImplementedError
    if batch is not None:
        batch = batch.long()
        code = batch << depth * 3 | code
    return code
@torch.inference_mode()
def decode(code, depth=16, order="z"):
    """Invert :func:`encode`: split codes into grid coordinates and batch ids."""
    assert order in {"z", "hilbert"}
    # Batch index lives above the 3 * depth coordinate bits.
    batch = code >> depth * 3
    code = code & ((1 << depth * 3) - 1)
    dispatch = {"z": z_order_decode, "hilbert": hilbert_decode}
    if order not in dispatch:  # unreachable given the assert above
        raise NotImplementedError
    grid_coord = dispatch[order](code, depth=depth)
    return grid_coord, batch
def z_order_encode(grid_coord: torch.Tensor, depth: int = 16):
    """Morton-encode an (N, 3) integer coordinate tensor into (N,) codes."""
    coords = [grid_coord[:, i].long() for i in range(3)]
    # Batch support is deliberately disabled here; batched codes are
    # maintained one level up in the Point class.
    return z_order_encode_(coords[0], coords[1], coords[2], b=None, depth=depth)
def z_order_decode(code: torch.Tensor, depth):
    """Decode Morton codes back into an (N, 3) integer coordinate tensor."""
    xs, ys, zs = z_order_decode_(code, depth=depth)
    return torch.stack((xs, ys, zs), dim=-1)  # (N, 3)
def hilbert_encode(grid_coord: torch.Tensor, depth: int = 16):
    """Hilbert-encode an (N, 3) integer coordinate tensor using ``depth`` bits per axis."""
    return hilbert_encode_(grid_coord, num_dims=3, num_bits=depth)
def hilbert_decode(code: torch.Tensor, depth: int = 16):
    """Decode Hilbert codes back into (N, 3) integer coordinates (``depth`` bits per axis)."""
    return hilbert_decode_(code, num_dims=3, num_bits=depth)
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/utils/serialization/hilbert.py | pointcept/models/utils/serialization/hilbert.py | """
Hilbert Order
Modified from https://github.com/PrincetonLIPS/numpy-hilbert-curve
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com), Kaixin Xu
Please cite our work if the code is helpful to you.
"""
import torch
def right_shift(binary, k=1, axis=-1):
    """Right shift an array of binary values.

    Parameters:
    -----------
    binary: An ndarray of binary values.
    k: The number of bits to shift. Default 1.
    axis: The axis along which to shift. Default -1.

    Returns:
    --------
    Returns an ndarray with zeros prepended and the end truncated, along
    whatever axis was specified."""

    # Shifting by at least the full width just zeroes everything out.
    if binary.shape[axis] <= k:
        return torch.zeros_like(binary)

    # Drop the last k entries along `axis`.
    slicing = [slice(None)] * len(binary.shape)
    slicing[axis] = slice(None, -k)
    kept = binary[tuple(slicing)]

    # F.pad's padding tuple runs from the LAST dimension backwards, two
    # entries (left, right) per dimension.  The previous code always padded
    # the last dimension regardless of `axis`, which silently produced wrong
    # shapes/values for axis != -1; build the tuple for the requested axis.
    ndim = len(binary.shape)
    pad = [0, 0] * ndim
    pad[2 * (ndim - 1 - (axis % ndim))] = k
    return torch.nn.functional.pad(kept, pad, mode="constant", value=0)
def binary2gray(binary, axis=-1):
    """Convert an array of binary values into Gray codes.

    Uses the classic X ^ (X >> 1) construction.

    Parameters:
    -----------
    binary: An ndarray of binary values.
    axis: The axis along which to compute the gray code. Default=-1.

    Returns:
    --------
    Returns an ndarray of Gray codes.
    """
    return torch.logical_xor(binary, right_shift(binary, axis=axis))
def gray2binary(gray, axis=-1):
    """Convert an array of Gray codes back into binary values.

    Parameters:
    -----------
    gray: An ndarray of gray codes.
    axis: The axis along which to perform Gray decoding. Default=-1.

    Returns:
    --------
    Returns an ndarray of binary values.
    """

    # Loop the log2(bits) number of times necessary, with shift and xor.
    # Each pass XORs in a copy shifted right by `shift` bits and halves the
    # shift (prefix-XOR doubling trick), undoing X ^ (X >> 1).
    shift = 2 ** (torch.Tensor([gray.shape[axis]]).log2().ceil().int() - 1)
    while shift > 0:
        gray = torch.logical_xor(gray, right_shift(gray, shift))
        # Integer-halve the shift; `shift` is a tensor, hence torch.div.
        shift = torch.div(shift, 2, rounding_mode="floor")
    return gray
def encode(locs, num_dims, num_bits):
    """Encode an array of locations in a hypercube into Hilbert integers.

    This is a vectorized-ish version of the Hilbert curve implementation by John
    Skilling as described in:

    Skilling, J. (2004, April). Programming the Hilbert curve. In AIP Conference
    Proceedings (Vol. 707, No. 1, pp. 381-387). American Institute of Physics.

    Params:
    -------
    locs - An ndarray of locations in a hypercube of num_dims dimensions, in
           which each dimension runs from 0 to 2**num_bits-1. The shape can
           be arbitrary, as long as the last dimension of the same has size
           num_dims.
    num_dims - The dimensionality of the hypercube. Integer.
    num_bits - The number of bits for each dimension. Integer.

    Returns:
    --------
    The output is an ndarray of uint64 integers with the same shape as the
    input, excluding the last dimension, which needs to be num_dims.
    """

    # Keep around the original shape for later.
    orig_shape = locs.shape
    # Masks used to unpack/pack bytes into individual bits.
    bitpack_mask = 1 << torch.arange(0, 8).to(locs.device)
    bitpack_mask_rev = bitpack_mask.flip(-1)

    if orig_shape[-1] != num_dims:
        raise ValueError(
            """
      The shape of locs was surprising in that the last dimension was of size
      %d, but num_dims=%d. These need to be equal.
      """
            % (orig_shape[-1], num_dims)
        )

    if num_dims * num_bits > 63:
        raise ValueError(
            """
      num_dims=%d and num_bits=%d for %d bits total, which can't be encoded
      into a int64. Are you sure you need that many points on your Hilbert
      curve?
      """
            % (num_dims, num_bits, num_dims * num_bits)
        )

    # Treat the location integers as 64-bit unsigned and then split them up into
    # a sequence of uint8s. Preserve the association by dimension.
    locs_uint8 = locs.long().view(torch.uint8).reshape((-1, num_dims, 8)).flip(-1)

    # Now turn these into bits and truncate to num_bits.
    gray = (
        locs_uint8.unsqueeze(-1)
        .bitwise_and(bitpack_mask_rev)
        .ne(0)
        .byte()
        .flatten(-2, -1)[..., -num_bits:]
    )

    # Run the decoding process the other way (Skilling's transform in reverse).
    # Iterate forwards through the bits.
    for bit in range(0, num_bits):
        # Iterate forwards through the dimensions.
        for dim in range(0, num_dims):
            # Identify which ones have this bit active.
            mask = gray[:, dim, bit]
            # Where this bit is on, invert the 0 dimension for lower bits.
            gray[:, 0, bit + 1 :] = torch.logical_xor(
                gray[:, 0, bit + 1 :], mask[:, None]
            )
            # Where the bit is off, exchange the lower bits with the 0 dimension.
            to_flip = torch.logical_and(
                torch.logical_not(mask[:, None]).repeat(1, gray.shape[2] - bit - 1),
                torch.logical_xor(gray[:, 0, bit + 1 :], gray[:, dim, bit + 1 :]),
            )
            gray[:, dim, bit + 1 :] = torch.logical_xor(
                gray[:, dim, bit + 1 :], to_flip
            )
            gray[:, 0, bit + 1 :] = torch.logical_xor(gray[:, 0, bit + 1 :], to_flip)

    # Now flatten out.
    gray = gray.swapaxes(1, 2).reshape((-1, num_bits * num_dims))

    # Convert Gray back to binary.
    hh_bin = gray2binary(gray)

    # Pad back out to 64 bits.
    extra_dims = 64 - num_bits * num_dims
    padded = torch.nn.functional.pad(hh_bin, (extra_dims, 0), "constant", 0)

    # Convert binary values into uint8s.
    hh_uint8 = (
        (padded.flip(-1).reshape((-1, 8, 8)) * bitpack_mask)
        .sum(2)
        .squeeze()
        .type(torch.uint8)
    )

    # Convert uint8s into uint64s (reinterpret the 8 bytes as one int64).
    hh_uint64 = hh_uint8.view(torch.int64).squeeze()

    return hh_uint64
def decode(hilberts, num_dims, num_bits):
    """Decode an array of Hilbert integers into locations in a hypercube.

    This is a vectorized-ish version of the Hilbert curve implementation by John
    Skilling as described in:

    Skilling, J. (2004, April). Programming the Hilbert curve. In AIP Conference
    Proceedings (Vol. 707, No. 1, pp. 381-387). American Institute of Physics.

    Params:
    -------
    hilberts - An ndarray of Hilbert integers. Must be an integer dtype and
               cannot have fewer bits than num_dims * num_bits.
    num_dims - The dimensionality of the hypercube. Integer.
    num_bits - The number of bits for each dimension. Integer.

    Returns:
    --------
    The output is an ndarray of unsigned integers with the same shape as hilberts
    but with an additional dimension of size num_dims.
    """

    if num_dims * num_bits > 64:
        raise ValueError(
            """
      num_dims=%d and num_bits=%d for %d bits total, which can't be encoded
      into a uint64. Are you sure you need that many points on your Hilbert
      curve?
      """
            # BUG FIX: the format string has three %d placeholders but only
            # two arguments were supplied, so this error path raised a
            # TypeError instead of the intended ValueError message.
            % (num_dims, num_bits, num_dims * num_bits)
        )

    # Handle the case where we got handed a naked integer.
    hilberts = torch.atleast_1d(hilberts)

    # Keep around the shape for later.
    orig_shape = hilberts.shape
    bitpack_mask = 2 ** torch.arange(0, 8).to(hilberts.device)
    bitpack_mask_rev = bitpack_mask.flip(-1)

    # Treat each of the hilberts as a sequence of eight uint8.
    # This treats all of the inputs as uint64 and makes things uniform.
    hh_uint8 = (
        hilberts.ravel().type(torch.int64).view(torch.uint8).reshape((-1, 8)).flip(-1)
    )

    # Turn these lists of uints into lists of bits and then truncate to the size
    # we actually need for using Skilling's procedure.
    hh_bits = (
        hh_uint8.unsqueeze(-1)
        .bitwise_and(bitpack_mask_rev)
        .ne(0)
        .byte()
        .flatten(-2, -1)[:, -num_dims * num_bits :]
    )

    # Take the sequence of bits and Gray-code it.
    gray = binary2gray(hh_bits)

    # There has got to be a better way to do this.
    # I could index them differently, but the eventual packbits likes it this way.
    gray = gray.reshape((-1, num_bits, num_dims)).swapaxes(1, 2)

    # Iterate backwards through the bits.
    for bit in range(num_bits - 1, -1, -1):
        # Iterate backwards through the dimensions.
        for dim in range(num_dims - 1, -1, -1):
            # Identify which ones have this bit active.
            mask = gray[:, dim, bit]
            # Where this bit is on, invert the 0 dimension for lower bits.
            gray[:, 0, bit + 1 :] = torch.logical_xor(
                gray[:, 0, bit + 1 :], mask[:, None]
            )
            # Where the bit is off, exchange the lower bits with the 0 dimension.
            to_flip = torch.logical_and(
                torch.logical_not(mask[:, None]),
                torch.logical_xor(gray[:, 0, bit + 1 :], gray[:, dim, bit + 1 :]),
            )
            gray[:, dim, bit + 1 :] = torch.logical_xor(
                gray[:, dim, bit + 1 :], to_flip
            )
            gray[:, 0, bit + 1 :] = torch.logical_xor(gray[:, 0, bit + 1 :], to_flip)

    # Pad back out to 64 bits.
    extra_dims = 64 - num_bits
    padded = torch.nn.functional.pad(gray, (extra_dims, 0), "constant", 0)

    # Now chop these up into blocks of 8.
    locs_chopped = padded.flip(-1).reshape((-1, num_dims, 8, 8))

    # Take those blocks and turn them unto uint8s.
    locs_uint8 = (locs_chopped * bitpack_mask).sum(3).squeeze().type(torch.uint8)

    # Finally, treat these as uint64s (reinterpret 8 bytes as one int64).
    flat_locs = locs_uint8.view(torch.int64)

    # Return them in the expected shape.
    return flat_locs.reshape((*orig_shape, num_dims))
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/utils/serialization/z_order.py | pointcept/models/utils/serialization/z_order.py | # --------------------------------------------------------
# Octree-based Sparse Convolutional Neural Networks
# Copyright (c) 2022 Peng-Shuai Wang <wangps@hotmail.com>
# Licensed under The MIT License [see LICENSE for details]
# Written by Peng-Shuai Wang
# --------------------------------------------------------
import torch
from typing import Optional, Union
class KeyLUT:
    """Per-device cache of lookup tables for Morton (z-order) encode/decode.

    The tables are built once on CPU at construction time and lazily copied
    to other devices on first use.
    """

    def __init__(self):
        r256 = torch.arange(256, dtype=torch.int64)
        r512 = torch.arange(512, dtype=torch.int64)
        zeros = torch.zeros(256, dtype=torch.int64)
        cpu = torch.device("cpu")

        # One 256-entry table per axis: table[v] has v's 8 bits spread out so
        # the three per-axis tables can simply be OR-ed together.
        self._encode = {
            cpu: (
                self.xyz2key(r256, zeros, zeros, 8),
                self.xyz2key(zeros, r256, zeros, 8),
                self.xyz2key(zeros, zeros, r256, 8),
            )
        }
        # 512-entry table covering 9 interleaved bits (3 per axis).
        self._decode = {cpu: self.key2xyz(r512, 9)}

    def encode_lut(self, device=torch.device("cpu")):
        """Return the (EX, EY, EZ) encode tables on `device`, copying lazily."""
        if device not in self._encode:
            cpu = torch.device("cpu")
            self._encode[device] = tuple(t.to(device) for t in self._encode[cpu])
        return self._encode[device]

    def decode_lut(self, device=torch.device("cpu")):
        """Return the (x, y, z) decode tables on `device`, copying lazily."""
        if device not in self._decode:
            cpu = torch.device("cpu")
            self._decode[device] = tuple(t.to(device) for t in self._decode[cpu])
        return self._decode[device]

    def xyz2key(self, x, y, z, depth):
        """Interleave the low `depth` bits of x, y, z (x in the highest slot)."""
        key = torch.zeros_like(x)
        for bit in range(depth):
            m = 1 << bit
            key = key | (
                ((x & m) << (2 * bit + 2))
                | ((y & m) << (2 * bit + 1))
                | ((z & m) << (2 * bit + 0))
            )
        return key

    def key2xyz(self, key, depth):
        """De-interleave a Morton key back into its x, y, z components."""
        x = torch.zeros_like(key)
        y = torch.zeros_like(key)
        z = torch.zeros_like(key)
        for bit in range(depth):
            x = x | ((key & (1 << (3 * bit + 2))) >> (2 * bit + 2))
            y = y | ((key & (1 << (3 * bit + 1))) >> (2 * bit + 1))
            z = z | ((key & (1 << (3 * bit + 0))) >> (2 * bit + 0))
        return x, y, z
_key_lut = KeyLUT()
def xyz2key(
    x: torch.Tensor,
    y: torch.Tensor,
    z: torch.Tensor,
    b: Optional[Union[torch.Tensor, int]] = None,
    depth: int = 16,
):
    r"""Encodes :attr:`x`, :attr:`y`, :attr:`z` coordinates to the shuffled keys
    based on pre-computed look up tables. The speed of this function is much
    faster than the method based on for-loop.

    Args:
      x (torch.Tensor): The x coordinate.
      y (torch.Tensor): The y coordinate.
      z (torch.Tensor): The z coordinate.
      b (torch.Tensor or int): The batch index of the coordinates, and should be
          smaller than 32768. If :attr:`b` is :obj:`torch.Tensor`, the size of
          :attr:`b` must be the same as :attr:`x`, :attr:`y`, and :attr:`z`.
      depth (int): The depth of the shuffled key, and must be smaller than 17 (< 17).
    """

    EX, EY, EZ = _key_lut.encode_lut(x.device)
    x, y, z = x.long(), y.long(), z.long()

    # Low 8 bits of each axis via the 256-entry tables (24 interleaved bits).
    mask = 255 if depth > 8 else (1 << depth) - 1
    key = EX[x & mask] | EY[y & mask] | EZ[z & mask]
    if depth > 8:
        # The remaining (depth - 8) bits per axis go above the low 24 key bits.
        mask = (1 << (depth - 8)) - 1
        key16 = EX[(x >> 8) & mask] | EY[(y >> 8) & mask] | EZ[(z >> 8) & mask]
        key = key16 << 24 | key

    if b is not None:
        # BUG FIX: `b` may be a plain Python int (see docstring), and calling
        # `.long()` on an int raised AttributeError; only cast tensors.
        if isinstance(b, torch.Tensor):
            b = b.long()
        key = b << 48 | key

    return key
def key2xyz(key: torch.Tensor, depth: int = 16):
    r"""Decodes the shuffled key to :attr:`x`, :attr:`y`, :attr:`z` coordinates
    and the batch index based on pre-computed look up tables.

    Args:
      key (torch.Tensor): The shuffled key.
      depth (int): The depth of the shuffled key, and must be smaller than 17 (< 17).
    """

    DX, DY, DZ = _key_lut.decode_lut(key.device)
    x, y, z = torch.zeros_like(key), torch.zeros_like(key), torch.zeros_like(key)

    # The batch id lives in the bits above bit 48 (see xyz2key).
    b = key >> 48
    key = key & ((1 << 48) - 1)

    # Consume 9 interleaved bits (3 per axis) per iteration via the LUTs.
    n = (depth + 2) // 3
    for i in range(n):
        k = key >> (i * 9) & 511
        x = x | (DX[k] << (i * 3))
        y = y | (DY[k] << (i * 3))
        z = z | (DZ[k] << (i * 3))

    return x, y, z, b
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/utils/serialization/__init__.py | pointcept/models/utils/serialization/__init__.py | from .default import (
encode,
decode,
z_order_encode,
z_order_decode,
hilbert_encode,
hilbert_decode,
)
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/octformer/octformer_v1m1_base.py | pointcept/models/octformer/octformer_v1m1_base.py | """
Octree Transformer
Modified from https://github.com/octree-nn/octformer
Author: Xiaoyang Wu (xiaoyang.wu.cs@gmail.com)
Please cite our work if the code is helpful to you.
"""
from typing import Optional, List, Dict
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint
try:
import ocnn
from ocnn.octree import Octree, Points
except ImportError:
from pointcept.utils.misc import DummyClass
ocnn = None
Octree = DummyClass
Points = DummyClass
try:
import dwconv
except ImportError:
dwconv = None
from pointcept.models.builder import MODELS
from pointcept.models.utils import offset2batch
class OctreeT(Octree):
    """Octree wrapper that precomputes per-depth attention metadata.

    Copies the state of an existing ``Octree`` and, for every depth in
    ``[start_depth, max_depth]``, builds the padded batch indices, patch /
    dilated attention masks, and relative positions consumed by
    :class:`OctreeAttention`.
    """

    def __init__(
        self,
        octree: Octree,
        patch_size: int = 24,
        dilation: int = 4,
        nempty: bool = True,
        max_depth: Optional[int] = None,
        start_depth: Optional[int] = None,
        **kwargs
    ):
        super().__init__(octree.depth, octree.full_depth)
        # Adopt the source octree's internal state wholesale.
        self.__dict__.update(octree.__dict__)

        self.patch_size = patch_size
        self.dilation = dilation
        self.nempty = nempty
        self.max_depth = max_depth or self.depth
        self.start_depth = start_depth or self.full_depth
        # Large negative value added to attention logits of cross-sample pairs.
        self.invalid_mask_value = -1e3
        assert self.start_depth > 1

        self.block_num = patch_size * dilation
        # Node counts per depth (non-empty nodes only when `nempty`), padded
        # up to a multiple of block_num so tensors tile evenly into patches.
        self.nnum_t = self.nnum_nempty if nempty else self.nnum
        self.nnum_a = ((self.nnum_t / self.block_num).ceil() * self.block_num).int()

        # Per-depth caches, indexed by depth (entries below start_depth stay None).
        num = self.max_depth + 1
        self.batch_idx = [None] * num
        self.patch_mask = [None] * num
        self.dilate_mask = [None] * num
        self.rel_pos = [None] * num
        self.dilate_pos = [None] * num
        self.build_t()

    def build_t(self):
        # Precompute metadata for every depth used by the transformer stages.
        for d in range(self.start_depth, self.max_depth + 1):
            self.build_batch_idx(d)
            self.build_attn_mask(d)
            self.build_rel_pos(d)

    def build_batch_idx(self, depth: int):
        batch = self.batch_id(depth, self.nempty)
        # Pad the tail with `batch_size` (an id no real sample uses) so padded
        # nodes never match a real sample in the attention masks.
        self.batch_idx[depth] = self.patch_partition(batch, depth, self.batch_size)

    def build_attn_mask(self, depth: int):
        batch = self.batch_idx[depth]
        # Contiguous patches of size `patch_size`.
        mask = batch.view(-1, self.patch_size)
        self.patch_mask[depth] = self._calc_attn_mask(mask)

        # Dilated grouping: stride-`dilation` interleaving within each block.
        mask = batch.view(-1, self.patch_size, self.dilation)
        mask = mask.transpose(1, 2).reshape(-1, self.patch_size)
        self.dilate_mask[depth] = self._calc_attn_mask(mask)

    def _calc_attn_mask(self, mask: torch.Tensor):
        # Non-zero wherever two positions belong to different samples.
        attn_mask = mask.unsqueeze(2) - mask.unsqueeze(1)
        attn_mask = attn_mask.masked_fill(attn_mask != 0, self.invalid_mask_value)
        return attn_mask

    def build_rel_pos(self, depth: int):
        key = self.key(depth, self.nempty)
        key = self.patch_partition(key, depth)
        x, y, z, _ = ocnn.octree.key2xyz(key, depth)
        xyz = torch.stack([x, y, z], dim=1)

        # Pairwise coordinate differences within each patch.
        xyz = xyz.view(-1, self.patch_size, 3)
        self.rel_pos[depth] = xyz.unsqueeze(2) - xyz.unsqueeze(1)

        # Same, but for the dilated grouping of nodes.
        xyz = xyz.view(-1, self.patch_size, self.dilation, 3)
        xyz = xyz.transpose(1, 2).reshape(-1, self.patch_size, 3)
        self.dilate_pos[depth] = xyz.unsqueeze(2) - xyz.unsqueeze(1)

    def patch_partition(self, data: torch.Tensor, depth: int, fill_value=0):
        # Append `fill_value` rows so the length becomes nnum_a[depth].
        num = self.nnum_a[depth] - self.nnum_t[depth]
        tail = data.new_full((num,) + data.shape[1:], fill_value)
        return torch.cat([data, tail], dim=0)

    def patch_reverse(self, data: torch.Tensor, depth: int):
        # Drop the padded tail added by patch_partition.
        return data[: self.nnum_t[depth]]
class MLP(torch.nn.Module):
    """Two-layer feed-forward block: Linear -> activation -> dropout -> Linear -> dropout."""

    def __init__(
        self,
        in_features: int,
        hidden_features: Optional[int] = None,
        out_features: Optional[int] = None,
        activation=torch.nn.GELU,
        drop: float = 0.0,
        **kwargs
    ):
        super().__init__()
        # Hidden/output widths default to the input width when not given.
        self.in_features = in_features
        self.out_features = out_features or in_features
        self.hidden_features = hidden_features or in_features

        self.fc1 = torch.nn.Linear(self.in_features, self.hidden_features)
        self.act = activation()
        self.fc2 = torch.nn.Linear(self.hidden_features, self.out_features)
        self.drop = torch.nn.Dropout(drop, inplace=True)

    def forward(self, data: torch.Tensor):
        hidden = self.drop(self.act(self.fc1(data)))
        return self.drop(self.fc2(hidden))
class OctreeDWConvBn(torch.nn.Module):
    """Depth-wise octree convolution followed by BatchNorm (no activation)."""

    def __init__(
        self,
        in_channels: int,
        kernel_size: List[int] = [3],
        stride: int = 1,
        nempty: bool = False,
    ):
        super().__init__()
        # NOTE(review): `stride` is accepted but never forwarded to the conv —
        # confirm this is intentional (the dwconv call below takes no stride).
        self.conv = dwconv.OctreeDWConv(
            in_channels, kernel_size, nempty, use_bias=False
        )
        self.bn = torch.nn.BatchNorm1d(in_channels)

    def forward(self, data: torch.Tensor, octree: Octree, depth: int):
        out = self.conv(data, octree, depth)
        out = self.bn(out)
        return out
class RPE(torch.nn.Module):
    """Relative position encoding: maps integer (dx, dy, dz) offsets to per-head attention biases."""

    def __init__(self, patch_size: int, num_heads: int, dilation: int = 1):
        super().__init__()
        self.patch_size = patch_size
        self.num_heads = num_heads
        self.dilation = dilation
        # Offsets are clamped to [-pos_bnd, pos_bnd]; the table holds one row
        # per (axis, offset) pair.
        self.pos_bnd = self.get_pos_bnd(patch_size)
        self.rpe_num = 2 * self.pos_bnd + 1
        self.rpe_table = torch.nn.Parameter(torch.zeros(3 * self.rpe_num, num_heads))
        torch.nn.init.trunc_normal_(self.rpe_table, std=0.02)

    def get_pos_bnd(self, patch_size: int):
        """Bound on representable per-axis offsets, widened for dilated attention."""
        return int(0.8 * patch_size * self.dilation**0.5)

    def xyz2idx(self, xyz: torch.Tensor):
        """Turn (..., 3) integer offsets into row indices of ``rpe_table``."""
        axis_base = torch.arange(3, device=xyz.device) * self.rpe_num
        clamped = xyz.clamp(-self.pos_bnd, self.pos_bnd)
        return clamped + (self.pos_bnd + axis_base)

    def forward(self, xyz):
        idx = self.xyz2idx(xyz)
        bias = self.rpe_table[idx.reshape(-1)]
        # Sum the x/y/z contributions per pair of positions.
        bias = bias.view(idx.shape + (-1,)).sum(3)
        return bias.permute(0, 3, 1, 2)  # (N, K, K, H) -> (N, H, K, K)

    def extra_repr(self) -> str:
        return "num_heads={}, pos_bnd={}, dilation={}".format(
            self.num_heads, self.pos_bnd, self.dilation
        )  # noqa
class OctreeAttention(torch.nn.Module):
    """Multi-head self-attention over octree patches, with optional dilation
    and relative position encoding (RPE)."""

    def __init__(
        self,
        dim: int,
        patch_size: int,
        num_heads: int,
        qkv_bias: bool = True,
        qk_scale: Optional[float] = None,
        attn_drop: float = 0.0,
        proj_drop: float = 0.0,
        dilation: int = 1,
        use_rpe: bool = True,
    ):
        super().__init__()
        self.dim = dim
        self.patch_size = patch_size
        self.num_heads = num_heads
        self.dilation = dilation
        self.use_rpe = use_rpe
        # Default scaling is 1/sqrt(head_dim) unless overridden.
        self.scale = qk_scale or (dim // num_heads) ** -0.5

        self.qkv = torch.nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = torch.nn.Dropout(attn_drop)
        self.proj = torch.nn.Linear(dim, dim)
        self.proj_drop = torch.nn.Dropout(proj_drop)
        self.softmax = torch.nn.Softmax(dim=-1)
        self.rpe = RPE(patch_size, num_heads, dilation) if use_rpe else None

    def forward(self, data: torch.Tensor, octree: OctreeT, depth: int):
        H = self.num_heads
        K = self.patch_size
        C = self.dim
        D = self.dilation

        # patch partition: pad to a multiple of the block size, then pick the
        # precomputed masks/relative positions matching the grouping.
        data = octree.patch_partition(data, depth)
        if D > 1:  # dilation: regroup nodes with stride D
            rel_pos = octree.dilate_pos[depth]
            mask = octree.dilate_mask[depth]
            data = data.view(-1, K, D, C).transpose(1, 2).reshape(-1, C)
        else:
            rel_pos = octree.rel_pos[depth]
            mask = octree.patch_mask[depth]
        data = data.view(-1, K, C)

        # qkv: (N, K, C) -> 3 x (N, H, K, C // H)
        qkv = self.qkv(data).reshape(-1, K, 3, H, C // H).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # (N, H, K, C')
        q = q * self.scale

        # attn: scaled dot-product + RPE bias + cross-sample mask
        attn = q @ k.transpose(-2, -1)  # (N, H, K, K)
        attn = self.apply_rpe(attn, rel_pos)  # (N, H, K, K)
        attn = attn + mask.unsqueeze(1)
        attn = self.softmax(attn)
        attn = self.attn_drop(attn)
        data = (attn @ v).transpose(1, 2).reshape(-1, C)

        # patch reverse: undo the dilation regrouping and drop padding.
        if D > 1:  # dilation
            data = data.view(-1, D, K, C).transpose(1, 2).reshape(-1, C)
        data = octree.patch_reverse(data, depth)

        # output projection
        data = self.proj(data)
        data = self.proj_drop(data)
        return data

    def apply_rpe(self, attn, rel_pos):
        # Add relative-position bias when enabled; no-op otherwise.
        if self.use_rpe:
            attn = attn + self.rpe(rel_pos)
        return attn

    def extra_repr(self) -> str:
        return "dim={}, patch_size={}, num_heads={}, dilation={}".format(
            self.dim, self.patch_size, self.num_heads, self.dilation
        )  # noqa
class OctFormerBlock(torch.nn.Module):
    """Pre-norm transformer block on octree nodes.

    Pipeline: depth-wise-conv positional branch (``cpe``, residual) ->
    LayerNorm + octree attention (residual, drop-path) -> LayerNorm + MLP
    (residual, drop-path).
    """

    def __init__(
        self,
        dim: int,
        num_heads: int,
        patch_size: int = 32,
        dilation: int = 0,
        mlp_ratio: float = 4.0,
        qkv_bias: bool = True,
        qk_scale: Optional[float] = None,
        attn_drop: float = 0.0,
        proj_drop: float = 0.0,
        drop_path: float = 0.0,
        nempty: bool = True,
        activation: torch.nn.Module = torch.nn.GELU,
        **kwargs
    ):
        super().__init__()
        self.norm1 = torch.nn.LayerNorm(dim)
        self.attention = OctreeAttention(
            dim,
            patch_size,
            num_heads,
            qkv_bias,
            qk_scale,
            attn_drop,
            proj_drop,
            dilation,
        )
        self.norm2 = torch.nn.LayerNorm(dim)
        self.mlp = MLP(dim, int(dim * mlp_ratio), dim, activation, proj_drop)
        # Stochastic depth that is aware of octree batching.
        self.drop_path = ocnn.nn.OctreeDropPath(drop_path, nempty)
        self.cpe = OctreeDWConvBn(dim, nempty=nempty)

    def forward(self, data: torch.Tensor, octree: OctreeT, depth: int):
        # Conv-based positional branch, added residually before attention.
        data = self.cpe(data, octree, depth) + data
        attn = self.attention(self.norm1(data), octree, depth)
        data = data + self.drop_path(attn, octree, depth)
        ffn = self.mlp(self.norm2(data))
        data = data + self.drop_path(ffn, octree, depth)
        return data
class OctFormerStage(torch.nn.Module):
    """A stack of ``num_blocks`` OctFormer blocks at one octree depth.

    Even-indexed blocks use dilation 1 (local patches); odd-indexed blocks
    use the configured ``dilation``, alternating local and dilated attention.
    Optionally wraps each block in gradient checkpointing during training.
    """

    def __init__(
        self,
        dim: int,
        num_heads: int,
        patch_size: int = 32,
        dilation: int = 0,
        mlp_ratio: float = 4.0,
        qkv_bias: bool = True,
        qk_scale: Optional[float] = None,
        attn_drop: float = 0.0,
        proj_drop: float = 0.0,
        drop_path: float = 0.0,
        nempty: bool = True,
        activation: torch.nn.Module = torch.nn.GELU,
        interval: int = 6,
        use_checkpoint: bool = True,
        num_blocks: int = 2,
        octformer_block=OctFormerBlock,
        **kwargs
    ):
        super().__init__()
        self.num_blocks = num_blocks
        self.use_checkpoint = use_checkpoint
        self.interval = interval  # normalization interval
        self.num_norms = (num_blocks - 1) // self.interval
        self.blocks = torch.nn.ModuleList(
            [
                octformer_block(
                    dim=dim,
                    num_heads=num_heads,
                    patch_size=patch_size,
                    # Alternate local (dilation 1) and dilated attention.
                    dilation=1 if (i % 2 == 0) else dilation,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    attn_drop=attn_drop,
                    proj_drop=proj_drop,
                    # `drop_path` may be a per-block list (stochastic-depth schedule).
                    drop_path=(
                        drop_path[i] if isinstance(drop_path, list) else drop_path
                    ),
                    nempty=nempty,
                    activation=activation,
                )
                for i in range(num_blocks)
            ]
        )
        # Interval normalization is currently disabled; kept for reference.
        # self.norms = torch.nn.ModuleList([
        #     torch.nn.BatchNorm1d(dim) for _ in range(self.num_norms)])

    def forward(self, data: torch.Tensor, octree: OctreeT, depth: int):
        for i in range(self.num_blocks):
            # Trade compute for memory during training when enabled.
            if self.use_checkpoint and self.training:
                data = checkpoint(self.blocks[i], data, octree, depth)
            else:
                data = self.blocks[i](data, octree, depth)
            # if i % self.interval == 0 and i != 0:
            #     data = self.norms[(i - 1) // self.interval](data)
        return data
class OctFormerDecoder(torch.nn.Module):
    """FPN-style decoder over multi-depth octree features.

    Projects every stage's features to ``fpn_channel``, refines each level
    with a 3x3 octree conv, accumulates all levels upsampled to the finest
    feature depth, then applies ``head_up`` deconvolutions to go finer still.
    """

    def __init__(
        self, channels: List[int], fpn_channel: int, nempty: bool, head_up: int = 1
    ):
        super().__init__()
        self.head_up = head_up
        self.num_stages = len(channels)
        # Note: conv1x1[0] matches the LAST entry of `channels` (coarsest
        # stage), since the list is built in reverse stage order.
        self.conv1x1 = torch.nn.ModuleList(
            [
                torch.nn.Linear(channels[i], fpn_channel)
                for i in range(self.num_stages - 1, -1, -1)
            ]
        )
        self.upsample = ocnn.nn.OctreeUpsample("nearest", nempty)
        self.conv3x3 = torch.nn.ModuleList(
            [
                ocnn.modules.OctreeConvBnRelu(
                    fpn_channel, fpn_channel, kernel_size=[3], stride=1, nempty=nempty
                )
                for _ in range(self.num_stages)
            ]
        )
        self.up_conv = torch.nn.ModuleList(
            [
                ocnn.modules.OctreeDeconvBnRelu(
                    fpn_channel, fpn_channel, kernel_size=[3], stride=2, nempty=nempty
                )
                for _ in range(self.head_up)
            ]
        )

    def forward(self, features: Dict[int, torch.Tensor], octree: Octree):
        # `features` maps octree depth -> feature tensor; min depth = coarsest.
        depth = min(features.keys())
        depth_max = max(features.keys())
        assert self.num_stages == len(features)

        # Start from the coarsest level and walk towards finer depths,
        # accumulating each refined level upsampled to depth_max.
        feature = self.conv1x1[0](features[depth])
        conv_out = self.conv3x3[0](feature, octree, depth)
        out = self.upsample(conv_out, octree, depth, depth_max)
        for i in range(1, self.num_stages):
            depth_i = depth + i
            feature = self.upsample(feature, octree, depth_i - 1)
            feature = self.conv1x1[i](features[depth_i]) + feature
            conv_out = self.conv3x3[i](feature, octree, depth_i)
            out = out + self.upsample(conv_out, octree, depth_i, depth_max)
        # Extra deconvolutions past depth_max for the segmentation head.
        for i in range(self.head_up):
            out = self.up_conv[i](out, octree, depth_max + i)
        return out
class PatchEmbed(torch.nn.Module):
    """Convolutional stem: embeds raw node features while downsampling
    ``num_down`` octree depths, ending with a projection to ``dim`` channels."""

    def __init__(
        self,
        in_channels: int = 3,
        dim: int = 96,
        num_down: int = 2,
        nempty: bool = True,
        **kwargs
    ):
        super().__init__()
        self.num_stages = num_down
        # The stem reduces the working octree depth by `num_down`.
        self.delta_depth = -num_down
        # Channel schedule doubling up to `dim`: [dim / 2**num_down, ..., dim].
        channels = [int(dim * 2**i) for i in range(-self.num_stages, 1)]

        self.convs = torch.nn.ModuleList(
            [
                ocnn.modules.OctreeConvBnRelu(
                    in_channels if i == 0 else channels[i],
                    channels[i],
                    kernel_size=[3],
                    stride=1,
                    nempty=nempty,
                )
                for i in range(self.num_stages)
            ]
        )
        self.downsamples = torch.nn.ModuleList(
            [
                ocnn.modules.OctreeConvBnRelu(
                    channels[i],
                    channels[i + 1],
                    kernel_size=[2],
                    stride=2,
                    nempty=nempty,
                )
                for i in range(self.num_stages)
            ]
        )
        self.proj = ocnn.modules.OctreeConvBnRelu(
            channels[-1], dim, kernel_size=[3], stride=1, nempty=nempty
        )

    def forward(self, data: torch.Tensor, octree: Octree, depth: int):
        # TODO: reduce to single input
        for i in range(self.num_stages):
            depth_i = depth - i
            data = self.convs[i](data, octree, depth_i)
            data = self.downsamples[i](data, octree, depth_i)
        # Uses the loop-final depth_i: projection happens one depth below the
        # last downsample (i.e. at depth - num_down).
        data = self.proj(data, octree, depth_i - 1)
        return data
class Downsample(torch.nn.Module):
    """Stride-2 octree convolution followed by BatchNorm, used between stages."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: List[int] = (2,),
        nempty: bool = True,
    ):
        super().__init__()
        self.norm = torch.nn.BatchNorm1d(out_channels)
        self.conv = ocnn.nn.OctreeConv(
            in_channels,
            out_channels,
            kernel_size,
            stride=2,
            nempty=nempty,
            use_bias=True,
        )

    def forward(self, data: torch.Tensor, octree: Octree, depth: int):
        data = self.conv(data, octree, depth)
        data = self.norm(data)
        return data
@MODELS.register_module("OctFormer-v1m1")
class OctFormer(torch.nn.Module):
    """OctFormer segmentation model: octree construction, conv stem,
    hierarchical attention stages, FPN decoder, and a point-wise seg head.

    The forward pass expects ``data_dict`` with "coord", "normal", "feat",
    and "offset" entries, and returns per-point logits.
    """

    def __init__(
        self,
        in_channels,
        num_classes,
        fpn_channels=168,
        channels=(96, 192, 384, 384),
        num_blocks=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 24),
        patch_size=26,
        stem_down=2,
        head_up=2,
        dilation=4,
        drop_path=0.5,
        nempty=True,
        octree_scale_factor=10.24,
        octree_depth=11,
        octree_full_depth=2,
    ):
        super().__init__()
        assert ocnn is not None, "Please follow `README.md` to install ocnn.`"
        assert dwconv is not None, "Please follow `README.md` to install dwconv.`"
        self.patch_size = patch_size
        self.dilation = dilation
        self.nempty = nempty
        self.num_stages = len(num_blocks)
        self.stem_down = stem_down
        self.octree_scale_factor = octree_scale_factor
        self.octree_depth = octree_depth
        self.octree_full_depth = octree_full_depth
        # Linear stochastic-depth schedule across all blocks of all stages.
        drop_ratio = torch.linspace(0, drop_path, sum(num_blocks)).tolist()

        self.patch_embed = PatchEmbed(in_channels, channels[0], stem_down, nempty)
        self.layers = torch.nn.ModuleList(
            [
                OctFormerStage(
                    dim=channels[i],
                    num_heads=num_heads[i],
                    patch_size=patch_size,
                    # Slice this stage's share of the drop-path schedule.
                    drop_path=drop_ratio[
                        sum(num_blocks[:i]) : sum(num_blocks[: i + 1])
                    ],
                    dilation=dilation,
                    nempty=nempty,
                    num_blocks=num_blocks[i],
                )
                for i in range(self.num_stages)
            ]
        )
        self.downsamples = torch.nn.ModuleList(
            [
                Downsample(channels[i], channels[i + 1], kernel_size=[2], nempty=nempty)
                for i in range(self.num_stages - 1)
            ]
        )
        self.decoder = OctFormerDecoder(
            channels=channels, fpn_channel=fpn_channels, nempty=nempty, head_up=head_up
        )
        self.interp = ocnn.nn.OctreeInterp("nearest", nempty)
        self.seg_head = (
            nn.Sequential(
                nn.Linear(fpn_channels, fpn_channels),
                torch.nn.BatchNorm1d(fpn_channels),
                nn.ReLU(inplace=True),
                nn.Linear(fpn_channels, num_classes),
            )
            if num_classes > 0
            else nn.Identity()
        )

    def points2octree(self, points):
        """Build an octree from an ocnn ``Points`` object (helper, unused in forward)."""
        octree = ocnn.octree.Octree(self.octree_depth, self.octree_full_depth)
        octree.build_octree(points)
        return octree

    def forward(self, data_dict):
        coord = data_dict["coord"]
        normal = data_dict["normal"]
        feat = data_dict["feat"]
        offset = data_dict["offset"]
        batch = offset2batch(offset)

        # Scale coordinates into the octree's unit range and build the octree.
        point = Points(
            points=coord / self.octree_scale_factor,
            normals=normal,
            features=feat,
            batch_id=batch.unsqueeze(-1),
            batch_size=len(offset),
        )
        octree = ocnn.octree.Octree(
            depth=self.octree_depth,
            full_depth=self.octree_full_depth,
            batch_size=len(offset),
            device=coord.device,
        )
        octree.build_octree(point)
        octree.construct_all_neigh()

        # Stem: embed features while moving `stem_down` depths coarser.
        feat = self.patch_embed(octree.features[octree.depth], octree, octree.depth)
        depth = octree.depth - self.stem_down  # current octree depth
        # Wrap the octree with precomputed attention metadata for all stages.
        octree = OctreeT(
            octree,
            self.patch_size,
            self.dilation,
            self.nempty,
            max_depth=depth,
            start_depth=depth - self.num_stages + 1,
        )
        # Run the stages, collecting per-depth features for the decoder.
        features = {}
        for i in range(self.num_stages):
            depth_i = depth - i
            feat = self.layers[i](feat, octree, depth_i)
            features[depth_i] = feat
            if i < self.num_stages - 1:
                feat = self.downsamples[i](feat, octree, depth_i)
        out = self.decoder(features, octree)
        # interp representation to points before Octreeization
        query_pts = torch.cat([point.points, point.batch_id], dim=1).contiguous()
        out = self.interp(out, octree, octree.depth, query_pts)
        out = self.seg_head(out)
        return out
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
QWTforGithub/CDSegNet | https://github.com/QWTforGithub/CDSegNet/blob/87b603dbd011c0f57fb498d70680e32d4f8cf2f0/pointcept/models/octformer/__init__.py | pointcept/models/octformer/__init__.py | from .octformer_v1m1_base import OctFormer
| python | MIT | 87b603dbd011c0f57fb498d70680e32d4f8cf2f0 | 2026-01-05T07:13:40.759144Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.