repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
ReAgent | ReAgent-master/reagent/optimizer/utils.py | #!/usr/bin/env python3
import inspect
import torch
def is_strict_subclass(a, b):
    """Return True iff ``a`` and ``b`` are both classes and ``a`` is a
    proper (strict) subclass of ``b`` — i.e. a subclass other than ``b``
    itself. Non-class arguments yield False instead of raising."""
    if inspect.isclass(a) and inspect.isclass(b):
        return issubclass(a, b) and a != b
    return False
def is_torch_optimizer(cls):
    """Return True iff ``cls`` is a proper subclass of ``torch.optim.Optimizer``."""
    optimizer_base = torch.optim.Optimizer
    return is_strict_subclass(cls, optimizer_base)
def is_torch_lr_scheduler(cls):
    """Return True iff ``cls`` is a proper subclass of torch's LR-scheduler base."""
    scheduler_base = torch.optim.lr_scheduler._LRScheduler
    return is_strict_subclass(cls, scheduler_base)
| 399 | 19 | 73 | py |
ReAgent | ReAgent-master/reagent/optimizer/scheduler.py | #!/usr/bin/env python3
import inspect
from typing import Any, Dict
import torch
from reagent.core.dataclasses import dataclass
from reagent.core.registry_meta import RegistryMeta
from .utils import is_torch_lr_scheduler
@dataclass(frozen=True)
class LearningRateSchedulerConfig(metaclass=RegistryMeta):
    """Registry base for LR-scheduler configs.

    Each subclass is expected to be named after the ``torch.optim.lr_scheduler``
    class it wraps; its dataclass fields mirror that scheduler's constructor
    arguments.
    """

    def make_from_optimizer(
        self, optimizer: torch.optim.Optimizer
    ) -> torch.optim.lr_scheduler._LRScheduler:
        """Instantiate the torch scheduler matching this config's class name."""
        # Resolve the torch scheduler class by this config class's own name.
        scheduler_cls = getattr(torch.optim.lr_scheduler, type(self).__name__)
        assert is_torch_lr_scheduler(
            scheduler_cls
        ), f"{scheduler_cls} is not a scheduler."
        # Pull every constructor argument (except the optimizer itself)
        # from this config's fields of the same name.
        ctor_params = inspect.signature(scheduler_cls).parameters
        kwargs = {
            name: getattr(self, name) for name in ctor_params if name != "optimizer"
        }
        self.decode_lambdas(kwargs)
        return scheduler_cls(optimizer=optimizer, **kwargs)

    def decode_lambdas(self, args: Dict[str, Any]) -> None:
        # Hook for subclasses whose configs carry serialized lambda arguments.
        pass
| 1,054 | 27.513514 | 77 | py |
ReAgent | ReAgent-master/reagent/optimizer/optimizer.py | #!/usr/bin/env python3
"""
For each Torch optimizer, we create a wrapper pydantic dataclass around it.
We also add this class to our Optimizer registry.
Usage:
Whenever you want to use this Optimizer__Union, specify it as the type.
E.g.
class Parameters:
rl: RLParameters = field(default_factory=RLParameters)
minibatch_size: int = 64
optimizer: Optimizer__Union = field(default_factory=Optimizer__Union.default)
To instantiate it, specify desired optimizer in YAML file.
E.g.
rl:
...
minibatch: 64
optimizer:
Adam:
lr: 0.001
eps: 1e-08
lr_schedulers:
- OneCycleLR:
...
Since we don't know which network parameters we want to optimize,
Optimizer__Union will be a factory for the optimizer it contains.
Following the above example, we create an optimizer as follows:
class Trainer:
def __init__(self, network, params):
self.optimizer = params.optimizer.make_optimizer_scheduler(network.parameters())["optimizer"]
def train(self, data):
...
loss.backward()
# steps both optimizer and chained lr_schedulers
self.optimizer.step()
"""
import inspect
from typing import List, Dict, Union
import torch
from reagent.core.dataclasses import dataclass, field
from reagent.core.registry_meta import RegistryMeta
from .scheduler import LearningRateSchedulerConfig
from .utils import is_torch_optimizer
@dataclass(frozen=True)
class OptimizerConfig(metaclass=RegistryMeta):
    """Registry base for optimizer configs; each subclass is named after the
    ``torch.optim`` optimizer it wraps and acts as a factory for it."""

    # optional config if you want to use (potentially chained) lr scheduler
    lr_schedulers: List[LearningRateSchedulerConfig] = field(default_factory=list)

    def make_optimizer_scheduler(
        self, params
    ) -> Dict[str, Union[torch.optim.Optimizer, torch.optim.lr_scheduler._LRScheduler]]:
        """Build the optimizer over ``params`` (and its LR scheduler, if any)."""
        assert (
            len(self.lr_schedulers) <= 1
        ), "Multiple schedulers for one optimizer is no longer supported"
        # Assuming the classname is the same as the torch class name
        optimizer_cls = getattr(torch.optim, type(self).__name__)
        assert is_torch_optimizer(
            optimizer_cls
        ), f"{optimizer_cls} is not an optimizer."
        # Forward every constructor argument (except ``params``) from this
        # config's fields of the same name.
        ctor_params = inspect.signature(optimizer_cls).parameters
        kwargs = {name: getattr(self, name) for name in ctor_params if name != "params"}
        optimizer = optimizer_cls(params=params, **kwargs)
        if not self.lr_schedulers:
            return {"optimizer": optimizer}
        scheduler = self.lr_schedulers[0].make_from_optimizer(optimizer)
        return {"optimizer": optimizer, "lr_scheduler": scheduler}
| 2,673 | 31.609756 | 101 | py |
ReAgent | ReAgent-master/reagent/workflow/gym_batch_rl.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import json
import logging
import random
from typing import Optional
import gym
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import torch
from reagent.data.spark_utils import call_spark_class, get_spark_session
from reagent.gym.agents.agent import Agent
from reagent.gym.envs import Gym
from reagent.gym.policies.predictor_policies import create_predictor_policy_from_model
from reagent.gym.policies.random_policies import make_random_policy_for_env
from reagent.gym.runners.gymrunner import evaluate_for_n_episodes
from reagent.gym.utils import fill_replay_buffer
from reagent.model_managers.union import ModelManager__Union
from reagent.publishers.union import FileSystemPublisher, ModelPublisher__Union
from reagent.replay_memory.circular_replay_buffer import ReplayBuffer
from reagent.replay_memory.utils import replay_buffer_to_pre_timeline_df
from .types import TableSpec
logger = logging.getLogger(__name__)
def initialize_seed(seed: int, env) -> None:
    """Seed all RNGs for reproducible rollouts: the global python/numpy/torch
    seeds (via Lightning), the Gym env, and its action space sampler."""
    pl.seed_everything(seed)
    env.seed(seed)
    env.action_space.seed(seed)
def offline_gym_random(
    env_name: str,
    pkl_path: str,
    num_train_transitions: int,
    max_steps: Optional[int],
    seed: int = 1,
):
    """Roll out a uniformly random policy on the named Gym environment and
    persist the collected transitions as a pre-timeline DataFrame pickle."""
    env = Gym(env_name=env_name)
    policy = make_random_policy_for_env(env)
    agent = Agent.create_for_env(env, policy=policy)
    return _offline_gym(env, agent, pkl_path, num_train_transitions, max_steps, seed)
def offline_gym_predictor(
    env_name: str,
    model: ModelManager__Union,
    publisher: ModelPublisher__Union,
    pkl_path: str,
    num_train_transitions: int,
    max_steps: Optional[int],
    module_name: str = "default_model",
    seed: int = 1,
):
    """Roll out the latest published (trained) policy on the named Gym
    environment and persist the transitions as a pre-timeline pickle."""
    env = Gym(env_name=env_name)
    serving_agent = make_agent_from_model(env, model, publisher, module_name)
    return _offline_gym(
        env, serving_agent, pkl_path, num_train_transitions, max_steps, seed
    )
def _offline_gym(
    env: Gym,
    agent: Agent,
    pkl_path: str,
    num_train_transitions: int,
    max_steps: Optional[int],
    seed: int = 1,
):
    """Fill a replay buffer by running ``agent`` in ``env``, then dump the
    transitions to ``pkl_path`` as a pandas DataFrame pickle (pre-timeline
    format)."""
    initialize_seed(seed, env)
    buffer = ReplayBuffer(replay_capacity=num_train_transitions, batch_size=1)
    fill_replay_buffer(env, buffer, num_train_transitions, agent)
    # Only Discrete and Box action spaces are supported downstream.
    is_discrete_action = isinstance(env.action_space, gym.spaces.Discrete)
    if not is_discrete_action:
        assert isinstance(env.action_space, gym.spaces.Box)
    df = replay_buffer_to_pre_timeline_df(is_discrete_action, buffer)
    logger.info(f"Saving dataset with {len(df)} samples to {pkl_path}")
    df.to_pickle(pkl_path)
PRE_TIMELINE_SUFFIX = "_pre_timeline_operator"


def timeline_operator(pkl_path: str, input_table_spec: TableSpec):
    """Loads a pandas parquet, converts to pyspark, and uploads df to Hive.
    Then call the timeline operator.
    """
    pandas_df = pd.read_pickle(pkl_path)
    spark = get_spark_session()
    spark_df = spark.createDataFrame(pandas_df)
    input_name = f"{input_table_spec.table_name}{PRE_TIMELINE_SUFFIX}"
    spark_df.createTempView(input_name)
    # Arguments for the Spark Timeline operator; the dates are fixed
    # placeholders since generated data has no real date partitioning.
    timeline_args = {
        "startDs": "2019-01-01",
        "endDs": "2019-01-01",
        "addTerminalStateRow": True,
        "inputTableName": input_name,
        "outputTableName": input_table_spec.table_name,
        "includePossibleActions": "possible_actions" in pandas_df,
        "percentileFunction": "percentile_approx",
        "rewardColumns": ["reward", "metrics"],
        "extraFeatureColumns": [],
    }
    call_spark_class(spark, class_name="Timeline", args=json.dumps(timeline_args))
def make_agent_from_model(
    env: Gym,
    model: ModelManager__Union,
    publisher: ModelPublisher__Union,
    module_name: str,
):
    """Load the latest published TorchScript serving module for ``model`` and
    wrap it in an Agent that can act on ``env``.

    Requires ``publisher`` to be a FileSystemPublisher and ``module_name`` to
    be one of the model's serving module names.
    """
    file_publisher = publisher.value
    assert isinstance(
        file_publisher, FileSystemPublisher
    ), f"publishing manager is type {type(file_publisher)}, not FileSystemPublisher"
    available_modules = model.value.serving_module_names()
    assert module_name in available_modules, f"{module_name} not in {available_modules}"
    torchscript_path = file_publisher.get_latest_published_model(
        model.value, module_name
    )
    serving_module = torch.jit.load(torchscript_path)
    policy = create_predictor_policy_from_model(serving_module)
    return Agent.create_for_env_with_serving_policy(env, policy)
def evaluate_gym(
    env_name: str,
    model: ModelManager__Union,
    publisher: ModelPublisher__Union,
    num_eval_episodes: int,
    passing_score_bar: float,
    module_name: str = "default_model",
    max_steps: Optional[int] = None,
):
    """Evaluate the latest published serving module on a Gym environment.

    Runs ``num_eval_episodes`` episodes with the served policy and asserts
    that the average episode reward reaches ``passing_score_bar``.

    Raises:
        AssertionError: if the average reward is below the bar.
    """
    env = Gym(env_name=env_name)
    # Fixed seed so evaluation runs are reproducible.
    initialize_seed(1, env)
    agent = make_agent_from_model(env, model, publisher, module_name)
    rewards = evaluate_for_n_episodes(
        n=num_eval_episodes, env=env, agent=agent, max_steps=max_steps
    )
    avg_reward = np.mean(rewards)
    logger.info(
        f"Average reward over {num_eval_episodes} is {avg_reward}.\n"
        f"List of rewards: {rewards}\n"
        f"Passing score bar: {passing_score_bar}"
    )
    assert (
        avg_reward >= passing_score_bar
    ), f"{avg_reward} fails to pass the bar of {passing_score_bar}!"
    return
| 5,597 | 31.358382 | 87 | py |
ReAgent | ReAgent-master/reagent/workflow/training.py | #!/usr/bin/env python3
import dataclasses
import logging
import time
from typing import Dict, Optional
import torch
from reagent.core.parameters import NormalizationData
from reagent.core.tensorboardX import summary_writer_context
from reagent.data.manual_data_module import get_sample_range
from reagent.data.oss_data_fetcher import OssDataFetcher
from reagent.model_managers.model_manager import ModelManager
from reagent.model_managers.union import ModelManager__Union
from reagent.publishers.union import ModelPublisher__Union
from reagent.validators.union import ModelValidator__Union
from reagent.workflow.env import get_new_named_entity_ids, get_workflow_id
from reagent.workflow.types import (
Dataset,
ModuleNameToEntityId,
ReaderOptions,
RecurringPeriod,
ResourceOptions,
RewardOptions,
RLTrainingOutput,
TableSpec,
)
from torch.utils.tensorboard import SummaryWriter
logger = logging.getLogger(__name__)
def identify_and_train_network(
    input_table_spec: TableSpec,
    model: ModelManager__Union,
    num_epochs: int,
    use_gpu: Optional[bool] = None,
    reward_options: Optional[RewardOptions] = None,
    reader_options: Optional[ReaderOptions] = None,
    resource_options: Optional[ResourceOptions] = None,
    warmstart_path: Optional[str] = None,
    validator: Optional[ModelValidator__Union] = None,
    publisher: Optional[ModelPublisher__Union] = None,
) -> RLTrainingOutput:
    """Prepare data for ``model`` and then delegate to ``query_and_train``.

    If the model manager supplies a data module, that module's
    ``setup_data`` is used; otherwise normalization parameters are computed
    from ``input_table_spec`` via feature identification. Exactly one of
    ``setup_data`` / ``normalization_data_map`` is forwarded — the
    invariant ``query_and_train`` asserts.
    """
    if use_gpu is None:
        # pyre-fixme[35]: Target cannot be annotated.
        use_gpu: bool = torch.cuda.is_available()
    reward_options = reward_options or RewardOptions()
    reader_options = reader_options or ReaderOptions()
    manager = model.value
    normalization_data_map = None
    setup_data = None
    data_module = manager.get_data_module(
        input_table_spec=input_table_spec,
        reward_options=reward_options,
        reader_options=reader_options,
        resource_options=resource_options,
    )
    if data_module is not None:
        data_module.prepare_data()
        setup_data = data_module.setup_data
    else:
        normalization_data_map = manager.run_feature_identification(input_table_spec)
    return query_and_train(
        input_table_spec,
        model,
        num_epochs,
        use_gpu=use_gpu,
        setup_data=setup_data,
        normalization_data_map=normalization_data_map,
        reward_options=reward_options,
        reader_options=reader_options,
        resource_options=resource_options,
        warmstart_path=warmstart_path,
        validator=validator,
        publisher=publisher,
    )
def query_and_train(
    input_table_spec: TableSpec,
    model: ModelManager__Union,
    num_epochs: int,
    use_gpu: bool,
    *,
    setup_data: Optional[Dict[str, bytes]] = None,
    saved_setup_data: Optional[Dict[str, bytes]] = None,
    normalization_data_map: Optional[Dict[str, NormalizationData]] = None,
    reward_options: Optional[RewardOptions] = None,
    reader_options: Optional[ReaderOptions] = None,
    resource_options: Optional[ResourceOptions] = None,
    warmstart_path: Optional[str] = None,
    validator: Optional[ModelValidator__Union] = None,
    publisher: Optional[ModelPublisher__Union] = None,
    named_model_ids: Optional[ModuleNameToEntityId] = None,
    recurring_period: Optional[RecurringPeriod] = None,
) -> RLTrainingOutput:
    """Query training data, train, then optionally validate and publish.

    Exactly one of ``setup_data`` (data-module path) and
    ``normalization_data_map`` (query path) must end up non-None; a
    ``ValueError`` is raised otherwise. Returns the (possibly validated and
    published) ``RLTrainingOutput``.
    """
    child_workflow_id = get_workflow_id()
    if named_model_ids is None:
        # pyre-fixme[20]: Argument `model_type_id` expected.
        named_model_ids = get_new_named_entity_ids(model.value.serving_module_names())
    logger.info("Starting query")
    reward_options = reward_options or RewardOptions()
    reader_options = reader_options or ReaderOptions()
    resource_options = resource_options or ResourceOptions()
    manager = model.value
    # NOTE: mutates the caller-supplied resource_options in place.
    resource_options.gpu = int(use_gpu)
    if saved_setup_data is not None:

        def _maybe_get_bytes(v) -> bytes:
            if isinstance(v, bytes):
                return v
            # HACK: FBLearner sometimes pack bytes into Blob
            return v.data

        saved_setup_data = {k: _maybe_get_bytes(v) for k, v in saved_setup_data.items()}
    if setup_data is None:
        data_module = manager.get_data_module(
            input_table_spec=input_table_spec,
            reward_options=reward_options,
            reader_options=reader_options,
            resource_options=resource_options,
            saved_setup_data=saved_setup_data,
        )
        if data_module is not None:
            data_module.prepare_data()
            setup_data = data_module.setup_data
            # Throw away existing normalization data map
            normalization_data_map = None
    # Exactly one of the two data sources must be set.
    if sum([int(setup_data is not None), int(normalization_data_map is not None)]) != 1:
        raise ValueError("setup_data and normalization_data_map are mutually exclusive")
    train_dataset = None
    eval_dataset = None
    data_fetcher = OssDataFetcher()
    if normalization_data_map is not None:
        # Query path: fetch train (and optionally eval) datasets explicitly.
        calc_cpe_in_training = manager.should_generate_eval_dataset
        sample_range_output = get_sample_range(input_table_spec, calc_cpe_in_training)
        train_dataset = manager.query_data(
            input_table_spec=input_table_spec,
            sample_range=sample_range_output.train_sample_range,
            reward_options=reward_options,
            data_fetcher=data_fetcher,
        )
        eval_dataset = None
        if calc_cpe_in_training:
            eval_dataset = manager.query_data(
                input_table_spec=input_table_spec,
                sample_range=sample_range_output.eval_sample_range,
                reward_options=reward_options,
                data_fetcher=data_fetcher,
            )
    logger.info("Starting training")
    results = train_workflow(
        manager,
        train_dataset,
        eval_dataset,
        num_epochs=num_epochs,
        use_gpu=use_gpu,
        setup_data=setup_data,
        normalization_data_map=normalization_data_map,
        named_model_ids=named_model_ids,
        child_workflow_id=child_workflow_id,
        reward_options=reward_options,
        reader_options=reader_options,
        resource_options=resource_options,
        warmstart_path=warmstart_path,
    )
    if validator is not None:
        results = run_validator(validator, results)
    if publisher is not None:
        results = run_publisher(
            publisher,
            model,
            results,
            setup_data,
            named_model_ids,
            child_workflow_id,
            recurring_period,
        )
    return results
def train_workflow(
    model_manager: ModelManager,
    train_dataset: Optional[Dataset],
    eval_dataset: Optional[Dataset],
    *,
    num_epochs: int,
    use_gpu: bool,
    named_model_ids: ModuleNameToEntityId,
    child_workflow_id: int,
    setup_data: Optional[Dict[str, bytes]] = None,
    normalization_data_map: Optional[Dict[str, NormalizationData]] = None,
    reward_options: Optional[RewardOptions] = None,
    reader_options: Optional[ReaderOptions] = None,
    resource_options: Optional[ResourceOptions] = None,
    warmstart_path: Optional[str] = None,
) -> RLTrainingOutput:
    """Train the model and export its TorchScript serving modules.

    Builds the trainer via ``model_manager``, trains on the given datasets
    (or a data module derived from ``setup_data``), saves every serving
    module to a timestamped ``.torchscript`` file, and returns the training
    output with ``output_paths`` filled in.
    """
    writer = SummaryWriter()
    logger.info("TensorBoard logging location is: {}".format(writer.log_dir))
    if setup_data is not None:
        data_module = model_manager.get_data_module(
            setup_data=setup_data,
            reward_options=reward_options,
            reader_options=reader_options,
            resource_options=resource_options,
        )
        assert data_module is not None
        data_module.setup()
    else:
        data_module = None
    if normalization_data_map is None:
        # Without an explicit map, normalization must come from the data module.
        assert data_module is not None
        normalization_data_map = data_module.get_normalization_data_map()
    warmstart_input_path = warmstart_path or None
    trainer_module = model_manager.build_trainer(
        use_gpu=use_gpu,
        reward_options=reward_options,
        normalization_data_map=normalization_data_map,
    )
    if not reader_options:
        reader_options = ReaderOptions()
    if not resource_options:
        resource_options = ResourceOptions()
    with summary_writer_context(writer):
        train_output, lightning_trainer = model_manager.train(
            trainer_module,
            train_dataset,
            eval_dataset,
            None,
            data_module,
            num_epochs,
            reader_options,
            resource_options,
            checkpoint_path=warmstart_input_path,
        )
    # Export one TorchScript file per serving module.
    output_paths = {}
    for module_name, serving_module in model_manager.build_serving_modules(
        trainer_module, normalization_data_map
    ).items():
        torchscript_output_path = f"{model_manager.__class__.__name__}_{module_name}_{round(time.time())}.torchscript"
        torch.jit.save(serving_module, torchscript_output_path)
        logger.info(f"Saved {module_name} to {torchscript_output_path}")
        output_paths[module_name] = torchscript_output_path
    return dataclasses.replace(train_output, output_paths=output_paths)
def run_validator(
    validator: ModelValidator__Union, training_output: RLTrainingOutput
) -> RLTrainingOutput:
    """Run ``validator`` on ``training_output`` and return a copy of the
    output with ``validation_result`` filled in.

    Raises:
        AssertionError: if ``training_output`` already has a validation result.
    """
    # Bug fix: the original assert message referenced the nonexistent
    # attribute ``validation_output`` (and contained a stray "f"), so a
    # failing assert would raise AttributeError instead of the message.
    assert (
        training_output.validation_result is None
    ), f"validation_result was already set to {training_output.validation_result}"
    model_validator = validator.value
    validation_result = model_validator.validate(training_output)
    return dataclasses.replace(training_output, validation_result=validation_result)
def run_publisher(
    publisher: ModelPublisher__Union,
    model_chooser: ModelManager__Union,
    training_output: RLTrainingOutput,
    setup_data: Optional[Dict[str, bytes]],
    recurring_workflow_ids: ModuleNameToEntityId,
    child_workflow_id: int,
    recurring_period: Optional[RecurringPeriod],
) -> RLTrainingOutput:
    """Publish the trained model and return a copy of ``training_output``
    with ``publishing_result`` filled in.

    Raises:
        AssertionError: if ``training_output`` already has a publishing result.
    """
    # Bug fix: the original assert message referenced the nonexistent
    # attribute ``publishing_output`` (and contained a stray "f"), so a
    # failing assert would raise AttributeError instead of the message.
    assert (
        training_output.publishing_result is None
    ), f"publishing_result was already set to {training_output.publishing_result}"
    model_publisher = publisher.value
    model_manager = model_chooser.value
    publishing_result = model_publisher.publish(
        model_manager,
        training_output,
        setup_data,
        recurring_workflow_ids,
        child_workflow_id,
        recurring_period,
    )
    return dataclasses.replace(training_output, publishing_result=publishing_result)
| 10,454 | 32.944805 | 118 | py |
ReAgent | ReAgent-master/reagent/workflow/utils.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from typing import Dict, List, Optional
import pytorch_lightning as pl
import torch
# pyre-fixme[21]: Could not find `petastorm`.
from petastorm import make_batch_reader
# pyre-fixme[21]: Could not find module `petastorm.pytorch`.
# pyre-fixme[21]: Could not find module `petastorm.pytorch`.
from petastorm.pytorch import DataLoader, decimal_friendly_collate
from reagent.core.oss_tensorboard_logger import OssTensorboardLogger
from reagent.data.spark_utils import get_spark_session
from reagent.preprocessing.batch_preprocessor import BatchPreprocessor
from reagent.training import StoppingEpochCallback
from .types import Dataset, ReaderOptions, ResourceOptions
logger = logging.getLogger(__name__)
def get_table_row_count(parquet_url: str):
    """Return the number of rows in the parquet table at ``parquet_url``,
    counted through the active Spark session."""
    session = get_spark_session()
    table = session.read.parquet(parquet_url)
    return table.count()
def collate_and_preprocess(batch_preprocessor: BatchPreprocessor, use_gpu: bool):
    """Helper for Petastorm's DataLoader to preprocess.
    TODO(kaiwenw): parallelize preprocessing by using transform of Petastorm reader
    Should pin memory and preprocess in reader and convert to gpu in collate_fn.
    """

    def collate_fn(batch_list: List[Dict]):
        # Collate the raw rows, run the batch preprocessor, then move the
        # result to GPU when requested.
        collated = decimal_friendly_collate(batch_list)
        processed = batch_preprocessor(collated)
        return processed.cuda() if use_gpu else processed

    return collate_fn
def get_petastorm_dataloader(
    dataset: Dataset,
    batch_size: int,
    batch_preprocessor: BatchPreprocessor,
    use_gpu: bool,
    reader_options: ReaderOptions,
):
    """get petastorm loader for dataset (with preprocessor)"""
    # Single pass over the parquet data; pool type comes from reader options.
    reader = make_batch_reader(
        dataset.parquet_url,
        num_epochs=1,
        reader_pool_type=reader_options.petastorm_reader_pool_type,
    )
    collate = collate_and_preprocess(
        batch_preprocessor=batch_preprocessor, use_gpu=use_gpu
    )
    return DataLoader(reader, batch_size=batch_size, collate_fn=collate)
# TODO: Move this to appropriate location
class PetastormLightningDataModule(pl.LightningDataModule):
    """LightningDataModule serving petastorm train/eval datasets through
    ``get_petastorm_dataloader`` with a shared batch preprocessor.

    Improvement: the previously duplicated train/test dataloader bodies are
    factored into a single private helper.
    """

    def __init__(self, train_dataset, eval_dataset, batch_preprocessor, reader_options):
        super().__init__()
        self.train_dataset = train_dataset
        self.eval_dataset = eval_dataset
        self.batch_preprocessor = batch_preprocessor
        self.reader_options = reader_options

    def _closing_iter(self, dataloader):
        # Yield every batch, then explicitly close the petastorm loader's
        # underlying reader via its context-manager exit.
        yield from dataloader
        dataloader.__exit__(None, None, None)

    def _make_dataloader(self, dataset):
        # Shared construction for train/test loaders. Collation stays on CPU
        # (use_gpu=False); batches are moved to device elsewhere.
        dataloader = get_petastorm_dataloader(
            dataset=dataset,
            batch_size=self.reader_options.minibatch_size,
            batch_preprocessor=self.batch_preprocessor,
            use_gpu=False,
            reader_options=self.reader_options,
        )
        return self._closing_iter(dataloader)

    def train_dataloader(self):
        return self._make_dataloader(self.train_dataset)

    def test_dataloader(self):
        return self._make_dataloader(self.eval_dataset)
def get_rank() -> int:
    """
    Returns the torch.distributed rank of the process. 0 represents
    the main process and is the default if torch.distributed isn't set up
    """
    dist = torch.distributed
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
def train_eval_lightning(
    train_dataset,
    eval_dataset,
    test_dataset,
    trainer_module,
    data_module,
    num_epochs,
    logger_name: str,
    batch_preprocessor=None,
    reader_options: Optional[ReaderOptions] = None,
    checkpoint_path: Optional[str] = None,
    resource_options: Optional[ResourceOptions] = None,
) -> pl.Trainer:
    """Fit and then test ``trainer_module`` with PyTorch Lightning.

    Uses ``data_module`` when provided, otherwise wraps the petastorm
    datasets in a ``PetastormLightningDataModule``. Returns the fitted
    trainer. NOTE(review): ``test_dataset`` is accepted but never used in
    this body.
    """
    resource_options = resource_options or ResourceOptions()
    use_gpu = resource_options.use_gpu
    reader_options = reader_options or ReaderOptions()
    datamodule = data_module or PetastormLightningDataModule(
        train_dataset, eval_dataset, batch_preprocessor, reader_options
    )
    trainer = pl.Trainer(
        logger=OssTensorboardLogger(save_dir="pl_log_tensorboard", name=logger_name),
        # max_epochs is an inflated upper bound; presumably
        # StoppingEpochCallback ends training at num_epochs — confirm there.
        max_epochs=num_epochs * 1000,
        gpus=int(use_gpu),
        reload_dataloaders_every_epoch=True,
        resume_from_checkpoint=checkpoint_path,
        callbacks=[StoppingEpochCallback(num_epochs)],
    )
    trainer.fit(trainer_module, datamodule=datamodule)
    trainer.test()
    if checkpoint_path is not None:
        # Overwrite the warmstart path with the new model
        trainer_module.set_clean_stop(True)
        trainer.save_checkpoint(checkpoint_path)
    return trainer
| 4,998 | 32.326667 | 88 | py |
ReAgent | ReAgent-master/reagent/reporting/discrete_crr_reporter.py | #!/usr/bin/env python3
import itertools
import logging
from typing import List, Optional
import torch
from reagent.core import aggregators as agg
from reagent.core.observers import IntervalAggregatingObserver
from reagent.reporting.reporter_base import (
ReporterBase,
)
from reagent.workflow.training_reports import DQNTrainingReport
logger = logging.getLogger(__name__)
class DiscreteCRRReporter(ReporterBase):
    """Reporter for discrete-action CRR training.

    Aggregates per-interval means of td/reward/actor losses, per-action
    value statistics and action counts, plus TensorBoard histogram/count
    aggregators for the same keys. CPE results are collected every batch.
    """

    def __init__(
        self,
        actions: List[str],
        report_interval: int = 100,
        target_action_distribution: Optional[List[float]] = None,
        recent_window_size: int = 100,
    ):
        # No raw value-list observers; everything goes through aggregators.
        self.value_list_observers = {}
        self.aggregating_observers = {
            # CPE details are flushed every batch (interval=1).
            **{
                "cpe_results": IntervalAggregatingObserver(
                    1, agg.ListAggregator("cpe_details")
                ),
            },
            # Everything else aggregates over `report_interval` batches.
            **{
                name: IntervalAggregatingObserver(report_interval, aggregator)
                for name, aggregator in itertools.chain(
                    [
                        ("td_loss", agg.MeanAggregator("td_loss")),
                        ("reward_loss", agg.MeanAggregator("reward_loss")),
                        ("actor_loss", agg.MeanAggregator("actor_loss")),
                        (
                            "model_values",
                            agg.FunctionsByActionAggregator(
                                "model_values",
                                actions,
                                {"mean": torch.mean, "std": torch.std},
                            ),
                        ),
                        (
                            "logged_action",
                            agg.ActionCountAggregator("logged_actions", actions),
                        ),
                        (
                            "model_action",
                            agg.ActionCountAggregator("model_action_idxs", actions),
                        ),
                        (
                            "recent_rewards",
                            agg.RecentValuesAggregator("logged_rewards"),
                        ),
                    ],
                    # TensorBoard action-count plots for logged vs model actions.
                    [
                        (
                            f"{key}_tb",
                            agg.TensorBoardActionCountAggregator(key, title, actions),
                        )
                        for key, title in [
                            ("logged_actions", "logged"),
                            ("model_action_idxs", "model"),
                        ]
                    ],
                    # TensorBoard histogram/mean plots for scalar streams.
                    [
                        (
                            f"{key}_tb",
                            agg.TensorBoardHistogramAndMeanAggregator(key, log_key),
                        )
                        for key, log_key in [
                            ("reward_loss", "reward_loss"),
                            ("logged_propensities", "propensities/logged"),
                            ("logged_rewards", "reward/logged"),
                        ]
                    ],
                    # TensorBoard per-action histogram/mean plots.
                    [
                        (
                            f"{key}_tb",
                            agg.TensorBoardActionHistogramAndMeanAggregator(
                                key, category, title, actions
                            ),
                        )
                        for key, category, title in [
                            ("model_propensities", "propensities", "model"),
                            ("model_rewards", "reward", "model"),
                            ("model_values", "value", "model"),
                        ]
                    ],
                )
            },
        }
        super().__init__(self.value_list_observers, self.aggregating_observers)
        # Stored for downstream report generation; unused in this body.
        self.target_action_distribution = target_action_distribution
        self.recent_window_size = recent_window_size

    # TODO: write this for OSS
    def generate_training_report(self) -> DQNTrainingReport:
        return DQNTrainingReport()
| 4,053 | 37.245283 | 86 | py |
ReAgent | ReAgent-master/reagent/reporting/seq2reward_reporter.py | #!/usr/bin/env python3
import itertools
import logging
from typing import List
import torch
from reagent.core import aggregators as agg
from reagent.core.observers import IntervalAggregatingObserver
from reagent.reporting.reporter_base import ReporterBase
from reagent.workflow.training_reports import Seq2RewardTrainingReport
logger = logging.getLogger(__name__)
class Seq2RewardReporter(ReporterBase):
    """Reporter for seq2reward training: interval means of train/eval MSE and
    step-entropy losses, per-action Q-value means, eval action distributions,
    and matching TensorBoard aggregators."""

    def __init__(self, action_names: List[str], report_interval: int = 100):
        self.action_names = action_names
        self.report_interval = report_interval
        super().__init__(self.value_list_observers, self.aggregating_observers)

    @property
    def value_list_observers(self):
        # This reporter uses no raw value-list observers.
        return {}

    @property
    def aggregating_observers(self):
        # NOTE: a fresh observer dict is built on each property access; the
        # base class receives one snapshot in __init__.
        return {
            name: IntervalAggregatingObserver(self.report_interval, aggregator)
            for name, aggregator in itertools.chain(
                [
                    ("mse_loss_per_batch", agg.MeanAggregator("mse_loss")),
                    (
                        "step_entropy_loss_per_batch",
                        agg.MeanAggregator("step_entropy_loss"),
                    ),
                    (
                        "q_values_per_batch",
                        agg.FunctionsByActionAggregator(
                            "q_values", self.action_names, {"mean": torch.mean}
                        ),
                    ),
                    ("eval_mse_loss_per_batch", agg.MeanAggregator("eval_mse_loss")),
                    (
                        "eval_step_entropy_loss_per_batch",
                        agg.MeanAggregator("eval_step_entropy_loss"),
                    ),
                    (
                        "eval_q_values_per_batch",
                        agg.FunctionsByActionAggregator(
                            "eval_q_values", self.action_names, {"mean": torch.mean}
                        ),
                    ),
                    (
                        "eval_action_distribution_per_batch",
                        agg.FunctionsByActionAggregator(
                            "eval_action_distribution",
                            self.action_names,
                            {"mean": torch.mean},
                        ),
                    ),
                ],
                # TensorBoard histogram/mean plots for scalar loss streams.
                [
                    (
                        f"{key}_tb",
                        agg.TensorBoardHistogramAndMeanAggregator(key, log_key),
                    )
                    for key, log_key in [
                        ("mse_loss", "mse_loss"),
                        ("step_entropy_loss", "step_entropy_loss"),
                        ("eval_mse_loss", "eval_mse_loss"),
                        ("eval_step_entropy_loss", "eval_step_entropy_loss"),
                    ]
                ],
                # TensorBoard per-action histogram/mean plots.
                [
                    (
                        f"{key}_tb",
                        agg.TensorBoardActionHistogramAndMeanAggregator(
                            key, category, title, self.action_names
                        ),
                    )
                    for key, category, title in [
                        ("q_values", "q_values", "training"),
                        ("eval_q_values", "q_values", "eval"),
                        ("eval_action_distribution", "action_distribution", "eval"),
                    ]
                ],
            )
        }

    # TODO: write this for OSS
    def generate_training_report(self) -> Seq2RewardTrainingReport:
        return Seq2RewardTrainingReport()
class Seq2RewardCompressReporter(Seq2RewardReporter):
    """Seq2RewardReporter variant for the compress phase: tracks MSE loss and
    accuracy (train and eval) instead of step-entropy, logging TensorBoard
    streams under ``compress_*`` keys."""

    @property
    def aggregating_observers(self):
        return {
            name: IntervalAggregatingObserver(self.report_interval, aggregator)
            for name, aggregator in itertools.chain(
                [
                    ("mse_loss_per_batch", agg.MeanAggregator("mse_loss")),
                    ("accuracy_per_batch", agg.MeanAggregator("accuracy")),
                    ("eval_mse_loss_per_batch", agg.MeanAggregator("eval_mse_loss")),
                    ("eval_accuracy_per_batch", agg.MeanAggregator("eval_accuracy")),
                    (
                        "eval_q_values_per_batch",
                        agg.FunctionsByActionAggregator(
                            "eval_q_values", self.action_names, {"mean": torch.mean}
                        ),
                    ),
                    (
                        "eval_action_distribution_per_batch",
                        agg.FunctionsByActionAggregator(
                            "eval_action_distribution",
                            self.action_names,
                            {"mean": torch.mean},
                        ),
                    ),
                ],
                # TensorBoard histogram/mean plots (compress-prefixed keys).
                [
                    (
                        f"{key}_tb",
                        agg.TensorBoardHistogramAndMeanAggregator(key, log_key),
                    )
                    for key, log_key in [
                        ("mse_loss", "compress_mse_loss"),
                        ("accuracy", "compress_accuracy"),
                        ("eval_mse_loss", "compress_eval_mse_loss"),
                        ("eval_accuracy", "compress_eval_accuracy"),
                    ]
                ],
                # TensorBoard per-action plots (compress-prefixed titles).
                [
                    (
                        f"{key}_tb",
                        agg.TensorBoardActionHistogramAndMeanAggregator(
                            key, category, title, self.action_names
                        ),
                    )
                    for key, category, title in [
                        ("eval_q_values", "q_values", "compress_eval"),
                        (
                            "eval_action_distribution",
                            "action_distribution",
                            "compress_eval",
                        ),
                    ]
                ],
            )
        }
| 5,987 | 38.137255 | 85 | py |
ReAgent | ReAgent-master/reagent/reporting/reporter_base.py | #!/usr/bin/env python3
import abc
import logging
from typing import Dict
import torch
from reagent.core.observers import (
CompositeObserver,
EpochEndObserver,
IntervalAggregatingObserver,
ValueListObserver,
)
from reagent.core.result_registries import TrainingReport
from reagent.core.tracker import ObservableMixin
from reagent.core.utils import lazy_property
logger = logging.getLogger(__name__)
class ReporterBase(CompositeObserver):
    """Base class for training reporters.

    Composes the given value-list observers, interval-aggregating observers,
    and an epoch-end observer that flushes the aggregators. Observers are
    also exposed as attributes by name via ``__getattr__``.
    """

    def __init__(
        self,
        value_list_observers: Dict[str, ValueListObserver],
        aggregating_observers: Dict[str, IntervalAggregatingObserver],
    ):
        # Flush all aggregators whenever an epoch ends.
        epoch_end_observer = EpochEndObserver(self.flush)
        self._value_list_observers = value_list_observers
        self._aggregating_observers = aggregating_observers
        super().__init__(
            list(value_list_observers.values())
            # pyre-fixme[58]: `+` is not supported for operand types
            # `List[ValueListObserver]` and `List[IntervalAggregatingObserver]`.
            + list(aggregating_observers.values())
            # pyre-fixme[58]: `+` is not supported for operand types
            # `List[ValueListObserver]` and `List[EpochEndObserver]`.
            + [epoch_end_observer]
        )
        self._reporter_observable = _ReporterObservable(self)

    def log(self, **kwargs) -> None:
        # Fan the logged values out to all registered observers.
        self._reporter_observable.notify_observers(**kwargs)

    def flush(self, epoch: int):
        """Flush every aggregating observer at the end of ``epoch``."""
        logger.info(f"Epoch {epoch} ended")
        for observer in self._aggregating_observers.values():
            observer.flush()

    def __getattr__(self, key: str):
        # Only invoked when normal attribute lookup fails: resolve `key` as
        # a value-list observer first, then as an aggregator by name.
        val = self._value_list_observers.get(key, None)
        if val is not None:
            return val
        val = self._aggregating_observers.get(key, None)
        if val is not None:
            return val.aggregator
        raise AttributeError

    # TODO: write this for OSS
    @abc.abstractmethod
    def generate_training_report(self) -> TrainingReport:
        pass
class _ReporterObservable(ObservableMixin):
    """Observable that forwards every value the reporter can observe back to
    the reporter itself (used by ``ReporterBase.log``)."""

    def __init__(self, reporter) -> None:
        self._reporter = reporter
        super().__init__()
        # The reporter observes everything this observable emits.
        self.add_observer(reporter)

    @lazy_property
    def _observable_value_types(self):
        # All keys the reporter's observers listen for, each typed as a tensor.
        return {k: torch.Tensor for k in self._reporter.get_observing_keys()}
| 2,631 | 32.316456 | 81 | py |
ReAgent | ReAgent-master/reagent/reporting/discrete_dqn_reporter.py | #!/usr/bin/env python3
import itertools
import logging
from collections import OrderedDict
from typing import List, Optional
import torch
from reagent.core import aggregators as agg
from reagent.core.observers import IntervalAggregatingObserver, ValueListObserver
from reagent.reporting.reporter_base import (
ReporterBase,
)
from reagent.workflow.training_reports import DQNTrainingReport
logger = logging.getLogger(__name__)
class DiscreteDQNReporter(ReporterBase):
    """Reporter for discrete-action DQN training.

    Aggregates per-interval means of td/reward losses, per-action value
    statistics and action counts, plus TensorBoard histogram/count
    aggregators. CPE results are collected every batch.
    """

    def __init__(
        self,
        actions: List[str],
        report_interval: int = 100,
        target_action_distribution: Optional[List[float]] = None,
        recent_window_size: int = 100,
    ):
        # No raw value-list observers; everything goes through aggregators.
        self.value_list_observers = {}
        self.aggregating_observers = {
            # CPE details are flushed every batch (interval=1).
            **{
                "cpe_results": IntervalAggregatingObserver(
                    1, agg.ListAggregator("cpe_details")
                ),
            },
            # Everything else aggregates over `report_interval` batches.
            **{
                name: IntervalAggregatingObserver(report_interval, aggregator)
                for name, aggregator in itertools.chain(
                    [
                        ("td_loss", agg.MeanAggregator("td_loss")),
                        ("reward_loss", agg.MeanAggregator("reward_loss")),
                        (
                            "model_values",
                            agg.FunctionsByActionAggregator(
                                "model_values",
                                actions,
                                {"mean": torch.mean, "std": torch.std},
                            ),
                        ),
                        (
                            "logged_action",
                            agg.ActionCountAggregator("logged_actions", actions),
                        ),
                        (
                            "model_action",
                            agg.ActionCountAggregator("model_action_idxs", actions),
                        ),
                        (
                            "recent_rewards",
                            agg.RecentValuesAggregator("logged_rewards"),
                        ),
                    ],
                    # TensorBoard action-count plots for logged vs model actions.
                    [
                        (
                            f"{key}_tb",
                            agg.TensorBoardActionCountAggregator(key, title, actions),
                        )
                        for key, title in [
                            ("logged_actions", "logged"),
                            ("model_action_idxs", "model"),
                        ]
                    ],
                    # TensorBoard histogram/mean plots for scalar streams.
                    [
                        (
                            f"{key}_tb",
                            agg.TensorBoardHistogramAndMeanAggregator(key, log_key),
                        )
                        for key, log_key in [
                            ("td_loss", "td_loss"),
                            ("reward_loss", "reward_loss"),
                            ("logged_propensities", "propensities/logged"),
                            ("logged_rewards", "reward/logged"),
                        ]
                    ],
                    # TensorBoard per-action histogram/mean plots.
                    [
                        (
                            f"{key}_tb",
                            agg.TensorBoardActionHistogramAndMeanAggregator(
                                key, category, title, actions
                            ),
                        )
                        for key, category, title in [
                            ("model_propensities", "propensities", "model"),
                            ("model_rewards", "reward", "model"),
                            ("model_values", "value", "model"),
                        ]
                    ],
                )
            },
        }
        super().__init__(self.value_list_observers, self.aggregating_observers)
        # Stored for downstream report generation; unused in this body.
        self.target_action_distribution = target_action_distribution
        self.recent_window_size = recent_window_size

    # TODO: write this for OSS
    def generate_training_report(self) -> DQNTrainingReport:
        return DQNTrainingReport()
| 4,086 | 37.196262 | 86 | py |
ReAgent | ReAgent-master/reagent/replay_memory/prioritized_replay_buffer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An implementation of Prioritized Experience Replay (PER).
This implementation is based on the paper "Prioritized Experience Replay"
by Tom Schaul et al. (2015). Many thanks to Tom Schaul, John Quan, and Matteo
Hessel for providing useful pointers on the algorithm and its implementation.
"""
import numpy as np
import torch
from reagent.replay_memory import circular_replay_buffer, sum_tree
class PrioritizedReplayBuffer(circular_replay_buffer.ReplayBuffer):
    """An out-of-graph Replay Buffer for Prioritized Experience Replay.

    Transition priorities live in a SumTree, making priority-proportional
    sampling and priority updates O(log capacity). See
    circular_replay_buffer.py for details of the underlying circular storage.
    """

    def __init__(
        self,
        stack_size,
        replay_capacity,
        batch_size,
        update_horizon=1,
        gamma=0.99,
        max_sample_attempts=1000,
    ):
        """Initializes PrioritizedReplayBuffer.

        Args:
          stack_size: int, number of frames to use in state stack.
          replay_capacity: int, number of transitions to keep in memory.
          batch_size: int.
          update_horizon: int, length of update ('n' in n-step update).
          gamma: float, the discount factor.
          max_sample_attempts: int, maximum number of re-draws allowed when a
            priority-sampled index turns out to be an invalid transition.
        """
        super().__init__(
            stack_size=stack_size,
            replay_capacity=replay_capacity,
            batch_size=batch_size,
            update_horizon=update_horizon,
            gamma=gamma,
        )
        self._max_sample_attempts = max_sample_attempts
        self.sum_tree = sum_tree.SumTree(replay_capacity)

    def _add(self, **kwargs):
        """Internal add method to add to the underlying memory arrays.

        The arguments need to match add_arg_signature. The 'priority' argument
        is diverted to the sum tree; every other argument is converted and
        stored by the parent machinery.
        """
        self._check_args_length(**kwargs)
        # Use Schaul et al.'s (2015) scheme of setting the priority of new elements
        # to the maximum priority so far.
        # Picks out 'priority' from arguments and adds it to the sum_tree.
        transition = {}
        for element in self.get_add_args_signature():
            if element.name == "priority":
                priority = kwargs[element.name]
            else:
                transition[element.name] = element.metadata.input_to_storage(
                    kwargs[element.name]
                )
        self.sum_tree.set(self.cursor(), priority)
        super()._add_transition(transition)

    def sample_index_batch(self, batch_size: int) -> torch.Tensor:
        """Returns a batch of valid indices sampled as in Schaul et al. (2015).

        Args:
          batch_size: int, number of indices returned.

        Returns:
          1D tensor of ints, a batch of valid indices sampled proportionally
          to their priorities.

        Raises:
          RuntimeError: If the batch was not constructed after maximum number
            of tries.
        """
        # TODO: do priority sampling with torch as well.
        # Sample stratified indices. Some of them might be invalid.
        indices = self.sum_tree.stratified_sample(batch_size)
        allowed_attempts = self._max_sample_attempts
        for i in range(len(indices)):
            if not self.is_valid_transition(indices[i]):
                if allowed_attempts == 0:
                    raise RuntimeError(
                        "Max sample attempts: Tried {} times but only sampled {}"
                        " valid indices. Batch size is {}".format(
                            self._max_sample_attempts, i, batch_size
                        )
                    )
                index = indices[i]
                while not self.is_valid_transition(index) and allowed_attempts > 0:
                    # If index i is not valid keep sampling others. Note that this
                    # is not stratified.
                    index = self.sum_tree.sample()
                    allowed_attempts -= 1
                indices[i] = index
        return torch.tensor(indices, dtype=torch.int64)

    def sample_transition_batch(self, batch_size=None, indices=None):
        """Returns a batch of transitions with extra storage and the priorities.

        The extra storage are defined through the extra_storage_types constructor
        argument. When the transition is terminal next_state_batch has undefined
        contents.

        Args:
          batch_size: int, number of transitions returned. If None, the default
            batch_size will be used.
          indices: None or 1D tensor of ints, the indices of every transition in
            the batch. If None, sample the indices by priority.

        Returns:
          transition_batch: tuple of np.arrays with the shape and type as in
            get_transition_elements().
        """
        # BUGFIX: resolve the default batch size here, not only in the parent.
        # The parent resolves None internally, but this method's local
        # `batch_size` would stay None and `.view(None, 1)` below would raise
        # TypeError whenever the caller relied on the default.
        if batch_size is None:
            batch_size = self._batch_size
        transition = super().sample_transition_batch(batch_size, indices)
        # The parent returned an empty array for the probabilities. Fill it with the
        # contents of the sum tree. Note scalar values are returned as (batch_size, 1).
        batch_arrays = []
        for element_name in self._transition_elements:
            if element_name == "sampling_probabilities":
                batch = torch.from_numpy(
                    self.get_priority(transition.indices.numpy().astype(np.int32))
                ).view(batch_size, 1)
            else:
                batch = getattr(transition, element_name)
            batch_arrays.append(batch)
        return self._batch_type(*batch_arrays)

    def set_priority(self, indices, priorities):
        """Sets the priority of the given elements according to Schaul et al.

        Args:
          indices: np.array with dtype int32, of indices in range
            [0, replay_capacity).
          priorities: float, the corresponding priorities.
        """
        assert (
            indices.dtype == np.int32
        ), "Indices must be integers, " "given: {}".format(indices.dtype)
        for index, priority in zip(indices, priorities):
            self.sum_tree.set(index, priority)

    def get_priority(self, indices):
        """Fetches the priorities correspond to a batch of memory indices.

        For any memory location not yet used, the corresponding priority is 0.

        Args:
          indices: np.array with dtype int32, of indices in range
            [0, replay_capacity).

        Returns:
          priorities: np.array of float32, the corresponding priorities.
        """
        assert indices.shape, "Indices must be an array."
        assert indices.dtype == np.int32, "Indices must be int32s, " "given: {}".format(
            indices.dtype
        )
        batch_size = len(indices)
        priority_batch = np.empty((batch_size), dtype=np.float32)
        for i, memory_index in enumerate(indices):
            priority_batch[i] = self.sum_tree.get(memory_index)
        return priority_batch

    def get_transition_elements(self):
        """Parent element names plus 'sampling_probabilities'."""
        parent_transition_elements = super().get_transition_elements()
        return parent_transition_elements + ["sampling_probabilities"]
| 7,773 | 41.480874 | 88 | py |
ReAgent | ReAgent-master/reagent/replay_memory/circular_replay_buffer.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# We removed Tensorflow dependencies.
# OutOfGraphReplayBuffer is renamed ReplayBuffer
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The standard DQN replay memory.
This implementation is an out-of-graph replay memory + in-graph wrapper. It
supports vanilla n-step updates of the form typically found in the literature,
i.e. where rewards are accumulated for n steps and the intermediate trajectory
is not exposed to the agent. This does not allow, for example, performing
off-policy corrections.
"""
import abc
import collections
import gzip
import logging
import os
import pickle
from dataclasses import dataclass
from typing import Dict, List, NamedTuple, Optional, Tuple
import numpy as np
import torch
logger = logging.getLogger(__name__)
@dataclass
class ElementMetadata:
    """Abstract per-field type descriptor for the replay buffer.

    Each stored field (observation, action, ...) gets one ElementMetadata that
    knows how to validate inputs, allocate backing storage, and convert between
    the user-facing representation and the internal storage representation.
    """

    @classmethod
    @abc.abstractmethod
    def create_from_example(cls, name: str, example):
        """Constructor of the Metadata.

        Given an input example, construct an ElementMetadata for this key `name`.
        Good practice to call self.validate here after initializing metadata.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def zero_example(self):
        """What would an empty `input` example look like?"""
        raise NotImplementedError()

    @abc.abstractmethod
    def validate(self, name: str, input):
        """Does the input look correct? Assert/raise otherwise."""
        raise NotImplementedError()

    @abc.abstractmethod
    def create_storage(self, capacity: int):
        """Initialize the replay buffer with given `capacity`, for this data type.

        I.e. what is the "internal representation" of this data type in the
        replay buffer?
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def input_to_storage(self, input):
        """Convert `input` to the "internal representation" of the replay buffer."""
        raise NotImplementedError()

    @abc.abstractmethod
    def sample_to_output(self, sample):
        """Convert "internal representation" of replay buffer to `output`.

        Concretely, when we call replay_buffer.sample(...), what do we want the
        output to look like?
        """
        raise NotImplementedError()
@dataclass
class DenseMetadata(ElementMetadata):
    """
    Internal representation is a torch tensor.
    Batched output is tensor of shape (batch_size, obs_shape, stack_size)
    """

    shape: Tuple[int, ...]
    dtype: np.dtype

    @staticmethod
    def _coerced_dtype(arr) -> np.dtype:
        """Dtype of `arr`, with float64 narrowed to float32 (the storage dtype)."""
        if arr.dtype == np.dtype("float64"):
            return np.dtype("float32")
        return arr.dtype

    @classmethod
    def create_from_example(cls, name: str, example):
        """Infer shape/dtype from a representative example and validate it."""
        arr = np.array(example)
        metadata = cls(arr.shape, cls._coerced_dtype(arr))
        metadata.validate(name, example)
        return metadata

    def zero_example(self):
        """All-zeros array matching the declared shape/dtype."""
        return np.zeros(self.shape, dtype=self.dtype)

    def validate(self, name: str, input):
        """Assert `input` is array-like with the declared shape and dtype."""
        assert not isinstance(
            input, (dict, torch.Tensor)
        ), f"{name}: {type(input)} is dict or torch.Tensor"
        arr = np.array(input)
        dtype = self._coerced_dtype(arr)
        assert (
            arr.shape == self.shape and dtype == self.dtype
        ), f"{name}: Expected {self.shape} {self.dtype}, got {arr.shape} {dtype}"

    def create_storage(self, capacity: int):
        """Allocate a (capacity, *shape) torch tensor for this field."""
        storage_shape = [capacity, *self.shape]
        if self.dtype == bool:
            # not all bit representations are valid for bool, so zero-fill
            return torch.zeros(storage_shape, dtype=torch.bool)
        return torch.from_numpy(np.empty(storage_shape, dtype=self.dtype))

    def input_to_storage(self, input):
        """Coerce `input` to a tensor of the storage dtype."""
        return torch.from_numpy(np.array(input, dtype=self.dtype))

    def sample_to_output(self, sample):
        """Move the stack axis last: (batch, stack, *obs) -> (batch, *obs, stack)."""
        axes = [0] + list(range(2, len(self.shape) + 2)) + [1]
        output = sample.permute(*axes)
        # drop a trivial stack dimension of size 1
        return output.squeeze(-1) if output.shape[-1] == 1 else output
@dataclass
class IDListMetadata(ElementMetadata):
    """
    Internal representation is a np.array of Dict[str, np.array of type int64]
    Output is Dict[str, Tuple[np.array of type int32, np.array of type int64]], same as id_list in FeatureStore.
    The tuple is (offset, ids).
    TODO: implement for stack size > 1
    """

    keys: List[str]

    @classmethod
    def create_from_example(cls, name: str, example):
        """Record the key set from a representative id-list dict and validate it."""
        res = cls(list(example.keys()))
        res.validate(name, example)
        return res

    def zero_example(self):
        """An empty id-list: every known key maps to an empty list of ids."""
        return {k: [] for k in self.keys}

    def validate(self, name: str, input):
        """Assert `input` is a dict of known str keys to int64-able id arrays."""
        assert isinstance(input, dict), f"{name}: {type(input)} isn't dict"
        for k, v in input.items():
            assert isinstance(k, str), f"{name}: {k} ({type(k)}) is not str"
            assert k in self.keys, f"{name}: {k} not in {self.keys}"
            arr = np.array(v)
            if len(arr) > 0:
                assert (
                    arr.dtype == np.int64
                ), f"{name}: {v} arr has dtype {arr.dtype}, not np.int64"

    def create_storage(self, capacity: int):
        """One object cell per slot, each holding the raw per-step dict."""
        array_shape = (capacity,)
        # BUGFIX: the `np.object` alias was deprecated in NumPy 1.20 and
        # removed in 1.24; the builtin `object` is the correct dtype spelling.
        return np.empty(array_shape, dtype=object)

    def input_to_storage(self, input):
        """Stored as-is (a plain dict) inside the object array."""
        return input

    def sample_to_output(self, sample):
        """Flatten sampled dicts into per-key (offsets, ids) tensor pairs."""
        sample = sample.squeeze(1)
        result: Dict[str, Tuple[torch.Tensor, torch.Tensor]] = {}
        for k in self.keys:
            offsets = []
            ids = []
            for elem in sample:
                # uninitialized case (when sampling next)
                if elem is None:
                    cur_ids = []
                else:
                    cur_ids = elem[k]
                offsets.append(len(ids))
                ids.extend(cur_ids)
            result[k] = (
                torch.tensor(offsets, dtype=torch.int32),
                torch.tensor(ids, dtype=torch.int64),
            )
        return result
@dataclass
class IDScoreListMetadata(ElementMetadata):
    """
    Internal representation is a np.array of Dict[str, np.array of type int64]
    Output is Dict[str, Tuple[np.array of type int32, np.array of type int64, np.array of type np.float32]], same as id_list in FeatureStore.
    The tuple is (offset, ids, scores).
    TODO: implement for stack size > 1
    """

    keys: List[str]

    @classmethod
    def create_from_example(cls, name: str, example):
        """Record the key set from a representative dict and validate it."""
        res = cls(list(example.keys()))
        res.validate(name, example)
        return res

    def zero_example(self):
        """An empty id-score-list: every key maps to (no ids, no scores)."""
        return {k: ([], []) for k in self.keys}

    def validate(self, name: str, input):
        """Assert `input` maps known str keys to aligned (ids, scores) pairs."""
        assert isinstance(input, dict), f"{name}: {type(input)} isn't dict"
        for k, v in input.items():
            assert isinstance(k, str), f"{name}: {k} ({type(k)}) is not str"
            assert k in self.keys, f"{name}: {k} not in {self.keys}"
            assert (
                isinstance(v, tuple) and len(v) == 2
            ), f"{name}: {v} ({type(v)}) is not len 2 tuple"
            ids = np.array(v[0])
            scores = np.array(v[1])
            assert len(ids) == len(scores), f"{name}: {len(ids)} != {len(scores)}"
            if len(ids) > 0:
                assert (
                    ids.dtype == np.int64
                ), f"{name}: ids dtype {ids.dtype} isn't np.int64"
                assert scores.dtype in (
                    np.float32,
                    np.float64,
                ), f"{name}: scores dtype {scores.dtype} isn't np.float32/64"

    def create_storage(self, capacity: int):
        """One object cell per slot, each holding the raw per-step dict."""
        array_shape = (capacity,)
        # BUGFIX: the `np.object` alias was deprecated in NumPy 1.20 and
        # removed in 1.24; the builtin `object` is the correct dtype spelling.
        return np.empty(array_shape, dtype=object)

    def input_to_storage(self, input):
        """Stored as-is (a plain dict) inside the object array."""
        return input

    def sample_to_output(self, sample):
        """Flatten sampled dicts into per-key (offsets, ids, scores) tensors."""
        sample = sample.squeeze(1)
        # BUGFIX: annotation previously claimed a 2-tuple; values are 3-tuples.
        result: Dict[str, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]] = {}
        for k in self.keys:
            offsets = []
            ids = []
            scores = []
            for elem in sample:
                # uninitialized case (when sampling next)
                if elem is None:
                    cur_ids, cur_scores = [], []
                else:
                    cur_ids, cur_scores = elem[k]
                assert len(cur_ids) == len(
                    cur_scores
                ), f"{len(cur_ids)} != {len(cur_scores)}"
                offsets.append(len(ids))
                ids.extend(cur_ids)
                scores.extend(cur_scores)
            result[k] = (
                torch.tensor(offsets, dtype=torch.int32),
                torch.tensor(ids, dtype=torch.int64),
                torch.tensor(scores, dtype=torch.float32),
            )
        return result
class ReplayElement(NamedTuple):
    """Name plus type metadata for one field stored in the replay memory."""

    # Field name, e.g. "observation" or "action".
    name: str
    # Knows how to validate/store/batch values for this field.
    metadata: ElementMetadata
def make_replay_element(name, example):
    """Deduce the right ElementMetadata for `example` and wrap it.

    Tries each metadata type in order (dense first, then id-list variants) and
    uses the first one whose constructor accepts the example; failed attempts
    are logged at info level. Raises ValueError if nothing matches.
    """
    assert not isinstance(example, torch.Tensor), "Input shouldn't be tensor"
    for metadata_cls in (DenseMetadata, IDListMetadata, IDScoreListMetadata):
        try:
            metadata = metadata_cls.create_from_example(name, example)
        except Exception as e:
            logger.info(
                f"Failed attempt to create {metadata_cls} from ({name}) {example}: {e}"
            )
        else:
            return ReplayElement(name, metadata)
    raise ValueError(f"Unable to deduce type for {name}: {example}")
# A prefix that can not collide with variable names for checkpoint files.
STORE_FILENAME_PREFIX = "$store$_"
# This constant determines how many iterations a checkpoint is kept for.
CHECKPOINT_DURATION = 4
# Every transition added to the buffer must supply at least these keys.
REQUIRED_KEYS = ["observation", "action", "reward", "terminal"]
class ReplayBuffer(object):
"""A simple Replay Buffer.
Stores transitions, state, action, reward, next_state, terminal (and any
extra contents specified) in a circular buffer and provides a uniform
transition sampling function.
When the states consist of stacks of observations storing the states is
inefficient. This class writes observations and constructs the stacked states
at sample time.
Attributes:
add_count: int, counter of how many transitions have been added (including
the blank ones at the beginning of an episode).
"""
    def __init__(
        self,
        stack_size: int = 1,
        replay_capacity: int = 10000,
        batch_size: int = 1,
        return_everything_as_stack: bool = False,
        return_as_timeline_format: bool = False,
        update_horizon: int = 1,
        gamma: float = 0.99,
    ) -> None:
        """Initializes ReplayBuffer.

        Args:
          stack_size: int, number of frames to use in state stack.
          replay_capacity: int, number of transitions to keep in memory.
          batch_size: int.
          return_everything_as_stack: bool, set True if we want everything,
            not just states, to be stacked too
          return_as_timeline_format: bool, when set True, next_(states, actions,
            etc.) is returned in list format, like the output of TimelineOperator
          update_horizon: int, length of update ('n' in n-step update).
          gamma: float, the discount factor.

        Raises:
          ValueError: If replay_capacity is too small to hold at least one
            transition.
        """
        if replay_capacity < update_horizon + stack_size:
            raise ValueError(
                "There is not enough capacity to cover "
                "update_horizon and stack_size."
            )
        if return_as_timeline_format:
            if update_horizon <= 1:
                logger.warn(
                    f"Pointless to set return_as_timeline_format when "
                    f"update_horizon ({update_horizon}) isn't > 1."
                    "But we'll support it anyways..."
                )
        # Storage is allocated lazily on the first add() (see initialize_buffer).
        self._initialized_buffer = False
        self._stack_size = stack_size
        self._return_everything_as_stack = return_everything_as_stack
        self._return_as_timeline_format = return_as_timeline_format
        self._replay_capacity = replay_capacity
        self._batch_size = batch_size
        self._update_horizon = update_horizon
        self._gamma = gamma
        self.add_count = np.array(0)
        # When the horizon is > 1, we compute the sum of discounted rewards as a dot
        # product using the precomputed vector <gamma^0, gamma^1, ..., gamma^{n-1}>.
        self._decays = (self._gamma ** torch.arange(self._update_horizon)).unsqueeze(0)
        # Track whether an index is valid for sampling. Two invalid cases:
        # 1) the first stack_size-1 zero-padding transitions at episode start
        # 2) the last update_horizon transitions before the cursor
        self._is_index_valid = torch.zeros(self._replay_capacity, dtype=torch.bool)
        self._num_valid_indices = 0
        self._num_transitions_in_current_episode = 0
        # to be initialized on first add (put here to please pyre)
        self._store: Dict[str, torch.Tensor] = {}
        self._storage_types: List[ReplayElement] = []
        self._batch_type = collections.namedtuple("filler", [])
        # have these for ease
        self._extra_keys: List[str] = []
        self._key_to_replay_elem: Dict[str, ReplayElement] = {}
        self._zero_transition = {}
        self._transition_elements = {}
    def initialize_buffer(self, **kwargs):
        """Initialize replay buffer storage/signature based on the first add().

        Deduces one ReplayElement per key from the example values in `kwargs`
        (REQUIRED_KEYS first, then any extra keys), allocates the backing
        storage, and precomputes the batch namedtuple and the zero-padding
        transition used at episode starts.
        """
        kwarg_keys = set(kwargs.keys())
        assert set(REQUIRED_KEYS).issubset(
            kwarg_keys
        ), f"{kwarg_keys} doesn't contain all of {REQUIRED_KEYS}"

        # arbitrary order for extra keys
        self._extra_keys = list(kwarg_keys - set(REQUIRED_KEYS))

        # NOTE: order matters below — the signature must exist before storage
        # and the key->element map can be built from it.
        self._storage_types: List[ReplayElement] = [
            make_replay_element(k, kwargs[k]) for k in REQUIRED_KEYS + self._extra_keys
        ]
        self._key_to_replay_elem = {
            elem.name: elem for elem in self.get_storage_signature()
        }
        self._create_storage()
        self._transition_elements = self.get_transition_elements()
        self._batch_type = collections.namedtuple(
            "batch_type", self._transition_elements
        )
        self._zero_transition = {
            elem.name: elem.metadata.zero_example() for elem in self._storage_types
        }
        self._initialized_buffer = True

        logger.info(f"Initializing {self.__class__.__name__}...")
        logger.info(f"\t stack_size: {self._stack_size}")
        logger.info(f"\t replay_capacity: {self._replay_capacity}")
        logger.info(f"\t update_horizon: {self._update_horizon}")
        logger.info(f"\t gamma: {self._gamma}")
        logger.info("\t storage_types: ")
        for elem in self._storage_types:
            logger.info(f"\t\t {elem}")
    @property
    def size(self) -> int:
        """Number of indices currently valid for sampling (not raw add_count)."""
        return self._num_valid_indices
def set_index_valid_status(self, idx: int, is_valid: bool):
old_valid = self._is_index_valid[idx]
if not old_valid and is_valid:
self._num_valid_indices += 1
elif old_valid and not is_valid:
self._num_valid_indices -= 1
assert self._num_valid_indices >= 0, f"{self._num_valid_indices} is negative"
self._is_index_valid[idx] = is_valid
def _create_storage(self) -> None:
"""Creates the numpy arrays used to store transitions."""
for storage_element in self.get_storage_signature():
self._store[storage_element.name] = storage_element.metadata.create_storage(
self._replay_capacity
)
    def get_add_args_signature(self) -> List[ReplayElement]:
        """The signature of the add function.

        Note - Derived classes may return a different signature.

        Returns:
          list of ReplayElements defining the type of the argument signature
          needed by the add function. Defaults to the storage signature.
        """
        return self.get_storage_signature()
    def get_storage_signature(self) -> List[ReplayElement]:
        """Returns a default list of elements to be stored in this replay memory.

        Note - Derived classes may return a different signature.

        Returns:
          list of ReplayElements defining the type of the contents stored.
        """
        return self._storage_types
    def _add_zero_transition(self) -> None:
        """Adds a padding transition filled with zeros (used at episode starts)."""
        self._add(**self._zero_transition)
    def add(self, **kwargs):
        """Adds a transition to the replay memory.

        This function checks the types and handles the padding at the beginning
        of an episode. Then it calls the _add function. Since the
        next_observation in the transition will be the observation added next,
        there is no need to pass it. If the replay memory is at capacity the
        oldest transition will be discarded.

        Only accepts kwargs, which must contain observation, action, reward,
        terminal as keys.
        """
        if not self._initialized_buffer:
            self.initialize_buffer(**kwargs)

        self._check_add_types(**kwargs)
        last_idx = (self.cursor() - 1) % self._replay_capacity
        if self.is_empty() or self._store["terminal"][last_idx]:
            # New episode: reset the per-episode counter and pad with zeros so
            # frame stacking at the episode start has something to read.
            self._num_transitions_in_current_episode = 0
            for _ in range(self._stack_size - 1):
                # Child classes can rely on the padding transitions being filled with
                # zeros. This is useful when there is a priority argument.
                self._add_zero_transition()

        # remember, the last update_horizon transitions shouldn't be sampled
        cur_idx = self.cursor()
        self.set_index_valid_status(idx=cur_idx, is_valid=False)
        if self._num_transitions_in_current_episode >= self._update_horizon:
            idx = (cur_idx - self._update_horizon) % self._replay_capacity
            self.set_index_valid_status(idx=idx, is_valid=True)
        self._add(**kwargs)
        self._num_transitions_in_current_episode += 1

        # mark the next stack_size-1 as invalid (note cursor has advanced by 1)
        for i in range(self._stack_size - 1):
            idx = (self.cursor() + i) % self._replay_capacity
            self.set_index_valid_status(idx=idx, is_valid=False)

        if kwargs["terminal"]:
            # Since the frame (cur_idx) we just inserted was terminal, we now mark
            # the last "num_back" transitions as valid for sampling (including cur_idx).
            # This is because next_state is not relevant for those terminal (multi-step)
            # transitions.
            # NOTE: this was not accounted for by the original Dopamine buffer.
            # It is not a big problem, since after update_horizon steps,
            # the original Dopamine buffer will make these frames
            # available for sampling.
            # But that is update_horizon steps too late. If we train right
            # after an episode terminates, this can result in missing the
            # bulk of rewards at the end of the most recent episode.
            num_back = min(
                self._num_transitions_in_current_episode, self._update_horizon
            )
            for i in range(0, num_back):
                idx = (cur_idx - i) % self._replay_capacity
                self.set_index_valid_status(idx=idx, is_valid=True)
def _add(self, **kwargs):
"""Internal add method to add to the storage arrays.
Args:
*args: All the elements in a transition.
"""
self._check_args_length(**kwargs)
elements = self.get_add_args_signature()
for element in elements:
kwargs[element.name] = element.metadata.input_to_storage(
kwargs[element.name]
)
self._add_transition(kwargs)
def _add_transition(self, transition: Dict[str, torch.Tensor]) -> None:
"""Internal add method to add transition dictionary to storage arrays.
Args:
transition: The dictionary of names and values of the transition
to add to the storage.
"""
cursor = self.cursor()
for arg_name in transition:
self._store[arg_name][cursor] = transition[arg_name]
self.add_count += 1
def _check_args_length(self, **kwargs):
"""Check if args passed to the add method have the same length as storage.
Args:
*args: Args for elements used in storage.
Raises:
ValueError: If args have wrong length.
"""
if len(kwargs) != len(self.get_add_args_signature()):
raise ValueError(
f"Add expects: {self.get_add_args_signature()}; received {kwargs}"
)
def _check_add_types(self, **kwargs):
"""Checks if args passed to the add method match those of the storage.
Args:
*args: Args whose types need to be validated.
Raises:
ValueError: If args have wrong shape or dtype.
"""
self._check_args_length(**kwargs)
for store_element in self.get_add_args_signature():
arg_element = kwargs[store_element.name]
store_element.metadata.validate(store_element.name, arg_element)
    def is_empty(self) -> bool:
        """Is the Replay Buffer empty? True iff nothing has ever been added."""
        return self.add_count == 0
    def is_full(self) -> bool:
        """Is the Replay Buffer full? True once add_count reaches capacity."""
        return self.add_count >= self._replay_capacity
    def cursor(self) -> int:
        """Index to the location where the next transition will be written."""
        return self.add_count % self._replay_capacity
    def is_valid_transition(self, index):
        """Whether `index` may currently be sampled (see _is_index_valid)."""
        return self._is_index_valid[index]
def sample_index_batch(self, batch_size: int) -> torch.Tensor:
"""Returns a batch of valid indices sampled uniformly.
Args:
batch_size: int, number of indices returned.
Returns:
1D tensor of ints, a batch of valid indices sampled uniformly.
Raises:
RuntimeError: If there are no valid indices to sample.
"""
if self._num_valid_indices == 0:
raise RuntimeError(
f"Cannot sample {batch_size} since there are no valid indices so far."
)
valid_indices = self._is_index_valid.nonzero().squeeze(1)
return valid_indices[torch.randint(valid_indices.shape[0], (batch_size,))]
def sample_all_valid_transitions(self):
valid_indices = self._is_index_valid.nonzero().squeeze(1)
assert (
valid_indices.ndim == 1
), f"Expecting 1D tensor since is_index_valid is 1D. Got {valid_indices}."
return self.sample_transition_batch(
batch_size=len(valid_indices), indices=valid_indices
)
    def sample_transition_batch(self, batch_size=None, indices=None):
        """Returns a batch of transitions (including any extra contents).

        If get_transition_elements has been overridden and defines elements not
        stored in self._store, None will be returned and it will be left to the
        child class to fill it. For example, for the child class
        PrioritizedReplayBuffer, the contents of the sampling_probabilities are
        stored separately in a sum tree.

        When the transition is terminal next_state_batch has undefined contents.

        NOTE: This transition contains the indices of the sampled elements.
        These are only valid during the call to sample_transition_batch, i.e.
        they may be used by subclasses of this replay buffer but may point to
        different data as soon as sampling is done.

        NOTE: Tensors are reshaped. I.e., state is 2-D unless stack_size > 1.
        Scalar values are returned as (batch_size, 1) instead of (batch_size,).

        Args:
          batch_size: int, number of transitions returned. If None, the default
            batch_size will be used.
          indices: None or Tensor, the indices of every transition in the
            batch. If None, sample the indices uniformly.

        Returns:
          transition_batch: tuple of Tensors with the shape and type as in
            get_transition_elements().

        Raises:
          ValueError: If an element to be sampled is missing from the replay
            buffer.
        """
        if batch_size is None:
            batch_size = self._batch_size
        if indices is None:
            indices = self.sample_index_batch(batch_size)
        else:
            assert isinstance(
                indices, torch.Tensor
            ), f"Indices {indices} have type {type(indices)} instead of torch.Tensor"
            indices = indices.type(dtype=torch.int64)
        assert len(indices) == batch_size

        # calculate 2d array of indices with size (batch_size, update_horizon)
        # ith row contains the multistep indices starting at indices[i]
        multistep_indices = indices.unsqueeze(1) + torch.arange(self._update_horizon)
        multistep_indices %= self._replay_capacity

        steps = self._get_steps(multistep_indices)

        # to pass in to next_features and reward to toggle whether to return
        # a list batch of length steps.
        if self._return_as_timeline_format:
            next_indices = (indices + 1) % self._replay_capacity
            steps_for_timeline_format = steps
        else:
            next_indices = (indices + steps) % self._replay_capacity
            steps_for_timeline_format = None

        batch_arrays = []
        for element_name in self._transition_elements:
            if element_name == "state":
                batch = self._get_batch_for_indices("observation", indices)
            elif element_name == "next_state":
                batch = self._get_batch_for_indices(
                    "observation", next_indices, steps_for_timeline_format
                )
            elif element_name == "indices":
                batch = indices
            elif element_name == "terminal":
                # terminal flag of the *last* frame in each multi-step span
                terminal_indices = (indices + steps - 1) % self._replay_capacity
                batch = self._store["terminal"][terminal_indices].to(torch.bool)
            elif element_name == "reward":
                if self._return_as_timeline_format or self._return_everything_as_stack:
                    batch = self._get_batch_for_indices(
                        "reward", indices, steps_for_timeline_format
                    )
                else:
                    batch = self._reduce_multi_step_reward(multistep_indices, steps)
            elif element_name == "step":
                batch = steps
            elif element_name in self._store:
                batch = self._get_batch_for_indices(element_name, indices)
            elif element_name.startswith("next_"):
                store_name = element_name[len("next_") :]
                assert (
                    store_name in self._store
                ), f"{store_name} is not in {self._store.keys()}"
                batch = self._get_batch_for_indices(
                    store_name, next_indices, steps_for_timeline_format
                )
            else:
                # We assume the other elements are filled in by the subclass.
                batch = None

            # always enables the batch_size dim
            if isinstance(batch, torch.Tensor) and batch.ndim == 1:
                batch = batch.unsqueeze(1)
            batch_arrays.append(batch)
        return self._batch_type(*batch_arrays)
def _get_batch_for_indices(
self, key: str, indices: torch.Tensor, steps: Optional[torch.Tensor] = None
):
"""Get batch for given key.
There are two orthogonal special cases.
- returning a stack of features:
View this case as adding an extra "stack" dimension to feature,
causing the shape to be (*feature.shape, stack_size)
- returning next_features as a list (same as timeline output):
This should only be on if update_horizon is > 1.
If this is the case then we don't return a torch.Tensor,
but instead return List[List[features]] where the ith
element is torch.tensor([feat_{t+1}, ..., feat_{t+k}]);
where k <= multi_steps could be strictly less if there's a
terminal state.
NOTE: this option is activated by using the optional steps parameter.
Otherwise, we just return the indexed features in the replay buffer.
In all of the cases, we assume indices is 1-dimensional.
"""
assert len(indices.shape) == 1, f"{indices.shape} isn't 1-dimensional."
if steps is not None:
# for next state timeline format
assert indices.shape == steps.shape, f"{indices.shape} != {steps.shape}"
return [
self._get_stack_for_indices(
key, torch.arange(start_idx, start_idx + step)
)
for start_idx, step in zip(indices.tolist(), steps.tolist())
]
else:
return self._get_stack_for_indices(key, indices)
def _reduce_multi_step_reward(
self, multistep_indices: torch.Tensor, steps: torch.Tensor
):
# default behavior is to sum up multi_step reward
masks = torch.arange(self._update_horizon) < steps.unsqueeze(1)
rewards = self._store["reward"][multistep_indices] * self._decays * masks
return rewards.sum(dim=1)
    def _get_stack_for_indices(self, key: str, indices: torch.Tensor) -> torch.Tensor:
        """Get stack of transition data.

        For each index i, gathers the ``stack_size`` consecutive rows ending
        at i (wrapping around the circular buffer), then applies the
        element's metadata post-processing.
        """
        assert len(indices.shape) == 1, f"{indices.shape} not 1-dimensional"
        # calculate 2d array of indices of shape (batch_size, stack_size)
        # ith row contain indices in the stack of obs at indices[i]
        stack_indices = indices.unsqueeze(1) + torch.arange(-self._stack_size + 1, 1)
        # wrap around: the buffer is circular, so negative / overflowing
        # offsets map back into [0, replay_capacity)
        # pyre-fixme[16]: `Tensor` has no attribute `__imod__`.
        stack_indices %= self._replay_capacity
        retval = self._store[key][stack_indices]
        # element-specific output transform; exact semantics depend on the
        # ReplayElement metadata declared for this key (defined elsewhere)
        return self._key_to_replay_elem[key].metadata.sample_to_output(retval)
def _get_steps(self, multistep_indices: torch.Tensor) -> torch.Tensor:
"""Calculate trajectory length, defined to be the number of states
in this multi_step transition until terminal state or until
end of multi_step (a.k.a. update_horizon).
"""
terminals = self._store["terminal"][multistep_indices].to(torch.bool)
# if trajectory is non-terminal, we'll have traj_length = update_horizon
terminals[:, -1] = True
# use argmax to find the first True in each trajectory
# NOTE: argmax may not contain the first occurrence of each maximal value found,
# unless it is unique, so we need to make each boolean unique,
# with the first occurance the largarst number
terminals = terminals.float()
unique_mask = torch.arange(terminals.shape[1] + 1, 1, -1)
terminals = torch.einsum("ab,b->ab", (terminals, unique_mask))
return torch.argmax(terminals, dim=1) + 1
def get_transition_elements(self):
"""Returns element names for sample_transition_batch."""
extra_names = []
for name in self._extra_keys:
for prefix in ["", "next_"]:
extra_names.append(f"{prefix}{name}")
return [
"state",
"action",
"reward",
"next_state",
"next_action",
"next_reward",
"terminal",
"indices",
"step",
*extra_names,
]
def _generate_filename(self, checkpoint_dir, name, suffix):
return os.path.join(checkpoint_dir, "{}_ckpt.{}.gz".format(name, suffix))
def _return_checkpointable_elements(self):
"""Return the dict of elements of the class for checkpointing.
Returns:
checkpointable_elements: dict containing all non private (starting with
_) members + all the arrays inside self._store.
"""
checkpointable_elements = {}
for member_name, member in self.__dict__.items():
if member_name == "_store":
for array_name, array in self._store.items():
checkpointable_elements[STORE_FILENAME_PREFIX + array_name] = array
elif not member_name.startswith("_"):
checkpointable_elements[member_name] = member
return checkpointable_elements
    def save(self, checkpoint_dir, iteration_number):
        """Save the ReplayBuffer attributes into a file.
        This method will save all the replay buffer's state in a single file.
        Args:
            checkpoint_dir: str, the directory where numpy checkpoint files should be
                saved.
            iteration_number: int, iteration_number to use as a suffix in naming
                numpy checkpoint files.
        """
        # TODO: Save tensors to torch files.
        # Silently no-op when the directory does not exist (best-effort saving).
        if not os.path.exists(checkpoint_dir):
            return
        checkpointable_elements = self._return_checkpointable_elements()
        # one gzip file per checkpointable element
        for attr in checkpointable_elements:
            filename = self._generate_filename(checkpoint_dir, attr, iteration_number)
            with open(filename, "wb") as f:
                with gzip.GzipFile(fileobj=f) as outfile:
                    # Checkpoint the np arrays in self._store with np.save instead of
                    # pickling the dictionary is critical for file size and performance.
                    # STORE_FILENAME_PREFIX indicates that the variable is contained in
                    # self._store.
                    if attr.startswith(STORE_FILENAME_PREFIX):
                        array_name = attr[len(STORE_FILENAME_PREFIX) :]
                        # storage tensors are persisted as numpy arrays
                        np.save(
                            outfile, self._store[array_name].numpy(), allow_pickle=False
                        )
                    # Some numpy arrays might not be part of storage
                    elif isinstance(self.__dict__[attr], np.ndarray):
                        np.save(outfile, self.__dict__[attr], allow_pickle=False)
                    else:
                        # fall back to pickle for scalar / python-object members
                        pickle.dump(self.__dict__[attr], outfile)
            # After writing a checkpoint file, we garbage collect the checkpoint file
            # that is four versions old (CHECKPOINT_DURATION iterations back).
            stale_iteration_number = iteration_number - CHECKPOINT_DURATION
            if stale_iteration_number >= 0:
                stale_filename = self._generate_filename(
                    checkpoint_dir, attr, stale_iteration_number
                )
                try:
                    os.remove(stale_filename)
                except FileNotFoundError:
                    # stale file may already be gone; nothing to clean up
                    pass
    def load(self, checkpoint_dir, suffix):
        """Restores the object from bundle_dictionary and numpy checkpoints.
        Args:
            checkpoint_dir: str, the directory where to read the numpy checkpointed
                files from.
            suffix: str, the suffix to use in numpy checkpoint files.
        Raises:
            FileNotFoundError: If not all expected files are found in directory.
        """
        # TODO: Load tensors from torch files.
        save_elements = self._return_checkpointable_elements()
        # We will first make sure we have all the necessary files available to avoid
        # loading a partially-specified (i.e. corrupted) replay buffer.
        for attr in save_elements:
            filename = self._generate_filename(checkpoint_dir, attr, suffix)
            if not os.path.exists(filename):
                raise FileNotFoundError(None, None, "Missing file: {}".format(filename))
        # If we've reached this point then we have verified that all expected files
        # are available.
        for attr in save_elements:
            filename = self._generate_filename(checkpoint_dir, attr, suffix)
            with open(filename, "rb") as f:
                with gzip.GzipFile(fileobj=f) as infile:
                    if attr.startswith(STORE_FILENAME_PREFIX):
                        # storage arrays were saved as numpy; convert back to tensors
                        array_name = attr[len(STORE_FILENAME_PREFIX) :]
                        self._store[array_name] = torch.from_numpy(
                            np.load(infile, allow_pickle=False)
                        )
                    elif isinstance(self.__dict__[attr], np.ndarray):
                        self.__dict__[attr] = np.load(infile, allow_pickle=False)
                    else:
                        # everything else was pickled on save
                        self.__dict__[attr] = pickle.load(infile)
| 37,435 | 41.015713 | 141 | py |
ReAgent | ReAgent-master/reagent/net_builder/discrete_dqn_net_builder.py | #!/usr/bin/env python3
import abc
from typing import List
import reagent.core.types as rlt
import torch
from reagent.core.fb_checker import IS_FB_ENVIRONMENT
from reagent.core.parameters import NormalizationData
from reagent.models.base import ModelBase
from reagent.prediction.predictor_wrapper import (
DiscreteDqnWithPreprocessor,
BinaryDifferenceScorerWithPreprocessor,
)
from reagent.preprocessing.normalization import get_num_output_features
from reagent.preprocessing.preprocessor import Preprocessor
if IS_FB_ENVIRONMENT:
from reagent.fb.prediction.fb_predictor_wrapper import (
FbDiscreteDqnPredictorWrapper as DiscreteDqnPredictorWrapper,
FbBinaryDifferenceScorerPredictorWrapper as BinaryDifferenceScorerPredictorWrapper,
)
else:
from reagent.prediction.predictor_wrapper import (
DiscreteDqnPredictorWrapper,
BinaryDifferenceScorerPredictorWrapper,
)
class DiscreteDQNNetBuilder:
    """
    Base class for discrete DQN net builder.

    Subclasses implement ``build_q_network``; this base provides helpers that
    wrap a trained Q-network into TorchScript-able serving modules.
    """

    @abc.abstractmethod
    def build_q_network(
        self,
        state_feature_config: rlt.ModelFeatureConfig,
        state_normalization_data: NormalizationData,
        output_dim: int,
    ) -> ModelBase:
        """Construct the Q-network; ``output_dim`` is the number of actions."""
        pass

    def _get_input_dim(self, state_normalization_data: NormalizationData) -> int:
        # width of the preprocessed (dense) state vector
        return get_num_output_features(
            state_normalization_data.dense_normalization_parameters
        )

    def build_serving_module(
        self,
        q_network: ModelBase,
        state_normalization_data: NormalizationData,
        action_names: List[str],
        state_feature_config: rlt.ModelFeatureConfig,
        predictor_wrapper_type=None,
    ) -> torch.nn.Module:
        """
        Returns a TorchScript predictor module
        """
        # serving-time preprocessing runs on CPU (use_gpu=False)
        state_preprocessor = Preprocessor(
            state_normalization_data.dense_normalization_parameters, False
        )
        dqn_with_preprocessor = DiscreteDqnWithPreprocessor(
            q_network.cpu_model().eval(), state_preprocessor, state_feature_config
        )
        # callers may override the wrapper class; default depends on environment
        predictor_wrapper_type = predictor_wrapper_type or DiscreteDqnPredictorWrapper
        return predictor_wrapper_type(
            dqn_with_preprocessor, action_names, state_feature_config
        )

    def build_binary_difference_scorer(
        self,
        q_network: ModelBase,
        state_normalization_data: NormalizationData,
        action_names: List[str],
        state_feature_config: rlt.ModelFeatureConfig,
    ) -> torch.nn.Module:
        """
        Returns softmax(1) - softmax(0)

        Only defined for binary action spaces (exactly two actions).
        """
        assert len(action_names) == 2
        state_preprocessor = Preprocessor(
            state_normalization_data.dense_normalization_parameters, False
        )
        binary_difference_scorer_with_preprocessor = (
            BinaryDifferenceScorerWithPreprocessor(
                q_network.cpu_model().eval(), state_preprocessor, state_feature_config
            )
        )
        return BinaryDifferenceScorerPredictorWrapper(
            binary_difference_scorer_with_preprocessor, state_feature_config
        )
| 3,123 | 32.234043 | 91 | py |
ReAgent | ReAgent-master/reagent/net_builder/continuous_actor_net_builder.py | #!/usr/bin/env python3
import abc
import reagent.core.types as rlt
import torch
from reagent.core.fb_checker import IS_FB_ENVIRONMENT
from reagent.core.parameters import NormalizationData
from reagent.models.base import ModelBase
from reagent.prediction.predictor_wrapper import (
ActorWithPreprocessor,
RankingActorWithPreprocessor,
)
from reagent.preprocessing.postprocessor import Postprocessor
from reagent.preprocessing.preprocessor import Preprocessor
if IS_FB_ENVIRONMENT:
from reagent.fb.prediction.fb_predictor_wrapper import (
FbActorPredictorWrapper as ActorPredictorWrapper,
FbRankingActorPredictorWrapper as RankingActorPredictorWrapper,
)
else:
from reagent.prediction.predictor_wrapper import ActorPredictorWrapper
class ContinuousActorNetBuilder:
    """
    Base class for continuous actor net builder.

    Subclasses implement ``build_actor``; this base wraps a trained actor
    into TorchScript-able serving modules (plain and ranking variants).
    """

    @property
    @abc.abstractmethod
    def default_action_preprocessing(self) -> str:
        """Name of the action preprocessing scheme this builder expects."""
        pass

    @abc.abstractmethod
    def build_actor(
        self,
        state_feature_config: rlt.ModelFeatureConfig,
        state_normalization_data: NormalizationData,
        action_normalization_data: NormalizationData,
    ) -> ModelBase:
        """Construct the actor network."""
        pass

    def build_serving_module(
        self,
        actor: ModelBase,
        state_feature_config: rlt.ModelFeatureConfig,
        state_normalization_data: NormalizationData,
        action_normalization_data: NormalizationData,
        serve_mean_policy: bool = False,
    ) -> torch.nn.Module:
        """
        Returns a TorchScript predictor module
        """
        # serving-time preprocessing/postprocessing runs on CPU
        state_preprocessor = Preprocessor(
            state_normalization_data.dense_normalization_parameters, use_gpu=False
        )
        # maps the network's action output back to the raw action space
        postprocessor = Postprocessor(
            action_normalization_data.dense_normalization_parameters, use_gpu=False
        )
        actor_with_preprocessor = ActorWithPreprocessor(
            actor.cpu_model().eval(),
            state_preprocessor,
            state_feature_config,
            postprocessor,
            serve_mean_policy=serve_mean_policy,
        )
        # action feature ids, in preprocessor sort order
        action_features = Preprocessor(
            action_normalization_data.dense_normalization_parameters, use_gpu=False
        ).sorted_features
        return ActorPredictorWrapper(
            actor_with_preprocessor, state_feature_config, action_features
        )

    def build_ranking_serving_module(
        self,
        actor: ModelBase,
        state_normalization_data: NormalizationData,
        candidate_normalization_data: NormalizationData,
        num_candidates: int,
        action_normalization_data: NormalizationData,
    ) -> torch.nn.Module:
        """Returns a TorchScript predictor module that ranks a fixed number
        (``num_candidates``) of candidates per request."""
        state_preprocessor = Preprocessor(
            state_normalization_data.dense_normalization_parameters, use_gpu=False
        )
        candidate_preprocessor = Preprocessor(
            candidate_normalization_data.dense_normalization_parameters, use_gpu=False
        )
        postprocessor = Postprocessor(
            action_normalization_data.dense_normalization_parameters, use_gpu=False
        )
        actor_with_preprocessor = RankingActorWithPreprocessor(
            model=actor.cpu_model().eval(),
            state_preprocessor=state_preprocessor,
            candidate_preprocessor=candidate_preprocessor,
            num_candidates=num_candidates,
            action_postprocessor=postprocessor,
        )
        # action feature ids, in preprocessor sort order
        action_features = Preprocessor(
            action_normalization_data.dense_normalization_parameters, use_gpu=False
        ).sorted_features
        return RankingActorPredictorWrapper(actor_with_preprocessor, action_features)
| 3,653 | 33.471698 | 86 | py |
ReAgent | ReAgent-master/reagent/net_builder/slate_ranking_net_builder.py | #!/usr/bin/env python3
import abc
import torch
from reagent.core.registry_meta import RegistryMeta
class SlateRankingNetBuilder:
    """Abstract base for builders of slate-ranking networks.

    Concrete subclasses implement ``build_slate_ranking_network`` and return
    a ready-to-train ``torch.nn.Module``.
    """

    @abc.abstractmethod
    def build_slate_ranking_network(
        self, state_dim, candidate_dim, candidate_size, slate_size
    ) -> torch.nn.Module:
        """Construct the ranking network for the given problem dimensions."""
        pass
| 367 | 18.368421 | 66 | py |
ReAgent | ReAgent-master/reagent/net_builder/synthetic_reward_net_builder.py | #!/usr/bin/env python3
import abc
from typing import List, Optional
import torch
from reagent.core.fb_checker import IS_FB_ENVIRONMENT
from reagent.core.parameters import NormalizationData
from reagent.models.base import ModelBase
from reagent.preprocessing.preprocessor import Preprocessor
if IS_FB_ENVIRONMENT:
from reagent.fb.prediction.synthetic_reward.synthetic_reward_predictor_wrapper import (
FbSyntheticRewardPredictorWrapper as SyntheticRewardPredictorWrapper,
)
else:
from reagent.prediction.synthetic_reward.synthetic_reward_predictor_wrapper import (
SyntheticRewardPredictorWrapper,
)
class SyntheticRewardNetBuilder:
    """
    Base class for Synthetic Reward net builder.

    Subclasses implement ``build_synthetic_reward_network``; this base wraps
    a trained network into a TorchScript predictor.
    """

    @abc.abstractmethod
    def build_synthetic_reward_network(
        self,
        state_normalization_data: NormalizationData,
        action_normalization_data: Optional[NormalizationData] = None,
        discrete_action_names: Optional[List[str]] = None,
    ) -> ModelBase:
        """Construct the synthetic-reward network. Provide
        ``action_normalization_data`` for continuous actions or
        ``discrete_action_names`` for discrete actions."""
        pass

    def build_serving_module(
        self,
        seq_len: int,
        synthetic_reward_network: ModelBase,
        state_normalization_data: NormalizationData,
        action_normalization_data: Optional[NormalizationData] = None,
        discrete_action_names: Optional[List[str]] = None,
    ) -> torch.nn.Module:
        """
        Returns a TorchScript predictor module
        """
        state_preprocessor = Preprocessor(
            state_normalization_data.dense_normalization_parameters
        )
        if not discrete_action_names:
            # continuous-action case: actions are preprocessed like states
            assert action_normalization_data is not None
            action_preprocessor = Preprocessor(
                action_normalization_data.dense_normalization_parameters
            )
            return SyntheticRewardPredictorWrapper(
                seq_len,
                state_preprocessor,
                action_preprocessor,
                # pyre-fixme[29]: `Union[torch.Tensor, torch.nn.Module]` is not a
                # function.
                synthetic_reward_network.export_mlp().cpu().eval(),
            )
        else:
            # TODO add Discrete Single Step Synthetic Reward Predictor
            # NOTE: placeholder module — discrete-action serving is not
            # implemented yet; this returns a dummy scripted Linear(1, 1)
            return torch.jit.script(torch.nn.Linear(1, 1))
| 2,259 | 33.242424 | 91 | py |
ReAgent | ReAgent-master/reagent/net_builder/parametric_dqn_net_builder.py | #!/usr/bin/env python3
import abc
import torch
from reagent.core.fb_checker import IS_FB_ENVIRONMENT
from reagent.core.parameters import NormalizationData
from reagent.core.registry_meta import RegistryMeta
from reagent.models.base import ModelBase
from reagent.prediction.predictor_wrapper import ParametricDqnWithPreprocessor
from reagent.preprocessing.preprocessor import Preprocessor
if IS_FB_ENVIRONMENT:
from reagent.fb.prediction.fb_predictor_wrapper import (
FbParametricDqnPredictorWrapper as ParametricDqnPredictorWrapper,
)
else:
from reagent.prediction.predictor_wrapper import ParametricDqnPredictorWrapper
class ParametricDQNNetBuilder:
    """
    Base class for parametric DQN net builder.

    Parametric DQN scores (state, action) pairs, so both states and actions
    have their own normalization data and preprocessors.
    """

    @abc.abstractmethod
    def build_q_network(
        self,
        state_normalization_data: NormalizationData,
        action_normalization_data: NormalizationData,
        output_dim: int = 1,
    ) -> ModelBase:
        """Construct the Q-network scoring (state, action) pairs."""
        pass

    def build_serving_module(
        self,
        q_network: ModelBase,
        state_normalization_data: NormalizationData,
        action_normalization_data: NormalizationData,
    ) -> torch.nn.Module:
        """
        Returns a TorchScript predictor module
        """
        # serving-time preprocessing runs on CPU (use_gpu=False)
        state_preprocessor = Preprocessor(
            state_normalization_data.dense_normalization_parameters, False
        )
        action_preprocessor = Preprocessor(
            action_normalization_data.dense_normalization_parameters, False
        )
        dqn_with_preprocessor = ParametricDqnWithPreprocessor(
            q_network.cpu_model().eval(), state_preprocessor, action_preprocessor
        )
        return ParametricDqnPredictorWrapper(
            dqn_with_preprocessor=dqn_with_preprocessor
        )
| 1,777 | 30.192982 | 82 | py |
ReAgent | ReAgent-master/reagent/net_builder/slate_reward_net_builder.py | #!/usr/bin/env python3
import abc
import torch
class SlateRewardNetBuilder:
    """
    Base class for slate reward network builder.
    """

    @abc.abstractmethod
    def build_slate_reward_network(
        self, state_dim, candidate_dim, candidate_size, slate_size
    ) -> torch.nn.Module:
        """Construct the slate reward network for the given dimensions."""
        pass

    # `abc.abstractproperty` is deprecated since Python 3.3; the supported,
    # behavior-identical spelling is stacking @property over @abstractmethod.
    @property
    @abc.abstractmethod
    def expect_slate_wise_reward(self) -> bool:
        """Whether the built network predicts one reward per slate rather
        than per item."""
        pass
| 400 | 17.227273 | 66 | py |
ReAgent | ReAgent-master/reagent/net_builder/discrete_actor_net_builder.py | #!/usr/bin/env python3
import abc
from typing import List
import reagent.core.types as rlt
import torch
from reagent.core.fb_checker import IS_FB_ENVIRONMENT
from reagent.core.parameters import NormalizationData
from reagent.models.base import ModelBase
from reagent.prediction.predictor_wrapper import ActorWithPreprocessor
from reagent.preprocessing.preprocessor import Preprocessor
if IS_FB_ENVIRONMENT:
from reagent.fb.prediction.fb_predictor_wrapper import (
FbActorPredictorWrapper as ActorPredictorWrapper,
)
else:
from reagent.prediction.predictor_wrapper import ActorPredictorWrapper
class DiscreteActorNetBuilder:
    """
    Base class for discrete actor net builder.

    Subclasses implement ``build_actor``; this base wraps a trained actor
    into a TorchScript-able serving module.
    """

    @abc.abstractmethod
    def build_actor(
        self,
        state_normalization_data: NormalizationData,
        num_actions: int,
    ) -> ModelBase:
        """Construct the actor network over ``num_actions`` discrete actions."""
        pass

    def build_serving_module(
        self,
        actor: ModelBase,
        state_feature_config: rlt.ModelFeatureConfig,
        state_normalization_data: NormalizationData,
        action_feature_ids: List[int],
    ) -> torch.nn.Module:
        """
        Returns a TorchScript predictor module
        """
        # serving-time preprocessing runs on CPU
        state_preprocessor = Preprocessor(
            state_normalization_data.dense_normalization_parameters, use_gpu=False
        )
        actor_with_preprocessor = ActorWithPreprocessor(
            actor.cpu_model().eval(), state_preprocessor, state_feature_config
        )
        return ActorPredictorWrapper(
            actor_with_preprocessor, state_feature_config, action_feature_ids
        )
| 1,608 | 27.732143 | 82 | py |
ReAgent | ReAgent-master/reagent/net_builder/value_net_builder.py | #!/usr/bin/env python3
import abc
import torch
from reagent.core.parameters import NormalizationData
from reagent.core.registry_meta import RegistryMeta
class ValueNetBuilder:
    """Abstract base for builders that produce state-value (V) networks."""

    @abc.abstractmethod
    def build_value_network(
        self, state_normalization_data: NormalizationData
    ) -> torch.nn.Module:
        """Construct a value network for states described by the given
        normalization data."""
        pass
| 389 | 18.5 | 57 | py |
ReAgent | ReAgent-master/reagent/net_builder/categorical_dqn_net_builder.py | #!/usr/bin/env python3
import abc
from typing import List
import reagent.core.types as rlt
import torch
from reagent.core.fb_checker import IS_FB_ENVIRONMENT
from reagent.core.parameters import NormalizationData
from reagent.core.registry_meta import RegistryMeta
from reagent.models.base import ModelBase
from reagent.prediction.predictor_wrapper import DiscreteDqnWithPreprocessor
from reagent.preprocessing.normalization import get_num_output_features
from reagent.preprocessing.preprocessor import Preprocessor
if IS_FB_ENVIRONMENT:
from reagent.fb.prediction.fb_predictor_wrapper import (
FbDiscreteDqnPredictorWrapper as DiscreteDqnPredictorWrapper,
)
else:
from reagent.prediction.predictor_wrapper import DiscreteDqnPredictorWrapper
class CategoricalDQNNetBuilder:
    """
    Base class for categorical DQN net builder.

    The Q-network outputs a categorical value distribution over ``num_atoms``
    atoms per action, with support bounded by ``qmin``/``qmax``.
    """

    @abc.abstractmethod
    def build_q_network(
        self,
        state_normalization_data: NormalizationData,
        output_dim: int,
        num_atoms: int,
        qmin: int,
        qmax: int,
    ) -> ModelBase:
        """Construct the distributional Q-network; ``output_dim`` is the
        number of actions."""
        pass

    def _get_input_dim(self, state_normalization_data: NormalizationData) -> int:
        # width of the preprocessed (dense) state vector
        return get_num_output_features(
            state_normalization_data.dense_normalization_parameters
        )

    def build_serving_module(
        self,
        q_network: ModelBase,
        state_normalization_data: NormalizationData,
        action_names: List[str],
        state_feature_config: rlt.ModelFeatureConfig,
    ) -> torch.nn.Module:
        """
        Returns a TorchScript predictor module
        """
        # serving-time preprocessing runs on CPU (use_gpu=False)
        state_preprocessor = Preprocessor(
            state_normalization_data.dense_normalization_parameters, False
        )
        dqn_with_preprocessor = DiscreteDqnWithPreprocessor(
            q_network.cpu_model().eval(), state_preprocessor, state_feature_config
        )
        return DiscreteDqnPredictorWrapper(
            dqn_with_preprocessor, action_names, state_feature_config
        )
| 2,019 | 30.076923 | 82 | py |
ReAgent | ReAgent-master/reagent/net_builder/quantile_dqn_net_builder.py | #!/usr/bin/env python3
import abc
from typing import List
import reagent.core.types as rlt
import torch
from reagent.core.fb_checker import IS_FB_ENVIRONMENT
from reagent.core.parameters import NormalizationData
from reagent.core.registry_meta import RegistryMeta
from reagent.models import ModelBase, Sequential
from reagent.prediction.predictor_wrapper import DiscreteDqnWithPreprocessor
from reagent.preprocessing.normalization import get_num_output_features
from reagent.preprocessing.preprocessor import Preprocessor
if IS_FB_ENVIRONMENT:
from reagent.fb.prediction.fb_predictor_wrapper import (
FbDiscreteDqnPredictorWrapper as DiscreteDqnPredictorWrapper,
)
else:
from reagent.prediction.predictor_wrapper import DiscreteDqnPredictorWrapper
class _Mean(torch.nn.Module):
def forward(self, input: torch.Tensor) -> torch.Tensor:
assert input.ndim == 3 # type: ignore
return torch.mean(input, dim=2)
class QRDQNNetBuilder:
    """
    Base class for QRDQN net builder.

    The Q-network outputs ``num_atoms`` quantile values per action; at
    serving time they are averaged (see ``_Mean``) into a scalar Q-value.
    """

    @abc.abstractmethod
    def build_q_network(
        self,
        state_normalization_data: NormalizationData,
        output_dim: int,
        num_atoms: int,
    ) -> ModelBase:
        """Construct the quantile Q-network; ``output_dim`` is the number of
        actions."""
        pass

    def _get_input_dim(self, state_normalization_data: NormalizationData) -> int:
        # width of the preprocessed (dense) state vector
        return get_num_output_features(
            state_normalization_data.dense_normalization_parameters
        )

    def build_serving_module(
        self,
        q_network: ModelBase,
        state_normalization_data: NormalizationData,
        action_names: List[str],
        state_feature_config: rlt.ModelFeatureConfig,
    ) -> torch.nn.Module:
        """
        Returns a TorchScript predictor module
        """
        state_preprocessor = Preprocessor(
            state_normalization_data.dense_normalization_parameters, False
        )
        dqn_with_preprocessor = DiscreteDqnWithPreprocessor(
            # append _Mean so the served network emits one value per action
            Sequential(q_network.cpu_model().eval(), _Mean()),  # type: ignore
            state_preprocessor,
            state_feature_config,
        )
        return DiscreteDqnPredictorWrapper(
            dqn_with_preprocessor, action_names, state_feature_config
        )
| 2,210 | 30.140845 | 81 | py |
ReAgent | ReAgent-master/reagent/net_builder/synthetic_reward/ngram_synthetic_reward.py | #!/usr/bin/env python3
from typing import List, Optional
import torch
from reagent.core.dataclasses import dataclass, field
from reagent.core.parameters import NormalizationData, param_hash, ConvNetParameters
from reagent.models.base import ModelBase
from reagent.models.synthetic_reward import (
NGramConvolutionalNetwork,
SyntheticRewardNet,
NGramFullyConnectedNetwork,
)
from reagent.net_builder.synthetic_reward_net_builder import SyntheticRewardNetBuilder
from reagent.preprocessing.normalization import get_num_output_features
@dataclass
class NGramSyntheticReward(SyntheticRewardNetBuilder):
    """Builds an n-gram fully-connected synthetic reward network: each step
    is scored from a sliding window of ``context_size`` state/action steps.
    """

    __hash__ = param_hash

    # MLP hidden-layer sizes with matching activations
    sizes: List[int] = field(default_factory=lambda: [256, 128])
    activations: List[str] = field(default_factory=lambda: ["relu", "relu"])
    last_layer_activation: str = "sigmoid"
    # number of consecutive steps in each n-gram window
    context_size: int = 3
    use_layer_norm: bool = False

    def build_synthetic_reward_network(
        self,
        state_normalization_data: NormalizationData,
        action_normalization_data: Optional[NormalizationData] = None,
        discrete_action_names: Optional[List[str]] = None,
    ) -> ModelBase:
        """Construct the network; action width comes from the discrete action
        names if given, otherwise from the continuous normalization data."""
        state_dim = get_num_output_features(
            state_normalization_data.dense_normalization_parameters
        )
        if not discrete_action_names:
            # continuous actions: width derived from normalization parameters
            assert action_normalization_data is not None
            action_dim = get_num_output_features(
                action_normalization_data.dense_normalization_parameters
            )
        else:
            # discrete actions: one dimension per action name
            action_dim = len(discrete_action_names)
        net = NGramFullyConnectedNetwork(
            state_dim=state_dim,
            action_dim=action_dim,
            sizes=self.sizes,
            activations=self.activations,
            last_layer_activation=self.last_layer_activation,
            context_size=self.context_size,
            use_layer_norm=self.use_layer_norm,
        )
        return SyntheticRewardNet(net)
@dataclass
class NGramConvNetSyntheticReward(SyntheticRewardNetBuilder):
    """Convolutional variant of the n-gram synthetic reward builder: the
    n-gram window is processed by a conv net before the fully-connected head.
    """

    __hash__ = param_hash

    # fully-connected head sizes with matching activations
    sizes: List[int] = field(default_factory=lambda: [256, 128])
    activations: List[str] = field(default_factory=lambda: ["relu", "relu"])
    last_layer_activation: str = "sigmoid"
    # number of consecutive steps in each n-gram window
    context_size: int = 3
    # convolution / pooling configuration applied to the window
    conv_net_params: ConvNetParameters = field(
        default_factory=lambda: ConvNetParameters(
            conv_dims=[256, 128],
            conv_height_kernels=[1, 1],
            pool_types=["max", "max"],
            pool_kernel_sizes=[1, 1],
        )
    )
    use_layer_norm: bool = False

    def build_synthetic_reward_network(
        self,
        state_normalization_data: NormalizationData,
        action_normalization_data: Optional[NormalizationData] = None,
        discrete_action_names: Optional[List[str]] = None,
    ) -> ModelBase:
        """Construct the network; action width comes from the discrete action
        names if given, otherwise from the continuous normalization data."""
        state_dim = get_num_output_features(
            state_normalization_data.dense_normalization_parameters
        )
        if not discrete_action_names:
            # continuous actions: width derived from normalization parameters
            assert action_normalization_data is not None
            action_dim = get_num_output_features(
                action_normalization_data.dense_normalization_parameters
            )
        else:
            # discrete actions: one dimension per action name
            action_dim = len(discrete_action_names)
        net = NGramConvolutionalNetwork(
            state_dim=state_dim,
            action_dim=action_dim,
            sizes=self.sizes,
            activations=self.activations,
            last_layer_activation=self.last_layer_activation,
            context_size=self.context_size,
            conv_net_params=self.conv_net_params,
            use_layer_norm=self.use_layer_norm,
        )
        return SyntheticRewardNet(net)
| 3,678 | 34.375 | 86 | py |
ReAgent | ReAgent-master/reagent/net_builder/slate_ranking/slate_ranking_scorer.py | #!/usr/bin/env python3
from dataclasses import asdict
from typing import List
from typing import Optional
import torch
import torch.nn as nn
from reagent.core.dataclasses import dataclass, field
from reagent.core.parameters import param_hash
from reagent.models.base import ModelBase
from reagent.models.fully_connected_network import FullyConnectedNetwork
from reagent.models.mlp_scorer import MLPScorer
from reagent.net_builder.slate_ranking_net_builder import SlateRankingNetBuilder
class ScoreCap(nn.Module):
    """Clamp scores from above at a fixed ceiling (no lower bound)."""

    def __init__(self, cap: float):
        super().__init__()
        # upper bound applied elementwise in forward()
        self.cap = cap

    def forward(self, input):
        return torch.clamp(input, max=self.cap)
@dataclass
class FinalLayer:
    """Selects the output transform appended to the ranking MLP.

    At most one option may be set; ``get()`` returns the matching module,
    defaulting to identity when nothing is set.
    """

    score_cap: Optional[float] = None
    sigmoid: bool = False
    tanh: bool = False

    def __post_init_post_parse__(self):
        # count truthy options; more than one would be ambiguous
        num_set = sum(1 for option in asdict(self).values() if option)
        assert num_set <= 1, f"More than one option set {self}"

    def get(self):
        """Return the configured final activation module."""
        if self.score_cap:
            return ScoreCap(self.score_cap)
        if self.sigmoid:
            return nn.Sigmoid()
        if self.tanh:
            return nn.Tanh()
        return nn.Identity()
@dataclass
class SlateRankingScorer(SlateRankingNetBuilder):
    """Builds a pointwise MLP scorer for slate ranking: each (state,
    candidate) pair is scored independently by a fully-connected network
    followed by an optional final transform (cap / sigmoid / tanh).
    """

    __hash__ = param_hash

    # For MLP
    hidden_layers: List[int] = field(default_factory=lambda: [64, 32])
    activations: List[str] = field(default_factory=lambda: ["relu", "relu"])
    use_batch_norm: bool = False
    min_std: float = 0.0
    dropout_ratio: float = 0.0
    use_layer_norm: bool = False
    normalize_output: bool = False
    orthogonal_init: bool = False
    # For MLP Scorer
    # if disabled, ignores the state features
    has_user_feat: bool = False  # TODO: deprecate
    final_layer: FinalLayer = field(
        default_factory=FinalLayer
    )  # TODO: if score cap not needed, deprecate

    def build_slate_ranking_network(
        self, state_dim, candidate_dim, _candidate_size=None, _slate_size=None
    ) -> ModelBase:
        """Build the scorer; candidate_size/slate_size are unused because
        candidates are scored pointwise."""
        # pointwise MLP
        input_dim = state_dim + candidate_dim
        output_dim = 1  # one scalar score per candidate
        layers = [input_dim, *self.hidden_layers, output_dim]
        activations = [
            *self.activations,
            # identity, but we'll add our own final layer
            "linear",
        ]
        mlp = FullyConnectedNetwork(
            layers=layers,
            activations=activations,
            use_batch_norm=self.use_batch_norm,
            min_std=self.min_std,
            dropout_ratio=self.dropout_ratio,
            use_layer_norm=self.use_layer_norm,
            normalize_output=self.normalize_output,
            orthogonal_init=self.orthogonal_init,
        )
        # append the configured final transform (identity by default)
        mlp = nn.Sequential(
            *[
                mlp,
                self.final_layer.get(),
            ]
        )
        return MLPScorer(mlp=mlp, has_user_feat=self.has_user_feat)
| 2,915 | 27.871287 | 80 | py |
ReAgent | ReAgent-master/reagent/net_builder/value/fully_connected.py | #!/usr/bin/env python3
from typing import List
import torch
from reagent.core.dataclasses import dataclass, field
from reagent.core.parameters import NormalizationData, param_hash
from reagent.models.fully_connected_network import FloatFeatureFullyConnected
from reagent.net_builder.value_net_builder import ValueNetBuilder
from reagent.preprocessing.normalization import get_num_output_features
@dataclass
class FullyConnected(ValueNetBuilder):
    """Builds a fully-connected state-value network."""

    __hash__ = param_hash

    # hidden-layer sizes with matching activations
    sizes: List[int] = field(default_factory=lambda: [256, 128])
    activations: List[str] = field(default_factory=lambda: ["relu", "relu"])
    use_layer_norm: bool = False

    def __post_init_post_parse__(self):
        super().__init__()
        # each hidden layer needs exactly one activation
        assert len(self.sizes) == len(self.activations), (
            f"Must have the same numbers of sizes and activations; got: "
            f"{self.sizes}, {self.activations}"
        )

    def build_value_network(
        self, state_normalization_data: NormalizationData, output_dim: int = 1
    ) -> torch.nn.Module:
        """Construct the V-network mapping preprocessed states to
        ``output_dim`` outputs (default: a single scalar state value)."""
        state_dim = get_num_output_features(
            state_normalization_data.dense_normalization_parameters
        )
        return FloatFeatureFullyConnected(
            state_dim=state_dim,
            output_dim=output_dim,
            sizes=self.sizes,
            activations=self.activations,
            use_layer_norm=self.use_layer_norm,
        )
| 1,410 | 33.414634 | 78 | py |
ReAgent | ReAgent-master/reagent/net_builder/value/seq2reward_rnn.py | #!/usr/bin/env python3
import torch
from reagent.core.dataclasses import dataclass
from reagent.core.parameters import NormalizationData, param_hash
from reagent.models.seq2reward_model import Seq2RewardNetwork
from reagent.net_builder.value_net_builder import ValueNetBuilder
from reagent.preprocessing.normalization import get_num_output_features
@dataclass
class Seq2RewardNetBuilder(ValueNetBuilder):
    """Builds a Seq2RewardNetwork (recurrent sequence-to-reward model)."""

    __hash__ = param_hash

    action_dim: int = 2
    num_hiddens: int = 64
    num_hidden_layers: int = 2

    def build_value_network(
        self, state_normalization_data: NormalizationData
    ) -> torch.nn.Module:
        """Construct the network; the state width is derived from the dense
        normalization parameters."""
        state_dim = get_num_output_features(
            state_normalization_data.dense_normalization_parameters
        )
        return Seq2RewardNetwork(
            state_dim=state_dim,
            action_dim=self.action_dim,
            num_hiddens=self.num_hiddens,
            num_hidden_layers=self.num_hidden_layers,
        )
| 966 | 30.193548 | 71 | py |
ReAgent | ReAgent-master/reagent/model_utils/seq2slate_utils.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import copy
import logging
import math
from enum import Enum
import torch
import torch.nn as nn
import torch.nn.functional as F
logger = logging.getLogger(__name__)
PADDING_SYMBOL = 0
DECODER_START_SYMBOL = 1
class Seq2SlateMode(Enum):
    """Forward-pass modes supported by the seq2slate model."""

    # produce a ranked slate
    RANK_MODE = "rank"
    # log-probability of an entire given sequence
    PER_SEQ_LOG_PROB_MODE = "per_sequence_log_prob"
    # per-position log-probability distribution over candidates
    PER_SYMBOL_LOG_PROB_DIST_MODE = "per_symbol_log_prob_dist"
    # emit a single decoding step
    DECODE_ONE_STEP_MODE = "decode_one_step"
    # score candidates with the encoder only
    ENCODER_SCORE_MODE = "encoder_score_mode"
class Seq2SlateOutputArch(Enum):
    """How the seq2slate model produces its output ranking."""

    # Only output encoder scores
    ENCODER_SCORE = "encoder_score"
    # A decoder outputs a sequence in an autoregressive way
    AUTOREGRESSIVE = "autoregressive"
    # Using encoder scores, a decoder outputs a sequence using
    # frechet sort (equivalent to iterative softmax)
    FRECHET_SORT = "frechet_sort"
def print_model_info(seq2slate):
    """Log the parameter counts of a seq2slate model and its sub-modules.

    Which sub-modules exist depends on ``seq2slate.output_arch``.
    """

    def _num_of_params(model):
        # total number of scalar parameters in `model`
        return len(torch.cat([param.flatten() for param in model.parameters()]))

    logger.info(f"Num of total params: {_num_of_params(seq2slate)}")
    logger.info(f"Num of Encoder params: {_num_of_params(seq2slate.encoder)}")
    logger.info(
        f"Num of Candidate Embedder params: {_num_of_params(seq2slate.candidate_embedder)}"
    )
    logger.info(
        f"Num of State Embedder params: {_num_of_params(seq2slate.state_embedder)}"
    )
    arch = seq2slate.output_arch
    if arch == Seq2SlateOutputArch.FRECHET_SORT:
        logger.info(
            f"Num of Encoder_Scorer params: {_num_of_params(seq2slate.encoder_scorer)}"
        )
    elif arch == Seq2SlateOutputArch.AUTOREGRESSIVE:
        logger.info(
            f"Num of Positional Encoding params: {_num_of_params(seq2slate.positional_encoding_decoder)}"
        )
        logger.info(f"Num of Decoder params: {_num_of_params(seq2slate.decoder)}")
    elif arch == Seq2SlateOutputArch.ENCODER_SCORE:
        logger.info(
            f"Num of Encoder_Scorer params: {_num_of_params(seq2slate.encoder_scorer)}"
        )
def mask_logits_by_idx(logits, tgt_in_idx):
    """Mask out (set to -inf) logits of items that must not be selected.

    An item is invalid if it is one of the two reserved symbols
    (padding / decoder-start) or was already chosen at an earlier step,
    as recorded in ``tgt_in_idx``.

    Shapes: logits (batch_size, seq_len, candidate_size);
    tgt_in_idx (batch_size, seq_len).
    """
    # reserved symbols can never be emitted; note this writes into
    # `logits` in place before the out-of-place scatter below
    logits[:, :, :2] = float("-inf")
    batch_size, seq_len = tgt_in_idx.shape
    # lower-triangular index matrix: row t lists the items chosen at steps <= t
    chosen_so_far = torch.tril(
        tgt_in_idx.repeat(1, seq_len).reshape(batch_size, seq_len, seq_len),
        diagonal=0,
    )
    return logits.scatter(2, chosen_so_far, float("-inf"))
def subsequent_mask(size: int, device: torch.device):
    """
    Mask out subsequent positions. Mainly used in the decoding process,
    in which an item should not attend subsequent items.
    mask_ijk = 0 if the item should be ignored; 1 if the item should be paid attention
    """
    # Lower-triangular (including the diagonal) boolean mask, shape
    # (1, size, size); equivalent to ~triu(ones, diagonal=1).
    full = torch.ones(1, size, size, device=device, dtype=torch.bool)
    return torch.tril(full)
# TODO (@czxttkl): use when we introduce padding
def subsequent_and_padding_mask(tgt_in_idx):
    """Create a mask to hide padding and future items"""
    # (batch_size, 1, seq_len): 1 where the symbol is not padding.
    pad_mask = (tgt_in_idx != PADDING_SYMBOL).unsqueeze(-2).type(torch.int8)
    # (1, seq_len, seq_len): lower-triangular "no peeking ahead" mask.
    future_mask = subsequent_mask(tgt_in_idx.size(-1), tgt_in_idx.device)
    # Broadcasts to (batch_size, seq_len, seq_len).
    return pad_mask & future_mask
def clones(module, N):
    """
    Produce N identical layers.
    :param module: nn.Module class
    :param N: number of copies
    """
    # Deep-copy so the clones do not share parameters with each other.
    copies = [copy.deepcopy(module) for _ in range(N)]
    return nn.ModuleList(copies)
def attention(query, key, value, mask, d_k):
    """Scaled Dot Product Attention

    mask: (batch_size, 1, seq_len, seq_len); positions where mask == 0 are
    excluded from attention.
    Returns (context, attention_weights).
    """
    scale = math.sqrt(d_k)
    # (batch_size, num_heads, seq_len, seq_len)
    scores = torch.matmul(query, key.transpose(-2, -1)) / scale
    scores = scores.masked_fill(mask == 0, float("-inf"))
    weights = F.softmax(scores, dim=3)
    # (batch_size, num_heads, seq_len, d_k)
    context = torch.matmul(weights, value)
    return context, weights
def per_symbol_to_per_seq_log_probs(per_symbol_log_probs, tgt_out_idx):
    """Gather per-symbol log probabilities into per-seq log probabilities"""
    # per_symbol_log_probs: (batch_size, seq_len, candidate_size)
    # tgt_out_idx: (batch_size, seq_len)
    # Pick out the log prob of each emitted symbol -> (batch_size, seq_len).
    selected = per_symbol_log_probs.gather(2, tgt_out_idx.unsqueeze(2)).squeeze(2)
    # Sum over the sequence -> (batch_size, 1).
    return selected.sum(dim=1, keepdim=True)
def per_symbol_to_per_seq_probs(per_symbol_probs, tgt_out_idx):
"""Gather per-symbol probabilities into per-seq probabilities"""
# per_symbol_probs shape: batch_size, seq_len, candidate_size
# tgt_out_idx shape: batch_size, seq_len
# output shape: batch_size, 1
return torch.clamp(
torch.prod(
torch.gather(per_symbol_probs, 2, tgt_out_idx.unsqueeze(2)).squeeze(2),
dim=1,
keepdim=True,
),
# prevent zero probabilities, which cause torch.log return -inf
min=1e-40,
)
def pytorch_decoder_mask(
    memory: torch.Tensor, tgt_in_idx: torch.Tensor, num_heads: int
):
    """
    Compute the masks used in the PyTorch Transformer-based decoder for
    self-attention and attention over encoder outputs
    mask_ijk = 1 if the item should be ignored; 0 if the item should be paid attention
    Input:
        memory shape: batch_size, src_seq_len, dim_model
        tgt_in_idx (+2 offseted) shape: batch_size, tgt_seq_len
    Return:
        tgt_tgt_mask shape: batch_size * num_heads, tgt_seq_len, tgt_seq_len
        tgt_src_mask shape: batch_size * num_heads, tgt_seq_len, src_seq_len
    """
    batch_size, src_seq_len, _ = memory.shape
    tgt_seq_len = tgt_in_idx.shape[1]
    device = memory.device

    # Indices already emitted at or before each decoding step.
    selected = torch.tril(
        tgt_in_idx.repeat(1, tgt_seq_len).reshape(batch_size, tgt_seq_len, tgt_seq_len),
        diagonal=0,
    ).to(device)
    # Two extra leading slots absorb the reserved padding/start symbols
    # (tgt_in_idx is offset by +2); they are sliced off below.
    augmented = torch.zeros(
        batch_size, tgt_seq_len, src_seq_len + 2, dtype=torch.bool, device=device
    ).scatter(2, selected, 1)
    tgt_src_mask = augmented[:, :, 2:].repeat_interleave(num_heads, dim=0)

    # Strictly-upper-triangular mask blocks attention to future positions
    # (equivalent to `subsequent_mask(tgt_seq_len, device) == 0`).
    future = torch.triu(
        torch.ones(1, tgt_seq_len, tgt_seq_len, device=device, dtype=torch.bool),
        diagonal=1,
    )
    tgt_tgt_mask = future.repeat(batch_size * num_heads, 1, 1)
    return tgt_tgt_mask, tgt_src_mask
| 6,842 | 34.455959 | 105 | py |
ReAgent | ReAgent-master/reagent/preprocessing/sparse_preprocessor.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import abc
import logging
from typing import Dict, Tuple
import reagent.core.types as rlt
import torch
logger = logging.getLogger(__name__)
@torch.jit.script
def map_id_list(raw_values: torch.Tensor, id2index: Dict[int, int]) -> torch.Tensor:
    """Map a tensor of raw sparse ids to embedding-table indices via id2index.

    Assumes every raw id is present in the mapping (see TODO below).
    """
    # TODO(kaiwenw): handle case where raw_ids not in mapping
    # (i.e. id2index[val.item()] not found)
    return torch.tensor([id2index[x.item()] for x in raw_values], dtype=torch.long)
@torch.jit.script
def map_id_score_list(
    raw_keys: torch.Tensor, raw_values: torch.Tensor, id2index: Dict[int, int]
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Map raw sparse-id keys to embedding-table indices; scores pass through.

    Assumes every raw key is present in the mapping (see TODO below).
    """
    # TODO(kaiwenw): handle case where raw_ids not in mapping
    # (i.e. id2index[val.item()] not found)
    return (
        torch.tensor([id2index[x.item()] for x in raw_keys], dtype=torch.long),
        raw_values,
    )
class MapIDList(torch.nn.Module):
    """Interface: maps a tensor of raw sparse ids to embedding-table indices."""

    @abc.abstractmethod
    def forward(self, raw_values: torch.Tensor) -> torch.Tensor:
        pass
class MapIDScoreList(torch.nn.Module):
    """Interface: maps (raw id keys, scores) to (embedding indices, scores)."""

    @abc.abstractmethod
    def forward(
        self, raw_keys: torch.Tensor, raw_values: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        pass
class ExplicitMapIDList(MapIDList):
    """Maps raw ids through an explicit id -> index dictionary."""

    def __init__(self, id2index: Dict[int, int]):
        super().__init__()
        # torch.jit.Attribute pins the dict type so the module can be scripted.
        self.id2index: Dict[int, int] = torch.jit.Attribute(id2index, Dict[int, int])

    def forward(self, raw_values: torch.Tensor) -> torch.Tensor:
        # TODO(kaiwenw): handle case where raw_ids not in mapping
        # (i.e. id2index[val.item()] not found)
        return torch.tensor(
            [self.id2index[x.item()] for x in raw_values], dtype=torch.long
        )
class ExplicitMapIDScoreList(MapIDScoreList):
    """Maps raw id keys through an explicit id -> index dictionary; scores pass through."""

    def __init__(self, id2index: Dict[int, int]):
        super().__init__()
        # torch.jit.Attribute pins the dict type so the module can be scripted.
        self.id2index: Dict[int, int] = torch.jit.Attribute(id2index, Dict[int, int])

    def forward(
        self, raw_keys: torch.Tensor, raw_values: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # TODO(kaiwenw): handle case where raw_ids not in mapping
        # (i.e. id2index[val.item()] not found)
        return (
            torch.tensor([self.id2index[x.item()] for x in raw_keys], dtype=torch.long),
            raw_values,
        )
class ModuloMapIDList(MapIDList):
    """Hashes raw ids into [0, modulo) embedding indices via the remainder."""

    def __init__(self, modulo: int):
        super().__init__()
        # Size of the target embedding table.
        self.modulo = modulo

    def forward(self, raw_values: torch.Tensor) -> torch.Tensor:
        # Tensor `%` has the same semantics as torch.remainder.
        return raw_values.to(torch.long) % self.modulo
class ModuloMapIDScoreList(MapIDScoreList):
    """Hashes raw id keys into [0, modulo) via the remainder; scores pass through."""

    def __init__(self, modulo: int):
        super().__init__()
        # Size of the target embedding table.
        self.modulo = modulo

    def forward(
        self, raw_keys: torch.Tensor, raw_values: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # Tensor `%` has the same semantics as torch.remainder.
        hashed_keys = raw_keys.to(torch.long) % self.modulo
        return hashed_keys, raw_values
def make_sparse_preprocessor(
    feature_config: rlt.ModelFeatureConfig, device: torch.device
):
    """Build and TorchScript-compile a SparsePreprocessor from a feature config."""
    # TODO: Add option for simple modulo and other hash functions
    id2name: Dict[int, str] = feature_config.id2name
    name2id: Dict[str, int] = feature_config.name2id

    def _id_list_mapper_for(config: rlt.IdListFeatureConfig) -> MapIDList:
        # Resolve the mapping config and pick the matching mapper module.
        mapping_config = feature_config.id_mapping_config[config.id_mapping_name].value
        if isinstance(mapping_config, rlt.ExplicitMapping):
            return ExplicitMapIDList(mapping_config.id2index)
        if isinstance(mapping_config, rlt.ModuloMapping):
            return ModuloMapIDList(mapping_config.table_size)
        raise NotImplementedError(f"Unsupported {mapping_config}")

    def _id_score_list_mapper_for(
        config: rlt.IdScoreListFeatureConfig,
    ) -> MapIDScoreList:
        mapping_config = feature_config.id_mapping_config[config.id_mapping_name].value
        if isinstance(mapping_config, rlt.ExplicitMapping):
            return ExplicitMapIDScoreList(mapping_config.id2index)
        if isinstance(mapping_config, rlt.ModuloMapping):
            return ModuloMapIDScoreList(mapping_config.table_size)
        raise NotImplementedError(f"Unsupported {mapping_config}")

    id_list_mappers = {
        cfg.feature_id: _id_list_mapper_for(cfg)
        for cfg in feature_config.id_list_feature_configs
    }
    id_score_list_mappers = {
        cfg.feature_id: _id_score_list_mapper_for(cfg)
        for cfg in feature_config.id_score_list_feature_configs
    }
    return torch.jit.script(
        SparsePreprocessor(
            id2name, name2id, id_list_mappers, id_score_list_mappers, device
        )
    )
class SparsePreprocessor(torch.nn.Module):
    """Performs preprocessing for sparse features (i.e. id_list, id_score_list)

    Functionality includes:
    (1) changes keys from feature_id to feature_name, for better debuggability
    (2) maps sparse ids to embedding table indices based on id_mapping
    (3) filters out ids which aren't in the id2name

    Designed to be compiled with torch.jit.script (see make_sparse_preprocessor).
    """

    def __init__(
        self,
        id2name: Dict[int, str],
        name2id: Dict[str, int],
        id_list_mappers: Dict[int, MapIDList],
        id_score_list_mappers: Dict[int, MapIDScoreList],
        device: torch.device,
    ) -> None:
        super().__init__()
        # Every named feature must have a mapper of exactly one kind.
        assert set(id2name.keys()) == set(id_list_mappers.keys()) | set(
            id_score_list_mappers.keys()
        )
        # torch.jit.Attribute pins dict types for TorchScript compilation.
        self.id2name: Dict[int, str] = torch.jit.Attribute(id2name, Dict[int, str])
        self.name2id: Dict[str, int] = torch.jit.Attribute(name2id, Dict[str, int])
        # ModuleDict keys must be strings, so mappers are re-keyed by name.
        self.id_list_mappers = torch.nn.ModuleDict(
            {id2name[k]: v for k, v in id_list_mappers.items()}
        )
        self.id_score_list_mappers = torch.nn.ModuleDict(
            {id2name[k]: v for k, v in id_score_list_mappers.items()}
        )
        self.device = device

    @torch.jit.export
    def preprocess_id_list(
        self, id_list: Dict[int, Tuple[torch.Tensor, torch.Tensor]]
    ) -> Dict[str, Tuple[torch.Tensor, torch.Tensor]]:
        """
        Input: rlt.ServingIdListFeature
        Output: rlt.IdListFeature

        Features without a configured mapper are silently dropped.
        """
        ret: Dict[str, Tuple[torch.Tensor, torch.Tensor]] = {}
        for name, mapper in self.id_list_mappers.items():
            fid = self.name2id[name]
            if fid in id_list:
                offsets, values = id_list[fid]
                idx_values = mapper(values)
                ret[name] = (
                    offsets.to(self.device),
                    idx_values.to(self.device),
                )
        return ret

    @torch.jit.export
    def preprocess_id_score_list(
        self, id_score_list: Dict[int, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]
    ) -> Dict[str, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
        """
        Input: rlt.ServingIdScoreListFeature
        Output: rlt.IdScoreListFeature

        Features without a configured mapper are silently dropped.
        """
        ret: Dict[str, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]] = {}
        for name, mapper in self.id_score_list_mappers.items():
            fid = self.name2id[name]
            if fid in id_score_list:
                offsets, keys, values = id_score_list[fid]
                idx_keys, weights = mapper(keys, values)
                ret[name] = (
                    offsets.to(self.device),
                    idx_keys.to(self.device),
                    weights.to(self.device).float(),
                )
        return ret
| 7,650 | 34.752336 | 88 | py |
ReAgent | ReAgent-master/reagent/preprocessing/postprocessor.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import Dict, Tuple
import torch
import torch.nn as nn
from reagent.core.parameters import NormalizationParameters
from reagent.preprocessing.identify_types import (
CONTINUOUS_ACTION,
DISCRETE_ACTION,
DO_NOT_PREPROCESS,
)
from reagent.preprocessing.normalization import EPS, get_num_output_features
class Postprocessor(nn.Module):
    """
    Inverting action

    For CONTINUOUS_ACTION features, maps network outputs in
    [-(1 - EPS), 1 - EPS] back to the serving range
    [min_serving_value, max_serving_value]; DISCRETE_ACTION and
    DO_NOT_PREPROCESS inputs are passed through unchanged.
    """

    def __init__(
        self,
        normalization_parameters: Dict[int, NormalizationParameters],
        use_gpu: bool,
    ) -> None:
        super().__init__()
        self.num_output_features = get_num_output_features(normalization_parameters)
        # All action dimensions must share one preprocessing type.
        feature_types = {
            norm_param.feature_type for norm_param in normalization_parameters.values()
        }
        assert (
            len(feature_types) == 1
        ), "All dimensions of actions should have the same preprocessing"
        self.feature_type = list(feature_types)[0]
        assert self.feature_type in {
            DISCRETE_ACTION,
            CONTINUOUS_ACTION,
            DO_NOT_PREPROCESS,
        }, f"{self.feature_type} is not DISCRETE_ACTION, CONTINUOUS_ACTION or DO_NOT_PREPROCESS"

        self.device = torch.device("cuda" if use_gpu else "cpu")

        if self.feature_type == CONTINUOUS_ACTION:
            # Per-dimension constants of the inverse affine transform,
            # ordered by ascending feature id (must match preprocessing order).
            sorted_features = sorted(normalization_parameters.keys())
            self.min_serving_value = torch.tensor(
                [normalization_parameters[f].min_value for f in sorted_features],
                device=self.device,
            ).float()
            self.scaling_factor = torch.tensor(
                [
                    (
                        # pyre-fixme[58]: `-` is not supported for operand types
                        # `Optional[float]` and `Optional[float]`.
                        normalization_parameters[f].max_value
                        - normalization_parameters[f].min_value
                    )
                    / (2 * (1 - EPS))
                    for f in sorted_features
                ],
                device=self.device,
            ).float()
            self.almost_one = torch.tensor(1.0 - EPS, device=self.device).float()

    def input_prototype(self) -> Tuple[torch.Tensor]:
        # Example input for tracing/export.
        return (torch.randn(1, self.num_output_features),)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        if self.feature_type == CONTINUOUS_ACTION:
            # Please don't re-order; ONNX messed up tensor type when torch.clamp is
            # the first operand.
            # input == -almost_one maps to min_serving_value;
            # input == +almost_one maps to max_serving_value.
            return (
                self.almost_one + torch.clamp(input, -self.almost_one, self.almost_one)
            ) * self.scaling_factor + self.min_serving_value
        return input
| 2,812 | 36.013158 | 96 | py |
ReAgent | ReAgent-master/reagent/preprocessing/sparse_to_dense.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import Dict, List, Tuple
import torch
from reagent.preprocessing import normalization
class SparseToDenseProcessor:
    """Base class: turns a batch of sparse feature dicts into dense tensors.

    Subclasses implement `process`; calling the instance delegates to it.
    """

    def __init__(
        self, sorted_features: List[int], set_missing_value_to_zero: bool = False
    ):
        # Feature ids, in the column order of the dense output.
        self.sorted_features = sorted_features
        # If True, absent features are encoded as 0.0 (see subclasses).
        self.set_missing_value_to_zero = set_missing_value_to_zero

    def __call__(self, sparse_data):
        return self.process(sparse_data)
class StringKeySparseToDenseProcessor(SparseToDenseProcessor):
    """
    We just have this in case the input data is keyed by string
    """

    def __init__(
        self, sorted_features: List[int], set_missing_value_to_zero: bool = False
    ):
        super().__init__(sorted_features, set_missing_value_to_zero)
        # Delegate the actual densification to the int-keyed processor.
        self._sparse_to_dense = PythonSparseToDenseProcessor(
            sorted_features, set_missing_value_to_zero
        )

    def process(
        self, sparse_data: List[Dict[str, float]]
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # Re-key every row by int, then defer to the int-keyed processor.
        int_keyed_rows = [{int(k): v for k, v in row.items()} for row in sparse_data]
        return self._sparse_to_dense(int_keyed_rows)
class PythonSparseToDenseProcessor(SparseToDenseProcessor):
    """Pure-Python densifier for int-keyed sparse rows.

    Returns a (values, presence) tensor pair; `presence` marks which
    entries were actually observed (all-True when missing maps to zero,
    since absent and zero are then indistinguishable).
    """

    def __init__(
        self, sorted_features: List[int], set_missing_value_to_zero: bool = False
    ):
        super().__init__(sorted_features, set_missing_value_to_zero)
        # Column position of each feature id in the dense output.
        self.feature_to_index: Dict[int, int] = {
            feature_id: col for col, feature_id in enumerate(sorted_features)
        }

    def process(
        self, sparse_data: List[Dict[int, float]]
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        missing_value = (
            0.0 if self.set_missing_value_to_zero else normalization.MISSING_VALUE
        )
        dense_rows = [
            [row.get(col, missing_value) for col in self.sorted_features]
            for row in sparse_data
        ]
        # NaNs in the input are treated the same as absent features.
        values = torch.nan_to_num(torch.FloatTensor(dense_rows), nan=missing_value)
        if self.set_missing_value_to_zero:
            # When we set missing values to 0, we don't know what is and isn't missing
            presence = torch.ones_like(values, dtype=torch.bool)
        else:
            presence = values != missing_value
        return values, presence
| 2,628 | 31.45679 | 86 | py |
ReAgent | ReAgent-master/reagent/preprocessing/batch_preprocessor.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import Dict
import torch
import torch.nn as nn
import torch.nn.functional as F
from reagent.core import types as rlt
from reagent.preprocessing.preprocessor import Preprocessor
class BatchPreprocessor(nn.Module):
    """Marker base class for modules that turn a raw Dict[str, Tensor] batch
    into a typed reagent input structure."""

    pass
def batch_to_device(batch: Dict[str, torch.Tensor], device: torch.device):
    """Return a new dict with every tensor in `batch` moved onto `device`."""
    return {key: tensor.to(device) for key, tensor in batch.items()}
class DiscreteDqnBatchPreprocessor(BatchPreprocessor):
    """Assembles rlt.DiscreteDqnInput from a raw tensor batch, normalizing
    state features and one-hot encoding discrete actions."""

    def __init__(
        self, num_actions: int, state_preprocessor: Preprocessor, use_gpu: bool = False
    ):
        super().__init__()
        self.num_actions = num_actions
        self.state_preprocessor = state_preprocessor
        self.device = torch.device("cuda" if use_gpu else "cpu")

    def forward(self, batch: Dict[str, torch.Tensor]) -> rlt.DiscreteDqnInput:
        batch = batch_to_device(batch, self.device)
        state = self.state_preprocessor(
            batch["state_features"], batch["state_features_presence"]
        )
        next_state = self.state_preprocessor(
            batch["next_state_features"], batch["next_state_features_presence"]
        )
        # Non-terminal iff at least one next action is possible.
        not_terminal = batch["possible_next_actions_mask"].max(dim=1)[0].float()
        action = F.one_hot(batch["action"].to(torch.int64), self.num_actions)
        # `next_action` may hold the sentinel value self.num_actions when no
        # next action is available; one-hot over num_actions + 1 classes and
        # drop the sentinel column.
        next_action = F.one_hot(
            batch["next_action"].to(torch.int64), self.num_actions + 1
        )[:, : self.num_actions]
        return rlt.DiscreteDqnInput(
            state=rlt.FeatureData(state),
            next_state=rlt.FeatureData(next_state),
            action=action,
            next_action=next_action,
            reward=batch["reward"].unsqueeze(1),
            time_diff=batch["time_diff"].unsqueeze(1),
            step=batch["step"].unsqueeze(1),
            not_terminal=not_terminal.unsqueeze(1),
            possible_actions_mask=batch["possible_actions_mask"],
            possible_next_actions_mask=batch["possible_next_actions_mask"],
            extras=rlt.ExtraData(
                mdp_id=batch["mdp_id"].unsqueeze(1),
                sequence_number=batch["sequence_number"].unsqueeze(1),
                action_probability=batch["action_probability"].unsqueeze(1),
            ),
        )
class ParametricDqnBatchPreprocessor(BatchPreprocessor):
    """Assembles rlt.ParametricDqnInput from a raw tensor batch, normalizing
    both state and action features."""

    def __init__(
        self,
        state_preprocessor: Preprocessor,
        action_preprocessor: Preprocessor,
        use_gpu: bool,
    ):
        super().__init__()
        self.state_preprocessor = state_preprocessor
        self.action_preprocessor = action_preprocessor
        self.device = torch.device("cuda" if use_gpu else "cpu")

    def forward(self, batch: Dict[str, torch.Tensor]) -> rlt.ParametricDqnInput:
        batch = batch_to_device(batch, self.device)
        # Normalize current/next state and current/next action features.
        state = self.state_preprocessor(
            batch["state_features"], batch["state_features_presence"]
        )
        next_state = self.state_preprocessor(
            batch["next_state_features"], batch["next_state_features_presence"]
        )
        action = self.action_preprocessor(batch["action"], batch["action_presence"])
        next_action = self.action_preprocessor(
            batch["next_action"], batch["next_action_presence"]
        )
        return rlt.ParametricDqnInput(
            state=rlt.FeatureData(state),
            next_state=rlt.FeatureData(next_state),
            action=rlt.FeatureData(action),
            next_action=rlt.FeatureData(next_action),
            reward=batch["reward"].unsqueeze(1),
            time_diff=batch["time_diff"].unsqueeze(1),
            step=batch["step"].unsqueeze(1),
            not_terminal=batch["not_terminal"].unsqueeze(1),
            possible_actions=batch["possible_actions"],
            possible_actions_mask=batch["possible_actions_mask"],
            possible_next_actions=batch["possible_next_actions"],
            possible_next_actions_mask=batch["possible_next_actions_mask"],
            extras=rlt.ExtraData(
                mdp_id=batch["mdp_id"].unsqueeze(1),
                sequence_number=batch["sequence_number"].unsqueeze(1),
                action_probability=batch["action_probability"].unsqueeze(1),
            ),
        )
class PolicyNetworkBatchPreprocessor(BatchPreprocessor):
    """Assembles rlt.PolicyNetworkInput from a raw tensor batch, normalizing
    both state and action features."""

    def __init__(
        self,
        state_preprocessor: Preprocessor,
        action_preprocessor: Preprocessor,
        use_gpu: bool = False,
    ):
        super().__init__()
        self.state_preprocessor = state_preprocessor
        self.action_preprocessor = action_preprocessor
        self.device = torch.device("cuda" if use_gpu else "cpu")

    def forward(self, batch: Dict[str, torch.Tensor]) -> rlt.PolicyNetworkInput:
        batch = batch_to_device(batch, self.device)
        # Normalize current/next state and current/next action features.
        state = self.state_preprocessor(
            batch["state_features"], batch["state_features_presence"]
        )
        next_state = self.state_preprocessor(
            batch["next_state_features"], batch["next_state_features_presence"]
        )
        action = self.action_preprocessor(batch["action"], batch["action_presence"])
        next_action = self.action_preprocessor(
            batch["next_action"], batch["next_action_presence"]
        )
        return rlt.PolicyNetworkInput(
            state=rlt.FeatureData(state),
            next_state=rlt.FeatureData(next_state),
            action=rlt.FeatureData(action),
            next_action=rlt.FeatureData(next_action),
            reward=batch["reward"].unsqueeze(1),
            time_diff=batch["time_diff"].unsqueeze(1),
            step=batch["step"].unsqueeze(1),
            not_terminal=batch["not_terminal"].unsqueeze(1),
            extras=rlt.ExtraData(
                mdp_id=batch["mdp_id"].unsqueeze(1),
                sequence_number=batch["sequence_number"].unsqueeze(1),
                action_probability=batch["action_probability"].unsqueeze(1),
            ),
        )
| 6,551 | 41 | 87 | py |
ReAgent | ReAgent-master/reagent/preprocessing/normalization.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import json
import logging
from dataclasses import asdict
from typing import Dict, List, Optional, Tuple
import numpy as np
import reagent.core.types as rlt
import six
import torch
from reagent.core.parameters import NormalizationParameters
from reagent.preprocessing import identify_types
from reagent.preprocessing.identify_types import DEFAULT_MAX_UNIQUE_ENUM, FEATURE_TYPES
from scipy import stats # @manual=third-party//scipy:scipy-py
from scipy.stats.mstats import mquantiles # @manual=third-party//scipy:scipy-py
logger = logging.getLogger(__name__)
BOX_COX_MAX_STDDEV = 1e8  # reject box-cox transforms whose output stddev explodes
BOX_COX_MARGIN = 1e-4  # lower clamp applied to shifted values before box-cox
MISSING_VALUE = -1337.1337  # sentinel for absent features in dense tensors
DEFAULT_QUANTILE_K2_THRESHOLD = 1000.0  # normaltest K2 above this => quantile encode
MINIMUM_SAMPLES_TO_IDENTIFY = 20  # min sample count to fit normalization params
DEFAULT_MAX_QUANTILE_SIZE = 20
DEFAULT_NUM_SAMPLES = 100000
# Achieved by probability feature transformation on clamped limits (1e-5, 1-1e-5)
MAX_FEATURE_VALUE = 11.513
MIN_FEATURE_VALUE = MAX_FEATURE_VALUE * -1
EPS = 1e-6
def no_op_feature():
    # Pass-through CONTINUOUS feature: mean 0, stddev 1, no boxcox/quantiles.
    # Positional args presumably map to (feature_type, boxcox_lambda,
    # boxcox_shift, mean, stddev, possible_values, quantiles, min_value,
    # max_value) — matches the positional call in identify_parameter; confirm
    # against the NormalizationParameters definition.
    return NormalizationParameters(
        identify_types.CONTINUOUS, None, 0, 0, 1, None, None, None, None
    )
def identify_parameter(
    feature_name,
    values,
    max_unique_enum_values=DEFAULT_MAX_UNIQUE_ENUM,
    quantile_size=DEFAULT_MAX_QUANTILE_SIZE,
    quantile_k2_threshold=DEFAULT_QUANTILE_K2_THRESHOLD,
    skip_box_cox=False,
    skip_quantiles=False,
    feature_type=None,
):
    """Fit NormalizationParameters for one feature from a sample of its values.

    If `feature_type` is None it is inferred from the data; passing
    BOXCOX / CONTINUOUS / QUANTILE forces that treatment. Returns a no-op
    spec for constant continuous features and None when a finite stddev
    cannot be computed.
    """
    force_boxcox = feature_type == identify_types.BOXCOX
    force_continuous = feature_type == identify_types.CONTINUOUS
    force_quantile = feature_type == identify_types.QUANTILE
    if feature_type is None:
        feature_type = identify_types.identify_type(values, max_unique_enum_values)
    boxcox_lambda = None
    boxcox_shift = 0.0
    mean = 0.0
    stddev = 1.0
    possible_values = None
    quantiles = None
    # Bug fix: k2_original was previously only bound inside the
    # continuous/forced branch below, so non-continuous feature types
    # (e.g. ENUM, BINARY, DO_NOT_PREPROCESS) raised UnboundLocalError when
    # the quantile condition later read it. 0.0 never exceeds the
    # threshold, so the continuous-path behavior is unchanged.
    k2_original = 0.0
    assert feature_type in identify_types.FEATURE_TYPES, "unknown type {}".format(
        feature_type
    )
    assert (
        len(values) >= MINIMUM_SAMPLES_TO_IDENTIFY
    ), "insufficient information to identify parameter"

    min_value = float(np.min(values))
    max_value = float(np.max(values))

    if feature_type == identify_types.DO_NOT_PREPROCESS:
        mean = float(np.mean(values))
        values = values - mean
        stddev = max(float(np.std(values, ddof=1)), 1.0)

    if feature_type == identify_types.CONTINUOUS or force_boxcox or force_quantile:
        # A constant feature carries no information; normalize to a no-op.
        if min_value == max_value and not (force_boxcox or force_quantile):
            return no_op_feature()
        k2_original, p_original = stats.normaltest(values)

        # shift can be estimated but not in scipy
        boxcox_shift = float(min_value * -1)
        candidate_values, lambda_ = stats.boxcox(
            np.maximum(values + boxcox_shift, BOX_COX_MARGIN)
        )

        k2_boxcox, p_boxcox = stats.normaltest(candidate_values)
        logger.info(
            "Feature stats. Original K2: {} P: {} Boxcox K2: {} P: {}".format(
                k2_original, p_original, k2_boxcox, p_boxcox
            )
        )
        if (lambda_ < 0.9 or lambda_ > 1.1 or force_boxcox) and not (
            force_continuous or force_quantile
        ):
            # Lambda is far enough from 1.0 to be worth doing boxcox
            if (
                k2_original > k2_boxcox * 10 and k2_boxcox <= quantile_k2_threshold
            ) or force_boxcox:
                # The boxcox output is significantly more normally distributed
                # than the original data and is normal enough to apply
                # effectively.

                stddev = float(np.std(candidate_values, ddof=1))
                # Unclear whether this happens in practice or not
                if (
                    np.isfinite(stddev)
                    and stddev < BOX_COX_MAX_STDDEV
                    and not np.isclose(stddev, 0)
                ) or force_boxcox:
                    values = candidate_values
                    boxcox_lambda = float(lambda_)
        if boxcox_lambda is None or skip_box_cox:
            boxcox_shift = None
            boxcox_lambda = None
    if boxcox_lambda is not None:
        feature_type = identify_types.BOXCOX
    if (
        boxcox_lambda is None
        and k2_original > quantile_k2_threshold
        and (not skip_quantiles)
        and not force_continuous
    ) or force_quantile:
        # The feature is highly non-normal: fall back to quantile encoding.
        feature_type = identify_types.QUANTILE
        quantiles = (
            np.unique(
                mquantiles(
                    values,
                    np.arange(quantile_size + 1, dtype=np.float64)
                    / float(quantile_size),
                    alphap=0.0,
                    betap=1.0,
                )
            )
            .astype(float)
            .tolist()
        )
        logger.info("Feature is non-normal, using quantiles: {}".format(quantiles))

    if (
        feature_type == identify_types.CONTINUOUS
        or feature_type == identify_types.BOXCOX
        or feature_type == identify_types.CONTINUOUS_ACTION
    ):
        # Standardize: subtract the mean, divide by (floored) stddev.
        mean = float(np.mean(values))
        values = values - mean
        stddev = max(float(np.std(values, ddof=1)), 1.0)
        if not np.isfinite(stddev):
            logger.info("Std. dev not finite for feature {}".format(feature_name))
            return None
        values /= stddev

    if feature_type == identify_types.ENUM:
        possible_values = np.unique(values.astype(int)).astype(int).tolist()

    return NormalizationParameters(
        feature_type,
        boxcox_lambda,
        boxcox_shift,
        mean,
        stddev,
        possible_values,
        quantiles,
        min_value,
        max_value,
    )
def get_feature_config(
    float_features: Optional[List[Tuple[int, str]]]
) -> rlt.ModelFeatureConfig:
    """Wrap a list of (feature_id, name) pairs into a ModelFeatureConfig."""
    infos = [
        rlt.FloatFeatureInfo(name=feature_name, feature_id=feature_id)
        for feature_id, feature_name in (float_features or [])
    ]
    return rlt.ModelFeatureConfig(float_feature_infos=infos)
def get_num_output_features(
    normalization_parameters: Dict[int, NormalizationParameters]
) -> int:
    """Total width of the dense output: ENUM features contribute one column
    per possible value, every other feature contributes one column."""
    total = 0
    for params in normalization_parameters.values():
        if params.feature_type == identify_types.ENUM:
            total += len(params.possible_values)
        else:
            total += 1
    return total
def get_feature_start_indices(
    sorted_features: List[int],
    normalization_parameters: Dict[int, NormalizationParameters],
):
    """Returns the starting index for each feature in the output feature vector"""
    start_indices = []
    offset = 0
    for feature_id in sorted_features:
        params = normalization_parameters[feature_id]
        start_indices.append(offset)
        if params.feature_type == identify_types.ENUM:
            # ENUM features are one-hot encoded: one column per value.
            possible_values = params.possible_values
            assert possible_values is not None
            offset += len(possible_values)
        else:
            offset += 1
    return start_indices
def sort_features_by_normalization(
    normalization_parameters: Dict[int, NormalizationParameters]
) -> Tuple[List[int], List[int]]:
    """
    Helper function to return a sorted list from a normalization map.
    Also returns the starting index for each feature type"""
    sorted_features: List[int] = []
    feature_starts: List[int] = []
    assert isinstance(
        list(normalization_parameters.keys())[0], int
    ), "Normalization Parameters need to be int"
    # Group by feature type (in FEATURE_TYPES order); within a group the
    # feature ids are ascending.
    ordered_ids = sorted(normalization_parameters.keys())
    for feature_type in FEATURE_TYPES:
        feature_starts.append(len(sorted_features))
        sorted_features.extend(
            fid
            for fid in ordered_ids
            if normalization_parameters[fid].feature_type == feature_type
        )
    return sorted_features, feature_starts
def deserialize(parameters_json) -> Dict[int, NormalizationParameters]:
    """Inverse of `serialize`: build {feature_id: NormalizationParameters}
    from a mapping of feature id -> JSON-encoded parameter dict.
    """
    parameters = {}
    # `six.iteritems` was only needed for Python 2; on Python 3 mappings,
    # plain `.items()` is equivalent.
    for feature, feature_parameters in parameters_json.items():
        # Note: This is OK since NormalizationParameters is flat.
        params = NormalizationParameters(**json.loads(feature_parameters))
        # ENUM features must carry their value list.
        if params.feature_type == identify_types.ENUM:
            assert params.possible_values is not None
        parameters[int(feature)] = params
    return parameters
def serialize_one(feature_parameters):
    """Serialize a single (flat) parameters dataclass to a JSON string."""
    as_plain_dict = asdict(feature_parameters)
    return json.dumps(as_plain_dict)
def serialize(parameters):
    """Map each feature id to the JSON string of its normalization params."""
    # `.items()` replaces the Python-2-era `six.iteritems`; equivalent on
    # Python 3 mappings.
    return {
        feature: serialize_one(feature_parameters)
        for feature, feature_parameters in parameters.items()
    }
def get_feature_norm_metadata(feature_name, feature_value_list, norm_params):
    """Compute NormalizationParameters for one feature, or None when there
    are too few samples."""
    logger.info("Got feature: {}".format(feature_name))
    if len(feature_value_list) < MINIMUM_SAMPLES_TO_IDENTIFY:
        return None

    # Per-feature override wins; otherwise fall back to the global default.
    feature_override = None
    if norm_params["feature_overrides"] is not None:
        feature_override = norm_params["feature_overrides"].get(feature_name, None)
    feature_override = feature_override or norm_params.get(
        "default_feature_override", None
    )

    feature_values = np.array(feature_value_list, dtype=np.float32)
    assert not (np.any(np.isinf(feature_values))), "Feature values contain infinity"
    assert not (
        np.any(np.isnan(feature_values))
    ), "Feature values contain nan (are there nulls in the feature values?)"
    normalization_parameters = identify_parameter(
        feature_name,
        feature_values,
        norm_params["max_unique_enum_values"],
        norm_params["quantile_size"],
        norm_params["quantile_k2_threshold"],
        norm_params["skip_box_cox"],
        norm_params["skip_quantiles"],
        feature_override,
    )
    logger.info(
        "Feature {} normalization: {}".format(feature_name, normalization_parameters)
    )
    return normalization_parameters
def construct_action_scale_tensor(action_norm_params, action_scale_overrides):
    """Construct tensors that will rescale each action value on each dimension i
    from [min_serving_value[i], max_serving_value[i]] to [-1, 1] for training.
    """
    sorted_features, _ = sort_features_by_normalization(action_norm_params)
    num_dims = len(sorted_features)
    min_action_array = np.zeros((1, num_dims))
    max_action_array = np.zeros((1, num_dims))

    for idx, feature_id in enumerate(sorted_features):
        # An explicit override replaces the observed min/max for a dimension.
        override = action_scale_overrides.get(feature_id)
        if override is not None:
            min_action_array[0][idx] = override[0]
            max_action_array[0][idx] = override[1]
        else:
            min_action_array[0][idx] = action_norm_params[feature_id].min_value
            max_action_array[0][idx] = action_norm_params[feature_id].max_value

    # float64 tensors, matching the numpy arrays they are built from.
    min_action_range_tensor_serving = torch.from_numpy(min_action_array)
    max_action_range_tensor_serving = torch.from_numpy(max_action_array)
    return min_action_range_tensor_serving, max_action_range_tensor_serving
| 11,199 | 34.897436 | 88 | py |
ReAgent | ReAgent-master/reagent/preprocessing/preprocessor.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from typing import Dict, List, Optional, Tuple, cast
import torch
from reagent.core.parameters import NormalizationParameters
from reagent.preprocessing.identify_types import DO_NOT_PREPROCESS, ENUM, FEATURE_TYPES
from reagent.preprocessing.normalization import (
EPS,
MAX_FEATURE_VALUE,
MIN_FEATURE_VALUE,
)
from torch.nn import Module, Parameter # @manual="//caffe2:torch"
logger = logging.getLogger(__name__)
class Preprocessor(Module):
def __init__(
self,
normalization_parameters: Dict[int, NormalizationParameters],
use_gpu: Optional[bool] = None,
device: Optional[torch.device] = None,
) -> None:
super().__init__()
self.normalization_parameters = normalization_parameters
(
self.feature_id_to_index,
self.sorted_features,
self.sorted_feature_boundaries,
) = self._sort_features_by_normalization()
cuda_available = torch.cuda.is_available()
logger.info("CUDA availability: {}".format(cuda_available))
if device is not None:
self.device = device
elif use_gpu and cuda_available:
logger.warn("use_gpu is deprecated, please pass in the device directly")
logger.info("Using GPU: GPU requested and available.")
self.device = torch.device("cuda")
else:
logger.info("NOT Using GPU: GPU not requested or not available.")
self.device = torch.device("cpu")
# NOTE: Because of the way we call AppendNet to squash ONNX to a C2 net,
# We need to make tensors for every numeric literal
self.zero_tensor = Parameter(
torch.tensor([0.0], device=self.device), requires_grad=False
)
self.one_tensor = Parameter(
torch.tensor([1.0], device=self.device), requires_grad=False
)
self.one_half_tensor = Parameter(
torch.tensor([0.5], device=self.device), requires_grad=False
)
self.one_hundredth_tensor = Parameter(
torch.tensor([0.01], device=self.device), requires_grad=False
)
self.negative_one_tensor = Parameter(
torch.tensor([-1.0], device=self.device), requires_grad=False
)
self.min_tensor = Parameter(
torch.tensor([-1e20], device=self.device), requires_grad=False
)
self.max_tensor = Parameter(
torch.tensor([1e20], device=self.device), requires_grad=False
)
self.epsilon_tensor = Parameter(
torch.tensor([EPS], device=self.device), requires_grad=False
)
self.feature_starts = self._get_type_boundaries()
self.split_sections: List[int] = []
for i, feature_type in enumerate(FEATURE_TYPES):
begin_index = self.feature_starts[i]
if (i + 1) == len(FEATURE_TYPES):
end_index = len(self.normalization_parameters)
else:
end_index = self.feature_starts[i + 1]
if begin_index == end_index:
continue # No features of this type
if feature_type == ENUM:
# Process one-at-a-time
for j in range(begin_index, end_index):
enum_norm_params = self.normalization_parameters[
self.sorted_features[j]
]
func = getattr(self, "_create_parameters_" + feature_type)
func(j, enum_norm_params)
self.split_sections.append(1)
else:
norm_params = []
for f in self.sorted_features[begin_index:end_index]:
norm_params.append(self.normalization_parameters[f])
func = getattr(self, "_create_parameters_" + feature_type)
func(begin_index, norm_params)
self.split_sections.append(end_index - begin_index)
def input_prototype(self) -> Tuple[torch.Tensor, torch.Tensor]:
return (
torch.randn(1, len(self.normalization_parameters), device=self.device),
torch.ones(
1,
len(self.normalization_parameters),
dtype=torch.uint8,
device=self.device,
),
)
    def forward(
        self, input: torch.Tensor, input_presence_byte: torch.Tensor
    ) -> torch.Tensor:
        """Preprocess a dense input matrix, one column group per feature type.

        :param input: (batch, num_features) raw values, columns ordered by
            ``self.sorted_features``
        :param input_presence_byte: same shape; nonzero marks a present value
        :return: (batch, total_output_dim) preprocessed matrix; absent values
            are zeroed out via the presence mask
        """
        assert (
            input.shape == input_presence_byte.shape
        ), f"{input.shape} != {input_presence_byte.shape}"
        outputs = []
        # Columns were grouped by feature type at construction time; split
        # the batch into those same contiguous groups.
        split_input = torch.split(input, self.split_sections, dim=1)
        # NB: converting to float prevent ASAN heap-buffer-overflow
        split_presence = torch.split(
            input_presence_byte.float(), self.split_sections, dim=1
        )
        # ptr walks the split chunks; ENUM contributes one chunk per feature,
        # every other type contributes one chunk per type.
        ptr = 0
        for i, feature_type in enumerate(FEATURE_TYPES):
            begin_index = self.feature_starts[i]
            if (i + 1) == len(FEATURE_TYPES):
                end_index = len(self.normalization_parameters)
            else:
                end_index = self.feature_starts[i + 1]
            if begin_index == end_index:
                continue  # No features of this type
            if feature_type == ENUM:
                # Process one-at-a-time
                for j in range(begin_index, end_index):
                    norm_params = self.normalization_parameters[self.sorted_features[j]]
                    new_output = (
                        self._preprocess_feature_single_column(
                            j, split_input[ptr], norm_params
                        )
                        * split_presence[ptr]
                    )
                    ptr += 1
                    self._check_preprocessing_output(new_output, [norm_params])
                    outputs.append(new_output)
            else:
                norm_params_list: List[NormalizationParameters] = []
                for f in self.sorted_features[begin_index:end_index]:
                    norm_params_list.append(self.normalization_parameters[f])
                new_output = (
                    self._preprocess_feature_multi_column(
                        begin_index, split_input[ptr], norm_params_list
                    )
                    * split_presence[ptr]
                )
                ptr += 1
                self._check_preprocessing_output(new_output, norm_params_list)
                # Pass-through features stay unclamped; everything else is
                # clipped into the accepted post-preprocessing range.
                if feature_type != DO_NOT_PREPROCESS:
                    new_output = torch.clamp(
                        new_output, MIN_FEATURE_VALUE, MAX_FEATURE_VALUE
                    )
                outputs.append(new_output)
        return torch.cat(outputs, dim=1)
def _preprocess_feature_single_column(
self,
begin_index: int,
input: torch.Tensor,
norm_params: NormalizationParameters,
) -> torch.Tensor:
feature_type = norm_params.feature_type
func = getattr(self, "_preprocess_" + feature_type)
return func(begin_index, input, norm_params)
def _preprocess_feature_multi_column(
self,
begin_index: int,
input: torch.Tensor,
norm_params: List[NormalizationParameters],
) -> torch.Tensor:
feature_type = norm_params[0].feature_type
func = getattr(self, "_preprocess_" + feature_type)
return func(begin_index, input, norm_params)
    def _create_parameters_DO_NOT_PREPROCESS(
        self, begin_index: int, norm_params: List[NormalizationParameters]
    ):
        # Pass-through features need no precomputed tensors.
        pass
    def _preprocess_DO_NOT_PREPROCESS(
        self,
        begin_index: int,
        input: torch.Tensor,
        norm_params: List[NormalizationParameters],
    ) -> torch.Tensor:
        # Identity transform: values are returned untouched.
        return input
    def _create_parameters_BINARY(
        self, begin_index: int, norm_params: List[NormalizationParameters]
    ):
        # Binary features need no precomputed tensors.
        pass
def _preprocess_BINARY(
self,
begin_index: int,
input: torch.Tensor,
norm_params: List[NormalizationParameters],
) -> torch.Tensor:
# ONNX doesn't support != yet
return self.one_tensor - (input == self.zero_tensor).float()
    def _create_parameters_CLIP_LOG(
        self, begin_index: int, norm_params: List[NormalizationParameters]
    ):
        # Clip-log features need no precomputed tensors.
        pass
def _preprocess_CLIP_LOG(
self,
begin_index: int,
input: torch.Tensor,
norm_params: List[NormalizationParameters],
) -> torch.Tensor:
return input.clip(EPS).log() # pyre-ignore[16]
    def _create_parameters_PROBABILITY(
        self, begin_index: int, norm_params: List[NormalizationParameters]
    ):
        # Probability features need no precomputed tensors.
        pass
def _preprocess_PROBABILITY(
self,
begin_index: int,
input: torch.Tensor,
norm_params: List[NormalizationParameters],
) -> torch.Tensor:
clamped_input = torch.clamp(input, 1e-5, 1 - 1e-5)
return self.negative_one_tensor * (
((self.one_tensor / clamped_input) - self.one_tensor).log()
)
    def _create_parameters_CONTINUOUS_ACTION(
        self, begin_index: int, norm_params: List[NormalizationParameters]
    ):
        # Precompute the affine map that rescales serving-range values
        # [min_value, max_value] into the training range (-1 + EPS, 1 - EPS).
        self._create_parameter(
            begin_index,
            "min_serving_value",
            torch.tensor([p.min_value for p in norm_params], device=self.device),
        )
        # Lower bound of the training range: -1 + EPS for every feature.
        self._create_parameter(
            begin_index,
            "min_training_value",
            torch.ones(len(norm_params), device=self.device) * -1 + EPS,
        )
        # Per-feature slope: training span (2 - 2*EPS) over serving span.
        self._create_parameter(
            begin_index,
            "scaling_factor",
            (torch.ones(len(norm_params), device=self.device) - EPS)
            * 2
            / torch.tensor(
                # pyre-fixme[58]: `-` is not supported for operand types
                # `Optional[float]` and `Optional[float]`.
                [p.max_value - p.min_value for p in norm_params],
                device=self.device,
            ),
        )
def _preprocess_CONTINUOUS_ACTION(
self,
begin_index: int,
input: torch.Tensor,
norm_params: List[NormalizationParameters],
) -> torch.Tensor:
min_serving_value = self._fetch_parameter(begin_index, "min_serving_value")
min_training_value = self._fetch_parameter(begin_index, "min_training_value")
scaling_factor = self._fetch_parameter(begin_index, "scaling_factor")
continuous_action = (
input - min_serving_value
) * scaling_factor + min_training_value
return torch.clamp(continuous_action, -1 + EPS, 1 - EPS)
    def _create_parameters_DISCRETE_ACTION(
        self, begin_index: int, norm_params: List[NormalizationParameters]
    ):
        # Discrete actions need no precomputed tensors.
        pass
    def _preprocess_DISCRETE_ACTION(
        self,
        begin_index: int,
        input: torch.Tensor,
        norm_params: List[NormalizationParameters],
    ):
        # Identity transform: discrete actions pass through unchanged.
        return input
def _create_parameters_CONTINUOUS(
self, begin_index: int, norm_params: List[NormalizationParameters]
):
self._create_parameter(
begin_index,
"means",
torch.tensor([p.mean for p in norm_params], device=self.device),
)
self._create_parameter(
begin_index,
"stddevs",
torch.tensor([p.stddev for p in norm_params], device=self.device),
)
def _preprocess_CONTINUOUS(
self,
begin_index: int,
input: torch.Tensor,
norm_params: List[NormalizationParameters],
) -> torch.Tensor:
means = self._fetch_parameter(begin_index, "means")
stddevs = self._fetch_parameter(begin_index, "stddevs")
continuous_output = (input - means) / stddevs
return continuous_output
    def _create_parameters_BOXCOX(
        self, begin_index: int, norm_params: List[NormalizationParameters]
    ):
        # Per-feature shift applied before the power transform.
        self._create_parameter(
            begin_index,
            "shifts",
            torch.tensor([p.boxcox_shift for p in norm_params], device=self.device),
        )
        # A lambda near zero would make the (x^lambda - 1) / lambda formula
        # numerically unstable, so reject it up front.
        for p in norm_params:
            assert (
                # pyre-fixme[16]: `Optional` has no attribute `__abs__`.
                abs(p.boxcox_lambda)
                > 1e-6
            ), "Invalid value for boxcox lambda: " + str(p.boxcox_lambda)
        self._create_parameter(
            begin_index,
            "lambdas",
            torch.tensor([p.boxcox_lambda for p in norm_params], device=self.device),
        )
        # Box-Cox output is then z-scored, so also register mean/stddev.
        self._create_parameters_CONTINUOUS(begin_index, norm_params)
    def _preprocess_BOXCOX(
        self,
        begin_index: int,
        input: torch.Tensor,
        norm_params: List[NormalizationParameters],
    ) -> torch.Tensor:
        # Box-Cox transform ((x + shift)^lambda - 1) / lambda, followed by
        # z-scoring via the CONTINUOUS path.
        shifts = self._fetch_parameter(begin_index, "shifts")
        lambdas = self._fetch_parameter(begin_index, "lambdas")
        boxcox_output = (
            # We can replace this with a normal pow() call after D8528654 lands
            torch.pow(
                torch.clamp(
                    input + shifts, 1e-6
                ),  # Clamp is necessary to prevent MISSING_VALUE from going to NaN
                lambdas,
            )
            - self.one_tensor
        ) / lambdas
        return self._preprocess_CONTINUOUS(begin_index, boxcox_output, norm_params)
    def _create_parameters_QUANTILE(
        self, begin_index: int, norm_params: List[NormalizationParameters]
    ):
        """Register the boundary tensors needed to map values to percentiles.

        F is the number of features in this group; B is the max number of
        quantile boundaries across those features.
        """
        F = len(norm_params)
        # 1xF count of quantile *intervals* (boundaries - 1) per feature.
        num_quantiles = torch.tensor(
            # pyre-fixme[6]: Expected `Sized` for 1st param but got
            # `Optional[List[float]]`.
            [[float(len(p.quantiles)) - 1 for p in norm_params]],
            device=self.device,
        )
        self._create_parameter(begin_index, "num_quantiles", num_quantiles)
        max_num_quantile_boundaries = int(
            torch.max(
                # pyre-fixme[6]: Expected `Sized` for 1st param but got
                # `Optional[List[float]]`.
                torch.tensor([len(p.quantiles) for p in norm_params])
            ).item()
        )
        B = max_num_quantile_boundaries
        # The quantile boundaries is a FxB matrix where B is the max # of boundaries
        # We take advantage of the fact that if the value is >= the max
        # quantile boundary it automatically gets a 1.0 to repeat the max quantile
        # so that we guarantee a square matrix.
        # We project the quantiles boundaries to 3d and create a 1xFxB tensor
        quantile_boundaries = torch.zeros(
            [1, len(norm_params), max_num_quantile_boundaries], device=self.device
        )
        max_quantile_boundaries = torch.zeros([1, len(norm_params)], device=self.device)
        min_quantile_boundaries = torch.zeros([1, len(norm_params)], device=self.device)
        for i, p in enumerate(norm_params):
            # Fill with the last boundary first so features with fewer
            # boundaries than B repeat their max boundary in the tail.
            # pyre-fixme[16]: `Optional` has no attribute `__getitem__`.
            quantile_boundaries[0, i, :] = p.quantiles[-1]
            quantile_boundaries[
                0,
                i,
                # pyre-fixme[6]: Expected `Sized` for 1st param but got
                # `Optional[List[float]]`.
                0 : len(p.quantiles),
            ] = torch.tensor(p.quantiles, device=self.device)
            # pyre-fixme[6]: Expected `Iterable[Variable[_T]]` for 1st param but got
            # `Optional[List[float]]`.
            max_quantile_boundaries[0, i] = max(p.quantiles)
            # pyre-fixme[6]: Expected `Iterable[Variable[_T]]` for 1st param but got
            # `Optional[List[float]]`.
            min_quantile_boundaries[0, i] = min(p.quantiles)
        quantile_boundaries = quantile_boundaries.to(self.device)
        max_quantile_boundaries = max_quantile_boundaries.to(self.device)
        min_quantile_boundaries = min_quantile_boundaries.to(self.device)
        self._create_parameter(begin_index, "quantile_boundaries", quantile_boundaries)
        self._create_parameter(
            begin_index, "max_quantile_boundaries", max_quantile_boundaries
        )
        self._create_parameter(
            begin_index, "min_quantile_boundaries", min_quantile_boundaries
        )
        # All-ones 1xFxB mask used at preprocess time to broadcast inputs
        # into the third (boundary) dimension.
        self._create_parameter(
            begin_index,
            "quantile_boundary_mask",
            torch.ones([1, F, B], device=self.device),
        )
    def _preprocess_QUANTILE(
        self,
        begin_index: int,
        input: torch.Tensor,
        norm_params: List[NormalizationParameters],
    ) -> torch.Tensor:
        """
        Replace the value with its percentile in the range [0,1].
        This preprocesses several features in a single step by putting the
        quantile boundaries in the third dimension and broadcasting.
        The input is a JxF matrix where J is the batch size and F is the # of features.
        """
        # The number of quantiles is a 1xF matrix
        num_quantiles = self._fetch_parameter(begin_index, "num_quantiles")
        quantile_boundaries = self._fetch_parameter(begin_index, "quantile_boundaries")
        max_quantile_boundaries = self._fetch_parameter(
            begin_index, "max_quantile_boundaries"
        )
        min_quantile_boundaries = self._fetch_parameter(
            begin_index, "min_quantile_boundaries"
        )
        # Add a third dimension and repeat to create a JxFxB matrix, where the
        # inputs are repeated B times in the third dimension. We need to
        # do this because we can't broadcast both operands in different
        # dimensions in the same operation.
        # repeat doesn't work yet, so * by a mask
        mask = self._fetch_parameter(begin_index, "quantile_boundary_mask")
        expanded_inputs = input.unsqueeze(2) * mask
        input_greater_than_or_equal_to = (
            expanded_inputs >= quantile_boundaries
        ).float()
        input_less_than = (expanded_inputs < quantile_boundaries).float()
        # Values at or beyond the extreme boundaries are pinned to 1.0 / 0.0
        # instead of interpolated.
        set_to_max = (input >= max_quantile_boundaries).float()
        set_to_min = (input <= min_quantile_boundaries).float()
        min_or_max = (set_to_min + set_to_max).float()
        interpolate = (min_or_max < self.one_hundredth_tensor).float()
        # Nearest boundary at or below the input (absent boundaries -> -1e20
        # so they never win the max).
        interpolate_left, _ = torch.max(
            (input_greater_than_or_equal_to * quantile_boundaries)
            + (input_less_than * self.min_tensor),
            dim=2,
        )
        # Nearest boundary above the input (absent boundaries -> 1e20).
        interpolate_right, _ = torch.min(
            (input_less_than * quantile_boundaries)
            + (input_greater_than_or_equal_to * self.max_tensor),
            dim=2,
        )
        # This assumes that we need to interpolate and computes the value.
        # If we don't need to interpolate, this will be some bogus value, but it
        # will be multiplied by 0 so no big deal.
        left_start = torch.sum(input_greater_than_or_equal_to, dim=2) - self.one_tensor
        interpolated_values = (
            (
                left_start
                + (
                    (input - interpolate_left)
                    / (
                        (interpolate_right + self.epsilon_tensor) - interpolate_left
                    )  # Add a small amount to interpolate_right to avoid div-0
                )
            )
            / num_quantiles
        ).float()
        return set_to_max + (interpolate * interpolated_values).float()
def _create_parameters_ENUM(
self, begin_index: int, norm_params: NormalizationParameters
):
self._create_parameter(
begin_index,
"enum_values",
torch.tensor(
norm_params.possible_values, device=self.device, dtype=torch.float
).unsqueeze(0),
)
def _preprocess_ENUM(
self,
begin_index: int,
input: torch.Tensor,
norm_params: NormalizationParameters,
) -> torch.Tensor:
enum_values = self._fetch_parameter(begin_index, "enum_values")
return (input == enum_values).float()
def _sort_features_by_normalization(self):
"""
Helper function to return a sorted list from a normalization map.
Also returns the starting index for each feature type"""
# Sort features by feature type
feature_id_to_index = {}
sorted_features = []
feature_starts = []
assert isinstance(
list(self.normalization_parameters.keys())[0], int
), "Normalization Parameters need to be int"
for feature_type in FEATURE_TYPES:
feature_starts.append(len(sorted_features))
for feature in sorted(self.normalization_parameters.keys()):
norm = self.normalization_parameters[feature]
if norm.feature_type == feature_type:
feature_id_to_index[feature] = len(sorted_features)
sorted_features.append(feature)
return feature_id_to_index, sorted_features, feature_starts
    def _get_type_boundaries(self) -> List[int]:
        """Return, for each feature type, the index in ``self.sorted_features``
        where that type's features start.

        Relies on ``self.sorted_features`` being grouped in FEATURE_TYPES
        order; asserts if it is not.
        """
        feature_starts = []
        on_feature_type = -1
        for i, feature in enumerate(self.sorted_features):
            feature_type = self.normalization_parameters[feature].feature_type
            feature_type_index = FEATURE_TYPES.index(feature_type)
            assert (
                feature_type_index >= on_feature_type
            ), "Features are not sorted by feature type!"
            # Types with no features get the same start index as the next type.
            while feature_type_index > on_feature_type:
                feature_starts.append(i)
                on_feature_type += 1
        # Pad trailing empty types so every type has a start index.
        while on_feature_type < len(FEATURE_TYPES):
            feature_starts.append(len(self.sorted_features))
            on_feature_type += 1
        return feature_starts
def _create_parameter(
self, begin_index: int, name: str, t: torch.Tensor
) -> Parameter:
p = Parameter(t, requires_grad=False)
setattr(self, "_auto_parameter_" + str(begin_index) + "_" + name, p)
return p
def _fetch_parameter(self, begin_index: int, name: str) -> Parameter:
return cast(
Parameter, getattr(self, "_auto_parameter_" + str(begin_index) + "_" + name)
)
def _check_preprocessing_output(self, batch, norm_params):
"""
Check that preprocessed features fall within range of valid output.
:param batch: torch tensor
:param norm_params: list of normalization parameters
"""
if not self.training:
return
feature_type = norm_params[0].feature_type
min_value, max_value = batch.min(), batch.max()
if feature_type in ("BOXCOX", "CONTINUOUS", "DO_NOT_PREPROCESS", "CLIP_LOG"):
# Continuous features may be in range (-inf, inf)
pass
elif max_value.item() > MAX_FEATURE_VALUE:
raise Exception(
f"A {feature_type} feature type has max value {max_value} which is >"
f" than accepted post pre-processing max of {MAX_FEATURE_VALUE}"
)
elif min_value.item() < MIN_FEATURE_VALUE:
raise Exception(
f"A {feature_type} feature type has min value {min_value} which is <"
f" accepted post pre-processing min of {MIN_FEATURE_VALUE}"
)
| 23,384 | 38.040067 | 88 | py |
ReAgent | ReAgent-master/reagent/preprocessing/transforms.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from typing import Callable, List, Optional
import numpy as np
import reagent.core.types as rlt
import torch
import torch.nn.functional as F
from reagent.core.parameters import NormalizationData
from reagent.preprocessing.preprocessor import Preprocessor
from reagent.preprocessing.sparse_preprocessor import make_sparse_preprocessor
logger = logging.getLogger(__name__)
class Compose:
    """
    Applies an iterable collection of transform functions
    """

    def __init__(self, *transforms):
        self.transforms = transforms

    def __call__(self, data):
        # Thread the data through each transform in order.
        for transform in self.transforms:
            data = transform(data)
        return data

    def __repr__(self):
        body = "\n    ".join(repr(t) for t in self.transforms)
        return f"{self.__class__.__name__}(\n{body}\n)"
# TODO: this wouldn't work for possible_actions_mask (list of value, presence)
class ValuePresence:
    """
    For every key `x`, looks for `x_presence`; if `x_presence` exists,
    replace `x` with tuple of `x` and `x_presence`, delete `x_presence` key
    """

    def __call__(self, data):
        # Snapshot the keys so deletions don't disturb iteration.
        for key in list(data.keys()):
            presence_key = f"{key}_presence"
            if presence_key in data:
                data[key] = (data[key], data.pop(presence_key))
        return data
class Lambda:
    """Applies an arbitrary callable transform"""

    def __init__(self, keys: List[str], fn: Callable):
        self.keys = keys
        self.fn = fn

    def __call__(self, data):
        # Apply fn to each selected key in order; other keys are untouched.
        data.update({key: self.fn(data[key]) for key in self.keys})
        return data
class SelectValuePresenceColumns:
    """
    Select columns from value-presence source key
    """

    def __init__(self, source: str, dest: str, indices: List[int]):
        self.source = source
        self.dest = dest
        self.indices = indices

    def __call__(self, data):
        # Slice the same column subset out of both the values and the mask.
        value, presence = data[self.source]
        selected = (value[:, self.indices], presence[:, self.indices])
        data[self.dest] = selected
        return data
class DenseNormalization:
    """
    Normalize the `keys` using `normalization_data`.
    The keys are expected to be `Tuple[torch.Tensor, torch.Tensor]`,
    where the first element is the value and the second element is the
    presence mask.
    This transform replaces the keys in the input data.
    """

    def __init__(
        self,
        keys: List[str],
        normalization_data: NormalizationData,
        device: Optional[torch.device] = None,
    ):
        """
        Args:
            keys: the name of the keys to be transformed
            normalization_data: parameters for the dense Preprocessor
            device: target device; defaults to CPU
        """
        self.keys = keys
        self.normalization_data = normalization_data
        self.device = device or torch.device("cpu")
        # Delay the initialization of the preprocessor so this class
        # is pickleable
        self._preprocessor: Optional[Preprocessor] = None

    def __call__(self, data):
        # Lazily build the Preprocessor on first use (see note above).
        if self._preprocessor is None:
            self._preprocessor = Preprocessor(
                self.normalization_data.dense_normalization_parameters,
                device=self.device,
            )

        for k in self.keys:
            value, presence = data[k]
            value, presence = value.to(self.device), presence.to(self.device)
            # NaN values are treated as absent: clear their presence bit and
            # zero the value before preprocessing.
            presence[torch.isnan(value)] = 0
            value[torch.isnan(value)] = 0
            data[k] = self._preprocessor(value, presence).float()

        return data
class MapIDListFeatures:
    """
    Applies a SparsePreprocessor (see sparse_preprocessor.SparsePreprocessor)
    to id-list and id-score-list keys; keys without sparse config become None.
    """

    def __init__(
        self,
        id_list_keys: List[str],
        id_score_list_keys: List[str],
        feature_config: rlt.ModelFeatureConfig,
        device: torch.device,
    ):
        self.id_list_keys = id_list_keys
        self.id_score_list_keys = id_score_list_keys
        # A key must be one kind or the other, never both.
        assert (
            set(id_list_keys).intersection(set(id_score_list_keys)) == set()
        ), f"id_list_keys: {id_list_keys}; id_score_list_keys: {id_score_list_keys}"
        self.feature_config = feature_config
        self.sparse_preprocessor = make_sparse_preprocessor(
            feature_config=feature_config, device=device
        )

    def __call__(self, data):
        for k in self.id_list_keys + self.id_score_list_keys:
            # if no ids, it means we're not using sparse features.
            if not self.feature_config.id2name or k not in data:
                data[k] = None
                continue

            assert isinstance(data[k], dict), f"{k} has type {type(data[k])}. {data[k]}"
            if k in self.id_list_keys:
                data[k] = self.sparse_preprocessor.preprocess_id_list(data[k])
            else:
                data[k] = self.sparse_preprocessor.preprocess_id_score_list(data[k])
        return data
class OneHotActions:
    """
    Keys should be in the set {0,1,2,...,num_actions}, where
    a value equal to num_actions denotes that it's not valid.
    """

    def __init__(self, keys: List[str], num_actions: int):
        self.keys = keys
        self.num_actions = num_actions

    def __call__(self, data):
        # Encode with num_actions + 1 classes, then keep only the first
        # num_actions columns so an "invalid" value (== num_actions) becomes
        # the all-zero vector.
        kept_columns = torch.arange(self.num_actions)
        for key in self.keys:
            encoded = F.one_hot(data[key], self.num_actions + 1)
            data[key] = encoded.index_select(-1, kept_columns)
        return data
class ColumnVector:
    """
    Ensure that the keys are column vectors
    """

    def __init__(self, keys: List[str]):
        self.keys = keys

    def __call__(self, data):
        for k in self.keys:
            raw_value = data[k]
            if isinstance(raw_value, tuple):
                # (value, presence) pair; the presence mask is discarded here.
                value = raw_value[0]
            elif isinstance(raw_value, list):
                # TODO(T67265031): make mdp_id a tensor, which we will be able to
                # when column type changes to int
                value = np.array(raw_value)
            elif isinstance(raw_value, torch.Tensor):
                # TODO(T67265031): this is an identity mapping, which is only necessary
                # when mdp_id in traced batch preprocessors becomes a tensor (mdp_id
                # is a list of strings in normal batch preprocessors).
                value = raw_value
            else:
                raise NotImplementedError(f"value of type {type(raw_value)}.")

            is_column_shaped = value.ndim == 1 or (
                value.ndim == 2 and value.shape[1] == 1
            )
            assert is_column_shaped, f"Invalid shape for key {k}: {value.shape}"
            data[k] = value.reshape(-1, 1)

        return data
class MaskByPresence:
    """
    Expect data to be (value, presence) and return value * presence.
    This zeros out values that aren't present.
    """

    def __init__(self, keys: List[str]):
        self.keys = keys

    def __call__(self, data):
        for key in self.keys:
            pair = data[key]
            is_pair = isinstance(pair, tuple) and len(pair) == 2
            assert is_pair, f"Not valid value, presence tuple: {pair}"
            value, presence = pair
            assert value.shape == presence.shape, (
                f"Unmatching value shape ({value.shape})"
                f" and presence shape ({presence.shape})"
            )
            # Multiply by the (float) mask to zero out absent entries.
            data[key] = value * presence.float()
        return data
class StackDenseFixedSizeArray:
    """
    If data is a tensor, ensures it has the correct shape. If data is a list of
    (value, presence) discards the presence tensors and concatenates the values
    to output a tensor of shape (batch_size, feature_dim).
    """

    def __init__(self, keys: List[str], size: int, dtype=torch.float):
        self.keys = keys
        self.size = size
        self.dtype = dtype

    def __call__(self, data):
        for key in self.keys:
            value = data[key]
            if isinstance(value, torch.Tensor):
                # Already dense: only validate the shape and coerce the dtype.
                if value.ndim != 2 or value.shape[1] != self.size:
                    raise ValueError(f"Wrong shape for key {key}: {value.shape}")
                data[key] = value.to(self.dtype)
            else:
                # List of (value, presence) pairs: drop presence, stack values.
                stacked = torch.cat([v for v, p in value], dim=0)
                data[key] = stacked.view(-1, self.size).to(dtype=self.dtype)
        return data
class FixedLengthSequences:
    """
    Does two things:
        1. makes sure each sequence in the list of keys has the expected fixed length
        2. if to_keys is provided, copies the relevant sequence_id to the new key,
            otherwise overwrites the old key

    Expects each data[key] to be `Dict[Int, Tuple[Tensor, T]]`. Where:
    - key is the feature id
    - sequence_id is the key of the dict data[key]
    - The first element of the tuple is the offset for each example, which is expected to be in fixed interval.
    - The second element is the data at each step in the sequence

    This is mainly for FB internal use,
    see fbcode/caffe2/caffe2/fb/proto/io_metadata.thrift
    for the data format extracted from SequenceFeatureMetadata

    NOTE: this is not product between two lists (keys and to_keys);
    it's setting keys[sequence_id] to to_keys in a parallel way
    """

    def __init__(
        self,
        keys: List[str],
        sequence_id: int,
        expected_length: Optional[int] = None,
        *,
        to_keys: Optional[List[str]] = None,
    ):
        self.keys = keys
        self.sequence_id = sequence_id
        self.to_keys = to_keys or keys
        assert len(self.to_keys) == len(keys)
        # May be None; in that case it is inferred from the first batch seen
        # and cached on the instance (see __call__).
        self.expected_length = expected_length

    def __call__(self, data):
        for key, to_key in zip(self.keys, self.to_keys):
            offsets, value = data[key][self.sequence_id]
            # TODO assert regarding offsets length compared to value
            expected_length = self.expected_length
            if expected_length is None:
                if len(offsets) > 1:
                    # If batch size is larger than 1, just use the offsets
                    expected_length = (offsets[1] - offsets[0]).item()
                else:
                    # If batch size is 1
                    expected_length = value[0].shape[0]
                # NOTE: stateful — the length inferred from the first batch is
                # cached and enforced on all subsequent batches.
                self.expected_length = expected_length

            # Every example must occupy exactly expected_length rows, so the
            # offsets must form the arithmetic sequence 0, L, 2L, ...
            expected_offsets = torch.arange(
                0, offsets.shape[0] * expected_length, expected_length
            )
            assert all(
                expected_offsets == offsets
            ), f"Unexpected offsets for {key} {self.sequence_id}: {offsets}. Expected {expected_offsets}"

            data[to_key] = value
        return data
class SlateView:
    """
    Assuming that the keys are flatten fixed-length sequences with length of
    `slate_size`, unflatten it by inserting `slate_size` to the 1st dim.
    I.e., turns the input from the shape of `[B * slate_size, D]` to
    `[B, slate_size, D]`.
    """

    def __init__(self, keys: List[str], slate_size: int):
        self.keys = keys
        self.slate_size = slate_size

    def __call__(self, data):
        for key in self.keys:
            flat = data[key]
            # Unpack enforces that the input is 2-D.
            _, feature_dim = flat.shape
            data[key] = flat.view(-1, self.slate_size, feature_dim)
        return data
class FixedLengthSequenceDenseNormalization:
    """
    Combines the FixedLengthSequences, DenseNormalization, and SlateView transforms.

    Writes results to new keys named "{key}:{sequence_id}" so the original
    keys are left intact.
    """

    def __init__(
        self,
        keys: List[str],
        sequence_id: int,
        normalization_data: NormalizationData,
        expected_length: Optional[int] = None,
        device: Optional[torch.device] = None,
    ):
        to_keys = [f"{k}:{sequence_id}" for k in keys]
        self.fixed_length_sequences = FixedLengthSequences(
            keys, sequence_id, to_keys=to_keys, expected_length=expected_length
        )
        self.dense_normalization = DenseNormalization(
            to_keys, normalization_data, device=device
        )
        # We will override this in __call__()
        self.slate_view = SlateView(to_keys, slate_size=-1)

    def __call__(self, data):
        data = self.fixed_length_sequences(data)
        data = self.dense_normalization(data)
        # The sequence length may only be known after the first batch, so
        # propagate it into the slate view on every call.
        self.slate_view.slate_size = self.fixed_length_sequences.expected_length
        return self.slate_view(data)
| 12,655 | 32.217848 | 111 | py |
ReAgent | ReAgent-master/reagent/prediction/predictor_wrapper.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from typing import Dict, List, Optional, Tuple
import reagent.core.types as rlt
import torch
import torch.nn.functional as F
from reagent.core.torch_utils import gather
from reagent.model_utils.seq2slate_utils import Seq2SlateMode
from reagent.model_utils.seq2slate_utils import Seq2SlateOutputArch
from reagent.models.base import ModelBase
from reagent.models.seq2slate import Seq2SlateTransformerNet
from reagent.models.seq2slate_reward import Seq2SlateRewardNetBase
from reagent.preprocessing.postprocessor import Postprocessor
from reagent.preprocessing.preprocessor import Preprocessor
from reagent.preprocessing.sparse_preprocessor import (
SparsePreprocessor,
make_sparse_preprocessor,
)
from reagent.training.utils import gen_permutations
from reagent.training.world_model.seq2reward_trainer import get_Q
from torch import nn
logger = logging.getLogger(__name__)
_DEFAULT_FEATURE_IDS = []

# Placeholder sparse-feature inputs: Python dicts carry no element types, so
# these seed values let the JIT tracer infer the dict value types (see the
# note in sparse_input_prototype). Feature id 42 is arbitrary.
FAKE_STATE_ID_LIST_FEATURES = {
    42: (torch.zeros(1, dtype=torch.long), torch.tensor([], dtype=torch.long))
}
FAKE_STATE_ID_SCORE_LIST_FEATURES = {
    42: (
        torch.zeros(1, dtype=torch.long),
        torch.tensor([], dtype=torch.long),
        torch.tensor([], dtype=torch.float),
    )
}
def serving_to_feature_data(
    serving: rlt.ServingFeatureData,
    dense_preprocessor: Preprocessor,
    sparse_preprocessor: SparsePreprocessor,
) -> rlt.FeatureData:
    """Convert raw serving-side features into preprocessed FeatureData.

    Dense (values, presence) go through the dense preprocessor; id-list and
    id-score-list features go through the sparse preprocessor.
    """
    float_features_with_presence, id_list_features, id_score_list_features = serving
    return rlt.FeatureData(
        float_features=dense_preprocessor(*float_features_with_presence),
        id_list_features=sparse_preprocessor.preprocess_id_list(id_list_features),
        id_score_list_features=sparse_preprocessor.preprocess_id_score_list(
            id_score_list_features
        ),
    )
def sparse_input_prototype(
    model: ModelBase,
    state_preprocessor: Preprocessor,
    state_feature_config: rlt.ModelFeatureConfig,
):
    """Build a one-element tuple holding a ServingFeatureData example input,
    suitable for JIT-tracing a *WithPreprocessor module.

    Sparse features from the model's prototype are remapped from feature
    names to feature ids via the feature config.
    """
    name2id = state_feature_config.name2id
    model_prototype = model.input_prototype()
    # Terrible hack to make JIT tracing works. Python dict doesn't have type
    # so we need to insert something so JIT tracer can infer the type.
    state_id_list_features = FAKE_STATE_ID_LIST_FEATURES
    state_id_score_list_features = FAKE_STATE_ID_SCORE_LIST_FEATURES
    if isinstance(model_prototype, rlt.FeatureData):
        if model_prototype.id_list_features:
            state_id_list_features = {
                name2id[k]: v for k, v in model_prototype.id_list_features.items()
            }
        if model_prototype.id_score_list_features:
            state_id_score_list_features = {
                name2id[k]: v for k, v in model_prototype.id_score_list_features.items()
            }

    input = rlt.ServingFeatureData(
        float_features_with_presence=state_preprocessor.input_prototype(),
        id_list_features=state_id_list_features,
        id_score_list_features=state_id_score_list_features,
    )
    return (input,)
class DiscreteDqnWithPreprocessor(ModelBase):
    """
    This is separated from DiscreteDqnPredictorWrapper so that we can pass typed inputs
    into the model. This is possible because JIT only traces tensor operation.
    In contrast, JIT scripting needs to compile the code, therefore, it won't recognize
    any custom Python type.
    """

    def __init__(
        self,
        model: ModelBase,
        state_preprocessor: Preprocessor,
        state_feature_config: rlt.ModelFeatureConfig,
    ):
        super().__init__()
        self.model = model
        self.state_preprocessor = state_preprocessor
        self.state_feature_config = state_feature_config
        # Sparse preprocessing is pinned to CPU for serving.
        self.sparse_preprocessor = make_sparse_preprocessor(
            self.state_feature_config, device=torch.device("cpu")
        )

    def forward(self, state: rlt.ServingFeatureData):
        # Preprocess raw serving features, then score with the wrapped model.
        state_feature_data = serving_to_feature_data(
            state, self.state_preprocessor, self.sparse_preprocessor
        )
        q_values = self.model(state_feature_data)
        return q_values

    def input_prototype(self):
        return sparse_input_prototype(
            model=self.model,
            state_preprocessor=self.state_preprocessor,
            state_feature_config=self.state_feature_config,
        )
class DiscreteDqnPredictorWrapper(torch.jit.ScriptModule):
    """Serving wrapper that JIT-traces the preprocessing+model module and
    returns (action names, q-values)."""

    def __init__(
        self,
        dqn_with_preprocessor: DiscreteDqnWithPreprocessor,
        action_names: List[str],
        # here to keep interface consistent with FB internal
        state_feature_config: rlt.ModelFeatureConfig,
    ) -> None:
        super().__init__()
        # Trace eagerly at construction time using the module's own prototype.
        self.dqn_with_preprocessor = torch.jit.trace(
            dqn_with_preprocessor, dqn_with_preprocessor.input_prototype()
        )
        self.action_names = torch.jit.Attribute(action_names, List[str])

    @torch.jit.script_method
    def forward(self, state: rlt.ServingFeatureData) -> Tuple[List[str], torch.Tensor]:
        q_values = self.dqn_with_preprocessor(state)
        return (self.action_names, q_values)
class OSSSparsePredictorUnwrapper(nn.Module):
    # Wrap input in serving feature data
    def __init__(self, model: nn.Module) -> None:
        super().__init__()
        self.model = model

    def forward(
        self,
        state_with_presence: Tuple[torch.Tensor, torch.Tensor],
        state_id_list_features: Dict[int, Tuple[torch.Tensor, torch.Tensor]],
        state_id_score_list_features: Dict[
            int, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
        ],
    ) -> Tuple[List[str], torch.Tensor]:
        # Bundle the three raw inputs into a ServingFeatureData before
        # delegating to the wrapped predictor module.
        return self.model(
            rlt.ServingFeatureData(
                float_features_with_presence=state_with_presence,
                id_list_features=state_id_list_features,
                id_score_list_features=state_id_score_list_features,
            )
        )
class BinaryDifferenceScorerWithPreprocessor(ModelBase):
    """
    This is separated from DiscreteDqnPredictorWrapper so that we can pass typed inputs
    into the model. This is possible because JIT only traces tensor operation.
    In contrast, JIT scripting needs to compile the code, therefore, it won't recognize
    any custom Python type.
    """

    def __init__(
        self,
        model: ModelBase,
        state_preprocessor: Preprocessor,
        state_feature_config: rlt.ModelFeatureConfig,
    ):
        super().__init__()
        self.model = model
        self.state_preprocessor = state_preprocessor
        self.state_feature_config = state_feature_config
        self.sparse_preprocessor = make_sparse_preprocessor(
            self.state_feature_config, device=torch.device("cpu")
        )

    def forward(self, state: rlt.ServingFeatureData):
        state_feature_data = serving_to_feature_data(
            state, self.state_preprocessor, self.sparse_preprocessor
        )
        q_values = self.model(state_feature_data)
        # Exactly two actions are expected; the score is the softmax
        # probability of the second one.
        assert q_values.shape[1] == 2, f"{q_values.shape}"
        softmax_vals = F.softmax(q_values, dim=1)
        # TODO for future cleanup: kind of a misnomer now, since not really "difference"
        return softmax_vals[:, 1]

    def input_prototype(self):
        return sparse_input_prototype(
            model=self.model,
            state_preprocessor=self.state_preprocessor,
            state_feature_config=self.state_feature_config,
        )
class BinaryDifferenceScorerPredictorWrapper(torch.jit.ScriptModule):
    """Serving wrapper returning the binary-difference score tensor."""

    def __init__(
        self,
        binary_difference_scorer_with_preprocessor: BinaryDifferenceScorerWithPreprocessor,
        state_feature_config: rlt.ModelFeatureConfig,
    ) -> None:
        super().__init__()
        # Trace eagerly at construction time using the module's own prototype.
        self.binary_difference_scorer_with_preprocessor = torch.jit.trace(
            binary_difference_scorer_with_preprocessor,
            binary_difference_scorer_with_preprocessor.input_prototype(),
        )

    @torch.jit.script_method
    def forward(self, state: rlt.ServingFeatureData) -> torch.Tensor:
        return self.binary_difference_scorer_with_preprocessor(state)
# Pass through serving module's output
class OSSPredictorUnwrapper(nn.Module):
    """Identity adapter: forwards every argument to the wrapped serving module."""

    def __init__(self, model: nn.Module) -> None:
        super().__init__()
        self.model = model

    def forward(self, *args, **kwargs) -> Tuple[List[str], torch.Tensor]:
        # Pure pass-through; the wrapped model defines the real signature.
        return self.model(*args, **kwargs)
# OSS aliases: discrete-DQN and actor predictors consume sparse (id-list)
# features and use the sparse unwrapper; the parametric-DQN predictor takes
# dense tensors only and uses the plain pass-through unwrapper.
DiscreteDqnPredictorUnwrapper = OSSSparsePredictorUnwrapper
ActorPredictorUnwrapper = OSSSparsePredictorUnwrapper
ParametricDqnPredictorUnwrapper = OSSPredictorUnwrapper
class ParametricDqnWithPreprocessor(ModelBase):
    """Couples a parametric Q-network with its state and action preprocessors.

    Accepts raw ``(values, presence)`` tensor pairs for state and action,
    preprocesses both, and returns the model's Q-value.
    """

    def __init__(
        self,
        model: ModelBase,
        state_preprocessor: Preprocessor,
        action_preprocessor: Preprocessor,
    ):
        super().__init__()
        self.model = model
        self.state_preprocessor = state_preprocessor
        self.action_preprocessor = action_preprocessor

    def forward(
        self,
        state_with_presence: Tuple[torch.Tensor, torch.Tensor],
        action_with_presence: Tuple[torch.Tensor, torch.Tensor],
    ):
        state = rlt.FeatureData(self.state_preprocessor(*state_with_presence))
        action = rlt.FeatureData(self.action_preprocessor(*action_with_presence))
        return self.model(state, action)

    def input_prototype(self):
        return (
            self.state_preprocessor.input_prototype(),
            self.action_preprocessor.input_prototype(),
        )
class ParametricDqnPredictorWrapper(torch.jit.ScriptModule):
    """TorchScript serving wrapper for the parametric (state, action) Q-network."""

    def __init__(self, dqn_with_preprocessor: ParametricDqnWithPreprocessor) -> None:
        super().__init__()
        # Tracing is sufficient: the wrapped module only performs tensor ops.
        self.dqn_with_preprocessor = torch.jit.trace(
            dqn_with_preprocessor, dqn_with_preprocessor.input_prototype()
        )

    @torch.jit.script_method
    def forward(
        self,
        state_with_presence: Tuple[torch.Tensor, torch.Tensor],
        action_with_presence: Tuple[torch.Tensor, torch.Tensor],
    ) -> Tuple[List[str], torch.Tensor]:
        # Single output head named "Q", per the serving contract.
        value = self.dqn_with_preprocessor(state_with_presence, action_with_presence)
        return (["Q"], value)
class ActorWithPreprocessor(ModelBase):
    """
    Kept separate from ActorPredictorWrapper so that typed inputs can be passed
    into the model. This works because JIT only traces tensor operations; JIT
    scripting, which compiles the code, would not recognize the custom Python
    types used here.
    """

    def __init__(
        self,
        model: ModelBase,
        state_preprocessor: Preprocessor,
        state_feature_config: rlt.ModelFeatureConfig,
        action_postprocessor: Optional[Postprocessor] = None,
        serve_mean_policy: bool = False,
    ):
        super().__init__()
        self.model = model
        self.state_preprocessor = state_preprocessor
        self.state_feature_config = state_feature_config
        self.sparse_preprocessor = make_sparse_preprocessor(
            self.state_feature_config, device=torch.device("cpu")
        )
        self.action_postprocessor = action_postprocessor
        self.serve_mean_policy = serve_mean_policy

    def forward(self, state: rlt.ServingFeatureData):
        feature_data = serving_to_feature_data(
            state, self.state_preprocessor, self.sparse_preprocessor
        )
        model_output = self.model(feature_data)
        # Serve either the deterministic mean action or the sampled action.
        if self.serve_mean_policy:
            action = model_output.squashed_mean
            assert action is not None, "action mean is None and serve_mean_policy=True"
        else:
            action = model_output.action
        if self.action_postprocessor:
            action = self.action_postprocessor(action)
        return (action, model_output.log_prob)

    def input_prototype(self):
        return sparse_input_prototype(
            model=self.model,
            state_preprocessor=self.state_preprocessor,
            state_feature_config=self.state_feature_config,
        )
class ActorPredictorWrapper(torch.jit.ScriptModule):
    """TorchScript serving wrapper for ActorWithPreprocessor."""

    def __init__(
        self,
        actor_with_preprocessor: ActorWithPreprocessor,
        state_feature_config: rlt.ModelFeatureConfig,
        action_feature_ids: List[int] = _DEFAULT_FEATURE_IDS,
    ) -> None:
        """
        action_feature_ids is here to make the interface consistent with FB internal
        version
        """
        # NOTE(review): state_feature_config and action_feature_ids are unused
        # in the OSS implementation.
        super().__init__()
        self.actor_with_preprocessor = torch.jit.trace(
            actor_with_preprocessor, actor_with_preprocessor.input_prototype()
        )

    @torch.jit.script_method
    def forward(
        self, state: rlt.ServingFeatureData
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # Returns (action, log_prob) from the wrapped actor.
        return self.actor_with_preprocessor(state)
class RankingActorWithPreprocessor(ModelBase):
    """Preprocesses a state plus a fixed-length candidate list and runs the
    ranking actor, returning its (optionally post-processed) action."""

    def __init__(
        self,
        model: ModelBase,
        state_preprocessor: Preprocessor,
        candidate_preprocessor: Preprocessor,
        num_candidates: int,
        action_postprocessor: Optional[Postprocessor] = None,
    ):
        super().__init__()
        self.model = model
        self.state_preprocessor = state_preprocessor
        self.candidate_preprocessor = candidate_preprocessor
        self.num_candidates = num_candidates
        self.action_postprocessor = action_postprocessor

    def forward(
        self,
        state_with_presence: Tuple[torch.Tensor, torch.Tensor],
        candidate_with_presence_list: List[Tuple[torch.Tensor, torch.Tensor]],
    ):
        assert (
            len(candidate_with_presence_list) == self.num_candidates
        ), f"{len(candidate_with_presence_list)} != {self.num_candidates}"
        preprocessed_state = self.state_preprocessor(*state_with_presence)
        # Each entry is batch_size x candidate_dim; stacking yields
        # batch_size x num_candidates x candidate_dim.
        per_candidate = [
            self.candidate_preprocessor(value, presence)
            for value, presence in candidate_with_presence_list
        ]
        preprocessed_candidates = torch.stack(per_candidate, dim=1)
        feature_data = rlt.FeatureData(
            float_features=preprocessed_state,
            candidate_docs=rlt.DocList(
                float_features=preprocessed_candidates,
                mask=torch.tensor(-1),
                value=torch.tensor(-1),
            ),
        )
        feature_data = rlt._embed_states(feature_data)
        action = self.model(feature_data).action
        if self.action_postprocessor is not None:
            action = self.action_postprocessor(action)
        return action

    def input_prototype(self):
        return (
            self.state_preprocessor.input_prototype(),
            [self.candidate_preprocessor.input_prototype()] * self.num_candidates,
        )
class RankingActorPredictorWrapper(torch.jit.ScriptModule):
    """TorchScript serving wrapper for RankingActorWithPreprocessor."""

    def __init__(
        self,
        actor_with_preprocessor: RankingActorWithPreprocessor,
        action_feature_ids: List[int],
    ) -> None:
        # NOTE(review): action_feature_ids is unused in the OSS implementation.
        super().__init__()
        # check_trace=False skips output re-validation after tracing —
        # presumably because the actor's output is stochastic; TODO confirm.
        self.actor_with_preprocessor = torch.jit.trace(
            actor_with_preprocessor,
            actor_with_preprocessor.input_prototype(),
            check_trace=False,
        )

    @torch.jit.script_method
    def forward(
        self,
        state_with_presence: Tuple[torch.Tensor, torch.Tensor],
        candidate_with_presence_list: List[Tuple[torch.Tensor, torch.Tensor]],
    ) -> torch.Tensor:
        action = self.actor_with_preprocessor(
            state_with_presence, candidate_with_presence_list
        )
        return action
class LearnVMSlateWithPreprocessor(ModelBase):
    """Preprocesses state + candidate batch and scores each candidate with an MLP."""

    def __init__(
        self,
        mlp: torch.nn.Module,
        state_preprocessor: Preprocessor,
        candidate_preprocessor: Preprocessor,
    ):
        super().__init__()
        self.mlp = mlp
        self.state_preprocessor = state_preprocessor
        self.candidate_preprocessor = candidate_preprocessor

    def input_prototype(self):
        candidate_proto = self.candidate_preprocessor.input_prototype()
        # Fake a slate of 5 identical candidates for tracing purposes.
        return (
            self.state_preprocessor.input_prototype(),
            (
                candidate_proto[0].repeat((1, 5, 1)),
                candidate_proto[1].repeat((1, 5, 1)),
            ),
        )

    def forward(self, state_vp, candidate_vp):
        batch_size, num_candidates, _ = candidate_vp[0].shape
        n_feats = len(self.candidate_preprocessor.sorted_features)
        state_feats = self.state_preprocessor(*state_vp)
        # Flatten candidates to 2-D for the per-row preprocessor, then restore
        # the slate dimension (trailing dim may differ after preprocessing).
        candidate_feats = self.candidate_preprocessor(
            candidate_vp[0].view(batch_size * num_candidates, n_feats),
            candidate_vp[1].view(batch_size * num_candidates, n_feats),
        ).view(batch_size, num_candidates, -1)
        feature_data = rlt.FeatureData(
            float_features=state_feats, candidate_docs=rlt.DocList(candidate_feats)
        )
        return self.mlp(feature_data).view(batch_size, num_candidates)
class SlateRankingPreprocessor(ModelBase):
    """Preprocesses raw (state, candidate-sequence) inputs for slate ranking.

    Candidates arrive as (batch, max_src_seq_len, feat) value/presence tensors;
    they are flattened to 2-D for the row-wise candidate preprocessor and then
    reshaped back to 3-D.
    """

    def __init__(
        self,
        state_preprocessor: Preprocessor,
        candidate_preprocessor: Preprocessor,
        candidate_size: int,
    ):
        super().__init__()
        self.state_preprocessor = state_preprocessor
        self.candidate_preprocessor = candidate_preprocessor
        self.candidate_size = candidate_size

    def input_prototype(self):
        candidate_proto = self.candidate_preprocessor.input_prototype()
        return (
            self.state_preprocessor.input_prototype(),
            (
                candidate_proto[0].repeat((1, self.candidate_size, 1)),
                candidate_proto[1].repeat((1, self.candidate_size, 1)),
            ),
        )

    def forward(
        self,
        state_with_presence: Tuple[torch.Tensor, torch.Tensor],
        candidate_with_presence: Tuple[torch.Tensor, torch.Tensor],
    ):
        # state value/presence: batch_size x state_feat_num
        # candidate value/presence: batch_size x max_src_seq_len x candidate_feat_num
        candidate_value, candidate_presence = candidate_with_presence
        batch_size, max_src_seq_len, candidate_feat_num = candidate_value.shape
        preprocessed_state = self.state_preprocessor(*state_with_presence)
        # Flatten, preprocess row-by-row, then restore the sequence dimension.
        # The last dim after preprocessing need not equal candidate_feat_num.
        preprocessed_candidates = self.candidate_preprocessor(
            candidate_value.view(batch_size * max_src_seq_len, candidate_feat_num),
            candidate_presence.view(batch_size * max_src_seq_len, candidate_feat_num),
        ).view(batch_size, max_src_seq_len, -1)
        return preprocessed_state, preprocessed_candidates
class Seq2SlateWithPreprocessor(nn.Module):
    """Bundles a Seq2Slate ranking network with state/candidate preprocessing.

    Whether the module is later serialized with jit.trace or jit.script depends
    on the model's output architecture (see can_be_traced()).
    """

    def __init__(
        self,
        model: Seq2SlateTransformerNet,
        state_preprocessor: Preprocessor,
        candidate_preprocessor: Preprocessor,
        greedy: bool,
    ):
        super().__init__()
        self.model = model.seq2slate
        self.greedy = greedy
        preprocessor = SlateRankingPreprocessor(
            state_preprocessor, candidate_preprocessor, model.max_src_seq_len
        )
        # Capture the prototype before possible tracing, for input_prototype().
        self.input_prototype_data = preprocessor.input_prototype()
        # if the module has to be serialized via jit.script, preprocessor has to be traced first
        # because preprocessor has operations beyond what jit.script can support
        if not self.can_be_traced():
            preprocessor = torch.jit.trace(preprocessor, preprocessor.input_prototype())
        self.preprocessor = preprocessor
        # Exposed for feature-mapping at serving time.
        self.state_sorted_features = state_preprocessor.sorted_features
        self.candidate_sorted_features = candidate_preprocessor.sorted_features
        self.state_feature_id_to_index = state_preprocessor.feature_id_to_index
        self.candidate_feature_id_to_index = candidate_preprocessor.feature_id_to_index

    def input_prototype(self):
        return self.input_prototype_data

    def forward(
        self,
        state_with_presence: Tuple[torch.Tensor, torch.Tensor],
        candidate_with_presence: Tuple[torch.Tensor, torch.Tensor],
    ):
        preprocessed_state, preprocessed_candidates = self.preprocessor(
            state_with_presence, candidate_with_presence
        )
        # Rank the full candidate list (target length == source length).
        max_src_seq_len = preprocessed_candidates.shape[1]
        res = self.model(
            mode=Seq2SlateMode.RANK_MODE.value,
            state=preprocessed_state,
            src_seq=preprocessed_candidates,
            tgt_seq_len=max_src_seq_len,
            greedy=self.greedy,
        )
        return (
            res.ranked_per_symbol_probs,
            res.ranked_per_seq_probs,
            res.ranked_tgt_out_idx,
        )

    def can_be_traced(self):
        """
        Whether this module can be serialized by jit.trace.
        In production, we find jit.trace may have faster performance than jit.script.
        The models that can be traced are those don't have for-loop in inference,
        since we want to deal with inputs of variable lengths. The models that can't
        be traced are those with iterative decoder, i.e., autoregressive or non-greedy
        frechet-sort.
        """
        output_arch = self.model.output_arch
        return output_arch == Seq2SlateOutputArch.ENCODER_SCORE or (
            output_arch == Seq2SlateOutputArch.FRECHET_SORT and self.greedy
        )
class Seq2SlatePredictorWrapper(torch.jit.ScriptModule):
    """TorchScript serving wrapper for Seq2SlateWithPreprocessor.

    Uses jit.trace when the wrapped module supports it (no data-dependent
    control flow), otherwise falls back to jit.script.
    """

    def __init__(self, seq2slate_with_preprocessor: Seq2SlateWithPreprocessor) -> None:
        super().__init__()
        if seq2slate_with_preprocessor.can_be_traced():
            self.seq2slate_with_preprocessor = torch.jit.trace(
                seq2slate_with_preprocessor,
                seq2slate_with_preprocessor.input_prototype(),
            )
        else:
            self.seq2slate_with_preprocessor = torch.jit.script(
                seq2slate_with_preprocessor
            )

    @torch.jit.script_method
    def forward(
        self,
        state_with_presence: Tuple[torch.Tensor, torch.Tensor],
        candidate_with_presence: Tuple[torch.Tensor, torch.Tensor],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # ranked_per_seq_probs shape: batch_size, 1
        # ranked_tgt_out_idx shape: batch_size, tgt_seq_len
        _, ranked_per_seq_probs, ranked_tgt_out_idx = self.seq2slate_with_preprocessor(
            state_with_presence, candidate_with_presence
        )
        assert ranked_tgt_out_idx is not None
        assert ranked_per_seq_probs is not None
        # -2 to offset padding symbol and decoder start symbol
        ranked_tgt_out_idx -= 2
        return ranked_per_seq_probs, ranked_tgt_out_idx
class Seq2RewardWithPreprocessor(DiscreteDqnWithPreprocessor):
    def __init__(
        self,
        model: ModelBase,  # acc_reward prediction model
        state_preprocessor: Preprocessor,
        seq_len: int,
        num_action: int,
    ):
        """
        Since TorchScript unable to trace control-flow, we
        have to generate the action enumerations as constants
        here so that trace can use them directly.
        """
        super().__init__(model, state_preprocessor, rlt.ModelFeatureConfig())
        self.seq_len = seq_len
        self.num_action = num_action
        # All num_action^seq_len action sequences, precomputed as a constant
        # so tracing does not need a loop.
        self.all_permut = gen_permutations(seq_len, num_action)

    def forward(self, state: rlt.ServingFeatureData):
        """
        This serving module only takes in current state.
        We need to simulate all multi-step length action seq's
        then predict accumulated reward on all those seq's.
        After that, we categorize all action seq's by their
        first actions. Then take the maximum reward as the
        predicted categorical reward for that category.
        Return: categorical reward for the first action
        """
        state_with_presence, _, _ = state
        batch_size, state_dim = state_with_presence[0].size()
        state_first_step = self.state_preprocessor(
            state_with_presence[0], state_with_presence[1]
        ).reshape(batch_size, -1)
        # shape: batch_size, num_action
        max_acc_reward = get_Q(
            self.model,
            state_first_step,
            self.all_permut,
        )
        return max_acc_reward
class Seq2RewardPlanShortSeqWithPreprocessor(DiscreteDqnWithPreprocessor):
    def __init__(
        self,
        model: ModelBase,  # acc_reward prediction model
        step_model: ModelBase,  # step prediction model
        state_preprocessor: Preprocessor,
        seq_len: int,
        num_action: int,
    ):
        """
        The difference with Seq2RewardWithPreprocessor:
        This wrapper will plan for different look_ahead steps (between 1 and seq_len),
        and merge results according to look_ahead step prediction probabilities.
        """
        super().__init__(model, state_preprocessor, rlt.ModelFeatureConfig())
        self.step_model = step_model
        self.seq_len = seq_len
        self.num_action = num_action
        # key: seq_len, value: all possible action sequences of length seq_len
        self.all_permut = {
            s + 1: gen_permutations(s + 1, num_action) for s in range(seq_len)
        }

    def forward(self, state: rlt.ServingFeatureData):
        state_with_presence, _, _ = state
        batch_size, state_dim = state_with_presence[0].size()
        state_first_step = self.state_preprocessor(
            state_with_presence[0], state_with_presence[1]
        ).reshape(batch_size, -1)
        # shape: batch_size, seq_len
        # Probability that each look-ahead horizon is the right one.
        step_probability = F.softmax(self.step_model(state_first_step), dim=1)
        # shape: batch_size, seq_len, num_action
        # Per-horizon categorical rewards, stacked along dim 1.
        max_acc_reward = torch.cat(
            [
                get_Q(
                    self.model,
                    state_first_step,
                    self.all_permut[i + 1],
                ).unsqueeze(1)
                for i in range(self.seq_len)
            ],
            dim=1,
        )
        # shape: batch_size, num_action
        # Expected reward, weighted by the horizon probabilities.
        max_acc_reward_weighted = torch.sum(
            max_acc_reward * step_probability.unsqueeze(2), dim=1
        )
        return max_acc_reward_weighted
class Seq2SlateRewardWithPreprocessor(ModelBase):
    """Preprocesses raw state/candidate tensors and predicts the slate reward
    for the identity slate (first max_tgt_seq_len candidates, in order)."""

    def __init__(
        self,
        model: Seq2SlateRewardNetBase,
        state_preprocessor: Preprocessor,
        candidate_preprocessor: Preprocessor,
    ):
        super().__init__()
        self.model = model
        self.state_preprocessor = state_preprocessor
        self.candidate_preprocessor = candidate_preprocessor

    def input_prototype(self):
        candidate_input_prototype = self.candidate_preprocessor.input_prototype()
        return (
            self.state_preprocessor.input_prototype(),
            (
                candidate_input_prototype[0].repeat((1, self.model.max_src_seq_len, 1)),
                candidate_input_prototype[1].repeat((1, self.model.max_src_seq_len, 1)),
            ),
        )

    @property
    def state_sorted_features(self) -> List[int]:
        return self.state_preprocessor.sorted_features

    @property
    def candidate_sorted_features(self) -> List[int]:
        return self.candidate_preprocessor.sorted_features

    def forward(
        self,
        state_with_presence: Tuple[torch.Tensor, torch.Tensor],
        candidate_with_presence: Tuple[torch.Tensor, torch.Tensor],
    ):
        # state_value.shape == state_presence.shape == batch_size x state_feat_num
        # candidate_value.shape == candidate_presence.shape ==
        # batch_size x max_src_seq_len x candidate_feat_num
        batch_size = state_with_presence[0].shape[0]
        max_tgt_seq_len = self.model.max_tgt_seq_len
        max_src_seq_len = self.model.max_src_seq_len
        # we use a fake slate_idx_with_presence to retrieve the first
        # max_tgt_seq_len candidates from
        # len(slate_idx_with presence) == batch_size
        # component: 1d tensor with length max_tgt_seq_len
        slate_idx_with_presence = [
            (torch.arange(max_tgt_seq_len), torch.ones(max_tgt_seq_len))
        ] * batch_size
        preprocessed_state = self.state_preprocessor(
            state_with_presence[0], state_with_presence[1]
        )
        # Flatten candidates to 2-D for the preprocessor, then restore 3-D.
        preprocessed_candidates = self.candidate_preprocessor(
            candidate_with_presence[0].view(
                batch_size * max_src_seq_len, len(self.candidate_sorted_features)
            ),
            candidate_with_presence[1].view(
                batch_size * max_src_seq_len, len(self.candidate_sorted_features)
            ),
        ).view(batch_size, max_src_seq_len, -1)
        # Full attention mask: every source position may attend to every other.
        src_src_mask = torch.ones(batch_size, max_src_seq_len, max_src_seq_len)
        tgt_out_idx = torch.cat(
            [slate_idx[0] for slate_idx in slate_idx_with_presence]
        ).view(batch_size, max_tgt_seq_len)
        tgt_out_seq = gather(preprocessed_candidates, tgt_out_idx)
        ranking_input = rlt.PreprocessedRankingInput.from_tensors(
            state=preprocessed_state,
            src_seq=preprocessed_candidates,
            src_src_mask=src_src_mask,
            tgt_out_seq=tgt_out_seq,
            # +2 is needed to avoid two preserved symbols:
            # PADDING_SYMBOL = 0
            # DECODER_START_SYMBOL = 1
            tgt_out_idx=tgt_out_idx + 2,
        )
        output = self.model(ranking_input)
        return output.predicted_reward
class MDNRNNWithPreprocessor(ModelBase):
    """Wraps a memory network with the state preprocessor for serving.

    The raw state batch is preprocessed and reshaped from
    (batch, seq_len * feat) to (seq_len, batch, feat) before being passed,
    together with the action tensor, to the wrapped model.
    """

    def __init__(
        self,
        model: ModelBase,
        state_preprocessor: Preprocessor,
        seq_len: int,
        num_action: int,
        state_feature_config: Optional[rlt.ModelFeatureConfig] = None,
    ):
        super().__init__()
        self.model = model
        self.state_preprocessor = state_preprocessor
        self.state_feature_config = state_feature_config or rlt.ModelFeatureConfig()
        self.sparse_preprocessor = make_sparse_preprocessor(
            self.state_feature_config, device=torch.device("cpu")
        )
        self.seq_len = seq_len
        self.num_action = num_action

    def forward(
        self,
        state_with_presence: Tuple[torch.Tensor, torch.Tensor],
        action: torch.Tensor,
    ):
        batch_size = state_with_presence[0].shape[0]
        # (batch, seq*feat) -> (batch, seq, feat) -> (seq, batch, feat)
        seq_first_state = (
            self.state_preprocessor(*state_with_presence)
            .reshape(batch_size, self.seq_len, -1)
            .transpose(0, 1)
        )
        return self.model(action, seq_first_state)

    def input_prototype(self):
        return (
            self.state_preprocessor.input_prototype(),
            torch.randn(1, 1, self.num_action, device=self.state_preprocessor.device),
        )
class CompressModelWithPreprocessor(DiscreteDqnWithPreprocessor):
    """Same preprocessing as DiscreteDqnWithPreprocessor, but returns the
    model's output tensor directly (no action-name pairing)."""

    def forward(self, state: rlt.ServingFeatureData):
        feature_data = serving_to_feature_data(
            state, self.state_preprocessor, self.sparse_preprocessor
        )
        return self.model(feature_data)
| 31,722 | 35.75898 | 99 | py |
ReAgent | ReAgent-master/reagent/prediction/synthetic_reward/synthetic_reward_predictor_wrapper.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from typing import Tuple
import torch
import torch.nn as nn
from reagent.models.base import ModelBase
from reagent.preprocessing.preprocessor import Preprocessor
def split_features(
    state_and_action_with_presence: Tuple[torch.Tensor, torch.Tensor],
    state_feat_num: int,
    action_feat_num: int,
):
    """Split concatenated (state ++ action) value/presence tensors column-wise.

    Args:
        state_and_action_with_presence: (values, presence) tensors, each of
            shape (batch, state_feat_num + action_feat_num), with state
            columns first.
        state_feat_num: number of leading columns belonging to the state.
        action_feat_num: number of following columns belonging to the action.

    Returns:
        ((state_values, state_presence), (action_values, action_presence)),
        each a view over the input tensors (no copy).
    """
    values, presence = state_and_action_with_presence
    action_end = state_feat_num + action_feat_num
    state_pair = (values[:, :state_feat_num], presence[:, :state_feat_num])
    action_pair = (
        values[:, state_feat_num:action_end],
        presence[:, state_feat_num:action_end],
    )
    return state_pair, action_pair
class SyntheticRewardPredictorWrapper(nn.Module):
    """Serving wrapper for a synthetic-reward network.

    Splits a concatenated (state ++ action) sequence, preprocesses each part,
    and returns the per-step reward predicted by ``net``.
    """

    def __init__(
        self,
        seq_len: int,
        state_preprocessor: Preprocessor,
        action_preprocessor: Preprocessor,
        net: ModelBase,
    ) -> None:
        super().__init__()
        self.seq_len = seq_len
        self.state_preprocessor = state_preprocessor
        self.action_preprocessor = action_preprocessor
        self.net = net
        self.state_feat_num = len(state_preprocessor.sorted_features)
        self.action_feat_num = len(action_preprocessor.sorted_features)

    def forward(
        self,
        state_and_action_with_presence: Tuple[torch.Tensor, torch.Tensor],
    ) -> torch.Tensor:
        # Input rows are time steps; exactly seq_len of them are expected.
        assert self.seq_len == state_and_action_with_presence[0].shape[0]
        state_with_presence, action_with_presence = split_features(
            state_and_action_with_presence,
            self.state_feat_num,
            self.action_feat_num,
        )
        # shape: seq_len, 1, state_feat_dim (batch dim of 1 inserted for the net)
        preprocessed_state = self.state_preprocessor(*state_with_presence).unsqueeze(1)
        # shape: seq_len, 1, action_feat_dim
        preprocessed_action = self.action_preprocessor(*action_with_presence).unsqueeze(
            1
        )
        # shape: (seq_len, )
        return self.net(preprocessed_state, preprocessed_action).flatten()
| 2,372 | 35.507692 | 83 | py |
ReAgent | ReAgent-master/reagent/prediction/ranking/predictor_wrapper.py | from enum import Enum
from typing import Tuple, List, Optional
import torch
import torch.nn.functional as F
class Kernel(Enum):
    """Similarity kernel used to build the DPP kernel matrix."""

    # <x, y> = dot_product(x, y)
    Linear = "linear"
    # <x, y> = exp(-||x-y||^2 / (2 * sigma^2))
    RBF = "rbf"
class DeterminantalPointProcessPredictorWrapper(torch.jit.ScriptModule):
    """http://jgillenw.com/cikm2018.pdf Algorithm 1

    Greedy MAP inference for a determinantal point process: items are selected
    one by one, each time picking the item that maximizes the determinant of
    the kernel submatrix of chosen items.
    """

    def __init__(
        self,
        alpha: float,
        kernel: Kernel = Kernel.Linear,
        sigma: float = 1.0,
        rerank_topk: Optional[int] = None,
    ) -> None:
        super().__init__()
        # control the strength of encouragement for diversity
        self.alpha = alpha
        # distance function
        self.kernel = kernel
        # sigma parameter used in the RBF kernel
        self.sigma = sigma
        # hard code this value so jit.script can work
        # (acts as -inf sentinel; ~ lowest finite float32)
        self.MIN_VALUE = -3.4e38
        # if None, will rerank the full slate
        self.rerank_topk = rerank_topk
        if self.rerank_topk is not None:
            assert self.rerank_topk > 0

    def unchosen_dets(self, L, chosen: List[int]):
        """For every not-yet-chosen item i, compute det(L[chosen + [i]]);
        chosen items get MIN_VALUE so argmax never picks them again."""
        slate_size = L.shape[0]
        dets = torch.full((slate_size,), self.MIN_VALUE, device=L.device)
        for i in range(slate_size):
            if i not in chosen:
                dets[i] = torch.det(L[:, chosen + [i]][chosen + [i]])
        return dets

    def greedy_select(self, L):
        """Greedily order items by incremental determinant gain.

        Returns (final_order, dets) where row t of ``dets`` holds the
        candidate determinants evaluated at selection step t."""
        slate_size = L.shape[0]
        dets = torch.zeros(slate_size, slate_size, device=L.device)
        chosen: List[int] = []
        unchosen = torch.ones(slate_size)
        if self.rerank_topk is not None:
            rerank_topk = min(self.rerank_topk, slate_size)
        else:
            rerank_topk = slate_size
        for i in range(rerank_topk):
            unchosen_dets = self.unchosen_dets(L, chosen)
            dets[i, :] = unchosen_dets
            chosen_idx = torch.argmax(unchosen_dets)
            chosen.append(chosen_idx.item())
            unchosen[chosen_idx] = 0
        final_order = torch.tensor(chosen)
        # Items beyond the reranked prefix keep their original relative order.
        if rerank_topk != slate_size:
            final_order = torch.cat((final_order, torch.nonzero(unchosen).flatten()))
        return final_order, dets

    @torch.jit.script_method
    def forward(
        self,
        quality_scores: torch.Tensor,
        feature_vectors: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Args:
            quality_scores: (num_items, 1)
            feature_vectors (num_items, num_feat)
        Return:
            chosen indices: (num_items, )
            determinants computed at each selection: (num_items, num_items)
            the kernel matrix: (num_items, num_items)
        """
        quality_scores = quality_scores.float()
        # L2-normalize so dot products / distances reflect direction only.
        feature_vectors = F.normalize(feature_vectors.float(), p=2.0, dim=1)
        num_items = quality_scores.shape[0]
        if self.kernel == Kernel.Linear:
            B = (self.alpha ** 0.5) * quality_scores * feature_vectors
            L = torch.mm(B, B.t())
            # Diagonal is set to raw quality^2 (alpha only scales off-diagonals).
            L[torch.arange(num_items), torch.arange(num_items)] = (
                quality_scores.squeeze(1) ** 2
            )
        elif self.kernel == Kernel.RBF:
            L = (
                self.alpha
                * torch.mm(quality_scores, quality_scores.t())
                * torch.exp(
                    -(torch.cdist(feature_vectors, feature_vectors, p=2.0) ** 2)
                    / (2 * self.sigma ** 2)
                )
            )
        else:
            raise NotImplementedError()
        chosen, dets = self.greedy_select(L)
        return chosen, dets, L
ReAgent | ReAgent-master/docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make the repository root importable so autodoc can find the reagent package.
sys.path.insert(0, os.path.abspath("../"))
# -- Project information -----------------------------------------------------
project = "ReAgent"
copyright = "2019, Facebook Inc."
author = "ReAgent Team"
# The full version, including alpha/beta/rc tags
release = "1.0"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinxcontrib.apidoc",
    "sphinx.ext.autodoc",
    "sphinx.ext.coverage",
    "sphinxcontrib.napoleon",
]
# sphinxcontrib-apidoc: auto-generate API docs for the reagent package into ./api,
# skipping tests.
apidoc_module_dir = "../reagent"
apidoc_output_dir = "api"
apidoc_excluded_paths = ["test"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# Heavy/optional dependencies are mocked so autodoc can import reagent modules
# without having them installed on the docs build host.
autodoc_mock_imports = [
    "scipy",
    "numpy",
    "caffe2",
    "torch",
    "pandas",
    "sklearn",
    "reagent.test",
]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
EFT | EFT-main/incremental_dataloader.py | '''
TaICML incremental learning
Copyright (c) Jathushan Rajasegaran, 2019
'''
import random
import numpy as np
import torch
from PIL import Image
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data import Sampler
from torchvision import datasets, transforms
# from imagenet import ImageNet
from idatasets.CUB200 import Cub2011
from idatasets.omniglot import Omniglot
from idatasets.celeb_1m import MS1M
import collections
# from utils.cutout import Cutout
import torchvision
class SubsetRandomSampler(Sampler):
    r"""Samples elements from a given list of indices, without replacement.

    Iteration order is permuted anew on each pass when ``shuffle`` is True;
    otherwise the indices are yielded in their original order.

    Arguments:
        indices (sequence): a sequence of indices
        shuffle (bool): whether to permute the indices when iterating
    """

    def __init__(self, indices, shuffle):
        self.indices = indices
        self.shuffle = shuffle

    def __iter__(self):
        if self.shuffle:
            order = torch.randperm(len(self.indices))
        else:
            order = range(len(self.indices))
        return (self.indices[i] for i in order)

    def __len__(self):
        return len(self.indices)
class IncrementalDataset:
    def __init__(
        self,
        dataset_name,
        args,
        random_order=False,
        shuffle=True,
        workers=10,
        batch_size=128,
        seed=1,
        increment=10,
        validation_split=0.
    ):
        """Prepare the incremental-learning splits for ``dataset_name``.

        Args:
            dataset_name: registered dataset name (matched case-insensitively).
            args: experiment config; this class reads args.data_path,
                args.class_per_task, args.test_batch, args.overflow, args.mu.
            random_order: forwarded to _setup_data (class-order shuffling).
            shuffle: stored; NOTE(review) the loaders built here hard-code
                their own sampler shuffling — confirm whether this is used.
            workers: stored; the visible loaders hard-code num_workers.
            batch_size: training batch size.
            seed: forwarded to _setup_data.
            increment: number of classes added per task (forwarded).
            validation_split: forwarded to _setup_data.
        """
        self.dataset_name = dataset_name.lower().strip()
        datasets = _get_datasets(dataset_name)
        self.train_transforms = datasets[0].train_transforms
        self.common_transforms = datasets[0].common_transforms
        # NOTE(review): bare except — presumably meta_transforms is optional
        # for some datasets; narrow to AttributeError when touching this.
        try:
            self.meta_transforms = datasets[0].meta_transforms
        except:
            self.meta_transforms = datasets[0].train_transforms
        self.args = args
        self._setup_data(
            datasets,
            args.data_path,
            random_order=random_order,
            seed=seed,
            increment=increment,
            validation_split=validation_split
        )
        self._current_task = 0
        self._batch_size = batch_size
        self._workers = workers
        self._shuffle = shuffle
        # Filled lazily by get_same_index_test_chunk: task id -> test-set size.
        self.sample_per_task_testing = {}
    @property
    def n_tasks(self):
        # Total number of incremental tasks (one per entry in self.increments).
        return len(self.increments)
def get_same_index(self, target, label, mode="train", memory=None):
label_indices = []
label_targets = []
for i in range(len(target)):
if int(target[i]) in label:
label_indices.append(i)
label_targets.append(target[i])
for_memory = (label_indices.copy(),label_targets.copy())
if(self.args.overflow and not(mode=="test")):
memory_indices, memory_targets = memory
return memory_indices, memory
if memory is not None:
memory_indices, memory_targets = memory
memory_indices2 = np.tile(memory_indices, (self.args.mu,))
all_indices = np.concatenate([memory_indices2,label_indices])
else:
all_indices = label_indices
return all_indices, for_memory
    def get_same_index_test_chunk(self, target, label, mode="test", memory=None):
        """Gather test indices for the classes in ``label``, grouped by task.

        Classes are processed in chunks of ``args.class_per_task``; samples are
        shuffled within each chunk but chunks remain in task order, so per-task
        metrics can be computed from contiguous slices of the result. Also
        records each task's test-set size in ``self.sample_per_task_testing``
        (first time the task is seen only).

        NOTE(review): uint32 index arrays cap indices at 2**32-1.
        """
        label_indices = []
        label_targets = []
        np_target = np.array(target, dtype="uint32")
        np_indices = np.array(list(range(len(target))), dtype="uint32")
        for t in range(len(label)//self.args.class_per_task):
            task_idx = []
            for class_id in label[t*self.args.class_per_task: (t+1)*self.args.class_per_task]:
                idx = np.where(np_target==class_id)[0]
                task_idx.extend(list(idx.ravel()))
            task_idx = np.array(task_idx, dtype="uint32")
            # NOTE(review): ravel() returns a new array and is discarded here
            # (no-op); shuffle below is the operation that matters.
            task_idx.ravel()
            random.shuffle(task_idx)
            label_indices.extend(list(np_indices[task_idx]))
            label_targets.extend(list(np_target[task_idx]))
            if(t not in self.sample_per_task_testing.keys()):
                self.sample_per_task_testing[t] = len(task_idx)
        label_indices = np.array(label_indices, dtype="uint32")
        # NOTE(review): ravel() result discarded here as well (no-op).
        label_indices.ravel()
        return list(label_indices), label_targets
    def new_task(self, memory=None):
        """Advance to the next incremental task and build its data loaders.

        Args:
            memory: optional (indices, targets) rehearsal memory passed through
                to get_same_index.

        Returns:
            (task_info, train_loader, test_loader, test_loader) — the test
            loader (returned twice) covers only the current task's class range.
        """
        # Debug output left in by the authors.
        print(self._current_task)
        print(self.increments)
        min_class = sum(self.increments[:self._current_task])
        max_class = sum(self.increments[:self._current_task + 1])
        # Overflow mode always uses the full class range.
        if(self.args.overflow):
            min_class = 0
            max_class = sum(self.increments)
        train_indices, for_memory = self.get_same_index(self.train_dataset.targets, list(range(min_class, max_class)), mode="train", memory=memory)
        test_indices, _ = self.get_same_index(self.test_dataset.targets, list(range(min_class,max_class)), mode="test")
        self.train_data_loader = torch.utils.data.DataLoader(self.train_dataset, batch_size=self._batch_size,shuffle=False,num_workers=16, sampler=SubsetRandomSampler(train_indices, True))
        self.test_data_loader = torch.utils.data.DataLoader(self.test_dataset, batch_size=self.args.test_batch,shuffle=False,num_workers=16, sampler=SubsetRandomSampler(test_indices, False))
        task_info = {
            "min_class": min_class,
            "max_class": max_class,
            "task": self._current_task,
            "max_task": len(self.increments),
            "n_train_data": len(train_indices),
            "n_test_data": len(test_indices)
        }
        self._current_task += 1
        return task_info, self.train_data_loader, self.test_data_loader, self.test_data_loader
    # for verification
    def get_galary(self, task, batch_size=10):
        """Return a loader with one exemplar (the first occurrence) per class
        of ``task``, in ascending class order.

        (Method name — "galary" [sic] — kept for caller compatibility.)
        """
        indexes = []
        dict_ind = {}
        seen_classes = []
        for i, t in enumerate(self.train_dataset.targets):
            if not(t in seen_classes) and (t< (task+1)*self.args.class_per_task and (t>= (task)*self.args.class_per_task)):
                seen_classes.append(t)
                dict_ind[t] = i
        # Sort by class id so the gallery is in a deterministic order.
        od = collections.OrderedDict(sorted(dict_ind.items()))
        for k, v in od.items():
            indexes.append(v)
        data_loader = torch.utils.data.DataLoader(self.train_dataset, batch_size=batch_size, shuffle=False, num_workers=4, sampler=SubsetRandomSampler(indexes, False))
        return data_loader
def get_custom_loader_idx(self, indexes, mode="train", batch_size=10, shuffle=True):
    """Build a DataLoader restricted to the given dataset indices.

    `mode` selects the train or test dataset; training selections are served
    shuffled by the sampler, test selections in order. The `shuffle` argument
    is accepted for interface compatibility but not used.
    """
    if mode == "train":
        source, randomize = self.train_dataset, True
    else:
        source, randomize = self.test_dataset, False
    return torch.utils.data.DataLoader(
        source, batch_size=batch_size, shuffle=False, num_workers=4,
        sampler=SubsetRandomSampler(indexes, randomize))
def get_custom_loader_class(self, class_id, mode="train", batch_size=10, shuffle=False):
    """Build a DataLoader over all samples whose label is in `class_id`.

    Training selections are shuffled by the sampler; test selections are
    served in order. `shuffle` is accepted for compatibility but unused.
    """
    if mode == "train":
        indices, _ = self.get_same_index(self.train_dataset.targets, class_id, mode="train", memory=None)
        return torch.utils.data.DataLoader(
            self.train_dataset, batch_size=batch_size, shuffle=False,
            num_workers=4, sampler=SubsetRandomSampler(indices, True))
    indices, _ = self.get_same_index(self.test_dataset.targets, class_id, mode="test")
    return torch.utils.data.DataLoader(
        self.test_dataset, batch_size=batch_size, shuffle=False,
        num_workers=4, sampler=SubsetRandomSampler(indices, False))
def _setup_data(self, datasets, path, random_order=False, seed=1, increment=10, validation_split=0.):
    """Instantiate train/test datasets for `self.dataset_name` and remap labels.

    Side effects: sets self.increments, self.class_order, self.train_dataset
    and self.test_dataset. `validation_split` is currently unused.
    """
    self.increments = []
    self.class_order = []
    trsf_train = transforms.Compose(self.train_transforms)
    # Fall back to the train pipeline when no meta pipeline is declared.
    # FIX: this was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; the only expected failure here is the missing
    # attribute (the handler classes spell it "mata_transforms").
    try:
        trsf_mata = transforms.Compose(self.meta_transforms)
    except AttributeError:
        trsf_mata = transforms.Compose(self.train_transforms)
    trsf_test = transforms.Compose(self.common_transforms)
    current_class_idx = 0  # When using multiple datasets
    for dataset in datasets:
        # Each dataset family uses a different constructor signature.
        if(self.dataset_name=="imagenet"):
            train_dataset = dataset.base_dataset(root=path, split='train', download=False, transform=trsf_train)
            test_dataset = dataset.base_dataset(root=path, split='val', download=False, transform=trsf_test)
        elif(self.dataset_name=="tinyimagenet" or self.dataset_name=="tinyimagenet56"):
            # path='/data/dataset/tiny_imagenet/tiny-imagenet-200/'
            train_dataset = torchvision.datasets.ImageFolder(root=path + 'train',transform=trsf_train)
            test_dataset = torchvision.datasets.ImageFolder(root=path + 'val', transform=trsf_test)
        elif(self.dataset_name=="cub200" or self.dataset_name=="cifar100" or self.dataset_name=="mnist" or self.dataset_name=="caltech101" or self.dataset_name=="omniglot" or self.dataset_name=="celeb"):
            train_dataset = dataset.base_dataset(root=path, train=True, download=True, transform=trsf_train)
            test_dataset = dataset.base_dataset(root=path, train=False, download=True, transform=trsf_test)
        elif(self.dataset_name=="svhn"):
            train_dataset = dataset.base_dataset(root=path, split='train', download=True, transform=trsf_train)
            test_dataset = dataset.base_dataset(root=path, split='test', download=True, transform=trsf_test)
            # SVHN exposes .labels rather than .targets; normalize the attribute.
            train_dataset.targets = train_dataset.labels
            test_dataset.targets = test_dataset.labels
        order = [i for i in range(self.args.num_class)]
        if random_order:
            random.seed(seed)
            random.shuffle(order)
        elif dataset.class_order is not None:
            order = dataset.class_order
        # Remap every label through the (possibly shuffled) class order.
        for i,t in enumerate(train_dataset.targets):
            train_dataset.targets[i] = order[t]
        for i,t in enumerate(test_dataset.targets):
            test_dataset.targets[i] = order[t]
        self.class_order.append(order)
        # NOTE(review): overwritten on every loop iteration, so only the last
        # dataset in `datasets` defines the task increments — confirm the
        # multi-dataset behavior is intended.
        self.increments = [increment for _ in range(len(order) // increment)]
    # Only the last dataset is kept, matching the note above.
    self.train_dataset = train_dataset
    self.test_dataset = test_dataset
@staticmethod
def _map_new_class_index(y, order):
    """Transforms targets for new class order.

    Each label in `y` is replaced by its position in `order`.
    """
    return np.array([order.index(label) for label in y])
def get_memory(self, memory, for_memory, seed=1):
    """Rebuild the rehearsal memory after finishing session `self.args.sess`.

    Keeps at most `args.memory // ((sess+1)*class_per_task)` samples per class:
    first the retained samples of the old classes (from `memory`), then the
    candidates for the current session's classes (from `for_memory`).

    Returns:
        (indices, targets) as lists of int32 values.
    """
    random.seed(seed)
    per_class = self.args.memory // ((self.args.sess + 1) * self.args.class_per_task)
    mu = 1  # replication factor for each kept sample
    data_parts = [np.array([])]
    target_parts = [np.array([])]

    def _keep(sample_ids, labels, class_range):
        # Keep the first `per_class` occurrences of every class in the range.
        for cls in class_range:
            chosen = np.where(labels == cls)[0][:per_class]
            data_parts.append(np.tile(sample_ids[chosen], (mu,)))
            target_parts.append(np.tile(labels[chosen], (mu,)))

    # Trim the memory of previously-seen classes down to the new budget.
    if memory is not None:
        old_ids, old_labels = memory
        _keep(np.array(old_ids, dtype="int32"),
              np.array(old_labels, dtype="int32"),
              range(self.args.class_per_task * self.args.sess))
    # Add the current session's classes.
    new_ids, new_labels = for_memory
    _keep(np.array(new_ids, dtype="int32"),
          np.array(new_labels, dtype="int32"),
          range(self.args.class_per_task * self.args.sess,
                self.args.class_per_task * (1 + self.args.sess)))
    self._data_memory = np.concatenate(data_parts)
    self._targets_memory = np.concatenate(target_parts)
    print(len(self._data_memory))
    return list(self._data_memory.astype("int32")), list(self._targets_memory.astype("int32"))
def _get_datasets(dataset_names):
    """Resolve a '-'-separated list of dataset names to handler classes."""
    names = dataset_names.split("-")
    return [_get_dataset(name) for name in names]
def _get_dataset(dataset_name):
    """Map a dataset name (case/whitespace-insensitive) to its handler class.

    Raises:
        NotImplementedError: if the name is not a known dataset.
    """
    key = dataset_name.lower().strip()
    # Lambdas defer name resolution, so an unknown key never touches the
    # handler classes.
    handlers = (
        ("cifar10", lambda: iCIFAR10),
        ("cifar100", lambda: iCIFAR100),
        ("imagenet", lambda: iIMAGENET),
        ("tinyimagenet", lambda: iTINYIMAGENET),
        ("tinyimagenet56", lambda: iTINYIMAGENET56),
        ("cub200", lambda: iCUB200),
        ("mnist", lambda: iMNIST),
        ("caltech101", lambda: iCALTECH101),
        ("celeb", lambda: iCELEB),
        ("svhn", lambda: iSVHN),
        ("omniglot", lambda: iOMNIGLOT),
    )
    for name, handler in handlers:
        if key == name:
            return handler()
    raise NotImplementedError("Unknown dataset {}.".format(key))
class DataHandler:
    # Base class describing how to load one dataset: the torchvision-style
    # dataset class plus the transform pipelines used for train/eval.
    base_dataset = None  # dataset class, set by subclasses
    train_transforms = []  # augmentations applied to training images
    # NOTE(review): spelled "mata_transforms" here, but _setup_data reads
    # "meta_transforms" — that lookup always fails and it falls back to
    # train_transforms. Confirm before renaming either side.
    mata_transforms = [transforms.ToTensor()]
    common_transforms = [transforms.ToTensor()]  # eval/test pipeline
    class_order = None  # optional fixed class ordering
class iCIFAR10(DataHandler):
    # CIFAR-10 handler: crop/flip/rotation/jitter augmentation for training.
    # NOTE(review): the train pipeline has no Normalize while the test
    # pipeline does — confirm the asymmetry is intended.
    base_dataset = datasets.cifar.CIFAR10
    train_transforms = [
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(10),
        transforms.ColorJitter(brightness=63 / 255),
        transforms.ToTensor(),
    ]
    common_transforms = [
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
    ]
class iCIFAR100(DataHandler):
    # CIFAR-100 handler; both pipelines normalize with CIFAR-100 statistics.
    base_dataset = datasets.cifar.CIFAR100
    train_transforms = [
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(10),
        transforms.ToTensor(),
        transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
    ]
    common_transforms = [
        transforms.ToTensor(),
        transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
    ]
class iCALTECH101(DataHandler):
    # Caltech-101 handler: images resized and cropped to 128x128.
    # NOTE(review): normalization constants are CIFAR-100's, not Caltech's.
    base_dataset = datasets.Caltech101
    train_transforms = [
        transforms.Resize(136),
        transforms.RandomCrop(128, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(10),
        # transforms.ColorJitter(brightness=63 / 255),
        transforms.ToTensor(),
        transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
    ]
    common_transforms = [
        transforms.Resize(130),
        transforms.CenterCrop(128),
        transforms.ToTensor(),
        transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
    ]
class iCELEB(DataHandler):
    # Celebrity-face handler (MS1M subset); 112x112 training crops.
    # NOTE(review): normalization constants are CIFAR-100's, not face stats.
    base_dataset = MS1M
    train_transforms = [
        transforms.RandomCrop(112, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(10),
        transforms.ToTensor(),
        transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
    ]
    common_transforms = [
        transforms.ToTensor(),
        transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
    ]
class iIMAGENET(DataHandler):
    # ImageNet handler: standard 224x224 crops with ImageNet normalization.
    base_dataset = datasets.ImageNet
    train_transforms = [
        transforms.Resize(256),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(10),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ]
    common_transforms = [
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ]
class iTINYIMAGENET(DataHandler):
    # TinyImageNet handler (64x64). base_dataset stays None: _setup_data
    # builds this dataset via torchvision ImageFolder instead.
    # base_dataset = datasets.ImageNet
    train_transforms = [
        transforms.Resize(70),
        transforms.RandomResizedCrop(64),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(10),
        transforms.ToTensor(),
        transforms.Normalize((0.480, 0.448, 0.397), (0.277, 0.270, 0.282)),
    ]
    common_transforms = [
        transforms.Resize(68),
        transforms.CenterCrop(64),
        transforms.ToTensor(),
        transforms.Normalize((0.480, 0.448, 0.397), (0.277, 0.270, 0.282)),
    ]
class iTINYIMAGENET56(DataHandler):
    # TinyImageNet at 56x56. base_dataset stays None: _setup_data builds
    # this dataset via torchvision ImageFolder instead.
    # base_dataset = datasets.ImageNet
    train_transforms = [
        transforms.Resize(60),
        transforms.RandomResizedCrop(56),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(10),
        transforms.ToTensor(),
        transforms.Normalize((0.480, 0.448, 0.397), (0.277, 0.270, 0.282)),
    ]
    common_transforms = [
        transforms.Resize(60),
        transforms.CenterCrop(56),
        transforms.ToTensor(),
        transforms.Normalize((0.480, 0.448, 0.397), (0.277, 0.270, 0.282)),
    ]
class iCUB200(DataHandler):
    # CUB-200-2011 birds handler (224x224 crops).
    # NOTE(review): neither pipeline applies Normalize — confirm intended.
    base_dataset = Cub2011
    train_transforms = [
        transforms.Resize(230),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(10),
        transforms.ColorJitter(brightness=63 / 255),
        transforms.ToTensor(),
    ]
    common_transforms = [
        transforms.Resize(230),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
    ]
class iMNIST(DataHandler):
    # MNIST handler: no augmentation, standard MNIST normalization.
    base_dataset = datasets.MNIST
    train_transforms = [ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ]
    common_transforms = [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
class iSVHN(DataHandler):
    # SVHN handler; normalized with ImageNet statistics.
    base_dataset = datasets.SVHN
    train_transforms = [
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(10),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ]
    common_transforms = [
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ]
class iOMNIGLOT(DataHandler):
    # Omniglot handler; reuses MNIST normalization constants.
    base_dataset = datasets.Omniglot
    train_transforms = [ transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)) ]
    common_transforms = [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
| 18,859 | 36.871486 | 210 | py |
EFT | EFT-main/idatasets/CUB200.py | import os
import pandas as pd
from torchvision.datasets.folder import default_loader
from torchvision.datasets.utils import download_url
from torch.utils.data import Dataset
class Cub2011(Dataset):
    """Caltech-UCSD Birds-200-2011 dataset.

    Args:
        root: dataset root; the archive is extracted to <root>/CUB_200_2011.
        train: select the training split if True, else the test split.
        transform: optional transform applied to each loaded image.
        loader: callable that loads an image path into a PIL image.
        download: if True (default), download and extract the archive when
            the integrity check fails. Note this hits the network.

    Raises:
        RuntimeError: if the dataset is missing or fails the integrity check.
    """
    base_folder = 'CUB_200_2011/images'
    url = 'http://www.vision.caltech.edu/visipedia-data/CUB-200-2011/CUB_200_2011.tgz'
    filename = 'CUB_200_2011.tgz'
    tgz_md5 = '97eceeb196236b17998738112f37df78'

    def __init__(self, root, train=True, transform=None, loader=default_loader, download=True):
        self.root = os.path.expanduser(root)
        self.transform = transform
        # FIX: the `loader` argument was previously ignored
        # (`self.loader = default_loader`); honor the caller's loader.
        self.loader = loader
        self.train = train
        if download:
            self._download()
        if not self._check_integrity():
            raise RuntimeError('Dataset not found or corrupted.' +
                               ' You can use download=True to download it')

    def _load_metadata(self):
        """Load image paths/labels/split flags into self.data and self.targets."""
        images = pd.read_csv(os.path.join(self.root, 'CUB_200_2011', 'images.txt'), sep=' ',
                             names=['img_id', 'filepath'])
        image_class_labels = pd.read_csv(os.path.join(self.root, 'CUB_200_2011', 'image_class_labels.txt'),
                                         sep=' ', names=['img_id', 'target'])
        train_test_split = pd.read_csv(os.path.join(self.root, 'CUB_200_2011', 'train_test_split.txt'),
                                       sep=' ', names=['img_id', 'is_training_img'])
        data = images.merge(image_class_labels, on='img_id')
        self.data = data.merge(train_test_split, on='img_id')
        if self.train:
            self.data = self.data[self.data.is_training_img == 1]
        else:
            self.data = self.data[self.data.is_training_img == 0]
        # Labels are 1-based in the metadata; shift to 0-based.
        # Vectorized instead of the former O(n) per-row .iloc loop.
        self.targets = list(self.data.target - 1)

    def _check_integrity(self):
        """Return True iff the metadata parses and every image file exists."""
        try:
            self._load_metadata()
        except Exception:
            # Best-effort: any parse failure means the dataset is not usable.
            return False
        for index, row in self.data.iterrows():
            filepath = os.path.join(self.root, self.base_folder, row.filepath)
            if not os.path.isfile(filepath):
                print(filepath)
                return False
        return True

    def _download(self):
        """Download and extract the archive unless it is already in place."""
        import tarfile
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        download_url(self.url, self.root, self.filename, self.tgz_md5)
        with tarfile.open(os.path.join(self.root, self.filename), "r:gz") as tar:
            tar.extractall(path=self.root)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        sample = self.data.iloc[idx]
        path = os.path.join(self.root, self.base_folder, sample.filepath)
        target = sample.target - 1  # Targets start at 1 by default, so shift to 0
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)
        return img, target
EFT | EFT-main/idatasets/tinyimagenet.py | from __future__ import print_function
import os
import shutil
import tempfile
import torch
from .folder import ImageFolder
from .utils import check_integrity, download_and_extract_archive, extract_archive, \
verify_str_arg
# Download locations and MD5 checksums for the ILSVRC2012 archives used by
# ImageNet.download(): the two image splits plus the devkit metadata.
ARCHIVE_DICT = {
    'train': {
        'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_train.tar',
        'md5': '1d675b47d978889d74fa0da5fadfb00e',
    },
    'val': {
        'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_val.tar',
        'md5': '29b22e2961454d5413ddabcf34fc5622',
    },
    'devkit': {
        'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_devkit_t12.tar.gz',
        'md5': 'fa75699e90414af021442c21a62c3abf',
    }
}
class ImageNet(ImageFolder):
    """`ImageNet <http://image-net.org/>`_ 2012 Classification Dataset.
    Args:
        root (string): Root directory of the ImageNet Dataset.
        split (string, optional): The dataset split, supports ``train``, or ``val``.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        loader (callable, optional): A function to load an image given its path.
    Attributes:
        classes (list): List of the class name tuples.
        class_to_idx (dict): Dict with items (class_name, class_index).
        wnids (list): List of the WordNet IDs.
        wnid_to_idx (dict): Dict with items (wordnet_id, class_index).
        imgs (list): List of (image path, class_index) tuples
        targets (list): The class_index value for each image in the dataset
    """

    def __init__(self, root, split='train', download=False, **kwargs):
        root = self.root = os.path.expanduser(root)
        self.split = verify_str_arg(split, "split", ("train", "val"))
        if download:
            self.download()
        # meta.bin stores (wnid -> class-name tuple, list of val wnids).
        wnid_to_classes = self._load_meta_file()[0]
        # ImageFolder indexes the split directory; its "classes" are wnids.
        super(ImageNet, self).__init__(self.split_folder, **kwargs)
        self.root = root
        # Re-expose the folder-derived names as wnids and replace `classes`
        # with human-readable class-name tuples.
        self.wnids = self.classes
        self.wnid_to_idx = self.class_to_idx
        self.classes = [wnid_to_classes[wnid] for wnid in self.wnids]
        # Every synonym of a class maps to the same class index.
        self.class_to_idx = {cls: idx
                             for idx, clss in enumerate(self.classes)
                             for cls in clss}

    def download(self):
        # Step 1: ensure the parsed devkit metadata (meta.bin) exists.
        if not check_integrity(self.meta_file):
            tmp_dir = tempfile.mkdtemp()
            archive_dict = ARCHIVE_DICT['devkit']
            download_and_extract_archive(archive_dict['url'], self.root,
                                         extract_root=tmp_dir,
                                         md5=archive_dict['md5'])
            devkit_folder = _splitexts(os.path.basename(archive_dict['url']))[0]
            meta = parse_devkit(os.path.join(tmp_dir, devkit_folder))
            self._save_meta_file(*meta)
            shutil.rmtree(tmp_dir)
        # Step 2: download and lay out the requested split if missing.
        if not os.path.isdir(self.split_folder):
            archive_dict = ARCHIVE_DICT[self.split]
            download_and_extract_archive(archive_dict['url'], self.root,
                                         extract_root=self.split_folder,
                                         md5=archive_dict['md5'])
            if self.split == 'train':
                prepare_train_folder(self.split_folder)
            elif self.split == 'val':
                val_wnids = self._load_meta_file()[1]
                prepare_val_folder(self.split_folder, val_wnids)
        else:
            msg = ("You set download=True, but a folder '{}' already exist in "
                   "the root directory. If you want to re-download or re-extract the "
                   "archive, delete the folder.")
            print(msg.format(self.split))

    @property
    def meta_file(self):
        # Cached devkit metadata written by _save_meta_file.
        return os.path.join(self.root, 'meta.bin')

    def _load_meta_file(self):
        if check_integrity(self.meta_file):
            return torch.load(self.meta_file)
        else:
            raise RuntimeError("Meta file not found or corrupted.",
                               "You can use download=True to create it.")

    def _save_meta_file(self, wnid_to_class, val_wnids):
        torch.save((wnid_to_class, val_wnids), self.meta_file)

    @property
    def split_folder(self):
        # Directory holding the requested split ('train' or 'val').
        return os.path.join(self.root, self.split)

    def extra_repr(self):
        return "Split: {split}".format(**self.__dict__)
def parse_devkit(root):
    """Parse the devkit directory.

    Returns:
        (wnid -> class-name-tuple map, list of validation wnids in order).
    """
    idx_to_wnid, wnid_to_classes = parse_meta(root)
    val_wnids = [idx_to_wnid[idx] for idx in parse_val_groundtruth(root)]
    return wnid_to_classes, val_wnids
def parse_meta(devkit_root, path='data', filename='meta.mat'):
    # Parse the devkit's meta.mat and return
    # (ILSVRC index -> wnid, wnid -> tuple of class names).
    import scipy.io as sio
    metafile = os.path.join(devkit_root, path, filename)
    meta = sio.loadmat(metafile, squeeze_me=True)['synsets']
    # Column 4 of each synset record is its child count; only leaf synsets
    # (no children) are actual classes.
    nums_children = list(zip(*meta))[4]
    meta = [meta[idx] for idx, num_children in enumerate(nums_children)
            if num_children == 0]
    # Columns 0..2: ILSVRC2012 id, WordNet id, comma-separated class names.
    idcs, wnids, classes = list(zip(*meta))[:3]
    classes = [tuple(clss.split(', ')) for clss in classes]
    idx_to_wnid = {idx: wnid for idx, wnid in zip(idcs, wnids)}
    wnid_to_classes = {wnid: clss for wnid, clss in zip(wnids, classes)}
    return idx_to_wnid, wnid_to_classes
def parse_val_groundtruth(devkit_root, path='data',
                          filename='ILSVRC2012_validation_ground_truth.txt'):
    """Read the validation ground-truth file and return its labels as ints."""
    gt_path = os.path.join(devkit_root, path, filename)
    with open(gt_path, 'r') as handle:
        lines = handle.readlines()
    return [int(line) for line in lines]
def prepare_train_folder(folder):
    """Unpack each per-class archive in `folder` into its own subdirectory."""
    for entry in os.listdir(folder):
        archive = os.path.join(folder, entry)
        extract_archive(archive, os.path.splitext(archive)[0], remove_finished=True)
def prepare_val_folder(folder, wnids):
    """Move the flat validation images into per-wnid subfolders.

    `wnids[i]` is the class of the i-th image in sorted filename order.
    """
    img_files = sorted(os.path.join(folder, entry) for entry in os.listdir(folder))
    for wnid in set(wnids):
        os.mkdir(os.path.join(folder, wnid))
    for wnid, img_file in zip(wnids, img_files):
        shutil.move(img_file, os.path.join(folder, wnid, os.path.basename(img_file)))
def _splitexts(root):
    """Split off every trailing extension, e.g. 'a.tar.gz' -> ('a', '.tar.gz')."""
    collected = []
    stem, ext = os.path.splitext(root)
    while ext:
        collected.append(ext)
        stem, ext = os.path.splitext(stem)
    return stem, ''.join(reversed(collected))
EFT | EFT-main/idatasets/omniglot.py | import os
import pandas as pd
from torchvision.datasets.folder import default_loader
from torchvision.datasets.utils import download_url
from torch.utils.data import Dataset
class Omniglot(Dataset):
    """`Omniglot <https://github.com/brendenlake/omniglot>`_ Dataset.
    Args:
        root (string): Root directory of dataset where directory
            ``omniglot-py`` exists.
        background (bool, optional): If True, creates dataset from the "background" set, otherwise
            creates from the "evaluation" set. This terminology is defined by the authors.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset zip files from the internet and
            puts it in root directory. If the zip files are already downloaded, they are not
            downloaded again.
        train (bool): keep the first 15 drawings of each character if True,
            the remaining 5 if False.
        all (bool): if True, keep all 20 drawings per character (ignores
            the train/test split).
    """
    folder = 'omniglot-py'
    download_url_prefix = 'https://github.com/brendenlake/omniglot/raw/master/python'
    zips_md5 = {
        'images_background': '68d2efa1b9178cc56df9314c21c6e718',
        'images_evaluation': '6b91aef0f799c5bb55b94e3f2daec811'
    }

    # NOTE(review): this class references `join`, `list_dir`, `list_files`,
    # `check_integrity`, `Image` and `np`, none of which appear in the visible
    # import header of this module — confirm against the full file.
    def __init__(self, root, background=True,
                 transform=None, target_transform=None,
                 download=False, train=True, all=False):
        self.root = join(os.path.expanduser(root), self.folder)
        self.background = background
        self.transform = transform
        self.target_transform = target_transform
        self.images_cached = {}  # image path -> transformed image cache
        if download:
            self.download()
        if not self._check_integrity():
            raise RuntimeError('Dataset not found or corrupted.' +
                               ' You can use download=True to download it')
        self.target_folder = join(self.root, self._get_target_folder())
        self._alphabets = list_dir(self.target_folder)
        # One "character" (= one class) per (alphabet, character) directory.
        self._characters = sum([[join(a, c) for c in list_dir(join(self.target_folder, a))]
                                for a in self._alphabets], [])
        self._character_images = [[(image, idx) for image in list_files(join(self.target_folder, character), '.png')]
                                  for idx, character in enumerate(self._characters)]
        self._flat_character_images = sum(self._character_images, [])
        self.data = [x[0] for x in self._flat_character_images]
        self.targets = [x[1] for x in self._flat_character_images]
        self.data2 = []
        self.targets2 = []
        self.new_flat = []
        # Each character has 20 drawings: drawings 0-14 form the train split
        # and drawings 15-19 the test split.
        for a in range(int(len(self.targets) / 20)):
            start = a * 20
            if train:
                for b in range(start, start + 15):
                    self.data2.append(self.data[b])
                    self.targets2.append(self.targets[b])
                    self.new_flat.append(self._flat_character_images[b])
                    # print(self.targets[start+b])
            else:
                for b in range(start + 15, start + 20):
                    self.data2.append(self.data[b])
                    self.targets2.append(self.targets[b])
                    self.new_flat.append(self._flat_character_images[b])
        if all:
            # all=True keeps the full 20 drawings per character.
            pass
        else:
            self._flat_character_images = self.new_flat
            self.targets = self.targets2
            print(self.targets[0:30])
            self.data = self.data2
        print("Total classes = ", np.max(self.targets))

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is index of the target character class.
        """
        # image_name, character_class = self._flat_character_images[index]
        image_name = self.data[index]
        character_class = self.targets[index]
        image_path = join(self.target_folder, self._characters[character_class], image_name)
        # Images are cached *after* the transform, so any random transform is
        # only applied on the first access of each path.
        if image_path not in self.images_cached:
            image = Image.open(image_path, mode='r').convert('L')
            if self.transform:
                image = self.transform(image)
            self.images_cached[image_path] = image
        else:
            image = self.images_cached[image_path]
        # if self.transform:
        #     image = self.transform(image)
        if self.target_transform:
            character_class = self.target_transform(character_class)
        return image, character_class

    def _cache_data(self):
        # Placeholder; caching happens lazily in __getitem__.
        pass

    def _check_integrity(self):
        # Verify the MD5 of the downloaded zip for the selected set.
        zip_filename = self._get_target_folder()
        if not check_integrity(join(self.root, zip_filename + '.zip'), self.zips_md5[zip_filename]):
            return False
        return True

    def download(self):
        import zipfile
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        filename = self._get_target_folder()
        zip_filename = filename + '.zip'
        url = self.download_url_prefix + '/' + zip_filename
        download_url(url, self.root, zip_filename, self.zips_md5[filename])
        print('Extracting downloaded file: ' + join(self.root, zip_filename))
        with zipfile.ZipFile(join(self.root, zip_filename), 'r') as zip_file:
            zip_file.extractall(self.root)

    def _get_target_folder(self):
        # Folder name depends on which Omniglot set was requested.
        return 'images_background' if self.background else 'images_evaluation'
EFT | EFT-main/idatasets/celeb_1m.py | import os
import pandas as pd
from torchvision.datasets.folder import default_loader
from torchvision.datasets.utils import download_url
from torch.utils.data import Dataset
import numpy as np
import random
from collections import Counter
class MS1M(Dataset):
    # Celebrity-face dataset built from a directory of per-identity folders:
    # keeps the first 10000 identities (after a seeded shuffle) that contain
    # more than 45 images; per identity, images [0:30] form the train split
    # and [30:45] the validation split.
    def __init__(self, root, train=True, transform=None, loader=default_loader, download=True):
        self.root = os.path.expanduser(root)
        self.transform = transform
        # NOTE(review): the `loader` argument is ignored here and
        # default_loader is always used — confirm whether that is intended.
        self.loader = default_loader
        self.train = train
        # Fixed seed so the identity selection below is reproducible.
        seed = 3
        random.seed(seed)
        np.random.seed(seed)
        train_imgs_all = []
        val_imgs_all = []
        all_paths = os.listdir(root)
        random.shuffle(all_paths)
        folders = 0
        for p in all_paths:
            path_p = root+"/"+ p
            imgs_path = os.listdir(path_p)
            if len(imgs_path)>45:
                folders += 1
                train_imgs = imgs_path[:30]
                val_imgs = imgs_path[30:45]
                for i in train_imgs:
                    full_path = p + "/" + i
                    train_imgs_all.append([full_path, int(p)])
                for i in val_imgs:
                    full_path = p + "/" + i
                    val_imgs_all.append([full_path, int(p)])
            if(folders>=10000):
                break
        # Entries become strings here: column 0 is the relative path, column 1
        # the raw identity id (as a string).
        train_imgs_all = np.array(train_imgs_all)
        val_imgs_all = np.array(val_imgs_all)
        # train_imgs_all = np.load("/raid/brjathu/meta_two/idatasets/train_imgs_all.npy")
        # val_imgs_all = np.load("/raid/brjathu/meta_two/idatasets/val_imgs_all.npy")
        if(self.train):
            data = train_imgs_all
        else:
            data = val_imgs_all
        self.data = data[:,0]
        targets_o = data[:,1]
        # Remap the raw (sparse) identity ids to a dense 0..N-1 label range.
        self.mapped_targets = {}
        c = 0
        for t in np.unique(targets_o):
            self.mapped_targets[t] = c
            c += 1
        self.targets = []
        for t in targets_o:
            self.targets.append(int(self.mapped_targets[t]))

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        sample = self.data[idx]
        path = os.path.join(self.root, sample)
        target = self.targets[idx]  # already remapped to dense 0-based ids in __init__
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)
        return img, target
EFT | EFT-main/idatasets/imagenet.py | from __future__ import print_function
import os
import shutil
import tempfile
import torch
from .folder import ImageFolder
from .utils import check_integrity, download_and_extract_archive, extract_archive, \
verify_str_arg
# Download locations and MD5 checksums for the ILSVRC2012 archives used by
# ImageNet.download(): the two image splits plus the devkit metadata.
ARCHIVE_DICT = {
    'train': {
        'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_train.tar',
        'md5': '1d675b47d978889d74fa0da5fadfb00e',
    },
    'val': {
        'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_img_val.tar',
        'md5': '29b22e2961454d5413ddabcf34fc5622',
    },
    'devkit': {
        'url': 'http://www.image-net.org/challenges/LSVRC/2012/nnoupb/ILSVRC2012_devkit_t12.tar.gz',
        'md5': 'fa75699e90414af021442c21a62c3abf',
    }
}
class ImageNet(ImageFolder):
    """`ImageNet <http://image-net.org/>`_ 2012 Classification Dataset.
    Args:
        root (string): Root directory of the ImageNet Dataset.
        split (string, optional): The dataset split, supports ``train``, or ``val``.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        loader (callable, optional): A function to load an image given its path.
    Attributes:
        classes (list): List of the class name tuples.
        class_to_idx (dict): Dict with items (class_name, class_index).
        wnids (list): List of the WordNet IDs.
        wnid_to_idx (dict): Dict with items (wordnet_id, class_index).
        imgs (list): List of (image path, class_index) tuples
        targets (list): The class_index value for each image in the dataset
    """

    def __init__(self, root, split='train', download=False, **kwargs):
        root = self.root = os.path.expanduser(root)
        self.split = verify_str_arg(split, "split", ("train", "val"))
        if download:
            self.download()
        # meta.bin stores (wnid -> class-name tuple, list of val wnids).
        wnid_to_classes = self._load_meta_file()[0]
        # ImageFolder indexes the split directory; its "classes" are wnids.
        super(ImageNet, self).__init__(self.split_folder, **kwargs)
        self.root = root
        # Re-expose the folder-derived names as wnids and replace `classes`
        # with human-readable class-name tuples.
        self.wnids = self.classes
        self.wnid_to_idx = self.class_to_idx
        self.classes = [wnid_to_classes[wnid] for wnid in self.wnids]
        # Every synonym of a class maps to the same class index.
        self.class_to_idx = {cls: idx
                             for idx, clss in enumerate(self.classes)
                             for cls in clss}

    def download(self):
        # Step 1: ensure the parsed devkit metadata (meta.bin) exists.
        if not check_integrity(self.meta_file):
            tmp_dir = tempfile.mkdtemp()
            archive_dict = ARCHIVE_DICT['devkit']
            download_and_extract_archive(archive_dict['url'], self.root,
                                         extract_root=tmp_dir,
                                         md5=archive_dict['md5'])
            devkit_folder = _splitexts(os.path.basename(archive_dict['url']))[0]
            meta = parse_devkit(os.path.join(tmp_dir, devkit_folder))
            self._save_meta_file(*meta)
            shutil.rmtree(tmp_dir)
        # Step 2: download and lay out the requested split if missing.
        if not os.path.isdir(self.split_folder):
            archive_dict = ARCHIVE_DICT[self.split]
            download_and_extract_archive(archive_dict['url'], self.root,
                                         extract_root=self.split_folder,
                                         md5=archive_dict['md5'])
            if self.split == 'train':
                prepare_train_folder(self.split_folder)
            elif self.split == 'val':
                val_wnids = self._load_meta_file()[1]
                prepare_val_folder(self.split_folder, val_wnids)
        else:
            msg = ("You set download=True, but a folder '{}' already exist in "
                   "the root directory. If you want to re-download or re-extract the "
                   "archive, delete the folder.")
            print(msg.format(self.split))

    @property
    def meta_file(self):
        # Cached devkit metadata written by _save_meta_file.
        return os.path.join(self.root, 'meta.bin')

    def _load_meta_file(self):
        if check_integrity(self.meta_file):
            return torch.load(self.meta_file)
        else:
            raise RuntimeError("Meta file not found or corrupted.",
                               "You can use download=True to create it.")

    def _save_meta_file(self, wnid_to_class, val_wnids):
        torch.save((wnid_to_class, val_wnids), self.meta_file)

    @property
    def split_folder(self):
        # Directory holding the requested split ('train' or 'val').
        return os.path.join(self.root, self.split)

    def extra_repr(self):
        return "Split: {split}".format(**self.__dict__)
def parse_devkit(root):
    """Parse the devkit directory.

    Returns:
        (wnid -> class-name-tuple map, list of validation wnids in order).
    """
    idx_to_wnid, wnid_to_classes = parse_meta(root)
    val_wnids = [idx_to_wnid[idx] for idx in parse_val_groundtruth(root)]
    return wnid_to_classes, val_wnids
def parse_meta(devkit_root, path='data', filename='meta.mat'):
    # Parse the devkit's meta.mat and return
    # (ILSVRC index -> wnid, wnid -> tuple of class names).
    import scipy.io as sio
    metafile = os.path.join(devkit_root, path, filename)
    meta = sio.loadmat(metafile, squeeze_me=True)['synsets']
    # Column 4 of each synset record is its child count; only leaf synsets
    # (no children) are actual classes.
    nums_children = list(zip(*meta))[4]
    meta = [meta[idx] for idx, num_children in enumerate(nums_children)
            if num_children == 0]
    # Columns 0..2: ILSVRC2012 id, WordNet id, comma-separated class names.
    idcs, wnids, classes = list(zip(*meta))[:3]
    classes = [tuple(clss.split(', ')) for clss in classes]
    idx_to_wnid = {idx: wnid for idx, wnid in zip(idcs, wnids)}
    wnid_to_classes = {wnid: clss for wnid, clss in zip(wnids, classes)}
    return idx_to_wnid, wnid_to_classes
def parse_val_groundtruth(devkit_root, path='data',
                          filename='ILSVRC2012_validation_ground_truth.txt'):
    """Read the validation ground-truth file and return its labels as ints."""
    gt_path = os.path.join(devkit_root, path, filename)
    with open(gt_path, 'r') as handle:
        lines = handle.readlines()
    return [int(line) for line in lines]
def prepare_train_folder(folder):
    """Unpack each per-class archive in `folder` into its own subdirectory."""
    for entry in os.listdir(folder):
        archive = os.path.join(folder, entry)
        extract_archive(archive, os.path.splitext(archive)[0], remove_finished=True)
def prepare_val_folder(folder, wnids):
    """Move the flat validation images into per-wnid subfolders.

    `wnids[i]` is the class of the i-th image in sorted filename order.
    """
    img_files = sorted(os.path.join(folder, entry) for entry in os.listdir(folder))
    for wnid in set(wnids):
        os.mkdir(os.path.join(folder, wnid))
    for wnid, img_file in zip(wnids, img_files):
        shutil.move(img_file, os.path.join(folder, wnid, os.path.basename(img_file)))
def _splitexts(root):
    """Split off every trailing extension, e.g. 'a.tar.gz' -> ('a', '.tar.gz')."""
    collected = []
    stem, ext = os.path.splitext(root)
    while ext:
        collected.append(ext)
        stem, ext = os.path.splitext(stem)
    return stem, ''.join(reversed(collected))
EFT | EFT-main/idatasets/data_celeb.py | import random
import numpy as np
import torch
from PIL import Image
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data import Sampler
from torchvision import datasets, transforms
# from imagenet import ImageNet
from CUB200 import Cub2011
import collections
class SubsetRandomSampler(Sampler):
    r"""Samples elements from a fixed list of indices, without replacement.

    Arguments:
        indices (sequence): a sequence of dataset indices to draw from
        shuffle (bool): if True, yield the indices in a random order;
            otherwise yield them in the given order
    """

    def __init__(self, indices, shuffle):
        self.indices = indices
        self.shuffle = shuffle

    def __iter__(self):
        if not self.shuffle:
            return (self.indices[pos] for pos in range(len(self.indices)))
        permutation = torch.randperm(len(self.indices))
        return (self.indices[pos] for pos in permutation)

    def __len__(self):
        return len(self.indices)
class IncrementalDataset:
def __init__(
    self,
    dataset_name,
    args,
    random_order=False,
    shuffle=True,
    workers=10,
    batch_size=128,
    seed=1,
    increment=10,
    validation_split=0.
):
    """Wrap one or more datasets for class-incremental training.

    Resolves the handler classes for `dataset_name`, adopts the first
    handler's transform pipelines, materializes the underlying datasets via
    _setup_data, and initializes loader bookkeeping.
    """
    handlers = _get_datasets(dataset_name)
    self.dataset_name = dataset_name.lower().strip()
    # FIXME handle multiple datasets: only the first handler's transforms are used.
    self.train_transforms = handlers[0].train_transforms
    self.common_transforms = handlers[0].common_transforms
    self.args = args
    self._setup_data(
        handlers,
        args.data_path,
        random_order=random_order,
        seed=seed,
        increment=increment,
        validation_split=validation_split
    )
    self._current_task = 0
    self._batch_size = batch_size
    self._workers = workers
    self._shuffle = shuffle
@property
def n_tasks(self):
    # Total number of incremental tasks (one entry in `increments` per task).
    return len(self.increments)
def get_same_index(self, target, label, mode="train", memory=None):
label_indices = []
label_targets = []
for i in range(len(target)):
if target[i] in label:
label_indices.append(i)
label_targets.append(target[i])
for_memory = (label_indices.copy(),label_targets.copy())
if memory is not None:
memory_indices, memory_targets = memory
all_indices = memory_indices+label_indices
else:
all_indices = label_indices
# if (mode=="train"):
# random.shuffle(all_indices)
return all_indices, for_memory
def get_same_index_test_chunk(self, target, label, mode="test", memory=None):
label_indices = []
label_targets = []
np_target = np.array(target, dtype="uint32")
np_indices = np.array(list(range(len(target))), dtype="uint32")
for t in range(len(label)//self.args.class_per_task):
task_idx = []
for class_id in label[t*self.args.class_per_task: (t+1)*self.args.class_per_task]:
idx = np.where(np_target==class_id)[0]
task_idx.extend(list(idx.ravel()))
task_idx = np.array(task_idx, dtype="uint32")
task_idx.ravel()
random.shuffle(task_idx)
label_indices.extend(list(np_indices[task_idx]))
label_targets.extend(list(np_target[task_idx]))
label_indices = np.array(label_indices, dtype="uint32")
label_indices.ravel()
return list(label_indices), label_targets
def new_task(self, memory=None):
print(self._current_task)
print(self.increments)
min_class = sum(self.increments[:self._current_task])
max_class = sum(self.increments[:self._current_task + 1])
train_indices, for_memory = self.get_same_index(self.train_dataset.identity, list(range(min_class, max_class)), mode="train", memory=memory)
# list_a = []
# for i in range(len(self.train_dataset.targets)):
# if(i in train_indices):
# list_a.append(self.train_dataset.targets[i])
# print("#############")
# print(collections.Counter(list_a))
# print("#############")
test_indices, _ = self.get_same_index_test_chunk(self.test_dataset.identity, list(range(max_class)), mode="test")
# list_a = []
# for i in range(len(self.test_dataset.targets)):
# if(i in test_indices):
# list_a.append(self.test_dataset.targets[i])
# print("#############")
# print(collections.Counter(list_a))
# print("#############")
self.train_data_loader = torch.utils.data.DataLoader(self.train_dataset, batch_size=self._batch_size,shuffle=False,num_workers=16, sampler=SubsetRandomSampler(train_indices, True))
self.test_data_loader = torch.utils.data.DataLoader(self.test_dataset, batch_size=10,shuffle=False,num_workers=16, sampler=SubsetRandomSampler(test_indices, False))
task_info = {
"min_class": min_class,
"max_class": max_class,
"increment": self.increments[self._current_task],
"task": self._current_task,
"max_task": len(self.increments),
"n_train_data": len(train_indices),
"n_test_data": len(test_indices)
}
self._current_task += 1
return task_info, self.train_data_loader, self.train_data_loader, self.test_data_loader, for_memory
def get_custom_loader_idx(self, indexes, mode="train", batch_size=10, shuffle=True):
# if shuffle:
# random.shuffle(indexes)
if(mode=="train"):
data_loader = torch.utils.data.DataLoader(self.train_dataset, batch_size=batch_size, shuffle=False, num_workers=4, sampler=SubsetRandomSampler(indexes, True))
else:
data_loader = torch.utils.data.DataLoader(self.test_dataset, batch_size=batch_size, shuffle=False, num_workers=4, sampler=SubsetRandomSampler(indexes, False))
return data_loader
def get_custom_loader_class(self, class_id, mode="train", batch_size=10, shuffle=False):
if(mode=="train"):
train_indices, for_memory = self.get_same_index(self.train_dataset.targets, class_id, mode="train", memory=None)
# if shuffle:
# random.shuffle(train_indices)
data_loader = torch.utils.data.DataLoader(self.train_dataset, batch_size=batch_size, shuffle=False, num_workers=4, sampler=SubsetRandomSampler(train_indices, True))
else:
test_indices, _ = self.get_same_index(self.test_dataset.targets, class_id, mode="test")
# if shuffle:
# random.shuffle(test_indices)
data_loader = torch.utils.data.DataLoader(self.test_dataset, batch_size=batch_size, shuffle=False, num_workers=4, sampler=SubsetRandomSampler(test_indices, False))
return data_loader
def _setup_data(self, datasets, path, random_order=False, seed=1, increment=10, validation_split=0.):
self.increments = []
self.class_order = []
trsf_train = transforms.Compose(self.train_transforms)
trsf_test = transforms.Compose(self.common_transforms)
current_class_idx = 0 # When using multiple datasets
for dataset in datasets:
if(self.dataset_name=="imagenet"):
train_dataset = dataset.base_dataset(root=path, split='train', download=False, transform=trsf_train)# ("data", train=True, download=True)
test_dataset = dataset.base_dataset(root=path, split='val', download=False, transform=trsf_test)
elif(self.dataset_name=="cub200" or self.dataset_name=="cifar100" or self.dataset_name=="mnist" or self.dataset_name=="caltech101"):
train_dataset = dataset.base_dataset(root=path, train=True, download=True, transform=trsf_train)# ("data", train=True, download=True)
test_dataset = dataset.base_dataset(root=path, train=False, download=True, transform=trsf_test)
elif(self.dataset_name=="celeb"):
train_dataset = dataset.base_dataset(root=path, split='train',target_type="identity", download=True, transform=trsf_train)# ("data", train=True, download=True)
test_dataset = dataset.base_dataset(root=path, split='valid',target_type="identity", download=True, transform=trsf_test)
order = [i for i in range(self.args.num_class)]
if random_order:
random.seed(seed) # Ensure that following order is determined by seed:
random.shuffle(order)
elif dataset.class_order is not None:
order = dataset.class_order
self.class_order.append(order)
if len(datasets) > 1:
self.increments.append(len(order))
else:
self.increments = [increment for _ in range(len(order) // increment)]
self.train_dataset = train_dataset
self.test_dataset = test_dataset
@staticmethod
def _map_new_class_index(y, order):
"""Transforms targets for new class order."""
return np.array(list(map(lambda x: order.index(x), y)))
def _get_datasets(dataset_names):
    """Resolve a '-'-separated list of dataset names to handler classes."""
    return list(map(_get_dataset, dataset_names.split("-")))
def _get_dataset(dataset_name):
    """Map a dataset name (case/whitespace insensitive) to its handler class.

    Raises NotImplementedError for names not in the registry.
    """
    registry = {
        "cifar10": iCIFAR10,
        "cifar100": iCIFAR100,
        "imagenet": iIMAGENET,
        "cub200": iCUB200,
        "mnist": iMNIST,
        "caltech101": iCALTECH101,
        "celeb": iCELEB,
    }
    key = dataset_name.lower().strip()
    handler = registry.get(key)
    if handler is None:
        raise NotImplementedError("Unknown dataset {}.".format(key))
    return handler
class DataHandler:
    """Base container describing one dataset: the underlying dataset class
    plus its train/eval transform pipelines and optional class order."""
    # Dataset class to instantiate (set by subclasses).
    base_dataset = None
    # Transform lists; the caller composes them with transforms.Compose.
    train_transforms = []
    common_transforms = [transforms.ToTensor()]
    # Optional fixed class ordering; None means natural/randomized order.
    class_order = None
class iCIFAR10(DataHandler):
    """CIFAR-10 handler: crop/flip/rotation/jitter augmentation at train time.

    NOTE(review): the train pipeline does not normalize while the eval
    pipeline does — confirm this asymmetry is intended.
    """
    base_dataset = datasets.cifar.CIFAR10
    train_transforms = [
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(10),
        transforms.ColorJitter(brightness=63 / 255),
        transforms.ToTensor(),
    ]
    common_transforms = [
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
    ]
class iCIFAR100(DataHandler):
    """CIFAR-100 handler: augmented train pipeline; both pipelines normalize
    with the same per-channel statistics."""
    base_dataset = datasets.cifar.CIFAR100
    train_transforms = [
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(10),
        transforms.ColorJitter(brightness=63 / 255),
        transforms.ToTensor(),
        transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
    ]
    common_transforms = [
        transforms.ToTensor(),
        transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
    ]
class iCALTECH101(DataHandler):
    """Caltech-101 handler at 128x128.

    NOTE(review): train resizes to 136 but eval resizes to 130 before the
    128 crop — confirm the mismatch is intentional.
    """
    base_dataset = datasets.Caltech101
    train_transforms = [
        transforms.Resize(136),
        transforms.RandomCrop(128, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(10),
        # transforms.ColorJitter(brightness=63 / 255),
        transforms.ToTensor(),
        transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
    ]
    common_transforms = [
        transforms.Resize(130),
        transforms.CenterCrop(128),
        transforms.ToTensor(),
        transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
    ]
class iCELEB(DataHandler):
    """CelebA handler at 64x64 (resize to 70, then crop to 64)."""
    base_dataset = datasets.CelebA
    train_transforms = [
        transforms.Resize(70),
        transforms.RandomCrop(64, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(10),
        # transforms.ColorJitter(brightness=63 / 255),
        transforms.ToTensor(),
        transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
    ]
    common_transforms = [
        transforms.Resize(70),
        transforms.CenterCrop(64),
        transforms.ToTensor(),
        transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
    ]
class iIMAGENET(DataHandler):
    """ImageNet handler downsampled to 64x64, normalized with the standard
    ImageNet per-channel statistics."""
    base_dataset = datasets.ImageNet
    train_transforms = [
        transforms.Resize(70),
        transforms.RandomResizedCrop(64),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(10),
        # transforms.ColorJitter(brightness=63 / 255),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ]
    common_transforms = [
        transforms.Resize(70),
        transforms.CenterCrop(64),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ]
class iCUB200(DataHandler):
    """CUB-200-2011 handler at 64x64, backed by the project's Cub2011 class.

    NOTE(review): no normalization in either pipeline, unlike the other
    handlers — confirm intended.
    """
    base_dataset = Cub2011
    train_transforms = [
        transforms.Resize(70),
        transforms.RandomResizedCrop(64),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(10),
        transforms.ColorJitter(brightness=63 / 255),
        transforms.ToTensor(),
    ]
    common_transforms = [
        transforms.Resize(70),
        transforms.CenterCrop(64),
        transforms.ToTensor(),
    ]
class iMNIST(DataHandler):
    """Plain MNIST handler: tensor conversion only, no augmentation."""
    base_dataset = datasets.MNIST
    train_transforms = [ transforms.ToTensor()]
    common_transforms = [transforms.ToTensor()]
    # Dead experimental code kept for reference:
    # def _preprocess_initial_data(self, data):
    #     print("$$$$$$$$$$", data.shape)
    #     print("$$$$$$$$$$", data.size)
    #     b, w, h, c = data.shape
    #     data = data.reshape(b, 784)
class iPermutedMNIST(iMNIST):
    """MNIST variant whose pixels are shuffled by one random permutation
    shared across the whole batch."""

    def _preprocess_initial_data(self, data):
        """Permute the pixel positions of every image in *data* (b, w, h, c)."""
        b, w, h, c = data.shape
        flat = data.reshape(b, -1, c)
        order = np.random.permutation(w * h)
        shuffled = flat[:, order, :]
        return shuffled.reshape(b, w, h, c)
# === IAN-master/test.py ===
import os
import torch
import logging
from train import parse_options
from network import create_model
from options.yaml_opt import dict2str
from dataset import create_dataloader, create_dataset
from base_utils.utils import get_time_str, make_exp_dirs
from base_utils.logger import get_root_logger, get_env_info
def main():
    """Evaluation entry point: builds every configured test dataloader and
    runs the model's validation pass (optionally saving result images)."""
    # parse options, set distributed setting, set ramdom seed
    opt = parse_options(is_train=False)
    os.environ["CUDA_VISIBLE_DEVICES"] = opt['gpu_id']
    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True
    # mkdir and initialize loggers
    make_exp_dirs(opt)
    log_file = os.path.join(opt['path']['log'],
                            f"test_{opt['name']}_{get_time_str()}.log")
    logger = get_root_logger(
        logger_name='relighting', log_level=logging.INFO, log_file=log_file)
    logger.info(get_env_info())
    logger.info(dict2str(opt))
    # create test dataset and dataloader (one loader per configured phase)
    test_loaders = []
    for phase, dataset_opt in sorted(opt['datasets'].items()):
        test_set = create_dataset(dataset_opt)
        test_loader = create_dataloader(
            test_set,
            dataset_opt,
            num_gpu=opt['num_gpu'],
            dist=opt['dist'],
            sampler=None,
            seed=opt['manual_seed'])
        logger.info(
            f"Number of test images in {dataset_opt['name']}: {len(test_set)}")
        test_loaders.append(test_loader)
    # create model
    model = create_model(opt)
    #model = nn.DataParallel(model).cuda()
    for test_loader in test_loaders:
        test_set_name = test_loader.dataset.opt['name']
        logger.info(f'Testing {test_set_name}...')
        # current_iter is the run name here (a string used as a label).
        model.validation(
            test_loader,
            current_iter=opt['name'],
            tb_logger=None,
            save_img=opt['val']['save_img'])
# Script entry point. (BUGFIX: dataset-extraction statistics were fused onto
# the `main()` line, making the file unparseable; the guard is restored.)
if __name__ == '__main__':
    main()
# === IAN-master/train.py ===
import os
import math
import time
import torch
import random
import logging
import argparse
import datetime
from network import create_model
from options.yaml_opt import parse, dict2str
from dataset.data_sampler import EnlargedSampler
from dataset import create_dataset, create_dataloader
from base_utils.logger import get_root_logger, get_env_info, init_tb_logger, MessageLogger
from base_utils.utils import set_random_seed, get_time_str, check_resume, make_exp_dirs, mkdir_and_rename
torch.cuda.is_available()
def parse_options(is_train=True):
    """Parse CLI arguments and the train/test YAML option file.

    Returns the option dict with distributed mode disabled and a manual
    random seed fixed (drawn at random when the YAML does not set one).
    """
    format_str = 'train' if is_train else 'test'
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-opt', type=str, default='options/{}_option.yml'.format(format_str), help='Path to option YAML file.')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    opt = parse(args.opt, is_train=is_train)
    # Single-process training only; distributed mode is forced off.
    opt['dist'] = False
    # random seed
    seed = opt.get('manual_seed')
    if seed is None:
        seed = random.randint(1, 10000)
    opt['manual_seed'] = seed
    # Offset by rank so multi-process runs would get distinct streams.
    set_random_seed(seed + opt['rank'])
    return opt
def init_loggers(opt):
    """Create the root file logger (and optional tensorboard logger) for a run.

    Logs go to ``opt['path']['log_path']`` when set, otherwise to the
    experiment root. Returns ``(logger, tb_logger)``; ``tb_logger`` is
    ``None`` unless ``opt['logger']['use_tb_logger']`` is enabled and the
    run name does not contain 'debug'.
    """
    log_path = opt['path'].get('log_path')
    if log_path is None:  # fall back to the experiment directory
        log_path = opt['path']['experiments_root']
    log_file = os.path.join(log_path,
                            f"train_{opt['name']}_{get_time_str()}.log")
    # exist_ok avoids the check-then-create race of the old exists() test.
    os.makedirs(log_path, exist_ok=True)
    logger = get_root_logger(
        logger_name='relighting', log_level=logging.INFO, log_file=log_file)
    logger.info(get_env_info())
    logger.info(dict2str(opt))
    # initialize tensorboard logger and wandb logger
    tb_logger = None
    if opt['logger'].get('use_tb_logger') and 'debug' not in opt['name']:
        tb_logger = init_tb_logger(log_dir=opt['path']['tb_logger'])
    return logger, tb_logger
def create_train_val_dataloader(opt, logger):
    """Build the train/val dataloaders described in ``opt['datasets']``.

    Returns ``(train_loader, train_sampler, val_loader, total_epochs,
    total_iters)``; entries stay ``None`` when the corresponding phase is
    absent from the configuration.

    Raises:
        ValueError: for any phase name other than 'train'/'val'.
    """
    # Initialize everything up front so the return statement cannot raise
    # NameError when a phase is missing from the options (previously
    # train_sampler/total_epochs/total_iters were only bound inside the
    # 'train' branch).
    train_loader, val_loader = None, None
    train_sampler, total_epochs, total_iters = None, None, None
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            dataset_enlarge_ratio = dataset_opt.get('dataset_enlarge_ratio', 1)
            train_set = create_dataset(dataset_opt)
            # EnlargedSampler repeats the dataset `dataset_enlarge_ratio`
            # times per epoch and handles the (rank, world_size) split.
            train_sampler = EnlargedSampler(train_set, opt['world_size'],
                                            opt['rank'], dataset_enlarge_ratio)
            train_loader = create_dataloader(
                train_set,
                dataset_opt,
                num_gpu=opt['num_gpu'],
                dist=opt['dist'],
                sampler=train_sampler,
                seed=opt['manual_seed'])
            num_iter_per_epoch = math.ceil(
                len(train_set) * dataset_enlarge_ratio /
                (dataset_opt['batch_size_per_gpu'] * opt['num_gpu'] * opt['world_size']))
            total_iters = int(opt['train']['total_iter'])
            total_epochs = math.ceil(total_iters / (num_iter_per_epoch))
            logger.info(
                'Training statistics:'
                f'\n\tNumber of train images: {len(train_set)}'
                f'\n\tDataset enlarge ratio: {dataset_enlarge_ratio}'
                f'\n\tBatch size per gpu: {dataset_opt["batch_size_per_gpu"]}'
                f'\n\tWorld size (gpu number): {opt["world_size"]}'
                f'\n\tRequire iter number per epoch: {num_iter_per_epoch}'
                f'\n\tTotal epochs: {total_epochs}; iters: {total_iters}.')
        elif phase == 'val':
            val_set = create_dataset(dataset_opt)
            val_loader = create_dataloader(
                val_set,
                dataset_opt,
                num_gpu=opt['num_gpu'],
                dist=opt['dist'],
                sampler=None,
                seed=opt['manual_seed'])
            logger.info(
                f'Number of val images/folders in {dataset_opt["name"]}: '
                f'{len(val_set)}')
        else:
            raise ValueError(f'Dataset phase {phase} is not recognized.')
    return train_loader, train_sampler, val_loader, total_epochs, total_iters
def main():
    """Training entry point: prepare options/loggers/dataloaders, optionally
    resume from a checkpoint, then run the epoch/iteration loop with
    periodic logging, checkpointing and validation."""
    # parse options, set distributed setting, set ramdom seed
    opt = parse_options(is_train=True)
    os.environ["CUDA_VISIBLE_DEVICES"] = opt['gpu_id']
    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True
    # load resume states if necessary
    if opt['path'].get('resume_state'):
        device_id = torch.cuda.current_device()
        resume_state = torch.load(
            opt['path']['resume_state'],
            map_location=lambda storage, loc: storage.cuda(device_id))
    else:
        resume_state = None
    # mkdir for experiments and logger
    if resume_state is None:
        make_exp_dirs(opt)
    # initialize loggers
    logger, tb_logger = init_loggers(opt)
    # create train and validation dataloaders
    result = create_train_val_dataloader(opt, logger)
    train_loader, train_sampler, val_loader, total_epochs, total_iters = result
    # create model
    if resume_state:  # resume training
        check_resume(opt, resume_state['iter'])
        model = create_model(opt)
        model.resume_training(resume_state)  # handle optimizers and schedulers
        logger.info(f"Resuming training from epoch: {resume_state['epoch']}, "
                    f"iter: {resume_state['iter']}.")
        start_epoch = resume_state['epoch']
        current_iter = resume_state['iter']
    else:
        model = create_model(opt)
        start_epoch = 0
        current_iter = 0
    # create message logger (formatted outputs)
    msg_logger = MessageLogger(opt, current_iter, tb_logger)
    # training
    logger.info(
        f'Start training from epoch: {start_epoch}, iter: {current_iter}')
    data_time, iter_time = time.time(), time.time()
    start_time = time.time()
    for epoch in range(start_epoch, total_epochs + 1):
        train_sampler.set_epoch(epoch)
        iter_trainloader = iter(train_loader)
        # BUGFIX: `loader_iter.next()` is the removed Python-2-style method
        # (dropped from modern DataLoader iterators); the builtin next()
        # works on all versions.
        train_data = next(iter_trainloader)
        while train_data is not None:
            data_time = time.time() - data_time
            current_iter += 1
            if current_iter > total_iters:
                break
            model.lr_decay(current_iter)
            # training
            model.feed_data(train_data)
            model.optimize_parameters(current_iter)
            iter_time = time.time() - iter_time
            # log
            if current_iter % opt['logger']['print_freq'] == 0:
                log_vars = {'epoch': epoch, 'iter': current_iter}
                log_vars.update({'lrs': model.get_current_learning_rate()})
                log_vars.update({'time': iter_time, 'data_time': data_time})
                log_vars.update(model.get_current_log())
                msg_logger(log_vars)
            # save models and training states
            if current_iter % opt['logger']['save_checkpoint_freq'] == 0:
                logger.info('Saving models and training states.')
                model.save(epoch, current_iter)
            # validation
            if opt.get('val') is not None and (current_iter %
                                               opt['val']['val_freq'] == 0):
                model.validation(val_loader, current_iter, tb_logger,
                                 opt['val']['save_img'])
            data_time = time.time()
            iter_time = time.time()
            try:
                train_data = next(iter_trainloader)
            except StopIteration:
                train_data = None
        # end of iter
    # end of epoch
    consumed_time = str(
        datetime.timedelta(seconds=int(time.time() - start_time)))
    logger.info(f'End of training. Time consumed: {consumed_time}')
    logger.info('Save the latest model.')
    model.save(epoch=-1, current_iter=-1)  # -1 stands for the latest
    if opt.get('val') is not None:
        model.validation(val_loader, current_iter, tb_logger,
                         opt['val']['save_img'])
    if tb_logger:
        tb_logger.close()
if __name__ == '__main__':
    main()
# === IAN-master/dataset/AdobeMI_dataset.py ===
import ast
import torch
import numpy as np
from torch.utils import data as data
from dataset.data_utils import img2tensor, imread, parse_adobe_dataset, select_one2one_data
from dataset.transforms import augment, multi_random_crop
def showimg(img, name):
    """Debug helper: save an image in [-1, 1] range to ``input_{name}.png``."""
    import numpy as np
    import cv2
    scaled = (img + 1) / 2 * 255
    cv2.imwrite(f'input_{name}.png', scaled.astype(np.uint8))
def showimgs(imgs):
    """Debug helper: dump each image of a batch to ``input_{i}.png``."""
    import numpy as np
    import cv2
    for idx in range(imgs.shape[0]):
        cv2.imwrite(f'input_{idx}.png', imgs[idx].astype(np.uint8))
def get_seg_dict(root):
    """Load a segmentation-class mapping stored as a Python dict literal
    on the first line of the file at *root*."""
    handle = open(root, 'r')
    try:
        first_line = handle.readline()
    finally:
        handle.close()
    return ast.literal_eval(first_line)
def seg2tensor(val, seg_dic):
    """Encode label *val* as a float32 one-hot vector.

    *seg_dic* maps ``str(label)`` to the class index; the returned vector
    has length ``len(seg_dic)`` with a single 1 at that index.
    """
    vec = np.zeros(len(seg_dic), dtype=np.float32)
    vec[seg_dic[str(val)]] = 1.
    return vec
# Image name format: {notation of scene}/dir_{light_dir}_mip2.jpg
# other component: probes, materials_mip2.png, meta.json, thumb.jpg, label_map.png
class AdobeMultiIlluminationOnetoOneDataset(data.Dataset):
    """Paired relighting dataset over the Adobe Multi-Illumination scenes.

    Each sample pairs one input lighting direction with one ground-truth
    direction of the same scene; the chrome/gray light probes present in
    the photos are blacked out and excluded via a validity mask.

    Expected ``opt`` keys: dataroot, input_dir, gt_dir, phase, gt_size,
    optional use_flip/use_rot/use_color and test_mode.
    """
    def __init__(self, opt):
        super().__init__()
        self.opt = opt
        self.paths = parse_adobe_dataset(opt['dataroot'])
        self.paths = select_one2one_data(self.paths, self.opt['input_dir'], self.opt['gt_dir'])
        #self.seg_dic = get_seg_dict(opt['seg_root'])
        if opt.get('test_mode') == None:
            self.test_mode = False
        else:
            self.test_mode = opt['test_mode']
    def __maskprobe__(self, input, gt, chrome_pos, gray_pos):
        """Black out both light probes in input/gt (in place) and return a
        1-channel mask that is 0 over the probe boxes and 1 elsewhere.

        Probe coordinates are at full resolution and divided by 4 because
        the images are mip2 (quarter-size) versions.
        """
        # input H x W x C
        chrome_begx, chrome_begy, chrome_endx, chrome_endy = chrome_pos
        gray_begx, gray_begy, gray_endx, gray_endy = gray_pos
        h, w, c = input.shape
        scale = 4
        chrome_begx = round(chrome_begx / scale)
        chrome_begy = round(chrome_begy / scale)
        chrome_endx = round(chrome_endx / scale)
        chrome_endy = round(chrome_endy / scale)
        gray_begx = round(gray_begx / scale)
        gray_begy = round(gray_begy / scale)
        gray_endx = round(gray_endx / scale)
        gray_endy = round(gray_endy / scale)
        mask = torch.ones((h,w,1))
        mask[chrome_begy: chrome_endy+1, chrome_begx: chrome_endx+1, :] = 0
        mask[gray_begy: gray_endy+1, gray_begx: gray_endx+1, :] = 0
        # Images are in [-1, 1] (per __getitem__), so -1 paints the probes black.
        input[chrome_begy: chrome_endy+1, chrome_begx: chrome_endx+1, :] = -1.
        gt[chrome_begy: chrome_endy+1, chrome_begx: chrome_endx+1, :] = -1.
        input[gray_begy: gray_endy+1, gray_begx: gray_endx+1, :] = -1.
        gt[gray_begy: gray_endy+1, gray_begx: gray_endx+1, :] = -1.
        return mask
    def __getitem__(self, index):
        """Return one ``{input, gt, mask, input_path, gt_path}`` sample
        (``gt``/``gt_path`` omitted in test_mode)."""
        # Load gt and input images. Dimension order: HWC; channel order: BGR;
        # image range: [-1, 1], float32.
        data_dic = self.paths[index]
        gt_path = data_dic['gt']
        input_path = data_dic['input']
        #print(input_path)
        img_input = imread(input_path)
        img_gt = imread(gt_path)
        mask = self.__maskprobe__(img_input, img_gt, data_dic['chrome_pos'], data_dic['gray_pos'])
        # Flatten '{scene}/{file}' into 'scene_file' identifiers for output naming.
        scene_name, input_name = input_path.split('/')[-2:]
        gt_name = gt_path.split('/')[-1]
        input_path = f'{scene_name}_{input_name}'
        gt_path = f'{scene_name}_{gt_name}'
        # augmentation for training
        if self.opt['phase'] == 'train':
            img_gt, img_input, mask = augment([img_gt, img_input, mask],
                self.opt['use_flip'], self.opt['use_rot'], self.opt.get('use_color'))
            # crop imgs
            if self.opt['gt_size'] > 0:
                (img_gt, img_input, mask), _ = multi_random_crop([img_gt, img_input, mask], self.opt['gt_size'])
        if img_gt.shape[0] % 32 != 0 or img_gt.shape[1] % 32 != 0:
            # crop border so both spatial dims become multiples of 32
            # print("crop")
            mask = mask[4:-4, 14:-14, :]
            img_input = img_input[4:-4, 14:-14, :]
            img_gt = img_gt[4:-4, 14:-14, :]
        # BGR to RGB, HWC to CHW, numpy to tensor
        if self.test_mode:
            # NOTE(review): img2tensor is given a 1-element list here, so
            # img_input is likely a single-element list, not a tensor —
            # confirm downstream consumers expect that.
            img_input = img2tensor([img_input],
                                    bgr2rgb=True,
                                    float32=True)
            img_gt = None
            gt_path = None
        else:
            img_gt, img_input = img2tensor([img_gt, img_input],
                                    bgr2rgb=True,
                                    float32=True)
        mask = mask.permute(2, 0, 1)
        # NOTE(review): `!= None` on a tensor relies on the identity
        # fallback; `is not None` would be the safe spelling.
        if img_gt != None:
            return {
                'input': img_input,
                'gt': img_gt,
                'mask': mask,
                'input_path': input_path,
                'gt_path': gt_path
            }
        else:
            return {
                'input': img_input,
                'input_path': input_path,
                'mask': mask,
            }
    def __len__(self):
        # One sample per selected (input, gt) pair.
        return len(self.paths)
class AdobeMultiIlluminationWithSegOnetoOneDataset(data.Dataset):
    """Variant of the Adobe Multi-Illumination paired dataset that also
    loads the per-pixel material/label map and concatenates its 38-class
    one-hot encoding onto the input channels."""
    def __init__(self, opt):
        super().__init__()
        self.opt = opt
        # `True` asks the parser to include the label images as well.
        self.paths = parse_adobe_dataset(opt['dataroot'], True)
        self.paths = select_one2one_data(self.paths, self.opt['input_dir'], self.opt['gt_dir'])
        # NOTE(review): vec_seg2tensor appears unused in this class — confirm.
        self.vec_seg2tensor = np.vectorize(seg2tensor, excluded=[1], signature="(n)->(m)")
        if opt.get('test_mode') == None:
            self.test_mode = False
        else:
            self.test_mode = opt['test_mode']
    def __maskprobe__(self, input, gt, onehot, chrome_pos, gray_pos):
        """Black out both light probes in input/gt and zero the one-hot
        labels there (all in place); return a 1-channel validity mask
        (0 over the probe boxes, 1 elsewhere). Probe coordinates are at
        full resolution and divided by 4 for the mip2 images.
        """
        # input H x W x C
        chrome_begx, chrome_begy, chrome_endx, chrome_endy = chrome_pos
        gray_begx, gray_begy, gray_endx, gray_endy = gray_pos
        h, w, c = input.shape
        scale = 4
        chrome_begx = round(chrome_begx / scale)
        chrome_begy = round(chrome_begy / scale)
        chrome_endx = round(chrome_endx / scale)
        chrome_endy = round(chrome_endy / scale)
        gray_begx = round(gray_begx / scale)
        gray_begy = round(gray_begy / scale)
        gray_endx = round(gray_endx / scale)
        gray_endy = round(gray_endy / scale)
        mask = torch.ones((h,w,1))
        mask[chrome_begy: chrome_endy+1, chrome_begx: chrome_endx+1, :] = 0
        mask[gray_begy: gray_endy+1, gray_begx: gray_endx+1, :] = 0
        input[chrome_begy: chrome_endy+1, chrome_begx: chrome_endx+1, :] = -1.
        gt[chrome_begy: chrome_endy+1, chrome_begx: chrome_endx+1, :] = -1.
        input[gray_begy: gray_endy+1, gray_begx: gray_endx+1, :] = -1.
        gt[gray_begy: gray_endy+1, gray_begx: gray_endx+1, :] = -1.
        onehot[chrome_begy: chrome_endy+1, chrome_begx: chrome_endx+1, :] = 0
        onehot[gray_begy: gray_endy+1, gray_begx: gray_endx+1, :] = 0
        return mask
    def __getitem__(self, index):
        """Return one sample whose 'input' stacks RGB and the 38-channel
        one-hot label map; 'gt'/'gt_path' are omitted in test_mode."""
        # Load gt and input images. Dimension order: HWC; channel order: BGR;
        # image range: [-1, 1], float32.
        data_dic = self.paths[index]
        gt_path = data_dic['gt']
        input_path = data_dic['input']
        label_path = data_dic['lab_img']
        img_input = imread(input_path)
        img_gt = imread(gt_path)
        # Label map is read without the [-1, 1] normalization (raw ids).
        label = imread(label_path, False)
        label_tensor = torch.LongTensor(label)
        onehot_tensor = torch.nn.functional.one_hot(label_tensor, 38)
        mask = self.__maskprobe__(img_input, img_gt, onehot_tensor, data_dic['chrome_pos'], data_dic['gray_pos'])
        # crop border
        onehot_tensor = onehot_tensor[4:-4, 6:-6, :]
        mask = mask[4:-4, 6:-6, :]
        img_input = img_input[4:-4, 6:-6, :]
        img_gt = img_gt[4:-4, 6:-6, :]
        # Flatten '{scene}/{file}' into 'scene_file' identifiers for output naming.
        scene_name, input_name = input_path.split('/')[-2:]
        gt_name = gt_path.split('/')[-1]
        input_path = f'{scene_name}_{input_name}'
        gt_path = f'{scene_name}_{gt_name}'
        # augmentation for training
        if self.opt['phase'] == 'train':
            img_gt, img_input, mask, onehot_tensor = augment([img_gt, img_input, mask, onehot_tensor],
                self.opt['use_flip'], self.opt['use_rot'], self.opt.get('use_color'))
            # crop imgs
            if self.opt['gt_size'] > 0:
                (img_gt, img_input, mask, onehot_tensor
                ), _ = multi_random_crop([img_gt, img_input, mask, onehot_tensor], self.opt['gt_size'])
        # BGR to RGB, HWC to CHW, numpy to tensor
        if self.test_mode:
            # NOTE(review): img2tensor over a 1-element list likely yields a
            # list, which the torch.cat below cannot take — confirm test_mode
            # is exercised anywhere.
            img_input = img2tensor([img_input],
                                    bgr2rgb=True,
                                    float32=True)
            img_gt = None
            gt_path = None
        else:
            img_gt, img_input = img2tensor([img_gt, img_input],
                                    bgr2rgb=True,
                                    float32=True)
        mask = mask.permute(2, 0, 1)
        onehot_tensor = onehot_tensor.permute(2, 0, 1)
        # NOTE(review): one_hot yields an integer tensor; torch.cat with a
        # float image requires matching dtypes — presumably a cast happens
        # elsewhere or this relies on augment converting it; verify.
        img_input = torch.cat([img_input, onehot_tensor], axis=0)
        # NOTE(review): `!= None` on a tensor relies on the identity
        # fallback; `is not None` would be the safe spelling.
        if img_gt != None:
            return {
                'input': img_input,
                'gt': img_gt,
                'mask': mask,
                'input_path': input_path,
                'gt_path': gt_path
            }
        else:
            return {
                'input': img_input,
                'input_path': input_path,
                'mask': mask,
            }
    def __len__(self):
        # One sample per selected (input, gt) pair.
        return len(self.paths)
# === IAN-master/dataset/anytoany_dataset.py ===
import os
import torch
import numpy as np
from torch.utils import data as data
from base_utils.utils import load_depth
from dataset.data_utils import img2tensor, imread
from dataset.transforms import multi_random_crop, dir_augment
'''Image{idx}_{color}_{angle}.png'''
class Any2anyTrainingDataset(data.Dataset):
    """Any-to-any relighting training set.

    Images are named ``Image{idx}_{color}_{angle}.png`` over 5 color
    temperatures x 8 light directions per scene. Each sample is a triple:
    the input render, a *reference* render of a different random scene
    under random target lighting, and the ground truth — the input scene
    re-rendered under that same target lighting. Auxiliary maps (depth or
    multi-channel features) are concatenated onto the image channels.
    """
    def __init__(self, opt):
        super(Any2anyTrainingDataset, self).__init__()
        self.opt = opt
        self.root = opt['dataset_root']
        self.colors = ['2500', '3500', '4500', '5500', '6500']
        self.angles = ['E', 'W', 'S', 'N', 'NE', 'NW', 'SE', 'SW']
        # Number of rendered variants per scene (colors x angles).
        self.multiply = len(self.colors) * len(self.angles)
        self.is_val = opt.get('is_val')
        self.mask = opt.get('mask')
        # Normalize the auxiliary config to parallel lists of (dim, root).
        if isinstance(self.opt['auxiliary_dim'], list) == False:
            self.opt['auxiliary_dim'] = [self.opt['auxiliary_dim']]
        if isinstance(opt['dataroot_auxiliary'], list) == False:
            opt['dataroot_auxiliary'] = [opt['dataroot_auxiliary']]
        self.aux_roots = opt['dataroot_auxiliary']
        self.aux_dims = opt['auxiliary_dim']
        self.aux_type = opt['auxiliary_type']
        # amount of scene
        self.num_scene = len(os.listdir(self.root)) // self.multiply
    def __getitem__(self, idx):
        """Build one {input, ref, gt, mask, lighting labels} sample."""
        # addressing: decompose the flat index into (scene, color, angle)
        scene_id = idx // self.multiply
        idx -= scene_id * self.multiply
        color_id = idx // len(self.angles)
        angle_id = idx - color_id * len(self.angles)
        # Pick a different random scene as the lighting reference.
        ref_scene_id = scene_id
        while ref_scene_id == scene_id:
            ref_scene_id = np.random.randint(0, self.num_scene)
        gt_scene_id = scene_id
        target_color_id = np.random.randint(0, len(self.colors))
        target_angle_id = np.random.randint(0, len(self.angles))
        if self.is_val:
            # Validation scenes are stored with ids offset by 275 — TODO confirm.
            scene_id += 275
            ref_scene_id += 275
            gt_scene_id += 275
        input_path = os.path.join(self.root,
            f'Image{scene_id:0>3}_{self.colors[color_id]}_{self.angles[angle_id]}.png')
        ref_path = os.path.join(self.root,
            f'Image{ref_scene_id:0>3}_{self.colors[target_color_id]}_{self.angles[target_angle_id]}.png')
        gt_path = os.path.join(self.root,
            f'Image{gt_scene_id:0>3}_{self.colors[target_color_id]}_{self.angles[target_angle_id]}.png')
        input_img = imread(input_path)
        ref_img = imread(ref_path)
        gt_img = imread(gt_path)
        if self.mask == True:
            mask_path = os.path.join(self.root, 'mask', f'Image{gt_scene_id:0>3}.npy')
            mask = np.load(mask_path)[:, :, np.newaxis]
        else:
            # No mask configured: use an all-ones HxWx1 map.
            h, w, _ = input_img.shape
            mask = np.array([[1.0]]).repeat(w, 1).repeat(h, 0)[:, :, np.newaxis]
        # Load per-scene auxiliary maps (dim==1 -> depth file, else .npy array).
        input_auxs = []
        ref_auxs = []
        for dim, aux_root in zip(self.aux_dims, self.aux_roots):
            input_aux = os.path.join(aux_root, f'Image{scene_id:0>3}{self.aux_type}')
            ref_aux = os.path.join(aux_root, f'Image{ref_scene_id:0>3}{self.aux_type}')
            if dim == 1:
                input_aux = load_depth(input_aux)
                ref_aux = load_depth(ref_aux)
            else:
                input_aux = np.load(input_aux)
                ref_aux = np.load(ref_aux)
            if dim == 1:
                # Give single-channel maps an explicit channel axis.
                input_aux = input_aux[:, :, np.newaxis]
                ref_aux = ref_aux[:, :, np.newaxis]
            input_auxs.append(input_aux)
            ref_auxs.append(ref_aux)
        input_auxs = np.concatenate(input_auxs, axis=2)
        ref_auxs = np.concatenate(ref_auxs, axis=2)
        # dir_augment flips/rotates everything jointly and remaps the
        # direction labels accordingly.
        (input_img, ref_img, gt_img,
         input_auxs, ref_auxs, mask), (angle_id, target_angle_id) = dir_augment([
            input_img, ref_img, gt_img, input_auxs, ref_auxs, mask], [angle_id, target_angle_id])
        if self.opt['gt_size'] > 0:
            (input_img, ref_img, gt_img, input_auxs,
             ref_auxs, mask), _ = multi_random_crop([
                input_img, ref_img, gt_img, input_auxs, ref_auxs, mask], self.opt['gt_size'], None)
        input_img, ref_img, input_auxs, ref_auxs, gt_img, mask = img2tensor([
            input_img, ref_img, input_auxs, ref_auxs, gt_img, mask], True, True)
        # Stack auxiliaries onto the image channels (CHW, dim 0 = channels).
        input_img = torch.cat([input_img, input_auxs], 0)
        ref_img = torch.cat([ref_img, ref_auxs], 0)
        return {
            'input': input_img,
            'input_path': input_path,
            'ref': ref_img,
            'ref_path': ref_path,
            'gt': gt_img,
            'gt_path': gt_path,
            'input_angle': torch.tensor(angle_id),
            'target_angle': torch.tensor(target_angle_id),
            'input_color': torch.tensor(color_id),
            'target_color': torch.tensor(target_color_id),
            'mask': mask
        }
    def __len__(self):
        # Every (scene, color, angle) combination is one sample.
        return self.num_scene * self.multiply
class Any2anyTrainingDataset2(data.Dataset):
    """Any-to-any relighting training dataset (variant 2, maskless).

    Scene renderings are stored flat as ``Image{scene:03d}_{color}_{angle}.png``.
    A flat index addresses one (scene, color temperature, light direction)
    combination; a random *different* scene rendered under a random target
    lighting serves as the reference, and the input scene under that same
    target lighting is the ground truth. All outputs are downsampled 2x.
    """
    def __init__(self, opt):
        super(Any2anyTrainingDataset2, self).__init__()
        self.opt = opt
        self.root = opt['dataset_root']
        # Color temperatures (Kelvin) and light directions available per scene.
        self.colors = ['2500', '3500', '4500', '5500', '6500']
        self.colors_code = torch.from_numpy(np.eye(5, dtype=np.float32))  # one-hot color codes
        self.angles = ['E', 'W', 'S', 'N', 'NE', 'NW', 'SE', 'SW']
        self.angles_code = torch.from_numpy(np.eye(8, dtype=np.float32))  # one-hot direction codes
        self.multiply = len(self.colors) * len(self.angles)
        self.is_val = opt.get('is_val')
        # Normalize auxiliary options to lists (one entry per auxiliary source).
        if isinstance(self.opt['auxiliary_dim'], list) == False:
            self.opt['auxiliary_dim'] = [self.opt['auxiliary_dim']]
        if isinstance(opt['dataroot_auxiliary'], list) == False:
            opt['dataroot_auxiliary'] = [opt['dataroot_auxiliary']]
        self.aux_roots = opt['dataroot_auxiliary']
        self.aux_dims = opt['auxiliary_dim']
        self.aux_type = opt['auxiliary_type']
        # amount of scene (files per scene = colors x angles)
        self.num_scene = len(os.listdir(self.root)) // self.multiply
    def __getitem__(self, idx):
        # Decompose the flat index into (scene, color, angle).
        scene_id = idx // self.multiply
        idx -= scene_id * self.multiply
        color_id = idx // len(self.angles)
        angle_id = idx - color_id * len(self.angles)
        # Pick a different scene for the style reference.
        # NOTE(review): infinite loop if num_scene == 1 — assumed > 1.
        ref_scene_id = scene_id
        while ref_scene_id == scene_id:
            ref_scene_id = np.random.randint(0, self.num_scene)
        gt_scene_id = scene_id
        target_color_id = np.random.randint(0, len(self.colors))
        target_angle_id = np.random.randint(0, len(self.angles))
        if self.is_val:
            # Validation scenes live at offset 275 — TODO confirm this
            # matches the on-disk split.
            scene_id += 275
            ref_scene_id += 275
            gt_scene_id += 275
        if self.is_val:
            input_path = os.path.join(self.root, f'Image{(+scene_id):0>3}_{self.colors[color_id]}_{self.angles[angle_id]}.png')
            ref_path = os.path.join(self.root, f'Image{ref_scene_id:0>3}_{self.colors[target_color_id]}_{self.angles[target_angle_id]}.png')
            gt_path = os.path.join(self.root, f'Image{gt_scene_id:0>3}_{self.colors[target_color_id]}_{self.angles[target_angle_id]}.png')
        else:
            input_path = os.path.join(self.root, f'Image{scene_id:0>3}_{self.colors[color_id]}_{self.angles[angle_id]}.png')
            ref_path = os.path.join(self.root, f'Image{ref_scene_id:0>3}_{self.colors[target_color_id]}_{self.angles[target_angle_id]}.png')
            gt_path = os.path.join(self.root, f'Image{gt_scene_id:0>3}_{self.colors[target_color_id]}_{self.angles[target_angle_id]}.png')
        input_img = imread(input_path)
        ref_img = imread(ref_path)
        gt_img = imread(gt_path)
        # Load auxiliary maps: dim == 1 means a depth map (load_depth),
        # otherwise a raw .npy array; single-channel maps gain a channel axis.
        input_auxs = []
        ref_auxs = []
        for dim, aux_root in zip(self.aux_dims, self.aux_roots):
            input_aux = os.path.join(aux_root, f'Image{scene_id:0>3}{self.aux_type}')
            ref_aux = os.path.join(aux_root, f'Image{ref_scene_id:0>3}{self.aux_type}')
            if dim == 1:
                input_aux = load_depth(input_aux)
                ref_aux = load_depth(ref_aux)
            else:
                input_aux = np.load(input_aux)
                ref_aux = np.load(ref_aux)
            if dim == 1:
                input_aux = input_aux[:, :, np.newaxis]
                ref_aux = ref_aux[:, :, np.newaxis]
            input_auxs.append(input_aux)
            ref_auxs.append(ref_aux)
        input_auxs = np.concatenate(input_auxs, axis=2)
        ref_auxs = np.concatenate(ref_auxs, axis=2)
        # Direction-aware augmentation also remaps the angle labels.
        (input_img, ref_img, gt_img, input_auxs, ref_auxs), (angle_id, target_angle_id) = dir_augment([input_img, ref_img, gt_img, input_auxs, ref_auxs], [angle_id, target_angle_id])
        # NOTE(review): called with a trailing None — the two-argument
        # transforms.multi_random_crop would not accept this; presumably a
        # different helper is imported in this module — verify.
        if self.opt['gt_size'] > 0:
            (input_img, ref_img, gt_img, input_auxs, ref_auxs), _ = multi_random_crop([input_img, ref_img, gt_img, input_auxs, ref_auxs], self.opt['gt_size'], None)
        input_img, ref_img, input_auxs, ref_auxs, gt_img = img2tensor([input_img, ref_img, input_auxs, ref_auxs, gt_img], True, True)
        # Auxiliary maps ride along as extra input/reference channels.
        input_img = torch.cat([input_img, input_auxs], 0)
        ref_img = torch.cat([ref_img, ref_auxs], 0)
        # print(input_img.shape)
        # Half-resolution training samples.
        input_img = torch.nn.functional.interpolate(input_img.unsqueeze(0), scale_factor=0.5, mode='bilinear', align_corners=True).squeeze(0)
        ref_img = torch.nn.functional.interpolate(ref_img.unsqueeze(0), scale_factor=0.5, mode='bilinear', align_corners=True).squeeze(0)
        gt_img = torch.nn.functional.interpolate(gt_img.unsqueeze(0), scale_factor=0.5, mode='bilinear', align_corners=True).squeeze(0)
        return {
            'input': input_img,
            'input_path': input_path,
            'ref': ref_img,
            'ref_path': ref_path,
            'gt': gt_img,
            'gt_path': gt_path,
            'input_angle': self.angles_code[angle_id],
            'target_angle': self.angles_code[target_angle_id],
            'input_color': self.colors_code[color_id],
            'target_color': self.colors_code[target_color_id]
        }
    def __len__(self):
        # Virtual length: every scene appears once per (color, angle) combination.
        return self.num_scene * self.multiply
'''Pair{idx}.png'''
class Any2anyTestingDataset(data.Dataset):
    """Any-to-any relighting test dataset.

    Input/reference/GT triplets are stored as ``Pair{idx:03d}.png`` in
    three parallel folders; auxiliary maps (e.g. depth/normals) live in
    separate per-source folders. An optional precomputed mask is loaded
    from a sibling ``mask`` directory; otherwise an all-ones mask is built.
    """
    def __init__(self, opt):
        super(Any2anyTestingDataset, self).__init__()
        self.opt = opt
        self.input_dir = opt['dataroot_input'][0]
        self.ref_dir = opt['dataroot_ref'][0]
        self.gt_dir = opt['dataroot_gt'][0]
        self.mask = opt.get('mask')
        # Normalize auxiliary options to lists (one entry per auxiliary source).
        if isinstance(self.opt['auxiliary_dim'], list) == False:
            self.opt['auxiliary_dim'] = [self.opt['auxiliary_dim']]
        if isinstance(opt['dataroot_auxiliary_input'], list) == False:
            opt['dataroot_auxiliary_input'] = [opt['dataroot_auxiliary_input']]
            opt['dataroot_auxiliary_ref'] = [opt['dataroot_auxiliary_ref']]
        self.input_aux_roots = opt['dataroot_auxiliary_input']
        self.ref_aux_roots = opt['dataroot_auxiliary_ref']
        self.aux_dims = opt['auxiliary_dim']
        self.aux_type = opt['auxiliary_type']
        self.num_scene = len(os.listdir(self.input_dir))
    def __getitem__(self, idx):
        input_path = os.path.join(self.input_dir, f'Pair{idx:0>3}.png')
        ref_path = os.path.join(self.ref_dir, f'Pair{idx:0>3}.png')
        gt_path = os.path.join(self.gt_dir, f'Pair{idx:0>3}.png')
        input_img = imread(input_path)
        ref_img = imread(ref_path)
        gt_img = imread(gt_path)
        if self.mask == True:
            # Masks live in a sibling 'mask' dir. NOTE(review): [:-6]
            # assumes input_dir ends with a 6-char component like
            # 'input/' — verify for other layouts.
            mask_path = os.path.join(self.input_dir[:-6], 'mask', f'Pair{idx:0>3}.npy')
            mask = np.load(mask_path)[:, :, np.newaxis]
        else:
            # No mask on disk: build an all-ones HxWx1 mask.
            h, w, _ = input_img.shape
            mask = np.array([[1.0]]).repeat(w, 1).repeat(h, 0)[:, :, np.newaxis]
        # Load auxiliary maps: dim == 1 means a depth map (load_depth),
        # otherwise a raw .npy array; single-channel maps gain a channel axis.
        input_auxs = []
        ref_auxs = []
        for dim, input_aux_root, ref_aux_root in zip(self.aux_dims, self.input_aux_roots, self.ref_aux_roots):
            input_aux = os.path.join(input_aux_root, f'Pair{idx:0>3}{self.aux_type}')
            ref_aux = os.path.join(ref_aux_root, f'Pair{idx:0>3}{self.aux_type}')
            if dim == 1:
                input_aux = load_depth(input_aux)
                ref_aux = load_depth(ref_aux)
            else:
                input_aux = np.load(input_aux)
                ref_aux = np.load(ref_aux)
            if dim == 1:
                input_aux = input_aux[:, :, np.newaxis]
                ref_aux = ref_aux[:, :, np.newaxis]
            input_auxs.append(input_aux)
            ref_auxs.append(ref_aux)
        input_auxs = np.concatenate(input_auxs, axis=2)
        ref_auxs = np.concatenate(ref_auxs, axis=2)
        input_img, ref_img, input_auxs, ref_auxs, gt_img, mask = img2tensor([input_img, ref_img, input_auxs, ref_auxs, gt_img, mask], True, True)
        # Auxiliary maps ride along as extra input/reference channels.
        input_img = torch.cat([input_img, input_auxs], 0)
        ref_img = torch.cat([ref_img, ref_auxs], 0)
        # mask = torch.nn.functional.interpolate(mask.unsqueeze(0), scale_factor=0.5, mode='bilinear', align_corners=True).squeeze(0)
        # input_img = torch.nn.functional.interpolate(input_img.unsqueeze(0), scale_factor=0.5, mode='bilinear', align_corners=True).squeeze(0)
        # ref_img = torch.nn.functional.interpolate(ref_img.unsqueeze(0), scale_factor=0.5, mode='bilinear', align_corners=True).squeeze(0)
        # gt_img = torch.nn.functional.interpolate(gt_img.unsqueeze(0), scale_factor=0.5, mode='bilinear', align_corners=True).squeeze(0)
        return {
            'input': input_img,
            'input_path': input_path,
            'ref': ref_img,
            'ref_path': ref_path,
            'gt': gt_img,
            'gt_path': gt_path,
            'mask': mask
        }
    def __len__(self):
        return self.num_scene
if __name__ == '__main__':
    # Smoke test: build a training dataset from the default any2any layout
    # and fetch one sample to exercise the whole loading pipeline.
    # (Fixed: dataset-dump metadata was fused onto the last line, which
    # made the file syntactically invalid.)
    opt = {
        'dataset_root': 'data/any2any/train/input',
        'auxiliary_dim': [3, 1],
        'dataroot_auxiliary': ['data/any2any/train/normals', 'data/any2any/train/depth'],
        'auxiliary_type': '.npy',
        'gt_size': 0,
    }
    dataset = Any2anyTrainingDataset(opt)
    print(len(dataset))
    sample = dataset.__getitem__(30 * 40)
IAN | IAN-master/dataset/paired_dataset.py | from dataset.data_utils import paired_paths_from_folder, img2tensor, imread
from dataset.transforms import augment, paired_random_crop
from torch.utils import data as data
# from data_utils import paired_paths_from_folder, img2tensor, imread
# from transforms import augment, paired_random_crop
# from torch.utils import data as data
class PairedImageDataset(data.Dataset):
    """Paired image dataset for image restoration.

    Reads input/GT image pairs from one or more parallel folder pairs.

    Args:
        opt (dict): Config for the dataset. Keys:
            dataroot_gt (list[str]): Data root paths for gt.
            dataroot_pinput (list[str] | str, optional): Data root paths
                for input; falls back to 'dataroot_input' when absent.
            filename_tmpl (str, optional): Template for each input filename
                (without extension). Default: '{}'.
            gt_size (int): Crop size for gt patches; cropping is skipped
                unless it is greater than zero.
            use_flip (bool): Use horizontal flips (train only).
            use_rot (bool): Use rotation, i.e. vertical flip + transposing
                h and w (train only).
            scale (int): Scale factor between input and gt.
            phase (str): 'train' or 'val'.
    """
    def __init__(self, opt):
        super(PairedImageDataset, self).__init__()
        self.opt = opt
        self.paths = []
        # Fall back to the plain input roots and normalize to a list.
        if opt.get('dataroot_pinput') is None:
            opt['dataroot_pinput'] = opt['dataroot_input']
        if not isinstance(opt['dataroot_pinput'], list):
            opt['dataroot_pinput'] = [opt['dataroot_pinput']]
        # The template is loop-invariant; resolve it once.
        self.filename_tmpl = opt.get('filename_tmpl', '{}')
        for i in range(len(opt['dataroot_pinput'])):
            self.gt_folder = opt['dataroot_gt'][i]
            self.input_folder = opt['dataroot_pinput'][i]
            self.paths += paired_paths_from_folder(
                [self.input_folder, self.gt_folder], ['input', 'gt'],
                self.filename_tmpl)
    def __getitem__(self, index):
        scale = self.opt['scale']
        # Load gt and input images. Dimension order: HWC; channel order:
        # BGR; value range [-1, 1], float32 (see imread()).
        gt_path = self.paths[index]['gt_path']
        img_gt = imread(gt_path)
        input_path = self.paths[index]['input_path']
        img_input = imread(input_path)
        # Training-time augmentation: optional random crop, then flip/rot.
        if self.opt['phase'] == 'train':
            gt_size = self.opt['gt_size']
            if gt_size > 0:
                img_gt, img_input = paired_random_crop(
                    img_gt, img_input, gt_size, scale, gt_path)
            img_gt, img_input = augment([img_gt, img_input],
                                        self.opt['use_flip'],
                                        self.opt['use_rot'])
        # BGR to RGB, HWC to CHW, numpy to tensor.
        img_gt, img_input = img2tensor([img_gt, img_input],
                                       bgr2rgb=True,
                                       float32=True)
        return {
            'input': img_input,
            'gt': img_gt,
            'input_path': input_path,
            'gt_path': gt_path
        }
    def __len__(self):
        return len(self.paths)
| 3,237 | 36.651163 | 100 | py |
IAN | IAN-master/dataset/paired_with_auxiliary_dataset.py | import torch
import numpy as np
from torch.utils import data as data
from base_utils.utils import load_depth
from dataset.transforms import augment, multi_random_crop
from dataset.data_utils import paired_paths_from_folder, img2tensor, imread
class PairedImageWithAuxiliaryDataset(data.Dataset):
    """Paired dataset that stacks auxiliary channels (depth/normals/...)
    onto the input image.

    Args:
        opt (dict): Config. Relevant keys:
            dataroot_input (list[str]): Input image roots.
            dataroot_gt (list[str]): GT image roots (unused in test mode).
            dataroot_auxiliary (list[list[str]] | list[str] | str):
                Auxiliary roots, one inner list per input root.
            auxiliary_dim (list[int] | int): Channel count of each
                auxiliary map (1 -> depth via load_depth, else raw .npy).
            mask (dict, optional): {'enable': bool, 'return': bool}; when
                enabled but not returned, the last input channel is dropped.
            test_mode (bool, optional): When True, no GT is loaded.
            gt_size (int): Random-crop size for training (skipped if <= 0).
            use_flip / use_rot / use_color: Augmentation switches.
            phase (str): 'train' or 'val'.
    """
    def __init__(self, opt):
        super(PairedImageWithAuxiliaryDataset, self).__init__()
        self.opt = opt
        self.mask = False
        self.return_mask = False
        self.paths = []
        if opt.get('mask') is not None:
            self.mask = opt['mask']['enable']
            self.return_mask = opt['mask']['return']
        self.test_mode = opt.get('test_mode')
        if self.test_mode is None:
            self.test_mode = False
        # Normalize auxiliary config to a list-of-lists aligned with inputs.
        if not isinstance(self.opt['auxiliary_dim'], list):
            self.opt['auxiliary_dim'] = [self.opt['auxiliary_dim']]
        if not isinstance(opt['dataroot_auxiliary'], list):
            opt['dataroot_auxiliary'] = [[opt['dataroot_auxiliary']]]
        elif not isinstance(opt['dataroot_auxiliary'][0], list):
            opt['dataroot_auxiliary'] = [opt['dataroot_auxiliary']]
        for i in range(len(opt['dataroot_input'])):
            self.input_folder = opt['dataroot_input'][i]
            self.aux_folders = opt['dataroot_auxiliary'][i]
            if not self.test_mode:
                self.gt_folder = opt['dataroot_gt'][i]
            if 'filename_tmpl' in opt:
                self.filename_tmpl = opt['filename_tmpl']
            else:
                self.filename_tmpl = '{}'
            if self.test_mode:
                self.paths += paired_paths_from_folder(
                    [self.input_folder, None, self.aux_folders],
                    ['input', None, 'aux'], self.filename_tmpl)
            else:
                self.paths += paired_paths_from_folder(
                    [self.input_folder, self.gt_folder, self.aux_folders],
                    ['input', 'gt', 'aux'], self.filename_tmpl)
    def __getitem__(self, index):
        # Images: HWC, BGR, float32 in [-1, 1] (see imread()).
        if not self.test_mode:
            gt_path = self.paths[index]['gt_path']
            img_gt = imread(gt_path)
        input_path = self.paths[index]['input_path']
        img_input = imread(input_path)
        # Load and stack all auxiliary maps along the channel axis.
        sum_aux_input = []
        for dim, aux_path in zip(self.opt['auxiliary_dim'],
                                 self.paths[index]['aux_paths']):
            if dim == 1:
                aux_input = load_depth(aux_path)
            else:
                aux_input = np.load(aux_path)
            if len(aux_input.shape) == 2:
                aux_input = aux_input[:, :, np.newaxis]
            sum_aux_input.append(aux_input)
        sum_aux_input = np.concatenate(sum_aux_input, axis=2).astype(np.float32)
        # Training-time augmentation and cropping.
        if self.opt['phase'] == 'train':
            img_gt, img_input, sum_aux_input = augment(
                [img_gt, img_input, sum_aux_input], self.opt['use_flip'],
                self.opt['use_rot'], self.opt.get('use_color'))
            if self.opt['gt_size'] > 0:
                (img_gt, img_input, sum_aux_input), mask = multi_random_crop(
                    [img_gt, img_input, sum_aux_input], self.opt['gt_size'])
        # BGR to RGB, HWC to CHW, numpy to tensor.
        if self.test_mode:
            img_input, sum_aux_input = img2tensor(
                [img_input, sum_aux_input], bgr2rgb=True, float32=True)
            img_gt = None
            gt_path = None
        else:
            img_gt, img_input, sum_aux_input = img2tensor(
                [img_gt, img_input, sum_aux_input], bgr2rgb=True, float32=True)
        img_input = torch.cat([img_input, sum_aux_input], axis=0)
        # The mask rides as the last input channel; drop it unless the
        # caller asked for it back.
        if self.mask and not self.return_mask:
            img_input = img_input[:-1, ...]
        # Use identity comparison: `!=` on a torch tensor is elementwise
        # and would not yield a plain bool here.
        if img_gt is not None:
            return {
                'input': img_input,
                'gt': img_gt,
                'input_path': input_path,
                'gt_path': gt_path
            }
        return {
            'input': img_input,
            'input_path': input_path,
        }
    def __len__(self):
        return len(self.paths)
| 4,611 | 39.45614 | 160 | py |
IAN | IAN-master/dataset/data_utils.py | import cv2
import numpy as np
import os
import math
from torch.utils import data
from torchvision.utils import make_grid
import torch
import json
def imread(path, float32=True):
    """Read an image from *path* via cv2 (HWC layout, BGR order).

    When ``float32`` is True the uint8 image is normalized to float32 in
    [-1, 1]; otherwise the raw data is returned unchanged. A possible
    alpha channel is discarded.
    """
    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
    if img.ndim == 3:
        # Keep only the BGR planes, dropping any alpha channel.
        img = img[..., :3]
    if float32:
        img = img.astype(np.float32) / 255 * 2 - 1
    return img
def imwrite(img, save_path, mkdir=True):
    """Write *img* (uint8, BGR) to *save_path* via cv2.

    Args:
        img (ndarray): Image to write.
        save_path (str): Destination file path.
        mkdir (bool): Create missing parent directories. Default: True.
    """
    if mkdir:
        # exist_ok=True makes a separate existence check unnecessary
        # (and avoids a check-then-create race).
        os.makedirs(os.path.abspath(os.path.dirname(save_path)), exist_ok=True)
    cv2.imwrite(save_path, img)
def crop_border(imgs, crop_border):
    """Crop ``crop_border`` pixels from every spatial border.

    Args:
        imgs (list[ndarray] | ndarray): Image(s) with shape (h, w, c).
        crop_border (int): Border width removed on each side; 0 is a no-op.

    Returns:
        list[ndarray] | ndarray: Cropped image(s), same container type
        as the input.
    """
    if crop_border == 0:
        return imgs
    window = slice(crop_border, -crop_border)
    if isinstance(imgs, list):
        return [img[window, window, ...] for img in imgs]
    return imgs[window, window, ...]
def img2tensor(imgs, bgr2rgb=True, float32=True):
    """Convert numpy image(s) (HWC) into torch tensor(s) (CHW).

    Args:
        imgs (list[ndarray] | ndarray): Image(s) to convert.
        bgr2rgb (bool): Swap BGR to RGB for 3-channel images.
        float32 (bool): Cast the result to float32.

    Returns:
        list[Tensor] | Tensor: Converted tensor(s), same container type
        as the input.
    """
    def _convert(img):
        if bgr2rgb and img.shape[2] == 3:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        tensor = torch.from_numpy(img.transpose(2, 0, 1))
        return tensor.float() if float32 else tensor
    if isinstance(imgs, list):
        return [_convert(img) for img in imgs]
    return _convert(imgs)
def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)):
    """Convert torch Tensors into image numpy arrays.

    After clamping to [min, max], values will be normalized to [0, 1].

    Args:
        tensor (Tensor or list[Tensor]): Accept shapes:
            1) 4D mini-batch Tensor of shape (B x 3/1 x H x W);
            2) 3D Tensor of shape (3/1 x H x W);
            3) 2D Tensor of shape (H x W).
            Tensor channel should be in RGB order.
        rgb2bgr (bool): Whether to change rgb to bgr.
        out_type (numpy type): output types. If ``np.uint8``, transform outputs
            to uint8 type with range [0, 255]; otherwise, float type with
            range [0, 1]. Default: ``np.uint8``.
        min_max (tuple[int]): min and max values for clamp.

    Returns:
        (ndarray or list[ndarray]): 3D ndarray of shape (H x W x C) OR 2D
        ndarray of shape (H x W). The channel order is BGR.
    """
    if not (torch.is_tensor(tensor) or
            (isinstance(tensor, list)
             and all(torch.is_tensor(t) for t in tensor))):
        raise TypeError(
            f'tensor or list of tensors expected, got {type(tensor)}')
    if torch.is_tensor(tensor):
        tensor = [tensor]
    result = []
    for _tensor in tensor:
        # Clamp in place, then rescale to [0, 1] before dtype conversion.
        _tensor = _tensor.squeeze(0).float().detach().cpu().clamp_(*min_max)
        _tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0])
        n_dim = _tensor.dim()
        if n_dim == 4:
            # Mini-batch: tile into a single near-square grid image.
            img_np = make_grid(
                _tensor, nrow=int(math.sqrt(_tensor.size(0))),
                normalize=False).numpy()
            img_np = img_np.transpose(1, 2, 0)
            if rgb2bgr:
                img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
        elif n_dim == 3:
            img_np = _tensor.numpy()
            img_np = img_np.transpose(1, 2, 0)
            if img_np.shape[2] == 1:  # gray image
                img_np = np.squeeze(img_np, axis=2)
            else:
                if rgb2bgr:
                    img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
        elif n_dim == 2:
            img_np = _tensor.numpy()
        else:
            raise TypeError('Only support 4D, 3D or 2D tensor. '
                            f'But received with dimension: {n_dim}')
        if out_type == np.uint8:
            # Unlike MATLAB, numpy.unit8() WILL NOT round by default.
            img_np = (img_np * 255.0).round()
        img_np = img_np.astype(out_type)
        result.append(img_np)
    if len(result) == 1:
        result = result[0]
    return result
def feat2img(tensor, out_type=np.uint8, min_max=(-1, 1)):
    """Visualize feature tensor(s) by min-max normalizing to [0, 255].

    Args:
        tensor (Tensor | list[Tensor]): Feature map(s); each is squeezed
            and normalized independently by its own min/max.
        out_type (numpy dtype): Output dtype. Default: np.uint8.
        min_max (tuple): Unused; kept for interface compatibility.

    Returns:
        ndarray | list[ndarray]: Normalized array(s); a bare array when a
        single tensor was given.
    """
    if torch.is_tensor(tensor):
        tensor = [tensor]
    result = []
    for _tensor in tensor:
        _tensor = _tensor.squeeze().float().detach().cpu()
        # Per-tensor min-max normalization to [0, 1], then scale to [0, 255].
        # (Leftover debug prints of the extrema were removed.)
        max_t = _tensor.max()
        min_t = _tensor.min()
        _tensor = (_tensor - min_t) / (max_t - min_t)
        img_np = _tensor.numpy()
        img_np = (img_np * 255.0).round()
        img_np = img_np.astype(out_type)
        result.append(img_np)
    if len(result) == 1:
        result = result[0]
    return result
def paired_paths_from_folder(folders, keys, filename_tmpl):
    """Generate paired paths from folders.

    Args:
        folders (list): [input_folder, gt_folder] plus, optionally, a list
            of auxiliary folders as a third element. gt_folder may be None
            (test mode).
        keys (list[str]): Keys identifying the folders, in the same order,
            e.g. ['input', 'gt'] or ['input', 'gt', 'aux'].
        filename_tmpl (str): Template applied to each input basename
            (without extension).

    Returns:
        list[dict]: One dict per input file with '<input_key>_path',
        '<gt_key>_path' ('nogt_path' with value '' when gt_folder is
        None) and 'aux_paths' (possibly empty) entries.
    """
    enable_aux = False
    input_folder, gt_folder = folders[:2]
    if len(keys) == 3:
        input_key, gt_key, aux_key = keys
        enable_aux = True
    elif len(keys) == 2:
        input_key, gt_key = keys
    input_paths = list(os.listdir(input_folder))
    # (Fixed: an unused `gt_paths = os.listdir(gt_folder)` was removed.)
    if enable_aux:
        aux_dirs = folders[2]
    paths = []
    for input_path in input_paths:
        basename, ext = os.path.splitext(os.path.basename(input_path))
        input_name = f'{filename_tmpl.format(basename)}{ext}'
        input_path = os.path.join(input_folder, input_name)
        assert input_name in input_paths, (f'{input_name} is not in '
                                           f'{input_key}_paths.')
        if gt_folder is not None:
            gt_path = os.path.join(gt_folder, input_name)
        else:
            gt_path = ''
            gt_key = 'nogt'
        # Auxiliary maps are always stored as .npy named after the image.
        if enable_aux:
            aux_paths = [os.path.join(d, basename + '.npy') for d in aux_dirs]
        else:
            aux_paths = []
        paths.append({
            f'{input_key}_path': input_path,
            f'{gt_key}_path': gt_path,
            'aux_paths': aux_paths,
        })
    return paths
def parse_adobe_dataset(root, with_label=False):
    """Scan the Adobe relighting dataset layout under *root*.

    Each scene directory contributes one record holding its .jpg
    renderings, probe crops, material-segmentation image, meta.json path
    and the chrome/gray probe bounding boxes parsed from that file. When
    *with_label* is True the label-map path is included as well.
    """
    results = []
    for name in os.listdir(root):
        scene_dir = os.path.join(root, name)
        record = {
            'imgs': [os.path.join(scene_dir, f)
                     for f in os.listdir(scene_dir) if f.endswith('.jpg')],
            'probes': [os.path.join(scene_dir, 'probes', f)
                       for f in os.listdir(f'{scene_dir}/probes')],
            'seg_img': os.path.join(scene_dir, 'materials_mip2.png'),
            'info': os.path.join(scene_dir, 'meta.json'),
        }
        record['chrome_pos'], record['gray_pos'] = parse_meta_json(record['info'])
        if with_label:
            record['lab_img'] = os.path.join(scene_dir, 'label_map.png')
        results.append(record)
    return results
# Image name format: {notation of scene}/dir_{light_dir}_mip2.jpg
# other component: probes, materials_mip2.png, meta.json, thumb.jpg
def select_one2one_data(parsed_data, input_dir, gt_dir):
    """Reduce parsed Adobe scene records to one input/gt lighting pair.

    For every record the 'imgs'/'probes' lists are removed and replaced
    with the concrete image and probe paths for light directions
    *input_dir* (input) and *gt_dir* (ground truth). Records are
    modified in place; the same list object is returned.
    """
    for record in parsed_data:
        data_root = os.path.split(record.pop('imgs')[0])[0]
        record.pop('probes')
        probe_root = os.path.join(data_root, 'probes')
        record['input'] = os.path.join(data_root, f'dir_{input_dir}_mip2.jpg')
        record['gt'] = os.path.join(data_root, f'dir_{gt_dir}_mip2.jpg')
        record['input_gray_probe'] = os.path.join(probe_root, f'dir_{input_dir}_gray256.jpg')
        record['gt_gray_probe'] = os.path.join(probe_root, f'dir_{gt_dir}_gray256.jpg')
        record['input_chrome_probe'] = os.path.join(probe_root, f'dir_{input_dir}_chrome256.jpg')
        record['gt_chrome_probe'] = os.path.join(probe_root, f'dir_{gt_dir}_chrome256.jpg')
    return parsed_data
def parse_meta_json(path):
    """Parse a scene ``meta.json`` and return the probe bounding boxes.

    Args:
        path (str): Path to the meta.json file.

    Returns:
        tuple: ``(chrome_box, gray_box)`` where each box is
        ``(beg_x, beg_y, end_x, end_y)`` with ``end = beg + size``.
    """
    # json.load replaces the old readlines()/join/loads round-trip.
    with open(path, 'r', encoding='utf-8') as f:
        data = json.load(f)
    def _bbox(entry):
        # Convert {'x', 'y', 'w', 'h'} to corner coordinates.
        box = entry['bounding_box']
        return (box['x'], box['y'], box['x'] + box['w'], box['y'] + box['h'])
    return _bbox(data['chrome']), _bbox(data['gray'])
| 10,844 | 34.097087 | 109 | py |
IAN | IAN-master/dataset/data_sampler.py | import math
import torch
from torch.utils.data.sampler import Sampler
class EnlargedSampler(Sampler):
    """Sampler restricting data loading to one rank's subset, with
    optional dataset enlargement.

    Modified from torch.utils.data.distributed.DistributedSampler.
    Enlarging (ratio > 1) supports iteration-based training by avoiding
    a dataloader restart at every epoch boundary.

    Args:
        dataset (torch.utils.data.Dataset): Dataset used for sampling.
        num_replicas (int): Number of participating processes (usually
            the world size).
        rank (int): Rank of the current process within num_replicas.
        ratio (int): Enlarging ratio. Default: 1.
    """

    def __init__(self, dataset, num_replicas, rank, ratio=1):
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        self.num_samples = math.ceil(len(dataset) * ratio / num_replicas)
        self.total_size = self.num_samples * num_replicas

    def __iter__(self):
        # Deterministic shuffle keyed on the current epoch.
        gen = torch.Generator()
        gen.manual_seed(self.epoch)
        order = torch.randperm(self.total_size, generator=gen).tolist()
        # Wrap indices beyond the real dataset length back into range.
        size = len(self.dataset)
        order = [idx % size for idx in order]
        # Each rank takes a strided, disjoint slice of the permutation.
        subset = order[self.rank:self.total_size:self.num_replicas]
        assert len(subset) == self.num_samples
        return iter(subset)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
IAN | IAN-master/dataset/videodemo_dataset.py | import math
import torch
import numpy as np
from torch.utils import data as data
from dataset.data_utils import img2tensor, imread
def load_light(path):
    """Load spherical-harmonics light coefficients, one float per line.

    Fixed: the old ``float(l[:-1])`` sliced off the last character of each
    line, which silently corrupted the final value whenever the file had
    no trailing newline. ``float()`` ignores surrounding whitespace, so no
    slicing is needed; blank lines are skipped.
    """
    with open(path, 'r') as fr:
        light_params = np.array([float(line) for line in fr if line.strip()])
    return light_params
def get_pos(l):
    """Return a mapper from SH order m to a matrix index for band *l*.

    The returned callable maps m with |m| <= l to ``l - m`` and clamps
    anything outside that range to ``l``.
    """
    def index_of(m):
        return l - m if abs(m) <= l else l
    return index_of
def rot_sh(sh, degree):
    """Rotate 2nd-order spherical-harmonics coefficients by *degree*.

    Builds per-band rotation matrices (band 0: scalar identity, band 1:
    3x3, band 2: 5x5 via a recurrence over the lower bands) and applies
    them blockwise to the 9 SH coefficients.
    NOTE(review): the axis/convention is implied by the band-1 matrix
    layout — confirm it matches the renderer's SH convention.
    """
    angle = degree / 180. * math.pi
    cos_a = np.cos(angle)
    sin_a = np.sin(angle)
    rotmat0 = np.array([[1.]])
    rotmat1 = np.array(
        [[1., 0., 0.],
         [0, cos_a, sin_a],
         [0, -sin_a, cos_a]]
    )
    # 1, 0, -1 -> 0, 1, 2
    # 2 1 0 -1, -2 -> 0, 1, 2, 3, 4
    # (row, col)
    rotmat2 = np.zeros((5, 5))
    # corner (2, 2)
    rotmat2[0][0] = 0.5 * cos_a * rotmat1[0][0] + 0.5 * rotmat1[2][2]
    # corner (-2, -2)
    rotmat2[4][4] = 0.5 * rotmat1[0][0] + 0.5 * cos_a * rotmat1[2][2]
    l = 2
    # Index mappers from SH order m to matrix position for bands 2, 1, 0.
    pos2 = get_pos(l)
    pos1 = get_pos(l-1)
    pos0 = get_pos(l-2)
    # Outer rows/columns (|m| = l), derived from the band-1 matrix.
    for m in range(0, l):
        coff = np.sqrt(l*(l-0.5)/(l**2 - m**2))
        rotmat2[pos2(m)][pos2(l)] = coff * sin_a * rotmat1[pos1(m)][pos1(l-1)]
        rotmat2[pos2(-m)][pos2(-l)] = coff * sin_a * rotmat1[pos1(-m)][pos1(-l+1)]
        rotmat2[pos2(l)][pos2(m)] = (-1)**(m-l)*rotmat2[pos2(m)][pos2(l)]
        rotmat2[pos2(-l)][pos2(-m)] = (-1)**(l-m)*rotmat2[pos2(-m)][pos2(-l)]
    # Interior entries via the recurrence over the two lower-band matrices.
    for m in range(0, l):
        for n in range(0, l):
            coff0 = l*(2*l-1)/np.sqrt((l**2-m**2)*(l**2-n**2))
            coff1 = m*n / (l*(l-1))
            coff2 = ((l-1)**2-m**2)*((l-1)**2-n**2) / ((l-1)*(2*l-1))
            rotmat2[pos2(m)][pos2(n)] = coff0 * \
                (cos_a * rotmat1[pos1(m)][pos1(n)] \
                - coff1 * rotmat1[pos1(-m)][pos1(-n)] \
                - coff2 * rotmat0[pos0(m)][pos0(n)])
            rotmat2[pos2(n)][pos2(m)] = (-1)**(m-n)*rotmat2[pos2(m)][pos2(n)]
            rotmat2[pos2(-m)][pos2(-n)] = coff0 * \
                (cos_a * rotmat1[pos1(-m)][pos1(-n)] \
                - coff1 * rotmat1[pos1(m)][pos1(n)] \
                - coff2 * rotmat0[pos0(-m)][pos0(-n)])
            rotmat2[pos2(-n)][pos2(-m)] = (-1)**(n-m)*rotmat2[pos2(-m)][pos2(-n)]
    # Apply per-band rotations to the 1 + 3 + 5 coefficients.
    new_sh = np.zeros_like(sh)
    new_sh[:1] = sh[:1]
    new_sh[1:4] = np.matmul(rotmat1, sh[1:4])
    new_sh[4:] = np.matmul(rotmat2, sh[4:])
    return new_sh
class VideoDemoDataset(data.Dataset):
    """Synthesizes a relighting video sequence from a single demo image.

    Every item reuses the same (image + normal map) input tensor and
    pairs it with the initial SH lighting rotated by
    ``index * stride`` degrees.
    """

    def __init__(self, opt):
        super(VideoDemoDataset, self).__init__()
        self.opt = opt
        self.stride = opt['stride']
        self.frame_num = opt['frames']
        self.init_sh = load_light(opt['shroot'])  # optionally scalable (was: * 0.7)
        demo = imread(opt['img_path'])
        normal = imread(opt['normal_path'])
        self.demo_img, self.normal = img2tensor([demo, normal],
                                                bgr2rgb=True,
                                                float32=True)
        # Normal map rides along as extra input channels.
        self.input = torch.cat([self.demo_img, self.normal], axis=0)

    def __getitem__(self, index):
        # Rotate the base lighting by index * stride degrees; the image
        # itself never changes across frames.
        rotated = rot_sh(self.init_sh, index * self.stride)
        return {
            'input': self.input,
            'input_light': self.init_sh,
            'gt': self.input[:3, :, :],
            'gt_light': torch.tensor(rotated).float(),
            'input_path': f'{index}.png',
            'gt_path': f'{index}.png'
        }

    def __len__(self):
        return self.frame_num
| 3,804 | 32.086957 | 82 | py |
IAN | IAN-master/dataset/__init__.py | import importlib
import numpy as np
import random
import torch
import torch.utils.data
from functools import partial
import os
from base_utils.logger import get_root_logger
# import .anytoany_Dataset
__all__ = ['create_dataset', 'create_dataloader']
# Automatically scan and import dataset modules:
# every sibling file whose name ends with '_dataset.py' is imported so
# that create_dataset() can find its Dataset classes by name.
data_folder = os.path.dirname(os.path.abspath(__file__))
dataset_filenames = [
    os.path.splitext(os.path.basename(v))[0] for v in os.listdir(data_folder)
    if v.endswith('_dataset.py')
]
# import all the dataset modules (side effect: class registration via attrs)
_dataset_modules = [
    importlib.import_module(f'dataset.{file_name}')
    for file_name in dataset_filenames
]
def create_dataset(dataset_opt):
    """Instantiate a dataset object from its configuration.

    Args:
        dataset_opt (dict): Configuration for the dataset. Must contain:
            name (str): Dataset name (used for logging).
            type (str): Class name of the dataset to build; looked up in
                the auto-imported ``*_dataset`` modules of this package.

    Raises:
        ValueError: If no scanned module defines the requested class.
    """
    dataset_type = dataset_opt['type']
    # Dynamic instantiation: the first module exposing the class wins.
    dataset_cls = next(
        (cls for cls in (getattr(m, dataset_type, None)
                         for m in _dataset_modules) if cls is not None),
        None)
    if dataset_cls is None:
        raise ValueError(f'Dataset {dataset_type} is not found.')
    dataset = dataset_cls(dataset_opt)
    logger = get_root_logger()
    logger.info(
        f'Dataset {dataset.__class__.__name__} - {dataset_opt["name"]} '
        'is created.')
    return dataset
def create_dataloader(dataset,
                      dataset_opt,
                      num_gpu=1,
                      dist=False,
                      sampler=None,
                      seed=None):
    """Create dataloader.

    Args:
        dataset (torch.utils.data.Dataset): Dataset.
        dataset_opt (dict): Dataset options. It contains the following keys:
            phase (str): 'train' or 'val'.
            num_worker_per_gpu (int): Number of workers for each GPU.
            batch_size_per_gpu (int): Training batch size for each GPU.
        num_gpu (int): Number of GPUs. Used only in the train phase.
            Default: 1.
        dist (bool): Whether in distributed training. Used only in the train
            phase. Default: False.
        sampler (torch.utils.data.sampler): Data sampler. Default: None.
        seed (int | None): Seed. Default: None
    """
    phase = dataset_opt['phase']
    if phase == 'train':
        if dist:  # distributed training
            batch_size = dataset_opt['batch_size_per_gpu']
            num_workers = dataset_opt['num_worker_per_gpu']
        else:  # non-distributed training
            # Scale batch size and workers by GPU count (CPU-only counts as 1).
            multiplier = 1 if num_gpu == 0 else num_gpu
            batch_size = dataset_opt['batch_size_per_gpu'] * multiplier
            num_workers = dataset_opt['num_worker_per_gpu'] * multiplier
        dataloader_args = dict(
            dataset=dataset,
            batch_size=batch_size,
            shuffle=False,
            num_workers=num_workers,
            sampler=sampler,
            drop_last=True)
        # Only shuffle here when no sampler controls the ordering.
        if sampler is None:
            dataloader_args['shuffle'] = True
        # Per-worker seeding for reproducibility.
        # NOTE(review): rank is hard-coded to 0, so in multi-process
        # training every rank's workers get identical seeds — confirm
        # this is intended.
        dataloader_args['worker_init_fn'] = partial(
            worker_init_fn, num_workers=num_workers, rank=0,
            seed=seed) if seed is not None else None
    elif phase in ['val', 'test']:  # validation
        dataloader_args = dict(
            dataset=dataset, batch_size=1, shuffle=False, num_workers=0)
    else:
        raise ValueError(f'Wrong dataset phase: {phase}. '
                         "Supported ones are 'train', 'val' and 'test'.")
    dataloader_args['pin_memory'] = dataset_opt.get('pin_memory', False)
    return torch.utils.data.DataLoader(**dataloader_args)
def worker_init_fn(worker_id, num_workers, rank, seed):
# Set the worker seed to num_workers * rank + worker_id + seed
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed) | 3,932 | 34.116071 | 77 | py |
IAN | IAN-master/dataset/DPR_dataset.py | from dataset.data_utils import img2tensor, imread
from dataset.transforms import multi_random_crop
from torch.utils import data as data
import os
import os.path as osp
import torch
import numpy as np
def load_light(path):
    """Load spherical-harmonics light coefficients, one float per line.

    Fixed: the old ``float(l[:-1])`` sliced off the last character of each
    line, which silently corrupted the final value whenever the file had
    no trailing newline. ``float()`` ignores surrounding whitespace, so no
    slicing is needed; blank lines are skipped.
    """
    with open(path, 'r') as fr:
        light_params = np.array([float(line) for line in fr if line.strip()])
    return light_params
class DPRDataset(data.Dataset):
    """DPR portrait relighting dataset.

    Each subject folder holds 5 renderings (``<name>_00..04.png``) with
    matching SH light files (``<name>_light_00..04.txt``) plus a shared
    normal map (``full_normal.png``). Training draws a random target
    lighting per sample; test mode fixes input light 0 -> target light 1
    and uses only the last 100 subject folders.
    """
    def __init__(self, opt):
        super(DPRDataset, self).__init__()
        self.opt = opt
        self.light_num = 5  # renderings / light files per subject
        if opt.get('test_mode') == None:
            self.test_mode = False
        else:
            self.test_mode = opt['test_mode']
        self.input_root = opt['dataroot']
        self.input_folders = sorted(os.listdir(self.input_root))
        # Last 100 subjects are held out for testing.
        if self.test_mode:
            self.input_folders = self.input_folders[-100:]
        else:
            self.input_folders = self.input_folders[:-100]
    def __getitem__(self, index):
        # Load gt and input images. Dimension order: HWC; channel order: BGR;
        # image range: [-1, 1], float32.
        folder_idx = index // self.light_num
        input_light_idx = index % self.light_num if not self.test_mode else 0
        folder_path = osp.join(self.input_root, self.input_folders[folder_idx])
        # Target lighting is random in training, fixed to 1 in test mode.
        gt_light_idx = np.random.randint(0, 5) if not self.test_mode else 1
        input_path = osp.join(folder_path, f'{self.input_folders[folder_idx]}_{input_light_idx:02}.png')
        input_light_path = osp.join(folder_path, f'{self.input_folders[folder_idx]}_light_{input_light_idx:02}.txt')
        gt_path = osp.join(folder_path, f'{self.input_folders[folder_idx]}_{gt_light_idx:02}.png')
        gt_light_path = osp.join(folder_path, f'{self.input_folders[folder_idx]}_light_{gt_light_idx:02}.txt')
        light_input = torch.tensor(load_light(input_light_path)).float()
        light_gt = torch.tensor(load_light(gt_light_path)).float()
        img_input = imread(input_path)
        img_gt = imread(gt_path)
        input_normal = imread(osp.join(folder_path, 'full_normal.png'))
        # crop imgs (training only)
        if self.test_mode == False and self.opt['gt_size'] > 0:
            (img_gt, img_input, input_normal), _= multi_random_crop([img_gt, img_input, input_normal], self.opt['gt_size'])
        # BGR to RGB, HWC to CHW, numpy to tensor
        img_gt, img_input, input_normal = img2tensor([img_gt, img_input, input_normal],
                                            bgr2rgb=True,
                                            float32=True)
        # Normal map rides along as 3 extra input channels.
        img_input = torch.cat([img_input, input_normal], axis=0)
        return {
            'input': img_input,
            'input_light': light_input,
            'gt': img_gt,
            'gt_light': light_gt,
            'input_path': input_path,
            'gt_path': gt_path
        }
    def __len__(self):
        # light_num samples per subject folder.
        return len(self.input_folders) * self.light_num
| 2,987 | 34.571429 | 132 | py |
IAN | IAN-master/dataset/transforms.py | import cv2
import torch
import random
import torchvision.transforms as transforms
def mod_crop(img, scale):
    """Crop ``img`` so its height and width are multiples of ``scale``.

    Used during testing so that down/up-sampling by ``scale`` round-trips.

    Args:
        img (ndarray): Input image, 2-D (grayscale) or 3-D (H, W, C).
        scale (int): Scale factor.

    Returns:
        ndarray: Cropped copy of the input.

    Raises:
        ValueError: If ``img`` is neither 2-D nor 3-D.
    """
    cropped = img.copy()
    if cropped.ndim not in (2, 3):
        raise ValueError(f'Wrong img ndim: {cropped.ndim}.')
    height, width = cropped.shape[0], cropped.shape[1]
    # Drop the remainder rows/columns so both dimensions divide evenly.
    return cropped[:height - height % scale, :width - width % scale, ...]
def multi_random_crop(imgs, patch_size):
    """Crop the same random ``patch_size`` x ``patch_size`` window from every image.

    All images must share the same spatial size so a single (top, left)
    offset is valid for each of them.

    Args:
        imgs (list[ndarray] | ndarray): Image(s) of shape (H, W, ...).
        patch_size (int): Side length of the square crop.

    Returns:
        tuple[list[ndarray], None]: Cropped images plus a ``None`` placeholder
        kept for interface compatibility with callers that unpack two values.
    """
    if not isinstance(imgs, list):
        imgs = [imgs]
    h, w = imgs[0].shape[:2]
    # One shared offset keeps all images spatially aligned.
    # (The original also set an unused `flag = True`; removed.)
    top = random.randint(0, h - patch_size)
    left = random.randint(0, w - patch_size)
    imgs = [img[top:top + patch_size, left:left + patch_size, ...] for img in imgs]
    return imgs, None
def paired_random_crop(img_gts, img_lqs, gt_patch_size, scale, gt_path):
    """Crop matching random patches from GT and LQ images.

    The crop window is drawn once in LQ coordinates and the GT window is the
    same window scaled by ``scale``, so every pair stays aligned.

    Args:
        img_gts (list[ndarray] | ndarray): GT image(s); all the same shape.
        img_lqs (list[ndarray] | ndarray): LQ image(s); all the same shape.
        gt_patch_size (int): Patch size of the GT crop.
        scale (int): Scale factor between GT and LQ.
        gt_path (str): GT path, used only in error messages.

    Returns:
        list[ndarray] | ndarray: Cropped GT image(s) and LQ image(s); a bare
        ndarray is returned when a single image was passed in.

    Raises:
        ValueError: If GT/LQ sizes do not match ``scale``, or the LQ image is
            smaller than the requested patch.
    """
    if not isinstance(img_gts, list):
        img_gts = [img_gts]
    if not isinstance(img_lqs, list):
        img_lqs = [img_lqs]

    h_lq, w_lq, _ = img_lqs[0].shape
    h_gt, w_gt, _ = img_gts[0].shape
    lq_patch_size = gt_patch_size // scale

    if h_gt != h_lq * scale or w_gt != w_lq * scale:
        raise ValueError(
            f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x ',
            f'multiplication of LQ ({h_lq}, {w_lq}).')
    if h_lq < lq_patch_size or w_lq < lq_patch_size:
        raise ValueError(f'LQ ({h_lq}, {w_lq}) is smaller than patch size '
                         f'({lq_patch_size}, {lq_patch_size}). '
                         f'Please remove {gt_path}.')

    # Draw the window once, in LQ coordinates.
    top_lq = random.randint(0, h_lq - lq_patch_size)
    left_lq = random.randint(0, w_lq - lq_patch_size)
    img_lqs = [
        lq[top_lq:top_lq + lq_patch_size, left_lq:left_lq + lq_patch_size, ...]
        for lq in img_lqs
    ]

    # The GT window is the LQ window scaled up.
    top_gt, left_gt = int(top_lq * scale), int(left_lq * scale)
    img_gts = [
        gt[top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size, ...]
        for gt in img_gts
    ]

    # Single-image inputs are returned as bare arrays, not 1-element lists.
    if len(img_gts) == 1:
        img_gts = img_gts[0]
    if len(img_lqs) == 1:
        img_lqs = img_lqs[0]
    return img_gts, img_lqs
def augment(imgs, hflip=True, rotation=True, color=True, flows=None, return_status=False):
    """Augment: horizontal flips OR rotate (0, 90, 180, 270 degrees).

    We use vertical flip and transpose for rotation implementation.
    All the images in the list use the same augmentation.

    Args:
        imgs (list[ndarray] | ndarray): Images to be augmented. If the input
            is an ndarray, it will be transformed to a list.
        hflip (bool): Horizontal flip. Default: True.
        rotation (bool): Rotation. Default: True.
        color (bool): Kept for interface/RNG compatibility; color jitter is
            currently not applied (the old implementation only carried dead
            debug code for it).
        flows (list[ndarray]): Flows to be augmented. If the input is an
            ndarray, it will be transformed to a list.
            Dimension is (h, w, 2). Default: None.
        return_status (bool): Return the status of flip and rotation.
            Default: False.

    Returns:
        list[ndarray] | ndarray: Augmented images and flows. If returned
        results only have one element, just return ndarray.
    """
    # Draw every augmentation decision once so all images/flows agree.
    hflip = hflip and random.random() < 0.5
    vflip = rotation and random.random() < 0.5
    rot90 = rotation and random.random() < 0.5
    # Consumed to keep the RNG stream identical to earlier runs even though
    # color jitter is not applied.
    color = color and random.random() < 0.5

    def _augment(img):
        if hflip:  # horizontal (in place)
            cv2.flip(img, 1, img)
        if vflip:  # vertical (in place)
            cv2.flip(img, 0, img)
        if rot90:
            img = img.transpose(1, 0, 2)
        return img

    def _augment_flow(flow):
        if hflip:  # horizontal flip also negates the x component
            cv2.flip(flow, 1, flow)
            flow[:, :, 0] *= -1
        if vflip:  # vertical flip also negates the y component
            cv2.flip(flow, 0, flow)
            flow[:, :, 1] *= -1
        if rot90:
            flow = flow.transpose(1, 0, 2)
            flow = flow[:, :, [1, 0]]
        return flow

    if not isinstance(imgs, list):
        imgs = [imgs]
    imgs = [_augment(img) for img in imgs]
    if len(imgs) == 1:
        imgs = imgs[0]

    if flows is not None:
        if not isinstance(flows, list):
            flows = [flows]
        flows = [_augment_flow(flow) for flow in flows]
        if len(flows) == 1:
            flows = flows[0]
        return imgs, flows
    else:
        if return_status:
            return imgs, (hflip, vflip, rot90)
        else:
            return imgs
def img_rotate(img, angle, center=None, scale=1.0):
    """Rotate an image around ``center`` by ``angle`` degrees.

    Args:
        img (ndarray): Image to be rotated.
        angle (float): Rotation angle in degrees; positive values rotate
            counter-clockwise.
        center (tuple[int]): Rotation center; defaults to the image center.
        scale (float): Isotropic scale factor. Default: 1.0.

    Returns:
        ndarray: The rotated image, same size as the input.
    """
    height, width = img.shape[:2]
    if center is None:
        center = (width // 2, height // 2)
    rot_mat = cv2.getRotationMatrix2D(center, angle, scale)
    return cv2.warpAffine(img, rot_mat, (width, height))
def dir_augment(imgs, a_codes):
    """Randomly mirror images and remap their direction codes to match.

    With probability 0.5 every image is flipped horizontally (in place) and
    each direction code is swapped with its horizontal counterpart: codes 2
    and 3 are left/right symmetric and stay unchanged, all other codes form
    even/odd pairs.
    """
    do_flip = random.random() < 0.5

    def _mirror(img):
        cv2.flip(img, 1, img)
        return img

    def _mirror_code(code):
        if code in (2, 3):
            return code
        return code + 1 if code % 2 == 0 else code - 1

    if not isinstance(imgs, list):
        imgs = [imgs]
        a_codes = [a_codes]
    if do_flip:
        imgs = [_mirror(img) for img in imgs]
        a_codes = [_mirror_code(code) for code in a_codes]
    # Single-image inputs are returned as bare values, not 1-element lists.
    if len(imgs) == 1:
        imgs = imgs[0]
        a_codes = a_codes[0]
    return imgs, a_codes
| 7,111 | 31.623853 | 90 | py |
IAN | IAN-master/lpips-pytorch/setup.py | from setuptools import setup
# Package metadata for distributing lpips_pytorch via setuptools.
# (The closing parenthesis of this call was lost to extraction garbling and
# is restored here.)
setup(
    name='lpips_pytorch',
    version='latest',
    description='LPIPS as a Package.',
    packages=['lpips_pytorch', 'lpips_pytorch.modules'],
    author='So Uchida',
    author_email='s.aiueo32@gmail.com',
    install_requires=["torch", "torchvision"],
    url='https://github.com/S-aiueo32/lpips-pytorch',
)
IAN | IAN-master/lpips-pytorch/tests/test_allclose.py | from pathlib import Path
import torchvision.transforms.functional as TF
from PIL import Image
from torch.testing import assert_allclose
from lpips_pytorch import LPIPS
from lpips_pytorch import lpips
from PerceptualSimilarity.models import PerceptualLoss
# Test fixtures: the Lenna image plus a degraded copy made by downscaling 2x
# and upscaling back; tensors are mapped from [0, 1] to [-1, 1] as LPIPS
# expects.
# NOTE(review): ``resize`` is given a generator expression here -- PIL
# generally expects a (width, height) tuple; confirm this runs on the PIL
# version in use.
img = Image.open(Path(__file__).parents[1].joinpath('data/lenna.png'))
img_x2 = img.resize((x // 2 for x in img.size)).resize(img.size)
tensor_org = TF.to_tensor(img).unsqueeze(0) * 2 - 1
tensor_x2 = TF.to_tensor(img_x2).unsqueeze(0) * 2 - 1
def test_functional():
    # Smoke test: the functional API returns a truthy (non-zero) distance.
    assert lpips(tensor_x2, tensor_org)
def test_functional_on_gpu():
    # Same smoke test with both inputs moved to the first CUDA device.
    assert lpips(tensor_x2.to('cuda:0'), tensor_org.to('cuda:0'))
def test_on_gpu():
    # GPU result must match the reference PerceptualSimilarity implementation.
    org_criterion = PerceptualLoss(net='alex', use_gpu=True)
    my_criterion = LPIPS('alex', version='0.1').to('cuda:0')
    org_loss = org_criterion.forward(
        tensor_x2.to('cuda:0'), tensor_org.to('cuda:0'))
    my_loss = my_criterion(
        tensor_x2.to('cuda:0'), tensor_org.to('cuda:0'))
    assert_allclose(org_loss, my_loss)
def test_alex_v0_1():
    # CPU parity with the reference implementation, AlexNet backbone.
    org_criterion = PerceptualLoss(net='alex', use_gpu=False)
    my_criterion = LPIPS('alex', version='0.1')
    org_loss = org_criterion.forward(tensor_x2, tensor_org)
    my_loss = my_criterion(tensor_x2, tensor_org)
    assert_allclose(org_loss, my_loss)
def test_squeeze_v0_1():
    # CPU parity with the reference implementation, SqueezeNet backbone.
    org_criterion = PerceptualLoss(net='squeeze', use_gpu=False)
    my_criterion = LPIPS('squeeze', version='0.1')
    org_loss = org_criterion.forward(tensor_x2, tensor_org)
    my_loss = my_criterion(tensor_x2, tensor_org)
    assert_allclose(org_loss, my_loss)
def test_vgg_v0_1():
    # CPU parity with the reference implementation, VGG16 backbone.
    org_criterion = PerceptualLoss(net='vgg', use_gpu=False)
    my_criterion = LPIPS('vgg', version='0.1')
    org_loss = org_criterion.forward(tensor_x2, tensor_org)
    my_loss = my_criterion(tensor_x2, tensor_org)
    assert_allclose(org_loss, my_loss)
| 1,889 | 27.636364 | 70 | py |
IAN | IAN-master/lpips-pytorch/lpips_pytorch/__init__.py | import torch
from .modules.lpips import LPIPS
def lpips(x: torch.Tensor,
          y: torch.Tensor,
          net_type: str = 'alex',
          version: str = '0.1'):
    r"""Function that measures
    Learned Perceptual Image Patch Similarity (LPIPS).

    Arguments:
        x, y (torch.Tensor): the input tensors to compare.
        net_type (str): the network type to compare the features:
                        'alex' | 'squeeze' | 'vgg'. Default: 'alex'.
        version (str): the version of LPIPS. Default: 0.1.
    """
    # Build the criterion on the same device as the inputs, then evaluate.
    criterion = LPIPS(net_type, version).to(x.device)
    return criterion(x, y)
| 635 | 27.909091 | 68 | py |
IAN | IAN-master/lpips-pytorch/lpips_pytorch/modules/lpips.py | import torch
import torch.nn as nn
from .networks import get_network, LinLayers
from .utils import get_state_dict
class LPIPS(nn.Module):
    r"""Creates a criterion that measures
    Learned Perceptual Image Patch Similarity (LPIPS).

    Arguments:
        net_type (str): the network type to compare the features:
                        'alex' | 'squeeze' | 'vgg'. Default: 'alex'.
        version (str): the version of LPIPS. Default: 0.1.
    """
    def __init__(self, net_type: str = 'alex', version: str = '0.1'):
        assert version in ['0.1'], 'v0.1 is only supported now'
        super(LPIPS, self).__init__()

        # Frozen pretrained feature extractor.
        self.net = get_network(net_type)

        # Per-layer 1x1 convolutions that weight the feature differences,
        # loaded from the official LPIPS checkpoint.
        self.lin = LinLayers(self.net.n_channels_list)
        self.lin.load_state_dict(get_state_dict(net_type, version))

    def forward(self, x: torch.Tensor, y: torch.Tensor):
        # Squared differences of unit-normalized features, one per layer.
        feats_x, feats_y = self.net(x), self.net(y)
        diffs = [(fx - fy) ** 2 for fx, fy in zip(feats_x, feats_y)]
        # Weight each layer, average spatially, then sum over the layers.
        scores = [lin(d).mean((2, 3), True) for d, lin in zip(diffs, self.lin)]
        return torch.sum(torch.cat(scores, 0), 0, True)
| 1,151 | 30.135135 | 71 | py |
IAN | IAN-master/lpips-pytorch/lpips_pytorch/modules/utils.py | from collections import OrderedDict
import torch
def normalize_activation(x, eps=1e-10):
    """Scale ``x`` to unit L2 norm along the channel dimension (dim 1).

    ``eps`` guards against division by zero for all-zero activations.
    """
    channel_norm = torch.sqrt(torch.sum(x ** 2, dim=1, keepdim=True))
    return x / (channel_norm + eps)
def get_state_dict(net_type: str = 'alex', version: str = '0.1'):
    """Download and adapt the official LPIPS linear-layer weights.

    The checkpoint is fetched from the PerceptualSimilarity repository and
    its keys are renamed ('lin' and 'model.' substrings stripped) to match
    this package's ``LinLayers`` layout.
    """
    # Build the download URL for the requested backbone/version.
    url = 'https://raw.githubusercontent.com/richzhang/PerceptualSimilarity/' \
        + f'master/lpips/weights/v{version}/{net_type}.pth'

    # Fetch the checkpoint; map to CPU when CUDA is unavailable.
    old_state_dict = torch.hub.load_state_dict_from_url(
        url, progress=True,
        map_location=None if torch.cuda.is_available() else torch.device('cpu')
    )

    # Rename keys to this package's layout.
    new_state_dict = OrderedDict(
        (key.replace('lin', '').replace('model.', ''), val)
        for key, val in old_state_dict.items()
    )
    return new_state_dict
| 885 | 27.580645 | 79 | py |
IAN | IAN-master/lpips-pytorch/lpips_pytorch/modules/networks.py | from typing import Sequence
from itertools import chain
import torch
import torch.nn as nn
from torchvision import models
from .utils import normalize_activation
def get_network(net_type: str):
    """Instantiate the pretrained feature extractor for ``net_type``.

    Args:
        net_type (str): One of 'alex', 'squeeze' or 'vgg'.

    Raises:
        NotImplementedError: For any other ``net_type``.
    """
    if net_type == 'alex':
        return AlexNet()
    if net_type == 'squeeze':
        return SqueezeNet()
    if net_type == 'vgg':
        return VGG16()
    raise NotImplementedError('choose net_type from [alex, squeeze, vgg].')
class LinLayers(nn.ModuleList):
    """Frozen 1x1 convolutions that score per-layer feature differences."""

    def __init__(self, n_channels_list: Sequence[int]):
        # One (Identity -> bias-free 1x1 conv) head per backbone stage.
        heads = [
            nn.Sequential(
                nn.Identity(),
                nn.Conv2d(n_channels, 1, 1, 1, 0, bias=False)
            ) for n_channels in n_channels_list
        ]
        super(LinLayers, self).__init__(heads)

        # Weights come from a checkpoint and are never trained here.
        for param in self.parameters():
            param.requires_grad = False
class BaseNet(nn.Module):
    """Common wrapper for torchvision backbones used as LPIPS extractors.

    Subclasses set ``self.layers`` (the backbone's feature stages),
    ``self.target_layers`` (1-based indices of the stages to return) and
    ``self.n_channels_list`` (channels of each returned stage).
    """

    def __init__(self):
        super(BaseNet, self).__init__()

        # LPIPS normalization constants, stored as buffers so they follow
        # the module across devices.
        self.register_buffer(
            'mean', torch.Tensor([-.030, -.088, -.188])[None, :, None, None])
        self.register_buffer(
            'std', torch.Tensor([.458, .448, .450])[None, :, None, None])

    def set_requires_grad(self, state: bool):
        # Freeze/unfreeze parameters and buffers alike.
        for tensor in chain(self.parameters(), self.buffers()):
            tensor.requires_grad = state

    def z_score(self, x: torch.Tensor):
        # Normalize the input with the stored mean/std buffers.
        return (x - self.mean) / self.std

    def forward(self, x: torch.Tensor):
        x = self.z_score(x)

        feats = []
        for index, (_, layer) in enumerate(self.layers._modules.items(), 1):
            x = layer(x)
            if index in self.target_layers:
                feats.append(normalize_activation(x))
            # Stop once every requested activation has been captured.
            if len(feats) == len(self.target_layers):
                break
        return feats
class SqueezeNet(BaseNet):
    """SqueezeNet 1.1 backbone with LPIPS feature taps."""

    def __init__(self):
        super(SqueezeNet, self).__init__()

        # Pretrained ImageNet features; tap indices and channel counts
        # follow the official LPIPS release.
        self.layers = models.squeezenet1_1(True).features
        self.n_channels_list = [64, 128, 256, 384, 384, 512, 512]
        self.target_layers = [2, 5, 8, 10, 11, 12, 13]

        self.set_requires_grad(False)
class AlexNet(BaseNet):
    """AlexNet backbone with LPIPS feature taps."""

    def __init__(self):
        super(AlexNet, self).__init__()

        # Pretrained ImageNet features; tap indices and channel counts
        # follow the official LPIPS release.
        self.layers = models.alexnet(True).features
        self.n_channels_list = [64, 192, 384, 256, 256]
        self.target_layers = [2, 5, 8, 10, 12]

        self.set_requires_grad(False)
class VGG16(BaseNet):
    """VGG-16 backbone with LPIPS feature taps."""

    def __init__(self):
        super(VGG16, self).__init__()

        # Pretrained ImageNet features; tap indices and channel counts
        # follow the official LPIPS release.
        self.layers = models.vgg16(True).features
        self.n_channels_list = [64, 128, 256, 512, 512]
        self.target_layers = [4, 9, 16, 23, 30]

        self.set_requires_grad(False)
| 2,654 | 26.371134 | 79 | py |
IAN | IAN-master/network/lr_scheduler.py | import math
from collections import Counter
from torch.optim.lr_scheduler import _LRScheduler
class MultiStepRestartLR(_LRScheduler):
    """ MultiStep with restarts learning rate scheme.

    Args:
        optimizer (torch.nn.optimizer): Torch optimizer.
        milestones (list): Iterations that will decrease learning rate.
        gamma (float): Decrease ratio. Default: 0.1.
        restarts (list): Restart iterations. Default: [0].
        restart_weights (list): Restart weights at each restart iteration.
            Default: [1].
        last_epoch (int): Used in _LRScheduler. Default: -1.
    """

    def __init__(self,
                 optimizer,
                 milestones,
                 gamma=0.1,
                 restarts=(0, ),
                 restart_weights=(1, ),
                 last_epoch=-1):
        # A Counter lets one milestone appear several times, multiplying the
        # decay accordingly.
        self.milestones = Counter(milestones)
        self.gamma = gamma
        self.restarts = restarts
        self.restart_weights = restart_weights
        assert len(self.restarts) == len(
            self.restart_weights), 'restarts and their weights do not match.'
        super(MultiStepRestartLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        # At a restart epoch, reset every group to its initial lr scaled by
        # the matching restart weight.
        if self.last_epoch in self.restarts:
            weight = self.restart_weights[self.restarts.index(self.last_epoch)]
            return [
                group['initial_lr'] * weight
                for group in self.optimizer.param_groups
            ]
        # Otherwise decay by gamma once per milestone hit at this epoch;
        # Counter returns 0 for non-milestones, so the factor is 1 there.
        decay = self.gamma ** self.milestones[self.last_epoch]
        return [group['lr'] * decay for group in self.optimizer.param_groups]
def get_position_from_periods(iteration, cumulative_period):
    """Get the position from a period list.

    It will return the index of the right-closest number in the period list.
    For example, the cumulative_period = [100, 200, 300, 400],
    if iteration == 50, return 0;
    if iteration == 210, return 2;
    if iteration == 300, return 2.

    Args:
        iteration (int): Current iteration.
        cumulative_period (list[int]): Cumulative period list.

    Returns:
        int: The position of the right-closest number in the period list.

    Raises:
        ValueError: If ``iteration`` is beyond the last cumulative period.
            (The old code silently fell off the loop and returned None,
            which crashed callers later with an opaque TypeError.)
    """
    for i, period in enumerate(cumulative_period):
        if iteration <= period:
            return i
    raise ValueError(
        f'iteration {iteration} exceeds the cumulative periods '
        f'{cumulative_period}.')
class CosineAnnealingRestartLR(_LRScheduler):
    """ Cosine annealing with restarts learning rate scheme.

    An example of config:
    periods = [10, 10, 10, 10]
    restart_weights = [1, 0.5, 0.5, 0.5]
    eta_min=1e-7

    It has four cycles, each has 10 iterations. At 10th, 20th, 30th, the
    scheduler will restart with the weights in restart_weights.

    Args:
        optimizer (torch.nn.optimizer): Torch optimizer.
        periods (list): Period for each cosine anneling cycle.
        restart_weights (list): Restart weights at each restart iteration.
            Default: [1].
        eta_min (float): The mimimum lr. Default: 0.
        last_epoch (int): Used in _LRScheduler. Default: -1.
    """

    def __init__(self,
                 optimizer,
                 periods,
                 restart_weights=(1, ),
                 eta_min=0,
                 last_epoch=-1):
        self.periods = periods
        self.restart_weights = restart_weights
        self.eta_min = eta_min
        assert (len(self.periods) == len(self.restart_weights)
                ), 'periods and restart_weights should have the same length.'
        # Running totals mark the epoch where each cosine cycle ends.
        self.cumulative_period = [
            sum(self.periods[0:i + 1]) for i in range(0, len(self.periods))
        ]
        super(CosineAnnealingRestartLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        # Locate the current cycle, its restart weight and its start epoch.
        cycle = get_position_from_periods(self.last_epoch,
                                          self.cumulative_period)
        weight = self.restart_weights[cycle]
        cycle_start = 0 if cycle == 0 else self.cumulative_period[cycle - 1]
        cycle_len = self.periods[cycle]

        # Standard cosine annealing within the cycle, scaled by the restart
        # weight.
        cosine = math.cos(math.pi * (
            (self.last_epoch - cycle_start) / cycle_len))
        return [
            self.eta_min + weight * 0.5 * (base_lr - self.eta_min) *
            (1 + cosine)
            for base_lr in self.base_lrs
        ]
| 4,312 | 37.508929 | 79 | py |
IAN | IAN-master/network/base_model.py | import logging
import os
import torch
from collections import OrderedDict
from copy import deepcopy
from torch.nn.parallel import DataParallel, DistributedDataParallel
from network import lr_scheduler as lr_scheduler
logger = logging.getLogger('relighting')
class BaseModel():
    """Base model: shared device/optimizer/checkpoint plumbing.

    Concrete models override feed_data / optimize_parameters /
    get_current_visuals / save.
    """

    def __init__(self, opt):
        self.opt = opt
        self.device = torch.device('cuda' if opt['num_gpu'] != 0 else 'cpu')
        self.is_train = opt['is_train']
        self.optimizers = []
        self.log_dict = {}

    def feed_data(self, data):
        pass

    def optimize_parameters(self):
        pass

    def get_current_visuals(self):
        pass

    def save(self, epoch, current_iter):
        """Save networks and training state."""
        pass

    def setup_schedulers(self):
        """Set up schedulers."""
        train_opt = self.opt['train']
        scheduler_type = train_opt['scheduler'].pop('type')
        if scheduler_type in ['MultiStepLR', 'MultiStepRestartLR']:
            for optimizer in self.optimizers:
                self.schedulers.append(
                    lr_scheduler.MultiStepRestartLR(optimizer,
                                                    **train_opt['scheduler']))
        elif scheduler_type == 'CosineAnnealingRestartLR':
            for optimizer in self.optimizers:
                self.schedulers.append(
                    lr_scheduler.CosineAnnealingRestartLR(
                        optimizer, **train_opt['scheduler']))
        else:
            raise NotImplementedError(
                f'Scheduler {scheduler_type} is not implemented yet.')

    def validation(self, dataloader, current_iter, tb_logger, save_img=False):
        """Validation function.

        Args:
            dataloader (torch.utils.data.DataLoader): Validation dataloader.
            current_iter (int): Current iteration.
            tb_logger (tensorboard logger): Tensorboard logger.
            save_img (bool): Whether to save images. Default: False.
        """
        self.nondist_validation(dataloader, current_iter, tb_logger, save_img)

    def get_current_log(self):
        return self.log_dict

    def model_to_device(self, net):
        """Model to device. It also wraps models with DataParallel when
        multiple GPUs are configured.

        Args:
            net (nn.Module)
        """
        net = net.to(self.device)
        if self.opt['parallel'] and self.opt['num_gpu'] > 1:
            net = DataParallel(net)
        return net

    def get_bare_model(self, net):
        """Get bare model, especially under wrapping with
        DistributedDataParallel or DataParallel.
        """
        if isinstance(net, (DataParallel, DistributedDataParallel)):
            net = net.module
        return net

    def print_network(self, net):
        """Print the str and parameter number of a network.

        Args:
            net (nn.Module)
        """
        if isinstance(net, (DataParallel, DistributedDataParallel)):
            net_cls_str = (f'{net.__class__.__name__} - '
                           f'{net.module.__class__.__name__}')
        else:
            net_cls_str = f'{net.__class__.__name__}'

        net = self.get_bare_model(net)
        net_str = str(net)
        net_params = sum(map(lambda x: x.numel(), net.parameters()))

        logger.info(
            f'Network: {net_cls_str}, with parameters: {net_params:,d}')
        logger.info(net_str)

    def _set_lr(self, lr_groups_l):
        """Set learning rate for warmup.

        Args:
            lr_groups_l (list): List for lr_groups, each for an optimizer.
        """
        for optimizer, lr_groups in zip(self.optimizers, lr_groups_l):
            for param_group, lr in zip(optimizer.param_groups, lr_groups):
                param_group['lr'] = lr

    def _get_init_lr(self):
        """Get the initial lr, which is set by the scheduler.
        """
        init_lr_groups_l = []
        for optimizer in self.optimizers:
            init_lr_groups_l.append(
                [v['initial_lr'] for v in optimizer.param_groups])
        return init_lr_groups_l

    def update_learning_rate(self, current_iter, warmup_iter=-1):
        """Update learning rate.

        Args:
            current_iter (int): Current iteration.
            warmup_iter (int): Warmup iter numbers. -1 for no warmup.
                Default: -1.
        """
        if current_iter > 1:
            for scheduler in self.schedulers:
                scheduler.step()
        # set up warm-up learning rate
        if current_iter < warmup_iter:
            # get initial lr for each group
            init_lr_g_l = self._get_init_lr()
            # modify warming-up learning rates
            # currently only support linearly warm up
            warm_up_lr_l = []
            for init_lr_g in init_lr_g_l:
                warm_up_lr_l.append(
                    [v / warmup_iter * current_iter for v in init_lr_g])
            # set learning rate
            self._set_lr(warm_up_lr_l)

    def get_current_learning_rate(self):
        return [
            param_group['lr']
            for param_group in self.optimizers[0].param_groups
        ]

    def save_network(self, net, net_label, current_iter, param_key='params'):
        """Save networks.

        Args:
            net (nn.Module | list[nn.Module]): Network(s) to be saved.
            net_label (str): Network label.
            current_iter (int): Current iter number.
            param_key (str | list[str]): The parameter key(s) to save network.
                Default: 'params'.
        """
        if current_iter == -1:
            current_iter = 'latest'
        save_filename = f'{net_label}_{current_iter}.pth'
        save_path = os.path.join(self.opt['path']['models'], save_filename)
        # makedirs(exist_ok=True) is race-free and also creates missing
        # parent directories; the old exists()+mkdir pair could fail on both
        # counts.
        os.makedirs(self.opt['path']['models'], exist_ok=True)

        net = net if isinstance(net, list) else [net]
        param_key = param_key if isinstance(param_key, list) else [param_key]
        assert len(net) == len(
            param_key), 'The lengths of net and param_key should be the same.'

        save_dict = {}
        for net_, param_key_ in zip(net, param_key):
            net_ = self.get_bare_model(net_)
            state_dict = net_.state_dict()
            # Iterate over a snapshot: the old code mutated state_dict while
            # iterating .items(), which raises RuntimeError as soon as a
            # 'module.' key actually needs stripping (and left the old key
            # behind).
            for key, param in list(state_dict.items()):
                if key.startswith('module.'):  # remove unnecessary 'module.'
                    del state_dict[key]
                    key = key[7:]
                state_dict[key] = param.cpu()
            save_dict[param_key_] = state_dict

        torch.save(save_dict, save_path)

    def _print_different_keys_loading(self, crt_net, load_net, strict=True):
        """Print keys with different name or different size when loading models.

        1. Print keys with different names.
        2. If strict=False, print the same key but with different tensor size.
           It also ignores these keys with different sizes (not loaded).

        Args:
            crt_net (torch model): Current network.
            load_net (dict): Loaded network.
            strict (bool): Whether strictly loaded. Default: True.
        """
        crt_net = self.get_bare_model(crt_net)
        crt_net = crt_net.state_dict()
        crt_net_keys = set(crt_net.keys())
        load_net_keys = set(load_net.keys())

        if crt_net_keys != load_net_keys:
            logger.warning('Current net - loaded net:')
            for v in sorted(list(crt_net_keys - load_net_keys)):
                logger.warning(f'  {v}')
            logger.warning('Loaded net - current net:')
            for v in sorted(list(load_net_keys - crt_net_keys)):
                logger.warning(f'  {v}')

        # check the size for the same keys
        if not strict:
            common_keys = crt_net_keys & load_net_keys
            for k in common_keys:
                if crt_net[k].size() != load_net[k].size():
                    logger.warning(
                        f'Size different, ignore [{k}]: crt_net: '
                        f'{crt_net[k].shape}; load_net: {load_net[k].shape}')
                    load_net[k + '.ignore'] = load_net.pop(k)

    def load_network(self, net, load_path, strict=True, param_key='params'):
        """Load network.

        Args:
            load_path (str): The path of networks to be loaded.
            net (nn.Module): Network.
            strict (bool): Whether strictly loaded.
            param_key (str): The parameter key of loaded network. If set to
                None, use the root 'path'.
                Default: 'params'.
        """
        net = self.get_bare_model(net)
        logger.info(
            f'Loading {net.__class__.__name__} model from {load_path}.')
        load_net = torch.load(
            load_path, map_location=lambda storage, loc: storage)
        if param_key is not None:
            load_net = load_net[param_key]
        # remove unnecessary 'module.'
        for k, v in deepcopy(load_net).items():
            if k.startswith('module.'):
                load_net[k[7:]] = v
                load_net.pop(k)
        self._print_different_keys_loading(net, load_net, strict)
        net.load_state_dict(load_net, strict=strict)

    def save_training_state(self, epoch, current_iter):
        """Save training states during training, which will be used for
        resuming.

        Args:
            epoch (int): Current epoch.
            current_iter (int): Current iteration.
        """
        if current_iter != -1:
            state = {
                'epoch': epoch,
                'iter': current_iter,
                'optimizers': [],
            }
            for o in self.optimizers:
                state['optimizers'].append(o.state_dict())
            save_filename = f'{current_iter}.state'
            save_path = os.path.join(self.opt['path']['training_states'],
                                     save_filename)
            torch.save(state, save_path)

    def resume_training(self, resume_state):
        """Reload the optimizers and schedulers for resumed training.

        Args:
            resume_state (dict): Resume state.
        """
        resume_optimizers = resume_state['optimizers']
        assert len(resume_optimizers) == len(
            self.optimizers), 'Wrong lengths of optimizers'
        for i, o in enumerate(resume_optimizers):
            self.optimizers[i].load_state_dict(o)

    # for distribute training
    def reduce_loss_dict(self, loss_dict):
        """reduce loss dict.

        In distributed training, it averages the losses among different GPUs.

        Args:
            loss_dict (OrderedDict): Loss dict.
        """
        with torch.no_grad():
            if self.opt['dist']:
                keys = []
                losses = []
                for name, value in loss_dict.items():
                    keys.append(name)
                    losses.append(value)
                losses = torch.stack(losses, 0)
                torch.distributed.reduce(losses, dst=0)
                if self.opt['rank'] == 0:
                    losses /= self.opt['world_size']
                loss_dict = {key: loss for key, loss in zip(keys, losses)}

            log_dict = OrderedDict()
            for name, value in loss_dict.items():
                log_dict[name] = value.mean().item()

            return log_dict
IAN | IAN-master/network/relight_model.py | import os
import cv2
import torch
import importlib
import numpy as np
from tqdm import tqdm
from copy import deepcopy
from network.loss import MSELoss
from collections import OrderedDict
from base_utils.utils import col_stitch
from network.arch import define_network
from network.base_model import BaseModel
from torch.nn.functional import interpolate
from base_utils.logger import get_root_logger
from dataset.data_utils import imwrite, tensor2img
# Loss and metric classes are looked up by name from these modules at
# runtime (see the getattr calls in the model classes below).
loss_module = importlib.import_module('network.loss')
metric_module = importlib.import_module('metrics')
# NOTE(review): module-level flag, not referenced in the visible code --
# presumably consumed by interpolation calls elsewhere; confirm before use.
align_corners = False
class PyramidRelightingModel(BaseModel):
"""Pyramid Religting model for Religting."""
def __init__(self, opt):
super(PyramidRelightingModel, self).__init__(opt)
self.num_layers = opt['layers']
self.scale = opt['scale_factor']
self.interp_mode = opt['interp_mode']
self.loss_mask = opt['loss_mask'] if opt.get('loss_mask') != None else False
# define network
self.net_g = define_network(deepcopy(opt['network_g']))
self.net_g = self.model_to_device(self.net_g)
self.print_network(self.net_g)
self.losses = dict()
self.best_cri = opt.get('val').get('best_metrics')
self.best_cri2 = opt.get('val').get('best_metrics2')
# self.with_depth = opt['with_depty']
if opt['is_train']:
self.decay_freq = opt.get('train').get('decay_freq')
self.gamma = opt.get('train').get('gamma')
self.multi_stage = opt.get('multi_stage')
self.stage_weights = opt.get('stage_weights')
self.curr_stage = 0
# load pretrained models
load_path = self.opt['path'].get('pretrain_network_g', None)
if load_path is not None:
self.load_network(self.net_g, load_path,
self.opt['path'].get('strict_load_g', True))
if self.is_train:
self.init_training_settings()
def init_training_settings(self):
self.net_g.train()
train_opt = self.opt['train']
for key in train_opt.keys():
if key.endswith('_opt'):
type = train_opt[key].pop('type')
cri_cls = getattr(loss_module, type)
for i in range(self.num_layers):
self.losses[type + str(i)] = cri_cls(**train_opt[key]).to(self.device)
if len(self.losses) == 0:
raise ValueError('Not Define Losses.')
# set up optimizers and schedulers
self.setup_optimizers()
def setup_optimizers(self):
train_opt = self.opt['train']
optim_params = []
for k, v in self.net_g.named_parameters():
if v.requires_grad:
optim_params.append(v)
else:
logger = get_root_logger()
logger.warning(f'Params {k} will not be optimized.')
optim_type = train_opt['optim_g'].pop('type')
if optim_type == 'Adam':
self.optimizer_g = torch.optim.Adam(optim_params,
**train_opt['optim_g'])
else:
raise NotImplementedError(
f'optimizer {optim_type} is not supperted yet.')
self.optimizers.append(self.optimizer_g)
def lr_decay(self, curr_iter):
if self.decay_freq and self.gamma and curr_iter != 0 and curr_iter % self.decay_freq == 0:
for optimizer in self.optimizers:
for param_group in optimizer.param_groups:
param_group['lr'] *= self.gamma
def __make_pyramid__(self, tensor, interp_mode=None):
outputs = [tensor]
resize_scale = 1. / self.scale
for i in range(self.num_layers - 1):
if interp_mode == None:
sub_tensor = interpolate(tensor.detach(), scale_factor=resize_scale, mode=self.interp_mode)
else:
sub_tensor = interpolate(tensor.detach(), scale_factor=resize_scale, mode=interp_mode)
resize_scale *= 1. / self.scale
outputs.append(sub_tensor)
return outputs
def feed_data(self, data, is_training=True):
self.input = data['input'].to(self.device)
self.input = self.__make_pyramid__(self.input)
if data.get('mask') != None:
self.mask = data['mask'].to(self.device)
else:
self.mask = torch.ones_like(data['input']).to(self.device)
self.mask = self.__make_pyramid__(self.mask, 'nearest')
if 'gt' in data:
self.gt = data['gt'].to(self.device)
self.gt = self.__make_pyramid__(self.gt)
def get_masked_output(self):
return [(x+1)*y[:, :x.shape[1], :, :] - 1 for x, y in zip(self.output, self.mask)]
def optimize_parameters(self, current_iter):
self.optimizer_g.zero_grad()
self.output = self.net_g(self.input)
self.output = self.get_masked_output()
l_total = 0
loss_dict = OrderedDict()
# pixel loss
for loss_type in self.losses.keys():
idx = int(loss_type[-1])
loss = self.losses[loss_type](self.output[idx], self.gt[idx])
if isinstance(loss, tuple):
# perceptual loss
l_total = l_total + loss[0]
loss_dict[loss_type] = loss[0]
else:
l_total = l_total + loss
loss_dict[loss_type] = loss
l_total.backward()
self.optimizer_g.step()
self.log_dict = self.reduce_loss_dict(loss_dict)
if self.multi_stage != None and current_iter >= self.multi_stage[self.curr_stage] \
and self.curr_stage < len(self.multi_stage)-1:
self.curr_stage += 1
print(f'change weight from {self.stage_weights[self.curr_stage-1]} to {self.stage_weights[self.curr_stage]}')
for loss_type in self.losses.keys():
# loss0->loss2, small->large
idx = int(loss_type[-1])
self.losses[loss_type].loss_weight = self.stage_weights[self.curr_stage][idx]
def test(self):
self.net_g.eval()
with torch.no_grad():
self.output = self.net_g(self.input)
self.output = self.get_masked_output()
self.net_g.train()
def dist_validation(self, dataloader, current_iter, tb_logger, save_img):
logger = get_root_logger()
logger.info('Only support single GPU validation.')
self.nondist_validation(dataloader, current_iter, tb_logger, save_img)
def nondist_validation(self, dataloader, current_iter, tb_logger,
save_img):
dataset_name = dataloader.dataset.opt['name']
with_metrics = self.opt['val'].get('metrics') is not None
if with_metrics:
self.metric_results = {
metric: 0
for metric in self.opt['val']['metrics'].keys()
}
pbar = tqdm(total=len(dataloader), unit='image')
for idx, val_data in enumerate(dataloader):
img_name = os.path.splitext(os.path.basename(val_data['input_path'][0]))[0]
self.feed_data(val_data, is_training=False)
self.test()
visuals = self.get_current_visuals()
relight_img = tensor2img([visuals['result']], min_max=(-1, 1))
if 'gt' in visuals:
gt_img = tensor2img([visuals['gt']], min_max=(-1, 1))
# tentative for out of GPU memory
torch.cuda.empty_cache()
if self.opt['val']['save_tb_img'] and img_name in self.opt['val']['save_tb_select']:
input_img_rgb = tensor2img(self.input[0][:, :3, :, :], rgb2bgr=False, min_max=(-1, 1))
relight_img_rgb = cv2.cvtColor(relight_img, cv2.COLOR_BGR2RGB)
gt_img_rgb = cv2.cvtColor(gt_img, cv2.COLOR_BGR2RGB)
img = col_stitch([input_img_rgb, relight_img_rgb, gt_img_rgb])
tb_logger.add_images(f'eval_sample/{img_name}', np.array(img), current_iter, dataformats='HWC')
if save_img:
if self.opt['is_train']:
save_img_path = os.path.join(self.opt['path']['visualization'],
img_name,
f'{img_name}_{current_iter}.png')
else:
if self.opt.get('val').get('suffix'):
save_img_path = os.path.join(
self.opt['path']['visualization'], dataset_name,
f'{img_name}.png')
else:
save_img_path = os.path.join(
self.opt['path']['visualization'], dataset_name,
f'{img_name}.png')
imwrite(relight_img, save_img_path)
if with_metrics:
# calculate metrics
opt_metric = deepcopy(self.opt['val']['metrics'])
for name, opt_ in opt_metric.items():
metric_type = opt_.pop('type')
self.metric_results[name] += getattr(
metric_module, metric_type)(relight_img, gt_img, **opt_)
pbar.update(1)
pbar.set_description(f'Test {img_name}')
pbar.close()
if with_metrics:
for metric in self.metric_results.keys():
self.metric_results[metric] /= (idx + 1)
if self.best_cri != None and self.best_cri['name'] == metric:
self.save_best(metric, self.metric_results[metric], current_iter)
elif self.best_cri2 != None and self.best_cri2['name'] == metric:
self.save_best2(metric, self.metric_results[metric], current_iter)
self._log_validation_metric_values(current_iter, dataset_name,
tb_logger)
def _log_validation_metric_values(self, current_iter, dataset_name,
tb_logger):
log_str = f'Validation {dataset_name}\n'
for metric, value in self.metric_results.items():
log_str += f'\t # {metric}: {value:.4f}\n'
if self.best_cri != None:
log_str += f'\t # best_{self.best_cri["name"]}: {self.best_cri["val"]:.4f}({self.best_cri["iter"]})\n'
if self.best_cri2 != None:
log_str += f'\t # best_{self.best_cri2["name"]}: {self.best_cri2["val"]:.4f}({self.best_cri2["iter"]})\n'
logger = get_root_logger()
logger.info(log_str)
if tb_logger:
for metric, value in self.metric_results.items():
tb_logger.add_scalar(f'metrics/{metric}', value, current_iter)
def get_current_visuals(self):
out_dict = OrderedDict()
out_dict['input'] = self.input[0].detach().cpu()
out_dict['result'] = self.output[0].detach().cpu()
if hasattr(self, 'gt'):
out_dict['gt'] = self.gt[0].detach().cpu()
return out_dict
    def save(self, epoch, current_iter):
        """Checkpoint the generator network.

        `epoch` is accepted for interface compatibility but unused here;
        `current_iter` may also be a string tag (e.g. '<metric>_best').
        """
        self.save_network(self.net_g, 'net_g', current_iter)
def save_best(self, name, val, current_iter):
flag = False
if self.best_cri['greater']:
flag = True if self.best_cri['val'] < val else False
else:
flag = True if self.best_cri['val'] > val else False
if flag:
self.best_cri['val'] = val
self.best_cri['iter'] = current_iter
self.save(0, '{}_best'.format(name))
def save_best2(self, name, val, current_iter):
flag = False
if self.best_cri2['greater']:
flag = True if self.best_cri2['val'] < val else False
else:
flag = True if self.best_cri2['val'] > val else False
if flag:
self.best_cri2['val'] = val
self.best_cri2['iter'] = current_iter
self.save(0, '{}_best'.format(name))
class PyramidAnyRelightingModel(BaseModel):
    """Pyramid relighting model.

    Input/gt images are expanded into `layers` progressively downscaled
    tensors and every configured loss is instantiated once per pyramid
    level (key suffix 0..layers-1).
    """

    def __init__(self, opt):
        super(PyramidAnyRelightingModel, self).__init__(opt)
        self.num_layers = opt['layers']
        self.scale = opt['scale_factor']
        self.interp_mode = opt['interp_mode']
        # define network
        self.net_g = define_network(deepcopy(opt['network_g']))
        self.net_g = self.model_to_device(self.net_g)
        self.print_network(self.net_g)
        self.losses = dict()
        # Optional best-metric trackers: dicts with name/val/iter/greater.
        self.best_cri = opt.get('val').get('best_metrics')
        self.best_cri2 = opt.get('val').get('best_metrics2')
        # self.with_depth = opt['with_depty']
        if opt['is_train']:
            # Step-decay schedule parameters consumed by lr_decay().
            self.decay_freq = opt.get('train').get('decay_freq')
            self.gamma = opt.get('train').get('gamma')
        # load pretrained models
        load_path = self.opt['path'].get('pretrain_network_g', None)
        if load_path is not None:
            self.load_network(self.net_g, load_path,
                              self.opt['path'].get('strict_load_g', True))
        if self.is_train:
            self.init_training_settings()

    def init_training_settings(self):
        """Instantiate per-pyramid-level losses from `train.*_opt` entries."""
        self.net_g.train()
        train_opt = self.opt['train']
        for key in train_opt.keys():
            if key.endswith('_opt'):
                # NOTE(review): `type` shadows the builtin, and pop() mutates
                # the shared options dict (safe only if run once).
                type = train_opt[key].pop('type')
                cri_cls = getattr(loss_module, type)
                for i in range(self.num_layers):
                    # One loss instance per pyramid level, keyed '<Type><i>'.
                    self.losses[type + str(i)] = cri_cls(**train_opt[key]).to(self.device)
                    # if i == 2:
                    #     self.losses[type + str(i)].loss_weight = .5
        if len(self.losses) == 0:
            raise ValueError('Not Define Losses.')
        # set up optimizers and schedulers
        self.setup_optimizers()

    def setup_optimizers(self):
        """Create the Adam optimizer over all trainable generator params."""
        train_opt = self.opt['train']
        optim_params = []
        for k, v in self.net_g.named_parameters():
            if v.requires_grad:
                optim_params.append(v)
            else:
                logger = get_root_logger()
                logger.warning(f'Params {k} will not be optimized.')
        optim_type = train_opt['optim_g'].pop('type')
        if optim_type == 'Adam':
            self.optimizer_g = torch.optim.Adam(optim_params,
                                                **train_opt['optim_g'])
        else:
            raise NotImplementedError(
                f'optimizer {optim_type} is not supperted yet.')
        self.optimizers.append(self.optimizer_g)

    def lr_decay(self, curr_iter):
        """Multiply all learning rates by `gamma` every `decay_freq` iters."""
        if self.decay_freq and self.gamma and curr_iter != 0 and curr_iter % self.decay_freq == 0:
            for optimizer in self.optimizers:
                for param_group in optimizer.param_groups:
                    param_group['lr'] *= self.gamma

    def __make_pyramid__(self, tensor, interp_mode=None):
        """Return [tensor, down(1/scale), down(1/scale^2), ...] with
        `num_layers` entries; downscaled levels are detached."""
        outputs = [tensor]
        resize_scale = 1. / self.scale
        for i in range(self.num_layers - 1):
            if interp_mode == None:
                sub_tensor = interpolate(tensor.detach(), scale_factor=resize_scale, mode=self.interp_mode)
            else:
                sub_tensor = interpolate(tensor.detach(), scale_factor=resize_scale, mode=interp_mode)
            resize_scale *= 1. / self.scale
            outputs.append(sub_tensor)
        return outputs

    def feed_data(self, data, is_training=True):
        """Move a batch to the device and expand input/gt into pyramids."""
        self.input = data['input'].to(self.device)
        self.input_light = data['input_light'].to(self.device)
        self.input = self.__make_pyramid__(self.input)
        if 'gt' in data:
            self.gt = data['gt'].to(self.device)
            self.gt_light = data['gt_light'].to(self.device)
            self.gt = self.__make_pyramid__(self.gt)

    def optimize_parameters(self, current_iter):
        """One training step: forward, per-level losses, backward, update."""
        self.optimizer_g.zero_grad()
        self.output = self.net_g(self.input, self.gt_light)
        l_total = 0
        loss_dict = OrderedDict()
        # pixel loss
        for loss_type in self.losses.keys():
            # Last character of the loss key encodes the pyramid level.
            idx = int(loss_type[-1])
            loss = self.losses[loss_type](self.output[idx], self.gt[idx])
            if isinstance(loss, tuple):
                # perceptual loss
                l_total = l_total + loss[0]
                loss_dict[loss_type] = loss[0]
            else:
                l_total = l_total + loss
                loss_dict[loss_type] = loss
        l_total.backward()
        self.optimizer_g.step()
        self.log_dict = self.reduce_loss_dict(loss_dict)

    def test(self):
        """Inference forward pass under no_grad; restores train mode after."""
        self.net_g.eval()
        with torch.no_grad():
            self.output = self.net_g(self.input, self.gt_light)
        self.net_g.train()

    def dist_validation(self, dataloader, current_iter, tb_logger, save_img):
        """Distributed entry point; falls back to single-GPU validation."""
        logger = get_root_logger()
        logger.info('Only support single GPU validation.')
        self.nondist_validation(dataloader, current_iter, tb_logger, save_img)

    def nondist_validation(self, dataloader, current_iter, tb_logger,
                           save_img):
        """Validate on `dataloader`: accumulate metrics, optionally dump
        images / tensorboard samples, update best checkpoints."""
        dataset_name = dataloader.dataset.opt['name']
        with_metrics = self.opt['val'].get('metrics') is not None
        if with_metrics:
            self.metric_results = {
                metric: 0
                for metric in self.opt['val']['metrics'].keys()
            }
        pbar = tqdm(total=len(dataloader), unit='image')
        for idx, val_data in enumerate(dataloader):
            img_name = os.path.splitext(os.path.basename(val_data['input_path'][0]))[0]
            self.feed_data(val_data, is_training=False)
            self.test()
            visuals = self.get_current_visuals()
            relight_img = tensor2img([visuals['result']], min_max=(-1, 1))
            # NOTE(review): gt_img is only bound when 'gt' is present but is
            # used unconditionally below — verify datasets always supply gt.
            if 'gt' in visuals:
                gt_img = tensor2img([visuals['gt']], min_max=(-1, 1))
            # tentative for out of GPU memory
            torch.cuda.empty_cache()
            if self.opt['val']['save_tb_img'] and img_name in self.opt['val']['save_tb_select']:
                # Stitch input | prediction | gt into one RGB strip for tb.
                input_img_rgb = tensor2img(self.input[0][:, :3, :, :], rgb2bgr=False, min_max=(-1, 1))
                relight_img_rgb = cv2.cvtColor(relight_img, cv2.COLOR_BGR2RGB)
                gt_img_rgb = cv2.cvtColor(gt_img, cv2.COLOR_BGR2RGB)
                img = col_stitch([input_img_rgb, relight_img_rgb, gt_img_rgb])
                tb_logger.add_images(f'eval_sample/{img_name}', np.array(img), current_iter, dataformats='HWC')
            if save_img:
                if self.opt['is_train']:
                    save_img_path = os.path.join(self.opt['path']['visualization'],
                                                 img_name,
                                                 f'{img_name}_{current_iter}.png')
                else:
                    # NOTE(review): both branches build the same path; the
                    # 'suffix' option is currently ignored.
                    if self.opt.get('val').get('suffix'):
                        save_img_path = os.path.join(
                            self.opt['path']['visualization'], dataset_name,
                            f'{img_name}.png')
                    else:
                        save_img_path = os.path.join(
                            self.opt['path']['visualization'], dataset_name,
                            f'{img_name}.png')
                imwrite(relight_img, save_img_path)
            if with_metrics:
                # calculate metrics
                opt_metric = deepcopy(self.opt['val']['metrics'])
                for name, opt_ in opt_metric.items():
                    metric_type = opt_.pop('type')
                    self.metric_results[name] += getattr(
                        metric_module, metric_type)(relight_img, gt_img, **opt_)
            pbar.update(1)
            pbar.set_description(f'Test {img_name}')
        pbar.close()
        if with_metrics:
            for metric in self.metric_results.keys():
                # idx is the last loop index, so this averages over images.
                self.metric_results[metric] /= (idx + 1)
                if self.best_cri != None and self.best_cri['name'] == metric:
                    self.save_best(metric, self.metric_results[metric], current_iter)
                elif self.best_cri2 != None and self.best_cri2['name'] == metric:
                    self.save_best2(metric, self.metric_results[metric], current_iter)
            self._log_validation_metric_values(current_iter, dataset_name,
                                               tb_logger)

    def _log_validation_metric_values(self, current_iter, dataset_name,
                                      tb_logger):
        """Emit averaged metrics (and best records) to logger/tensorboard."""
        log_str = f'Validation {dataset_name}\n'
        for metric, value in self.metric_results.items():
            log_str += f'\t # {metric}: {value:.4f}\n'
        if self.best_cri != None:
            log_str += f'\t # best_{self.best_cri["name"]}: {self.best_cri["val"]:.4f}({self.best_cri["iter"]})\n'
        if self.best_cri2 != None:
            log_str += f'\t # best_{self.best_cri2["name"]}: {self.best_cri2["val"]:.4f}({self.best_cri2["iter"]})\n'
        logger = get_root_logger()
        logger.info(log_str)
        if tb_logger:
            for metric, value in self.metric_results.items():
                tb_logger.add_scalar(f'metrics/{metric}', value, current_iter)

    def get_current_visuals(self):
        """First-sample input/result (and gt) as detached CPU tensors."""
        out_dict = OrderedDict()
        out_dict['input'] = self.input[0].detach().cpu()
        out_dict['result'] = self.output[0].detach().cpu()
        if hasattr(self, 'gt'):
            out_dict['gt'] = self.gt[0].detach().cpu()
        return out_dict

    def save(self, epoch, current_iter):
        """Checkpoint the generator (epoch is unused; current_iter may be a
        string tag such as '<metric>_best')."""
        self.save_network(self.net_g, 'net_g', current_iter)

    def save_best(self, name, val, current_iter):
        """Update best_cri and save a '<name>_best' checkpoint on improvement."""
        flag = False
        if self.best_cri['greater']:
            flag = True if self.best_cri['val'] < val else False
        else:
            flag = True if self.best_cri['val'] > val else False
        if flag:
            self.best_cri['val'] = val
            self.best_cri['iter'] = current_iter
            self.save(0, '{}_best'.format(name))

    def save_best2(self, name, val, current_iter):
        """Same as save_best but tracks the secondary criterion best_cri2."""
        flag = False
        if self.best_cri2['greater']:
            flag = True if self.best_cri2['val'] < val else False
        else:
            flag = True if self.best_cri2['val'] > val else False
        if flag:
            self.best_cri2['val'] = val
            self.best_cri2['iter'] = current_iter
            self.save(0, '{}_best'.format(name))
class DeepPortraitRelightingModel(BaseModel):
    """DPR-style portrait relighting model.

    The generator predicts the relit image together with a feature map and
    an estimated light code; training adds MSE losses on the feature
    (weight 0.5) and on the light code (weight 1) to the configured
    per-image losses.
    """

    def __init__(self, opt):
        super(DeepPortraitRelightingModel, self).__init__(opt)
        # define network
        self.net_g = define_network(deepcopy(opt['network_g']))
        self.net_g = self.model_to_device(self.net_g)
        self.print_network(self.net_g)
        self.losses = dict()
        # Auxiliary losses on predicted light code and internal features.
        self.light_loss = MSELoss(loss_weight=1)
        self.feat_loss = MSELoss(loss_weight=0.5)
        # Optional best-metric trackers: dicts with name/val/iter/greater.
        self.best_cri = opt.get('val').get('best_metrics')
        self.best_cri2 = opt.get('val').get('best_metrics2')
        if opt['is_train']:
            # Step-decay schedule parameters consumed by lr_decay().
            self.decay_freq = opt.get('train').get('decay_freq')
            self.gamma = opt.get('train').get('gamma')
        # load pretrained models
        load_path = self.opt['path'].get('pretrain_network_g', None)
        if load_path is not None:
            self.load_network(self.net_g, load_path,
                              self.opt['path'].get('strict_load_g', True))
        if self.is_train:
            self.init_training_settings()

    def init_training_settings(self):
        """Instantiate one loss per `train.*_opt` entry."""
        self.net_g.train()
        train_opt = self.opt['train']
        for key in train_opt.keys():
            if key.endswith('_opt'):
                # NOTE(review): `type` shadows the builtin, and pop() mutates
                # the shared options dict (safe only if run once).
                type = train_opt[key].pop('type')
                cri_cls = getattr(loss_module, type)
                self.losses[type] = cri_cls(**train_opt[key]).to(self.device)
        if len(self.losses) == 0:
            raise ValueError('Not Define Losses.')
        # set up optimizers and schedulers
        self.setup_optimizers()

    def setup_optimizers(self):
        """Create the Adam optimizer over all trainable generator params."""
        train_opt = self.opt['train']
        optim_params = []
        for k, v in self.net_g.named_parameters():
            if v.requires_grad:
                optim_params.append(v)
            else:
                logger = get_root_logger()
                logger.warning(f'Params {k} will not be optimized.')
        optim_type = train_opt['optim_g'].pop('type')
        if optim_type == 'Adam':
            self.optimizer_g = torch.optim.Adam(optim_params,
                                                **train_opt['optim_g'])
        else:
            raise NotImplementedError(
                f'optimizer {optim_type} is not supperted yet.')
        self.optimizers.append(self.optimizer_g)

    def lr_decay(self, curr_iter):
        """Multiply all learning rates by `gamma` every `decay_freq` iters."""
        if self.decay_freq and self.gamma and curr_iter != 0 and curr_iter % self.decay_freq == 0:
            for optimizer in self.optimizers:
                for param_group in optimizer.param_groups:
                    param_group['lr'] *= self.gamma

    def feed_data(self, data, is_training=True):
        """Move a batch to the device; reshape light codes to (B, C, 1, 1).

        NOTE(review): b, c come from input_light and are reused for
        gt_light — assumes both light codes share the same shape.
        """
        self.input = data['input'].to(self.device)
        self.input_light = data['input_light'].to(self.device)
        b, c = self.input_light.shape
        self.input_light = self.input_light.view(b, c, 1, 1)
        if 'gt' in data:
            self.gt = data['gt'].to(self.device)
            self.gt_light = data['gt_light'].to(self.device)
            self.gt_light = self.gt_light.view(b, c, 1, 1)

    def optimize_parameters(self, current_iter):
        """One training step: forward, image + feat + light losses, update."""
        self.optimizer_g.zero_grad()
        # net_g returns (relit image, feature, predicted light, reference
        # feature); 4 presumably selects a skip/layer count — TODO confirm.
        self.output, self.out_feat, self.out_light, self.ori_feat \
            = self.net_g(self.input, self.gt_light, 4, self.gt)
        l_total = 0
        loss_dict = OrderedDict()
        # pixel loss
        for loss_type in self.losses.keys():
            loss = self.losses[loss_type](self.output, self.gt)
            if isinstance(loss, tuple):
                # perceptual loss
                l_total = l_total + loss[0]
                loss_dict[loss_type] = loss[0]
            else:
                l_total = l_total + loss
                loss_dict[loss_type] = loss
        loss_dict['feat_loss'] = self.feat_loss(self.out_feat, self.ori_feat)
        loss_dict['light_loss'] = self.light_loss(self.out_light, self.gt_light)
        l_total += loss_dict['feat_loss'] + loss_dict['light_loss']
        l_total.backward()
        self.optimizer_g.step()
        self.log_dict = self.reduce_loss_dict(loss_dict)

    def test(self):
        """Inference forward (gt=None) under no_grad; restores train mode."""
        self.net_g.eval()
        with torch.no_grad():
            self.output, self.out_feat, self.out_light, self.ori_feat \
                = self.net_g(self.input, self.gt_light, 4, None)
        self.net_g.train()

    def dist_validation(self, dataloader, current_iter, tb_logger, save_img):
        """Distributed entry point; falls back to single-GPU validation."""
        logger = get_root_logger()
        logger.info('Only support single GPU validation.')
        self.nondist_validation(dataloader, current_iter, tb_logger, save_img)

    def nondist_validation(self, dataloader, current_iter, tb_logger,
                           save_img):
        """Validate on `dataloader`: accumulate metrics, optionally dump
        images / tensorboard samples, update best checkpoints."""
        dataset_name = dataloader.dataset.opt['name']
        with_metrics = self.opt['val'].get('metrics') is not None
        if with_metrics:
            self.metric_results = {
                metric: 0
                for metric in self.opt['val']['metrics'].keys()
            }
        pbar = tqdm(total=len(dataloader), unit='image')
        for idx, val_data in enumerate(dataloader):
            img_name = os.path.splitext(os.path.basename(val_data['input_path'][0]))[0]
            self.feed_data(val_data, is_training=False)
            self.test()
            visuals = self.get_current_visuals()
            relight_img = tensor2img([visuals['result']], min_max=(-1, 1))
            # NOTE(review): gt_img is only bound when 'gt' is present but is
            # used unconditionally below — verify datasets always supply gt.
            if 'gt' in visuals:
                gt_img = tensor2img([visuals['gt']], min_max=(-1, 1))
            # tentative for out of GPU memory
            torch.cuda.empty_cache()
            if self.opt['val']['save_tb_img'] and img_name in self.opt['val']['save_tb_select']:
                # Stitch input | prediction | gt into one RGB strip for tb.
                input_img_rgb = tensor2img(self.input[0][:, :3, :, :], rgb2bgr=False, min_max=(-1, 1))
                relight_img_rgb = cv2.cvtColor(relight_img, cv2.COLOR_BGR2RGB)
                gt_img_rgb = cv2.cvtColor(gt_img, cv2.COLOR_BGR2RGB)
                img = col_stitch([input_img_rgb, relight_img_rgb, gt_img_rgb])
                tb_logger.add_images(f'eval_sample/{img_name}', np.array(img), current_iter, dataformats='HWC')
            if save_img:
                if self.opt['is_train']:
                    save_img_path = os.path.join(self.opt['path']['visualization'],
                                                 img_name,
                                                 f'{img_name}_{current_iter}.png')
                else:
                    # NOTE(review): both branches build the same path; the
                    # 'suffix' option is currently ignored.
                    if self.opt.get('val').get('suffix'):
                        save_img_path = os.path.join(
                            self.opt['path']['visualization'], dataset_name,
                            f'{img_name}.png')
                    else:
                        save_img_path = os.path.join(
                            self.opt['path']['visualization'], dataset_name,
                            f'{img_name}.png')
                imwrite(relight_img, save_img_path)
            if with_metrics:
                # calculate metrics
                opt_metric = deepcopy(self.opt['val']['metrics'])
                for name, opt_ in opt_metric.items():
                    metric_type = opt_.pop('type')
                    self.metric_results[name] += getattr(
                        metric_module, metric_type)(relight_img, gt_img, **opt_)
            pbar.update(1)
            pbar.set_description(f'Test {img_name}')
        pbar.close()
        if with_metrics:
            for metric in self.metric_results.keys():
                # idx is the last loop index, so this averages over images.
                self.metric_results[metric] /= (idx + 1)
                if self.best_cri != None and self.best_cri['name'] == metric:
                    self.save_best(metric, self.metric_results[metric], current_iter)
                elif self.best_cri2 != None and self.best_cri2['name'] == metric:
                    self.save_best2(metric, self.metric_results[metric], current_iter)
            self._log_validation_metric_values(current_iter, dataset_name,
                                               tb_logger)

    def _log_validation_metric_values(self, current_iter, dataset_name,
                                      tb_logger):
        """Emit averaged metrics (and best records) to logger/tensorboard."""
        log_str = f'Validation {dataset_name}\n'
        for metric, value in self.metric_results.items():
            log_str += f'\t # {metric}: {value:.4f}\n'
        if self.best_cri != None:
            log_str += f'\t # best_{self.best_cri["name"]}: {self.best_cri["val"]:.4f}({self.best_cri["iter"]})\n'
        if self.best_cri2 != None:
            log_str += f'\t # best_{self.best_cri2["name"]}: {self.best_cri2["val"]:.4f}({self.best_cri2["iter"]})\n'
        logger = get_root_logger()
        logger.info(log_str)
        if tb_logger:
            for metric, value in self.metric_results.items():
                tb_logger.add_scalar(f'metrics/{metric}', value, current_iter)

    def get_current_visuals(self):
        """First-sample input/result (and gt) as detached CPU tensors."""
        out_dict = OrderedDict()
        out_dict['input'] = self.input[0].detach().cpu()
        out_dict['result'] = self.output[0].detach().cpu()
        if hasattr(self, 'gt'):
            out_dict['gt'] = self.gt[0].detach().cpu()
        return out_dict

    def save(self, epoch, current_iter):
        """Checkpoint the generator (epoch is unused; current_iter may be a
        string tag such as '<metric>_best')."""
        self.save_network(self.net_g, 'net_g', current_iter)

    def save_best(self, name, val, current_iter):
        """Update best_cri and save a '<name>_best' checkpoint on improvement."""
        flag = False
        if self.best_cri['greater']:
            flag = True if self.best_cri['val'] < val else False
        else:
            flag = True if self.best_cri['val'] > val else False
        if flag:
            self.best_cri['val'] = val
            self.best_cri['iter'] = current_iter
            self.save(0, '{}_best'.format(name))

    def save_best2(self, name, val, current_iter):
        """Same as save_best but tracks the secondary criterion best_cri2."""
        flag = False
        if self.best_cri2['greater']:
            flag = True if self.best_cri2['val'] < val else False
        else:
            flag = True if self.best_cri2['val'] > val else False
        if flag:
            self.best_cri2['val'] = val
            self.best_cri2['iter'] = current_iter
            self.save(0, '{}_best'.format(name))
| 32,717 | 39.69403 | 121 | py |
IAN | IAN-master/network/arch/full_model_one2one_arch.py | import torch
import torch.nn as nn
import torch.nn.functional as F
conv_s2 = 4
pad0 = 1
align_corners=True
def mean_channels(F):
    """Per-channel spatial mean of a 4-D (N, C, H, W) tensor.

    Returns an (N, C, 1, 1) tensor holding the mean over H and W.
    NOTE: the parameter name shadows `torch.nn.functional as F` inside this
    body; kept for interface compatibility with existing callers.
    """
    assert F.dim() == 4
    h, w = F.size(2), F.size(3)
    total = F.sum(dim=3, keepdim=True).sum(dim=2, keepdim=True)
    return total / (h * w)
def std_channels(F):
    """Per-channel population standard deviation over the H x W extent of
    an (N, C, H, W) tensor; output shape is (N, C, 1, 1)."""
    assert F.dim() == 4
    n_pix = F.size(2) * F.size(3)
    centered = F - F.sum(3, keepdim=True).sum(2, keepdim=True) / n_pix
    var = centered.pow(2).sum(3, keepdim=True).sum(2, keepdim=True) / n_pix
    return var.pow(0.5)
class DirectAwareAtt(nn.Module):
    """Channel attention driven by both spatial mean and spatial std.

    Two squeeze-excite style gates (1x1 conv -> ReLU -> 1x1 conv ->
    sigmoid) are computed from the pooled mean and the per-channel std;
    the input is rescaled by the average of the two gates.
    """

    def __init__(self, channels=144, reduction=16):
        super().__init__()
        self.reduction = reduction
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.std = std_channels
        self.avg_att_module = nn.Sequential(
            nn.Conv2d(channels, channels // reduction, 1, 1, 0),
            nn.ReLU(True),
            nn.Conv2d(channels // reduction, channels, 1, 1, 0),
            nn.Sigmoid()
        )
        self.std_att_module = nn.Sequential(
            nn.Conv2d(channels, channels // reduction, 1, 1, 0),
            nn.ReLU(True),
            nn.Conv2d(channels // reduction, channels, 1, 1, 0),
            nn.Sigmoid()
        )

    def forward(self, x):
        # (N, C, 1, 1) channel statistics of the input.
        stat_std = self.std(x)
        stat_avg = self.avg_pool(x)
        # Average the two sigmoid gates, then rescale the input.
        gate = (self.avg_att_module(stat_avg) + self.std_att_module(stat_std)) / 2
        return x * gate
class DilateBlock(nn.Module):
    """Three parallel 3x3 convolutions with dilations 1/2/3; the outputs
    are concatenated along channels and re-weighted by DirectAwareAtt."""

    def __init__(self, channels):
        super().__init__()
        self.conv3x3_d1 = nn.Sequential(nn.Conv2d(channels, channels, 3, 1, 1, 1))
        self.conv3x3_d2 = nn.Sequential(nn.Conv2d(channels, channels, 3, 1, 2, 2))
        self.conv3x3_d3 = nn.Sequential(nn.Conv2d(channels, channels, 3, 1, 3, 3))
        self.att_module = DirectAwareAtt(channels * 3, 16)
        # Xavier-init every conv wrapped in a Sequential (including the
        # attention module's gates).
        for m in self.modules():
            if isinstance(m, nn.Sequential):
                for p in m:
                    if isinstance(p, (nn.Conv2d, nn.ConvTranspose2d)):
                        nn.init.xavier_normal_(p.weight)
                        nn.init.constant_(p.bias, 0.01)

    def forward(self, x):
        branches = [self.conv3x3_d1(x), self.conv3x3_d2(x), self.conv3x3_d3(x)]
        return self.att_module(torch.cat(branches, dim=1))
class DilateResBlock(nn.Module):
    """Residual wrapper: x + Conv3x3(ReLU(DilateBlock(x))).

    The inner conv fuses the 3*channels concatenated branch features back
    down to `channels` so the residual addition lines up.
    """

    def __init__(self, channels):
        super().__init__()
        self.block = nn.Sequential(
            DilateBlock(channels),
            nn.ReLU(True),
            nn.Conv2d(channels * 3, channels, 3, 1, 1),
        )
        # Xavier-init convs held directly in a Sequential.
        for m in self.modules():
            if isinstance(m, nn.Sequential):
                for p in m:
                    if isinstance(p, (nn.Conv2d, nn.ConvTranspose2d)):
                        nn.init.xavier_normal_(p.weight)
                        nn.init.constant_(p.bias, 0.01)

    def forward(self, x):
        return x + self.block(x)
class UpConv(nn.Module):
    """Bilinear upsampling by `scale` followed by a 3x3 convolution.

    Fixes two latent defects of the original implementation:
    * `scale` was stored but the forward pass hard-coded ``scale_factor=2``;
      it now honors the constructor argument (default 2, so all existing
      callers behave identically).
    * the Xavier-init loop only matched convs wrapped in ``nn.Sequential``
      and therefore never fired here; the conv is now initialized directly,
      consistent with the other blocks in this file.
    """

    def __init__(self, inc, outc, scale=2):
        super(UpConv, self).__init__()
        self.scale = scale
        self.conv = nn.Conv2d(inc, outc, 3, stride=1, padding=1)
        nn.init.xavier_normal_(self.conv.weight)
        nn.init.constant_(self.conv.bias, 0.01)

    def forward(self, x):
        up = F.interpolate(x, scale_factor=self.scale, mode='bilinear',
                           align_corners=True)
        return self.conv(up)
class RGBEncoder(nn.Module):
    """Three-stage RGB encoder producing full, 1/2 and 1/4 resolution
    features, finished by a stack of four DilateResBlocks.

    Optional `c_x*` condition features and `pre_x*` features from a
    coarser stage are added as skip connections.  The original forward
    added `c_x0/c_x1/c_x2` unconditionally, raising a TypeError whenever
    their None defaults were used; they are now guarded like `pre_x*`,
    which keeps every existing call site working.
    """

    def __init__(self, in_channels, channels, filter_size):
        super(RGBEncoder, self).__init__()
        padding = int((filter_size - 1) / 2)
        self.init = nn.Sequential(nn.Conv2d(in_channels, channels, filter_size, stride=1, padding=padding),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding))
        self.enc1 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels, channels, conv_s2, stride=2, padding=pad0),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding),
                                  )
        self.enc2 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels, channels, conv_s2, stride=2, padding=pad0),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding),
                                  )
        self.bottleneck = nn.Sequential(*[DilateResBlock(channels) for _ in range(4)])
        # Init Weights: Xavier for convs held in Sequentials.
        for m in self.modules():
            if isinstance(m, nn.Sequential):
                for p in m:
                    if isinstance(p, (nn.Conv2d, nn.ConvTranspose2d)):
                        nn.init.xavier_normal_(p.weight)
                        nn.init.constant_(p.bias, 0.01)

    def forward(self, input, scale=2, c_x0=None, c_x1=None, c_x2=None,
                pre_x2=None, pre_x3=None, pre_x4=None):
        # Full-resolution features (+ optional condition / coarser skips).
        x0 = self.init(input)
        if c_x0 is not None:
            x0 = x0 + c_x0
        if pre_x4 is not None:
            x0 = x0 + F.interpolate(pre_x4, scale_factor=scale, mode='bilinear', align_corners=align_corners)
        x1 = self.enc1(x0)  # 1/2 input size
        if c_x1 is not None:
            x1 = x1 + c_x1
        if pre_x3 is not None:  # skip connection from the previous stage
            x1 = x1 + F.interpolate(pre_x3, scale_factor=scale, mode='bilinear', align_corners=align_corners)
        x2 = self.enc2(x1)  # 1/4 input size
        if c_x2 is not None:
            x2 = x2 + c_x2
        if pre_x2 is not None:  # skip connection from the previous stage
            x2 = x2 + F.interpolate(pre_x2, scale_factor=scale, mode='bilinear', align_corners=align_corners)
        x2 = self.bottleneck(x2)
        return x0, x1, x2
class CondEncoder(nn.Module):
    """Five-stage condition encoder (full down to 1/16 resolution).

    Two normalized [-1, 1] coordinate channels are concatenated to the
    condition input (hence `in_channels += 2`).  The original forward
    built the coordinate grids with a hard-coded ``.cuda()`` and forced
    float32, which broke CPU and mixed-precision runs; the grids are now
    created on the input's device with the input's dtype — identical
    values, no other behavior change.
    """

    def __init__(self, in_channels, channels, filter_size):
        super(CondEncoder, self).__init__()
        in_channels += 2  # room for the (x, y) coordinate channels
        padding = int((filter_size - 1) / 2)
        self.init = nn.Sequential(nn.Conv2d(in_channels, channels, filter_size, stride=1, padding=padding),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding))
        self.enc1 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels, channels, conv_s2, stride=2, padding=pad0),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding), )
        self.enc2 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels, channels, conv_s2, stride=2, padding=pad0),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding), )
        self.enc3 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels, channels, conv_s2, stride=2, padding=pad0),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding), )
        self.enc4 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels, channels, conv_s2, stride=2, padding=pad0),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding), )
        # Init Weights: Xavier for convs held in Sequentials.
        for m in self.modules():
            if isinstance(m, nn.Sequential):
                for p in m:
                    if isinstance(p, (nn.Conv2d, nn.ConvTranspose2d)):
                        nn.init.xavier_normal_(p.weight)
                        nn.init.constant_(p.bias, 0.01)

    def forward(self, input, scale=2, pre_x=None):
        b, _, h, w = input.shape
        # Normalized [-1, 1] coordinate channels on the input's device/dtype
        # (the original hard-coded .cuda() and float32).
        x_code = (torch.arange(w, dtype=input.dtype, device=input.device) / (w - 1)) * 2 - 1
        y_code = (torch.arange(h, dtype=input.dtype, device=input.device) / (h - 1)) * 2 - 1
        grid_y, grid_x = torch.meshgrid(y_code, x_code)
        grid_y = grid_y.view(1, 1, h, w).expand(b, 1, h, w)
        grid_x = grid_x.view(1, 1, h, w).expand(b, 1, h, w)
        input = torch.cat([grid_x, grid_y, input], 1)
        x0 = self.init(input)
        if pre_x is not None:
            x0 = x0 + F.interpolate(pre_x, scale_factor=scale, mode='bilinear', align_corners=align_corners)
        x1 = self.enc1(x0)  # 1/2 input size
        x2 = self.enc2(x1)  # 1/4 input size
        x3 = self.enc3(x2)  # 1/8 input size
        x4 = self.enc4(x3)  # 1/16 input size
        # return the pre-activated features
        return x0, x1, x2, x3, x4
class RGBDecoder(nn.Module):
    """Decoder: upsamples 1/4-res features back to full resolution and
    predicts a 3-channel RGB image, mixing in skip features from the
    matching encoder and (optionally) an upsampled coarser decoder stage.

    The only behavioral code change from the original is the idiomatic
    `is not None` identity test in place of `!= None`.
    """

    def __init__(self, channels, filter_size):
        super(RGBDecoder, self).__init__()
        padding = int((filter_size - 1) / 2)
        self.dec2 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels // 2, channels // 2, filter_size, stride=1, padding=padding),
                                  nn.ReLU(True),
                                  UpConv(channels // 2, channels // 2),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels // 2, channels // 2, filter_size, stride=1, padding=padding),
                                  )
        self.dec1 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels // 2, channels // 2, filter_size, stride=1, padding=padding),
                                  nn.ReLU(True),
                                  UpConv(channels // 2, channels // 2),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels // 2, channels // 2, filter_size, stride=1, padding=padding),
                                  )
        self.prdct = nn.Sequential(nn.ReLU(True),
                                   nn.Conv2d(channels // 2, channels // 2, filter_size, stride=1, padding=padding),
                                   nn.ReLU(True),
                                   nn.Conv2d(channels // 2, 3, filter_size, stride=1, padding=padding))
        # Init Weights: Xavier for convs held in Sequentials.
        for m in self.modules():
            if isinstance(m, nn.Sequential):
                for p in m:
                    if isinstance(p, (nn.Conv2d, nn.ConvTranspose2d)):
                        nn.init.xavier_normal_(p.weight)
                        nn.init.constant_(p.bias, 0.01)

    def forward(self, pre_rgbx, pre_x2=None, pre_x3=None, pre_x4=None):
        # pre_rgbx holds encoder features: [full, 1/2, 1/4] resolution.
        x2 = pre_rgbx[2]
        x1 = pre_rgbx[1]
        x0 = pre_rgbx[0]
        if pre_x2 is not None:
            x2 = x2 + F.interpolate(pre_x2, scale_factor=2, mode='bilinear', align_corners=align_corners)
        x3 = self.dec2(x2)  # 1/2 input size
        if pre_x3 is not None:
            x3 = x3 + F.interpolate(pre_x3, scale_factor=2, mode='bilinear', align_corners=align_corners)
        x4 = self.dec1(x1 + x3)  # 1/1 input size
        if pre_x4 is not None:
            x4 = x4 + F.interpolate(pre_x4, scale_factor=2, mode='bilinear', align_corners=align_corners)
        ### prediction
        output_rgb = self.prdct(x4 + x0)
        return x2, x3, x4, output_rgb
class One2One(nn.Module):
    """Coarse-to-fine relighting network predicting at 1/4, 1/2 and full
    resolution, each stage refining the upsampled previous prediction.

    Args:
        in_channels (int): RGB channels of the input image.
        aux_channels (int): Channels of the auxiliary condition map that
            follows the RGB channels in the input tensor (may be 0).
        short_connection (bool): When True each stage predicts a residual
            added to the (resized) input image.

    Fixes over the original:
    * ``aux_channels == 0`` previously skipped the split and left
      ``input_rgb``/``enc_c`` undefined (NameError); ``torch.split``
      accepts a zero-sized section, so the split now runs unconditionally.
    * the ``+=`` residual adds mutated decoder outputs in place, which can
      trip autograd's version checks; they are now out-of-place.
    """

    def __init__(self, in_channels=3, aux_channels=4, short_connection=True):
        super(One2One, self).__init__()
        self.short_connection = short_connection
        self.in_channels = in_channels
        self.aux_channels = aux_channels
        denc_channels = 48
        cenc_channels = 48
        ddcd_channels = denc_channels + cenc_channels
        self.cond_encoder = CondEncoder(aux_channels, cenc_channels, 3)
        self.rgb_encoder1 = RGBEncoder(in_channels, denc_channels, 3)
        self.rgb_decoder1 = RGBDecoder(ddcd_channels, 3)
        self.rgb_encoder2 = RGBEncoder(2 * in_channels, denc_channels, 3)
        self.rgb_decoder2 = RGBDecoder(ddcd_channels, 3)
        self.rgb_encoder3 = RGBEncoder(2 * in_channels, denc_channels, 3)
        self.rgb_decoder3 = RGBDecoder(ddcd_channels, 3)

    def forward(self, x, enable_layers=None):
        # `enable_layers` is accepted for interface compatibility; unused.
        x = x[0]
        # torch.split handles aux_channels == 0 (empty second chunk).
        input_rgb, input_d = torch.split(x, [self.in_channels, self.aux_channels], 1)
        enc_c = self.cond_encoder(input_d)
        ## stage 1: 1/4 resolution
        input_rgb14 = F.interpolate(input_rgb, scale_factor=0.25, mode='bilinear', align_corners=align_corners)
        enc_rgb14 = self.rgb_encoder1(input_rgb14, 2, enc_c[2], enc_c[3], enc_c[4])  # enc_rgb [larger -> smaller size]
        dcd_rgb14 = self.rgb_decoder1(enc_rgb14)  # dec_rgb [smaller -> larger size]
        ori_pred_rgb14 = dcd_rgb14[3]
        if self.short_connection:
            # Out-of-place residual add (original used in-place +=).
            ori_pred_rgb14 = ori_pred_rgb14 + input_rgb14
        ## stage 2: 1/2 resolution, refines the upsampled stage-1 output
        input_rgb12 = F.interpolate(input_rgb, scale_factor=0.5, mode='bilinear', align_corners=align_corners)
        predict_rgb12 = F.interpolate(ori_pred_rgb14, scale_factor=2, mode='bilinear', align_corners=align_corners)
        input_12 = torch.cat((input_rgb12, predict_rgb12), 1)
        enc_rgb12 = self.rgb_encoder2(input_12, 2, enc_c[1], enc_c[2], enc_c[3])
        dcd_rgb12 = self.rgb_decoder2(enc_rgb12, dcd_rgb14[0], dcd_rgb14[1], dcd_rgb14[2])
        ori_pred_rgb12 = dcd_rgb12[3]
        if self.short_connection:
            ori_pred_rgb12 = ori_pred_rgb12 + input_rgb12
        ## stage 3: full resolution
        predict_rgb11 = F.interpolate(ori_pred_rgb12, scale_factor=2, mode='bilinear', align_corners=align_corners)
        input_11 = torch.cat((input_rgb, predict_rgb11), 1)
        enc_rgb11 = self.rgb_encoder3(input_11, 2, enc_c[0], enc_c[1], enc_c[2])
        dcd_rgb11 = self.rgb_decoder3(enc_rgb11, dcd_rgb12[0], dcd_rgb12[1], dcd_rgb12[2])
        output_rgb11 = dcd_rgb11[3]
        if self.short_connection:
            output_rgb11 = output_rgb11 + input_rgb
        return output_rgb11, ori_pred_rgb12, ori_pred_rgb14
| 14,082 | 40.178363 | 120 | py |
IAN | IAN-master/network/arch/full_model_one2one_woaux_arch.py | import torch
import torch.nn as nn
import torch.nn.functional as F
conv_s2 = 4
pad0 = 1
align_corners=True
def mean_channels(F):
    """Average of each (N, C) channel over its H x W spatial extent,
    returned with shape (N, C, 1, 1).

    NOTE: the parameter name shadows `torch.nn.functional as F` here;
    kept for interface compatibility.
    """
    assert F.dim() == 4
    return F.sum(3, keepdim=True).sum(2, keepdim=True) / float(F.size(2) * F.size(3))
def std_channels(F):
    """Population std of each channel over H x W; shape (N, C, 1, 1)."""
    assert F.dim() == 4
    count = F.size(2) * F.size(3)
    mu = F.sum(3, keepdim=True).sum(2, keepdim=True) / count
    var = (F - mu).pow(2).sum(3, keepdim=True).sum(2, keepdim=True) / count
    return var.pow(0.5)
class DirectAwareAtt(nn.Module):
    """Channel attention built from two pooled statistics (mean and std).

    Each statistic feeds a bottlenecked 1x1-conv gate ending in a sigmoid;
    the input is scaled by the mean of the two gates.
    """

    def __init__(self, channels=144, reduction=16):
        super().__init__()
        self.reduction = reduction
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.std = std_channels
        self.avg_att_module = nn.Sequential(
            nn.Conv2d(channels, channels // reduction, 1, 1, 0),
            nn.ReLU(True),
            nn.Conv2d(channels // reduction, channels, 1, 1, 0),
            nn.Sigmoid()
        )
        self.std_att_module = nn.Sequential(
            nn.Conv2d(channels, channels // reduction, 1, 1, 0),
            nn.ReLU(True),
            nn.Conv2d(channels // reduction, channels, 1, 1, 0),
            nn.Sigmoid()
        )

    def forward(self, x):
        std_stat = self.std(x)
        avg_stat = self.avg_pool(x)
        combined = self.avg_att_module(avg_stat) + self.std_att_module(std_stat)
        return x * (combined / 2)
class DilateBlock(nn.Module):
    """Parallel 3x3 convolutions at dilations 1, 2 and 3, concatenated
    channel-wise and re-weighted by a DirectAwareAtt module."""

    def __init__(self, channels):
        super().__init__()
        self.conv3x3_d1 = nn.Sequential(nn.Conv2d(channels, channels, 3, 1, 1, 1))
        self.conv3x3_d2 = nn.Sequential(nn.Conv2d(channels, channels, 3, 1, 2, 2))
        self.conv3x3_d3 = nn.Sequential(nn.Conv2d(channels, channels, 3, 1, 3, 3))
        self.att_module = DirectAwareAtt(channels * 3, 16)
        # Xavier-init every conv wrapped in a Sequential.
        for m in self.modules():
            if isinstance(m, nn.Sequential):
                for p in m:
                    if isinstance(p, (nn.Conv2d, nn.ConvTranspose2d)):
                        nn.init.xavier_normal_(p.weight)
                        nn.init.constant_(p.bias, 0.01)

    def forward(self, x):
        stacked = torch.cat(
            [self.conv3x3_d1(x), self.conv3x3_d2(x), self.conv3x3_d3(x)], dim=1)
        return self.att_module(stacked)
class DilateResBlock(nn.Module):
    """Residual block: x + Conv3x3(ReLU(DilateBlock(x))); the trailing
    conv reduces the 3*channels concat back to `channels`."""

    def __init__(self, channels):
        super().__init__()
        self.block = nn.Sequential(
            DilateBlock(channels),
            nn.ReLU(True),
            nn.Conv2d(channels * 3, channels, 3, 1, 1),
        )
        # Xavier-init convs held directly in a Sequential.
        for m in self.modules():
            if isinstance(m, nn.Sequential):
                for p in m:
                    if isinstance(p, (nn.Conv2d, nn.ConvTranspose2d)):
                        nn.init.xavier_normal_(p.weight)
                        nn.init.constant_(p.bias, 0.01)

    def forward(self, x):
        residual = self.block(x)
        return x + residual
class UpConv(nn.Module):
    """Bilinear upsampling by `scale`, then a 3x3 convolution.

    Two latent defects fixed: the forward pass hard-coded
    ``scale_factor=2`` instead of honoring `scale` (default is 2, so
    existing callers are unaffected), and the Xavier-init loop matched
    only convs inside an ``nn.Sequential`` and thus never ran — the conv
    is now initialized directly, like the rest of the file's blocks.
    """

    def __init__(self, inc, outc, scale=2):
        super(UpConv, self).__init__()
        self.scale = scale
        self.conv = nn.Conv2d(inc, outc, 3, stride=1, padding=1)
        nn.init.xavier_normal_(self.conv.weight)
        nn.init.constant_(self.conv.bias, 0.01)

    def forward(self, x):
        upsampled = F.interpolate(x, scale_factor=self.scale,
                                  mode='bilinear', align_corners=True)
        return self.conv(upsampled)
class RGBEncoder(nn.Module):
    """Three-stage RGB encoder (full, 1/2 and 1/4 resolution features)
    finished by four DilateResBlocks.

    The `c_x*` parameters are accepted only for interface compatibility
    with the aux-conditioned variant — their additions are commented out
    in this "woaux" model.  `pre_x*` features from a coarser stage are
    upsampled and added as skip connections.
    """

    def __init__(self, in_channels, channels, filter_size):
        super(RGBEncoder, self).__init__()
        pad = int((filter_size - 1) / 2)
        self.init = nn.Sequential(
            nn.Conv2d(in_channels, channels, filter_size, stride=1, padding=pad),
            nn.ReLU(True),
            nn.Conv2d(channels, channels, filter_size, stride=1, padding=pad))
        self.enc1 = nn.Sequential(
            nn.ReLU(True),
            nn.Conv2d(channels, channels, conv_s2, stride=2, padding=pad0),
            nn.ReLU(True),
            nn.Conv2d(channels, channels, filter_size, stride=1, padding=pad))
        self.enc2 = nn.Sequential(
            nn.ReLU(True),
            nn.Conv2d(channels, channels, conv_s2, stride=2, padding=pad0),
            nn.ReLU(True),
            nn.Conv2d(channels, channels, filter_size, stride=1, padding=pad))
        self.bottleneck = nn.Sequential(*[DilateResBlock(channels) for _ in range(4)])
        # Xavier-init for convs held in Sequentials.
        for m in self.modules():
            if isinstance(m, nn.Sequential):
                for p in m:
                    if isinstance(p, (nn.Conv2d, nn.ConvTranspose2d)):
                        nn.init.xavier_normal_(p.weight)
                        nn.init.constant_(p.bias, 0.01)

    def forward(self, input, scale=2, c_x0=None, c_x1=None, c_x2=None,
                pre_x2=None, pre_x3=None, pre_x4=None):
        x0 = self.init(input)
        if pre_x4 is not None:
            x0 = x0 + F.interpolate(pre_x4, scale_factor=scale, mode='bilinear', align_corners=align_corners)
        x1 = self.enc1(x0)  # 1/2 input size
        if pre_x3 is not None:
            x1 = x1 + F.interpolate(pre_x3, scale_factor=scale, mode='bilinear', align_corners=align_corners)
        x2 = self.enc2(x1)  # 1/4 input size
        if pre_x2 is not None:
            x2 = x2 + F.interpolate(pre_x2, scale_factor=scale, mode='bilinear', align_corners=align_corners)
        return x0, x1, self.bottleneck(x2)
class CondEncoder(nn.Module):
    """Coordinate-grid encoder producing a 5-level feature pyramid
    (1/1 .. 1/16 of the input size).

    NOTE(review): in this variant the forward pass concatenates ONLY the
    normalized (x, y) coordinate grid — the pixel values of `input` are
    discarded at the `torch.cat([grid_x, grid_y], 1)` line. That matches the
    (commented-out) `CondEncoder(0, ...)` usage below, where
    `in_channels + 2 == 2` equals the 2 grid channels; with `in_channels > 0`
    the first conv's channel count would not match — confirm before reusing.
    NOTE(review): `.cuda()` is hard-coded in forward, so CPU execution fails.
    """
    def __init__(self, in_channels, channels, filter_size):
        super(CondEncoder, self).__init__()
        # +2 for the normalized x/y coordinate channels added in forward().
        in_channels += 2
        padding = int((filter_size - 1) / 2)
        self.init = nn.Sequential(nn.Conv2d(in_channels, channels, filter_size, stride=1, padding=padding),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding))
        self.enc1 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels, channels, conv_s2, stride=2, padding=pad0),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding), )
        self.enc2 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels, channels, conv_s2, stride=2, padding=pad0),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding), )
        self.enc3 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels, channels, conv_s2, stride=2, padding=pad0),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding), )
        self.enc4 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels, channels, conv_s2, stride=2, padding=pad0),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding), )
        # Init Weights: Xavier for convs found inside nn.Sequential children.
        for m in self.modules():
            if isinstance(m, nn.Sequential):
                for p in m:
                    if isinstance(p, nn.Conv2d) or isinstance(p, nn.ConvTranspose2d):
                        nn.init.xavier_normal_(p.weight)
                        nn.init.constant_(p.bias, 0.01)
    def forward(self, input, scale=2, pre_x=None):
        ### Build a CoordConv-style normalized grid in [-1, 1].
        b, _, h, w = input.shape
        x_code = torch.Tensor([float(x)/(w-1) for x in range(w)]).float().cuda() * 2 - 1
        y_code = torch.Tensor([float(y)/(h-1) for y in range(h)]).float().cuda() * 2 - 1
        grid_y, grid_x = torch.meshgrid(y_code, x_code)
        grid_y = grid_y.view(1,1,h,w).expand(b,1,h,w)
        grid_x = grid_x.view(1,1,h,w).expand(b,1,h,w)
        # Only the grid is encoded here; `input` pixels are intentionally(?)
        # dropped — see class NOTE above.
        input = torch.cat([grid_x, grid_y], 1)
        x0 = self.init(input)
        if pre_x is not None:
            x0 = x0 + F.interpolate(pre_x, scale_factor=scale, mode='bilinear', align_corners=align_corners)
        x1 = self.enc1(x0) # 1/2 input size
        x2 = self.enc2(x1) # 1/4 input size
        x3 = self.enc3(x2) # 1/8 input size
        x4 = self.enc4(x3) # 1/16 input size
        # return the pre-activated features
        return x0, x1, x2, x3, x4
class RGBDecoder(nn.Module):
    """Decoder of the RGB U-Net: upsamples 1/4-res features back to full
    resolution and predicts a 3-channel RGB image.

    forward() takes the encoder pyramid `pre_rgbx` = (x0 @1/1, x1 @1/2,
    x2 @1/4) and, optionally, decoder features from the previous (coarser)
    cascade stage (`pre_x2..pre_x4`), which are upsampled x2 and added as
    skips. Returns (x2, x3, x4, output_rgb) so the next cascade stage can
    reuse the intermediate decoder features.

    Fix: optional-tensor checks now use ``is not None`` instead of
    ``!= None`` — identity, not rich comparison, is the correct test for
    "was this argument supplied" and avoids relying on Tensor.__ne__
    fallback behavior.
    """
    def __init__(self, channels, filter_size):
        super(RGBDecoder, self).__init__()
        padding = int((filter_size-1)/2)
        self.dec2 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels//2, channels//2, filter_size, stride=1, padding=padding),
                                  nn.ReLU(True),
                                  UpConv(channels//2, channels//2),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels//2, channels//2, filter_size, stride=1, padding=padding),
                                  )
        self.dec1 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels//2, channels//2, filter_size, stride=1, padding=padding),
                                  nn.ReLU(True),
                                  UpConv(channels//2, channels//2),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels//2, channels//2, filter_size, stride=1, padding=padding),
                                  )
        self.prdct = nn.Sequential(nn.ReLU(True),
                                   nn.Conv2d(channels//2, channels//2, filter_size, stride=1, padding=padding),
                                   nn.ReLU(True),
                                   nn.Conv2d(channels//2, 3, filter_size, stride=1, padding=padding))
        # Init Weights: Xavier for convs found directly inside nn.Sequential
        # children (UpConv's inner conv is not in a Sequential, so it keeps
        # PyTorch's default init — preserved as-is).
        for m in self.modules():
            if isinstance(m, nn.Sequential):
                for p in m:
                    if isinstance(p, nn.Conv2d) or isinstance(p, nn.ConvTranspose2d):
                        nn.init.xavier_normal_(p.weight)
                        nn.init.constant_(p.bias, 0.01)
    def forward(self, pre_rgbx, pre_x2=None, pre_x3=None, pre_x4=None):
        x2 = pre_rgbx[2]
        x1 = pre_rgbx[1]
        x0 = pre_rgbx[0]
        if pre_x2 is not None:
            x2 = x2 + F.interpolate(pre_x2, scale_factor=2, mode='bilinear', align_corners=align_corners)
        x3 = self.dec2(x2)  # 1/2 input size
        if pre_x3 is not None:
            x3 = x3 + F.interpolate(pre_x3, scale_factor=2, mode='bilinear', align_corners=align_corners)
        x4 = self.dec1(x1+x3)  # 1/1 input size
        if pre_x4 is not None:
            x4 = x4 + F.interpolate(pre_x4, scale_factor=2, mode='bilinear', align_corners=align_corners)
        ### prediction
        output_rgb = self.prdct(x4 + x0)
        return x2, x3, x4, output_rgb
class One2One_noaux(nn.Module):
    """Coarse-to-fine cascaded relighting network WITHOUT auxiliary input.

    Three encoder/decoder pairs run at 1/4, 1/2 and full resolution; each
    stage's RGB prediction is upsampled and concatenated with the next
    stage's input, and decoder features are passed as skips. When
    `short_connection` is True, each stage predicts a residual that is
    added (in place) to the stage's input RGB.
    """
    def __init__(self, in_channels=3, short_connection=True):
        super(One2One_noaux, self).__init__()
        self.short_connection = short_connection
        self.in_channels = in_channels
        denc_channels = 48
        cenc_channels = 48
        ddcd_channels = denc_channels+cenc_channels
        # Condition encoder intentionally disabled in the no-aux variant.
        # self.cond_encoder = CondEncoder(0, cenc_channels, 3)
        self.rgb_encoder1 = RGBEncoder(in_channels, denc_channels, 3)
        self.rgb_decoder1 = RGBDecoder(ddcd_channels, 3)
        # Stages 2/3 take [input RGB, upsampled previous prediction] -> 2x channels.
        self.rgb_encoder2 = RGBEncoder(2*in_channels, denc_channels, 3)
        self.rgb_decoder2 = RGBDecoder(ddcd_channels, 3)
        self.rgb_encoder3 = RGBEncoder(2*in_channels, denc_channels, 3)
        self.rgb_decoder3 = RGBDecoder(ddcd_channels, 3)
    def forward(self, x, enable_layers=None):
        # x is a sequence; only its first element (the RGB tensor) is used.
        # `enable_layers` is currently unused.
        x = x[0]
        input_rgb=x
        ## for the 1/4 res
        input_rgb14 = F.interpolate(input_rgb, scale_factor=0.25, mode='bilinear',align_corners=align_corners)
        # print(enc_c[2].shape)
        enc_rgb14 = self.rgb_encoder1(input_rgb14, 2) # enc_rgb [larger -> smaller size]
        dcd_rgb14 = self.rgb_decoder1(enc_rgb14) # dec_rgb [smaller -> larger size]
        ## for the 1/2 res
        input_rgb12 = F.interpolate(input_rgb, scale_factor=0.5, mode='bilinear', align_corners=align_corners)
        ori_pred_rgb14 = dcd_rgb14[3]
        if self.short_connection:
            # NOTE: in-place += also mutates the tensor held in dcd_rgb14[3].
            ori_pred_rgb14 += input_rgb14
        predict_rgb12 = F.interpolate(ori_pred_rgb14, scale_factor=2, mode='bilinear', align_corners=align_corners)
        input_12 = torch.cat((input_rgb12, predict_rgb12), 1)
        enc_rgb12 = self.rgb_encoder2(input_12, 2)
        dcd_rgb12 = self.rgb_decoder2(enc_rgb12, dcd_rgb14[0], dcd_rgb14[1], dcd_rgb14[2])
        ## for the 1/1 res
        ori_pred_rgb12 = dcd_rgb12[3]
        if self.short_connection:
            ori_pred_rgb12 += input_rgb12
        predict_rgb11 = F.interpolate(ori_pred_rgb12, scale_factor=2, mode='bilinear', align_corners=align_corners)
        input_11 = torch.cat((input_rgb, predict_rgb11), 1)
        enc_rgb11 = self.rgb_encoder3(input_11, 2)
        dcd_rgb11 = self.rgb_decoder3(enc_rgb11, dcd_rgb12[0], dcd_rgb12[1], dcd_rgb12[2])
        output_rgb11 = dcd_rgb11[3]
        if self.short_connection:
            output_rgb11 += input_rgb
        # Full-res prediction plus the intermediate 1/2 and 1/4 predictions
        # (useful for multi-scale supervision).
        return output_rgb11, ori_pred_rgb12, ori_pred_rgb14
| 13,759 | 39.351906 | 115 | py |
# IAN-master/network/arch/vgg_arch.py
import os
import torch
from collections import OrderedDict
from torch import nn as nn
from torchvision.models import vgg as vgg
VGG_PRETRAIN_PATH = 'experiments/pretrained_models/vgg19-dcbb9e9d.pth'
# Layer names for each torchvision VGG variant, in the exact order the
# layers appear in torchvision's `vgg*().features` Sequential; used to
# address feature layers by semantic name (e.g. 'relu3_1').
NAMES = {
    'vgg11': [
        'conv1_1', 'relu1_1', 'pool1', 'conv2_1', 'relu2_1', 'pool2',
        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'pool3', 'conv4_1',
        'relu4_1', 'conv4_2', 'relu4_2', 'pool4', 'conv5_1', 'relu5_1',
        'conv5_2', 'relu5_2', 'pool5'
    ],
    'vgg13': [
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1',
        'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1',
        'conv3_2', 'relu3_2', 'pool3', 'conv4_1', 'relu4_1', 'conv4_2',
        'relu4_2', 'pool4', 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'pool5'
    ],
    'vgg16': [
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1',
        'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1',
        'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'pool3', 'conv4_1',
        'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'pool4',
        'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3',
        'pool5'
    ],
    'vgg19': [
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1',
        'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1',
        'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4',
        'pool3', 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
        'relu4_3', 'conv4_4', 'relu4_4', 'pool4', 'conv5_1', 'relu5_1',
        'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4',
        'pool5'
    ]
}
def insert_bn(names):
    """Insert a bn layer name after each conv layer name.

    Args:
        names (list): The list of layer names.

    Returns:
        list: The list of layer names with bn layers.
    """
    expanded = []
    for layer_name in names:
        if 'conv' in layer_name:
            # 'convX_Y' is followed by its matching 'bnX_Y'.
            expanded.extend([layer_name, 'bn' + layer_name.replace('conv', '')])
        else:
            expanded.append(layer_name)
    return expanded
class VGGFeatureExtractor(nn.Module):
"""VGG network for feature extraction.
In this implementation, we allow users to choose whether use normalization
in the input feature and the type of vgg network. Note that the pretrained
path must fit the vgg type.
Args:
layer_name_list (list[str]): Forward function returns the corresponding
features according to the layer_name_list.
Example: {'relu1_1', 'relu2_1', 'relu3_1'}.
vgg_type (str): Set the type of vgg network. Default: 'vgg19'.
use_input_norm (bool): If True, normalize the input image. Importantly,
the input feature must in the range [0, 1]. Default: True.
range_norm (bool): If True, norm images with range [-1, 1] to [0, 1].
Default: False.
requires_grad (bool): If true, the parameters of VGG network will be
optimized. Default: False.
remove_pooling (bool): If true, the max pooling operations in VGG net
will be removed. Default: False.
pooling_stride (int): The stride of max pooling operation. Default: 2.
"""
    def __init__(self,
                 layer_name_list,
                 vgg_type='vgg19',
                 use_input_norm=True,
                 range_norm=False,
                 requires_grad=False,
                 remove_pooling=False,
                 pooling_stride=2):
        """See the class docstring for argument semantics."""
        super(VGGFeatureExtractor, self).__init__()
        self.layer_name_list = layer_name_list
        self.use_input_norm = use_input_norm
        self.range_norm = range_norm
        # '_bn' variants share the base name list with bn layers interleaved.
        self.names = NAMES[vgg_type.replace('_bn', '')]
        if 'bn' in vgg_type:
            self.names = insert_bn(self.names)
        # only borrow layers that will be used to avoid unused params
        max_idx = 0
        for v in layer_name_list:
            idx = self.names.index(v)
            if idx > max_idx:
                max_idx = idx
        # Prefer a local pretrained checkpoint; otherwise let torchvision
        # download the weights.
        if os.path.exists(VGG_PRETRAIN_PATH):
            vgg_net = getattr(vgg, vgg_type)(pretrained=False)
            state_dict = torch.load(
                VGG_PRETRAIN_PATH, map_location=lambda storage, loc: storage)
            vgg_net.load_state_dict(state_dict)
        else:
            vgg_net = getattr(vgg, vgg_type)(pretrained=True)
        features = vgg_net.features[:max_idx + 1]
        # Rebuild the truncated feature stack with semantic layer names so
        # forward() can look activations up by name.
        modified_net = OrderedDict()
        for k, v in zip(self.names, features):
            if 'pool' in k:
                # if remove_pooling is true, pooling operation will be removed
                if remove_pooling:
                    continue
                else:
                    # in some cases, we may want to change the default stride
                    modified_net[k] = nn.MaxPool2d(
                        kernel_size=2, stride=pooling_stride)
            else:
                modified_net[k] = v
        self.vgg_net = nn.Sequential(modified_net)
        # Freeze (default) or unfreeze the whole extractor.
        if not requires_grad:
            self.vgg_net.eval()
            for param in self.parameters():
                param.requires_grad = False
        else:
            self.vgg_net.train()
            for param in self.parameters():
                param.requires_grad = True
        if self.use_input_norm:
            # the mean is for image with range [0, 1]
            self.register_buffer(
                'mean',
                torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
            # the std is for image with range [0, 1]
            self.register_buffer(
                'std',
                torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
Returns:
Tensor: Forward results.
"""
if self.range_norm:
x = (x + 1) / 2
if self.use_input_norm:
x = (x - self.mean) / self.std
output = {}
for key, layer in self.vgg_net._modules.items():
x = layer(x)
if key in self.layer_name_list:
output[key] = x.clone()
return output | 6,223 | 36.721212 | 79 | py |
# IAN-master/network/arch/any2any_arch.py
import torch
import torch.nn as nn
import torch.nn.functional as F
# Kernel size / padding of every stride-2 downsampling conv (4x4, pad 1
# halves H and W exactly).
conv_s2 = 4
pad0 = 1
# Shared align_corners flag for all F.interpolate calls in this file.
align_corners=True
def mean_channels(feat):
    """Per-channel spatial mean of a 4D (N, C, H, W) tensor.

    Args:
        feat (Tensor): 4D feature map.

    Returns:
        Tensor: means with shape (N, C, 1, 1).
    """
    # Fix: parameter renamed from `F`, which shadowed the module-level
    # `torch.nn.functional as F` import inside this function.
    assert feat.dim() == 4
    spatial_sum = feat.sum(3, keepdim=True).sum(2, keepdim=True)
    return spatial_sum / (feat.size(2) * feat.size(3))
def std_channels(feat):
    """Per-channel spatial (population) standard deviation of a 4D tensor.

    Args:
        feat (Tensor): 4D (N, C, H, W) feature map.

    Returns:
        Tensor: stds with shape (N, C, 1, 1).
    """
    # Fix: parameter renamed from `F`, which shadowed the module-level
    # `torch.nn.functional as F` import inside this function.
    assert feat.dim() == 4
    feat_mean = mean_channels(feat)
    feat_variance = (feat - feat_mean).pow(2).sum(3, keepdim=True).sum(2, keepdim=True) / (feat.size(2) * feat.size(3))
    return feat_variance.pow(0.5)
class DirectAwareAtt(nn.Module):
    """Light-aware channel attention (SE-style squeeze via mean AND std).

    Builds a per-channel gate in [0, 1] from the concatenation of a global
    descriptor of `x` and a per-sample light feature vector, then rescales
    `x`. Two parallel excitation MLPs are used — one fed by the spatial
    mean, one by the spatial std — and their sigmoid gates are averaged.
    """
    def __init__(self, channels=144, light_channels=48, reduction=16):
        super().__init__()
        self.reduction = reduction
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Module-level helper computing per-channel spatial std.
        self.std = std_channels
        total_channels = channels + light_channels
        self.avg_att_module = nn.Sequential(
            nn.Conv2d(total_channels, total_channels // reduction, 1, 1, 0),
            nn.ReLU(True),
            nn.Conv2d(total_channels // reduction, channels, 1, 1, 0),
            nn.Sigmoid()
        )
        self.std_att_module = nn.Sequential(
            nn.Conv2d(total_channels, total_channels // reduction, 1, 1, 0),
            nn.ReLU(True),
            nn.Conv2d(total_channels // reduction, channels, 1, 1, 0),
            nn.Sigmoid()
        )
    def forward(self, x, light):
        # light: (B, light_channels) vector, broadcast as a 1x1 spatial map.
        l_b, l_c = light.shape
        light = light.view(l_b, l_c, 1, 1)
        # (B, C, 1, 1) per-channel spatial std and mean descriptors.
        avg_std = self.std(x)
        avg = self.avg_pool(x)
        att = self.avg_att_module(torch.cat([avg, light], 1)) + self.std_att_module(torch.cat([avg_std, light], 1))
        # Average the two sigmoid gates.
        att = att / 2
        return att * x
class DilateBlock(nn.Module):
    """Three parallel 3x3 convs with dilation 1/2/3, concatenated (3x the
    channels) and gated by light-aware channel attention."""
    def __init__(self, channels, light_channels):
        super().__init__()
        self.conv3x3_d1 = nn.Sequential(nn.Conv2d(channels, channels, 3, 1, 1, 1))
        self.conv3x3_d2 = nn.Sequential(nn.Conv2d(channels, channels, 3, 1, 2, 2))
        self.conv3x3_d3 = nn.Sequential(nn.Conv2d(channels, channels, 3, 1, 3, 3))
        self.att_module = DirectAwareAtt(channels*3, light_channels, 16)
        # NOTE: self.modules() recurses, so this Xavier init also re-initializes
        # the 1x1 convs inside att_module's Sequential excitation MLPs.
        for m in self.modules():
            if isinstance(m, nn.Sequential):
                for p in m:
                    if isinstance(p, nn.Conv2d) or isinstance(p, nn.ConvTranspose2d):
                        nn.init.xavier_normal_(p.weight)
                        nn.init.constant_(p.bias, 0.01)
    def forward(self, x, light):
        feat = torch.cat([self.conv3x3_d1(x), self.conv3x3_d2(x), self.conv3x3_d3(x)], 1)
        return self.att_module(feat, light)
class DilateResBlock(nn.Module):
    """Residual block: x + proj(relu(DilateBlock(x, light))).

    The 3x-channel multi-dilation features are projected back to `channels`
    by a single 3x3 conv before the residual addition.
    """
    def __init__(self, channels, light_channels):
        super().__init__()
        self.dilateblock = DilateBlock(channels, light_channels)
        self.block = nn.Sequential(
            nn.ReLU(True),
            nn.Conv2d(channels*3, channels, 3, 1, 1),
        )
        # NOTE: self.modules() recurses, so convs inside the nested
        # dilateblock's Sequentials are Xavier-initialized again here.
        for m in self.modules():
            if isinstance(m, nn.Sequential):
                for p in m:
                    if isinstance(p, nn.Conv2d) or isinstance(p, nn.ConvTranspose2d):
                        nn.init.xavier_normal_(p.weight)
                        nn.init.constant_(p.bias, 0.01)
    def forward(self, x, light):
        res = self.dilateblock(x, light)
        x = x + self.block(res)
        return x
class Bottleneck(nn.Module):
    """Stack of `num` light-conditioned DilateResBlocks applied sequentially.

    Bug fix: the original built the list as ``[DilateResBlock(...)] * num``,
    which registers the SAME module instance `num` times, so all blocks
    shared one set of weights. Each block is now an independent instance,
    matching the comprehension style used for the bottleneck elsewhere in
    this repo. NOTE: this changes the parameter set, so checkpoints trained
    with the shared-weight version will not load unmodified.
    """
    def __init__(self, num=4, channels=48, light_channels=48):
        super().__init__()
        self.module = nn.ModuleList(
            [DilateResBlock(channels, light_channels) for _ in range(num)])
    def forward(self, x, lights):
        # lights: (B, num, light_channels); block i consumes lights[:, i, :].
        for i, m in enumerate(self.module):
            x = m(x, lights[:, i, :])
        return x
class UpConv(nn.Module):
    """Bilinear upsample by `scale`, then a 3x3 conv mapping inc -> outc.

    Bug fix: `scale` was stored but forward() hard-coded ``scale_factor=2``,
    so ``UpConv(..., scale=3)`` silently upsampled by 2. forward() now honors
    ``self.scale`` (default 2, so all existing callers are unaffected).
    NOTE(review): the init loop below only visits nn.Sequential children and
    this module has none, so ``self.conv`` keeps PyTorch's default
    initialization — preserved as-is to avoid changing trained behavior.
    """
    def __init__(self, inc, outc, scale=2):
        super(UpConv, self).__init__()
        self.scale = scale
        self.conv = nn.Conv2d(inc, outc, 3, stride=1, padding=1)
        for m in self.modules():
            if isinstance(m, nn.Sequential):
                for p in m:
                    if isinstance(p, nn.Conv2d) or isinstance(p, nn.ConvTranspose2d):
                        nn.init.xavier_normal_(p.weight)
                        nn.init.constant_(p.bias, 0.01)
    def forward(self, x):
        up = F.interpolate(x, scale_factor=self.scale, mode='bilinear', align_corners=True)
        return self.conv(up)
class RGBEncoder(nn.Module):
    """Three-level RGB feature encoder (features at 1/1, 1/2 and 1/4 size).

    Each `enc` stage halves the spatial size via a `conv_s2` x `conv_s2`
    (4x4), stride-2, pad-`pad0` (1) convolution. Condition features
    c_x0..c_x2 from CondEncoder are added at matching scales (they default
    to None but are required in practice — ``x0 + c_x0`` raises if c_x0 is
    None). pre_x2..pre_x4, when given, are features from the previous
    (coarser) cascade stage, upsampled by `scale` and added as skips.

    Returns a *list* [x0, x1, x2] so callers can replace entries in place
    (Any2Any.forward rewrites index -1 with the bottleneck output).
    """
    def __init__(self, in_channels, channels, filter_size):
        super(RGBEncoder, self).__init__()
        padding = int((filter_size - 1) / 2)
        self.init = nn.Sequential(nn.Conv2d(in_channels, channels, filter_size, stride=1, padding=padding),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding))
        self.enc1 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels, channels, conv_s2, stride=2, padding=pad0),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding),
                                  )
        self.enc2 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels, channels, conv_s2, stride=2, padding=pad0),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding),
                                  )
        # Init Weights: Xavier for convs found inside nn.Sequential children.
        for m in self.modules():
            if isinstance(m, nn.Sequential):
                for p in m:
                    if isinstance(p, nn.Conv2d) or isinstance(p, nn.ConvTranspose2d):
                        nn.init.xavier_normal_(p.weight)
                        nn.init.constant_(p.bias, 0.01)
    def forward(self, input, scale=2, c_x0=None, c_x1=None, c_x2=None, pre_x2=None, pre_x3=None, pre_x4=None):
        ### input
        x0 = self.init(input)
        x0 = x0 + c_x0
        if pre_x4 is not None:
            x0 = x0 + F.interpolate(pre_x4, scale_factor=scale, mode='bilinear', align_corners=align_corners)
        x1 = self.enc1(x0)  # 1/2 input size
        x1 = x1 + c_x1
        if pre_x3 is not None:
            x1 = x1 + F.interpolate(pre_x3, scale_factor=scale, mode='bilinear', align_corners=align_corners)
        x2 = self.enc2(x1)  # 1/4 input size
        x2 = x2 + c_x2
        if pre_x2 is not None:
            x2 = x2 + F.interpolate(pre_x2, scale_factor=scale, mode='bilinear', align_corners=align_corners)
        return [x0, x1, x2]
class CondEncoder(nn.Module):
    """Condition encoder producing a 5-level feature pyramid
    (1/1 .. 1/16 of the input size) from the aux input plus a CoordConv
    grid.

    NOTE(review): `.cuda()` is hard-coded in forward, so CPU execution
    fails; consider building the grid on ``input.device`` — left unchanged
    here.
    """
    def __init__(self, in_channels, channels, filter_size):
        super(CondEncoder, self).__init__()
        # +2 for the normalized x/y coordinate channels added in forward().
        in_channels += 2
        padding = int((filter_size - 1) / 2)
        self.init = nn.Sequential(nn.Conv2d(in_channels, channels, filter_size, stride=1, padding=padding),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding))
        self.enc1 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels, channels, conv_s2, stride=2, padding=pad0),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding), )
        self.enc2 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels, channels, conv_s2, stride=2, padding=pad0),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding), )
        self.enc3 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels, channels, conv_s2, stride=2, padding=pad0),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding), )
        self.enc4 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels, channels, conv_s2, stride=2, padding=pad0),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding), )
        # Init Weights: Xavier for convs found inside nn.Sequential children.
        for m in self.modules():
            if isinstance(m, nn.Sequential):
                for p in m:
                    if isinstance(p, nn.Conv2d) or isinstance(p, nn.ConvTranspose2d):
                        nn.init.xavier_normal_(p.weight)
                        nn.init.constant_(p.bias, 0.01)
    def forward(self, input, scale=2, pre_x=None):
        ### Build a CoordConv-style normalized grid in [-1, 1].
        b, _, h, w = input.shape
        x_code = torch.Tensor([float(x)/(w-1) for x in range(w)]).float().cuda() * 2 - 1
        y_code = torch.Tensor([float(y)/(h-1) for y in range(h)]).float().cuda() * 2 - 1
        # Default meshgrid indexing ('ij') is relied on; newer torch warns.
        grid_y, grid_x = torch.meshgrid(y_code, x_code)
        grid_y = grid_y.view(1,1,h,w).expand(b,1,h,w)
        grid_x = grid_x.view(1,1,h,w).expand(b,1,h,w)
        input = torch.cat([grid_x, grid_y, input], 1)
        x0 = self.init(input)
        if pre_x is not None:
            x0 = x0 + F.interpolate(pre_x, scale_factor=scale, mode='bilinear', align_corners=align_corners)
        x1 = self.enc1(x0) # 1/2 input size
        x2 = self.enc2(x1) # 1/4 input size
        x3 = self.enc3(x2) # 1/8 input size
        x4 = self.enc4(x3) # 1/16 input size
        # return the pre-activated features
        return x0, x1, x2, x3, x4
class RGBDecoder(nn.Module):
    """Decoder of the RGB U-Net: upsamples 1/4-res features back to full
    resolution and predicts a 3-channel RGB image.

    forward() takes the encoder pyramid `pre_rgbx` = [x0 @1/1, x1 @1/2,
    x2 @1/4] and, optionally, decoder features from the previous (coarser)
    cascade stage (`pre_x2..pre_x4`), which are upsampled x2 and added as
    skips. Returns (x2, x3, x4, output_rgb) so the next cascade stage can
    reuse the intermediate decoder features.

    Fix: optional-tensor checks now use ``is not None`` instead of
    ``!= None`` — identity, not rich comparison, is the correct test for
    "was this argument supplied" and avoids relying on Tensor.__ne__
    fallback behavior.
    """
    def __init__(self, channels, filter_size):
        super(RGBDecoder, self).__init__()
        padding = int((filter_size-1)/2)
        self.dec2 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels//2, channels//2, filter_size, stride=1, padding=padding),
                                  nn.ReLU(True),
                                  UpConv(channels//2, channels//2),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels//2, channels//2, filter_size, stride=1, padding=padding),
                                  )
        self.dec1 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels//2, channels//2, filter_size, stride=1, padding=padding),
                                  nn.ReLU(True),
                                  UpConv(channels//2, channels//2),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels//2, channels//2, filter_size, stride=1, padding=padding),
                                  )
        self.prdct = nn.Sequential(nn.ReLU(True),
                                   nn.Conv2d(channels//2, channels//2, filter_size, stride=1, padding=padding),
                                   nn.ReLU(True),
                                   nn.Conv2d(channels//2, 3, filter_size, stride=1, padding=padding))
        # Init Weights: Xavier for convs found directly inside nn.Sequential
        # children (UpConv's inner conv is not in a Sequential and keeps
        # PyTorch's default init — preserved as-is).
        for m in self.modules():
            if isinstance(m, nn.Sequential):
                for p in m:
                    if isinstance(p, nn.Conv2d) or isinstance(p, nn.ConvTranspose2d):
                        nn.init.xavier_normal_(p.weight)
                        nn.init.constant_(p.bias, 0.01)
    def forward(self, pre_rgbx, pre_x2=None, pre_x3=None, pre_x4=None):
        x2 = pre_rgbx[2]
        x1 = pre_rgbx[1]
        x0 = pre_rgbx[0]
        if pre_x2 is not None:
            x2 = x2 + F.interpolate(pre_x2, scale_factor=2, mode='bilinear', align_corners=align_corners)
        x3 = self.dec2(x2)  # 1/2 input size
        if pre_x3 is not None:
            x3 = x3 + F.interpolate(pre_x3, scale_factor=2, mode='bilinear', align_corners=align_corners)
        x4 = self.dec1(x1+x3)  # 1/1 input size
        if pre_x4 is not None:
            x4 = x4 + F.interpolate(pre_x4, scale_factor=2, mode='bilinear', align_corners=align_corners)
        output_rgb = self.prdct(x4 + x0)
        return x2, x3, x4, output_rgb
class LightProjectionModule(nn.Module):
    """Three-layer MLP projecting a light-probe vector (in_channels) to a
    flat conditioning feature vector (out_channels)."""
    def __init__(self, in_channels, out_channels, mid_channels=128):
        super(LightProjectionModule, self).__init__()
        layers = [
            nn.Linear(in_channels, mid_channels),
            nn.ReLU(True),
            nn.Linear(mid_channels, mid_channels),
            nn.ReLU(True),
            nn.Linear(mid_channels, out_channels),
        ]
        self.module = nn.Sequential(*layers)
    def forward(self, x):
        return self.module(x)
class Any2Any(nn.Module):
    """Coarse-to-fine cascaded relighting network, conditioned on an aux
    input (via CondEncoder) and a light-probe vector (via the light
    projector feeding the Bottleneck attention blocks).

    Bug fix: forward() previously applied ``self.bottleneck1`` at all three
    scales, leaving ``self.bottleneck2`` and ``self.bottleneck3``
    constructed but never used. Stages 2 and 3 now use their own
    bottlenecks. NOTE: checkpoints trained with the old code will produce
    different outputs for stages 2/3 (those weights were never trained).
    """
    def __init__(self, in_channels=3, aux_channels=3, light_probe_channels=18, short_connection=True):
        super(Any2Any, self).__init__()
        self.short_connection = short_connection
        self.in_channels = in_channels
        self.aux_channels = aux_channels
        self.denc_channels = 48
        self.cenc_channels = 48
        self.light_channels = 48 * 3
        self.block_num = 4
        ddcd_channels = self.denc_channels+self.cenc_channels
        self.cond_encoder = CondEncoder(aux_channels, self.cenc_channels, 3)
        # Projects the probe to 3 stages x block_num blocks x light_channels.
        self.light_projector = LightProjectionModule(light_probe_channels,
                                                     3 * self.block_num * self.light_channels, 1024)
        self.rgb_encoder1 = RGBEncoder(in_channels, self.denc_channels, 3)
        self.bottleneck1 = Bottleneck(self.block_num, self.denc_channels, self.light_channels)
        self.rgb_decoder1 = RGBDecoder(ddcd_channels, 3)
        # Stages 2/3 take [input RGB, upsampled previous prediction] -> 2x channels.
        self.rgb_encoder2 = RGBEncoder(2*in_channels, self.denc_channels, 3)
        self.bottleneck2 = Bottleneck(self.block_num, self.denc_channels, self.light_channels)
        self.rgb_decoder2 = RGBDecoder(ddcd_channels, 3)
        self.rgb_encoder3 = RGBEncoder(2*in_channels, self.denc_channels, 3)
        self.bottleneck3 = Bottleneck(self.block_num, self.denc_channels, self.light_channels)
        self.rgb_decoder3 = RGBDecoder(ddcd_channels, 3)
    def forward(self, x, light_prior):
        # x is a sequence; x[0] is the (B, in+aux, H, W) input tensor.
        x = x[0]
        # NOTE(review): aux_channels == 0 is unsupported — input_d (and thus
        # enc_c below) would be undefined.
        if self.aux_channels != 0:
            input_rgb, input_d = torch.split(x, [self.in_channels, self.aux_channels], 1)
        enc_c = self.cond_encoder(input_d)
        light_f = self.light_projector(light_prior)
        light_f = light_f.view(-1, 3, self.block_num, self.light_channels)
        ## stage 1: 1/4 resolution
        input_rgb14 = F.interpolate(input_rgb, scale_factor=0.25, mode='bilinear',align_corners=align_corners)
        enc_rgb14 = self.rgb_encoder1(input_rgb14, 2, enc_c[2], enc_c[3], enc_c[4])  # enc_rgb [larger -> smaller size]
        enc_rgb14[-1] = self.bottleneck1(enc_rgb14[-1], light_f[:, 0, :, :])
        dcd_rgb14 = self.rgb_decoder1(enc_rgb14)  # dec_rgb [smaller -> larger size]
        ## stage 2: 1/2 resolution
        input_rgb12 = F.interpolate(input_rgb, scale_factor=0.5, mode='bilinear', align_corners=align_corners)
        ori_pred_rgb14 = dcd_rgb14[3]
        if self.short_connection:
            ori_pred_rgb14 += input_rgb14
        predict_rgb12 = F.interpolate(ori_pred_rgb14, scale_factor=2, mode='bilinear', align_corners=align_corners)
        input_12 = torch.cat((input_rgb12, predict_rgb12), 1)
        enc_rgb12 = self.rgb_encoder2(input_12, 2, enc_c[1], enc_c[2], enc_c[3])
        # BUG FIX: was self.bottleneck1, leaving bottleneck2 unused.
        enc_rgb12[-1] = self.bottleneck2(enc_rgb12[-1], light_f[:, 1, :, :])
        dcd_rgb12 = self.rgb_decoder2(enc_rgb12, dcd_rgb14[0], dcd_rgb14[1], dcd_rgb14[2])
        ## stage 3: full resolution
        ori_pred_rgb12 = dcd_rgb12[3]
        if self.short_connection:
            ori_pred_rgb12 += input_rgb12
        predict_rgb11 = F.interpolate(ori_pred_rgb12, scale_factor=2, mode='bilinear', align_corners=align_corners)
        input_11 = torch.cat((input_rgb, predict_rgb11), 1)
        enc_rgb11 = self.rgb_encoder3(input_11, 2, enc_c[0], enc_c[1], enc_c[2])
        # BUG FIX: was self.bottleneck1, leaving bottleneck3 unused.
        enc_rgb11[-1] = self.bottleneck3(enc_rgb11[-1], light_f[:, 2, :, :])
        dcd_rgb11 = self.rgb_decoder3(enc_rgb11, dcd_rgb12[0], dcd_rgb12[1], dcd_rgb12[2])
        output_rgb11 = dcd_rgb11[3]
        if self.short_connection:
            output_rgb11 += input_rgb
        # Full-res prediction plus the intermediate 1/2 and 1/4 predictions
        # (useful for multi-scale supervision).
        return output_rgb11, ori_pred_rgb12, ori_pred_rgb14
| 16,008 | 41.128947 | 120 | py |
# IAN-master/network/arch/any2any_woaux_arch.py
import torch
import torch.nn as nn
import torch.nn.functional as F
# Kernel size / padding of every stride-2 downsampling conv (4x4, pad 1
# halves H and W exactly).
conv_s2 = 4
pad0 = 1
# Shared align_corners flag for all F.interpolate calls in this file.
align_corners=True
def mean_channels(feat):
    """Per-channel spatial mean of a 4D (N, C, H, W) tensor.

    Args:
        feat (Tensor): 4D feature map.

    Returns:
        Tensor: means with shape (N, C, 1, 1).
    """
    # Fix: parameter renamed from `F`, which shadowed the module-level
    # `torch.nn.functional as F` import inside this function.
    assert feat.dim() == 4
    spatial_sum = feat.sum(3, keepdim=True).sum(2, keepdim=True)
    return spatial_sum / (feat.size(2) * feat.size(3))
def std_channels(feat):
    """Per-channel spatial (population) standard deviation of a 4D tensor.

    Args:
        feat (Tensor): 4D (N, C, H, W) feature map.

    Returns:
        Tensor: stds with shape (N, C, 1, 1).
    """
    # Fix: parameter renamed from `F`, which shadowed the module-level
    # `torch.nn.functional as F` import inside this function.
    assert feat.dim() == 4
    feat_mean = mean_channels(feat)
    feat_variance = (feat - feat_mean).pow(2).sum(3, keepdim=True).sum(2, keepdim=True) / (feat.size(2) * feat.size(3))
    return feat_variance.pow(0.5)
class DirectAwareAtt(nn.Module):
    """Light-aware channel attention (SE-style squeeze via mean AND std).

    Builds a per-channel gate in [0, 1] from the concatenation of a global
    descriptor of `x` and a per-sample light feature vector, then rescales
    `x`. Two parallel excitation MLPs are used — one fed by the spatial
    mean, one by the spatial std — and their sigmoid gates are averaged.
    """
    def __init__(self, channels=144, light_channels=48, reduction=16):
        super().__init__()
        self.reduction = reduction
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Module-level helper computing per-channel spatial std.
        self.std = std_channels
        total_channels = channels + light_channels
        self.avg_att_module = nn.Sequential(
            nn.Conv2d(total_channels, total_channels // reduction, 1, 1, 0),
            nn.ReLU(True),
            nn.Conv2d(total_channels // reduction, channels, 1, 1, 0),
            nn.Sigmoid()
        )
        self.std_att_module = nn.Sequential(
            nn.Conv2d(total_channels, total_channels // reduction, 1, 1, 0),
            nn.ReLU(True),
            nn.Conv2d(total_channels // reduction, channels, 1, 1, 0),
            nn.Sigmoid()
        )
    def forward(self, x, light):
        # light: (B, light_channels) vector, broadcast as a 1x1 spatial map.
        l_b, l_c = light.shape
        light = light.view(l_b, l_c, 1, 1)
        # (B, C, 1, 1) per-channel spatial std and mean descriptors.
        avg_std = self.std(x)
        avg = self.avg_pool(x)
        att = self.avg_att_module(torch.cat([avg, light], 1)) + self.std_att_module(torch.cat([avg_std, light], 1))
        # Average the two sigmoid gates.
        att = att / 2
        return att * x
class DilateBlock(nn.Module):
    """Three parallel 3x3 convs with dilation 1/2/3, concatenated (3x the
    channels) and gated by light-aware channel attention."""
    def __init__(self, channels, light_channels):
        super().__init__()
        self.conv3x3_d1 = nn.Sequential(nn.Conv2d(channels, channels, 3, 1, 1, 1))
        self.conv3x3_d2 = nn.Sequential(nn.Conv2d(channels, channels, 3, 1, 2, 2))
        self.conv3x3_d3 = nn.Sequential(nn.Conv2d(channels, channels, 3, 1, 3, 3))
        self.att_module = DirectAwareAtt(channels*3, light_channels, 16)
        # NOTE: self.modules() recurses, so this Xavier init also re-initializes
        # the 1x1 convs inside att_module's Sequential excitation MLPs.
        for m in self.modules():
            if isinstance(m, nn.Sequential):
                for p in m:
                    if isinstance(p, nn.Conv2d) or isinstance(p, nn.ConvTranspose2d):
                        nn.init.xavier_normal_(p.weight)
                        nn.init.constant_(p.bias, 0.01)
    def forward(self, x, light):
        feat = torch.cat([self.conv3x3_d1(x), self.conv3x3_d2(x), self.conv3x3_d3(x)], 1)
        return self.att_module(feat, light)
class DilateResBlock(nn.Module):
    """Residual block: x + proj(relu(DilateBlock(x, light))).

    The 3x-channel multi-dilation features are projected back to `channels`
    by a single 3x3 conv before the residual addition.
    """
    def __init__(self, channels, light_channels):
        super().__init__()
        self.dilateblock = DilateBlock(channels, light_channels)
        self.block = nn.Sequential(
            nn.ReLU(True),
            nn.Conv2d(channels*3, channels, 3, 1, 1),
        )
        # NOTE: self.modules() recurses, so convs inside the nested
        # dilateblock's Sequentials are Xavier-initialized again here.
        for m in self.modules():
            if isinstance(m, nn.Sequential):
                for p in m:
                    if isinstance(p, nn.Conv2d) or isinstance(p, nn.ConvTranspose2d):
                        nn.init.xavier_normal_(p.weight)
                        nn.init.constant_(p.bias, 0.01)
    def forward(self, x, light):
        res = self.dilateblock(x, light)
        x = x + self.block(res)
        return x
class Bottleneck(nn.Module):
    """Stack of `num` light-conditioned DilateResBlocks applied sequentially.

    Bug fix: the original built the list as ``[DilateResBlock(...)] * num``,
    which registers the SAME module instance `num` times, so all blocks
    shared one set of weights. Each block is now an independent instance,
    matching the comprehension style used for the bottleneck elsewhere in
    this repo. NOTE: this changes the parameter set, so checkpoints trained
    with the shared-weight version will not load unmodified.
    """
    def __init__(self, num=4, channels=48, light_channels=48):
        super().__init__()
        self.module = nn.ModuleList(
            [DilateResBlock(channels, light_channels) for _ in range(num)])
    def forward(self, x, lights):
        # lights: (B, num, light_channels); block i consumes lights[:, i, :].
        for i, m in enumerate(self.module):
            x = m(x, lights[:, i, :])
        return x
class UpConv(nn.Module):
    """Bilinear upsample by `scale`, then a 3x3 conv mapping inc -> outc.

    Bug fix: `scale` was stored but forward() hard-coded ``scale_factor=2``,
    so ``UpConv(..., scale=3)`` silently upsampled by 2. forward() now honors
    ``self.scale`` (default 2, so all existing callers are unaffected).
    NOTE(review): the init loop below only visits nn.Sequential children and
    this module has none, so ``self.conv`` keeps PyTorch's default
    initialization — preserved as-is to avoid changing trained behavior.
    """
    def __init__(self, inc, outc, scale=2):
        super(UpConv, self).__init__()
        self.scale = scale
        self.conv = nn.Conv2d(inc, outc, 3, stride=1, padding=1)
        for m in self.modules():
            if isinstance(m, nn.Sequential):
                for p in m:
                    if isinstance(p, nn.Conv2d) or isinstance(p, nn.ConvTranspose2d):
                        nn.init.xavier_normal_(p.weight)
                        nn.init.constant_(p.bias, 0.01)
    def forward(self, x):
        up = F.interpolate(x, scale_factor=self.scale, mode='bilinear', align_corners=True)
        return self.conv(up)
class RGBEncoder(nn.Module):
    """Three-level RGB feature encoder (features at 1/1, 1/2 and 1/4 size).

    Each `enc` stage halves the spatial size via a `conv_s2` x `conv_s2`
    (4x4), stride-2, pad-`pad0` (1) convolution. In this no-aux variant the
    condition-feature additions (`c_x*`) are disabled (commented out) but
    kept in the signature for interface parity with the conditioned
    version. pre_x2..pre_x4, when given, are features from the previous
    (coarser) cascade stage, upsampled by `scale` and added as skips.

    Returns a *list* [x0, x1, x2] so callers can replace entries in place
    (Any2Any_woaux.forward rewrites index -1 with the bottleneck output).
    """
    def __init__(self, in_channels, channels, filter_size):
        super(RGBEncoder, self).__init__()
        padding = int((filter_size - 1) / 2)
        self.init = nn.Sequential(nn.Conv2d(in_channels, channels, filter_size, stride=1, padding=padding),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding))
        self.enc1 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels, channels, conv_s2, stride=2, padding=pad0),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding),
                                  )
        self.enc2 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels, channels, conv_s2, stride=2, padding=pad0),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding),
                                  )
        # Init Weights: Xavier for convs found inside nn.Sequential children.
        for m in self.modules():
            if isinstance(m, nn.Sequential):
                for p in m:
                    if isinstance(p, nn.Conv2d) or isinstance(p, nn.ConvTranspose2d):
                        nn.init.xavier_normal_(p.weight)
                        nn.init.constant_(p.bias, 0.01)
    def forward(self, input, scale=2, c_x0=None, c_x1=None, c_x2=None, pre_x2=None, pre_x3=None, pre_x4=None):
        ### input
        x0 = self.init(input)
        x0 = x0# + c_x0  (condition features disabled in the no-aux variant)
        if pre_x4 is not None:
            x0 = x0 + F.interpolate(pre_x4, scale_factor=scale, mode='bilinear', align_corners=align_corners)
        x1 = self.enc1(x0) #1/2 input size
        x1 = x1# + c_x1
        if pre_x3 is not None:
            x1 = x1 + F.interpolate(pre_x3, scale_factor=scale, mode='bilinear', align_corners=align_corners)
        x2 = self.enc2(x1) # 1/4 input size
        x2 = x2# + c_x2
        if pre_x2 is not None:
            x2 = x2 + F.interpolate(pre_x2, scale_factor=scale, mode='bilinear', align_corners=align_corners)
        return [x0, x1, x2]
class CondEncoder(nn.Module):
    """Condition encoder producing a 5-level feature pyramid
    (1/1 .. 1/16 of the input size) from the aux input plus a CoordConv
    grid. (Unused by Any2Any_woaux, which drops the aux channels.)

    NOTE(review): `.cuda()` is hard-coded in forward, so CPU execution
    fails; consider building the grid on ``input.device`` — left unchanged
    here.
    """
    def __init__(self, in_channels, channels, filter_size):
        super(CondEncoder, self).__init__()
        # +2 for the normalized x/y coordinate channels added in forward().
        in_channels += 2
        padding = int((filter_size - 1) / 2)
        self.init = nn.Sequential(nn.Conv2d(in_channels, channels, filter_size, stride=1, padding=padding),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding))
        self.enc1 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels, channels, conv_s2, stride=2, padding=pad0),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding), )
        self.enc2 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels, channels, conv_s2, stride=2, padding=pad0),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding), )
        self.enc3 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels, channels, conv_s2, stride=2, padding=pad0),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding), )
        self.enc4 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels, channels, conv_s2, stride=2, padding=pad0),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels, channels, filter_size, stride=1, padding=padding), )
        # Init Weights: Xavier for convs found inside nn.Sequential children.
        for m in self.modules():
            if isinstance(m, nn.Sequential):
                for p in m:
                    if isinstance(p, nn.Conv2d) or isinstance(p, nn.ConvTranspose2d):
                        nn.init.xavier_normal_(p.weight)
                        nn.init.constant_(p.bias, 0.01)
    def forward(self, input, scale=2, pre_x=None):
        ### Build a CoordConv-style normalized grid in [-1, 1].
        b, _, h, w = input.shape
        x_code = torch.Tensor([float(x)/(w-1) for x in range(w)]).float().cuda() * 2 - 1
        y_code = torch.Tensor([float(y)/(h-1) for y in range(h)]).float().cuda() * 2 - 1
        # Default meshgrid indexing ('ij') is relied on; newer torch warns.
        grid_y, grid_x = torch.meshgrid(y_code, x_code)
        grid_y = grid_y.view(1,1,h,w).expand(b,1,h,w)
        grid_x = grid_x.view(1,1,h,w).expand(b,1,h,w)
        input = torch.cat([grid_x, grid_y, input], 1)
        x0 = self.init(input)
        if pre_x is not None:
            x0 = x0 + F.interpolate(pre_x, scale_factor=scale, mode='bilinear', align_corners=align_corners)
        x1 = self.enc1(x0) # 1/2 input size
        x2 = self.enc2(x1) # 1/4 input size
        x3 = self.enc3(x2) # 1/8 input size
        x4 = self.enc4(x3) # 1/16 input size
        # return the pre-activated features
        return x0, x1, x2, x3, x4
class RGBDecoder(nn.Module):
    """Decoder of the RGB U-Net: upsamples 1/4-res features back to full
    resolution and predicts a 3-channel RGB image.

    forward() takes the encoder pyramid `pre_rgbx` = [x0 @1/1, x1 @1/2,
    x2 @1/4] and, optionally, decoder features from the previous (coarser)
    cascade stage (`pre_x2..pre_x4`), which are upsampled x2 and added as
    skips. Returns (x2, x3, x4, output_rgb) so the next cascade stage can
    reuse the intermediate decoder features.

    Fix: optional-tensor checks now use ``is not None`` instead of
    ``!= None`` — identity, not rich comparison, is the correct test for
    "was this argument supplied" and avoids relying on Tensor.__ne__
    fallback behavior.
    """
    def __init__(self, channels, filter_size):
        super(RGBDecoder, self).__init__()
        padding = int((filter_size-1)/2)
        self.dec2 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels//2, channels//2, filter_size, stride=1, padding=padding),
                                  nn.ReLU(True),
                                  UpConv(channels//2, channels//2),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels//2, channels//2, filter_size, stride=1, padding=padding),
                                  )
        self.dec1 = nn.Sequential(nn.ReLU(True),
                                  nn.Conv2d(channels//2, channels//2, filter_size, stride=1, padding=padding),
                                  nn.ReLU(True),
                                  UpConv(channels//2, channels//2),
                                  nn.ReLU(True),
                                  nn.Conv2d(channels//2, channels//2, filter_size, stride=1, padding=padding),
                                  )
        self.prdct = nn.Sequential(nn.ReLU(True),
                                   nn.Conv2d(channels//2, channels//2, filter_size, stride=1, padding=padding),
                                   nn.ReLU(True),
                                   nn.Conv2d(channels//2, 3, filter_size, stride=1, padding=padding))
        # Init Weights: Xavier for convs found directly inside nn.Sequential
        # children (UpConv's inner conv is not in a Sequential and keeps
        # PyTorch's default init — preserved as-is).
        for m in self.modules():
            if isinstance(m, nn.Sequential):
                for p in m:
                    if isinstance(p, nn.Conv2d) or isinstance(p, nn.ConvTranspose2d):
                        nn.init.xavier_normal_(p.weight)
                        nn.init.constant_(p.bias, 0.01)
    def forward(self, pre_rgbx, pre_x2=None, pre_x3=None, pre_x4=None):
        x2 = pre_rgbx[2]
        x1 = pre_rgbx[1]
        x0 = pre_rgbx[0]
        if pre_x2 is not None:
            x2 = x2 + F.interpolate(pre_x2, scale_factor=2, mode='bilinear', align_corners=align_corners)
        x3 = self.dec2(x2)  # 1/2 input size
        if pre_x3 is not None:
            x3 = x3 + F.interpolate(pre_x3, scale_factor=2, mode='bilinear', align_corners=align_corners)
        x4 = self.dec1(x1+x3)  # 1/1 input size
        if pre_x4 is not None:
            x4 = x4 + F.interpolate(pre_x4, scale_factor=2, mode='bilinear', align_corners=align_corners)
        ### prediction
        output_rgb = self.prdct(x4 + x0)
        return x2, x3, x4, output_rgb
class LightProjectionModule(nn.Module):
    """Three-layer MLP projecting a light-probe vector to a feature vector."""

    def __init__(self, in_channels, out_channels, mid_channels=128):
        super(LightProjectionModule, self).__init__()
        layers = [
            nn.Linear(in_channels, mid_channels),
            nn.ReLU(True),
            nn.Linear(mid_channels, mid_channels),
            nn.ReLU(True),
            nn.Linear(mid_channels, out_channels),
        ]
        self.module = nn.Sequential(*layers)

    def forward(self, x):
        """Map a (B, in_channels) tensor to (B, out_channels)."""
        projected = self.module(x)
        return projected
class Any2Any_woaux(nn.Module):
    """Coarse-to-fine relighting network (1/4 -> 1/2 -> 1/1 resolution).

    Each scale has its own RGB encoder/decoder; a light-probe vector is
    projected into per-scale, per-block modulation features injected at
    the encoder bottleneck.  With ``short_connection`` the per-scale
    predictions are residuals added to the (downsampled) input.

    NOTE(review): ``bottleneck2`` and ``bottleneck3`` are constructed but
    never used — ``bottleneck1`` is applied at all three scales below.
    This looks like a bug (or undocumented weight sharing); confirm
    against the training checkpoint before changing it, so it is left
    as-is here.

    Bug fix vs. original: when ``aux_channels == 0`` the variable
    ``input_rgb`` was never bound and ``forward`` crashed with a
    NameError; the input is now used directly in that case.
    """

    def __init__(self, in_channels=3, aux_channels=3, light_probe_channels=18, short_connection=True):
        super(Any2Any_woaux, self).__init__()
        self.short_connection = short_connection
        self.in_channels = in_channels
        self.aux_channels = aux_channels
        self.denc_channels = 48
        self.cenc_channels = 48
        self.light_channels = 48 * 3
        self.block_num = 4
        ddcd_channels = self.denc_channels + self.cenc_channels
        # One lighting feature per (scale, block): 3 scales x block_num blocks.
        self.light_projector = LightProjectionModule(light_probe_channels, 3 * self.block_num * self.light_channels, 1024)
        self.rgb_encoder1 = RGBEncoder(in_channels, self.denc_channels, 3)
        self.bottleneck1 = Bottleneck(self.block_num, self.denc_channels, self.light_channels)
        self.rgb_decoder1 = RGBDecoder(ddcd_channels, 3)
        self.rgb_encoder2 = RGBEncoder(2 * in_channels, self.denc_channels, 3)
        self.bottleneck2 = Bottleneck(self.block_num, self.denc_channels, self.light_channels)
        self.rgb_decoder2 = RGBDecoder(ddcd_channels, 3)
        self.rgb_encoder3 = RGBEncoder(2 * in_channels, self.denc_channels, 3)
        self.bottleneck3 = Bottleneck(self.block_num, self.denc_channels, self.light_channels)
        self.rgb_decoder3 = RGBDecoder(ddcd_channels, 3)

    def forward(self, x, light_prior):
        """Relight ``x[0]`` under ``light_prior``.

        Returns the predictions at full, 1/2 and 1/4 resolution.
        """
        x = x[0]  # callers pass the input wrapped in a one-element sequence
        if self.aux_channels != 0:
            input_rgb, _ = torch.split(x, [self.in_channels, self.aux_channels], 1)
        else:
            input_rgb = x  # fix: previously unbound -> NameError
        light_f = self.light_projector(light_prior)
        light_f = light_f.view(-1, 3, self.block_num, self.light_channels)
        ## for the 1/4 res
        input_rgb14 = F.interpolate(input_rgb, scale_factor=0.25, mode='bilinear', align_corners=align_corners)
        enc_rgb14 = self.rgb_encoder1(input_rgb14, 2)  # enc_rgb [larger -> smaller size]
        enc_rgb14[-1] = self.bottleneck1(enc_rgb14[-1], light_f[:, 0, :, :])
        dcd_rgb14 = self.rgb_decoder1(enc_rgb14)  # dec_rgb [smaller -> larger size]
        ## for the 1/2 res
        input_rgb12 = F.interpolate(input_rgb, scale_factor=0.5, mode='bilinear', align_corners=align_corners)
        ori_pred_rgb14 = dcd_rgb14[3]
        if self.short_connection:
            ori_pred_rgb14 += input_rgb14  # in-place: also mutates dcd_rgb14[3]
        predict_rgb12 = F.interpolate(ori_pred_rgb14, scale_factor=2, mode='bilinear', align_corners=align_corners)
        input_12 = torch.cat((input_rgb12, predict_rgb12), 1)
        enc_rgb12 = self.rgb_encoder2(input_12, 2)
        enc_rgb12[-1] = self.bottleneck1(enc_rgb12[-1], light_f[:, 1, :, :])  # NOTE(review): bottleneck1, not bottleneck2
        dcd_rgb12 = self.rgb_decoder2(enc_rgb12, dcd_rgb14[0], dcd_rgb14[1], dcd_rgb14[2])
        ## for the 1/1 res
        ori_pred_rgb12 = dcd_rgb12[3]
        if self.short_connection:
            ori_pred_rgb12 += input_rgb12
        predict_rgb11 = F.interpolate(ori_pred_rgb12, scale_factor=2, mode='bilinear', align_corners=align_corners)
        input_11 = torch.cat((input_rgb, predict_rgb11), 1)
        enc_rgb11 = self.rgb_encoder3(input_11, 2)
        enc_rgb11[-1] = self.bottleneck1(enc_rgb11[-1], light_f[:, 2, :, :])  # NOTE(review): bottleneck1, not bottleneck3
        dcd_rgb11 = self.rgb_decoder3(enc_rgb11, dcd_rgb12[0], dcd_rgb12[1], dcd_rgb12[2])
        output_rgb11 = dcd_rgb11[3]
        if self.short_connection:
            output_rgb11 += input_rgb
        return output_rgb11, ori_pred_rgb12, ori_pred_rgb14
| 15,796 | 40.680739 | 122 | py |
IAN | IAN-master/network/arch/DPR_arch.py |
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import sys
import numpy as np
import time
# we define Hour Glass network based on the paper
# Stacked Hourglass Networks for Human Pose Estimation
# Alejandro Newell, Kaiyu Yang, and Jia Deng
# the code is adapted from
# https://github.com/umich-vl/pose-hg-train/blob/master/src/models/hg.lua
def conv3X3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution that preserves spatial size."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
# define the network
class BasicBlock(nn.Module):
    """Residual block: two 3x3 convs with a 1x1 projection shortcut.

    ``batchNorm_type`` selects the normalization: 0 -> BatchNorm2d,
    anything else -> InstanceNorm2d.  When input and output channel
    counts differ, the identity path goes through a 1x1 convolution.
    (``stride`` / ``downsample`` are accepted for signature compatibility
    but unused, as in the original.)
    """

    def __init__(self, inplanes, outplanes, batchNorm_type=0, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.inplanes = inplanes
        self.outplanes = outplanes
        self.conv1 = conv3X3(inplanes, outplanes, 1)
        self.conv2 = conv3X3(outplanes, outplanes, 1)
        norm_layer = nn.BatchNorm2d if batchNorm_type == 0 else nn.InstanceNorm2d
        self.bn1 = norm_layer(outplanes)
        self.bn2 = norm_layer(outplanes)
        self.shortcuts = nn.Conv2d(inplanes, outplanes, kernel_size=1, stride=1, bias=False)

    def forward(self, x):
        # Project the identity only when the channel count changes.
        residual = x if self.inplanes == self.outplanes else self.shortcuts(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return F.relu(out + residual)
class HourglassBlock(nn.Module):
    """One recursion level of an hourglass network.

    The lower path downsamples, applies ``low1``, recurses into
    ``middleNet`` (another HourglassBlock or the innermost lighting net),
    applies ``low2`` and upsamples; the upper path is a residual skip.
    The middle network's extra outputs (bottleneck features and predicted
    lighting) are threaded back up unchanged.
    """

    def __init__(self, inplane, mid_plane, middleNet):
        super(HourglassBlock, self).__init__()
        self.skipLayer = True
        # upper (skip) branch
        self.upper = BasicBlock(inplane, inplane, batchNorm_type=1)
        # lower (recursive) branch
        self.downSample = nn.MaxPool2d(kernel_size=2, stride=2)
        self.upSample = nn.Upsample(scale_factor=2, mode='nearest')
        self.low1 = BasicBlock(inplane, mid_plane)
        self.middle = middleNet
        self.low2 = BasicBlock(mid_plane, inplane, batchNorm_type=1)

    def forward(self, x, light, count, skip_count):
        """``count`` is the current depth; skips are only added once
        ``count`` reaches ``skip_count`` (useful for analysis)."""
        skip = self.upper(x)
        feat = self.low1(self.downSample(x))
        feat, out_feat, out_middle = self.middle(feat, light, count + 1, skip_count)
        feat = self.upSample(self.low2(feat))
        if self.skipLayer and count >= skip_count:
            feat = feat + skip
        return feat, out_feat, out_middle
class lightingNet(nn.Module):
    """Innermost hourglass module that swaps the lighting code of a feature map.

    The first ``ncInput`` channels of ``innerFeat`` carry lighting
    information.  The module (1) predicts the source lighting from the
    spatial average of those channels and (2) overwrites them *in place*
    with features decoded from ``target_light``.
    """

    def __init__(self, ncInput, ncOutput, ncMiddle):
        super(lightingNet, self).__init__()
        self.ncInput = ncInput
        self.ncOutput = ncOutput
        self.ncMiddle = ncMiddle
        # 1x1 convolutions act as per-pixel fully connected layers.
        self.predict_FC1 = nn.Conv2d(self.ncInput, self.ncMiddle, kernel_size=1, stride=1, bias=False)
        self.predict_relu1 = nn.PReLU()
        self.predict_FC2 = nn.Conv2d(self.ncMiddle, self.ncOutput, kernel_size=1, stride=1, bias=False)
        self.post_FC1 = nn.Conv2d(self.ncOutput, self.ncMiddle, kernel_size=1, stride=1, bias=False)
        self.post_relu1 = nn.PReLU()
        self.post_FC2 = nn.Conv2d(self.ncMiddle, self.ncInput, kernel_size=1, stride=1, bias=False)
        self.post_relu2 = nn.ReLU()  # keep the injected feature non-negative

    def forward(self, innerFeat, target_light, foo, foo2):
        lighting_part = innerFeat[:, 0:self.ncInput, :, :]
        _, _, row, col = lighting_part.shape
        # Predict the source lighting from the pooled lighting channels
        # (must happen before the in-place overwrite below).
        pooled = lighting_part.mean(dim=(2, 3), keepdim=True)
        light = self.predict_FC2(self.predict_relu1(self.predict_FC1(pooled)))
        # Decode the target lighting and broadcast it spatially.
        upFeat = self.post_relu1(self.post_FC1(target_light))
        upFeat = self.post_relu2(self.post_FC2(upFeat))
        upFeat = upFeat.repeat((1, 1, row, col))
        innerFeat[:, 0:self.ncInput, :, :] = upFeat
        return innerFeat, innerFeat[:, self.ncInput:, :, :], light
class HourglassNet(nn.Module):
    """Four-level hourglass with a lighting network at the bottleneck.

    The innermost ``lightingNet`` both estimates the source lighting and
    injects the target lighting; this wrapper only runs the recursion
    (the pre/post convolution layers are constructed for checkpoint
    compatibility but are not used in ``forward``).
    """

    def __init__(self, baseFilter=16, gray=True):
        super(HourglassNet, self).__init__()
        self.ncLight = 27  # number of channels for input to lighting network
        self.baseFilter = baseFilter
        # number of channels for output of lighting network
        # (gray: 1 channel -> 9 SH coefficients; color: 3 channels -> 27)
        if gray:
            self.ncOutLight = 9
        else:
            self.ncOutLight = 27
        self.ncPre = self.baseFilter  # channels for pre-convolution
        # channel widths per hourglass level (innermost adds the light channels)
        self.ncHG3 = self.baseFilter
        self.ncHG2 = 2 * self.baseFilter
        self.ncHG1 = 4 * self.baseFilter
        self.ncHG0 = 8 * self.baseFilter + self.ncLight
        self.pre_conv = nn.Conv2d(1, self.ncPre, kernel_size=5, stride=1, padding=2)
        self.pre_bn = nn.BatchNorm2d(self.ncPre)
        self.light = lightingNet(self.ncLight, self.ncOutLight, 128)
        self.HG0 = HourglassBlock(self.ncHG1, self.ncHG0, self.light)
        self.HG1 = HourglassBlock(self.ncHG2, self.ncHG1, self.HG0)
        self.HG2 = HourglassBlock(self.ncHG3, self.ncHG2, self.HG1)
        self.HG3 = HourglassBlock(self.ncPre, self.ncHG3, self.HG2)
        self.conv_1 = nn.Conv2d(self.ncPre, self.ncPre, kernel_size=3, stride=1, padding=1)
        self.bn_1 = nn.BatchNorm2d(self.ncPre)
        self.conv_2 = nn.Conv2d(self.ncPre, self.ncPre, kernel_size=1, stride=1, padding=0)
        self.bn_2 = nn.BatchNorm2d(self.ncPre)
        self.conv_3 = nn.Conv2d(self.ncPre, self.ncPre, kernel_size=1, stride=1, padding=0)
        self.bn_3 = nn.BatchNorm2d(self.ncPre)
        self.output = nn.Conv2d(self.ncPre, 1, kernel_size=1, stride=1, padding=0)

    def forward(self, x, target_light, skip_count=4, oriImg=None):
        # Recurse through the hourglass; the bottleneck swaps the lighting.
        feat, out_feat, out_light = self.HG3(x, target_light, 0, skip_count)
        # During training the original image is also pushed through the same
        # hourglass so its bottleneck features can be supervised.
        out_feat_ori = None
        if oriImg is not None:
            _, out_feat_ori, _ = self.HG3(oriImg, target_light, 0, skip_count)
        return out_feat, out_light, out_feat_ori, feat
class DPRNet(nn.Module):
    """Deep Portrait Relighting wrapper around a 512-level HourglassNet.

    Pre-convolves and downsamples the RGB input, runs the shared hourglass
    (which swaps source lighting for ``target_light``), then upsamples and
    decodes back to an RGB image in [-1, 1].

    Fix vs. original: ``oriImg != None`` compared a tensor to ``None``
    with ``!=`` (fragile for tensors); replaced with ``is not None``.
    """

    def __init__(self, baseFilter=16, gray=True):
        super(DPRNet, self).__init__()
        self.model_512 = HourglassNet(baseFilter, gray)
        self.baseFilter = baseFilter
        self.ncPre = self.baseFilter  # number of channels for pre-convolution
        self.pre_conv = nn.Conv2d(3, self.ncPre, kernel_size=5, stride=1, padding=2)
        self.pre_bn = nn.BatchNorm2d(self.ncPre)
        self.downSample = nn.MaxPool2d(kernel_size=2, stride=2)
        self.upSample = nn.Upsample(scale_factor=2, mode='nearest')
        self.conv_1 = nn.Conv2d(self.ncPre, self.ncPre, kernel_size=3, stride=1, padding=1)
        self.bn_1 = nn.BatchNorm2d(self.ncPre)
        self.conv_2 = nn.Conv2d(self.ncPre, self.ncPre, kernel_size=1, stride=1, padding=0)
        self.bn_2 = nn.BatchNorm2d(self.ncPre)
        self.conv_3 = nn.Conv2d(self.ncPre, self.ncPre, kernel_size=1, stride=1, padding=0)
        self.bn_3 = nn.BatchNorm2d(self.ncPre)
        self.output = nn.Conv2d(self.ncPre, 3, kernel_size=1, stride=1, padding=0)

    def forward(self, x, target_light, skip_count=4, oriImg=None):
        x = x[:, :3, :, :]  # keep only the RGB channels
        if oriImg is not None:
            oriImg = oriImg[:, :3, :, :]
        feat = self.pre_conv(x)
        feat = F.relu(self.pre_bn(feat))
        feat = self.downSample(feat)
        if oriImg is not None:
            # Encode the original image with the same stem so its bottleneck
            # features can be supervised during training.
            feat_ori = self.pre_conv(oriImg)
            feat_ori = F.relu(self.pre_bn(feat_ori))
            oriImg = self.downSample(feat_ori)
        out_feat, out_light, out_feat_ori, feat = self.model_512(feat, target_light, skip_count, oriImg)
        feat = self.upSample(feat)
        feat = F.relu(self.bn_1(self.conv_1(feat)))
        feat = F.relu(self.bn_2(self.conv_2(feat)))
        feat = F.relu(self.bn_3(self.conv_3(feat)))
        out_img = self.output(feat)
        out_img = torch.sigmoid(out_img)
        out_img = out_img * 2 - 1  # map [0, 1] -> [-1, 1]
        return out_img, out_feat, out_light, out_feat_ori
IAN | IAN-master/network/loss/losses.py | import math
import torch
from torch import autograd as autograd
from torch import nn as nn
from torch.nn import functional as F
from network.arch.vgg_arch import VGGFeatureExtractor
from network.loss.loss_utils import weighted_loss, ssim, create_window, rgb2gray
_reduction_modes = ['none', 'mean', 'sum']
@weighted_loss
def l1_loss(pred, target):
    """Element-wise L1 distance; weighting/reduction are added by @weighted_loss."""
    return (pred - target).abs()
@weighted_loss
def mse_loss(pred, target):
    """Element-wise squared error; weighting/reduction are added by @weighted_loss."""
    diff = pred - target
    return diff * diff
@weighted_loss
def charbonnier_loss(pred, target, eps=1e-12):
    """Element-wise Charbonnier distance sqrt(diff^2 + eps), a smooth L1 variant."""
    diff = pred - target
    return torch.sqrt(diff * diff + eps)
class L1Loss(nn.Module):
    """L1 (mean absolute error, MAE) loss.

    Args:
        loss_weight (float): Loss weight for L1 loss. Default: 1.0.
        reduction (str): Specifies the reduction to apply to the output.
            Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
    """

    def __init__(self, loss_weight=1.0, reduction='mean'):
        super(L1Loss, self).__init__()
        # Validate against the module-level list so the check and the
        # error message cannot drift apart.
        if reduction not in _reduction_modes:
            raise ValueError(f'Unsupported reduction mode: {reduction}. '
                             f'Supported ones are: {_reduction_modes}')
        self.loss_weight = loss_weight
        self.reduction = reduction

    def forward(self, pred, target, weight=None, **kwargs):
        """
        Args:
            pred (Tensor): of shape (N, C, H, W). Predicted tensor.
            target (Tensor): of shape (N, C, H, W). Ground truth tensor.
            weight (Tensor, optional): of shape (N, C, H, W). Element-wise
                weights. Default: None.
        """
        return self.loss_weight * l1_loss(
            pred, target, weight, reduction=self.reduction)
class MSELoss(nn.Module):
    """MSE (L2) loss.

    Args:
        loss_weight (float): Loss weight for MSE loss. Default: 1.0.
        reduction (str): Specifies the reduction to apply to the output.
            Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
    """

    def __init__(self, loss_weight=1.0, reduction='mean'):
        super(MSELoss, self).__init__()
        # Validate against the module-level list so the check and the
        # error message cannot drift apart.
        if reduction not in _reduction_modes:
            raise ValueError(f'Unsupported reduction mode: {reduction}. '
                             f'Supported ones are: {_reduction_modes}')
        self.loss_weight = loss_weight
        self.reduction = reduction

    def forward(self, pred, target, weight=None, **kwargs):
        """
        Args:
            pred (Tensor): of shape (N, C, H, W). Predicted tensor.
            target (Tensor): of shape (N, C, H, W). Ground truth tensor.
            weight (Tensor, optional): of shape (N, C, H, W). Element-wise
                weights. Default: None.
        """
        return self.loss_weight * mse_loss(
            pred, target, weight, reduction=self.reduction)
class CharbonnierLoss(nn.Module):
    """Charbonnier loss (one variant of Robust L1Loss, a differentiable
    variant of L1Loss).

    Described in "Deep Laplacian Pyramid Networks for Fast and Accurate
    Super-Resolution".

    Args:
        loss_weight (float): Loss weight for L1 loss. Default: 1.0.
        reduction (str): Specifies the reduction to apply to the output.
            Supported choices are 'none' | 'mean' | 'sum'. Default: 'mean'.
        eps (float): A value used to control the curvature near zero.
            Default: 1e-12.
    """

    def __init__(self, loss_weight=1.0, reduction='mean', eps=1e-12):
        super(CharbonnierLoss, self).__init__()
        # Validate against the module-level list so the check and the
        # error message cannot drift apart.
        if reduction not in _reduction_modes:
            raise ValueError(f'Unsupported reduction mode: {reduction}. '
                             f'Supported ones are: {_reduction_modes}')
        self.loss_weight = loss_weight
        self.reduction = reduction
        self.eps = eps

    def forward(self, pred, target, weight=None, **kwargs):
        """
        Args:
            pred (Tensor): of shape (N, C, H, W). Predicted tensor.
            target (Tensor): of shape (N, C, H, W). Ground truth tensor.
            weight (Tensor, optional): of shape (N, C, H, W). Element-wise
                weights. Default: None.
        """
        return self.loss_weight * charbonnier_loss(
            pred, target, weight, eps=self.eps, reduction=self.reduction)
class WeightedTVLoss(L1Loss):
    """Weighted total-variation loss (L1 of horizontal/vertical differences).

    Args:
        loss_weight (float): Loss weight. Default: 1.0.

    Bug fix: the original unconditionally sliced ``weight`` and therefore
    crashed with a TypeError when called with the default ``weight=None``;
    ``None`` is now passed through unchanged.
    """

    def __init__(self, loss_weight=1.0):
        super(WeightedTVLoss, self).__init__(loss_weight=loss_weight)

    def forward(self, pred, weight=None):
        if weight is None:
            y_weight = None
            x_weight = None
        else:
            # Crop the weight map the same way the prediction is cropped.
            y_weight = weight[:, :, :-1, :]
            x_weight = weight[:, :, :, :-1]
        y_diff = super(WeightedTVLoss, self).forward(
            pred[:, :, :-1, :], pred[:, :, 1:, :], weight=y_weight)
        x_diff = super(WeightedTVLoss, self).forward(
            pred[:, :, :, :-1], pred[:, :, :, 1:], weight=x_weight)
        loss = x_diff + y_diff
        return loss
class PerceptualLoss(nn.Module):
    """Perceptual loss with commonly used style loss.

    Args:
        layer_weights (dict): The weight for each layer of vgg feature.
            Here is an example: {'conv5_4': 1.}, which means the conv5_4
            feature layer (before relu5_4) will be extracted with weight
            1.0 in calculting losses.
        vgg_type (str): The type of vgg network used as feature extractor.
            Default: 'vgg19'.
        use_input_norm (bool): If True, normalize the input image in vgg.
            Default: True.
        range_norm (bool): If True, norm images with range [-1, 1] to [0, 1].
            Default: False.
        perceptual_weight (float): If `perceptual_weight > 0`, the perceptual
            loss will be calculated and the loss will multiplied by the
            weight. Default: 1.0.
        style_weight (float): If `style_weight > 0`, the style loss will be
            calculated and the loss will multiplied by the weight.
            Default: 0.
        criterion (str): Criterion used for perceptual loss. Default: 'l1'.
    """

    def __init__(self,
                 layer_weights,
                 vgg_type='vgg19',
                 use_input_norm=True,
                 range_norm=False,
                 perceptual_weight=1.0,
                 style_weight=0.,
                 criterion='l1'):
        super(PerceptualLoss, self).__init__()
        self.perceptual_weight = perceptual_weight
        self.style_weight = style_weight
        self.layer_weights = layer_weights
        self.vgg = VGGFeatureExtractor(
            layer_name_list=list(layer_weights.keys()),
            vgg_type=vgg_type,
            use_input_norm=use_input_norm,
            range_norm=range_norm)
        self.criterion_type = criterion
        if self.criterion_type == 'l1':
            self.criterion = torch.nn.L1Loss()
        elif self.criterion_type == 'l2':
            # Bug fix: torch.nn.L2loss does not exist; the original raised
            # AttributeError whenever criterion='l2' was requested.
            self.criterion = torch.nn.MSELoss()
        elif self.criterion_type == 'fro':
            self.criterion = None  # handled with torch.norm in forward
        else:
            raise NotImplementedError(
                f'{criterion} criterion has not been supported.')

    def forward(self, x, gt):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).
            gt (Tensor): Ground-truth tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results.
        """
        # extract vgg features
        x_features = self.vgg(x)
        gt_features = self.vgg(gt.detach())

        # calculate perceptual loss
        if self.perceptual_weight > 0:
            percep_loss = 0
            for k in x_features.keys():
                if self.criterion_type == 'fro':
                    percep_loss += torch.norm(
                        x_features[k] - gt_features[k],
                        p='fro') * self.layer_weights[k]
                else:
                    percep_loss += self.criterion(
                        x_features[k], gt_features[k]) * self.layer_weights[k]
            percep_loss *= self.perceptual_weight
        else:
            percep_loss = None

        # calculate style loss (on Gram matrices of the same features)
        if self.style_weight > 0:
            style_loss = 0
            for k in x_features.keys():
                if self.criterion_type == 'fro':
                    style_loss += torch.norm(
                        self._gram_mat(x_features[k]) -
                        self._gram_mat(gt_features[k]),
                        p='fro') * self.layer_weights[k]
                else:
                    style_loss += self.criterion(
                        self._gram_mat(x_features[k]),
                        self._gram_mat(gt_features[k])) * self.layer_weights[k]
            style_loss *= self.style_weight
        else:
            style_loss = None

        return percep_loss, style_loss

    def _gram_mat(self, x):
        """Calculate Gram matrix.

        Args:
            x (torch.Tensor): Tensor with shape of (n, c, h, w).

        Returns:
            torch.Tensor: Gram matrix.
        """
        n, c, h, w = x.size()
        features = x.view(n, c, w * h)
        features_t = features.transpose(1, 2)
        gram = features.bmm(features_t) / (c * h * w)
        return gram
class GANLoss(nn.Module):
    """GAN loss supporting several adversarial objectives.

    Args:
        gan_type (str): One of 'vanilla', 'lsgan', 'wgan', 'wgan_softplus',
            'hinge'.
        real_label_val (float): Value used for real labels. Default: 1.0.
        fake_label_val (float): Value used for fake labels. Default: 0.0.
        loss_weight (float): Weight applied to generator losses only;
            discriminator losses always use weight 1.0. Default: 1.0.
    """

    def __init__(self,
                 gan_type,
                 real_label_val=1.0,
                 fake_label_val=0.0,
                 loss_weight=1.0):
        super(GANLoss, self).__init__()
        self.gan_type = gan_type
        self.loss_weight = loss_weight
        self.real_label_val = real_label_val
        self.fake_label_val = fake_label_val
        # Map each gan_type to a factory for its criterion.
        builders = {
            'vanilla': nn.BCEWithLogitsLoss,
            'lsgan': nn.MSELoss,
            'wgan': lambda: self._wgan_loss,
            'wgan_softplus': lambda: self._wgan_softplus_loss,
            'hinge': nn.ReLU,
        }
        if self.gan_type not in builders:
            raise NotImplementedError(
                f'GAN type {self.gan_type} is not implemented.')
        self.loss = builders[self.gan_type]()

    def _wgan_loss(self, input, target):
        """WGAN loss: maximize critic output on reals, minimize on fakes.

        Args:
            input (Tensor): Input tensor.
            target (bool): Target label.

        Returns:
            Tensor: wgan loss.
        """
        mean_val = input.mean()
        return -mean_val if target else mean_val

    def _wgan_softplus_loss(self, input, target):
        """WGAN loss with softplus (smooth ReLU approximation).

        In StyleGAN2 this is the logistic loss for the discriminator and
        the non-saturating loss for the generator.

        Args:
            input (Tensor): Input tensor.
            target (bool): Target label.

        Returns:
            Tensor: wgan loss.
        """
        signed = -input if target else input
        return F.softplus(signed).mean()

    def get_target_label(self, input, target_is_real):
        """Return the target label for ``input``.

        For wgan variants the boolean itself is returned; otherwise a
        tensor filled with the real/fake label value.
        """
        if self.gan_type in ('wgan', 'wgan_softplus'):
            return target_is_real
        label_val = (
            self.real_label_val if target_is_real else self.fake_label_val)
        return input.new_ones(input.size()) * label_val

    def forward(self, input, target_is_real, is_disc=False):
        """Compute the GAN loss.

        Args:
            input (Tensor): Network prediction.
            target_is_real (bool): Whether the target is real or fake.
            is_disc (bool): True when computing the discriminator's loss
                (disables ``loss_weight``). Default: False.

        Returns:
            Tensor: GAN loss value.
        """
        if self.gan_type == 'hinge':
            if is_disc:  # hinge-gan discriminator
                signed = -input if target_is_real else input
                loss = self.loss(1 + signed).mean()
            else:  # hinge-gan generator
                loss = -input.mean()
        else:
            target_label = self.get_target_label(input, target_is_real)
            loss = self.loss(input, target_label)
        # loss_weight is always 1.0 for discriminators
        return loss if is_disc else loss * self.loss_weight
class SSIMLoss(nn.Module):
    """1 - SSIM computed on grayscale versions of the inputs.

    The Gaussian window is cached and rebuilt lazily when the channel
    count or dtype of the input changes.
    """

    def __init__(self, loss_weight=1.0, window_size=11, size_average=True, val_range=None):
        super(SSIMLoss, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        self.val_range = val_range
        self.channel = 1
        self.window = create_window(window_size)
        self.loss_weight = loss_weight

    def forward(self, pred, gt):
        pred = rgb2gray(pred)
        gt = rgb2gray(gt)
        channel = pred.size(1)
        if channel == self.channel and self.window.dtype == pred.dtype:
            window = self.window.to(pred.device).type(pred.dtype)
        else:
            # Channel count or dtype changed: rebuild and cache the window.
            window = create_window(self.window_size, channel).to(pred.device).type(pred.dtype)
            self.window = window
            self.channel = channel
        score = ssim(pred, gt, window=window, window_size=self.window_size,
                     size_average=self.size_average)
        return self.loss_weight * (1 - score)
class ColorSSIMLoss(nn.Module):
    """1 - SSIM computed directly on the (multi-channel) color inputs.

    Unlike SSIMLoss there is no grayscale conversion, so SSIM is averaged
    over the color channels.  The window is cached and rebuilt lazily when
    the channel count or dtype changes.
    """

    def __init__(self, loss_weight=1.0, window_size=11, size_average=True, val_range=None):
        super(ColorSSIMLoss, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        self.val_range = val_range
        self.channel = 1
        self.window = create_window(window_size)
        self.loss_weight = loss_weight

    def forward(self, pred, gt):
        channel = pred.size(1)
        if channel == self.channel and self.window.dtype == pred.dtype:
            window = self.window.to(pred.device).type(pred.dtype)
        else:
            # First call with color input lands here (cache starts at 1 channel).
            window = create_window(self.window_size, channel).to(pred.device).type(pred.dtype)
            self.window = window
            self.channel = channel
        score = ssim(pred, gt, window=window, window_size=self.window_size,
                     size_average=self.size_average)
        return self.loss_weight * (1 - score)
class GrayLoss(nn.Module):
    """L1 loss between Gaussian-blurred grayscale versions of two images.

    Bug fix: ``loss_weight`` was accepted but silently ignored by the
    original implementation; it is now stored and applied (default 1.0
    keeps the previous behavior).
    """

    def __init__(self, loss_weight=1.0, window_size=11, size_average=True, val_range=None, reduction='mean'):
        super(GrayLoss, self).__init__()
        self.kernel = create_window(window_size=window_size, channel=1)
        self.padding = (window_size - 1) // 2
        self.L1 = F.l1_loss  # kept for backward compatibility; unused below
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self, pred, gt):
        pred = rgb2gray(pred)
        gt = rgb2gray(gt)
        # Lazily move the (non-parameter) kernel to the input's device.
        if self.kernel.device != pred.device:
            self.kernel = self.kernel.to(pred.device)
        g_pred = F.conv2d(pred, self.kernel, padding=self.padding)
        g_gt = F.conv2d(gt, self.kernel, padding=self.padding)
        return self.loss_weight * l1_loss(g_pred, g_gt, reduction=self.reduction)
class SimpleShadowLoss(nn.Module):
    """L1 loss concentrated on regions where the (binarized) darkness of the
    input and ground truth disagree — i.e. shadow regions that should appear
    or disappear.

    The mask is the symmetric difference of the two thresholded dark maps,
    blurred with a 9x9 Gaussian, and the masked L1 is renormalized by the
    mask mass so the loss magnitude is independent of the shadow area.
    """
    def __init__(self, loss_weight):
        super(SimpleShadowLoss, self).__init__()
        window_size = 9
        # Gaussian kernel used to soften the binary disagreement mask.
        self.kernel = create_window(window_size=window_size, channel=1)
        self.padding = (window_size - 1) // 2
        self.loss_weight = loss_weight
        # Grayscale values below ~4.5/255 count as "dark" (shadow).
        self.threshold = 4.5 / 255.0
        self.cri = nn.L1Loss()
    def forward(self, x, input, gt):
        # x: prediction; input: network input (first 3 channels used); gt: target.
        # Lazily move the non-parameter kernel to the input's device.
        if self.kernel.device != x.device:
            self.kernel = self.kernel.to(x.device)
        # Binarize the detached input/gt grayscale into dark(0)/bright(1) maps
        # (in-place writes are safe: rgb2gray returns fresh tensors).
        input = rgb2gray(input.detach()[:, :3, :, :])
        input[input>=self.threshold] = 1
        input[input<self.threshold] = 0
        gt_gray = rgb2gray(gt.detach())
        gt_gray[gt_gray>=self.threshold] = 1
        gt_gray[gt_gray<self.threshold] = 0
        # XOR of the binary maps: 1 where exactly one of input/gt is bright.
        mask = input * (1- gt_gray) + (1-input) * gt_gray
        mask = F.conv2d(mask, self.kernel, padding=self.padding)
        b, c, h, w = mask.shape
        total_pixel = b * h * w
        # Rescale by total_pixel / mask.sum() so the mean over masked pixels
        # behaves like a mean over the full image.
        # NOTE(review): divides by zero when input and gt agree everywhere
        # (mask all zeros) — presumably never happens in training; confirm.
        return self.loss_weight * self.cri(x * mask, gt * mask) * total_pixel / torch.sum(mask)
class CosineDistLoss(nn.Module):
    """Weighted (batch-averaged) sum of element-wise products of two tensors.

    NOTE(review): despite the name, no normalization is applied, so this is
    a dot product; it equals a cosine similarity only when both inputs are
    already unit-normalized — confirm with callers.
    """

    def __init__(self, loss_weight, reduction='mean'):
        super(CosineDistLoss, self).__init__()
        self.loss_weight = loss_weight
        self.reduction = reduction

    def forward(self, pred, target):
        batch = pred.shape[0]
        total = (pred * target).sum()
        if self.reduction == 'mean':
            total = total / batch
        return self.loss_weight * total
class CELoss(nn.Module):
    """Weighted cross-entropy classification loss."""

    def __init__(self, loss_weight):
        super(CELoss, self).__init__()
        self.loss_weight = loss_weight
        self.cri = nn.CrossEntropyLoss()

    def forward(self, pred, gt):
        """``pred``: (N, C) logits; ``gt``: (N,) class indices."""
        return self.cri(pred, gt) * self.loss_weight
class GradientLoss(nn.Module):
    """Sobel-gradient L1 loss.

    Compares the horizontal and vertical Sobel responses of ``pred`` and
    ``target`` with an (optionally weighted) L1 loss.

    Args:
        loss_weight (float): Loss weight. Default: 1.0.
        reduction (str): 'none' | 'mean' | 'sum'. Default: 'mean'.
    """

    def __init__(self, loss_weight=1.0, reduction='mean'):
        super().__init__()
        self.loss_weight = loss_weight
        self.reduction = reduction
        if self.reduction not in ['none', 'mean', 'sum']:
            raise ValueError(f'Unsupported reduction mode: {self.reduction}. '
                             f'Supported ones are: {_reduction_modes}')

    def forward(self, pred, target, weight=None):
        """
        Args:
            pred (Tensor): of shape (N, C, H, W). Predicted tensor.
            target (Tensor): of shape (N, C, H, W). Ground truth tensor.
            weight (Tensor, optional): of shape (N, C, H, W). Element-wise
                weights. Default: None.
        """
        channels = pred.size(1)
        # Depthwise Sobel kernels, built on the fly so they follow
        # target's device and dtype.
        sobel_x = torch.Tensor([[1, 0, -1], [2, 0, -2], [1, 0, -1]])
        sobel_x = sobel_x.view(1, 1, 3, 3).repeat(channels, 1, 1, 1).to(target)
        sobel_y = torch.Tensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
        sobel_y = sobel_y.view(1, 1, 3, 3).repeat(channels, 1, 1, 1).to(target)

        def _grad(img, kernel):
            # per-channel (grouped) convolution
            return F.conv2d(img, kernel, padding=1, groups=channels)

        loss_x = l1_loss(_grad(pred, sobel_x), _grad(target, sobel_x),
                         weight, reduction=self.reduction)
        loss_y = l1_loss(_grad(pred, sobel_y), _grad(target, sobel_y),
                         weight, reduction=self.reduction)
        return (loss_x + loss_y) * self.loss_weight
def r1_penalty(real_pred, real_img):
    """R1 regularization for the discriminator.

    Penalizes the gradient of the discriminator on real data alone: when
    the generator matches the data distribution and the discriminator is
    zero on the data manifold, this keeps the discriminator from forming
    a non-zero gradient orthogonal to the manifold for free.

    Ref: Eq. 9 in "Which training methods for GANs do actually converge".
    """
    (grad_real,) = autograd.grad(
        outputs=real_pred.sum(), inputs=real_img, create_graph=True)
    per_sample = grad_real.reshape(grad_real.shape[0], -1).pow(2).sum(dim=1)
    return per_sample.mean()
def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01):
    """StyleGAN2 path-length regularization.

    Measures how strongly random image-space perturbations map back to
    latent space and penalizes the deviation from a running mean path
    length (updated with exponential ``decay``).

    Returns:
        (penalty, mean detached path length, detached updated running mean)
    """
    _, _, height, width = fake_img.shape
    noise = torch.randn_like(fake_img) / math.sqrt(height * width)
    (grad,) = autograd.grad(
        outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True)
    # latents are (batch, n_latent, dim): norm over dim, mean over n_latent
    path_lengths = grad.pow(2).sum(2).mean(1).sqrt()
    path_mean = mean_path_length + decay * (
        path_lengths.mean() - mean_path_length)
    path_penalty = (path_lengths - path_mean).pow(2).mean()
    return path_penalty, path_lengths.detach().mean(), path_mean.detach()
def gradient_penalty_loss(discriminator, real_data, fake_data, weight=None):
"""Calculate gradient penalty for wgan-gp.
Args:
discriminator (nn.Module): Network for the discriminator.
real_data (Tensor): Real input data.
fake_data (Tensor): Fake input data.
weight (Tensor): Weight tensor. Default: None.
Returns:
Tensor: A tensor for gradient penalty.
"""
batch_size = real_data.size(0)
alpha = real_data.new_tensor(torch.rand(batch_size, 1, 1, 1))
# interpolate between real_data and fake_data
interpolates = alpha * real_data + (1. - alpha) * fake_data
interpolates = autograd.Variable(interpolates, requires_grad=True)
disc_interpolates = discriminator(interpolates)
gradients = autograd.grad(
outputs=disc_interpolates,
inputs=interpolates,
grad_outputs=torch.ones_like(disc_interpolates),
create_graph=True,
retain_graph=True,
only_inputs=True)[0]
if weight is not None:
gradients = gradients * weight
gradients_penalty = ((gradients.norm(2, dim=1) - 1)**2).mean()
if weight is not None:
gradients_penalty /= torch.mean(weight)
return gradients_penalty | 21,903 | 36.635739 | 131 | py |
IAN | IAN-master/network/loss/loss_utils.py | import functools
from torch.nn import functional as F
import torch
from math import exp
def gaussian(window_size, sigma):
    """Return a 1-D Gaussian kernel of length ``window_size`` that sums to 1."""
    center = window_size // 2
    weights = torch.Tensor(
        [exp(-((i - center) ** 2) / float(2 * sigma ** 2)) for i in range(window_size)])
    return weights / weights.sum()
def create_window(window_size, channel=1):
    """Return a (channel, 1, window_size, window_size) 2-D Gaussian window (sigma=1.5),
    suitable for depthwise conv2d."""
    kernel_1d = gaussian(window_size, 1.5).unsqueeze(1)
    # Outer product of the 1-D kernel with itself gives the 2-D kernel.
    kernel_2d = (kernel_1d @ kernel_1d.t()).float().unsqueeze(0).unsqueeze(0)
    return kernel_2d.expand(channel, 1, window_size, window_size).contiguous()
def ssim(img1, img2, window_size=11, window=None, size_average=True, full=False, val_range=None):
    """Compute SSIM between two image batches of shape (N, C, H, W).

    If ``window`` is None a Gaussian window is built; if ``val_range`` is
    None the dynamic range L is guessed from the data.  Returns the mean
    SSIM (scalar if ``size_average`` else per-sample), plus the contrast
    sensitivity when ``full`` is True.
    """
    if val_range is not None:
        L = val_range
    else:
        # Guess the dynamic range: 255 for uint-style images, otherwise
        # 1 (sigmoid output) or 2 (tanh output in [-1, 1]).
        max_val = 255 if torch.max(img1) > 128 else 1
        min_val = -1 if torch.min(img1) < -0.5 else 0
        L = max_val - min_val

    _, channel, height, width = img1.size()
    if window is None:
        real_size = min(window_size, height, width)
        window = create_window(real_size, channel=channel).to(img1.device)

    pad = 0
    mu1 = F.conv2d(img1, window, padding=pad, groups=channel)
    mu2 = F.conv2d(img2, window, padding=pad, groups=channel)
    mu1_sq = mu1 * mu1
    mu2_sq = mu2 * mu2
    mu1_mu2 = mu1 * mu2

    sigma1_sq = F.conv2d(img1 * img1, window, padding=pad, groups=channel) - mu1_sq
    sigma2_sq = F.conv2d(img2 * img2, window, padding=pad, groups=channel) - mu2_sq
    sigma12 = F.conv2d(img1 * img2, window, padding=pad, groups=channel) - mu1_mu2

    C1 = (0.01 * L) ** 2
    C2 = (0.03 * L) ** 2

    v1 = 2.0 * sigma12 + C2
    v2 = sigma1_sq + sigma2_sq + C2
    cs = torch.mean(v1 / v2)  # contrast sensitivity

    ssim_map = ((2 * mu1_mu2 + C1) * v1) / ((mu1_sq + mu2_sq + C1) * v2)
    ret = ssim_map.mean() if size_average else ssim_map.mean(1).mean(1).mean(1)
    return (ret, cs) if full else ret
def rgb2gray(img):
    """Convert a (N, 3, H, W) RGB batch to (N, 1, H, W) luminance
    (ITU-R BT.601 weights).  Inputs that are not 3-channel are returned
    unchanged."""
    if img.size(1) != 3:
        return img
    r = img[:, 0, :, :]
    g = img[:, 1, :, :]
    b = img[:, 2, :, :]
    gray = 0.299 * r + 0.587 * g + 0.114 * b
    return gray.unsqueeze(1)
def reduce_loss(loss, reduction):
    """Reduce an element-wise loss tensor.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): One of 'none', 'mean', 'sum'.

    Returns:
        Tensor: Reduced loss tensor.
    """
    # Validate the reduction string the same way PyTorch does
    # (raises for anything unsupported).
    reduction_enum = F._Reduction.get_enum(reduction)
    # none: 0, elementwise_mean: 1, sum: 2
    if reduction_enum == 0:
        return loss
    if reduction_enum == 1:
        return loss.mean()
    return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean'):
    """Apply an element-wise weight to *loss*, then reduce it.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor | None): Element-wise weights; its channel dim must be
            1 or match the loss. Default: None.
        reduction (str): 'none', 'mean' or 'sum'. Default: 'mean'.

    Returns:
        Tensor: Loss values (reduced unless reduction is 'none').
    """
    if weight is not None:
        assert weight.dim() == loss.dim()
        assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
        loss = loss * weight

    # Unweighted loss (any reduction) and 'sum' go through the plain reducer.
    if weight is None or reduction == 'sum':
        return reduce_loss(loss, reduction)
    if reduction == 'mean':
        # Weighted mean: normalise by the total weight mass. A single-channel
        # weight is counted once per loss channel.
        if weight.size(1) > 1:
            denom = weight.sum()
        else:
            denom = weight.sum() * loss.size(1)
        return loss.sum() / denom
    # Weighted + 'none': hand back the element-wise weighted loss.
    return loss
def weighted_loss(loss_func):
    """Turn an element-wise loss function into a weighted, reducible one.

    The wrapped function must have the signature ``loss_func(pred, target,
    **kwargs)`` and return an un-reduced element-wise loss. The decorated
    version gains ``weight=None`` and ``reduction='mean'`` arguments, i.e.
    ``loss_func(pred, target, weight=None, reduction='mean', **kwargs)``.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.5000)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, reduction='sum')
    tensor(3.)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean', **kwargs):
        # Element-wise loss first, then delegate weighting/reduction.
        elementwise = loss_func(pred, target, **kwargs)
        return weight_reduce_loss(elementwise, weight, reduction)

    return wrapper
return wrapper | 5,179 | 30.975309 | 103 | py |
IAN | IAN-master/base_utils/utils.py | import numpy as np
import cv2
import random
import torch
import time
import os
from PIL import Image
from tqdm import tqdm
def get_time_str():
    """Return the current local time formatted as ``YYYYmmdd_HHMMSS``."""
    now = time.localtime()
    return time.strftime('%Y%m%d_%H%M%S', now)
def set_random_seed(seed):
    """Seed the Python, NumPy and PyTorch (CPU + all CUDA devices) RNGs."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed,
                   torch.cuda.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
def load_depth(path):
    '''
    params:
        path: str
    output:
        depth_map: ndarray(float32) rescaled from [0, 1] to [-1, 1]
    description:
        Load a depth map stored as .npy. Supports both a plain array and a
        0-d object array wrapping a dict with a 'normalized_depth' entry.
    '''
    raw = np.load(path, allow_pickle=True)
    if raw.ndim == 0:
        # 0-d object array: unwrap the pickled dict first.
        raw = raw.item()['normalized_depth']
    return raw * 2 - 1
def stitch_images(inputs, *outputs, img_per_row=2):
    """Paste input tensors and their outputs side by side into one PIL image.

    NOTE(review): an identical definition appears again later in this module;
    the later binding is the one that survives import.

    Args:
        inputs: sequence of image tensors; each is moved to CPU and cast to
            uint8 before pasting (assumes HxWxC layout — TODO confirm).
        *outputs: additional sequences, one column group per sequence.
        img_per_row (int): how many input images per row.

    Returns:
        PIL.Image.Image: the stitched grid.
    """
    gap = 5
    columns = len(outputs) + 1
    # NOTE(review): names look swapped — .shape yields (rows, cols); verify.
    width, height = inputs[0][:, :, 0].shape
    img = Image.new('RGB', (width * img_per_row * columns + gap * (img_per_row - 1), height * int(len(inputs) / img_per_row)))
    images = [inputs, *outputs]
    for ix in range(len(inputs)):
        xoffset = int(ix % img_per_row) * width * columns + int(ix % img_per_row) * gap
        yoffset = int(ix / img_per_row) * height
        for cat in range(len(images)):
            im = np.array((images[cat][ix]).cpu()).astype(np.uint8).squeeze()
            im = Image.fromarray(im)
            img.paste(im, (xoffset + cat * width, yoffset))
    return img
def mkdir_and_rename(path):
    """Create *path*; if it already exists, archive the old directory first.

    An existing directory is renamed with an ``_archived_<timestamp>`` suffix
    before a fresh one is created.

    Args:
        path (str): Folder path.
    """
    if os.path.exists(path):
        archived = path + '_archived_' + get_time_str()
        print(f'Path already exists. Rename it to {archived}', flush=True)
        os.rename(path, archived)
    os.makedirs(path, exist_ok=True)
def make_exp_dirs(opt):
    """Make dirs for experiments.

    The experiments/results root is (re)created via ``mkdir_and_rename``;
    every other path entry is created unless its key refers to pretrained
    weights, strict-load flags, or resume state.
    """
    print('mkdir')
    remaining = opt['path'].copy()
    root_key = 'experiments_root' if opt['is_train'] else 'results_root'
    mkdir_and_rename(remaining.pop(root_key))
    print(remaining)
    skip_markers = ('strict_load', 'pretrain_network', 'resume')
    for key, path in remaining.items():
        print(path)
        if not any(marker in key for marker in skip_markers):
            os.makedirs(path, exist_ok=True)
def check_resume(opt, resume_iter):
    """Check resume states and pretrain_network paths.

    When a resume state is present, any explicit pretrain paths are ignored
    (with a warning) and each ``pretrain_network_<name>`` entry is pointed at
    ``<models>/net_<name>_<resume_iter>.pth`` unless listed in
    ``opt['path']['ignore_resume_networks']``.

    Args:
        opt (dict): Options; top-level keys starting with ``network_`` name
            the networks, and ``opt['path']`` holds the path config.
        resume_iter (int): Resume iteration.
    """
    # Bug fix: get_root_logger was never imported into this module, so the
    # original raised NameError on every call. Fetch the project logger
    # ('relighting' is the default name used by base_utils/logger.py) through
    # the stdlib logging registry instead.
    import logging
    logger = logging.getLogger('relighting')
    if opt['path']['resume_state']:
        # get all the networks
        networks = [key for key in opt.keys() if key.startswith('network_')]
        flag_pretrain = False
        for network in networks:
            if opt['path'].get(f'pretrain_{network}') is not None:
                flag_pretrain = True
        if flag_pretrain:
            logger.warning(
                'pretrain_network path will be ignored during resuming.')
        # set pretrained model paths
        for network in networks:
            name = f'pretrain_{network}'
            basename = network.replace('network_', '')
            if opt['path'].get('ignore_resume_networks') is None or (
                    basename not in opt['path']['ignore_resume_networks']):
                opt['path'][name] = os.path.join(
                    opt['path']['models'], f'net_{basename}_{resume_iter}.pth')
                logger.info(f"Set {name} to {opt['path'][name]}")
def gen_depth_img(depth_map, save_path)->None:
    '''
    params:
        depth_map: ndarray(float32), values expected in [0, 1]
        save_path: str
    description:
        Save a depth map as an 8-bit grayscale image at "save_path"
        (dark->bright corresponds to near->remote).
    '''
    as_uint8 = (depth_map * 255.).astype(np.uint8)
    cv2.imwrite(save_path, as_uint8)
def cal_normal(d_im):
    '''
    params:
        d_im: ndarray(float32)
    description:
        Estimate per-pixel unit surface normals from a depth map (input
        needs to multiply 2^16-1 before cal_normal). The depth gradient
        supplies the x/y components; z is fixed at 1 before normalising.
    '''
    grad_y, grad_x = np.gradient(d_im)
    normal = np.dstack((-grad_x, -grad_y, np.ones_like(d_im)))
    lengths = np.linalg.norm(normal, axis=2)
    normal /= lengths[:, :, np.newaxis]
    return normal
def col_stitch(imgs):
    """Horizontally stitch *imgs* into one array (concatenate on axis 1)."""
    stitched = np.concatenate(imgs, axis=1)
    return stitched
def stitch_images(inputs, *outputs, img_per_row=2):
    """Paste input tensors and their outputs side by side into one PIL image.

    NOTE(review): this is a byte-for-byte duplicate of a definition earlier
    in this module; this later binding is the one in effect — consider
    removing one copy.

    Args:
        inputs: sequence of image tensors; each is moved to CPU and cast to
            uint8 before pasting (assumes HxWxC layout — TODO confirm).
        *outputs: additional sequences, one column group per sequence.
        img_per_row (int): how many input images per row.

    Returns:
        PIL.Image.Image: the stitched grid.
    """
    gap = 5
    columns = len(outputs) + 1
    # NOTE(review): names look swapped — .shape yields (rows, cols); verify.
    width, height = inputs[0][:, :, 0].shape
    img = Image.new('RGB', (width * img_per_row * columns + gap * (img_per_row - 1), height * int(len(inputs) / img_per_row)))
    images = [inputs, *outputs]
    for ix in range(len(inputs)):
        xoffset = int(ix % img_per_row) * width * columns + int(ix % img_per_row) * gap
        yoffset = int(ix / img_per_row) * height
        for cat in range(len(images)):
            im = np.array((images[cat][ix]).cpu()).astype(np.uint8).squeeze()
            im = Image.fromarray(im)
            img.paste(im, (xoffset + cat * width, yoffset))
    return img
def get_mask(input_dir, output_dir):
    '''
    params:
        input_dir(str): depth map directory
        output_dir(str): saving directory
    output:
        0-1 mask indicates where is valid, saved as <name>.npy in output_dir
    '''
    input_paths = os.listdir(input_dir)
    for path in tqdm(input_paths):
        full_path = os.path.join(input_dir, path)
        depth = load_depth(full_path)
        # Bug fix: the original did `depth.astype(np.uint8)` and discarded the
        # result (astype is not in-place), saving a float array that also kept
        # negative values — not the documented 0/1 mask. Build the mask
        # explicitly: positive depth -> 1, everything else -> 0.
        mask = (depth > 0).astype(np.uint8)
        np.save(os.path.join(output_dir, path[:-4] + '.npy'), mask)
# def find_ref(depth, surface_normal, input, target):
def cal_psnr_ssim(input, gt):
    """Return (PSNR, SSIM) of *input* against the ground truth *gt*.

    Uses the legacy ``skimage.measure`` API; SSIM is computed with
    ``multichannel=True``.
    """
    from skimage.measure import compare_psnr, compare_ssim
    psnr = compare_psnr(input, gt)
    ssim_val = compare_ssim(input, gt, multichannel=True)
    return psnr, ssim_val
if __name__ == '__main__':
    # Ad-hoc visualisation experiment: build a horizontal [0, 1) position
    # encoding and overlay it on a result image as a heatmap.
    x_code = np.array([float(x)/1024 for x in range(1024)])
    x_code = x_code[np.newaxis, :]
    x_code = np.concatenate([x_code]*1024, 0)
    from cv2 import imread, imwrite
    # NOTE(review): paint_heatmap is not defined or imported anywhere in this
    # module — running this block raises NameError. The path below is also a
    # machine-specific absolute path.
    heat_map = paint_heatmap(imread('/home/paper99/media/Codes/NTIRE2021/results/HGNet_pos_enc/visualization/VIDIT/Image311_HGNet_pos_enc.png'), x_code)
    imwrite('./test.png', heat_map)
    # Dead code below: an earlier PSNR/SSIM comparison run kept for reference.
    # import PIL.Image as Image
    # from imageio import imread
    # inputdir1 = '/home/paper99/media/Codes/Relighting-With-Depth/results/largedataDepthSurfacePyrSFTWDRN_L1/visualization/VIDIT/'
    # suffix1 = '_largedataDepthSurfacePyrSFTWDRN_L1.png'
    # inputdir2 = '/home/paper99/media/Codes/NTIRE2021/results/HGNet/visualization/VIDIT/'
    # suffix2 = '_HGNet.png'
    # gt_dir = '/media/ssd/CVPRW/VIDIT/validation/target/'
    # print('WDRN\tHGNet')
    # for i in range(300, 345):
    #     print(f'Image{i}:')
    #     input1 = imread(inputdir1+f'Image{i}'+suffix1)
    #     input2 = imread(inputdir2+f'Image{i}'+suffix2)
    #     gt = imread(gt_dir+f'Image{i}'+'.png')[:, :, :3]
    #     psnr1, ssim1 = cal_psnr_ssim(input1, gt)
    #     psnr2, ssim2 = cal_psnr_ssim(input2, gt)
    #     print(f'psnr: {psnr1}, {psnr2}')
    #     print(f'ssim: {ssim1}, {ssim2}')
    # # get_mask('/home/paper99/media/Datasets/VIDIT/unzip_any2any/depth', '/home/paper99/media/Datasets/VIDIT/unzip_any2any/mask')
    # # flip_img('/home/paper99/media/Datasets/VIDIT/unzip_any2any/raw_input', '/home/paper99/media/Datasets/VIDIT/unzip_any2any/target', '_4500_W.png')
    # # flip_depth('/home/paper99/media/Datasets/VIDIT/unzip_any2any/raw_depth', '/home/paper99/media/Datasets/VIDIT/unzip_any2any/depth')
| 7,695 | 32.754386 | 152 | py |
IAN | IAN-master/base_utils/logger.py | import datetime
import logging
import time
import os
def get_env_info():
    """Return a human-readable string with the PyTorch/TorchVision versions."""
    import torch
    import torchvision
    info = ('\nVersion Information: '
            f'\n\tPyTorch: {torch.__version__}'
            f'\n\tTorchVision: {torchvision.__version__}')
    return info
def init_tb_logger(log_dir):
    """Create a TensorBoard ``SummaryWriter`` that logs into *log_dir*."""
    from torch.utils.tensorboard import SummaryWriter
    writer = SummaryWriter(log_dir=log_dir)
    return writer
def get_root_logger(logger_name='relighting',
                    log_level=logging.INFO,
                    log_file=None):
    """Get the root logger.

    The logger is initialised on first use: ``basicConfig`` attaches a
    StreamHandler, and when *log_file* is given a FileHandler (mode 'w') is
    added as well. Later calls return the already-configured logger untouched.

    Args:
        logger_name (str): root logger name. Default: 'relighting'.
        log_level (int): level used for basicConfig and the file handler.
        log_file (str | None): The log filename. If specified, a FileHandler
            will be added to the root logger.

    Returns:
        logging.Logger: The root logger.
    """
    logger = logging.getLogger(logger_name)
    if logger.hasHandlers():
        # Already initialised (directly or via an ancestor) — reuse as-is.
        return logger

    fmt = '%(asctime)s %(levelname)s: %(message)s'
    logging.basicConfig(format=fmt, level=log_level)
    if log_file is not None:
        file_handler = logging.FileHandler(log_file, 'w')
        file_handler.setFormatter(logging.Formatter(fmt))
        file_handler.setLevel(log_level)
        logger.addHandler(file_handler)
    return logger
class MessageLogger():
    """Message logger for printing.

    Args:
        opt (dict): Config. It contains the following keys:
            name (str): Exp name.
            logger (dict): Contains 'print_freq' (str) for logger interval.
            train (dict): Contains 'total_iter' (int) for total iters.
            use_tb_logger (bool): Use tensorboard logger.
        start_iter (int): Start iter. Default: 1.
        tb_logger (obj:`tb_logger`): Tensorboard logger. Default: None.
    """

    def __init__(self, opt, start_iter=1, tb_logger=None):
        self.exp_name = opt['name']
        self.interval = opt['logger']['print_freq']
        self.start_iter = start_iter
        self.max_iters = opt['train']['total_iter']
        self.use_tb_logger = opt['logger']['use_tb_logger']
        self.tb_logger = tb_logger
        # Wall-clock origin used for the running ETA estimate.
        self.start_time = time.time()
        self.logger = get_root_logger()

    def __call__(self, log_vars):
        """Format logging message.

        Note: consumes (pops) the 'epoch', 'iter', 'lrs' and, when present,
        'time'/'data_time' entries from *log_vars*; the remaining entries are
        treated as metrics/losses.

        Args:
            log_vars (dict): It contains the following keys:
                epoch (int): Epoch number.
                iter (int): Current iter.
                lrs (list): List for learning rates.

                time (float): Iter time.
                data_time (float): Data time for each iter.
        """
        # epoch, iter, learning rates
        epoch = log_vars.pop('epoch')
        current_iter = log_vars.pop('iter')
        lrs = log_vars.pop('lrs')

        message = (f'[{self.exp_name}]\n[epoch:{epoch:3d}, '
                   f'iter:{current_iter:8,d}, lr:(')
        for v in lrs:
            message += f'{v:.3e},'
        message += ')] '

        # time and estimated time
        if 'time' in log_vars.keys():
            iter_time = log_vars.pop('time')
            data_time = log_vars.pop('data_time')

            # ETA = average seconds per iteration so far * remaining iters.
            total_time = time.time() - self.start_time
            time_sec_avg = total_time / (current_iter - self.start_iter + 1)
            eta_sec = time_sec_avg * (self.max_iters - current_iter - 1)
            eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
            message += f'[eta: {eta_str}, '
            message += f'time (data): {iter_time:.3f} ({data_time:.3f})]'

        # other items, especially losses
        for k, v in log_vars.items():
            message += f'{k}: {v:.6f} '
            # tensorboard logger
            if self.use_tb_logger and 'debug' not in self.exp_name:
                # Losses (keys prefixed 'l_') go under a 'losses/' namespace.
                if k.startswith('l_'):
                    self.tb_logger.add_scalar(f'losses/{k}', v, current_iter)
                else:
                    self.tb_logger.add_scalar(k, v, current_iter)
        self.logger.info(message)
IAN | IAN-master/base_utils/matlab_utils.py | import math
import numpy as np
import torch
def cubic(x):
    """Bicubic interpolation kernel (a = -0.5), as used by MATLAB's imresize.

    Piecewise cubic: one polynomial on |x| <= 1, a second on 1 < |x| <= 2,
    and zero everywhere else.
    """
    ax = torch.abs(x)
    inner = (1.5 * ax**3 - 2.5 * ax**2 + 1) * (ax <= 1).type_as(ax)
    outer = (-0.5 * ax**3 + 2.5 * ax**2 - 4 * ax + 2) * (
        ((ax > 1) * (ax <= 2)).type_as(ax))
    return inner + outer
def calculate_weights_indices(in_length, out_length, scale, kernel,
                              kernel_width, antialiasing):
    """Calculate weights and indices, used for imresize function.

    Args:
        in_length (int): Input length.
        out_length (int): Output length.
        scale (float): Scale factor.
        kernel: unused here; the cubic kernel is hard-coded below.
        kernel_width (int): Kernel width.
        antialisaing (bool): Whether to apply anti-aliasing when downsampling.

    Returns:
        tuple: (weights, indices, sym_len_s, sym_len_e) where weights/indices
        are (out_length, p) tensors and the two ints give how much symmetric
        padding the caller must add at the start/end of the input.
    """
    if (scale < 1) and antialiasing:
        # Use a modified kernel (larger kernel width) to simultaneously
        # interpolate and antialias
        kernel_width = kernel_width / scale

    # Output-space coordinates
    x = torch.linspace(1, out_length, out_length)

    # Input-space coordinates. Calculate the inverse mapping such that 0.5
    # in output space maps to 0.5 in input space, and 0.5 + scale in output
    # space maps to 1.5 in input space.
    u = x / scale + 0.5 * (1 - 1 / scale)

    # What is the left-most pixel that can be involved in the computation?
    left = torch.floor(u - kernel_width / 2)

    # What is the maximum number of pixels that can be involved in the
    # computation?  Note: it's OK to use an extra pixel here; if the
    # corresponding weights are all zero, it will be eliminated at the end
    # of this function.
    p = math.ceil(kernel_width) + 2

    # The indices of the input pixels involved in computing the k-th output
    # pixel are in row k of the indices matrix.
    indices = left.view(out_length, 1).expand(out_length, p) + torch.linspace(
        0, p - 1, p).view(1, p).expand(out_length, p)

    # The weights used to compute the k-th output pixel are in row k of the
    # weights matrix.
    distance_to_center = u.view(out_length, 1).expand(out_length, p) - indices
    # apply cubic kernel
    if (scale < 1) and antialiasing:
        weights = scale * cubic(distance_to_center * scale)
    else:
        weights = cubic(distance_to_center)
    # Normalize the weights matrix so that each row sums to 1.
    weights_sum = torch.sum(weights, 1).view(out_length, 1)
    weights = weights / weights_sum.expand(out_length, p)

    # If a column in weights is all zero, get rid of it. only consider the
    # first and last column.
    weights_zero_tmp = torch.sum((weights == 0), 0)
    if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
        indices = indices.narrow(1, 1, p - 2)
        weights = weights.narrow(1, 1, p - 2)
    if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
        indices = indices.narrow(1, 0, p - 2)
        weights = weights.narrow(1, 0, p - 2)
    weights = weights.contiguous()
    indices = indices.contiguous()
    # Shift indices so they address the symmetrically padded input the caller
    # builds; sym_len_s/sym_len_e are the amounts of padding required.
    sym_len_s = -indices.min() + 1
    sym_len_e = indices.max() - in_length
    indices = indices + sym_len_s - 1
    return weights, indices, int(sym_len_s), int(sym_len_e)
@torch.no_grad()
def imresize(img, scale, antialiasing=True):
    """imresize function same as MATLAB.

    It now only supports bicubic.
    The same scale applies for both height and width.

    Args:
        img (Tensor | Numpy array):
            Tensor: Input image with shape (c, h, w), [0, 1] range.
            Numpy: Input image with shape (h, w, c), [0, 1] range.
        scale (float): Scale factor. The same scale applies for both height
            and width.
        antialisaing (bool): Whether to apply anti-aliasing when downsampling.
            Default: True.

    Returns:
        Tensor: Output image with shape (c, h, w), [0, 1] range, w/o round.
        (A numpy (h, w, c) array is returned when a numpy array was passed.)
    """
    if type(img).__module__ == np.__name__:  # numpy type
        numpy_type = True
        # Convert (h, w, c) numpy to (c, h, w) tensor for processing.
        img = torch.from_numpy(img.transpose(2, 0, 1)).float()
    else:
        numpy_type = False

    in_c, in_h, in_w = img.size()
    out_h, out_w = math.ceil(in_h * scale), math.ceil(in_w * scale)
    kernel_width = 4
    kernel = 'cubic'  # NOTE(review): unused; the cubic kernel is hard-coded.

    # get weights and indices
    weights_h, indices_h, sym_len_hs, sym_len_he = calculate_weights_indices(
        in_h, out_h, scale, kernel, kernel_width, antialiasing)
    weights_w, indices_w, sym_len_ws, sym_len_we = calculate_weights_indices(
        in_w, out_w, scale, kernel, kernel_width, antialiasing)
    # process H dimension
    # symmetric copying: pad top/bottom with mirrored rows so the kernel can
    # read past the image border.
    img_aug = torch.FloatTensor(in_c, in_h + sym_len_hs + sym_len_he, in_w)
    img_aug.narrow(1, sym_len_hs, in_h).copy_(img)

    sym_patch = img[:, :sym_len_hs, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    img_aug.narrow(1, 0, sym_len_hs).copy_(sym_patch_inv)

    sym_patch = img[:, -sym_len_he:, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    img_aug.narrow(1, sym_len_hs + in_h, sym_len_he).copy_(sym_patch_inv)

    # Resample rows: each output row is a weighted sum (mv) of kernel_width
    # padded input rows.
    out_1 = torch.FloatTensor(in_c, out_h, in_w)
    kernel_width = weights_h.size(1)
    for i in range(out_h):
        idx = int(indices_h[i][0])
        for j in range(in_c):
            out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(
                0, 1).mv(weights_h[i])

    # process W dimension
    # symmetric copying: same mirroring trick for the left/right borders.
    out_1_aug = torch.FloatTensor(in_c, out_h, in_w + sym_len_ws + sym_len_we)
    out_1_aug.narrow(2, sym_len_ws, in_w).copy_(out_1)

    sym_patch = out_1[:, :, :sym_len_ws]
    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(2, inv_idx)
    out_1_aug.narrow(2, 0, sym_len_ws).copy_(sym_patch_inv)

    sym_patch = out_1[:, :, -sym_len_we:]
    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(2, inv_idx)
    out_1_aug.narrow(2, sym_len_ws + in_w, sym_len_we).copy_(sym_patch_inv)

    # Resample columns with the width weights.
    out_2 = torch.FloatTensor(in_c, out_h, out_w)
    kernel_width = weights_w.size(1)
    for i in range(out_w):
        idx = int(indices_w[i][0])
        for j in range(in_c):
            out_2[j, :, i] = out_1_aug[j, :,
                                       idx:idx + kernel_width].mv(weights_w[i])
    if numpy_type:
        out_2 = out_2.numpy().transpose(1, 2, 0)
    return out_2
def rgb2ycbcr(img, y_only=False):
    """Convert a RGB image to YCbCr image.

    Matches Matlab's `rgb2ycbcr` (ITU-R BT.601 for standard-definition TV,
    https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion). Note this
    differs from cv2.cvtColor's `RGB <-> YCrCb`, which uses the JPEG variant.

    Args:
        img (ndarray): np.uint8 in [0, 255] or np.float32 in [0, 1].
        y_only (bool): Whether to only return Y channel. Default: False.

    Returns:
        ndarray: YCbCr image with the same dtype and range as the input.
    """
    img_type = img.dtype
    img = _convert_input_type_range(img)
    if y_only:
        converted = np.dot(img, [65.481, 128.553, 24.966]) + 16.0
    else:
        coeffs = [[65.481, -37.797, 112.0],
                  [128.553, -74.203, -93.786],
                  [24.966, 112.0, -18.214]]
        converted = np.matmul(img, coeffs) + [16, 128, 128]
    return _convert_output_type_range(converted, img_type)
def bgr2ycbcr(img, y_only=False):
    """Convert a BGR image to YCbCr image.

    BGR-ordered counterpart of ``rgb2ycbcr`` (ITU-R BT.601,
    https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion); differs from
    cv2.cvtColor's `BGR <-> YCrCb`, which implements the JPEG conversion.

    Args:
        img (ndarray): np.uint8 in [0, 255] or np.float32 in [0, 1].
        y_only (bool): Whether to only return Y channel. Default: False.

    Returns:
        ndarray: YCbCr image with the same dtype and range as the input.
    """
    img_type = img.dtype
    img = _convert_input_type_range(img)
    if y_only:
        converted = np.dot(img, [24.966, 128.553, 65.481]) + 16.0
    else:
        coeffs = [[24.966, 112.0, -18.214],
                  [128.553, -74.203, -93.786],
                  [65.481, -37.797, 112.0]]
        converted = np.matmul(img, coeffs) + [16, 128, 128]
    return _convert_output_type_range(converted, img_type)
def ycbcr2rgb(img):
    """Convert a YCbCr image to RGB image.

    Matches Matlab's `ycbcr2rgb` (ITU-R BT.601,
    https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion); differs from
    cv2.cvtColor's `YCrCb <-> RGB`, which implements the JPEG conversion.

    Args:
        img (ndarray): np.uint8 in [0, 255] or np.float32 in [0, 1].

    Returns:
        ndarray: RGB image with the same dtype and range as the input.
    """
    img_type = img.dtype
    img = _convert_input_type_range(img) * 255
    inverse = [[0.00456621, 0.00456621, 0.00456621],
               [0, -0.00153632, 0.00791071],
               [0.00625893, -0.00318811, 0]]
    converted = np.matmul(img, inverse) * 255.0 + [
        -222.921, 135.576, -276.836
    ]  # noqa: E126
    return _convert_output_type_range(converted, img_type)
def ycbcr2bgr(img):
    """Convert a YCbCr image to BGR image.

    BGR-ordered counterpart of ``ycbcr2rgb`` (ITU-R BT.601,
    https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion); differs from
    cv2.cvtColor's `YCrCb <-> BGR`, which implements the JPEG conversion.

    Args:
        img (ndarray): np.uint8 in [0, 255] or np.float32 in [0, 1].

    Returns:
        ndarray: BGR image with the same dtype and range as the input.
    """
    img_type = img.dtype
    img = _convert_input_type_range(img) * 255
    inverse = [[0.00456621, 0.00456621, 0.00456621],
               [0.00791071, -0.00153632, 0],
               [0, -0.00318811, 0.00625893]]
    converted = np.matmul(img, inverse) * 255.0 + [
        -276.836, 135.576, -222.921
    ]  # noqa: E126
    return _convert_output_type_range(converted, img_type)
def _convert_input_type_range(img):
"""Convert the type and range of the input image.
It converts the input image to np.float32 type and range of [0, 1].
It is mainly used for pre-processing the input image in colorspace
convertion functions such as rgb2ycbcr and ycbcr2rgb.
Args:
img (ndarray): The input image. It accepts:
1. np.uint8 type with range [0, 255];
2. np.float32 type with range [0, 1].
Returns:
(ndarray): The converted image with type of np.float32 and range of
[0, 1].
"""
img_type = img.dtype
img = img.astype(np.float32)
if img_type == np.float32:
pass
elif img_type == np.uint8:
img /= 255.
else:
raise TypeError('The img type should be np.float32 or np.uint8, '
f'but got {img_type}')
return img
def _convert_output_type_range(img, dst_type):
"""Convert the type and range of the image according to dst_type.
It converts the image to desired type and range. If `dst_type` is np.uint8,
images will be converted to np.uint8 type with range [0, 255]. If
`dst_type` is np.float32, it converts the image to np.float32 type with
range [0, 1].
It is mainly used for post-processing images in colorspace convertion
functions such as rgb2ycbcr and ycbcr2rgb.
Args:
img (ndarray): The image to be converted with np.float32 type and
range [0, 255].
dst_type (np.uint8 | np.float32): If dst_type is np.uint8, it
converts the image to np.uint8 type with range [0, 255]. If
dst_type is np.float32, it converts the image to np.float32 type
with range [0, 1].
Returns:
(ndarray): The converted image with desired type and range.
"""
if dst_type not in (np.uint8, np.float32):
raise TypeError('The dst_type should be np.float32 or np.uint8, '
f'but got {dst_type}')
if dst_type == np.uint8:
img = img.round()
else:
img /= 255.
return img.astype(dst_type) | 13,720 | 39.958209 | 79 | py |
IAN | IAN-master/base_utils/gen_surface_normals.py | import numpy as np
import cv2
import random
import torch
import os
from tqdm import tqdm
import argparse
# from base_utils.utils import load_depth
# args = argparse.ArgumentParser(description='Gen_Surface_Normal')
# args.add_argument('--save_dir', type=str)
# args.add_argument('--input_dir', type=str)
# args = args.parse_args()
def load_depth(path):
    '''
    params:
        path: str
    output:
        depth_map: ndarray(float32) rescaled from [0, 1] to [-1, 1]
    description:
        Load depth info from "path". Handles both a plain .npy array and a
        0-d object array that wraps a dict holding 'normalized_depth'.
    '''
    stored = np.load(path, allow_pickle=True)
    if stored.ndim != 0:
        return stored * 2 - 1
    # 0-d object array: unwrap the pickled dict first.
    return stored.item()['normalized_depth'] * 2 - 1
def cal_normal(d_im):
    """Compute unit surface normals from a depth map.

    The input is cast to float32 and gradients are taken with a sample
    spacing of 2; z is fixed at 1 before each vector is normalised.
    """
    d_im = d_im.astype(np.float32)
    # (A bilateral pre-filter existed here once but was disabled upstream.)
    grad_y, grad_x = np.gradient(d_im, 2)
    normal = np.dstack((-grad_x, -grad_y, np.ones_like(d_im)))
    lengths = np.linalg.norm(normal, axis=2)
    normal /= lengths[:, :, np.newaxis]
    return normal
if __name__ == '__main__':
    # Batch-convert every depth map in input_dir to a surface-normal map in
    # output_dir, in parallel.
    # input_dir = args.input_dir
    input_dir = "data/test/depth"
    output_dir = 'data/test/normals'
    # output_dir = args.save_dir
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    def cal_normals(path):
        # Worker: load one depth map, rescale to [0, 2^16], compute normals,
        # and save under the same filename in output_dir.
        depth_path = os.path.join(input_dir, path)
        depth_map = load_depth(depth_path)
        # depth_map_vis = ((depth_map+ 1) / 2 cd * 255).astype(np.uint8)
        # cv2.imwrite(f'/home/vm411/Codes/NTIRE2021/base_utils/depth_vis/{path[:-4]+".png"}', depth_map_vis)
        depth_map = (load_depth(depth_path) + 1) / 2 * 65536
        normal = cal_normal(depth_map)
        np.save(os.path.join(output_dir, path), normal)
        # normal_vis = normal * 127.5 + 127.5
        # normal_vis = normal_vis.astype(np.uint8)
        # print(f"./normal_vis/{path[:-4]+'.png'}")
        # print(cv2.imwrite(f"/home/vm411/Codes/NTIRE2021/base_utils/normals_vis/{path[:-4]+'.png'}", normal_vis))

    depth_paths = os.listdir(input_dir)
    import multiprocessing
    # NOTE(review): cal_normals is defined inside the __main__ guard, so
    # Pool.imap only works where processes fork (Linux); on spawn-based
    # platforms (Windows/macOS default) this would fail to pickle — confirm
    # the intended platform.
    p = multiprocessing.Pool(8)
    for _ in tqdm(p.imap(cal_normals, depth_paths), total=len(depth_paths)):
        continue
    p.close()
    p.join()
| 2,297 | 28.844156 | 114 | py |
SiPRNet | SiPRNet-main/Save_result_SiPRNet.py | import h5py
import torch
import os
from pathlib import Path
import numpy as np
from Utils.Util import save_tensor_img
from Model.model_SiPRNet import MyNet
import random
# Seed everything (Python, NumPy, and torch CPU/CUDA RNGs) for reproducibility.
seed = 123
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# Set the device. NOTE(review): CUDA_VISIBLE_DEVICES takes effect only if set
# before CUDA is initialised — it is set here before the first CUDA call, but
# verify nothing imported above touches CUDA first.
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
if __name__ == '__main__':
    # Run the pretrained SiPRNet over a test set and dump prediction / target
    # images into Result/<model><dataset>/.
    dataset_name = 'RAF'
    modelname = 'SiPRNet_'
    # Pick the test set and the matching pretrained weights.
    if dataset_name == 'RAF':
        test_set = h5py.File('./Data/RAF_data.h5', 'r')
        m_model = MyNet()
        m_model.load_state_dict(torch.load("./Model/SiPRNet_RAF.pth"))
    elif dataset_name == 'Fashion':
        test_set = h5py.File('./Data/Fashion_data.h5', 'r')
        m_model = MyNet()
        m_model.load_state_dict(torch.load("./Model/SiPRNet_Fashion.pth"))
    else:
        raise Exception('No dataset')
    m_model = m_model.eval().to(device)
    save_dir = os.path.join('Result', modelname + dataset_name)
    Path(save_dir).mkdir(parents=True, exist_ok=True)  # create a directory if not exist
    # Bug fix: the original ran inference with autograd enabled, building and
    # keeping a computation graph for every batch. Outputs are identical;
    # no_grad just avoids the wasted memory/time.
    with torch.no_grad():
        for idx in list(test_set.keys()):
            data = test_set[idx]
            # Channel 0 is the measured input; the remaining channels are the
            # ground-truth target.
            in_tensor, tar_tensor = torch.from_numpy(data[:, 0:1, :, :]).to(device), \
                                    torch.from_numpy(data[:, 1:, :, :]).to(device)
            pred = m_model(in_tensor)
            save_file_prefix = os.path.join(save_dir, modelname + "img_%04d" % (int(idx)))
            save_tensor_img(pred, save_file_prefix, isGT=False)
            save_tensor_img(tar_tensor, save_file_prefix, isGT=True)
| 1,633 | 29.259259 | 88 | py |
SiPRNet | SiPRNet-main/Model/model_SiPRNet.py | import torch # to load PyTorch library
import torch.nn as nn # to load PyTorch library
import torch.nn.functional as F
class MyNet(nn.Module):
    """SiPRNet: maps a 1-channel 128x128 measurement to a 2-channel output.

    Pipeline: FeatExt (fully-connected feature extractor via 1x1 convs) ->
    pre-upsample residual stage + PixelShuffle(2) -> second upsample stage +
    PixelShuffle(2) -> output head. Attribute names are part of the saved
    checkpoints' state_dict keys — do not rename them.
    """

    def __init__(self):
        super(MyNet, self).__init__()
        in_dim = 1      # input channels
        out_dim = 2     # output channels (two output planes)
        in_size = 128   # expected spatial size of the input
        dim = 32        # base feature width
        num_layer_res = 1
        self.in_layer = FeatExt(in_size=in_size)
        self.preupsample = nn.Sequential(
            ConvBlock(in_dim, dim, 3, 1, 1, isUseNorm=True),
            self._make_up_block(dim, kernel_size=3, num_layer=num_layer_res),
            nn.PixelShuffle(2)
            # ConvBlock(dim, dim * 4, 3, 1, 1, isUseNorm=True),
        )
        self.upsample1 = nn.Sequential(
            self._make_up_block2(dim, kernel_size=3, num_layer=num_layer_res),
            nn.PixelShuffle(2)
        )
        # self.upsample2 = self._make_up_block(dim, kernel_size=3, num_layer=num_layer_res)
        self.last = nn.Sequential(
            ConvBlock(dim//2, out_dim, 3, 1, 1, isUseNorm=False),
            nn.Conv2d(out_dim, out_dim, 3, 1, 1)
        )
        # Kaiming initialisation for all (transposed) conv layers.
        for m in self.modules():
            classname = m.__class__.__name__
            if classname.find('Conv2d') != -1:
                torch.nn.init.kaiming_normal_(m.weight)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif classname.find('ConvTranspose2d') != -1:
                torch.nn.init.kaiming_normal_(m.weight)
                if m.bias is not None:
                    m.bias.data.zero_()

    def _make_up_block(self, in_channels, kernel_size, num_layer):
        # Residual stage ending in a 4x channel expansion (feeds PixelShuffle(2)).
        upsample_channel = ConvBlock(in_channels, in_channels * 4, kernel_size=kernel_size, stride=1,
                                     padding=1, isUseNorm=True)
        layers = []
        for i in range(1, num_layer):
            layers.append(UpsampleBlock(in_channels, in_channels, kernel_size=kernel_size, upsample=None))
        layers.append(
            UpsampleBlock(in_channels, in_channels * 4, kernel_size=kernel_size, upsample=upsample_channel))
        return nn.Sequential(*layers)

    def _make_up_block2(self, in_channels, kernel_size, num_layer):
        # Same as _make_up_block but with a 2x channel expansion, so the
        # following PixelShuffle(2) halves the channel count overall.
        upsample_channel = ConvBlock(in_channels, in_channels * 2, kernel_size=kernel_size, stride=1,
                                     padding=1, isUseNorm=True)
        layers = []
        for i in range(1, num_layer):
            layers.append(UpsampleBlock(in_channels, in_channels, kernel_size=kernel_size, upsample=None))
        layers.append(
            UpsampleBlock(in_channels, in_channels*2, kernel_size=kernel_size, upsample=upsample_channel))
        return nn.Sequential(*layers)

    def forward(self, in_img):  # in_img: (b, 1, H, W) measurement batch
        m = self.in_layer(in_img)
        img = self.preupsample(m)
        img = self.upsample1(img)
        # img = self.upsample2(img)
        img = self.last(img)
        return img
class ConvBlock(nn.Module):
    """Conv2d -> optional InstanceNorm2d -> PReLU.

    Attribute names (conv / norm / act) are kept as-is because they appear in
    pretrained state_dict keys.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
                 padding=1, isUseNorm=False):
        super(ConvBlock, self).__init__()
        self.isUseNorm = isUseNorm
        self.conv = nn.Conv2d(in_channels=in_channels,
                              out_channels=out_channels,
                              kernel_size=kernel_size,
                              stride=stride,
                              padding=padding)
        if self.isUseNorm:
            # Normalisation is only instantiated when requested.
            self.norm = nn.InstanceNorm2d(out_channels)
        self.act = nn.PReLU()

    def forward(self, x):
        feat = self.conv(x)
        if self.isUseNorm:
            feat = self.norm(feat)
        return self.act(feat)
class UpsampleBlock(nn.Module):
    """Residual block of two (ConvBlock + nonlocalblock) pairs.

    When the output channel count differs from the input, *upsample* supplies
    the 1-layer projection used on the shortcut path; otherwise the identity
    shortcut is used. Attribute names are kept for state_dict compatibility.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, upsample=None):
        super(UpsampleBlock, self).__init__()
        self.upsample = nn.Sequential(
            ConvBlock(in_channels, out_channels, kernel_size, stride=1, padding=1, isUseNorm=True),
            nonlocalblock(channel=out_channels),
            ConvBlock(out_channels, out_channels, kernel_size, stride=1, padding=1, isUseNorm=True),
            nonlocalblock(channel=out_channels),
        )
        self.activation = nn.PReLU()
        self.upsample_chn = upsample

    def forward(self, x):
        # Project the shortcut only when a channel-matching module was given.
        shortcut = x if self.upsample_chn is None else self.upsample_chn(x)
        residual = self.upsample(x)
        residual += shortcut
        return self.activation(residual)
def upsample(x):
    """4x4 average-pool *x* with stride 4 and flatten the spatial dims.

    Despite its name this *downsamples*: (b, c, h, w) -> (b, c, (h//4)*(w//4)).
    Used by nonlocalblock to shrink the key/value maps.
    """
    batch, channels = x.shape[0], x.shape[1]
    pooled = nn.AvgPool2d([4, 4], stride=4)(x)
    return pooled.view(batch, channels, -1)
class nonlocalblock(nn.Module):
    """Non-local (self-attention) block with heavy spatial downsampling.

    Query/key/value are produced by 1x1 convs on a 2x average-pooled input;
    keys and values are additionally pooled 4x via the module-level
    ``upsample`` helper. The attended result is projected back to *channel*
    channels and bilinearly resized to the input resolution. The returned
    tensor is the attention response only (residual addition, if any, is the
    caller's job).
    """

    def __init__(self,channel=32,avg_kernel=2):
        super(nonlocalblock,self).__init__()
        # Internal embedding uses half the channels (classic bottleneck).
        self.channel = channel//2
        self.theta = nn.Conv2d(channel,self.channel,1)   # query projection
        self.phi = nn.Conv2d(channel,self.channel,1)     # key projection
        self.g = nn.Conv2d(channel,self.channel,1)       # value projection
        self.conv = nn.Conv2d(self.channel,channel,1)    # output projection
        self.avg = nn.AvgPool2d([avg_kernel,avg_kernel],stride=avg_kernel)

    def forward(self,x):
        H,W = x.shape[2:4]
        u=self.avg(x)
        b,c,h,w = u.shape[0:4]
        # Queries: one per pooled pixel -> (b, h*w, channel//2).
        theta_x = self.theta(u).view(b,self.channel,-1).permute(0,2,1)
        phi_x = self.phi(u)
        # Keys/values are pooled a further 4x by the `upsample` helper.
        phi_x = upsample(phi_x)
        g_x = self.g(u)
        g_x = upsample(g_x).permute(0,2,1)
        # Attention weights: softmax over the key dimension.
        theta_x = torch.matmul(theta_x,phi_x)
        theta_x = F.softmax(theta_x,dim=-1)
        y = torch.matmul(theta_x,g_x)
        y = y.permute(0,2,1)
        y = y.view(b,self.channel,h,w)
        y = self.conv(y)
        # Restore the original spatial resolution.
        y = F.interpolate(y,size=[H,W])
        return y
class FeatExt(nn.Module):
    """Feature extractor that flattens the whole image into channels and runs
    a stack of 1x1 ConvBlocks over it.

    NOTE(review): forward assumes ``c * w * h == in_size ** 2`` (the extractor's
    input channel count) and that ``basic_channels == c * 32 * 32`` so the final
    reshape to (b, c, 32, 32) is valid — e.g. c == 1, basic_channels == 1024.
    Confirm against callers; the 32 is hard-coded.
    """
    def __init__(self, in_size=64, basic_channels=1024):
        super(FeatExt, self).__init__()
        # every input pixel becomes one channel of the 1x1 "fully-connected" convs
        total_pixel = in_size * in_size
        self.extractor = nn.Sequential(
            ConvBlock(in_channels=total_pixel, out_channels=basic_channels, kernel_size=1, stride=1, padding=0),
            nn.Dropout(p=0.2),
            ConvBlock(in_channels=basic_channels, out_channels=basic_channels, kernel_size=1, stride=1, padding=0),
            ConvBlock(in_channels=basic_channels, out_channels=basic_channels, kernel_size=1, stride=1, padding=0),
        )
        return
    def forward(self, in_img):
        b, c, w, h = in_img.shape
        # flatten all pixels into the channel dimension: (b, c*w*h, 1, 1)
        flat_data = in_img.reshape(b, c * w * h, 1, 1)
        feat = self.extractor(flat_data)
        # hard-coded 32x32 output grid — see class note
        return feat.reshape(b, c, 32, 32)
| 7,166 | 39.954286 | 115 | py |
Molformer | Molformer-master/model/tr_msa.py | import copy
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.tr_spe import Embeddings, FeedForward, clones, Generator3D, Feat_Embedding
from model.tr_cpe import Encoder, EncoderLayer
def build_model(vocab, tgt, dist_bar, N=6, embed_dim=512, ffn_dim=2048, head=8, dropout=0.1, extra_feat=None):
    """Assemble a MultiScaleTransformer3D and Xavier-initialise its matrices.

    Args:
        vocab: source vocabulary size.
        tgt: output dimensionality of the prediction head.
        dist_bar: list of distance cutoffs for the multi-scale attention.
        N: number of encoder layers; embed_dim/ffn_dim/head/dropout as usual.
        extra_feat: optional vocab for an auxiliary feature embedding.
    """
    dup = copy.deepcopy
    attn = MultiScaleMultiHeadedAttention(head, embed_dim, dist_bar)
    ffn = FeedForward(embed_dim, ffn_dim, dropout)
    feat_embed = Feat_Embedding(extra_feat, embed_dim) if extra_feat else extra_feat
    model = MultiScaleTransformer3D(
        Encoder(EncoderLayer(embed_dim, dup(attn), dup(ffn), dropout), N),
        Embeddings(embed_dim, vocab),
        Generator3D(embed_dim, tgt, dropout),
        feat_embed,
    )
    for weight in model.parameters():
        if weight.dim() > 1:
            nn.init.xavier_uniform_(weight)
    return model
class MultiScaleTransformer3D(nn.Module):
    """Encoder-only transformer; predicts from the first (placeholder) token.

    Optionally adds an auxiliary feature embedding to the token embedding.
    """

    def __init__(self, encoder, src_embed, generator, feat_embed):
        super(MultiScaleTransformer3D, self).__init__()
        self.encoder = encoder
        self.src_embed = src_embed
        self.feat_embed = feat_embed
        self.generator = generator

    def forward(self, src, src_mask, dist):
        tokens = self.src_embed(src)
        if self.feat_embed:
            tokens = tokens + self.feat_embed(src)
        encoded = self.encoder(tokens, dist, src_mask)
        # position 0 holds the placeholder/center token
        return self.generator(encoded[:, 0])
#######################################
## attention部分
#######################################
def attention(query, key, value, dist_conv, dist, dist_bar, mask=None, dropout=None):
    """Multi-scale scaled dot-product attention.

    Scores are element-wise reweighted by ``dist_conv`` (a learned transform of
    the distance matrix), then softmax'd once per cutoff in ``dist_bar`` with
    pairs farther than the cutoff masked out.

    Returns:
        (out, p_attn): ``out`` is a list with one value tensor per scale;
        ``p_attn`` holds the weights of the LAST scale only (loop variable).
    """
    scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(query.size(-1))
    scores *= dist_conv
    if mask is not None:
        scores = scores.masked_fill(mask == 0, -1e10)
    # one attention mask per distance scale
    out = []
    for i in dist_bar:
        # every point may always interact with the center point (index 0)
        dist_mask = dist < i
        dist_mask[:, :, 0, :] = 1
        dist_mask[:, :, :, 0] = 1
        # mask out attention scores between points farther than the cutoff
        scores_dist = scores.masked_fill(dist_mask == 0, -1e10)
        p_attn = F.softmax(scores_dist, dim=-1)
        if dropout is not None:
            p_attn = dropout(p_attn)
        out.append(torch.matmul(p_attn, value))
    return out, p_attn
class MultiScaleMultiHeadedAttention(nn.Module):
    """Multi-head attention evaluated at several distance scales.

    The per-scale outputs are concatenated and projected back to ``embed_dim``
    by ``scale_linear``. ``dist_bar`` gets an extra effectively-infinite cutoff
    so the last scale is global.
    """
    def __init__(self, h, embed_dim, dist_bar, dropout=0.1):
        super(MultiScaleMultiHeadedAttention, self).__init__()
        assert embed_dim % h == 0
        # four linear layers: Q, K, V projections plus the shared output projection
        self.linears = clones(nn.Linear(embed_dim, embed_dim), 4)
        # 1x1 convs lifting the (B,1,N,N) distance matrix to one map per head
        self.cnn = nn.Sequential(nn.Conv2d(1, h, kernel_size=1), nn.ReLU(), nn.Conv2d(h, h, kernel_size=1))
        # fuses the concatenation of all scale outputs back to embed_dim
        self.scale_linear = nn.Sequential(nn.Linear((len(dist_bar) + 1) * embed_dim, embed_dim), nn.ReLU(),
                                          nn.Dropout(p=dropout), nn.Linear(embed_dim, embed_dim))
        self.dropout = nn.Dropout(p=dropout)
        # append an effectively-infinite cutoff so the final scale sees everything
        self.dist_bar = dist_bar + [1e10]
        self.d_k = embed_dim // h
        self.h = h
        self.attn = None
    def forward(self, query, key, value, dist, mask=None):
        if mask is not None:
            mask = mask.unsqueeze(1)
        nbatches = query.size(0)
        # 1) Do all the linear projections in batch from (B, embed_dim) => (B, head, N, d_k)
        query, key, value = [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2) for l, x in
                             zip(self.linears, (query, key, value))]
        # expand dist from (B,N,N) to (B,1,N,N) and feed it through the CNN to get dist_conv
        dist = dist.unsqueeze(1)
        dist_conv = self.cnn(dist)
        # 2) Apply attention on all the projected vectors in batch.
        x_list, self.attn = attention(query, key, value, dist_conv, dist,
                                      self.dist_bar, mask=mask, dropout=self.dropout)
        # 3) "Concat" using a view and apply a final linear.
        x_list = [x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k) for x in x_list]
        # 4) concat the per-scale attention vectors: (B, N, (len(dist_bar) + 1) * embed_dim)
        x = torch.cat([self.linears[-1](x) for x in x_list], dim=-1)
        return self.scale_linear(x)
| 4,202 | 35.868421 | 110 | py |
Molformer | Molformer-master/model/tr_all.py | import copy
import torch.nn as nn
from model.tr_spe import Embeddings, FeedForward, Generator3D
from model.tr_afps import MultiRelationEncoder, EncoderLayer
from model.tr_msa import MultiScaleMultiHeadedAttention
def build_model(vocab, tgt, dist_bar, k, N=6, embed_dim=512, ffn_dim=2048, head=8, dropout=0.1):
    """Assemble a FullTransformer3D (multi-scale attention + AFPS pooling
    encoder) and Xavier-initialise every weight matrix.

    Args:
        k: number of points kept by adaptive farthest point sampling (> 1).
    """
    assert k > 1
    dup = copy.deepcopy
    attn = MultiScaleMultiHeadedAttention(head, embed_dim, dist_bar)
    ffn = FeedForward(embed_dim, ffn_dim, dropout)
    layer = EncoderLayer(embed_dim, dup(attn), dup(ffn), dropout)
    model = FullTransformer3D(MultiRelationEncoder(layer, N, k),
                              Embeddings(embed_dim, vocab),
                              Generator3D(embed_dim, tgt, dropout))
    for weight in model.parameters():
        if weight.dim() > 1:
            nn.init.xavier_uniform_(weight)
    return model
class FullTransformer3D(nn.Module):
    """Embedding -> encoder (which pools internally) -> prediction head."""

    def __init__(self, encoder, src_embed, generator):
        super(FullTransformer3D, self).__init__()
        self.encoder = encoder
        self.src_embed = src_embed
        self.generator = generator

    def forward(self, src, src_mask, dist):
        pooled = self.encoder(self.src_embed(src), dist, src_mask)
        return self.generator(pooled)
| 1,179 | 34.757576 | 107 | py |
Molformer | Molformer-master/model/tr_lsa.py | import copy
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
def build_model(vocab, tgt, dist_bar, N=6, embed_dim=512, ffn_dim=2048, head=8, dropout=0.1, out_both=False):
    """Assemble an Encoder3D with distance-constrained attention and 3D
    positional encoding; Xavier-initialise all weight matrices.

    Args:
        dist_bar: single distance cutoff used to build the attention mask.
        out_both: when True the model also returns the pooled hidden state.
    """
    dup = copy.deepcopy
    attn = MultiHeadedAttention(head, embed_dim)
    ffn = FeedForward(embed_dim, ffn_dim, dropout)
    position = PositionalEncoding3D(embed_dim, dropout)
    # build the model, then initialise its parameters
    model = Encoder3D(Encoder(EncoderLayer(embed_dim, dup(attn), dup(ffn), dropout), N),
                      Embeddings(embed_dim, vocab),
                      dup(position),
                      Generator3D(embed_dim, tgt, dropout),
                      dist_bar, out_both)
    for weight in model.parameters():
        if weight.dim() > 1:
            nn.init.xavier_uniform_(weight)
    return model
class Encoder3D(nn.Module):
    """Encoder-only model whose attention is restricted to point pairs closer
    than ``dist_bar`` (computed from raw 3D coordinates via cdist)."""
    def __init__(self, encoder, src_embed, src_pe, generator, dist_bar, out_both):
        super(Encoder3D, self).__init__()
        self.encoder = encoder
        self.src_embed = src_embed
        self.src_pe = src_pe
        self.generator = generator
        self.dist_bar = dist_bar
        # cache of the last boolean distance mask (for inspection/statistics)
        self.dist = None
        self.out_both = out_both
    def forward_once(self, src, src_mask, pos):
        # boolean mask: True where two points are within dist_bar of each other
        dist = torch.cdist(pos, pos) < self.dist_bar
        # the center point (index 0) may always interact with every point
        dist[:, 0, :] = 1
        dist[:, :, 0] = 1
        # keep dist around for later statistics
        self.dist = dist
        # take the placeholder-center token's output as the head input
        return self.encoder(self.src_pe(self.src_embed(src), pos), dist.unsqueeze(1), src_mask)[:, 0, :]
    def forward(self, src, src_mask, pos):
        if self.out_both:
            h = self.forward_once(src, src_mask, pos)
            return self.generator(h), h
        return self.generator(self.forward_once(src, src_mask, pos))
#######################################
## Encoder部分
#######################################
class Encoder(nn.Module):
    """Stack of N identical encoder layers followed by a final LayerNorm."""

    def __init__(self, layer, N):
        super(Encoder, self).__init__()
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)

    def forward(self, x, dist, mask):
        """Pass the input (with its distance mask) through every layer in turn."""
        out = x
        for enc_layer in self.layers:
            out = enc_layer(out, dist, mask)
        return self.norm(out)
class EncoderLayer(nn.Module):
    """Self-attention then position-wise feed-forward, each wrapped in a
    pre-norm residual sublayer."""

    def __init__(self, size, self_attn, feed_forward, dropout):
        super(EncoderLayer, self).__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        self.sublayer = clones(SublayerConnection(size, dropout), 2)
        self.size = size

    def forward(self, x, dist, mask):
        attend = lambda t: self.self_attn(t, t, t, dist, mask)
        x = self.sublayer[0](x, attend)
        return self.sublayer[1](x, self.feed_forward)
class Generator3D(nn.Module):
    """Prediction head: a single linear projection with dropout on the output.

    A single layer suffices here; deeper MLP heads gave no significant gain
    (and sometimes hurt), per the original author's note.
    """

    def __init__(self, embed_dim, tgt, dropout):
        super(Generator3D, self).__init__()
        self.tgt = tgt
        self.proj = nn.Linear(embed_dim, tgt)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x):
        projected = self.proj(x)
        return self.dropout(projected)
def clones(module, N):
    """Return a ModuleList holding N independent deep copies of *module*."""
    copies = [copy.deepcopy(module) for _ in range(N)]
    return nn.ModuleList(copies)
#######################################
## attention部分
#######################################
def constrained_attention(query, key, value, dist, mask=None, dropout=None):
    """Scaled dot-product attention restricted to spatially close pairs.

    Positions where ``dist == 0`` (and, if given, where ``mask == 0``) get a
    score of -1e9 before the softmax so they receive ~zero weight.

    Returns:
        (weighted values, attention weights).
    """
    scale = math.sqrt(query.size(-1))
    scores = query @ key.transpose(-2, -1) / scale
    if mask is not None:
        scores = scores.masked_fill(mask == 0, -1e9)
    # mask out attention between points the distance matrix marks as far apart
    weights = F.softmax(scores.masked_fill(dist == 0, -1e9), dim=-1)
    if dropout is not None:
        weights = dropout(weights)
    return weights @ value, weights
class MultiHeadedAttention(nn.Module):
    """Multi-head wrapper around ``constrained_attention``; the boolean
    distance mask ``dist`` limits which pairs may attend."""
    def __init__(self, h, embed_dim, dropout=0.1):
        """Take in model size and number of heads."""
        super(MultiHeadedAttention, self).__init__()
        assert embed_dim % h == 0
        self.h = h
        # Q, K, V projections plus the output projection
        self.linears = clones(nn.Linear(embed_dim, embed_dim), 4)
        # last attention weights, cached for inspection
        self.attn = None
        self.dropout = nn.Dropout(p=dropout)
        # V and K share the same dimensionality: embed_dim split across heads
        self.d_k = embed_dim // h
    def forward(self, query, key, value, dist, mask=None):
        if mask is not None:
            # Same mask applied to all h heads.
            mask = mask.unsqueeze(1)
        nbatches = query.size(0)
        # 1) Do all the linear projections in batch from embed_dim => h x d_k
        query, key, value = [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
                             for l, x in zip(self.linears, (query, key, value))]
        # 2) Apply attention on all the projected vectors in batch.
        x, self.attn = constrained_attention(query, key, value, dist, mask=mask, dropout=self.dropout)
        # 3) "Concat" using a view and apply a final linear.
        x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)
        return self.linears[-1](x)
#######################################
## Encoder部分
#######################################
class LayerNorm(nn.Module):
    """Layer normalisation over the last dimension, with learnable gain (a_2)
    and bias (b_2). Uses the sample (unbiased) std, matching torch defaults."""

    def __init__(self, features, eps=1e-6):
        super(LayerNorm, self).__init__()
        self.a_2 = nn.Parameter(torch.ones(features))
        self.b_2 = nn.Parameter(torch.zeros(features))
        self.eps = eps

    def forward(self, x):
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True) + self.eps
        return self.a_2 * ((x - mu) / sigma) + self.b_2
class SublayerConnection(nn.Module):
    """Pre-norm residual wrapper: returns x + dropout(sublayer(norm(x)))."""

    def __init__(self, size, dropout):
        super(SublayerConnection, self).__init__()
        self.norm = LayerNorm(size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, sublayer):
        """Apply residual connection to any sublayer with the same size."""
        residual = self.dropout(sublayer(self.norm(x)))
        return x + residual
#######################################
## Position-wise前向传播网络部分
#######################################
class FeedForward(nn.Module):
    """Position-wise feed-forward: Linear -> ReLU -> Dropout -> Linear."""

    def __init__(self, embed_dim, ffn_dim, dropout=0.1):
        super(FeedForward, self).__init__()
        self.w_1 = nn.Linear(embed_dim, ffn_dim)
        self.w_2 = nn.Linear(ffn_dim, embed_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        hidden = F.relu(self.w_1(x))
        return self.w_2(self.dropout(hidden))
#######################################
## Embedding部分
#######################################
class PositionalEncoding3D(nn.Module):
    """Sinusoidal positional encoding computed per coordinate axis (x, y, z)
    and summed onto the token embeddings.

    NOTE(review): tensors are created with ``.cuda()``, so this module requires
    a GPU; ``x`` is modified in place via ``+=``.
    """
    def __init__(self, embed_dim, dropout):
        super(PositionalEncoding3D, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        self.embed_dim = embed_dim
    def forward(self, x, pos):
        # scale up the magnitude of the coordinates
        pos = pos * 10
        # compute a PE from each of the three axes and add it to the embeddings
        div = torch.exp(torch.arange(0., self.embed_dim, 2) * -(math.log(10000.0) / self.embed_dim)).double().cuda()
        for i in range(3):
            pe = torch.zeros(x.shape).cuda()
            pe[..., 0::2] = torch.sin(pos[..., i].unsqueeze(2) * div)
            pe[..., 1::2] = torch.cos(pos[..., i].unsqueeze(2) * div)
            x += Variable(pe, requires_grad=False)
        return self.dropout(x)
class Embeddings(nn.Module):
    """Token embedding scaled by sqrt(embed_dim), as in 'Attention Is All
    You Need'."""

    def __init__(self, embed_dim, vocab):
        super(Embeddings, self).__init__()
        self.embed = nn.Embedding(vocab, embed_dim)
        self.embed_dim = embed_dim

    def forward(self, x):
        scale = math.sqrt(self.embed_dim)
        return self.embed(x) * scale
| 7,868 | 31.25 | 116 | py |
Molformer | Molformer-master/model/tr_cpe.py | import copy
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
sys.path.append("..")
from model.tr_spe import Embeddings, FeedForward
from model.tr_spe import LayerNorm, SublayerConnection, clones, Generator3D
def build_model(vocab, tgt, N=6, embed_dim=512, ffn_dim=2048, head=8, dropout=0.1):
    """Assemble a MultiRepresentationTransformer3D (CNN-reweighted attention)
    and Xavier-initialise its weight matrices."""
    dup = copy.deepcopy
    attn = MultiHeadedAttention(head, embed_dim)
    ffn = FeedForward(embed_dim, ffn_dim, dropout)
    model = MultiRepresentationTransformer3D(
        Encoder(EncoderLayer(embed_dim, dup(attn), dup(ffn), dropout), N),
        Embeddings(embed_dim, vocab),
        Generator3D(embed_dim, tgt, dropout),
    )
    for weight in model.parameters():
        if weight.dim() > 1:
            nn.init.xavier_uniform_(weight)
    return model
class MultiRepresentationTransformer3D(nn.Module):
    """Encoder-only transformer; predicts from the first (placeholder) token."""

    def __init__(self, encoder, src_embed, generator):
        super(MultiRepresentationTransformer3D, self).__init__()
        self.encoder = encoder
        self.src_embed = src_embed
        self.generator = generator

    def forward(self, src, src_mask, dist):
        encoded = self.encoder(self.src_embed(src), dist, src_mask)
        # position 0 holds the placeholder/center token
        return self.generator(encoded[:, 0])
class Encoder(nn.Module):
    """Stack of N identical encoder layers followed by a final LayerNorm."""

    def __init__(self, layer, N):
        super(Encoder, self).__init__()
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)

    def forward(self, x, dist, mask):
        """Thread the input (and its distance matrix) through every layer."""
        out = x
        for enc_layer in self.layers:
            out = enc_layer(out, dist, mask)
        return self.norm(out)
class EncoderLayer(nn.Module):
    """Self-attention then position-wise feed-forward, each wrapped in a
    pre-norm residual sublayer."""

    def __init__(self, size, self_attn, feed_forward, dropout):
        super(EncoderLayer, self).__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        self.sublayer = clones(SublayerConnection(size, dropout), 2)
        self.size = size

    def forward(self, x, dist, mask):
        attend = lambda t: self.self_attn(t, t, t, dist, mask)
        x = self.sublayer[0](x, attend)
        return self.sublayer[1](x, self.feed_forward)
#######################################
## attention part
#######################################
def attention(query, key, value, dist_conv, mask=None, dropout=None):
    """Scaled dot-product attention whose scores are element-wise reweighted
    by ``dist_conv`` (a learned transform of the distance matrix) before
    masking and softmax.

    Returns:
        (weighted values, attention weights).
    """
    scale = math.sqrt(query.size(-1))
    scores = (query @ key.transpose(-2, -1) / scale) * dist_conv
    if mask is not None:
        scores = scores.masked_fill(mask == 0, -1e9)
    weights = F.softmax(scores, dim=-1)
    if dropout is not None:
        weights = dropout(weights)
    return weights @ value, weights
class MultiHeadedAttention(nn.Module):
    """Multi-head attention where the raw distance matrix is lifted by 1x1
    convs to one reweighting map per head (see module-level ``attention``)."""
    def __init__(self, h, embed_dim, dropout=0.1):
        super(MultiHeadedAttention, self).__init__()
        assert embed_dim % h == 0
        # four linear layers: Q, K, V projections plus the output projection
        self.linears = clones(nn.Linear(embed_dim, embed_dim), 4)
        # 1 * 1 convolution operator lifting the distance matrix to h maps
        self.cnn = nn.Sequential(nn.Conv2d(1, h, kernel_size=1), nn.ReLU(), nn.Conv2d(h, h, kernel_size=1))
        self.dropout = nn.Dropout(p=dropout)
        self.d_k = embed_dim // h
        self.h = h
        # last attention weights, cached for inspection
        self.attn = None
    def forward(self, query, key, value, dist_conv, mask=None):
        if mask is not None:
            mask = mask.unsqueeze(1)
        nbatches = query.size(0)
        # 1) Do all the linear projections in batch from (B, embed_dim) => (B, head, N, d_k)
        query, key, value = [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2) for l, x in
                             zip(self.linears, (query, key, value))]
        # expand dist from (B, N, N) to (B,1,N,N) and feed it through the CNN
        dist_conv = self.cnn(dist_conv.unsqueeze(1))
        # 2) Apply attention on all the projected vectors in batch.
        x, self.attn = attention(query, key, value, dist_conv, mask=mask, dropout=self.dropout)
        # 3) "Concat" using a view and apply a final linear.
        x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)
        return self.linears[-1](x)
| 4,071 | 33.803419 | 112 | py |
Molformer | Molformer-master/model/tr_spe.py | """ reference from Harvard NLP: https://nlp.seas.harvard.edu/2018/04/03/attention.html"""
import copy
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from mendeleev import element
import sys
sys.path.append("..")
#######################################
## Transformer with only Encoder
#######################################
def build_model(vocab, tgt, N=6, embed_dim=512, ffn_dim=2048, head=8, dropout=0.1):
    """Assemble a plain Transformer3D with sinusoidal 3D position encoding
    and Xavier-initialise its weight matrices."""
    dup = copy.deepcopy
    attn = MultiHeadedAttention(head, embed_dim)
    ffn = FeedForward(embed_dim, ffn_dim, dropout)
    position = SinusoidalPositionEncoding3D(embed_dim, dropout)
    # initialize model
    model = Transformer3D(Encoder(EncoderLayer(embed_dim, dup(attn), dup(ffn), dropout), N),
                          Embeddings(embed_dim, vocab),
                          dup(position),
                          Generator3D(embed_dim, tgt, dropout))
    for weight in model.parameters():
        if weight.dim() > 1:
            nn.init.xavier_uniform_(weight)
    return model
class Transformer3D(nn.Module):
    """Embedding + positional encoding -> encoder -> head, predicting from
    the first (placeholder) token."""

    def __init__(self, encoder, src_embed, src_pe, generator):
        super(Transformer3D, self).__init__()
        self.encoder = encoder
        self.src_embed = src_embed
        self.src_pe = src_pe
        self.generator = generator

    def forward(self, src, src_mask, pos):
        tokens = self.src_pe(self.src_embed(src), pos)
        encoded = self.encoder(tokens, src_mask)
        return self.generator(encoded[:, 0])
def clones(module, N):
    """Return a ModuleList holding N independent deep copies of *module*."""
    copies = [copy.deepcopy(module) for _ in range(N)]
    return nn.ModuleList(copies)
#######################################
## Encoder part
#######################################
class Encoder(nn.Module):
    """Stack of N identical encoder layers followed by a final LayerNorm."""

    def __init__(self, layer, N):
        super(Encoder, self).__init__()
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)

    def forward(self, x, mask):
        """Pass the input through every layer in turn, then normalise."""
        out = x
        for enc_layer in self.layers:
            out = enc_layer(out, mask)
        return self.norm(out)
class EncoderLayer(nn.Module):
    """Self-attention then position-wise feed-forward, each wrapped in a
    pre-norm residual sublayer."""

    def __init__(self, size, self_attn, feed_forward, dropout):
        super(EncoderLayer, self).__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        self.sublayer = clones(SublayerConnection(size, dropout), 2)
        self.size = size

    def forward(self, x, mask):
        attend = lambda t: self.self_attn(t, t, t, mask)
        x = self.sublayer[0](x, attend)
        return self.sublayer[1](x, self.feed_forward)
class LayerNorm(nn.Module):
    """Layer normalisation over the last dimension, with learnable gain (a_2)
    and bias (b_2)."""

    def __init__(self, features, eps=1e-6):
        super(LayerNorm, self).__init__()
        self.a_2 = nn.Parameter(torch.ones(features))
        self.b_2 = nn.Parameter(torch.zeros(features))
        self.eps = eps

    def forward(self, x):
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True) + self.eps
        return self.a_2 * ((x - mu) / sigma) + self.b_2
class SublayerConnection(nn.Module):
    """Pre-norm residual wrapper: returns x + dropout(sublayer(norm(x)))."""

    def __init__(self, size, dropout):
        super(SublayerConnection, self).__init__()
        self.norm = LayerNorm(size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, sublayer):
        """Apply residual connection to any sublayer with the same size."""
        residual = self.dropout(sublayer(self.norm(x)))
        return x + residual
#######################################
## attention part
#######################################
def attention(query, key, value, mask=None, dropout=None):
    """Standard scaled dot-product attention.

    Returns:
        (weighted values, attention weights).
    """
    scale = math.sqrt(query.size(-1))
    scores = query @ key.transpose(-2, -1) / scale
    if mask is not None:
        scores = scores.masked_fill(mask == 0, -1e9)
    weights = F.softmax(scores, dim=-1)
    if dropout is not None:
        weights = dropout(weights)
    return weights @ value, weights
class MultiHeadedAttention(nn.Module):
    """Standard multi-head attention (no distance conditioning)."""
    def __init__(self, h, embed_dim, dropout=0.1):
        """Take in model size and number of heads."""
        super(MultiHeadedAttention, self).__init__()
        assert embed_dim % h == 0
        self.h = h
        # four linear layers: Q, K, V projections plus the output projection
        self.linears = clones(nn.Linear(embed_dim, embed_dim), 4)
        # last attention weights, cached for inspection
        self.attn = None
        self.dropout = nn.Dropout(p=dropout)
        # V and K share the same dimensionality: embed_dim split across heads
        self.d_k = embed_dim // h
    def forward(self, query, key, value, mask=None):
        # expand to H heads
        if mask is not None: mask = mask.unsqueeze(1)
        nbatches = query.size(0)
        # 1) Do all the linear projections in batch from embed_dim => h x d_k
        query, key, value = [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2) for l, x in
                             zip(self.linears, (query, key, value))]
        # 2) Apply attention on all the projected vectors in batch.
        x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)
        # 3) "Concat" using a view and apply a final linear.
        x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)
        return self.linears[-1](x)
#######################################
## Position-wise FFN
#######################################
class FeedForward(nn.Module):
    """Position-wise feed-forward: Linear -> ReLU -> Dropout -> Linear."""

    def __init__(self, embed_dim, ffn_dim, dropout=0.1):
        super(FeedForward, self).__init__()
        self.w_1 = nn.Linear(embed_dim, ffn_dim)
        self.w_2 = nn.Linear(ffn_dim, embed_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        hidden = F.relu(self.w_1(x))
        return self.w_2(self.dropout(hidden))
#######################################
## Embedding part
#######################################
class Embeddings(nn.Module):
    """Token embedding scaled by sqrt(embed_dim), as in 'Attention Is All
    You Need'."""

    def __init__(self, embed_dim, vocab):
        super(Embeddings, self).__init__()
        self.embed = nn.Embedding(vocab, embed_dim)
        self.embed_dim = embed_dim

    def forward(self, x):
        scale = math.sqrt(self.embed_dim)
        return self.embed(x) * scale
class Feat_Embedding(nn.Module):
    """Frozen lookup of physical element properties (via mendeleev), followed
    by a small MLP projecting them to ``embed_dim``.

    NOTE(review): rows are built for atomic numbers 0..99; ``element(atom)`` is
    only called for atoms present in ``src_vocab``, so the vocab is assumed to
    contain valid atomic numbers (>= 1) — confirm against callers. Building
    the table queries the mendeleev database at construction time.
    (``creat_embed`` spelling is kept for backward compatibility.)
    """
    def __init__(self, src_vocab, embed_dim):
        super(Feat_Embedding, self).__init__()
        # physical properties fetched per element from mendeleev
        self.feat = ['atomic_radius', 'atomic_radius_rahm', 'atomic_volume', 'atomic_weight', 'boiling_point', 'c6',
                     'covalent_radius_cordero', 'covalent_radius_pyykko', 'density', 'dipole_polarizability',
                     'dipole_polarizability_unc', 'electron_affinity']
        self.feat_embed = self.creat_embed(src_vocab)
        self.feat_linear = nn.Sequential(nn.Linear(len(self.feat), embed_dim), nn.ReLU(),
                                         nn.Linear(embed_dim, embed_dim))
    def creat_embed(self, src_vocab):
        """Build a frozen (100, len(feat)) embedding table; atoms outside the
        vocabulary get all-zero rows."""
        embedding = []
        for atom in range(100):
            if atom in src_vocab:
                embedding.append(torch.tensor([float(element(atom).__dict__[x]) for x in self.feat]).unsqueeze(0))
            else:
                embedding.append(torch.zeros(len(self.feat)).unsqueeze(0))
        embedding = torch.cat(embedding)
        return nn.Embedding.from_pretrained(embedding, freeze=True)
    def forward(self, x):
        return self.feat_linear(self.feat_embed(x))
#######################################
## Position Encoding
#######################################
class PositionalEncoding(nn.Module):
    """Classic sinusoidal positional encoding added to token embeddings.

    The (1, max_len, embed_dim) table is precomputed once and stored in a
    non-parameter buffer.
    """

    def __init__(self, embed_dim, dropout, max_len=1000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        # Precompute the encodings once in log space.
        positions = torch.arange(0., max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, embed_dim, 2) * -(math.log(10000.0) / embed_dim))
        pe = torch.zeros(max_len, embed_dim)
        pe[:, 0::2] = torch.sin(positions * div_term)
        pe[:, 1::2] = torch.cos(positions * div_term)
        # add the batch dim and keep the table in a (non-parameter) buffer
        self.register_buffer('pe', pe.unsqueeze(0))

    def forward(self, x):
        """Add the positional table for the first x.size(1) positions."""
        x = x + Variable(self.pe[:, :x.size(1)], requires_grad=False)
        return self.dropout(x)
class SinusoidalPositionEncoding3D(nn.Module):
    """Sinusoidal positional encoding computed per coordinate axis (x, y, z)
    and summed onto the token embeddings.

    NOTE(review): tensors are created with ``.cuda()``, so this module requires
    a GPU; ``x`` is modified in place via ``+=``.
    """
    def __init__(self, embed_dim, dropout):
        super(SinusoidalPositionEncoding3D, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        self.embed_dim = embed_dim
    def forward(self, x, pos):
        # scale up the magnitude of the coordinates
        pos = pos * 10
        # sum PE with token embeddings
        div = torch.exp(torch.arange(0., self.embed_dim, 2) * -(math.log(10000.0) / self.embed_dim)).double().cuda()
        for i in range(3):
            pe = torch.zeros(x.shape).cuda()
            pe[..., 0::2] = torch.sin(pos[..., i].unsqueeze(2) * div)
            pe[..., 1::2] = torch.cos(pos[..., i].unsqueeze(2) * div)
            x += Variable(pe, requires_grad=False)
        return self.dropout(x)
#######################################
## Predictor
#######################################
class Generator3D(nn.Module):
    """Two-layer MLP prediction head: Linear -> ReLU -> Dropout -> Linear."""

    def __init__(self, embed_dim, tgt, dropout):
        super(Generator3D, self).__init__()
        head = [nn.Linear(embed_dim, embed_dim), nn.ReLU(),
                nn.Dropout(p=dropout), nn.Linear(embed_dim, tgt)]
        self.proj = nn.Sequential(*head)

    def forward(self, x):
        return self.proj(x)
| 9,470 | 33.819853 | 117 | py |
Molformer | Molformer-master/model/tr_afps.py | import copy
import torch
import torch.nn as nn
import sys
sys.path.append("..")
from model.tr_spe import Embeddings, FeedForward, LayerNorm, SublayerConnection, clones, Generator3D
from model.tr_cpe import MultiHeadedAttention
def build_model(vocab, tgt, k, N=6, embed_dim=512, ffn_dim=2048, head=8, dropout=0.1):
    """Assemble a MultiRelationTransformer3D (CNN-reweighted attention + AFPS
    pooling encoder) and Xavier-initialise its weight matrices.

    Args:
        k: number of points kept by adaptive farthest point sampling (> 1).
    """
    # make sure AFPS exists
    assert k > 1
    dup = copy.deepcopy
    attn = MultiHeadedAttention(head, embed_dim)
    ffn = FeedForward(embed_dim, ffn_dim, dropout)
    layer = EncoderLayer(embed_dim, dup(attn), dup(ffn), dropout)
    model = MultiRelationTransformer3D(MultiRelationEncoder(layer, N, k),
                                       Embeddings(embed_dim, vocab),
                                       Generator3D(embed_dim, tgt, dropout))
    for weight in model.parameters():
        if weight.dim() > 1:
            nn.init.xavier_uniform_(weight)
    return model
class MultiRelationTransformer3D(nn.Module):
    """Embedding -> encoder (which pools internally via AFPS) -> head."""

    def __init__(self, encoder, src_embed, generator):
        super(MultiRelationTransformer3D, self).__init__()
        self.encoder = encoder
        self.src_embed = src_embed
        self.generator = generator

    def forward(self, src, src_mask, dist):
        pooled = self.encoder(self.src_embed(src), dist, src_mask)
        return self.generator(pooled)
class MultiRelationEncoder(nn.Module):
    """Encoder stack whose output is pooled per sample: either a mean over all
    valid atoms (small molecules) or a mean over k AFPS-selected atoms."""
    def __init__(self, layer, N, k):
        super(MultiRelationEncoder, self).__init__()
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)
        # number of points retained by adaptive farthest point sampling
        self.k = k
    def forward(self, x, dist, mask):
        for layer in self.layers:
            # each layer also returns its attention scores for AFPS
            x, scores = layer(x, dist, mask)
        # go through every sample in the batch
        x_batch, dist_batch, mask_batch = [], [], []
        for i in range(len(dist)):
            # if the molecules have less number of atoms than k, keep them all
            if torch.sum(mask[i]) <= self.k:
                x_batch.append(torch.mean(x[i][mask[i, 0]], dim=0))
            else:
                # index selection needs no gradients
                with torch.no_grad():
                    idx = AFPS(scores[i], dist[i], self.k, mask[i, 0])
                # dim=1 would give global average pooling (generator input dim k);
                # dim=0 is plain pooling (generator input dim embed_dim)
                x_batch.append(torch.mean(x[i, idx], dim=0))
        return self.norm(torch.stack(x_batch, dim=0))
class EncoderLayer(nn.Module):
    """Pre-norm self-attention + feed-forward layer that additionally returns
    the raw attention weights, which downstream AFPS sampling consumes."""

    def __init__(self, size, self_attn, feed_forward, dropout):
        super(EncoderLayer, self).__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        self.norm = LayerNorm(size)
        self.dropout = nn.Dropout(dropout)
        self.sublayer = SublayerConnection(size, dropout)
        self.size = size

    def forward(self, x, dist, mask):
        normed = self.norm(x)
        att_out = self.self_attn(normed, normed, normed, dist, mask)
        x = x + self.dropout(att_out)
        # self_attn caches its weights in .attn after the call above
        return self.sublayer(x, self.feed_forward), self.self_attn.attn
#######################################
## downsampling part
#######################################
def AFPS(scores, dist, k, mask=None):
    """Attention-weighted farthest point sampling.

    Greedily selects k indices, starting from index 0 (the center point),
    maximising a metric that mixes pairwise distance with per-point attention
    mass (attention weighted at 10%).

    Args:
        scores: attention weights of shape (head, N, N); heads are averaged.
        dist: pairwise distance matrix of shape (N, N).
        k: number of indices to select.
        mask: optional boolean mask restricting selection to valid points.

    Returns:
        List of k selected indices (into the masked point set).
    """
    # merge all heads up front; looping over heads is too slow
    scores = scores.mean(dim=0)
    if mask is not None:
        scores = scores[mask][:, mask]
        dist = dist[mask][:, mask]
    # per-point attention mass: total attention each point receives
    scores = scores.sum(dim=-2)
    # start the solution with the center point; the rest are candidates
    remaining = [i for i in range(len(dist))]
    chosen = [remaining.pop(0)]
    # fold the attention information into the distance metric
    metric = dist / torch.max(dist) + scores.unsqueeze(-1) / torch.max(scores) * 0.1
    while len(chosen) < k:
        # each remaining point's distance to its nearest already-chosen point
        nearest = torch.min(metric[remaining][:, chosen], dim=-1)[0]
        # pick the remaining point that is farthest from the chosen set
        picked = torch.argmax(nearest).item()
        chosen.append(remaining.pop(picked))
    return chosen
| 3,849 | 32.478261 | 116 | py |
SeqOT | SeqOT-main/tools/read_samples.py | #!/usr/bin/env python3
# Developed by Junyi Ma, Xieyuanli Chen
# This file is covered by the LICENSE file in the root of the project SeqOT: https://github.com/BIT-MJY/SeqOT
# SeqOT is the sequence enhanced version of our previous work OverlapTransformer: https://github.com/haomo-ai/OverlapTransformer
# Brief: read sampled range images or descriptors as single input or batch input
import torch
import numpy as np
import sys
sys.path.append('../tools/')
sys.path.append('../modules/')
np.set_printoptions(threshold=sys.maxsize)
from utils.utils import *
def read_one_need_from_seq(file_num, seq_len, poses=None, range_image_root=None):
    """Load a (1, seq_len, 32, 900) stack of range images centered on frame
    ``file_num``.

    If any frame in the window is missing on disk, every slot is filled with
    the center frame instead and the flag is returned False.

    NOTE(review): this requires CUDA (tensors are created with .cuda()), and
    the fallback load uses ``file_num`` directly as the filename — assumes the
    caller passes an already zero-padded frame string; confirm.

    Returns:
        (depth_data_seq, read_complete_flag)
    """
    read_complete_flag = True
    depth_data_seq = torch.zeros((1, seq_len, 32, 900)).type(torch.FloatTensor).cuda()
    # iterate over the window [file_num - seq_len//2, file_num + seq_len//2)
    for i in np.arange(int(file_num)-(seq_len//2), int(file_num)-(seq_len//2)+seq_len):
        file_num_str = str(i).zfill(6)
        if not os.path.exists(range_image_root+file_num_str+".npy"):
            read_complete_flag = False
            # fallback: replicate the center frame into every sequence slot
            depth_data_tmp = np.load(range_image_root+file_num+".npy")
            depth_data_tensor_tmp = torch.from_numpy(depth_data_tmp).type(torch.FloatTensor).cuda()
            depth_data_tensor_tmp = torch.unsqueeze(depth_data_tensor_tmp, dim=0)
            depth_data_tensor_tmp = torch.unsqueeze(depth_data_tensor_tmp, dim=0)
            for m in np.arange(int(file_num) - (seq_len // 2), int(file_num) - (seq_len // 2) + seq_len):
                depth_data_seq[:, int(m - int(file_num) + (seq_len // 2)), :, :] = depth_data_tensor_tmp
            return depth_data_seq, read_complete_flag
        depth_data = np.load(range_image_root+file_num_str+".npy")
        depth_data_tensor = torch.from_numpy(depth_data).type(torch.FloatTensor).cuda()
        depth_data_tensor = torch.unsqueeze(depth_data_tensor, dim=0)
        depth_data_tensor = torch.unsqueeze(depth_data_tensor, dim=0)
        depth_data_seq[:,int(i-int(file_num)+(seq_len//2)),:,: ] = depth_data_tensor
    return depth_data_seq, read_complete_flag
def read_one_batch_pos_neg(f1_index, f1_seq, train_imgf1, train_imgf2, train_dir1, train_dir2, range_image_root,
                           train_overlap, overlap_thresh,seq_len, poses=None):
    """Assemble one training batch of range-image sequences for anchor
    (f1_index, f1_seq): positives (overlap > threshold) at the front of the
    batch, negatives filled from the back.

    Requires CUDA (tensors are created with .cuda()). If any frame file is
    missing, returns early with read_complete_flag False (the batch is then
    only partially filled).

    Returns:
        (sample_batch, sample_truth, pos_num, neg_num, read_complete_flag)
    """
    read_complete_flag = True
    # first pass: count how many pairs match this anchor to size the batch
    batch_size = 0
    for tt in range(len(train_imgf1)):
        if f1_index == train_imgf1[tt] and f1_seq == train_dir1[tt] and (train_overlap[tt]> overlap_thresh or train_overlap[tt]<(overlap_thresh-0.0)):
            batch_size = batch_size + 1
    sample_batch = torch.from_numpy(np.zeros((batch_size, seq_len, 32, 900))).type(torch.FloatTensor).cuda()
    sample_truth = torch.from_numpy(np.zeros((batch_size, 1))).type(torch.FloatTensor).cuda()
    pos_idx = 0
    neg_idx = 0
    pos_num = 0
    neg_num = 0
    # second pass: load each matching pair's frame window
    for j in range(len(train_imgf1)):
        pos_flag = False
        if f1_index == train_imgf1[j] and f1_seq==train_dir1[j]:
            if train_overlap[j]> overlap_thresh:
                pos_num = pos_num + 1
                pos_flag = True
            elif train_overlap[j]< overlap_thresh - 0.0:
                neg_num = neg_num + 1
            else:
                continue
            depth_data_seq = torch.zeros((seq_len, 32, 900)).type(torch.FloatTensor).cuda()
            for i in np.arange(int(train_imgf2[j]) - (seq_len // 2),
                               int(train_imgf2[j]) - (seq_len // 2) + seq_len):  # length can be changed !!!!!!!
                file_num_str = str(i).zfill(6)
                if not os.path.exists(range_image_root + file_num_str + ".npy"):
                    # missing frame: bail out with whatever was filled so far
                    read_complete_flag = False
                    return sample_batch, sample_truth, pos_num, neg_num, read_complete_flag
                depth_data = np.load(range_image_root + file_num_str + ".npy")
                depth_data_tensor = torch.from_numpy(depth_data).type(torch.FloatTensor).cuda()
                depth_data_tensor = torch.unsqueeze(depth_data_tensor, dim=0)
                depth_data_tensor = torch.unsqueeze(depth_data_tensor, dim=0)
                depth_data_seq[int(i - int(train_imgf2[j]) + (seq_len // 2)), :, :] = depth_data_tensor
            if pos_flag:
                # positives fill the batch from the front
                sample_batch[pos_idx,:,:,:] = depth_data_seq
                sample_truth[pos_idx, :] = torch.from_numpy(np.array(train_overlap[j])).type(torch.FloatTensor).cuda()
                pos_idx = pos_idx + 1
            else:
                # negatives fill the batch from the back
                sample_batch[batch_size-neg_idx-1, :, :, :] = depth_data_seq
                sample_truth[batch_size-neg_idx-1, :] = torch.from_numpy(np.array(train_overlap[j])).type(torch.FloatTensor).cuda()
                neg_idx = neg_idx + 1
    return sample_batch, sample_truth, pos_num, neg_num, read_complete_flag
def read_one_need_descriptor_from_seq_ft(file_num, descriptors, seq_len, poses=None):
    """Assemble a (1, seq_len, 256) float32 CUDA tensor of sub-descriptors
    for the window of `seq_len` frames centred on frame `file_num`.

    Args:
        file_num: centre frame index (int or zero-padded string like "000123").
        descriptors: numpy array of per-frame sub-descriptors; the 256-wide
            output buffer assumes rows of length 256 — TODO confirm at callers.
        seq_len: number of consecutive frames in the window.
        poses: unused; kept for signature compatibility with sibling readers.

    Returns:
        (descriptors_seq, read_complete_flag). When any window index falls
        outside [0, descriptors.shape[0]), every slot is filled with the centre
        frame's descriptor instead and the flag is False.
    """
    read_complete_flag = True
    descriptors_seq = torch.zeros((1, seq_len, 256)).type(torch.FloatTensor).cuda()
    for i in np.arange(int(file_num)-(seq_len//2), int(file_num)-(seq_len//2)+seq_len): # length can be changed !!!!!!!
        if i<0 or i>=descriptors.shape[0]:
            # Window leaves the valid index range: fall back to repeating the
            # centre frame's descriptor and report the read as incomplete.
            read_complete_flag = False
            for m in np.arange(int(file_num) - (seq_len // 2), int(file_num) - (seq_len // 2) + seq_len):
                descriptors_seq[0, int(m - int(file_num) + (seq_len // 2)), :] = torch.from_numpy(descriptors[int(file_num),:]).type(torch.FloatTensor).cuda()
            return descriptors_seq, read_complete_flag
        descriptor_tensor = torch.from_numpy(descriptors[i,:]).type(torch.FloatTensor).cuda()
        descriptors_seq[0,int(i-int(file_num)+(seq_len//2)),:] = descriptor_tensor
    return descriptors_seq, read_complete_flag
def read_one_batch_pos_neg_descriptors(f1_index, f1_seq, train_imgf1, train_imgf2, train_dir1, train_dir2, train_overlap, overlap_thresh, seq_len, descs):
    """Build one training batch of descriptor sequences for anchor `f1_index`.

    Positives (overlap > overlap_thresh) are written from the front of the
    batch, negatives (overlap < overlap_thresh) from the back; pairs whose
    overlap equals the threshold are skipped. Returns the batch, the overlap
    ground truth, the positive/negative counts, and a completeness flag that
    is False when any descriptor window could not be fully read.
    """
    read_complete_flag = True
    # First pass: count how many pairs share this anchor and sit clearly on
    # either side of the threshold, so the batch can be pre-allocated.
    num_pairs = 0
    for k in range(len(train_imgf1)):
        same_anchor = f1_index == train_imgf1[k] and f1_seq == train_dir1[k]
        if same_anchor and (train_overlap[k] > overlap_thresh or train_overlap[k] < (overlap_thresh - 0.0)):
            num_pairs += 1
    sample_batch = torch.zeros((num_pairs, seq_len, 256), dtype=torch.float32).cuda()
    sample_truth = torch.zeros((num_pairs, 1), dtype=torch.float32).cuda()
    front = 0      # next free slot for positives (filled from the front)
    back = 0       # negatives placed so far (filled from the back)
    pos_num = 0
    neg_num = 0
    for j in range(len(train_imgf1)):
        if not (f1_index == train_imgf1[j] and f1_seq == train_dir1[j]):
            continue
        if train_overlap[j] > overlap_thresh:
            pos_num += 1
            is_positive = True
        elif train_overlap[j] < overlap_thresh - 0.0:
            neg_num += 1
            is_positive = False
        else:
            continue
        desc_seq, read_complete_flag = read_one_need_descriptor_from_seq_ft(train_imgf2[j], descs, seq_len)
        if not read_complete_flag:
            # Partial batch is returned as-is; caller checks the flag.
            return sample_batch, sample_truth, pos_num, neg_num, read_complete_flag
        truth = torch.from_numpy(np.array(train_overlap[j])).type(torch.FloatTensor).cuda()
        if is_positive:
            sample_batch[front, :, :] = desc_seq
            sample_truth[front, :] = truth
            front += 1
        else:
            sample_batch[num_pairs - back - 1, :, :] = desc_seq
            sample_truth[num_pairs - back - 1, :] = truth
            back += 1
    return sample_batch, sample_truth, pos_num, neg_num, read_complete_flag
SeqOT | SeqOT-main/tools/loss.py | import torch
import torch.nn as nn
import os
import numpy as np
import sys
def best_pos_distance(query, pos_vecs):
    """Squared-L2 distances from `query` (1 x D) to every row of `pos_vecs`
    (N x D); returns the (min, max) distance as scalar tensors."""
    tiled_query = query.repeat(int(pos_vecs.shape[0]), 1)
    sq_dists = ((pos_vecs - tiled_query) ** 2).sum(1)
    smallest, _ = sq_dists.min(0)
    largest, _ = sq_dists.max(0)
    return smallest, largest
def sliding_triplet_loss(q_vec, pos_vecs, neg_vecs, margin, use_min=True, lazy=False, ignore_zero_loss=False):
    """Circular-shift ("sliding") triplet loss.

    For every circular shift of the query descriptor along its last axis, the
    minimum squared distance to the positives/negatives is accumulated through
    a soft-min (sum of exp(-d)); the final loss is a hinge on the difference of
    the negative log-sums:  max(0, margin - log(pos_add) + log(neg_add)).

    Args:
        q_vec: query descriptor, shape (1, W).
        pos_vecs / neg_vecs: positive / negative descriptors, (N, W).
        margin: hinge margin.
        use_min, lazy, ignore_zero_loss: unused; kept for signature
            compatibility with the other loss functions in this module.
    """
    # Fix: removed `loss_list`, `pos_list`, `neg_list` — they were allocated
    # and never read (dead code in the original).
    q_vec_double = torch.cat((q_vec, q_vec), dim=1)   # doubled for wrap-around slicing
    width = q_vec.shape[-1]
    pos_add = torch.zeros((1,)).cuda()
    neg_add = torch.zeros((1,)).cuda()
    for i in range(width):
        shifted_q = q_vec_double[:, i:i + width]
        min_pos, _ = best_pos_distance(shifted_q, pos_vecs)
        pos_add = pos_add + torch.exp(-min_pos)
        min_neg, _ = best_pos_distance(shifted_q, neg_vecs)
        neg_add = neg_add + torch.exp(-min_neg)
    loss_sliding = (margin - torch.log(pos_add) + torch.log(neg_add)).clamp(min=0.0)
    return loss_sliding
def triplet_loss(q_vec, pos_vecs, neg_vecs, margin, use_min=False, lazy=False, ignore_zero_loss=False):
    """Standard triplet loss: hinge(margin + d(q, pos*) - d(q, neg_i)).

    Args:
        q_vec: query descriptor (1 x D).
        pos_vecs / neg_vecs: positive / negative descriptors (N x D).
        margin: hinge margin.
        use_min: use the closest positive instead of the farthest.
        lazy: take the max over the hinge terms instead of their sum.
        ignore_zero_loss: average only over triplets with non-zero loss.
    """
    min_pos, max_pos = best_pos_distance(q_vec, pos_vecs)
    positive = min_pos if use_min else max_pos
    num_neg = neg_vecs.shape[0]
    query_copies = q_vec.repeat(int(num_neg), 1)
    positive = positive.view(-1, 1).repeat(int(num_neg), 1)
    # Fix: the original computed `negative` and then re-evaluated the same
    # expression inline on the next line; compute it once and reuse it.
    negative = ((neg_vecs - query_copies) ** 2).sum(1).unsqueeze(1)
    loss = (margin + positive - negative).clamp(min=0.0)
    if lazy:
        triplet_loss = loss.max(1)[0]
    else:
        triplet_loss = loss.sum(0)
    if ignore_zero_loss:
        hard_triplets = torch.gt(triplet_loss, 1e-16).float()
        num_hard_triplets = torch.sum(hard_triplets)
        triplet_loss = triplet_loss.sum() / (num_hard_triplets + 1e-16)
    else:
        triplet_loss = triplet_loss.mean()
    return triplet_loss
def triplet_loss_inv(q_vec, pos_vecs, neg_vecs, margin, use_min=True, lazy=False, ignore_zero_loss=False):
    """Inverted triplet loss: hinge(margin - d(q, neg*) + d(q, pos_i)).

    Anchors on a single best (or worst) negative distance and penalizes every
    positive that is not at least `margin` closer than it.
    """
    min_neg, max_neg = best_pos_distance(q_vec, neg_vecs)
    anchor_neg = min_neg if use_min else max_neg
    num_pos = pos_vecs.shape[0]
    query_copies = q_vec.repeat(int(num_pos), 1)
    anchor_neg = anchor_neg.view(-1, 1).repeat(int(num_pos), 1)
    pos_dists = ((pos_vecs - query_copies) ** 2).sum(1).unsqueeze(1)
    hinge = (margin - anchor_neg + pos_dists).clamp(min=0.0)
    if lazy:
        result = hinge.max(1)[0]
    else:
        result = hinge.sum(0)
    if ignore_zero_loss:
        # Average only over triplets that are actually violated.
        hard = torch.gt(result, 1e-16).float()
        result = result.sum() / (torch.sum(hard) + 1e-16)
    else:
        result = result.mean()
    return result
def triplet_loss_wrapper(q_vec, pos_vecs, neg_vecs, m1, m2, use_min=False, lazy=False, ignore_zero_loss=False):
    """Delegate to triplet_loss with margin `m1`.

    NOTE: `m2` is accepted only for interface compatibility and is unused.
    """
    return triplet_loss(q_vec, pos_vecs, neg_vecs, m1, use_min, lazy, ignore_zero_loss)
SeqOT | SeqOT-main/modules/gem.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class GeM(nn.Module):
    """Generalized-mean (GeM) pooling over the height axis only.

    Pools a (B, C, H, W) tensor to (B, C, 1, W) with a learnable exponent
    `p` (p -> inf approximates max pooling, p = 1 is average pooling).
    """

    def __init__(self, p=3, eps=1e-6):
        super(GeM, self).__init__()
        self.p = nn.Parameter(torch.ones(1) * p)  # learnable pooling exponent
        self.eps = eps                            # clamp floor to keep pow() stable

    def forward(self, x):
        return self.gem(x, p=self.p, eps=self.eps)

    def gem(self, x, p=3, eps=1e-6):
        # Pool only across dim -2 (height); the width dimension is preserved.
        clamped = x.clamp(min=eps).pow(p)
        pooled = F.avg_pool2d(clamped, (x.size(-2), 1))
        return pooled.pow(1. / p)

    def __repr__(self):
        return '{}(p={:.4f}, eps={})'.format(
            self.__class__.__name__, self.p.data.tolist()[0], self.eps)
| 701 | 29.521739 | 117 | py |
SeqOT | SeqOT-main/modules/netvlad.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
class NetVLADLoupe(nn.Module):
    """NetVLAD aggregation layer ("Loupe" formulation): soft-assigns local
    features to learned clusters, aggregates residuals, and projects the
    flattened VLAD vector to `output_dim`, optionally with context gating.
    """
    def __init__(self, feature_size, max_samples, cluster_size, output_dim,
                 gating=True, add_batch_norm=True, is_training=True):
        super(NetVLADLoupe, self).__init__()
        self.feature_size = feature_size      # D: per-sample feature dimension
        self.max_samples = max_samples        # N: number of local features aggregated
        self.output_dim = output_dim          # final descriptor dimension
        self.is_training = is_training        # stored but not read in this class
        self.gating = gating                  # apply GatingContext on the output
        self.add_batch_norm = add_batch_norm  # BN on assignments instead of biases
        self.cluster_size = cluster_size      # K: number of VLAD clusters
        self.softmax = nn.Softmax(dim=-1)
        # Soft-assignment weights (D x K), scaled by 1/sqrt(D).
        self.cluster_weights = nn.Parameter(torch.randn(
            feature_size, cluster_size) * 1 / math.sqrt(feature_size))
        # Cluster centres used to form residuals (1 x D x K).
        self.cluster_weights2 = nn.Parameter(torch.randn(
            1, feature_size, cluster_size) * 1 / math.sqrt(feature_size))
        # Final projection (K*D -> output_dim).
        self.hidden1_weights = nn.Parameter(torch.randn(
            cluster_size * feature_size, output_dim) * 1 / math.sqrt(feature_size))
        if add_batch_norm:
            self.cluster_biases = None
            self.bn1 = nn.BatchNorm1d(cluster_size)
        else:
            self.cluster_biases = nn.Parameter(torch.randn(
                cluster_size) * 1 / math.sqrt(feature_size))
            self.bn1 = None
        self.bn2 = nn.BatchNorm1d(output_dim)  # NOTE(review): defined but unused in forward()
        if gating:
            self.context_gating = GatingContext(
                output_dim, add_batch_norm=add_batch_norm)
    def forward(self, x):
        """Aggregate x into a (batch, output_dim) descriptor.

        Expects x reshapeable to (batch, max_samples, feature_size) after
        transposing dims 1 and 3 — presumably (B, D, N, 1); verify at callers.
        """
        x = x.transpose(1, 3).contiguous()
        x = x.view((-1, self.max_samples, self.feature_size))
        # Soft-assignment of every local feature to each cluster: (B, N, K).
        activation = torch.matmul(x, self.cluster_weights)
        if self.add_batch_norm:
            activation = activation.view(-1, self.cluster_size)
            activation = self.bn1(activation)
            activation = activation.view(-1, self.max_samples, self.cluster_size)
        else:
            activation = activation + self.cluster_biases
        activation = self.softmax(activation)
        activation = activation.view((-1, self.max_samples, self.cluster_size))
        # a = sum of assignments per cluster times cluster centres: (B, D, K).
        a_sum = activation.sum(-2, keepdim=True)
        a = a_sum * self.cluster_weights2
        activation = torch.transpose(activation, 2, 1)
        x = x.view((-1, self.max_samples, self.feature_size))
        # VLAD core: weighted sum of features minus assigned cluster centres.
        vlad = torch.matmul(activation, x)
        vlad = torch.transpose(vlad, 2, 1)
        vlad = vlad - a
        # Intra-normalize per cluster, flatten, then L2-normalize the whole vector.
        vlad = F.normalize(vlad, dim=1, p=2)
        vlad = vlad.reshape((-1, self.cluster_size * self.feature_size))
        vlad = F.normalize(vlad, dim=1, p=2)
        vlad = torch.matmul(vlad, self.hidden1_weights)
        if self.gating:
            vlad = self.context_gating(vlad)
        return vlad
class GatingContext(nn.Module):
    """Context gating: re-weights each descriptor dimension with a learned
    sigmoid gate, out = x * sigmoid(x W (+ b | BN))."""

    def __init__(self, dim, add_batch_norm=True):
        super(GatingContext, self).__init__()
        self.dim = dim
        self.add_batch_norm = add_batch_norm
        # Gate projection (dim x dim), scaled by 1/sqrt(dim).
        self.gating_weights = nn.Parameter(
            torch.randn(dim, dim) * 1 / math.sqrt(dim))
        self.sigmoid = nn.Sigmoid()
        if add_batch_norm:
            # Batch norm replaces the additive bias.
            self.gating_biases = None
            self.bn1 = nn.BatchNorm1d(dim)
        else:
            self.gating_biases = nn.Parameter(
                torch.randn(dim) * 1 / math.sqrt(dim))
            self.bn1 = None

    def forward(self, x):
        gates = torch.matmul(x, self.gating_weights)
        if self.add_batch_norm:
            gates = self.bn1(gates)
        else:
            gates = gates + self.gating_biases
        return x * self.sigmoid(gates)
| 3,612 | 33.740385 | 83 | py |
SeqOT | SeqOT-main/modules/seqTransformerCat.py | #!/usr/bin/env python3
# Developed by Junyi Ma, Xieyuanli Chen
# This file is covered by the LICENSE file in the root of the project SeqOT: https://github.com/BIT-MJY/SeqOT
# SeqOT is the sequence enhanced version of our previous work OverlapTransformer: https://github.com/haomo-ai/OverlapTransformer
# Brief: architecture of SeqOT
import torch
import torch.nn as nn
import numpy as np
import sys
sys.path.append('../tools/')
from modules.netvlad import NetVLADLoupe
import torch.nn.functional as F
class featureExtracter(nn.Module):
    """SeqOT backbone: per-frame OverlapTransformer-style feature extraction
    (conv stack + transformer over the width axis) followed by a second
    transformer across the whole `seqL`-frame sequence and NetVLAD pooling
    into a single 256-dim sequence descriptor.
    """
    def __init__(self, seqL=5):
        super(featureExtracter, self).__init__()
        self.seqL = seqL
        # Conv stack compresses the 32-row range image to a single row per column
        # (kernels act on height only; width 900 is preserved).
        self.conv1 = nn.Conv2d(1, 16, kernel_size=(2,1), stride=(2,1), bias=False)
        self.conv1_add = nn.Conv2d(16, 16, kernel_size=(5,1), stride=(1,1), bias=False)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=(3,1), stride=(1,1), bias=False)
        self.conv3 = nn.Conv2d(32, 64, kernel_size=(3,1), stride=(1,1), bias=False)
        self.conv4 = nn.Conv2d(64, 64, kernel_size=(3,1), stride=(1,1), bias=False)
        self.conv5 = nn.Conv2d(64, 128, kernel_size=(3,1), stride=(1,1), bias=False)
        self.conv6 = nn.Conv2d(128, 128, kernel_size=(3,1), stride=(1,1), bias=False)
        self.conv7 = nn.Conv2d(128, 128, kernel_size=(1,1), stride=(2,1), bias=False)
        self.relu = nn.ReLU(inplace=True)
        # Intra-frame transformer (over the 900 width positions, d_model=256).
        encoder_layer = nn.TransformerEncoderLayer(d_model=256, nhead=4, dim_feedforward=1024, activation='relu', batch_first=False,dropout=0.)
        self.transformer_encoder = torch.nn.TransformerEncoder(encoder_layer, num_layers=1)
        # Inter-frame transformer (over all seqL*900 positions, d_model=512).
        encoder_layer2 = nn.TransformerEncoderLayer(d_model=512, nhead=4, dim_feedforward=1024, activation='relu', batch_first=False,dropout=0.)
        self.transformer_encoder2 = torch.nn.TransformerEncoder(encoder_layer2, num_layers=1)
        self.convLast1 = nn.Conv2d(128, 256, kernel_size=(1,1), stride=(1,1), bias=False)
        self.convLast2 = nn.Conv2d(512, 512, kernel_size=(1,1), stride=(1,1), bias=False)
        self.sigmoid = nn.Sigmoid()   # NOTE(review): defined but not used in forward()
        self.softmax = nn.Softmax()   # NOTE(review): defined but not used in forward()
        # NetVLAD aggregates the seqL*900 per-column features into a 256-dim descriptor.
        self.net_vlad = NetVLADLoupe(feature_size=512, max_samples=int(900*self.seqL), cluster_size=64, # before 11.12 --- 64
                                     output_dim=256, gating=True, add_batch_norm=False, # output_dim=512
                                     is_training=True)
    def forward(self, x_l):
        """x_l: range-image sequence; indexing implies shape (B, seqL, 32, 900).
        Returns an L2-normalized (B, 256) sequence descriptor."""
        out_l_seq = None
        for i in range(self.seqL):
            # Process one frame at a time through the shared conv + transformer.
            one_x_l_from_seq = x_l[:, i:(i+1), :, :]
            out_l = self.relu(self.conv1(one_x_l_from_seq))
            out_l = self.relu(self.conv1_add(out_l))
            out_l = self.relu(self.conv2(out_l))
            out_l = self.relu(self.conv3(out_l))
            out_l = self.relu(self.conv4(out_l))
            out_l = self.relu(self.conv5(out_l))
            out_l = self.relu(self.conv6(out_l))
            out_l = self.relu(self.conv7(out_l))
            out_l_1 = out_l.permute(0,1,3,2)
            out_l_1 = self.relu(self.convLast1(out_l_1))
            # Transformer expects (seq, batch, feature): here seq = width positions.
            out_l = out_l_1.squeeze(3)
            out_l = out_l.permute(2, 0, 1)
            out_l = self.transformer_encoder(out_l)
            out_l = out_l.permute(1, 2, 0)
            out_l = out_l.unsqueeze(3)
            # Concatenate conv features with attended features (256+256=512 channels).
            out_l = torch.cat((out_l_1, out_l), dim=1)
            out_l = self.relu(self.convLast2(out_l))
            out_l = F.normalize(out_l, dim=1)
            if i==0:
                out_l_seq = out_l
            else:
                # Stack frames along the width axis to form one long sequence.
                out_l_seq = torch.cat((out_l_seq, out_l), dim=-2)
        # Second transformer fuses information across all frames of the sequence.
        out_l_seq = out_l_seq.squeeze(3)
        out_l_seq = out_l_seq.permute(2, 0, 1)
        out_l_seq = self.transformer_encoder2(out_l_seq)
        out_l_seq = out_l_seq.permute(1, 2, 0)
        out_l_seq = out_l_seq.unsqueeze(3)
        out_l_seq = self.net_vlad(out_l_seq)
        out_l_seq = F.normalize(out_l_seq, dim=1)
        return out_l_seq
if __name__ == '__main__':
    # Smoke check: build the 5-frame extractor and print its architecture.
    amodel = featureExtracter(5)
    print(amodel)
| 3,992 | 41.031579 | 144 | py |
SeqOT | SeqOT-main/test/test_gem_prepare.py | #!/usr/bin/env python3
# Developed by Junyi Ma, Xieyuanli Chen
# This file is covered by the LICENSE file in the root of the project SeqOT: https://github.com/BIT-MJY/SeqOT
# SeqOT is the sequence enhanced version of our previous work OverlapTransformer: https://github.com/haomo-ai/OverlapTransformer
# Brief: generate predictions for the final evaluation
import os
import sys
p = os.path.dirname(os.path.dirname((os.path.abspath(__file__))))
if p not in sys.path:
sys.path.append(p)
sys.path.append('../tools/')
sys.path.append('../modules/')
import torch
import yaml
from tqdm import tqdm
import faiss
import numpy as np
np.set_printoptions(threshold=sys.maxsize)
from modules.gem import GeM
from tools.read_samples import read_one_need_descriptor_from_seq_ft
from tools.utils.utils import *
class testHandler():
    """Evaluation driver: pools sub-descriptors with the trained GeM head,
    indexes every 10th database descriptor with FAISS, queries every 5th
    query descriptor, and saves the top-k L2 retrieval results to disk.
    """
    def __init__(self, seqlen=20, pretrained_weights=None, descs_database=None, descs_query=None):
        super(testHandler, self).__init__()
        self.amodel = GeM()
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.amodel.to(self.device)
        self.weights = pretrained_weights        # path to the GeM checkpoint
        self.descs_database = descs_database     # per-frame sub-descriptors (reference map)
        self.descs_query = descs_query           # per-frame sub-descriptors (query run)
        self.seqlen = seqlen                     # temporal window fed to GeM
    def eval(self):
        """Run retrieval and write `predicted_L2_dis.npz` with rows of
        (query_idx, database_idx, L2_distance)."""
        resume_filename = self.weights
        print("Resuming From ", resume_filename)
        checkpoint = torch.load(resume_filename)
        self.amodel.load_state_dict(checkpoint['state_dict'])
        #########################################################################################################################
        interval = 10  # only every 10th database frame is indexed
        des_list = np.zeros((int(self.descs_database.shape[0]//interval)+1, 256))
        for j in tqdm(np.arange(0, self.descs_database.shape[0], interval)):
            f1_index = str(j).zfill(6)
            current_batch,_ = read_one_need_descriptor_from_seq_ft(f1_index, self.descs_database, seq_len=self.seqlen)
            self.amodel.eval()
            current_batch_des = self.amodel(current_batch)
            current_batch_des = current_batch_des.squeeze(1)
            des_list[int(j//interval), :] = current_batch_des[0, :].cpu().detach().numpy()
        des_list = des_list.astype('float32')   # FAISS requires float32
        row_list = []
        nlist = 1   # single IVF cell -> effectively exhaustive search
        k = 22      # top-k candidates returned per query
        d = 256     # descriptor dimensionality
        quantizer = faiss.IndexFlatL2(d)
        index = faiss.IndexIVFFlat(quantizer, d, nlist, faiss.METRIC_L2)
        assert not index.is_trained
        index.train(des_list)
        assert index.is_trained
        index.add(des_list)
        for i in range(0, self.descs_query.shape[0], 5):
            i_index = str(i).zfill(6)
            current_batch,_ = read_one_need_descriptor_from_seq_ft(i_index, self.descs_query, seq_len=self.seqlen)
            self.amodel.eval()
            current_batch_des = self.amodel(current_batch)
            current_batch_des = current_batch_des.squeeze(1)
            des_list_current = current_batch_des[0, :].cpu().detach().numpy()
            D, I = index.search(des_list_current.reshape(1, -1), k)
            for j in range(D.shape[1]):
                # One result row: (query frame, database frame, distance).
                one_row = np.zeros((1,3))
                one_row[:, 0] = i
                one_row[:, 1] = I[:,j]*interval   # map index slot back to frame number
                one_row[:, 2] = D[:,j]
                row_list.append(one_row)
                print("ref:"+str(i) + "---->" + "query:" + str(I[:, j]*interval ) + " " + str(D[:, j]))
        row_list_arr = np.array(row_list)
        np.savez_compressed("./predicted_L2_dis", row_list_arr)
if __name__ == '__main__':
    # load config ================================================================
    # Reads paths/hyper-parameters from the shared YAML config, then runs retrieval.
    config_filename = '../config/config.yml'
    config = yaml.safe_load(open(config_filename))
    sub_descriptors_database = np.load(config["test_gem_prepare"]["sub_descriptors_database_file"])
    sub_descriptors_query = np.load(config["test_gem_prepare"]["sub_descriptors_query_file"])
    seqlen = config["test_gem_prepare"]["seqlen"]
    pretrained_weights = config["test_gem_prepare"]["weights"]
    # ============================================================================
    test_handler = testHandler(seqlen=seqlen, pretrained_weights=pretrained_weights,
                               descs_database=sub_descriptors_database,
                               descs_query=sub_descriptors_query)
    test_handler.eval()
SeqOT | SeqOT-main/visualize/viz.py | #!/usr/bin/env python3
# Developed by Junyi Ma, Xieyuanli Chen
# This file is covered by the LICENSE file in the root of the project SeqOT: https://github.com/BIT-MJY/SeqOT
# SeqOT is the sequence enhanced version of our previous work OverlapTransformer: https://github.com/haomo-ai/OverlapTransformer
# Brief: Visualization
import os
import sys
p = os.path.dirname(os.path.dirname((os.path.abspath(__file__))))
if p not in sys.path:
sys.path.append(p)
sys.path.append('../tools/')
sys.path.append('../modules/')
import torch
import yaml
from tqdm import tqdm
import faiss
import matplotlib.pyplot as plt
import numpy as np
np.set_printoptions(threshold=sys.maxsize)
from modules.gem import GeM
from tools.read_samples import read_one_need_descriptor_from_seq_ft
from tools.utils.utils import *
class vizHandler():
    """Visualization driver: performs the same GeM + FAISS retrieval as the
    evaluation script and live-plots query positions and retrieved loop
    candidates on the database trajectory with matplotlib.
    """
    def __init__(self, seqlen=20, pretrained_weights=None, descs_database=None, descs_query=None,
                 gt_file_name=None, poses_database=None, poses_query=None):
        super(vizHandler, self).__init__()
        self.amodel = GeM()
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.amodel.to(self.device)
        self.weights = pretrained_weights        # path to the GeM checkpoint
        self.descs_database = descs_database     # per-frame sub-descriptors (reference map)
        self.descs_query = descs_query           # per-frame sub-descriptors (query run)
        self.seqlen = seqlen                     # temporal window fed to GeM
        self.poses_database = poses_database     # database poses; indexing implies (N, 4, 4)
        self.poses_query = poses_query           # query poses; indexing implies (N, 4, 4)
        self.gt = np.load(gt_file_name, allow_pickle='True')  # ground-truth loop indices per query
    def eval(self):
        """Index the database, query every 5th frame (from 1000), and plot
        candidates (red = in ground truth, pink marker at query otherwise)."""
        # plt.figure(figsize=(15,8))
        # BUGFIX: read the poses stored on self instead of the module-level
        # globals `poses_database`/`poses_query`, which only exist when this
        # file is executed as a script.
        poses_database_x = self.poses_database[:, 0,-1]
        poses_database_y = self.poses_database[:, 1,-1]
        poses_query_x = self.poses_query[:, 0,-1]
        poses_query_y = self.poses_query[:, 1,-1]
        resume_filename = self.weights
        print("Resuming From ", resume_filename)
        checkpoint = torch.load(resume_filename)
        self.amodel.load_state_dict(checkpoint['state_dict'])
        #########################################################################################################################
        interval = 10  # only every 10th database frame is indexed
        des_list = np.zeros((int(self.descs_database.shape[0]//interval)+1, 256))
        for j in tqdm(np.arange(0, self.descs_database.shape[0], interval)):
            f1_index = str(j).zfill(6)
            current_batch,_ = read_one_need_descriptor_from_seq_ft(f1_index, self.descs_database, seq_len=self.seqlen)
            self.amodel.eval()
            current_batch_des = self.amodel(current_batch)
            current_batch_des = current_batch_des.squeeze(1)
            des_list[int(j//interval), :] = current_batch_des[0, :].cpu().detach().numpy()
        des_list = des_list.astype('float32')   # FAISS requires float32
        row_list = []
        nlist = 1   # single IVF cell -> effectively exhaustive search
        k = 22      # top-k candidates per query
        d = 256     # descriptor dimensionality
        quantizer = faiss.IndexFlatL2(d)
        index = faiss.IndexIVFFlat(quantizer, d, nlist, faiss.METRIC_L2)
        assert not index.is_trained
        index.train(des_list)
        assert index.is_trained
        index.add(des_list)
        for i in range(1000, self.descs_query.shape[0], 5):
            print(str(i)+" / "+str(self.descs_query.shape[0]))
            i_index = str(i).zfill(6)
            current_batch,_ = read_one_need_descriptor_from_seq_ft(i_index, self.descs_query, seq_len=self.seqlen)
            self.amodel.eval()
            current_batch_des = self.amodel(current_batch)
            current_batch_des = current_batch_des.squeeze(1)
            des_list_current = current_batch_des[0, :].cpu().detach().numpy()
            D, I = index.search(des_list_current.reshape(1, -1), k)
            for j in range(D.shape[1]):
                one_row = np.zeros((1,3))
                one_row[:, 0] = i
                one_row[:, 1] = I[:,j]*interval   # map index slot back to frame number
                one_row[:, 2] = D[:,j]
                row_list.append(one_row)
                # print("ref:"+str(i) + "---->" + "query:" + str(I[:, j]*interval ) + " " + str(D[:, j]))
                ref_ind = int(I[:,j]*interval)
                if j==0:
                    # Ground-truth indices for this query (queries are every 5th frame).
                    gt_idxes = np.array(self.gt[int(i//5)]).tolist()
                plt.scatter(poses_database_x, poses_database_y, s=0.1, color="blue")
                plt.scatter(poses_query_x[i], poses_query_y[i], linewidths=5, color="black" )
                if ref_ind in gt_idxes:
                    plt.scatter(poses_database_x[ref_ind], poses_database_y[ref_ind], linewidths=0.8, color="red" )
                else:
                    plt.scatter(poses_query_x[i], poses_query_y[i], linewidths=5, color="pink" )
            # Off-canvas points used only to populate the legend entries.
            plt.scatter(5000, 5000, linewidths=5, c="black", alpha=0.8, label="query")
            plt.scatter(5000, 5000, linewidths=0.8, c="red", alpha=0.8, label="loop candidate")
            plt.scatter(5000, 5000, linewidths=0.05, c="blue", alpha=0.8, label="database")
            plt.xlim([-800, 100])
            plt.ylim([-400, 100])
            plt.legend()
            plt.ion()
            plt.pause(0.01)
            plt.clf()
if __name__ == '__main__':
    # load config ================================================================
    # Reads descriptor/pose/ground-truth paths from the shared YAML config.
    config_filename = '../config/config.yml'
    config = yaml.safe_load(open(config_filename))
    sub_descriptors_database = np.load(config["test_gem_prepare"]["sub_descriptors_database_file"])
    sub_descriptors_query = np.load(config["test_gem_prepare"]["sub_descriptors_query_file"])
    seqlen = config["test_gem_prepare"]["seqlen"]
    pretrained_weights = config["test_gem_prepare"]["weights"]
    gt_file_name = config["test_seqot"]["groud_truth_file"]
    poses_database = np.load(config["viz"]["poses_database"])
    poses_query = np.load(config["viz"]["poses_query"])
    # ============================================================================
    viz_handler = vizHandler(seqlen=seqlen, pretrained_weights=pretrained_weights,
                             descs_database=sub_descriptors_database, descs_query=sub_descriptors_query,
                             gt_file_name=gt_file_name,
                             poses_database=poses_database, poses_query=poses_query)
    viz_handler.eval()
SeqOT | SeqOT-main/train/gen_sub_descriptors.py | #!/usr/bin/env python3
# Developed by Junyi Ma, Xieyuanli Chen
# This file is covered by the LICENSE file in the root of the project SeqOT: https://github.com/BIT-MJY/SeqOT
# SeqOT is the sequence enhanced version of our previous work OverlapTransformer: https://github.com/haomo-ai/OverlapTransformer
# Brief: generate sub-descriptors for the gem training and evaluation
import os
import sys
p = os.path.dirname(os.path.dirname((os.path.abspath(__file__))))
if p not in sys.path:
sys.path.append(p)
sys.path.append('../tools/')
sys.path.append('../modules/')
import torch
import numpy as np
np.set_printoptions(threshold=sys.maxsize)
from tqdm import tqdm
import yaml
from modules.seqTransformerCat import featureExtracter
from tools.read_samples import read_one_need_from_seq
from tools.utils.utils import *
class testHandler():
    """Generates per-frame sub-descriptors with the trained SeqOT backbone for
    both the database and query range-image folders, saving them as
    `des_list_database.npy` / `des_list_query.npy` for the GeM stage.
    """
    def __init__(self, seqlen=3, pretrained_weights=None,range_image_database_root=None,
                 range_image_query_root=None):
        super(testHandler, self).__init__()
        self.seq_len = seqlen
        self.amodel = featureExtracter(seqL=self.seq_len)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.amodel.to(self.device)
        print(self.amodel)
        self.parameters = self.amodel.parameters()
        self.weights = pretrained_weights                          # path to the SeqOT checkpoint
        self.range_image_database_root = range_image_database_root # folder of database range images
        self.range_image_query_root = range_image_query_root       # folder of query range images
    def eval(self):
        """Compute and save one 256-dim sub-descriptor per frame."""
        resume_filename = self.weights
        print("Resuming From ", resume_filename)
        checkpoint = torch.load(resume_filename)
        self.amodel.load_state_dict(checkpoint['state_dict']) # load the saved state dict
        interval = 1  # every frame is described
        scan_paths_database = load_files(self.range_image_database_root)
        print("the number of reference scans ", len(scan_paths_database))
        des_list = np.zeros((int(len(scan_paths_database)//interval)+1, 256))
        for j in tqdm(np.arange(0, len(scan_paths_database), interval)):
            current_batch, read_complete_flag = read_one_need_from_seq(str(j).zfill(6), self.seq_len, range_image_root=self.range_image_database_root)
            self.amodel.eval()
            current_batch_des = self.amodel(current_batch)
            des_list[int(j//interval), :] = current_batch_des[0, :].cpu().detach().numpy()
        des_list = des_list.astype('float32')
        np.save("des_list_database", des_list)
        scan_paths_query = load_files(self.range_image_query_root)
        print("the number of query scans ", len(scan_paths_query))
        des_list_query = np.zeros((int(len(scan_paths_query)//interval)+1, 256))
        for j in tqdm(np.arange(0, len(scan_paths_query), interval)):
            current_batch, read_complete_flag = read_one_need_from_seq(str(j).zfill(6), self.seq_len, range_image_root=self.range_image_query_root)
            self.amodel.eval()
            current_batch_des = self.amodel(current_batch)
            des_list_query[int(j//interval), :] = current_batch_des[0, :].cpu().detach().numpy()
        des_list_query = des_list_query.astype('float32')
        np.save("des_list_query", des_list_query)
if __name__ == '__main__':
    # load config ================================================================
    # Reads checkpoint path and range-image folders from the shared YAML config.
    config_filename = '../config/config.yml'
    config = yaml.safe_load(open(config_filename))
    seqlen = config["gen_sub_descriptors"]["seqlen"]
    pretrained_weights = config["gen_sub_descriptors"]["weights"]
    range_image_database_root = config["data_root"]["range_image_database_root"]
    range_image_query_root = config["data_root"]["range_image_query_root"]
    # ============================================================================
    test_handler = testHandler(seqlen=seqlen, pretrained_weights=pretrained_weights,
                               range_image_database_root=range_image_database_root,
                               range_image_query_root=range_image_query_root)
    test_handler.eval()
SeqOT | SeqOT-main/train/training_seqot.py | #!/usr/bin/env python3
# Developed by Junyi Ma, Xieyuanli Chen
# This file is covered by the LICENSE file in the root of the project SeqOT: https://github.com/BIT-MJY/SeqOT
# SeqOT is the sequence enhanced version of our previous work OverlapTransformer: https://github.com/haomo-ai/OverlapTransformer
# Brief: train SeqOT with the database of the NCLT dataset
import os
import sys
p = os.path.dirname(os.path.dirname((os.path.abspath(__file__))))
if p not in sys.path:
sys.path.append(p)
sys.path.append('../tools/')
sys.path.append('../modules/')
import torch
import numpy as np
np.set_printoptions(threshold=sys.maxsize)
import yaml
from tensorboardX import SummaryWriter
from modules.seqTransformerCat import featureExtracter
import tools.loss as PNV_loss
from tools.read_samples import read_one_need_from_seq
from tools.read_samples import read_one_batch_pos_neg
from tools.utils.utils import *
class trainHandler():
    """Training driver for the SeqOT backbone on the NCLT database split.

    For each unique anchor frame it reads one query sequence and a batch of
    positive/negative sequences (by overlap threshold) and optimizes a
    triplet loss with Adam + StepLR, checkpointing every epoch.
    """
    def __init__(self, height=32, width=900, seqlen=3, lr=0.000005, resume=False, pretrained_weights=None,
                 train_set=None, poses_file=None, range_image_root=None):
        super(trainHandler, self).__init__()
        self.height = height          # range-image rows (stored; network assumes 32)
        self.width = width            # range-image columns (stored; network assumes 900)
        self.seq_len = seqlen         # frames per input sequence
        self.learning_rate = lr
        self.resume = resume          # resume training from `pretrained_weights`
        self.train_set = train_set    # npy of (img1, img2, overlap) training pairs
        self.poses_file = poses_file
        self.weights = pretrained_weights
        self.range_image_root = range_image_root
        self.amodel = featureExtracter(seqL=self.seq_len)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.amodel.to(self.device)
        self.parameters = self.amodel.parameters()
        self.optimizer = torch.optim.Adam(self.parameters, self.learning_rate)
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=5, gamma=0.9)
        self.traindata_npzfiles = train_set
        self.train_set_imgf1_imgf2_overlap = np.load(self.train_set)
        self.poses = np.load(self.poses_file)
        self.overlap_thresh = 0.3   # overlap above -> positive pair, below -> negative
    def train(self):
        """Run the training loop (up to 100 epochs), saving a checkpoint and a
        TensorBoard loss scalar per epoch."""
        epochs = 100
        if self.resume:
            resume_filename = self.weights
            print("Resuming From ", resume_filename)
            checkpoint = torch.load(resume_filename)
            starting_epoch = checkpoint['epoch']
            self.amodel.load_state_dict(checkpoint['state_dict'])
            # self.optimizer.load_state_dict(checkpoint['optimizer'])
        else:
            print("Training From Scratch ..." )
            starting_epoch = 0
        writer1 = SummaryWriter(comment="LR_xxx")
        for i in range(starting_epoch+1, epochs):
            # shuffle
            # self.train_set_imgf1_imgf2_overlap = np.random.permutation(self.train_set_imgf1_imgf2_overlap)
            self.train_imgf1 = self.train_set_imgf1_imgf2_overlap[:, 0]
            self.train_imgf2 = self.train_set_imgf1_imgf2_overlap[:, 1]
            self.train_dir1 = np.zeros((len(self.train_imgf1),))
            self.train_dir2 = np.zeros((len(self.train_imgf2),))
            self.train_overlap = self.train_set_imgf1_imgf2_overlap[:, 2].astype(float)
            print("=======================================================================\n\n\n")
            print("total pairs: ", len(self.train_imgf1))
            print("\n\n\n=======================================================================")
            loss_each_epoch = 0
            used_num = 0
            # BUGFIX: the original dedup used two parallel lists that were only
            # appended inside `for iddd in range(len(used_list_f1))`, which never
            # executes while the lists are empty — so no anchor was ever recorded
            # and duplicates were never skipped. A set implements the intent.
            used_anchors = set()
            for j in range(len(self.train_imgf1)):
                f1_index = self.train_imgf1[j]
                dir1_index = self.train_dir1[j]
                anchor_key = (f1_index, dir1_index)
                if anchor_key in used_anchors:
                    continue   # this anchor was already trained on this epoch
                used_anchors.add(anchor_key)
                current_batch, read_complete_flag = read_one_need_from_seq(f1_index, self.seq_len, self.poses, self.range_image_root)
                if not read_complete_flag:
                    continue
                sample_batch, sample_truth, pos_num, neg_num, read_complete_flag = read_one_batch_pos_neg \
                    (f1_index, dir1_index, self.train_imgf1, self.train_imgf2, self.train_dir1, self.train_dir2, self.range_image_root,
                     self.train_overlap, self.overlap_thresh, self.seq_len, self.poses)
                if not read_complete_flag:
                    continue
                # Cap the batch at 3 positives and 3 negatives (positives are
                # stored at the front of sample_batch, negatives at the back).
                use_pos_num = 3
                use_neg_num = 3
                if pos_num >= use_pos_num and neg_num >= use_neg_num:
                    sample_batch = torch.cat((sample_batch[0:use_pos_num, :, :, :],
                                              sample_batch[pos_num:pos_num + use_neg_num, :, :, :]), dim=0)
                    sample_truth = torch.cat(
                        (sample_truth[0:use_pos_num, :], sample_truth[pos_num:pos_num + use_neg_num, :]), dim=0)
                    pos_num = use_pos_num
                    neg_num = use_neg_num
                elif pos_num >= use_pos_num:
                    sample_batch = torch.cat(
                        (sample_batch[0:use_pos_num, :, :, :], sample_batch[pos_num:, :, :, :]), dim=0)
                    sample_truth = torch.cat((sample_truth[0:use_pos_num, :], sample_truth[pos_num:, :]), dim=0)
                    pos_num = use_pos_num
                elif neg_num >= use_neg_num:
                    sample_batch = sample_batch[0:pos_num + use_neg_num, :, :, :]
                    sample_truth = sample_truth[0:pos_num + use_neg_num, :]
                    neg_num = use_neg_num
                if neg_num == 0 or pos_num == 0:
                    continue
                # Forward query + samples in one batch, then split descriptors.
                input_batch = torch.cat((current_batch, sample_batch), dim=0)
                input_batch.requires_grad_(True)
                self.amodel.train()
                self.optimizer.zero_grad()
                global_des = self.amodel(input_batch)
                o1, o2, o3 = torch.split(
                    global_des, [1, pos_num, neg_num], dim=0)
                MARGIN_1 = 0.5
                loss = PNV_loss.triplet_loss(o1, o2, o3, MARGIN_1, lazy=False)
                loss.backward()
                self.optimizer.step()
                print(str(used_num), loss)
                if torch.isnan(loss):
                    print(pos_num)
                    print(neg_num)
                loss_each_epoch = loss_each_epoch + loss.item()
                used_num = used_num + 1
            # Guard against division by zero when no anchor produced a batch.
            avg_loss = loss_each_epoch / max(used_num, 1)
            print("epoch {} loss {}".format(i, avg_loss))
            print("saving weights ...")
            self.scheduler.step()
            self.save_name = "./amodel_seqot"+str(i)+".pth.tar"
            torch.save({
                'epoch': i,
                'state_dict': self.amodel.state_dict(),
                'optimizer': self.optimizer.state_dict()
            },
                self.save_name)
            print("Model Saved As " + self.save_name)
            writer1.add_scalar("loss", avg_loss, global_step=i)
if __name__ == '__main__':
    # load config ================================================================
    config_filename = '../config/config.yml'
    config = yaml.safe_load(open(config_filename))
    traindata_file = config["training_seqot"]["traindata_file"]
    poses_file = config["training_seqot"]["poses_file"]
    height = config["training_seqot"]["height"]
    width = config["training_seqot"]["width"]
    seqlen = config["training_seqot"]["seqlen"]
    learning_rate = config["training_seqot"]["lr"]
    resume = config["training_seqot"]["resume"]
    pretrained_weights = config["training_seqot"]["weights"]
    range_image_root = config["data_root"]["range_image_database_root"]
    # ============================================================================
    # BUGFIX: forward the configured height/width instead of hard-coding 32/900,
    # so the values read from config.yml actually take effect.
    train_handler = trainHandler(height=height, width=width, seqlen=seqlen, lr=learning_rate, resume=resume,
                                 pretrained_weights=pretrained_weights,
                                 train_set=traindata_file, poses_file=poses_file, range_image_root=range_image_root)
    train_handler.train()
SeqOT | SeqOT-main/train/training_gem.py | #!/usr/bin/env python3
# Developed by Junyi Ma, Xieyuanli Chen
# This file is covered by the LICENSE file in the root of the project SeqOT: https://github.com/BIT-MJY/SeqOT
# SeqOT is the sequence enhanced version of our previous work OverlapTransformer: https://github.com/haomo-ai/OverlapTransformer
# Brief: train GeM module with the descriptors of database of the NCLT dataset
import os
import sys
p = os.path.dirname(os.path.dirname((os.path.abspath(__file__))))
if p not in sys.path:
sys.path.append(p)
sys.path.append('../tools/')
sys.path.append('../modules/')
import torch
import yaml
import numpy as np
np.set_printoptions(threshold=sys.maxsize)
from tensorboardX import SummaryWriter
from modules.gem import GeM
from tools.read_samples import read_one_need_descriptor_from_seq_ft
from tools.read_samples import read_one_batch_pos_neg_descriptors
import tools.loss as PNV_loss
from tools.utils.utils import *
class trainHandler():
    """Trains the GeM pooling head of SeqOT on precomputed per-frame descriptors.

    The handler owns the GeM model, optimizer and LR scheduler, loads the
    training pairs and poses from disk, and runs a triplet-loss training loop,
    writing a checkpoint and a TensorBoard scalar after every epoch.
    """
    def __init__(self, lr=0.000005, seqlen=20, resume=False, pretrained_weights=None, descriptors=None, train_set=None, poses_file=None):
        """Set up model, optimizer, scheduler and load training data.

        Args:
            lr: Adam learning rate.
            seqlen: Length of the descriptor sequence fed to GeM.
            resume: If True, restore model/optimizer state from `pretrained_weights`.
            pretrained_weights: Path to the checkpoint file used when resuming.
            descriptors: Precomputed per-frame descriptors (numpy array).
            train_set: Path to the npy file of (query, match, overlap) triples.
            poses_file: Path to the npy file with ground-truth poses.
        """
        super(trainHandler, self).__init__()
        self.learning_rate = lr
        self.seqlen = seqlen
        # Bug fix: `self.resume` used to be unconditionally reset to False after
        # this assignment, which silently disabled checkpoint resuming. Honor the
        # constructor argument instead (default is still False).
        self.resume = resume
        self.weights = pretrained_weights
        self.train_set = train_set
        self.poses_file = poses_file
        self.descs = descriptors
        self.amodel = GeM()
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.amodel.to(self.device)
        print(self.amodel)
        self.parameters = self.amodel.parameters()
        self.optimizer = torch.optim.Adam(self.parameters, self.learning_rate)
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=5, gamma=0.9)
        self.traindata_npzfiles = train_set
        self.train_set_imgf1_imgf2_overlap = np.load(self.train_set)
        self.poses = np.load(self.poses_file)
        # Frame pairs with overlap above this threshold count as positives.
        self.overlap_thresh = 0.3
    def train(self):
        """Run the training loop: one triplet batch per unique query frame."""
        epochs = 100
        if self.resume:
            resume_filename = self.weights
            print("Resuming From ", resume_filename)
            checkpoint = torch.load(resume_filename)
            starting_epoch = checkpoint['epoch']
            self.amodel.load_state_dict(checkpoint['state_dict'])  # load the state dict
            self.optimizer.load_state_dict(checkpoint['optimizer'])
        else:
            print("Training From Scratch ..." )
            starting_epoch = 0
        writer1 = SummaryWriter(comment="LR_xxxx")
        for i in range(starting_epoch+1, epochs):
            # shuffle
            # self.train_set_imgf1_imgf2_overlap = np.random.permutation(self.train_set_imgf1_imgf2_overlap)
            self.train_imgf1 = self.train_set_imgf1_imgf2_overlap[:, 0]
            self.train_imgf2 = self.train_set_imgf1_imgf2_overlap[:, 1]
            self.train_dir1 = np.zeros((len(self.train_imgf1),))
            self.train_dir2 = np.zeros((len(self.train_imgf2),))
            self.train_overlap = self.train_set_imgf1_imgf2_overlap[:, 2].astype(float)
            print("=======================================================================\n\n\n")
            print("total pairs: ", len(self.train_imgf1))
            print("\n\n\n=======================================================================")
            loss_each_epoch = 0
            used_num = 0
            # Bug fix: the original dedup bookkeeping only appended inside a loop
            # over an always-empty list, so nothing was ever recorded and every
            # duplicate (f1, dir1) query was re-processed. A set implements the
            # clearly intended "use each query frame once per epoch" behavior.
            used_queries = set()
            for j in range(len(self.train_imgf1)):
                f1_index = self.train_imgf1[j]
                dir1_index = self.train_dir1[j]
                if (f1_index, dir1_index) in used_queries:
                    continue
                used_queries.add((f1_index, dir1_index))
                current_batch, read_complete_flag = read_one_need_descriptor_from_seq_ft(f1_index, self.descs, self.seqlen)
                if not read_complete_flag:
                    continue
                sample_batch, sample_truth, pos_num, neg_num, read_complete_flag = read_one_batch_pos_neg_descriptors \
                    (f1_index, dir1_index, self.train_imgf1, self.train_imgf2, self.train_dir1, self.train_dir2, self.train_overlap,
                     self.overlap_thresh, self.seqlen, self.descs)
                if not read_complete_flag:
                    continue
                # Cap the triplet batch at 6 positives and 6 negatives.
                use_pos_num = 6
                use_neg_num = 6
                if pos_num >= use_pos_num and neg_num >= use_neg_num:
                    sample_batch = torch.cat((sample_batch[0:use_pos_num, :, :],
                                              sample_batch[pos_num:pos_num + use_neg_num, :, :]), dim=0)
                    sample_truth = torch.cat(
                        (sample_truth[0:use_pos_num, :], sample_truth[pos_num:pos_num + use_neg_num, :]), dim=0)
                    pos_num = use_pos_num
                    neg_num = use_neg_num
                elif pos_num >= use_pos_num:
                    sample_batch = torch.cat(
                        (sample_batch[0:use_pos_num, :, :], sample_batch[pos_num:, :, :]), dim=0)
                    sample_truth = torch.cat((sample_truth[0:use_pos_num, :], sample_truth[pos_num:, :]), dim=0)
                    pos_num = use_pos_num
                elif neg_num >= use_neg_num:
                    sample_batch = sample_batch[0:pos_num + use_neg_num, :, :]
                    sample_truth = sample_truth[0:pos_num + use_neg_num, :]
                    neg_num = use_neg_num
                if neg_num == 0 or pos_num == 0:
                    continue
                input_batch = torch.cat((current_batch, sample_batch), dim=0)
                input_batch.requires_grad_(True)
                self.amodel.train()
                self.optimizer.zero_grad()
                global_des = self.amodel(input_batch)
                global_des = global_des.squeeze(1)
                # Row 0 is the anchor, then `pos_num` positives, then `neg_num` negatives.
                o1, o2, o3 = torch.split(
                    global_des, [1, pos_num, neg_num], dim=0)
                MARGIN_1 = 0.5
                loss = PNV_loss.triplet_loss(o1, o2, o3, MARGIN_1, lazy=False)
                loss.backward()
                self.optimizer.step()
                print(str(used_num), loss)
                if torch.isnan(loss):
                    print(pos_num)
                    print(neg_num)
                loss_each_epoch = loss_each_epoch + loss.item()
                used_num = used_num + 1
            # Guard against ZeroDivisionError when no query produced a usable batch.
            avg_loss = loss_each_epoch / max(used_num, 1)
            print("epoch {} loss {}".format(i, avg_loss))
            print("saving weights ...")
            self.scheduler.step()
            self.save_name = "./amodel_gem"+str(i)+".pth.tar"
            torch.save({
                'epoch': i,
                'state_dict': self.amodel.state_dict(),
                'optimizer': self.optimizer.state_dict()
            },
                self.save_name)
            print("Model Saved As " + self.save_name)
            writer1.add_scalar("loss", avg_loss, global_step=i)
if __name__ == '__main__':
    # load config ================================================================
    config_filename = '../config/config.yml'
    # Use a context manager so the config file handle is closed deterministically
    # (the previous `yaml.safe_load(open(...))` leaked the handle).
    with open(config_filename) as config_file:
        config = yaml.safe_load(config_file)
    traindata_file = config["training_gem"]["traindata_file"]
    poses_file = config["training_gem"]["poses_file"]
    seqlen = config["training_gem"]["seqlen"]
    learning_rate = config["training_gem"]["lr"]
    resume = config["training_gem"]["resume"]
    pretrained_weights = config["training_gem"]["weights"]
    descriptors = np.load(config["training_gem"]["descriptors_file"])
    # ============================================================================
    train_handler = trainHandler(lr=learning_rate, seqlen=seqlen, resume=resume, pretrained_weights=pretrained_weights, descriptors=descriptors,
                                 train_set=traindata_file, poses_file=poses_file)
train_handler.train() | 8,258 | 39.886139 | 144 | py |
mlcube | mlcube-master/mlcube/mlcube/shell.py | """Various utils to work with shell (mostly - running external processes).
- `Shell`: This class provides a collection of methods to work with shell to run external processes.
"""
import copy
import logging
import os
import shutil
import sys
import typing as t
from distutils import dir_util
from pathlib import Path
from mlcube.config import (IOType, ParameterType, MountType)
from mlcube.errors import (ConfigurationError, ExecutionError)
from omegaconf import DictConfig
__all__ = ['Shell']
logger = logging.getLogger(__name__)
class Shell(object):
    """Helper functions to run commands."""
    @staticmethod
    def null() -> str:
        """Return /dev/null for Linux/Windows.
        TODO: In powershell, $null works. Is below the correct implementation?
        """
        if os.name == 'nt':
            return 'NUL'
        return '/dev/null'
    @staticmethod
    def parse_exec_status(status: int) -> t.Tuple[int, str]:
        """Parse execution status returned by `os.system` call.
        Args:
            status: return code.
        Returns:
            Tuple containing exit code and exit status.
        https://github.com/mlperf/training_results_v0.5/blob/7238ee7edc18f64f0869923a04de2a92418c6c28/v0.5.0/nvidia/
        submission/code/translation/pytorch/cutlass/tools/external/googletest/googletest/test/gtest_test_utils.py#L185
        """
        if os.name == 'nt':
            # On Windows `os.system` returns the process exit code directly.
            exit_code, exit_status = (status, 'exited')
        else:
            # On POSIX the status is a wait status word; decode with os.WIF* macros.
            if os.WIFEXITED(status):
                exit_code, exit_status = (os.WEXITSTATUS(status), 'exited')
            elif os.WIFSTOPPED(status):
                exit_code, exit_status = (-os.WSTOPSIG(status), 'stopped')
            elif os.WIFSIGNALED(status):
                exit_code, exit_status = (-os.WTERMSIG(status), 'signalled')
            else:
                exit_code, exit_status = (status, 'na')
        return exit_code, exit_status
    @staticmethod
    def run(cmd: t.Union[str, t.List], on_error: str = 'raise') -> int:
        """Run the `cmd` command in an external process.
        Args:
            cmd: Command to execute, e.g. Shell.run(['ls', -lh']). If type is iterable, this method will join into
                one string using whitespace as a separator.
            on_error: Action to perform if `os.system` returns a non-zero status. Options - ignore (do nothing, return
                exit code), 'raise' (raise a RuntimeError exception), 'die' (exit the process).
        Returns:
            Exit status. On Windows, the exit status is the output of `os.system`. On Linux, the output is either
            process exit status if that processes exited, or -1 in other cases (e.g., process was killed).
        """
        if isinstance(cmd, t.List):
            cmd = ' '.join(cmd)
        if on_error not in ('raise', 'die', 'ignore'):
            raise ValueError(
                f"Unrecognized 'on_error' action ({on_error}). Valid options are ('raise', 'die', 'ignore')."
            )
        # `os.system` runs the command through the shell; callers are responsible for quoting.
        status: int = os.system(cmd)
        exit_code, exit_status = Shell.parse_exec_status(status)
        if exit_status == 'na':
            logger.warning("Command (cmd=%s) did not exit properly (status=%d).", cmd, status)
        msg = f"Command='{cmd}' status={status} exit_status={exit_status} exit_code={exit_code} on_error={on_error}"
        if exit_code != 0:
            logger.error(msg)
            if on_error == 'die':
                sys.exit(exit_code)
            if on_error == 'raise':
                raise ExecutionError(
                    'Failed to execute shell command.', status=exit_status, code=exit_code, cmd=cmd
                )
        else:
            logger.info(msg)
        return exit_code
    @staticmethod
    def docker_image_exists(docker: t.Optional[str], image: str) -> bool:
        """Check if docker image exists.
        Args:
            docker: Docker executable (docker/sudo docker/podman/nvidia-docker/...).
            image: Name of a docker image.
        Returns:
            True if image exists, else false.
        """
        docker = docker or 'docker'
        # `inspect` exits with 0 only when the image is present locally; output is discarded.
        cmd = f'{docker} inspect --type=image {image} > {Shell.null()}'
        return Shell.run(cmd, on_error='ignore') == 0
    @staticmethod
    def ssh(connection_str: str, command: t.Optional[str], on_error: str = 'raise') -> int:
        """Execute a command on a remote host via SSH.
        Args:
            connection_str: SSH connection string.
            command: Command to execute.
            on_error: Action to perform if an error occurs.
        """
        if not command:
            return 0
        return Shell.run(f"ssh -o StrictHostKeyChecking=no {connection_str} '{command}'", on_error=on_error)
    @staticmethod
    def rsync_dirs(source: str, dest: str, on_error: str = 'raise') -> int:
        """Synchronize directories.
        Args:
            source: Source directory.
            dest: Destination directory.
            on_error: Action to perform if an error occurs.
        """
        return Shell.run(f"rsync -e 'ssh' '{source}' '{dest}'", on_error=on_error)
    @staticmethod
    def get_host_path(workspace_path: str, path_from_config: str) -> str:
        """Return host path for a task parameter.
        Args:
            workspace_path: Workspace directory path for this MLCube.
            path_from_config: Parameter path as specified by a user in an MLCube configuration file (e.g., mlcube.yaml).
        Returns:
            Absolute host path.
        """
        # Omega conf will resolve any variables defined in MLCube configuration file. We need to take care about `~`
        # (user home directory) and environment variables.
        host_path = Path(
            os.path.expandvars(os.path.expanduser(path_from_config))
        )
        # According to MLCube contract, relative paths are relative to MLCube workspace directory.
        if not host_path.is_absolute():
            host_path = Path(workspace_path) / host_path
        return host_path.as_posix()
    @staticmethod
    def generate_mounts_and_args(mlcube: DictConfig, task: str,
                                 make_dirs: bool = True) -> t.Tuple[t.Dict, t.List, t.Dict]:
        """Generate mount points, task arguments and mount options for the given task.
        Args:
            mlcube: MLCube configuration (e.g., coming from `mlcube.yaml` file).
            task: Task name for which mount points need to be generated.
            make_dirs: If true, make host directories recursively if they do not exist. We need this to actually make
                unit tests work (that set this value to false).
        Return:
            A tuple containing three elements:
                - A mapping from host path to path inside container.
                - A list of task arguments.
                - A mapping from host paths to mount options (optional).
        """
        # First task argument is always the task name.
        mounts: t.Dict[str, str] = {}  # Mapping from host paths to container paths.
        args: t.List[str] = [task]  # List of arguments for the given task.
        mounts_opts: t.Dict[str, str] = {}  # Mapping from host paths to mount options (rw/ro).
        def _generate(_params: DictConfig, _io: str) -> None:
            """Process parameters (could be inputs or outputs).
            This function updates `mounts`, `args` and `mounts_opts`.
            Args:
                _params: Dictionary of input or output parameters.
                _io: Specifies if these parameters are input our output parameters.
            """
            if not IOType.is_valid(_io):
                raise ConfigurationError(f"Invalid IO = {_io}")
            for _param_name, _param_def in _params.items():
                assert isinstance(_param_def, DictConfig), f"Unexpected parameter definition: {_param_def}."
                if not ParameterType.is_valid(_param_def.type):
                    raise ConfigurationError(
                        f"Invalid task: task={task}, param={_param_name}, type={_param_def.type}. Type is invalid."
                    )
                # MLCube contract says relative paths in MLCube configuration files are relative with respect to MLCube
                # workspace directory. In certain cases it makes sense to use absolute paths too. This maybe the case
                # when we want to reuse host cache directories that many machine learning frameworks use to cache models
                # and datasets. We also need to be able to resolve `~` (user home directory), as well as environment
                # variables (BTW, this is probably needs some discussion at some point in time). This environment
                # variable could be, for instance, `${HOME}`.
                _host_path: str = Shell.get_host_path(mlcube.runtime.workspace, _param_def.default)
                if _param_def.type == ParameterType.UNKNOWN:
                    if _io == IOType.OUTPUT:
                        raise ConfigurationError(
                            f"Invalid task: task={task}, param={_param_name}, type={_param_def.type}. "
                            "Type cannot be unknown for output parameters."
                        )
                    else:
                        # Infer the parameter type by inspecting what exists on the host.
                        if os.path.isdir(_host_path):
                            _param_def.type = ParameterType.DIRECTORY
                        elif os.path.isfile(_host_path):
                            _param_def.type = ParameterType.FILE
                        else:
                            raise ConfigurationError(
                                f"Invalid task: task={task}, param={_param_name}, type={_param_def.type}. "
                                f"Type is unknown and unable to identify it ({_host_path})."
                            )
                if _param_def.type == ParameterType.DIRECTORY:
                    if make_dirs:
                        os.makedirs(_host_path, exist_ok=True)
                    # Reuse an existing mount for the same host path; otherwise allocate the next /mlcube_ioN slot.
                    mounts[_host_path] = mounts.get(_host_path, f"/mlcube_io{len(mounts)}")
                    args.append('--{}={}'.format(_param_name, mounts[_host_path]))
                elif _param_def.type == ParameterType.FILE:
                    # For files, mount the parent directory and pass the file name inside it.
                    _host_path, _file_name = os.path.split(_host_path)
                    if make_dirs:
                        os.makedirs(_host_path, exist_ok=True)
                    mounts[_host_path] = mounts.get(_host_path, f"/mlcube_io{len(mounts)}")
                    args.append('--{}={}'.format(_param_name, mounts[_host_path] + '/' + _file_name))
                mount_type: t.Optional[str] = _param_def.get('opts', None)
                if mount_type:
                    if not MountType.is_valid(_param_def.opts):
                        raise ConfigurationError(
                            f"Invalid mount options: mount={task}, param={_param_name}, opts={_param_def.opts}."
                        )
                    if mount_type == MountType.RO and _io == IOType.OUTPUT:
                        logger.warning(
                            "Task's (%s) parameter (%s) is OUTPUT and requested to mount as RO.", task, _param_name
                        )
                    if _host_path in mounts_opts and mounts_opts[_host_path] != mount_type:
                        logger.warning(
                            "Conflicting mount options found. Host path (%s) has already been requested to mount as "
                            "'%s', but new parameter (%s) requests to mount as '%s'.",
                            _host_path, mounts_opts[_host_path], _param_name, mount_type
                        )
                        # Since we can only have `ro`/`rw`, we'll set the mount option to `rw`.
                        mount_type = MountType.RW
                    mounts_opts[_host_path] = mount_type
                    logger.info(
                        "Host path (%s) for parameter '%s' will be mounted with '%s' option.",
                        _host_path, _param_name, mount_type
                    )
        params = mlcube.tasks[task].parameters  # Dictionary of input and output parameters for the task.
        _generate(params.inputs, IOType.INPUT)  # Process input parameters.
        _generate(params.outputs, IOType.OUTPUT)  # Process output parameters.
        return mounts, args, mounts_opts
    @staticmethod
    def to_cli_args(args: t.Mapping[str, t.Any], sep: str = '=', parent_arg: t.Optional[str] = None) -> str:
        """Convert dict to CLI arguments.
        Args:
            args: Dictionary with parameters.
            sep: Key-value separator. For build args and environment variables it's '=', for mount points it is ':'.
            parent_arg: If not None, a parent parameter name for each arg in args, e.g. --build-arg
        """
        parent_arg = '' if not parent_arg else parent_arg + ' '
        return ' '.join(f'{parent_arg}{k}{sep}{v}' for k, v in args.items())
    @staticmethod
    def sync_workspace(target_mlcube: DictConfig, task: str) -> None:
        """Synchronize MLCube workspaces.
        Args:
            target_mlcube: MLCube configuration. Its name (target_) means that this configuration defines actual
                configuration where MLCube is supposed to be executed. If workspaces are different, source_mlcube will
                refer to the MLCube configuration with default (internal) workspace.
            task: Task name to be executed.
        """
        def _storage_not_supported(_uri: str) -> str:
            """Raise an exception if the given URI is not supported.
            Args:
                _uri: URI to check. If it starts with `storage:` (yet unsupported schema), raise an exception.
            """
            _uri = _uri.strip()
            if _uri.startswith('storage:'):
                raise NotImplementedError(f"Storage protocol (uri={_uri}) is not supported yet.")
            return _uri
        def _is_inside_workspace(_workspace: str, _artifact: t.Union[str, Path]) -> bool:
            """Check if artifact is inside this workspace. Workspace directory and artifact must exist."""
            return os.path.commonpath([_workspace]) == os.path.commonpath([_workspace, _artifact])
        def _is_ok(_parameter: str, _kind: str, _workspace: str, _artifact: t.Union[str, Path], _must_exist: bool) -> bool:
            """Return true if this artifact needs to be synced."""
            if not _is_inside_workspace(_workspace, _artifact):
                logger.debug("[sync_workspace] task = %s, parameter = %s, artifact is not inside %s workspace "
                             "(workspace = %s, uri = %s)", task, _parameter, _kind, _workspace, _artifact)
                return False
            if _must_exist and not os.path.exists(_artifact):
                logger.debug("[sync_workspace] task = %s, parameter = %s, artifact does not exist in %s workspace "
                             "(workspace = %s, uri = %s)", task, _parameter, _kind, _workspace, _artifact)
                return False
            if not _must_exist and os.path.exists(_artifact):
                logger.debug("[sync_workspace] task = %s, parameter = %s, artifact exists in %s workspace "
                             "(workspace = %s, uri = %s)", task, _parameter, _kind, _workspace, _artifact)
                return False
            return True
        def _is_task_output(_target_artifact: Path, _input_parameter: str) -> bool:
            """Check if this artifact is an output of some task."""
            for _task_name, _task_def in target_mlcube.tasks.items():
                for _output_param_name, _output_param_def in _task_def.parameters.outputs.items():
                    _target_output_artifact: Path = \
                        Path(target_workspace) / _storage_not_supported(_output_param_def.default)
                    # Can't really use `os.path.samefile` here since files may not exist.
                    # if os.path.samefile(_target_artifact, _target_output_artifact):
                    if _target_artifact == _target_output_artifact:
                        logger.debug("[sync_workspace] task = %s, parameter = %s is an output of task = %s, "
                                     "parameter = %s", task, _input_parameter, _task_name, _output_param_name)
                        return True
            return False
        # Check if actual workspace is not internal one (which is default workspace).
        target_workspace = os.path.abspath(_storage_not_supported(target_mlcube.runtime.workspace))
        os.makedirs(target_workspace, exist_ok=True)
        source_workspace = os.path.abspath(Path(target_mlcube.runtime.root) / 'workspace')
        if not os.path.exists(source_workspace):
            logger.debug("[sync_workspace] source workspace (%s) does not exist, nothing to sync.", source_workspace)
            return
        if os.path.samefile(target_workspace, source_workspace):
            logger.debug("[sync_workspace] target workspace (%s) is the same as source workspace (%s).",
                         target_workspace, source_workspace)
            return
        if task not in target_mlcube.tasks:
            raise ValueError(f"Task does not exist: {task}")
        # Deep copy of the MLCube config with workspace set to internal workspace (we need this to resolve artifact
        # paths).
        source_mlcube: DictConfig = copy.deepcopy(target_mlcube)
        source_mlcube.runtime.workspace = source_workspace
        source_mlcube.workspace = source_workspace
        inputs: t.Mapping[str, DictConfig] = target_mlcube.tasks[task].parameters.inputs
        for input_name, input_def in inputs.items():
            # TODO: add support for storage protocol. Idea is to be able to retrieve actual storage specs from
            #  system settings file. It should be possible to also specify paths within that storage (see
            #  https://en.wikipedia.org/wiki/Uniform_Resource_Identifier). For instance, the `storage:home/${name}`
            #  means that the `storage` section defines some storage labelled as `home`, and MLCube needs to use
            #  ${name} path within that storage.
            source_uri: Path = Path(source_workspace) / _storage_not_supported(input_def.default)
            if not _is_ok(input_name, 'source', source_workspace, source_uri, _must_exist=True):
                continue
            target_uri: Path = Path(target_workspace) / _storage_not_supported(input_def.default)
            if not _is_ok(input_name, 'target', target_workspace, target_uri, _must_exist=False):
                continue
            if _is_task_output(target_uri, input_name):
                continue
            if os.path.isfile(source_uri):
                os.makedirs(os.path.dirname(target_uri), exist_ok=True)
                shutil.copy(source_uri, target_uri)
            elif os.path.isdir(source_uri):
                # NOTE(review): distutils is deprecated (PEP 632); shutil.copytree(..., dirs_exist_ok=True)
                # is the modern replacement for dir_util.copy_tree.
                dir_util.copy_tree(source_uri, target_uri)
            else:
                raise RuntimeError(f"Unknown artifact type ({source_uri}).")
            logger.debug("[sync_workspace] task = %s, parameter = %s, source (%s) copied to target (%s).",
                         task, input_name, source_uri, target_uri)
| 19,348 | 48.997416 | 120 | py |
Relation-CZSL | Relation-CZSL-master/eval.py | import os, shutil
import json
import os.path as osp
import re
import logging
import time
import random
from functools import reduce
import resource
# rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
# resource.setrlimit(resource.RLIMIT_NOFILE, (2048, rlimit[1]))
import numpy as np
import scipy as sp
from scipy.spatial.distance import pdist, squareform
from tensorboardX import SummaryWriter
from tqdm import tqdm
import torch
from torch import nn
from torch.utils.data import DataLoader
from torch.nn import functional as F
from torchvision.models import resnet18
from model.misc.utils import init, RunningAverage, ShouldSaveModel, myself, prepare_train, save_checkpoint, \
longtensor_to_one_hot, EarlyStop, wait_gpu, worker_init_fn_seed, elapsed_timer, load_checkpoint, plot_grad_flow
from model.datasets.glove import load_glove_as_dict
from model.datasets.CompositionDataset import CompositionDataset
from model.SepMask import SepMix, Discriminator
from model.pygcn import normalize
# Common keyword arguments applied to every tqdm progress bar in this script.
tqdm_commons = {'ncols': 100, 'ascii': True, 'leave': True}
# Block until a GPU with enough free memory (REQ_MEM MiB, default 4000) is
# available, unless the NO_GPU_WAIT environment variable is set.
if 'NO_GPU_WAIT' not in os.environ:  # os.environ supports membership directly; .keys() was redundant
    wait_gpu(req_mem=int(os.environ.get('REQ_MEM', '4000')))
def params(p):
    """Register this script's CLI options on argument parser `p` and return it.

    `p` is an argparse-style parser; options cover dataset selection, paths,
    batch sizes, model hyper-parameters and debugging switches.
    """
    p.add_argument('--dataset', choices=['mitstates', 'ut-zap50k'],
                   default='ut-zap50k', help='Dataset for training and testing.')
    p.add_argument('--data_path', default='.', help='Path where you place your dataset.')
    p.add_argument('--split', choices=['compositional-split', 'natural-split'], default='compositional-split')
    p.add_argument('--batch-size', '--batch_size', type=int, default=512)
    p.add_argument('--test-batch-size', '--test_batch_size', type=int, default=32)
    p.add_argument('--model_path', type=str)
    p.add_argument('--latent_dims', type=int, default=512)
    p.add_argument('--topk', type=int, default=1)
    p.add_argument('--dropout', type=float, default=0.0)
    p.add_argument('--kneg', type=int, default=1)
    p.add_argument('--num_workers', type=int, default=5)
    p.add_argument('--pre_feat', action='store_true', default=False)
    p.add_argument('--debug_val', '--debug-val',
                   action='store_true', default=False)
    return p
def sim(x, y):
    """Negative L2 distance between corresponding rows of x and y, shape (N, 1)."""
    diff = x - y
    return -diff.norm(p=2, dim=1).unsqueeze(1)
def h_mean(a, b):
    """Harmonic mean of a and b; the epsilon keeps the division finite at 0."""
    numerator = 2 * a * b
    denominator = a + b + 1e-8
    return numerator / denominator
def val_separate(model, dataloader, phase='val', topk=1, **kwargs):
    """Evaluate the model with separate attribute/object embedding spaces.

    Extracts image features for every test sample and candidate (attr, obj)
    pair, scores each sample against all pairs with the negative-distance
    `sim`, then sweeps a calibration bias added to unseen-pair scores to trace
    a seen/unseen accuracy curve and integrate its AUC.

    Required kwargs: 'args' (CLI namespace), 'complex_dim' (embedding size),
    'vis_backbone' (optional visual backbone passed to the model).
    Returns a dict with 'phase', 'auc', 'seen_acc', 'unseen_acc', 'h_mean'.
    """
    args = kwargs['args']
    model.eval()
    # NOTE(review): the counters below are either unused (total_count, o_close,
    # a_close, o_open, a_open) or shadowed by the tensors created later
    # (correct_unseen, correct_seen).
    total_count = 0
    correct_unseen = 0
    o_close = 0
    a_close = 0
    correct_seen = 0
    o_open = 0
    a_open = 0
    train_pairs = dataloader.dataset.train_pairs
    test_pairs = dataloader.dataset.val_pairs if dataloader.dataset.phase == 'val' else dataloader.dataset.test_pairs
    with torch.no_grad():
        test_sample_num = len(dataloader.dataset)
        # Seen pairs appear in training; unseen pairs occur only at test time.
        seen_pairs = sorted(list(set(train_pairs).intersection(test_pairs)))
        unseen_pairs = sorted(list(set(test_pairs) - set(train_pairs)))
        pair_data_seen_att = np.zeros([len(seen_pairs), kwargs['complex_dim']])
        pair_data_seen_obj = np.zeros([len(seen_pairs), kwargs['complex_dim']])
        pair_data_unseen_att = np.zeros([len(unseen_pairs), kwargs['complex_dim']])
        pair_data_unseen_obj = np.zeros([len(unseen_pairs), kwargs['complex_dim']])
        test_data_att = np.zeros([test_sample_num, kwargs['complex_dim']])
        test_data_obj = np.zeros([test_sample_num, kwargs['complex_dim']])
        # Pass 1: image features (attribute and object branches) for all samples.
        i = 0
        for _, data in tqdm(enumerate(dataloader), desc='GT Feature', total=len(dataloader), disable=args.no_pbar, **tqdm_commons):
            if args.parallel:
                output = model.module.forward(data[0].float(), None, None, status='eval', vis_backbone=kwargs['vis_backbone'])
            else:
                output = model.forward(data[0].float(), None, None, status='eval', vis_backbone=kwargs['vis_backbone'])
            feat_tmp = output['im_feat']
            test_data_att[i:i + feat_tmp.shape[0], :] = output['im_att_feat'].detach().cpu().numpy()
            test_data_obj[i:i + feat_tmp.shape[0], :] = output['im_obj_feat'].detach().cpu().numpy()
            i += dataloader.batch_size
            if args.debug_mode:
                break
        # Pass 2: label-side embeddings for every unseen pair. The image tensor
        # is ignored (ignore_img=True); `data` deliberately leaks from the loop
        # above and is only used as a dummy input here.
        for i in range(0, len(unseen_pairs)):
            att_idx = torch.Tensor([dataloader.dataset.attr2idx[unseen_pairs[i][0]]]).long()
            obj_idx = torch.Tensor([dataloader.dataset.obj2idx[unseen_pairs[i][1]]]).long()
            if args.parallel:
                output = model.module.forward(data[0][0].unsqueeze(0).float(), att_idx, obj_idx, mask_target=True, status='eval', ignore_img=True)
            else:
                output = model.forward(data[0][0].unsqueeze(0).float(), att_idx, obj_idx, mask_target=True, status='eval', ignore_img=True)
            pair_data_unseen_att[i, :] = output['lin_att_values'].detach().cpu().numpy()
            pair_data_unseen_obj[i, :] = output['lin_obj_values'].detach().cpu().numpy()
            if args.debug_mode:
                break
        # Pass 3: label-side embeddings for every seen pair.
        for i in range(0, len(seen_pairs)):
            att_idx = torch.Tensor([dataloader.dataset.attr2idx[seen_pairs[i][0]]]).long()
            obj_idx = torch.Tensor([dataloader.dataset.obj2idx[seen_pairs[i][1]]]).long()
            if args.parallel:
                output = model.module.forward(data[0][0].unsqueeze(0).float(), att_idx, obj_idx, status='eval', mask_target=True, ignore_img=True)
            else:
                output = model.forward(data[0][0].unsqueeze(0).float(), att_idx, obj_idx, status='eval', mask_target=True, ignore_img=True)
            pair_data_seen_att[i, :] = output['lin_att_values'].detach().cpu().numpy()
            pair_data_seen_obj[i, :] = output['lin_obj_values'].detach().cpu().numpy()
            if args.debug_mode:
                break
        # Candidate order is [unseen pairs, seen pairs]; the bias below is added
        # to the first len(unseen_pairs) columns only.
        pair_t_unseen_att = torch.FloatTensor(pair_data_unseen_att).cuda()
        pair_t_seen_att = torch.FloatTensor(pair_data_seen_att).cuda()
        pair_t_att = torch.cat((pair_t_unseen_att, pair_t_seen_att))
        pair_t_unseen_obj = torch.FloatTensor(pair_data_unseen_obj).cuda()
        pair_t_seen_obj = torch.FloatTensor(pair_data_seen_obj).cuda()
        pair_t_obj = torch.cat((pair_t_unseen_obj, pair_t_seen_obj))
        dist = torch.zeros(test_sample_num, len(unseen_pairs) + len(seen_pairs))
        STEPS = 50
        correct_unseen = torch.zeros(STEPS, )
        total_unseen = 0
        correct_seen = torch.zeros(STEPS, )
        total_seen = 0
        # Score each sample against all pairs: sum of attribute and object similarities.
        for i in tqdm(range(0, test_sample_num), disable=args.no_pbar, **tqdm_commons):
            dist[i] = sim(pair_t_att, torch.Tensor(test_data_att[i, :]).cuda().repeat(pair_t_att.shape[0], 1)).squeeze() + \
                      sim(pair_t_obj, torch.Tensor(test_data_obj[i, :]).cuda().repeat(pair_t_obj.shape[0], 1)).squeeze()
        # Sweep a calibration bias over the full score range to trade off seen
        # vs. unseen accuracy; each bias yields one point on the curve.
        dist_diff = dist.max() - dist.min()
        biases = torch.linspace(-dist_diff-0.1, dist_diff+0.1, STEPS)
        for i in tqdm(range(0, test_sample_num), disable=args.no_pbar, **tqdm_commons):
            _, att_gt, obj_gt = dataloader.dataset.data[i]
            is_seen = (att_gt, obj_gt) in seen_pairs
            if is_seen:
                total_seen += 1
            else:
                total_unseen += 1
            for ii, bias in enumerate(biases):
                dist_bias = dist[i].clone()
                dist_bias[:len(unseen_pairs)] += bias
                preds = dist_bias.argsort(dim=0)[-topk:]
                for pred in preds:
                    pred_pairs = (unseen_pairs + seen_pairs)[pred]
                    correct = int(pred_pairs[0] == att_gt and pred_pairs[1] == obj_gt)
                    if is_seen:
                        correct_seen[ii] += correct
                    else:
                        correct_unseen[ii] += correct
                    # NOTE(review): this `continue` is a no-op (last statement of
                    # the innermost loop body).
                    if correct == 1:
                        continue
            if args.debug_mode:
                break
        correct_unseen /= total_unseen
        correct_seen /= total_seen
        # AUC of the seen-accuracy vs. unseen-accuracy curve.
        auc = torch.trapz(correct_seen, correct_unseen)
        test_info = {
            'phase': phase,
            'auc': float(auc),
            'seen_acc': float(correct_seen.max()),
            'unseen_acc': float(correct_unseen.max()),
            'h_mean': float(h_mean(correct_unseen, correct_seen).max())
        }
    return test_info
if __name__ == '__main__':
    args = init(user_param=params)
    # Per-dataset class counts and word-embedding dimensions.
    obj_class_num = {'ut-zap50k': 12, 'mitstates': 245}
    att_class_num = {'ut-zap50k': 16, 'mitstates': 115}
    obj_encode_dims = {'ut-zap50k': 300, 'mitstates': 300}
    att_encode_dims = {'ut-zap50k': 300, 'mitstates': 300}
    # GloVe word vectors used to embed attribute/object label names.
    glove_embedding = load_glove_as_dict(f'{args.data_path}/glove', dimension=300, identifier='42B')
    train_dataset = CompositionDataset(f'{args.data_path}/{args.dataset}', 'train', split=args.split, embedding_dict=glove_embedding, kneg=args.kneg, precompute_feat=args.pre_feat)
    test_dataset = CompositionDataset(f'{args.data_path}/{args.dataset}', 'test', split=args.split, embedding_dict=glove_embedding, precompute_feat=args.pre_feat)
    train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True,
                                  num_workers=args.num_workers, pin_memory=True, worker_init_fn=worker_init_fn_seed(args), drop_last=True)
    test_dataloader = DataLoader(test_dataset, batch_size=args.test_batch_size, shuffle=False,
                                 num_workers=args.num_workers, pin_memory=True, worker_init_fn=worker_init_fn_seed(args), drop_last=False)
    # The compositional split has no separate validation set; reuse the test set.
    if args.split == 'compositional-split':
        val_dataset = test_dataset
        val_dataloader = test_dataloader
    elif args.split == 'natural-split':
        val_dataset = CompositionDataset(f'{args.data_path}/{args.dataset}', 'val', split=args.split, embedding_dict=glove_embedding, precompute_feat=args.pre_feat)
        val_dataloader = DataLoader(val_dataset, batch_size=args.test_batch_size, shuffle=False,
                                    num_workers=args.num_workers, pin_memory=True, worker_init_fn=worker_init_fn_seed(args), drop_last=False)
    def _emb(s):
        """Embed a label: split on '.'/'-', sum word vectors, <unk> fallback."""
        ss = re.split('\.|-', s)
        emb = np.zeros(glove_embedding['the'].shape)
        if len(ss) == 1:
            try:
                emb = glove_embedding[ss[0]]
            except KeyError:
                logging.warning(
                    f'Cannot embed word \"{ss[0]}\", fallback to <unk>')
                emb = glove_embedding['<unk>']
        else:
            for w in ss:
                try:
                    emb += glove_embedding[w]
                except KeyError:
                    logging.warning(
                        f'Cannot embed word \"{w}\", fallback to <unk>')
                    emb += glove_embedding['<unk>']
        return emb
    # Word embeddings for every attribute and object label.
    att_emb_dict = {k: v for (k, v) in [(
        kk, _emb(kk.lower())) for kk in train_dataset.attrs]}
    obj_emb_dict = {k: v for (k, v) in [(
        kk, _emb(kk.lower())) for kk in train_dataset.objs]}
    train_dataset.att_emb_dict = att_emb_dict
    train_dataset.obj_emb_dict = obj_emb_dict
    test_dataset.att_emb_dict = att_emb_dict
    test_dataset.obj_emb_dict = obj_emb_dict
    val_dataset.att_emb_dict = att_emb_dict
    val_dataset.obj_emb_dict = obj_emb_dict
    # Cosine-similarity adjacency matrices over label embeddings (for the GCN).
    att_emb = np.array([v for (_, v) in att_emb_dict.items()])
    att_adj = squareform(1-pdist(att_emb, 'cosine'))
    obj_emb = np.array([v for (_, v) in obj_emb_dict.items()])
    obj_adj = squareform(1-pdist(obj_emb, 'cosine'))
    # seen_mask marks (attribute, object) combinations observed during training;
    # rows/cols are [attributes, objects] concatenated, and the within-attribute
    # and within-object quadrants are fully connected.
    seen_mask = torch.zeros((att_class_num[args.dataset]+obj_class_num[args.dataset], att_class_num[args.dataset]+obj_class_num[args.dataset]))
    for seen_pair in train_dataset.train_pairs:
        att_idx, obj_idx = train_dataset.attr2idx[seen_pair[0]], train_dataset.obj2idx[seen_pair[1]]
        seen_mask[att_idx, att_class_num[args.dataset]+obj_idx] = 1
        seen_mask[att_class_num[args.dataset]+obj_idx, att_idx] = 1
    seen_mask[:att_class_num[args.dataset], :att_class_num[args.dataset]] = 1
    seen_mask[att_class_num[args.dataset]:, att_class_num[args.dataset]:] = 1
    model_config = {
        'complex_dim': args.latent_dims,
        'primitive_dim': 512,
        'seen_mask': seen_mask == 1,
        'obj_encodes': torch.Tensor(obj_emb).cuda(),
        'att_encodes': torch.Tensor(att_emb).cuda(),
        'obj_encode_dim': obj_encode_dims[args.dataset],
        'att_encode_dim': att_encode_dims[args.dataset],
        'obj_class_num': obj_class_num[args.dataset],
        'att_class_num': att_class_num[args.dataset],
        'obj_adj': torch.Tensor(normalize(obj_adj)).cuda(),
        'att_adj': torch.Tensor(normalize(att_adj)).cuda(),
        'dropout': args.dropout,
        'args': args
    }
    device = torch.device('cuda')
    model = SepMix(**model_config).cuda()
    # strict=False tolerates missing/extra keys in the checkpoint.
    pretrained = torch.load(args.model_path)
    model.load_state_dict(pretrained['model'], strict=False)
    vis_backbone = None
    val = val_separate  # val_separate val_distance
    if args.test_only:
        test_info = val(model, test_dataloader, topk=args.topk, phase='test', device=device, complex_dim=args.latent_dims, vis_backbone=vis_backbone, args=args)
        print(test_info)
        print()
        test_info = val(model, val_dataloader, topk=args.topk, phase='val', device=device, complex_dim=args.latent_dims, vis_backbone=vis_backbone, args=args)
print(test_info) | 13,469 | 46.263158 | 183 | py |
Relation-CZSL | Relation-CZSL-master/train.py | import os, shutil
import json
import os.path as osp
import re
import logging
import time
import random
from functools import reduce
import resource
# Raise the soft open-file limit to 2048 (keep the existing hard limit):
# many DataLoader workers / tensorboard writers can exhaust the default.
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (2048, rlimit[1]))
import numpy as np
import scipy as sp
from scipy.spatial.distance import pdist, squareform
from tensorboardX import SummaryWriter
from tqdm import tqdm
import torch
from torch import nn
# torch.multiprocessing.set_sharing_strategy('file_system')
from torch.utils.data import DataLoader
from torch.nn import functional as F
from torchvision.models import resnet18
from model.misc.utils import init, RunningAverage, ShouldSaveModel, myself, prepare_train, save_checkpoint, \
longtensor_to_one_hot, EarlyStop, wait_gpu, worker_init_fn_seed, elapsed_timer, load_checkpoint, plot_grad_flow
from model.datasets.glove import load_glove_as_dict
from model.datasets.CompositionDataset import CompositionDataset
from model.SepMask import SepMix
from model.pygcn import normalize
# Shared keyword arguments for every tqdm progress bar in this file.
tqdm_commons = {'ncols': 100, 'ascii': True, 'leave': True}
# Unless NO_GPU_WAIT is set, block until a GPU with at least REQ_MEM (MB,
# default 4000) of free memory is available.
if 'NO_GPU_WAIT' not in os.environ.keys():
    wait_gpu(req_mem=int(os.environ.get('REQ_MEM', '4000')))
def params(p):
    """Register this script's command-line options on parser ``p`` and return it."""
    add = p.add_argument
    # --- data / split selection ---
    add('--dataset', choices=['mitstates', 'ut-zap50k'],
        default='ut-zap50k', help='Dataset for training and testing.')
    add('--data_path', default='.', help='Path where you place your dataset.')
    add('--split', choices=['compositional-split', 'natural-split'], default='compositional-split')
    # --- optimization hyper-parameters ---
    add('--lr', type=float, default=1e-4)
    add('--lr_decay', '--lr-decay', type=float, default=0.1)
    add('--batch-size', '--batch_size', type=int, default=512)
    add('--test-batch-size', '--test_batch_size', type=int, default=32)
    add('--momentum', type=float, default=0.9)
    add('--weight_decay', type=float, default=1e-5)
    add('--sch_milestones', type=int, nargs='+', default=[500])
    add('--dropout', type=float, default=0.0)
    # --- loss shaping / model size ---
    add('--loss_weights', type=str, default='{}')
    add('--rank_margin', type=float, default=1.0)
    add('--latent_dims', type=int, default=512)
    add('--kneg', type=int, default=5)
    add('--num_workers', type=int, default=5)
    # --- meta (generalization) step and misc flags ---
    add('--meta_samples', type=float, default=0.9)
    add('--meta_inclusive', action='store_true', default=True)
    add('--pre_feat', action='store_true', default=False)
    add('--debug_val', '--debug-val',
        action='store_true', default=False)
    add('--model_dir', type=str, default=".")
    return p
def log_t_loss(neg, pos, anchor, sim=None, margin=None):
    """Softplus-style triplet ranking loss, averaged over the batch size of ``pos``."""
    gap = margin + sim(neg, anchor) - sim(pos, anchor)
    return torch.log(1 + torch.exp(gap)).sum() / pos.shape[0]
def t_loss(neg, pos, anchor, sim=None, margin=None):
    """Hinge triplet ranking loss, averaged over the batch size of ``pos``."""
    gap = margin + sim(neg, anchor) - sim(pos, anchor)
    return F.relu(gap).sum() / pos.shape[0]
def log_m_loss(x, anchor, sim=None, margin=None):
    """Soft margin penalty on the similarity of ``x`` to ``anchor``, batch-averaged."""
    scores = margin + sim(x, anchor)
    return torch.log(1 + torch.exp(scores)).sum() / x.shape[0]
def m_loss(x, anchor, sim=None, margin=None):
    """Hinge margin penalty on the similarity of ``x`` to ``anchor``, batch-averaged."""
    scores = margin + sim(x, anchor)
    return F.relu(scores).sum() / x.shape[0]
def sep_loss(a, b):
    """Separation penalty: Frobenius norm of the elementwise product, batch-averaged."""
    overlap = a * b
    return overlap.norm().sum() / a.shape[0]
def h_mean(a, b):
    """Elementwise harmonic mean of ``a`` and ``b``; the 1e-8 keeps 0/0 finite."""
    return (2 * a * b) / (a + b + 1e-8)
def sim(x, y): return -(x-y).norm(p=2, dim=1).unsqueeze(1)
def loss_meta(*args, **kwargs):
    """Meta-step loss: combines reconstruction, classification and triplet terms
    computed on the 'masked' outputs of the model (target composition hidden).

    args = (output_pp, negs): positive-pair output dict and a list of
    (pos-att/neg-obj, neg-att/pos-obj) output-dict pairs.
    Returns a dict of weighted loss terms; zero-weighted or never-computed
    terms are dropped before returning.
    """
    output_pp, negs = args
    loss_weights = kwargs['loss_weights']
    rank_margin = kwargs['rank_margin']
    # rand_a / rand_o stochastically gate the attribute / object triplet terms
    rand_a = kwargs['rand_a']
    rand_o = kwargs['rand_o']
    should_print = kwargs['should_print']
    # lp: pointwise terms (reconstruction + linguistic/image classification)
    lp = dict.fromkeys(['ra', 'ro', 'ica', 'ico', 'lca', 'lco'], 0.)
    # reconstruction: unmasked vs masked linguistic attribute/object values
    lp['ra'] = F.mse_loss(output_pp['lin_att_values'], output_pp['masked']['lin_att_values'])
    lp['ro'] = F.mse_loss(output_pp['lin_obj_values'], output_pp['masked']['lin_obj_values'])
    # classification of masked linguistic and image logits
    lp['lca'] = F.nll_loss(F.log_softmax(output_pp['masked']['lin_att_logits'], -1), output_pp['masked']['att_idx'])
    lp['lco'] = F.nll_loss(F.log_softmax(output_pp['masked']['lin_obj_logits'], -1), output_pp['masked']['obj_idx'])
    lp['ica'] = F.nll_loss(F.log_softmax(output_pp['masked']['im_att_logits'], -1), output_pp['masked']['att_idx'])
    lp['ico'] = F.nll_loss(F.log_softmax(output_pp['masked']['im_obj_logits'], -1), output_pp['masked']['obj_idx'])
    # ln: triplet ranking terms accumulated over negative pairs
    ln = dict.fromkeys(['ta', 'to', 'ita', 'ito'], 0.)
    for k in range(len(negs)):
        output_pn, output_np = negs[k]
        if rand_a > loss_weights['step_a']:
            # attribute triplets: negative-attribute sample vs positive pair
            ln['ta'] += log_t_loss(output_np['masked']['lin_att_values'], output_pp['masked']['lin_att_values'], output_pp['masked']['im_att_feat'],
                                   sim=sim, margin=rank_margin)
            ln['ita'] += log_t_loss(output_np['masked']['im_att_feat'], output_pp['masked']['im_att_feat'], output_pp['masked']['lin_att_values'],
                                    sim=sim, margin=rank_margin)
        if rand_o >loss_weights['step_o']:
            # object triplets: negative-object sample vs positive pair
            ln['to'] += log_t_loss(output_pn['masked']['lin_obj_values'], output_pp['masked']['lin_obj_values'], output_pp['masked']['im_obj_feat'],
                                   sim=sim, margin=rank_margin)
            ln['ito'] += log_t_loss(output_pn['masked']['im_obj_feat'], output_pp['masked']['im_obj_feat'], output_pp['masked']['lin_obj_values'],
                                    sim=sim, margin=rank_margin)
    # average triplet terms over the number of negative pairs
    for k in ln.keys():
        ln[k] /= len(negs)
    losses = {**lp, **ln}
    pop_keys = []
    for k in losses.keys():
        lw = loss_weights.get(k, 0.0)
        # drop zero-weighted terms; a plain float means the term was never
        # computed (still the 0. placeholder from dict.fromkeys)
        if lw == 0.0 or type(losses[k]) is float:
            pop_keys.append(k)
            continue
        losses[k] *= lw
    for ki in pop_keys:
        losses.pop(ki)
    return losses
def loss_separate(*args, **kwargs):
    """Main training loss on the unmasked model outputs: attribute/object
    classification (linguistic and image branches) plus stochastically gated
    triplet ranking terms against negative pairs.

    args = (output_pp, negs): positive-pair output dict and a list of
    (pos-att/neg-obj, neg-att/pos-obj) output-dict pairs.
    Returns a dict of weighted loss terms; zero-weighted or never-computed
    terms are dropped before returning ('ra'/'ro' are never set here).
    """
    output_pp, negs = args
    loss_weights = kwargs['loss_weights']
    rank_margin = kwargs['rank_margin']
    should_print = kwargs['should_print']
    # rand_a / rand_o stochastically gate the attribute / object triplet terms
    rand_a = kwargs['rand_a']
    rand_o = kwargs['rand_o']
    lp = dict.fromkeys(['ra', 'ro', 'ica', 'ico', 'lca', 'lco'], 0.)
    # classification of linguistic ('l') and image ('i') logits
    lp['lca'] = F.nll_loss(F.log_softmax(output_pp['lin_att_logits'], -1), output_pp['att_idx'])
    lp['lco'] = F.nll_loss(F.log_softmax(output_pp['lin_obj_logits'], -1), output_pp['obj_idx'])
    lp['ica'] = F.nll_loss(F.log_softmax(output_pp['im_att_logits'], -1), output_pp['att_idx'])
    lp['ico'] = F.nll_loss(F.log_softmax(output_pp['im_obj_logits'], -1), output_pp['obj_idx'])
    # ln: triplet ranking terms accumulated over negative pairs
    ln = dict.fromkeys(['ta', 'to', 'ita', 'ito'], 0.)
    for k in range(len(negs)):
        output_pn, output_np = negs[k]
        if rand_a > loss_weights['step_a']:
            # attribute triplets: negative-attribute sample vs positive pair
            ln['ta'] += log_t_loss(output_np['lin_att_values'], output_pp['lin_att_values'], output_pp['im_att_feat'],
                                   sim=sim, margin=rank_margin)
            ln['ita'] += log_t_loss(output_np['im_att_feat'], output_pp['im_att_feat'], output_pp['lin_att_values'],
                                    sim=sim, margin=rank_margin)
        if rand_o >loss_weights['step_o']:
            # object triplets: negative-object sample vs positive pair
            ln['to'] += log_t_loss(output_pn['lin_obj_values'], output_pp['lin_obj_values'], output_pp['im_obj_feat'],
                                   sim=sim, margin=rank_margin)
            ln['ito'] += log_t_loss(output_pn['im_obj_feat'], output_pp['im_obj_feat'], output_pp['lin_obj_values'],
                                    sim=sim, margin=rank_margin)
    # average triplet terms over the number of negative pairs
    for k in ln.keys():
        ln[k] /= len(negs)
    losses = {**lp, **ln}
    pop_keys = []
    for k in losses.keys():
        lw = loss_weights.get(k, 0.0)
        # drop zero-weighted terms; a plain float means the term was never
        # computed (still the 0. placeholder from dict.fromkeys)
        if lw == 0.0 or type(losses[k]) is float:
            pop_keys.append(k)
            continue
        losses[k] *= lw
    for ki in pop_keys:
        losses.pop(ki)
    return losses
def val_separate(model, dataloader, phase='val', topk=1, **kwargs):
    """Generalized-ZSL evaluation with a calibration-bias sweep.

    Extracts image attribute/object features for every test sample, builds
    linguistic embeddings for every seen and unseen (attribute, object) pair,
    scores each sample against all candidate pairs with the negative-distance
    ``sim``, then sweeps an additive bias on the unseen-pair scores (STEPS
    operating points) to trade seen vs unseen accuracy.

    Returns a dict with AUC of the seen/unseen accuracy curve, best seen,
    unseen, overall accuracies and the best harmonic mean.
    Requires CUDA (tensors are moved with .cuda()).
    """
    args = kwargs['args']
    model.eval()
    correct_unseen = 0
    correct_seen = 0
    train_pairs = dataloader.dataset.train_pairs
    test_pairs = dataloader.dataset.val_pairs if dataloader.dataset.phase == 'val' else dataloader.dataset.test_pairs
    with torch.no_grad():
        test_sample_num = len(dataloader.dataset)
        # seen = pairs present in training; unseen = test-only compositions
        seen_pairs = sorted(list(set(train_pairs).intersection(test_pairs)))
        unseen_pairs = sorted(list(set(test_pairs) - set(train_pairs)))
        # pair_data_seen = np.zeros([len(seen_pairs), kwargs['complex_dim']])
        pair_data_seen_att = np.zeros([len(seen_pairs), kwargs['complex_dim']])
        pair_data_seen_obj = np.zeros([len(seen_pairs), kwargs['complex_dim']])
        # pair_data_unseen = np.zeros([len(unseen_pairs), kwargs['complex_dim']])
        pair_data_unseen_att = np.zeros([len(unseen_pairs), kwargs['complex_dim']])
        pair_data_unseen_obj = np.zeros([len(unseen_pairs), kwargs['complex_dim']])
        # test_data = np.zeros([test_sample_num, kwargs['complex_dim']])
        test_data_att = np.zeros([test_sample_num, kwargs['complex_dim']])
        test_data_obj = np.zeros([test_sample_num, kwargs['complex_dim']])
        i = 0
        # 1) extract per-image attribute/object features for all test samples
        for _, data in tqdm(enumerate(dataloader), desc='GT Feature', total=len(dataloader), disable=args.no_pbar, **tqdm_commons):
            if args.parallel:
                output = model.module.forward(data[0].float(), None, None, status='eval', vis_backbone=kwargs['vis_backbone'])
            else:
                output = model.forward(data[0].float(), None, None, status='eval', vis_backbone=kwargs['vis_backbone'])
            feat_tmp = output['im_feat']
            # test_data[i:i + feat_tmp.shape[0], :] = output['im_feat'].detach().cpu().numpy()
            test_data_att[i:i + feat_tmp.shape[0], :] = output['im_att_feat'].detach().cpu().numpy()
            test_data_obj[i:i + feat_tmp.shape[0], :] = output['im_obj_feat'].detach().cpu().numpy()
            # test_data_residue[i:i + dataloader.batch_size, :] = model.get_residue(data[0].cuda()).detach().cpu().numpy()
            i += dataloader.batch_size
            if args.debug_mode:
                break
        # 2) embed every unseen candidate pair (image input is a dummy:
        #    ignore_img=True; 'data' is whatever batch the loop above left)
        for i in range(0, len(unseen_pairs)):
            att_idx = torch.Tensor([dataloader.dataset.attr2idx[unseen_pairs[i][0]]]).long()
            obj_idx = torch.Tensor([dataloader.dataset.obj2idx[unseen_pairs[i][1]]]).long()
            if args.parallel:
                output = model.module.forward(data[0][0].unsqueeze(0).float(), att_idx, obj_idx, mask_target=True, status='eval', ignore_img=True)
            else:
                output = model.forward(data[0][0].unsqueeze(0).float(), att_idx, obj_idx, mask_target=True, status='eval', ignore_img=True)
            # tmp = output['lin_feat_recs']
            # pair_data_unseen[i, :] = output['lin_feat_recs'].detach().cpu().numpy()
            pair_data_unseen_att[i, :] = output['lin_att_values'].detach().cpu().numpy()
            pair_data_unseen_obj[i, :] = output['lin_obj_values'].detach().cpu().numpy()
            if args.debug_mode:
                break
        # 3) embed every seen candidate pair the same way
        for i in range(0, len(seen_pairs)):
            att_idx = torch.Tensor([dataloader.dataset.attr2idx[seen_pairs[i][0]]]).long()
            obj_idx = torch.Tensor([dataloader.dataset.obj2idx[seen_pairs[i][1]]]).long()
            if args.parallel:
                output = model.module.forward(data[0][0].unsqueeze(0).float(), att_idx, obj_idx, status='eval', mask_target=True, ignore_img=True)
            else:
                output = model.forward(data[0][0].unsqueeze(0).float(), att_idx, obj_idx, status='eval', mask_target=True, ignore_img=True)
            # tmp = output['lin_feat_recs']
            # pair_data_seen[i, :] = output['lin_feat_recs'].detach().cpu().numpy()
            pair_data_seen_att[i, :] = output['lin_att_values'].detach().cpu().numpy()
            pair_data_seen_obj[i, :] = output['lin_obj_values'].detach().cpu().numpy()
            if args.debug_mode:
                break
        # pair_data_seen[0:len(unseen_pairs), :] = pair_data_unseen
        # candidate list order below is always [unseen_pairs, seen_pairs]
        pair_t_unseen_att = torch.FloatTensor(pair_data_unseen_att).cuda()
        pair_t_seen_att = torch.FloatTensor(pair_data_seen_att).cuda()
        pair_t_att = torch.cat((pair_t_unseen_att, pair_t_seen_att))
        pair_t_unseen_obj = torch.FloatTensor(pair_data_unseen_obj).cuda()
        pair_t_seen_obj = torch.FloatTensor(pair_data_seen_obj).cuda()
        pair_t_obj = torch.cat((pair_t_unseen_obj, pair_t_seen_obj))
        dist = torch.zeros(test_sample_num, len(unseen_pairs) + len(seen_pairs))
        STEPS = 50
        correct_unseen = torch.zeros(STEPS, )
        total_unseen = 0
        correct_seen = torch.zeros(STEPS, )
        total_seen = 0
        # 4) score every sample against every candidate pair (att + obj similarity)
        for i in tqdm(range(0, test_sample_num), disable=args.no_pbar, **tqdm_commons):
            dist[i] = sim(pair_t_att, torch.Tensor(test_data_att[i, :]).cuda().repeat(pair_t_att.shape[0], 1)).squeeze() + \
                      sim(pair_t_obj, torch.Tensor(test_data_obj[i, :]).cuda().repeat(pair_t_obj.shape[0], 1)).squeeze()
        dist_diff = dist.max() - dist.min()
        # bias sweep covers the full score range in both directions
        biases = torch.linspace(-dist_diff-0.1, dist_diff+0.1, STEPS)
        # 5) for each operating point, add the bias to unseen-pair scores and
        #    take the top-k candidates as predictions
        for i in tqdm(range(0, test_sample_num), disable=args.no_pbar, **tqdm_commons):
            _, att_gt, obj_gt = dataloader.dataset.data[i]
            is_seen = (att_gt, obj_gt) in seen_pairs
            if is_seen:
                total_seen += 1
            else:
                total_unseen += 1
            for ii, bias in enumerate(biases):
                dist_bias = dist[i].clone()
                dist_bias[:len(unseen_pairs)] += bias
                preds = dist_bias.argsort(dim=0)[-topk:]
                for pred in preds:
                    pred_pairs = (unseen_pairs + seen_pairs)[pred]
                    correct = int(pred_pairs[0] == att_gt and pred_pairs[1] == obj_gt)
                    if is_seen:
                        correct_seen[ii] += correct
                    else:
                        correct_unseen[ii] += correct
                    # NOTE(review): this 'continue' is the last statement of the
                    # loop body, so it has no effect (a 'break' may have been
                    # intended for top-k counting) — confirm intent.
                    if correct == 1:
                        continue
            if args.debug_mode:
                break
        # per-operating-point accuracies and curve statistics
        correct_unseen /= total_unseen
        correct_seen /= total_seen
        auc = torch.trapz(correct_seen, correct_unseen)
        seen_acc = float(correct_seen.max())
        unseen_acc = float(correct_unseen.max())
        test_info = {
            'phase': phase,
            'auc': float(auc),
            'seen_acc': seen_acc,
            'unseen_acc': unseen_acc,
            'overall_acc': (total_seen * seen_acc + total_unseen * unseen_acc) / (total_seen + total_unseen),
            'h_mean': float(h_mean(correct_unseen, correct_seen).max())
        }
        return test_info
def split_meta(*args, meta_samples=0.9, meta_inclusive=False):
    """Split batch indices into (train_idx, meta_idx).

    ``meta_idx`` is a random sample of ``meta_samples`` of the batch; with
    ``meta_inclusive`` the training subset is the whole batch (overlapping the
    meta subset), otherwise it is the complement of the meta subset.
    """
    a_label_p, o_label_p = args
    all_indices = list(range(len(a_label_p)))
    n_meta = int(len(all_indices) * meta_samples)
    meta_idx = random.sample(all_indices, n_meta)
    if meta_inclusive:
        return all_indices, meta_idx
    train_idx = list(set(all_indices) - set(meta_idx))
    return train_idx, meta_idx
def train_step(model, optimizer, data_loader, loss_func=loss_separate, meta_loss_func=None, meta_optimizer=None, device=torch.device('cuda'), args=None, **kwargs):
    """Run one training epoch.

    Each batch provides a positive sample plus two negatives at fixed offsets
    (data[k], data[k+7], data[k+14]): same-attribute/different-object and
    different-attribute/same-object. Runs the main loss step and, when a
    ``meta_optimizer`` is given, an extra generalization step on a (possibly
    overlapping) meta subset using masked model outputs.

    Returns a dict with running loss averages and train accuracies.
    """
    model.train()
    train_info = {}
    train_loss_avg = RunningAverage(len(data_loader))
    train_loss_meta_avg = RunningAverage(len(data_loader))
    loss_weights = kwargs['loss_weights']
    t = tqdm(data_loader, disable=args.no_pbar, **tqdm_commons)
    should_print = True
    correct = att_correct = obj_correct = acc_att = acc_obj = total_count = 0
    # anomaly detection only in debug mode (it is expensive)
    with torch.autograd.set_detect_anomaly(args.debug_mode):
        for i, data in enumerate(t):
            # positive sample and its attribute/object labels
            img_p, a_label_p, o_label_p = data[0], data[3], data[4]
            # negative with same attribute, different object
            img_pn, att_idx_pn, obj_idx_pn = data[0+7], data[3+7], data[4+7]
            # negative with different attribute, same object
            img_np, att_idx_np, obj_idx_np = data[0+14], data[3+14], data[4+14]
            if meta_optimizer is not None:
                train_idx, meta_idx = split_meta(a_label_p, o_label_p, meta_samples=args.meta_samples, meta_inclusive=True)
            else:
                train_idx = list(range(len(a_label_p)))
            loss = torch.Tensor([0])
            # per-batch gates for the stochastic triplet terms in the loss
            rand_a = np.random.rand()
            rand_o = np.random.rand()
            if len(train_idx) > 0:
                output_pp = model.forward(img_p[train_idx], a_label_p[train_idx], o_label_p[train_idx], mask_target=False, vis_backbone=kwargs['vis_backbone'])
                # accuracy bookkeeping from the image-branch logits
                att_preds = torch.argmax(torch.softmax(output_pp['im_att_logits'], -1), -1)
                obj_preds = torch.argmax(torch.softmax(output_pp['im_obj_logits'], -1), -1)
                att_correct = (att_preds == a_label_p.to(att_preds.device))
                obj_correct = (obj_preds == o_label_p.to(att_preds.device))
                correct += int((att_correct & obj_correct).sum())
                acc_att += int(att_correct.sum())
                acc_obj += int(obj_correct.sum())
                total_count += data[0].shape[0]
                negs = []
                output_pn = model.forward(img_pn[train_idx], att_idx_pn[train_idx], obj_idx_pn[train_idx], mask_target=False, vis_backbone=kwargs['vis_backbone']) # pos att, neg obj
                output_np = model.forward(img_np[train_idx], att_idx_np[train_idx], obj_idx_np[train_idx], mask_target=False, vis_backbone=kwargs['vis_backbone']) # neg att, pos obj
                negs.append((output_pn, output_np))
                losses = loss_func(output_pp, negs, rand_a=rand_a, rand_o=rand_o, loss_weights=loss_weights, should_print=should_print, rank_margin=args.rank_margin, args=args)
                loss = 0
                for _, v in losses.items():
                    loss += v
                t.set_description(''.join([f'{k}={round(v.item(),3)} ' for k, v in losses.items()]))
                # NaN check: a NaN is never equal to itself
                if loss != loss:
                    logging.getLogger(myself()).critical('Training aborted because loss becomes NaN.')
                    raise ValueError
                if loss != 0:
                    model.zero_grad()
                    loss.backward(retain_graph=True)
                    optimizer.step()
            # extra generalization training
            loss_meta = torch.Tensor([0])
            if meta_optimizer is not None:
                # meta step compares unmasked vs masked (target hidden) outputs
                output_pp = model.forward(img_p[meta_idx], a_label_p[meta_idx], o_label_p[meta_idx], mask_target=False, vis_backbone=kwargs['vis_backbone'], ignore_img=True)
                output_pp.update({'masked': model.forward(img_p[meta_idx], a_label_p[meta_idx], o_label_p[meta_idx], mask_target=True, vis_backbone=kwargs['vis_backbone'])})
                output_pn = {}
                output_pn.update({'masked': model.forward(img_pn[meta_idx], att_idx_pn[meta_idx], obj_idx_pn[meta_idx], mask_target=True, vis_backbone=kwargs['vis_backbone'])})
                output_np = {}
                output_np.update({'masked': model.forward(img_np[meta_idx], att_idx_np[meta_idx], obj_idx_np[meta_idx], mask_target=True, vis_backbone=kwargs['vis_backbone'])})
                losses_meta = meta_loss_func(output_pp, [(output_pn, output_np)], rand_a=rand_a, rand_o=rand_o, loss_weights=loss_weights, should_print=should_print, rank_margin=args.rank_margin, args=args)
                loss_meta = 0
                for _, v in losses_meta.items():
                    loss_meta += v
                if loss_meta != loss_meta:
                    logging.getLogger(myself()).critical('Training aborted because loss_meta becomes NaN.')
                    raise ValueError
                model.zero_grad()
                loss_meta.backward(retain_graph=True)
                meta_optimizer.step()
            if loss != 0:
                train_info = {
                    'phase': 'train',
                    'loss': train_loss_avg.add(loss.item()),
                    'loss_meta': train_loss_meta_avg.add(loss_meta.item()),
                    'acc': correct / total_count,
                    'acc_att': acc_att / total_count,
                    'acc_obj': acc_obj / total_count
                }
            else:
                train_info = {
                    'phase': 'train',
                    'loss': 0,
                    'loss_meta': train_loss_meta_avg.add(loss_meta.item()),
                    'acc': correct / total_count,
                    'acc_att': acc_att / total_count,
                    'acc_obj': acc_obj / total_count
                }
            if args.debug_mode:
                break
            should_print = False
    return train_info
if __name__ == '__main__':
    args = init(user_param=params)
    # per-dataset class counts and word-embedding dimensions
    obj_class_num = {'ut-zap50k': 12, 'mitstates': 245}
    att_class_num = {'ut-zap50k': 16, 'mitstates': 115}
    obj_encode_dims = {'ut-zap50k': 300, 'mitstates': 300}
    att_encode_dims = {'ut-zap50k': 300, 'mitstates': 300}
    # loss term weights come in as a JSON string on the command line
    loss_weights = json.loads(args.loss_weights)
    glove_embedding = load_glove_as_dict(f'{args.data_path}/glove', dimension=300, identifier='42B')
    train_dataset = CompositionDataset(f'{args.data_path}/{args.dataset}', 'train', split=args.split, embedding_dict=glove_embedding, kneg=args.kneg, precompute_feat=args.pre_feat)
    test_dataset = CompositionDataset(f'{args.data_path}/{args.dataset}', 'test', split=args.split, embedding_dict=glove_embedding, precompute_feat=args.pre_feat)
    train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True,
                                  num_workers=args.num_workers, pin_memory=True, worker_init_fn=worker_init_fn_seed(args), drop_last=True)
    test_dataloader = DataLoader(test_dataset, batch_size=args.test_batch_size, shuffle=False,
                                 num_workers=args.num_workers, pin_memory=True, worker_init_fn=worker_init_fn_seed(args), drop_last=False)
    # compositional-split has no separate val set: validate on the test set
    if args.split == 'compositional-split':
        val_dataset = test_dataset
        val_dataloader = test_dataloader
    elif args.split == 'natural-split':
        val_dataset = CompositionDataset(f'{args.data_path}/{args.dataset}', 'val', split=args.split, embedding_dict=glove_embedding, precompute_feat=args.pre_feat)
        val_dataloader = DataLoader(val_dataset, batch_size=args.test_batch_size, shuffle=False,
                                    num_workers=args.num_workers, pin_memory=True, worker_init_fn=worker_init_fn_seed(args), drop_last=False)
def _emb(s):
ss = re.split('\.|-', s)
emb = np.zeros(glove_embedding['the'].shape)
if len(ss) == 1:
try:
emb = glove_embedding[ss[0]]
except KeyError:
logging.warning(
f'Cannot embed word \"{ss[0]}\", fallback to <unk>')
emb = glove_embedding['<unk>']
else:
for w in ss:
try:
emb += glove_embedding[w]
except KeyError:
logging.warning(
f'Cannot embed word \"{w}\", fallback to <unk>')
emb += glove_embedding['<unk>']
return emb
    # GloVe embeddings for every attribute / object label
    att_emb_dict = {k: v for (k, v) in [(
        kk, _emb(kk.lower())) for kk in train_dataset.attrs]}
    obj_emb_dict = {k: v for (k, v) in [(
        kk, _emb(kk.lower())) for kk in train_dataset.objs]}
    train_dataset.att_emb_dict = att_emb_dict
    train_dataset.obj_emb_dict = obj_emb_dict
    test_dataset.att_emb_dict = att_emb_dict
    test_dataset.obj_emb_dict = obj_emb_dict
    val_dataset.att_emb_dict = att_emb_dict
    val_dataset.obj_emb_dict = obj_emb_dict
    # cosine-similarity adjacency matrices between attributes / objects
    att_emb = np.array([v for (_, v) in att_emb_dict.items()])
    att_adj = squareform(1-pdist(att_emb, 'cosine'))
    obj_emb = np.array([v for (_, v) in obj_emb_dict.items()])
    obj_adj = squareform(1-pdist(obj_emb, 'cosine'))
    device = torch.device('cuda')
    # seen_mask[i, j] == 1 marks primitive pairs that co-occur in training
    # (rows/cols are attributes first, then objects, offset by att_class_num);
    # the att-att and obj-obj quadrants are fully unmasked.
    seen_mask = torch.zeros((att_class_num[args.dataset]+obj_class_num[args.dataset], att_class_num[args.dataset]+obj_class_num[args.dataset]))
    for seen_pair in train_dataset.train_pairs:
        att_idx, obj_idx = train_dataset.attr2idx[seen_pair[0]], train_dataset.obj2idx[seen_pair[1]]
        seen_mask[att_idx, att_class_num[args.dataset]+obj_idx] = 1
        seen_mask[att_class_num[args.dataset]+obj_idx, att_idx] = 1
    seen_mask[:att_class_num[args.dataset], :att_class_num[args.dataset]] = 1
    seen_mask[att_class_num[args.dataset]:, att_class_num[args.dataset]:] = 1
    model_config = {
        'complex_dim': args.latent_dims,
        'primitive_dim': 512,
        'seen_mask': seen_mask == 1,
        'obj_encodes': torch.Tensor(obj_emb).cuda(),
        'att_encodes': torch.Tensor(att_emb).cuda(),
        'obj_encode_dim': obj_encode_dims[args.dataset],
        'att_encode_dim': att_encode_dims[args.dataset],
        'obj_class_num': obj_class_num[args.dataset],
        'att_class_num': att_class_num[args.dataset],
        'obj_adj': torch.Tensor(normalize(obj_adj)).cuda(),
        'att_adj': torch.Tensor(normalize(att_adj)).cuda(),
        'dropout': args.dropout,
        'args': args
    }
    model = SepMix(**model_config).cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    lr_schdlr = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.sch_milestones, gamma=0.1, last_epoch=args.start_epoch-1)
    # frozen ResNet-18 feature extractor, unless features are precomputed
    vis_backbone = None if args.pre_feat else torch.nn.Sequential(*list(resnet18(pretrained=True).children())[:-1]).cuda().eval()
    val = val_separate # val_separate val_distance
    loss_func = loss_separate # loss_separate loss_dist
    meta_loss_func = loss_meta # loss_meta loss_meta_dist
    # the meta step reuses the main optimizer when meta sampling is enabled
    meta_optimizer = optimizer if args.meta_samples > 0 else None
    model, optimizer, lr_schdlr = prepare_train(model, optimizer, lr_schdlr, args)
    writer = SummaryWriter(log_dir=args.summary_to)
    if not args.test_only:
        # snapshot the model definition and this script next to the checkpoints
        shutil.copy(osp.join('model', 'SepMask.py'), osp.join(args.save_model_to, args.model_id, 'SepMask.py'))
        shutil.copy(osp.join('.', 'train.py'), osp.join(args.save_model_to, args.model_id, 'train.py'))
        ss = ShouldSaveModel(init_step=args.start_epoch-1)
        es = EarlyStop(patience=args.patience)
        for epoch in range(args.start_epoch, args.max_epoch):
            logging.getLogger(myself()).info("-"*10 + f" Epoch {epoch} starts. " + "-"*10) # for timing
            with elapsed_timer() as elapsed:
                train_info = train_step(model, optimizer, train_dataloader, loss_func=loss_func, meta_loss_func=meta_loss_func, meta_optimizer=meta_optimizer, loss_weights=loss_weights, device=device, vis_backbone=vis_backbone, args=args)
            logging.getLogger(myself()).info(f"Epoch {epoch} finished. Elapsed={elapsed():.2f}s.") # for timing
            logging.getLogger(myself()).info(
                f"Epoch {epoch}, "
                f"{train_info}"
            )
            val_info = val(model, val_dataloader, phase='val', device=device, complex_dim=args.latent_dims, vis_backbone=vis_backbone, args=args)
            logging.getLogger(myself()).info(
                f"Epoch {epoch}, "
                f"{val_info}"
            )
            lr_schdlr.step()
            states_dict = {
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'lr_scheduler': lr_schdlr.state_dict(),
                'checkpoint_epoch': epoch,
                'initial_lr': args.lr
            }
            # model selection / early stopping are both driven by validation AUC
            if ss.step(loss=1e3, acc=val_info['auc'], criterion=lambda x1, x2: x2):
                save_checkpoint(
                    f'{args.save_model_to}/{args.model_id}/best.state', **states_dict)
            save_checkpoint(
                f'{args.save_model_to}/{args.model_id}/latest.state', **states_dict)
            if es.step(loss=1e3, acc=val_info['auc'], criterion=lambda x1, x2: x2):
                break
            if args.debug_mode:
                break
        logging.getLogger(myself()).info('Training ended.')
        # reload the best checkpoint and report final test metrics
        states = load_checkpoint(f'{args.save_model_to}/{args.model_id}/best.state', state_dict_to_load=['model', 'checkpoint_epoch'])
        best_epoch = states['checkpoint_epoch']
        model.load_state_dict(states['model'])
        test_info = val(model, test_dataloader, phase='test', device=device, complex_dim=args.latent_dims, vis_backbone=vis_backbone, args=args)
        logging.getLogger(myself()).info(
            f"Best model at epoch {best_epoch}, "
            f"{test_info}"
        )
    elif args.test_only:
        best_epoch = 0
        # evaluation-only mode: load a pretrained checkpoint and report top-1..3
        pretrained = torch.load(os.path.join(args.model_dir, 'best.state'))
        model.load_state_dict(pretrained['model'])
        for i in range(3):
            print("step: ", i+1)
            test_info = val(model, test_dataloader, topk=i+1, phase='test', device=device, complex_dim=args.latent_dims, vis_backbone=vis_backbone, args=args)
            print("test: {}".format(i+1))
            print(test_info)
            print()
            test_info = val(model, val_dataloader, topk=i+1, phase='test', device=device, complex_dim=args.latent_dims, vis_backbone=vis_backbone, args=args)
            print("val: {}".format(i+1))
            print(test_info)
            print('\n\n')
    writer.close()
| 29,461 | 46.983713 | 238 | py |
Relation-CZSL | Relation-CZSL-master/model/SepMask.py | import itertools
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import resnet18
from scipy.sparse import diags
VIS_BACKBONE_FEAT_DIM = 512
class Discriminator(nn.Module):
    """Two-layer MLP discriminator returning per-class log-probabilities.

    Args:
        input_dims: size of the input feature vector.
        hidden_dims: width of the hidden layer.
        output_dims: number of output classes.
    """
    def __init__(self, input_dims=512, hidden_dims=512, output_dims=2):
        super(Discriminator, self).__init__()
        self.fc1 = nn.Linear(input_dims, hidden_dims)
        self.rl1 = nn.ReLU()
        # BUG FIX: the second layer previously mapped hidden_dims -> hidden_dims,
        # silently ignoring `output_dims`; it now projects to the requested size.
        self.fc2 = nn.Linear(hidden_dims, output_dims)
        # explicit dim avoids the deprecated implicit-dim LogSoftmax behavior
        self.lsm = nn.LogSoftmax(dim=-1)

    def forward(self, x):
        """Return log-probabilities of shape (..., output_dims)."""
        h = self.rl1(self.fc1(x))
        return self.lsm(self.fc2(h))
def _to_onehot(labels, num_classes):
oh = torch.zeros(labels.shape[0], num_classes).cuda()
oh.scatter_(1, labels.unsqueeze(1).long(), 1)
return oh
def normalize(mx):
    """Row-normalize ``mx`` so every row sums to 1; all-zero rows stay zero.

    Accepts a single matrix (2-D tensor) or a batch of matrices (3-D tensor).
    """
    inv_rowsum = 1 / mx.sum(-1)
    inv_rowsum[torch.isinf(inv_rowsum)] = 0  # zero rows would divide by zero
    if len(mx.shape) == 2:
        return torch.diag(inv_rowsum.flatten()) @ mx
    return torch.diag_embed(inv_rowsum).bmm(mx)
class GraphConvolution(nn.Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
    Re-implemented using Conv1d to support batch operation.
    """
    def __init__(self, in_features, out_features, bias=True, groups=1, adj=None, num_weights=1, **kwargs):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # default adjacency (forward may receive one explicitly instead)
        self.adj = adj
        # NOTE(review): `groups` is stored and shown in __repr__ but never
        # used in forward — confirm whether grouped weights were intended.
        self.groups = groups
        # one (in_features x out_features) weight matrix per node
        self.weight = nn.Parameter(torch.FloatTensor(num_weights, in_features, out_features))
        if bias:
            self.bias = nn.Parameter(torch.FloatTensor(1, out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
    def reset_parameters(self):
        # uniform init scaled by 1/sqrt(out_features)
        stdv = 1. / math.sqrt(self.out_features)
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)
    def forward(self, x, adj=None, should_normalize=True):
        # per-node linear transform: 3-D input is (batch, nodes, features),
        # 2-D input is (nodes, features)
        if len(x.shape) == 3:
            support = (x.unsqueeze(-2) @ self.weight[None, :, :].repeat(x.shape[0], 1, 1, 1)).squeeze(-2)
        else:
            support = (x[:, None, :] @ self.weight).squeeze(-2)
        if should_normalize:
            # row-normalize the adjacency before aggregation
            adj = normalize(adj)
            adj = adj.cuda()
        # neighborhood aggregation (hard-wired to CUDA)
        output = adj @ support
        if self.bias is not None:
            return output + self.bias
        else:
            return output
    def __repr__(self):
        return self.__class__.__name__ + ' (' \
               + str(self.in_features) + ' -> ' \
               + str(self.out_features) + ', ' + f'Groups={self.groups}' + ')'
class VisTransformer(nn.Module):
    """Two-layer MLP for visual features: Linear -> dropout -> LeakyReLU, twice."""

    def __init__(self, input_dim=960, output_dim=960, hidden_units=1024, dropout=0.0):
        super(VisTransformer, self).__init__()
        self.dropout = dropout
        self.in_layer = nn.Linear(input_dim, hidden_units)
        self.out_layer = nn.Linear(hidden_units, output_dim)

    def forward(self, x):
        """Map (..., input_dim) features to (..., output_dim)."""
        hidden = F.leaky_relu(
            F.dropout(self.in_layer(x), p=self.dropout, training=self.training))
        return F.leaky_relu(
            F.dropout(self.out_layer(hidden), p=self.dropout, training=self.training))
class GaussianResidualGenerator(nn.Module):
    """Learnable multivariate Gaussian that supplies residual vectors:
    a reparameterized sample in 'train' mode, the mean in 'eval' mode.
    Parameters are created directly on CUDA, so construction requires a GPU.
    """
    def __init__(self, dim=512):
        super(GaussianResidualGenerator, self).__init__()
        self.dim = dim
        self.mean = nn.Parameter(torch.randn(dim).cuda())
        # NOTE(review): the covariance is an unconstrained Parameter; nothing
        # keeps it positive-definite if it is ever optimized — confirm intent.
        self.covariance = nn.Parameter(torch.eye(dim).cuda())
        # NOTE(review): the distribution is built once here, so later updates
        # to mean/covariance are reflected only through the shared Parameter
        # tensors — verify this matches the training setup.
        self.dist = torch.distributions.multivariate_normal.MultivariateNormal(self.mean, self.covariance)
    def forward(self, x, status):
        # `x` is used only to pick the target device of the returned vector.
        if status == 'train':
            # differentiable (rsample) draw from the Gaussian
            ret = self.dist.rsample()
            ret = ret.to(x.device)
            return ret
        elif status == 'eval':
            # deterministic: return the distribution mean
            ret = self.dist.mean
            ret = ret.to(x.device)
            return ret
        # NOTE(review): any other `status` falls through and returns None.
class SepMix(nn.Module):
def __init__(self, **kwargs):
super(SepMix, self).__init__()
self.args = kwargs['args']
self.dropout = kwargs['dropout']
self.primitive_dim = kwargs['primitive_dim']
self.complex_dim = kwargs['complex_dim']
self.att_encodes = kwargs['att_encodes']
self.obj_encodes = kwargs['obj_encodes']
self.att_class_num = kwargs['att_class_num']
self.obj_class_num = kwargs['obj_class_num']
self.seen_mask = kwargs['seen_mask']
self.kq_dim = kwargs.get('kq_dim', 300)
self.vis_mem_blocks = kwargs.get('vis_mem_blocks', 64)
self.code_dim = 128
self.vis_transform = nn.Sequential(
nn.Linear(VIS_BACKBONE_FEAT_DIM, 1024),
nn.LeakyReLU(),
nn.BatchNorm1d(1024),
nn.Linear(1024, self.complex_dim),
nn.LeakyReLU(),
nn.BatchNorm1d(self.complex_dim)
)
self.residual_generator = GaussianResidualGenerator()
if self.att_class_num > 16:
self.lin_att_gcn = GraphConvolution(in_features=self.primitive_dim, out_features=self.complex_dim, num_weights=self.att_class_num+self.obj_class_num)
self.lin_obj_gcn = GraphConvolution(in_features=self.primitive_dim, out_features=self.complex_dim, num_weights=self.att_class_num+self.obj_class_num)
else:
self.lin_att_gcn = GraphConvolution(in_features=self.primitive_dim, out_features=self.complex_dim, num_weights=self.att_class_num+self.obj_class_num)
self.lin_obj_gcn = GraphConvolution(in_features=self.primitive_dim, out_features=self.complex_dim, num_weights=self.att_class_num+self.obj_class_num)
self.lin_att_key = nn.Parameter(torch.Tensor(kwargs['att_class_num'], self.kq_dim))
self.lin_obj_key = nn.Parameter(torch.Tensor(kwargs['obj_class_num'], self.kq_dim))
torch.nn.init.normal_(self.lin_att_key)
torch.nn.init.normal_(self.lin_obj_key)
self.lin_att_query = nn.Parameter(torch.Tensor(kwargs['att_class_num'], self.kq_dim))
self.lin_obj_query = nn.Parameter(torch.Tensor(kwargs['obj_class_num'], self.kq_dim))
torch.nn.init.normal_(self.lin_att_query)
torch.nn.init.normal_(self.lin_obj_query)
self.att_query_transformer = nn.Sequential(
nn.Linear(self.kq_dim, self.kq_dim),
nn.Tanh())
self.obj_query_transformer = nn.Sequential(
nn.Linear(self.kq_dim, self.kq_dim),
nn.Tanh())
self.att_key_transformer = nn.Sequential(
# nn.Linear(self.kq_dim, self.kq_dim),
nn.Tanh())
self.obj_key_transformer = nn.Sequential(
# nn.Linear(self.kq_dim, self.kq_dim),
nn.Tanh())
self.lin_att_values = nn.Parameter(torch.Tensor(kwargs['att_class_num'], self.primitive_dim))
self.lin_obj_values = nn.Parameter(torch.Tensor(kwargs['obj_class_num'], self.primitive_dim))
torch.nn.init.normal_(self.lin_att_values)
torch.nn.init.normal_(self.lin_obj_values)
# visual query, key, values
self.vis_att_transformer = nn.Sequential(
nn.Linear(self.complex_dim, self.complex_dim),
nn.LeakyReLU(),
)
self.vis_obj_transformer = nn.Sequential(
nn.Linear(self.complex_dim, self.complex_dim),
nn.LeakyReLU(),
)
self.att_cls = nn.Sequential(
nn.Linear(self.complex_dim, 128),
nn.LeakyReLU(),
nn.BatchNorm1d(128),
nn.Linear(128, self.att_class_num),
)
self.obj_cls = nn.Sequential(
nn.Linear(self.complex_dim, 128),
nn.LeakyReLU(),
nn.BatchNorm1d(128),
nn.Linear(128, self.obj_class_num),
)
self.gcn_output_merge = nn.Sequential(
nn.Linear(2*self.complex_dim, 512),
nn.LeakyReLU(),
nn.BatchNorm1d(512),
nn.Linear(512, self.complex_dim),
nn.LeakyReLU()
)
def forward(self, im_feat, att_idx, obj_idx, status='train', mask_target=True, **kwargs):
assert att_idx is not None or im_feat is not None, "Parameter error."
output = {}
ignore_img = kwargs.get('ignore_img', False)
if att_idx is not None:
assert self.lin_att_key.shape[0] == self.att_class_num
batch_size = len(att_idx)
att_idx = att_idx.cuda()
obj_idx = obj_idx.cuda()
cat_key = torch.cat([self.lin_att_key, self.lin_obj_key], 0)
cat_values = torch.cat([self.lin_att_values, self.lin_obj_values], dim=0)[None, :, :]
A_cpl_att_ = self.att_key_transformer(self.lin_att_query[att_idx]) @ self.att_query_transformer(cat_key).t() # / np.sqrt(self.att_query.shape[1])
A_cpl_obj_ = self.obj_key_transformer(self.lin_obj_query[obj_idx]) @ self.obj_query_transformer(cat_key).t() # / np.sqrt(self.obj_query.shape[1])
if mask_target:
seen_mask = self.seen_mask.cuda()
A_cpl_att_1_ = A_cpl_att_.masked_fill(~seen_mask[att_idx].cuda(), float('-inf'))
A_cpl_obj_1_ = A_cpl_obj_.masked_fill(~seen_mask[self.att_class_num+obj_idx].cuda(), float('-inf'))
A_cpl_att_1_[torch.arange(batch_size), self.att_class_num+obj_idx] = float('-inf')
A_cpl_obj_1_[torch.arange(batch_size), att_idx] = float('-inf')
else:
A_cpl_att_1_ = A_cpl_att_
A_cpl_obj_1_ = A_cpl_obj_
A_cpl_att_1_ = torch.cat((torch.softmax(A_cpl_att_1_[:, :self.att_class_num], -1), torch.softmax(A_cpl_att_1_[:, self.att_class_num:], -1)), -1)
A_cpl_att_1_ = A_cpl_att_1_.masked_fill(torch.isnan(A_cpl_att_1_), 0.0)
A_cpl_obj_1_ = torch.cat((torch.softmax(A_cpl_obj_1_[:, :self.att_class_num], -1), torch.softmax(A_cpl_obj_1_[:, self.att_class_num:], -1)), -1)
A_cpl_obj_1_ = A_cpl_obj_1_.masked_fill(torch.isnan(A_cpl_obj_1_), 0.0)
A_cpl_att_1 = torch.zeros(
(self.att_class_num+self.obj_class_num, self.att_class_num+self.obj_class_num))[None, :, :].repeat(batch_size, 1, 1).cuda()
A_cpl_obj_1 = torch.zeros(
(self.att_class_num+self.obj_class_num, self.att_class_num+self.obj_class_num))[None, :, :].repeat(batch_size, 1, 1).cuda()
A_cpl_att_1[torch.arange(batch_size), att_idx, :] = A_cpl_att_1_[torch.arange(batch_size)]
A_cpl_obj_1[torch.arange(batch_size), self.att_class_num+obj_idx, :] = A_cpl_obj_1_[torch.arange(batch_size)]
lin_att_values_ = F.leaky_relu(self.lin_att_gcn.forward(cat_values, adj=A_cpl_att_1, should_normalize=False))
lin_obj_values_ = F.leaky_relu(self.lin_obj_gcn.forward(cat_values, adj=A_cpl_obj_1, should_normalize=False))
lin_att_values = lin_att_values_[torch.arange(batch_size), att_idx]
lin_obj_values = lin_obj_values_[torch.arange(batch_size), self.att_class_num+obj_idx]
lin_feat_recs = F.leaky_relu(self.gcn_output_merge(torch.cat([lin_att_values, lin_obj_values], -1)))
lin_att_logits = self.att_cls(lin_att_values)
lin_obj_logits = self.obj_cls(lin_obj_values)
output.update({
'lin_feat_recs': lin_feat_recs,
'lin_att_logits': lin_att_logits, 'lin_obj_logits': lin_obj_logits,
'lin_att_values': lin_att_values, 'lin_obj_values': lin_obj_values,
'att_idx': att_idx, 'obj_idx': obj_idx})
if (im_feat is not None) and (not ignore_img):
im_feat = im_feat.cuda()
if len(im_feat.shape) == 2:
im_feat_transformed = self.vis_transform(im_feat)
else:
assert 'vis_backbone' in kwargs
with torch.no_grad():
im_feat = kwargs['vis_backbone'](im_feat).squeeze()
im_feat_transformed = self.vis_transform(im_feat)
residual = self.residual_generator(im_feat_transformed, status)
im_feat_reduced = im_feat_transformed - residual
im_feat1 = im_feat_reduced
im_feat2 = im_feat_reduced
im_att_feat = self.vis_att_transformer(im_feat1)
im_obj_feat = self.vis_obj_transformer(im_feat2)
im_att_logits = self.att_cls(im_att_feat)
im_obj_logits = self.obj_cls(im_obj_feat)
output.update({
'im_feat': im_feat,
'im_att_feat': im_att_feat, 'im_obj_feat': im_obj_feat,
'im_att_logits': im_att_logits, 'im_obj_logits': im_obj_logits,
'im_att_fake_logits': None, 'im_obj_fake_logits': None})
return output | 12,973 | 41.260586 | 161 | py |
# NOTE(review): the lines that followed here ("Subsets and Splits",
# "No community queries yet", dataset-viewer placeholder text) were non-code
# extraction residue from a web page, not part of this module, and were removed.