code stringlengths 101 5.91M |
|---|
def get_mean_period(frequencies, p_floor, p_ceil, max_p_factor):
    """Return the mean period (1/f) over the frequencies that pass validation.

    Each frequency is checked individually via validate_frequencies(); only
    accepted ones contribute. Returns None when no frequency validates.
    """
    period_sum = 0
    valid_count = 0
    for frequency in frequencies:
        # The validator expects a list, so wrap the single frequency.
        if validate_frequencies([frequency], p_floor, p_ceil, max_p_factor):
            period_sum += 1 / frequency
            valid_count += 1
    if valid_count == 0:
        return None
    return period_sum / valid_count
class Timer():
    """Context manager that measures and prints the wall-clock time of a block."""

    def __init__(self, name):
        # Label shown in the timing report.
        self.name = name

    def __enter__(self):
        # Record the wall-clock start time.
        self.begin = time.time()
        return self

    def __exit__(self, *args):
        # Record the stop time and derive both raw seconds and an H:M:S view.
        self.end = time.time()
        self.elapsed = self.end - self.begin
        self.elapsedH = time.gmtime(self.elapsed)
        formatted = time.strftime('%H:%M:%S', self.elapsedH)
        print('====> [{}] Time: {:7.3f}s or {}'.format(self.name, self.elapsed, formatted))
class ConfiguredInferenceNet(nn.Module):
    """End-to-end evidence-inference network.

    Encodes an article and its Intervention/Comparator/Outcome (ICO) token
    sequences, optionally conditions the article attention on the ICO
    encodings, concatenates all four vectors, and classifies with a softmax.
    """

    def __init__(self, vectorizer, article_encoder, intervention_encoder, comparator_encoder, outcome_encoder, cls_layer):
        super(ConfiguredInferenceNet, self).__init__()
        # Maps token ids <-> strings; only used here for attention diagnostics.
        self.vectorizer = vectorizer
        self.article_encoder = article_encoder
        self.intervention_encoder = intervention_encoder
        self.comparator_encoder = comparator_encoder
        self.outcome_encoder = outcome_encoder
        # Final classification layer over the concatenated encodings.
        self.cls_layer = cls_layer
        self.batch_first = True

    def _encode(self, I_tokens, C_tokens, O_tokens):
        # Each encoder returns a 3-tuple; only the middle (vector) element is kept.
        (_, I_v, _) = self.intervention_encoder(I_tokens)
        (_, C_v, _) = self.comparator_encoder(C_tokens)
        (_, O_v, _) = self.outcome_encoder(O_tokens)
        return (I_v, C_v, O_v)

    def forward(self, article_tokens: PaddedSequence, I_tokens: PaddedSequence, C_tokens: PaddedSequence, O_tokens: PaddedSequence, batch_size: int, debug_attn: bool=False, verbose_attn: bool=False):
        """Return softmax class probabilities for each (article, ICO) instance."""
        if isinstance(article_tokens, PaddedSequence):
            assert all([isinstance(x, PaddedSequence) for x in [I_tokens, C_tokens, O_tokens]])
        else:
            raise ValueError('Got an unexpected type for our input tensor: {}'.format(type(article_tokens)))
        (I_v, C_v, O_v) = self._encode(I_tokens, C_tokens, O_tokens)
        query_v = None
        if self.article_encoder.condition_attention:
            # Condition the article attention on the concatenated ICO encodings.
            query_v = torch.cat([I_v, C_v, O_v], dim=1)
        (_, a_v, attn_weights) = self.article_encoder(article_tokens, query_v_for_attention=query_v)
        if (self.article_encoder.use_attention and verbose_attn):
            self._print_attention_diagnostic(article_tokens, I_tokens, C_tokens, O_tokens, batch_size, attn_weights)
        # Article vector + ICO vectors feed the classifier.
        h = torch.cat([a_v, I_v, C_v, O_v], dim=1)
        raw_out = self.cls_layer(h)
        return F.softmax(raw_out, dim=1)

    def _print_attention_diagnostic(self, article_tokens: PaddedSequence, I_tokens: PaddedSequence, C_tokens: PaddedSequence, O_tokens: PaddedSequence, batch_size: int, attn_weights: torch.Tensor):
        """Print the (up to) 20 highest- and lowest-weighted article tokens per instance."""
        attn_weights = attn_weights.data.cpu().numpy()
        for i in range(batch_size):
            # Restrict to the real (unpadded) length of this article.
            attn_weights_slice = attn_weights[i][:article_tokens.batch_sizes[i].item()].squeeze()
            sorted_idx = np.argsort(attn_weights_slice)
            if (sorted_idx.size == 1):
                # squeeze() collapsed a length-1 article to a scalar; skip it.
                continue
            length = len(attn_weights_slice)
            # Last (up to) 20 indices of the ascending sort = largest weights.
            top_words = [self.vectorizer.idx_to_str[article_tokens.data[i][idx]] for idx in sorted_idx[max((- 20), ((- 1) * length)):]]
            top_words.reverse()
            top_words_weights = [attn_weights_slice[idx] for idx in sorted_idx[max((- 20), ((- 1) * length)):]]
            top_words_weights.reverse()
            bottom_words = [self.vectorizer.idx_to_str[article_tokens.data[i][idx]] for idx in sorted_idx[:min(20, length)]]
            bottom_words.reverse()
            bottom_words_weights = [attn_weights_slice[idx] for idx in sorted_idx[:min(20, length)]]
            bottom_words_weights.reverse()

            def tokens_to_str(tokens):
                return ', '.join([self.vectorizer.idx_to_str[x.item()] for x in tokens])
            print('I, C, O frame:', tokens_to_str(I_tokens.data[i][:I_tokens.batch_sizes[i]]), ';', tokens_to_str(C_tokens.data[i][:C_tokens.batch_sizes[i]]), ':', tokens_to_str(O_tokens.data[i][:O_tokens.batch_sizes[i]]))
            print('top words:', ', '.join(top_words))
            print('weights:', ', '.join((str(x) for x in top_words_weights)))
            print('bottom words:', ', '.join(bottom_words))
            print('weights:', ', '.join((str(x) for x in bottom_words_weights)))

    # NOTE(review): first parameter is `cls` but no @classmethod decorator is
    # visible here — presumably stripped in this copy; confirm against upstream.
    def init_word_vectors(cls, path_to_wvs, vectorizer, use_cuda=USE_CUDA) -> nn.Embedding:
        """Build a frozen nn.Embedding from pretrained word2vec vectors.

        Tokens missing from the word-vector vocabulary get the mean vector;
        the PAD row is zeroed and registered as padding_idx.
        """
        WVs = KeyedVectors.load_word2vec_format(path_to_wvs, binary=True)
        E = np.zeros((len(vectorizer.str_to_idx), WVs.vector_size))
        WV_matrix = np.matrix([WVs[v] for v in WVs.vocab.keys()])
        mean_vector = np.mean(WV_matrix, axis=0)
        for (idx, token) in enumerate(vectorizer.idx_to_str):
            if (token in WVs):
                E[idx] = WVs[token]
            else:
                # Out-of-vocabulary tokens fall back to the corpus mean vector.
                E[idx] = mean_vector
        padding_idx = int(vectorizer.str_to_idx[SimpleInferenceVectorizer.PAD])
        E[padding_idx] = torch.zeros(E.shape[1])
        embedding = nn.Embedding(E.shape[0], E.shape[1], padding_idx=padding_idx)
        embedding.weight.data.copy_(torch.from_numpy(E))
        # Keep the pretrained vectors fixed during training.
        embedding.weight.requires_grad = False
        if use_cuda:
            embedding = embedding.cuda()
        return embedding
class SparseStage(nn.Module):
    """One stage of a SparseNet: an optional transition block (which halves the
    channel count), a chain of SparseBlocks, and an exponential-fetch
    aggregation of the collected outputs."""

    def __init__(self, in_channels, channels_per_stage, growth_rate, dropout_rate, do_transition):
        super(SparseStage, self).__init__()
        self.do_transition = do_transition
        if self.do_transition:
            # Transition compresses channels 2x before this stage's blocks.
            self.trans = TransitionBlock(in_channels=in_channels, out_channels=(in_channels // 2))
            in_channels = (in_channels // 2)
        self.blocks = nn.Sequential()
        for (i, out_channels) in enumerate(channels_per_stage):
            # Each block emits growth_rate channels; channels_per_stage supplies
            # the in_channels of the NEXT block — presumably pre-computed to
            # account for the exponential-fetch concatenation; confirm upstream.
            self.blocks.add_module('block{}'.format((i + 1)), SparseBlock(in_channels=in_channels, out_channels=growth_rate, dropout_rate=dropout_rate))
            in_channels = out_channels

    def forward(self, x):
        if self.do_transition:
            x = self.trans(x)
        outs = [x]
        # NOTE(review): `x` is not updated inside this loop, so every block sees
        # the stage input rather than its predecessor's concatenated features —
        # confirm against the reference SparseNet implementation.
        for block in self.blocks._modules.values():
            y = block(x)
            outs.append(y)
        # Exponentially-spaced selection of earlier outputs, concatenated on
        # the channel dimension.
        flt_outs = sparsenet_exponential_fetch(outs)
        x = torch.cat(tuple(flt_outs), dim=1)
        return x
def make_delta(base_model_path, target_model_path, delta_path):
    """Compute a weight delta (target - base) between two causal LMs and save it.

    The target model's parameters are modified in place to hold the delta,
    then saved to `delta_path` (optionally pushing to the Hugging Face Hub).

    NOTE(review): reads the module-level `args` (for `hub_repo_id`) rather than
    taking it as a parameter — presumably set by a CLI entry point; confirm.
    """
    print(f'Loading the base model from {base_model_path}')
    base = AutoModelForCausalLM.from_pretrained(base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    print(f'Loading the target model from {target_model_path}')
    target = AutoModelForCausalLM.from_pretrained(target_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    print('Calculating the delta')
    for (name, param) in tqdm(target.state_dict().items(), desc='Calculating delta'):
        assert (name in base.state_dict())
        # In-place subtraction: `param` aliases the target model's storage, so
        # this turns the target model itself into the delta.
        param.data -= base.state_dict()[name]
    print(f'Saving the delta to {delta_path}')
    if args.hub_repo_id:
        kwargs = {'push_to_hub': True, 'repo_id': args.hub_repo_id}
    else:
        kwargs = {}
    target.save_pretrained(delta_path, **kwargs)
def create_region_from_mask(mask, join_labels: tuple):
    """Return a binary uint8 mask that is 1 wherever `mask` holds any of the
    labels in `join_labels` and 0 elsewhere.

    Replaces the per-label Python loop with a single vectorized np.isin pass;
    output shape matches `mask`, dtype is uint8, and an empty `join_labels`
    yields an all-zero mask (same as the original loop).
    """
    return np.isin(mask, join_labels).astype(np.uint8)
class LinearLASSO(torch.nn.Linear, BaseARD, SparsityStats):
    """Linear layer with an L1 (LASSO) penalty and sparsity reporting."""

    def penalty(self):
        # Elementwise |w| — the L1 regularization term.
        return self.weight.abs()

    def relevance(self, *, threshold, **kwargs):
        """Boolean mask of weights whose log-magnitude reaches `threshold`."""
        with torch.no_grad():
            # Epsilon keeps log() finite for exactly-zero weights.
            log_magnitude = torch.log(self.weight.abs() + 1e-20)
            return log_magnitude >= threshold

    def sparsity(self, *, threshold, **kwargs):
        """Return [(weight tensor id, count of non-relevant (prunable) weights)]."""
        relevant_count = float(self.relevance(threshold=threshold).sum().item())
        return [(id(self.weight), self.weight.numel() - relevant_count)]
class SqlOption(CommandLineOption):
    """Command-line option that attaches a SQL observer to an experiment run."""
    # Option is only offered when sqlalchemy is importable.
    __depends_on__ = 'sqlalchemy'
    # Metavar and help text shown in the CLI usage.
    arg = 'DB_URL'
    arg_description = 'The typical form is: dialect://username::port/database'

    def apply(cls, args, run):
        # NOTE(review): first parameter is named `cls` but no @classmethod
        # decorator is visible here — presumably stripped in this copy; confirm
        # against the CommandLineOption base-class contract.
        run.observers.append(SqlObserver.create(args))
@pytest.mark.parametrize('id, name, demodata', [pytest.param(1, 'cities', DEMODATA_CITIES, id='demodata_cities'), pytest.param(2, 'countries', DEMODATA_COUNTRIES, id='demodata_countries')])
def test_lookup_data_demo_data(id, name, demodata):
    """Each bundled demo dataset must pass LookupData validation with no errors.

    Fix: the decorator line had been truncated to a bare `.parametrize(...)`
    (a SyntaxError at module level); restored the `@pytest.mark` prefix that
    the `pytest.param(..., id=...)` usage implies.
    """
    lookup = LookupData(name=name, data=load_data_from_file(demodata))
    validation = lookup.validate()
    # validate() returns a dict report; zero errors means the data is clean.
    assert (validation['error_count'] == 0)
    assert isinstance(validation, dict)
class Adafactor(metaclass=DummyObject):
    """Import-time stand-in for the real Adafactor optimizer, generated for
    environments missing a required backend (transformers dummy-object pattern)."""
    # Backends that must be installed before the real class can be used.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Raises an informative error when torch is unavailable.
        requires_backends(self, ['torch'])
class ONNXRTBertDataset():
    """GLUE-style dataset wrapper for ONNX Runtime BERT-family models.

    Loads (and caches) task examples with a Hugging Face tokenizer and yields
    (model_inputs, label) pairs as numpy arrays, shaped to match the ONNX
    graph's declared inputs.
    """

    def __init__(self, model, data_dir, model_name_or_path, max_seq_length=128, do_lower_case=True, task='mrpc', model_type='bert', dynamic_length=False, evaluate=True, transform=None, filter=None):
        # Input names read from the ONNX graph so __getitem__ can return exactly
        # as many arrays as the model expects.
        self.inputs = [inp.name for inp in onnx.load(model).graph.input]
        task = task.lower()
        model_type = model_type.lower()
        assert (task in ['mrpc', 'qqp', 'qnli', 'rte', 'sts-b', 'cola', 'mnli', 'wnli', 'sst-2']), 'Unsupported task type'
        assert (model_type in ['distilbert', 'bert', 'mobilebert', 'roberta']), 'Unsupported model type'
        self.dynamic_length = dynamic_length
        self.model_type = model_type
        self.max_seq_length = max_seq_length
        tokenizer = transformers.AutoTokenizer.from_pretrained(model_name_or_path, do_lower_case=do_lower_case)
        # Tokenized features, cached on disk by the helper between runs.
        self.dataset = load_and_cache_examples(data_dir, model_name_or_path, max_seq_length, task, model_type, tokenizer, evaluate)

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        # Convert any torch tensors to numpy; presumably the cached example is
        # (input fields..., label) with the label last — confirm with the helper.
        batch = tuple(((t.detach().cpu().numpy() if (not isinstance(t, np.ndarray)) else t) for t in self.dataset[index]))
        return (batch[:len(self.inputs)], batch[(- 1)])
class SchemaMapAttentionDecoderOutput(namedtuple('DecoderOutput', [
        'logits',
        'predicted_ids',
        'cell_output',
        'attention_scores',
        'attention_context',
        'schema_attention_scores',
        'schema_attention_context',
        'schema_map_attention_scores',
        'schema_map_attention_context'])):
    """Immutable record of one decoder step, carrying the standard attention
    fields plus schema attention and schema-map attention results."""
class Normalize(transforms.Normalize):
    """torchvision Normalize exposed through __call__ instead of forward().

    NOTE(review): `F` is presumably torchvision.transforms.functional, whose
    `normalize` takes (tensor, mean, std, inplace) — confirm the import.
    """

    def __init__(self, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), inplace=False):
        super(Normalize, self).__init__(mean, std, inplace)

    def __call__(self, x):
        # Channel-wise (x - mean) / std; mutates x when inplace=True.
        return F.normalize(x, self.mean, self.std, self.inplace)
def split_data(data: MoleculeDataset, split_type: str='random', sizes: Tuple[(float, float, float)]=(0.8, 0.1, 0.1), seed: int=0, args: Namespace=None, logger: Logger=None) -> Tuple[(MoleculeDataset, MoleculeDataset, MoleculeDataset)]:
    """Split `data` into (train, val, test) MoleculeDatasets.

    split_type:
      - 'crossval': indices loaded from pickles under args.crossval_index_dir.
      - 'index_predetermined': indices taken directly from args.crossval_index_sets.
      - 'predetermined': folds loaded from args.folds_file; test (and optionally
        val) come from fixed folds, the rest becomes train.
      - 'scaffold_balanced': delegates to scaffold_split.
      - 'random': shuffle then slice by `sizes` proportions.

    Raises ValueError for unknown split types.
    """
    assert ((len(sizes) == 3) and (sum(sizes) == 1))
    if (args is not None):
        (folds_file, val_fold_index, test_fold_index) = (args.folds_file, args.val_fold_index, args.test_fold_index)
    else:
        folds_file = val_fold_index = test_fold_index = None
    if (split_type == 'crossval'):
        index_set = args.crossval_index_sets[args.seed]
        data_split = []
        for split in range(3):
            # Each of the 3 splits is a list of pickle files of indices.
            split_indices = []
            for index in index_set[split]:
                with open(os.path.join(args.crossval_index_dir, f'{index}.pkl'), 'rb') as rf:
                    split_indices.extend(pickle.load(rf))
            data_split.append([data[i] for i in split_indices])
        (train, val, test) = tuple(data_split)
        return (MoleculeDataset(train), MoleculeDataset(val), MoleculeDataset(test))
    elif (split_type == 'index_predetermined'):
        split_indices = args.crossval_index_sets[args.seed]
        assert (len(split_indices) == 3)
        data_split = []
        for split in range(3):
            data_split.append([data[i] for i in split_indices[split]])
        (train, val, test) = tuple(data_split)
        return (MoleculeDataset(train), MoleculeDataset(val), MoleculeDataset(test))
    elif (split_type == 'predetermined'):
        if (not val_fold_index):
            # Without a dedicated validation fold, the test fraction must be 0
            # (val is carved out of train below instead).
            assert (sizes[2] == 0)
        assert (folds_file is not None)
        assert (test_fold_index is not None)
        try:
            with open(folds_file, 'rb') as f:
                all_fold_indices = pickle.load(f)
        except UnicodeDecodeError:
            # Fall back for pickles written under Python 2.
            with open(folds_file, 'rb') as f:
                all_fold_indices = pickle.load(f, encoding='latin1')
        log_scaffold_stats(data, all_fold_indices, logger=logger)
        folds = [[data[i] for i in fold_indices] for fold_indices in all_fold_indices]
        test = folds[test_fold_index]
        if (val_fold_index is not None):
            val = folds[val_fold_index]
        # All folds that are neither test nor (if set) val form the train pool.
        train_val = []
        for i in range(len(folds)):
            if ((i != test_fold_index) and ((val_fold_index is None) or (i != val_fold_index))):
                train_val.extend(folds[i])
        if (val_fold_index is not None):
            train = train_val
        else:
            # No val fold given: split the pool into train/val by sizes[0].
            random.seed(seed)
            random.shuffle(train_val)
            train_size = int((sizes[0] * len(train_val)))
            train = train_val[:train_size]
            val = train_val[train_size:]
        return (MoleculeDataset(train), MoleculeDataset(val), MoleculeDataset(test))
    elif (split_type == 'scaffold_balanced'):
        return scaffold_split(data, sizes=sizes, balanced=True, seed=seed, logger=logger)
    elif (split_type == 'random'):
        data.shuffle(seed=seed)
        train_size = int((sizes[0] * len(data)))
        train_val_size = int(((sizes[0] + sizes[1]) * len(data)))
        train = data[:train_size]
        val = data[train_size:train_val_size]
        test = data[train_val_size:]
        return (MoleculeDataset(train), MoleculeDataset(val), MoleculeDataset(test))
    else:
        raise ValueError(f'split_type "{split_type}" not supported.')
class NullEvaluator(DatasetEvaluator):
    """Evaluator that does nothing — lets callers disable evaluation while
    keeping the evaluation loop's structure (and its cross-process sync)."""

    def reset(self):
        return

    def process(self, inputs: List[Dict], outputs: Dict):
        return

    def evaluate(self):
        # Keep distributed workers in lock-step even though nothing is computed.
        synchronize()
        return
def create_pipeline():
    """Build a DepthAI pipeline that streams ISP-scaled RGB frames and an
    RGB-aligned stereo depth map, both at 10 FPS, on 'rgb'/'depth' streams."""
    fps = 10
    monoResolution = dai.MonoCameraProperties.SensorResolution.THE_720_P
    pipeline = dai.Pipeline()
    # NOTE(review): queueNames is built but never returned — presumably the
    # caller hard-codes the same stream names; confirm.
    queueNames = []
    # Nodes: color camera, stereo mono pair, depth, and two host output links.
    camRgb = pipeline.create(dai.node.ColorCamera)
    left = pipeline.create(dai.node.MonoCamera)
    right = pipeline.create(dai.node.MonoCamera)
    stereo = pipeline.create(dai.node.StereoDepth)
    rgbOut = pipeline.create(dai.node.XLinkOut)
    depthOut = pipeline.create(dai.node.XLinkOut)
    rgbOut.setStreamName('rgb')
    queueNames.append('rgb')
    depthOut.setStreamName('depth')
    queueNames.append('depth')
    camRgb.setBoardSocket(dai.CameraBoardSocket.RGB)
    camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
    camRgb.setFps(fps)
    # Downscale 1080p by 2/3 so the RGB output matches the 720p depth size.
    camRgb.setIspScale(2, 3)
    camRgb.initialControl.setManualFocus(130)
    left.setResolution(monoResolution)
    left.setBoardSocket(dai.CameraBoardSocket.LEFT)
    left.setFps(fps)
    right.setResolution(monoResolution)
    right.setBoardSocket(dai.CameraBoardSocket.RIGHT)
    right.setFps(fps)
    stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
    stereo.initialConfig.setMedianFilter(dai.MedianFilter.KERNEL_7x7)
    # Left-right check + extended disparity: better occlusion handling and a
    # closer minimum depth.
    stereo.setLeftRightCheck(True)
    stereo.setExtendedDisparity(True)
    # Reproject the depth map into the RGB camera's viewpoint.
    stereo.setDepthAlign(dai.CameraBoardSocket.RGB)
    camRgb.isp.link(rgbOut.input)
    left.out.link(stereo.left)
    right.out.link(stereo.right)
    stereo.depth.link(depthOut.input)
    return pipeline
class DownTransition(nn.Module):
    """V-Net style down-transition: a strided 3D conv that doubles channels and
    halves spatial resolution, followed by nConvs conv blocks and an additive
    skip connection."""

    def __init__(self, inChans, nConvs, elu, dropout=False):
        super(DownTransition, self).__init__()
        outChans = (2 * inChans)
        # Stride-2 conv: downsample each spatial dim while doubling channels.
        self.down_conv = nn.Conv3d(inChans, outChans, kernel_size=2, stride=2)
        self.bn1 = torch.nn.BatchNorm3d(outChans)
        # Identity unless dropout is requested below.
        self.do1 = passthrough
        self.relu1 = ELUCons(elu, outChans)
        self.relu2 = ELUCons(elu, outChans)
        if dropout:
            self.do1 = nn.Dropout3d()
        self.ops = _make_nConv(outChans, nConvs, elu)

    def forward(self, x):
        down = self.relu1(self.bn1(self.down_conv(x)))
        out = self.do1(down)
        out = self.ops(out)
        # Residual connection around the conv stack.
        out = self.relu2(torch.add(out, down))
        return out
class FairseqMultiModel(BaseFairseqModel):
    """Container holding one encoder-decoder model per language pair, addressed
    by key (multilingual translation)."""

    def __init__(self, encoders, decoders):
        super().__init__()
        # Both dicts must be keyed by the same set of language-pair keys.
        assert (encoders.keys() == decoders.keys())
        self.keys = list(encoders.keys())
        for key in self.keys:
            assert isinstance(encoders[key], FairseqEncoder)
            assert isinstance(decoders[key], FairseqDecoder)
        self.models = nn.ModuleDict({key: FairseqEncoderDecoderModel(encoders[key], decoders[key]) for key in self.keys})

    @staticmethod
    def build_shared_embeddings(dicts: Dict[(str, Dictionary)], langs: List[str], embed_dim: int, build_embedding: callable, pretrained_embed_path: Optional[str]=None):
        """Build an embedding matrix shared across `langs`.

        Requires every language in `langs` to use the same (joined) dictionary.
        Fix: restored the @staticmethod decorator — the function takes no
        self/cls parameter and was uncallable as a plain method.
        """
        shared_dict = dicts[langs[0]]
        if any(((dicts[lang] != shared_dict) for lang in langs)):
            raise ValueError('--share-*-embeddings requires a joined dictionary: --share-encoder-embeddings requires a joined source dictionary, --share-decoder-embeddings requires a joined target dictionary, and --share-all-embeddings requires a joint source + target dictionary.')
        return build_embedding(shared_dict, embed_dim, pretrained_embed_path)

    def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs):
        raise NotImplementedError

    def max_positions(self):
        """Per-key dict of (encoder max positions, decoder max positions)."""
        return {key: (self.models[key].encoder.max_positions(), self.models[key].decoder.max_positions()) for key in self.keys}

    def max_decoder_positions(self):
        # The tightest decoder limit across all sub-models.
        return min((model.decoder.max_positions() for model in self.models.values()))

    @property
    def encoder(self):
        # Fix: restored @property — forward_decoder() below reads self.decoder
        # as an attribute and then calls the result, which fails when these are
        # plain zero-argument methods.
        return self.models[self.keys[0]].encoder

    @property
    def decoder(self):
        return self.models[self.keys[0]].decoder

    def forward_decoder(self, prev_output_tokens, **kwargs):
        return self.decoder(prev_output_tokens, **kwargs)

    def load_state_dict(self, state_dict, strict=True, model_cfg=None, args: Optional[Namespace]=None):
        """Copy parameters, supporting pruned checkpoints and legacy `args` configs."""
        if ((model_cfg is None) and (args is not None)):
            logger.warn("using 'args' is deprecated, please update your code to use dataclass config")
            model_cfg = convert_namespace_to_omegaconf(args).model
        self.upgrade_state_dict(state_dict)
        # Drop parameters that the (possibly pruned) config no longer declares.
        new_state_dict = prune_state_dict(state_dict, model_cfg)
        return super().load_state_dict(new_state_dict, strict)
class AnimateDiffPipeline(metaclass=DummyObject):
    """Import-time stand-in for diffusers' AnimateDiffPipeline used when torch
    or transformers is missing (dummy-object pattern)."""
    # Backends that must be installed before the real class can be used.
    _backends = ['torch', 'transformers']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers'])

    def from_config(cls, *args, **kwargs):
        # NOTE(review): first parameter is `cls` but no @classmethod decorator
        # is visible — upstream diffusers declares these as classmethods;
        # presumably stripped in this copy. Confirm before relying on them.
        requires_backends(cls, ['torch', 'transformers'])

    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers'])
class StreamingEpochBatchIterator(EpochBatchIterating):
    """Epoch-based batch iterator over a streaming (torch IterableDataset).

    Wraps torch's DataLoader, tracks the current (1-based) epoch, and exposes
    state_dict/load_state_dict so training can resume from a checkpoint.
    """

    def __init__(self, dataset, max_sentences=1, collate_fn=None, epoch=1, num_workers=0, buffer_size=0, timeout=0, persistent_workers=True):
        assert isinstance(dataset, torch.utils.data.IterableDataset)
        self.dataset = dataset
        self.max_sentences = max_sentences  # batch size, in sentences
        self.collate_fn = collate_fn
        self.epoch = max(epoch, 1)  # epochs are 1-based
        self.num_workers = num_workers
        # persistent_workers only applies when worker processes exist.
        self.persistent_workers = (persistent_workers and (num_workers > 0))
        # Cap prefetch buffering to bound memory use.
        self.buffer_size = min(buffer_size, 20)
        self.timeout = timeout
        self._current_epoch_iterator = None

    @property
    def next_epoch_idx(self):
        """Index of the epoch that next_epoch_itr() will start.

        Fix: restored the @property decorator — next_epoch_itr() reads
        `self.next_epoch_idx` without calling it, which would otherwise
        assign the bound method object to self.epoch.
        """
        if ((self._current_epoch_iterator is not None) and self.end_of_epoch()):
            return (self.epoch + 1)
        else:
            return self.epoch

    def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True):
        """Advance to the next epoch and return a fresh iterator over it."""
        self.epoch = self.next_epoch_idx
        if (set_dataset_epoch and hasattr(self.dataset, 'set_epoch')):
            # Let epoch-aware datasets reshuffle/reshard themselves.
            self.dataset.set_epoch(self.epoch)
        self._current_epoch_iterator = self._get_iterator_for_epoch(self.epoch, shuffle)
        return self._current_epoch_iterator

    def end_of_epoch(self) -> bool:
        return (not self._current_epoch_iterator.has_next())

    def iterations_in_epoch(self) -> int:
        # Number of batches consumed so far in the current epoch.
        if (self._current_epoch_iterator is not None):
            return self._current_epoch_iterator.n
        return 0

    def state_dict(self):
        return {'epoch': self.epoch}

    def load_state_dict(self, state_dict):
        self.epoch = state_dict['epoch']

    def _get_iterator_for_epoch(self, epoch, shuffle, offset=0):
        if (self.num_workers > 0):
            # Silence noisy semaphore-tracker warnings from worker processes.
            os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
        worker_init_fn = getattr(self.dataset, 'worker_init_fn', None)
        itr = torch.utils.data.DataLoader(self.dataset, batch_size=self.max_sentences, collate_fn=self.collate_fn, num_workers=self.num_workers, timeout=self.timeout, worker_init_fn=worker_init_fn, pin_memory=True, persistent_workers=self.persistent_workers)
        if (self.buffer_size > 0):
            itr = BufferedIterator(self.buffer_size, itr)
        # CountingIterator tracks .n for iterations_in_epoch().
        itr = CountingIterator(itr, start=offset)
        return itr
class HumanoidEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """Gym MuJoCo Humanoid: rewards forward center-of-mass velocity plus an
    alive bonus, penalizes control effort and external-contact forces, and
    terminates when the torso height leaves [1.0, 2.0]."""

    def __init__(self):
        # 'humanoid.xml' model, 5 simulation substeps per environment step.
        mujoco_env.MujocoEnv.__init__(self, 'humanoid.xml', 5)
        utils.EzPickle.__init__(self)

    def _get_obs(self):
        # Drop the root x/y position (qpos[:2]) so observations are
        # translation-invariant; include velocities, inertias and forces.
        data = self.sim.data
        return np.concatenate([data.qpos.flat[2:], data.qvel.flat, data.cinert.flat, data.cvel.flat, data.qfrc_actuator.flat, data.cfrc_ext.flat])

    def step(self, a):
        pos_before = mass_center(self.model, self.sim)
        self.do_simulation(a, self.frame_skip)
        pos_after = mass_center(self.model, self.sim)
        alive_bonus = 5.0
        data = self.sim.data
        # Forward progress of the center of mass, per simulated second.
        lin_vel_cost = ((0.25 * (pos_after - pos_before)) / self.model.opt.timestep)
        quad_ctrl_cost = (0.1 * np.square(data.ctrl).sum())
        # Contact-force penalty, clipped so hard impacts can't dominate.
        quad_impact_cost = (5e-07 * np.square(data.cfrc_ext).sum())
        quad_impact_cost = min(quad_impact_cost, 10)
        reward = (((lin_vel_cost - quad_ctrl_cost) - quad_impact_cost) + alive_bonus)
        qpos = self.sim.data.qpos
        # Episode ends when torso height (qpos[2]) leaves the healthy band.
        done = bool(((qpos[2] < 1.0) or (qpos[2] > 2.0)))
        return (self._get_obs(), reward, done, dict(reward_linvel=lin_vel_cost, reward_quadctrl=(- quad_ctrl_cost), reward_alive=alive_bonus, reward_impact=(- quad_impact_cost)))

    def reset_model(self):
        # Small uniform noise around the initial pose and velocities.
        c = 0.01
        self.set_state((self.init_qpos + self.np_random.uniform(low=(- c), high=c, size=self.model.nq)), (self.init_qvel + self.np_random.uniform(low=(- c), high=c, size=self.model.nv)))
        return self._get_obs()

    def viewer_setup(self):
        # Track the torso body with a slightly elevated, zoomed-out camera.
        self.viewer.cam.trackbodyid = 1
        self.viewer.cam.distance = (self.model.stat.extent * 1.0)
        self.viewer.cam.lookat[2] += 0.8
        self.viewer.cam.elevation = (- 20)
class CodeVersion():
    """Records the git HEAD state of tracked modules for experiment provenance."""

    def __init__(self):
        # Map "<module>_git" -> {'commit', 'head_ref'} dict, or None when the
        # module/repo state cannot be determined.
        self.versions = {'mdir_git': self.git_head_state('mdir')}

    @staticmethod
    def git_head_state(module_name):
        """Return {'commit', 'head_ref'} for the module's git repo, or None.

        Fix: the original defined `module_name` as the first parameter yet
        invoked this as a bound method (`self.git_head_state('mdir')`), which
        always raised TypeError; it is a @staticmethod.
        """
        # Module not imported, or has no backing file (builtin/namespace).
        if (not hasattr(sys.modules.get(module_name, None), '__file__')):
            return None
        try:
            # Assume <repo>/<package>/module.py, so .git sits two levels up.
            git_path = (Path(sys.modules[module_name].__file__).parent.parent / '.git')
            with (git_path / 'HEAD').open() as handle:
                head_content = handle.read().strip()
            if head_content.startswith('ref:'):
                # Symbolic ref: resolve the branch file to a commit hash.
                head_ref = head_content[len('ref:'):].strip()
                with (git_path / head_ref).open() as handle:
                    commit = handle.read().strip()
                return {'commit': commit, 'head_ref': head_ref}
            # Detached HEAD: the file holds the commit hash directly.
            return {'commit': head_content, 'head_ref': None}
        except FileNotFoundError:
            # Not a git checkout (or packed refs layout not handled here).
            return None
def parse_args():
    """Parse command-line arguments for testing/evaluating an mmseg model."""
    cli = argparse.ArgumentParser(description='mmseg test (and eval) a model')
    # Required positional inputs.
    cli.add_argument('config', help='test config file path')
    cli.add_argument('checkpoint', help='checkpoint file')
    cli.add_argument('--aug-test', action='store_true', help='Use Flip and Multi scale aug')
    # Result output and formatting.
    cli.add_argument('--out', help='output result file in pickle format')
    cli.add_argument('--format-only', action='store_true', help='Format the output results without perform evaluation. It isuseful when you want to format the result to a specific format and submit it to the test server')
    cli.add_argument('--eval', type=str, nargs='+', help='evaluation metrics, which depends on the dataset, e.g., "mIoU" for generic datasets, and "cityscapes" for Cityscapes')
    # Visualization.
    cli.add_argument('--show', action='store_true', help='show results')
    cli.add_argument('--show-dir', help='directory where painted images will be saved')
    # Distributed result collection.
    cli.add_argument('--gpu-collect', action='store_true', help='whether to use gpu to collect results.')
    cli.add_argument('--tmpdir', help='tmp directory used for collecting results from multiple workers, available when gpu_collect is not specified')
    # Free-form config overrides.
    cli.add_argument('--options', nargs='+', action=DictAction, help='custom options')
    cli.add_argument('--eval-options', nargs='+', action=DictAction, help='custom options for evaluation')
    cli.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
    cli.add_argument('--local_rank', type=int, default=0)
    parsed = cli.parse_args()
    # torch.distributed.launch passes --local_rank; mirror it into the env var
    # downstream code reads.
    if ('LOCAL_RANK' not in os.environ):
        os.environ['LOCAL_RANK'] = str(parsed.local_rank)
    return parsed
_config
# NOTE(review): the bare `_config` above looks like the residue of a stripped
# decorator line (e.g. a sacred-style config registration); as written it is a
# NameError unless `_config` is defined elsewhere — confirm against upstream.
def bsp_small():
    # Config-function idiom: `cfg` is meant to be harvested from the local
    # namespace by the experiment framework (presumably via the missing
    # decorator); the function intentionally returns nothing.
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'base_class': 'GenericSidetuneNetwork', 'base_kwargs': {'base_kwargs': {'bsp': True, 'period': 3}, 'side_kwargs': {'bsp': True, 'period': 3}}}}}
class PDTBEval(object):
    """SentEval-style transfer task: PDTB discourse-relation classification.

    Loads train/valid/test tab-separated files (label\\targ1\\targ2), encodes
    argument pairs with a caller-provided batcher, and trains a SplitClassifier
    on [u, v, u*v, |u-v|] pair features.
    """

    def __init__(self, task_path, seed=1111):
        self.seed = seed
        logging.debug('***** Transfer task : PDTB classification, task path: {} *****\n\n'.format(task_path))
        train = self.loadFile(os.path.join(task_path, 'train.txt'))
        valid = self.loadFile(os.path.join(task_path, 'valid.txt'))
        test = self.loadFile(os.path.join(task_path, 'test.txt'))
        self.data = {'train': train, 'valid': valid, 'test': test}
        # Label vocabulary: one label name per line.
        self.labelset = []
        with open(os.path.join(task_path, 'labelset.txt')) as fin:
            for line in fin:
                self.labelset.append(line.strip())
        self.nclasses = len(self.labelset)

    def do_prepare(self, params, prepare):
        # Hand every sentence from both argument slots of all splits to the
        # caller's `prepare` hook (e.g. for vocabulary building).
        samples = (([sent for sents in self.data['train'][:2] for sent in sents] + [sent for sents in self.data['valid'][:2] for sent in sents]) + [sent for sents in self.data['test'][:2] for sent in sents])
        return prepare(params, samples)

    def loadFile(self, fpath):
        """Parse one split file into (arg1 token lists, arg2 token lists, int labels)."""
        (input1, input2, labels) = ([], [], [])
        with io.open(fpath, 'r', encoding='utf-8') as f:
            for line in f:
                # Line format: label \t sentence1 \t sentence2
                line = line.strip().split('\t')
                input1.append(line[1].split())
                input2.append(line[2].split())
                labels.append(int(line[0]))
        logging.debug('Loaded {} instances\n'.format(len(labels)))
        return (input1, input2, labels)

    def run(self, params, batcher):
        """Encode all splits, train/evaluate the classifier, return accuracies."""
        (self.X, self.y) = ({}, {})
        for key in self.data:
            if (key not in self.X):
                self.X[key] = []
            if (key not in self.y):
                self.y[key] = []
            (input1, input2, mylabels) = self.data[key]
            enc_input = []
            n_labels = len(mylabels)
            # Encode both argument slots in aligned mini-batches.
            for ii in range(0, n_labels, params.batch_size):
                batch1 = input1[ii:(ii + params.batch_size)]
                batch2 = input2[ii:(ii + params.batch_size)]
                if ((len(batch1) == len(batch2)) and (len(batch1) > 0)):
                    enc1 = batcher(params, batch1)
                    enc2 = batcher(params, batch2)
                    # Standard sentence-pair features: [u, v, u*v, |u-v|].
                    enc_input.append(np.hstack((enc1, enc2, (enc1 * enc2), np.abs((enc1 - enc2)))))
                if (((ii * params.batch_size) % (20000 * params.batch_size)) == 0):
                    logging.info(('PROGRESS (encoding): %.2f%%' % ((100 * ii) / n_labels)))
            self.X[key] = np.vstack(enc_input)
            self.y[key] = np.array(mylabels)
            logging.info('encoding X to be: {}'.format(self.X[key].shape))
        config = {'nclasses': self.nclasses, 'seed': self.seed, 'usepytorch': params.usepytorch, 'cudaEfficient': True, 'nhid': params.nhid, 'noreg': True}
        config_classifier = copy.deepcopy(params.classifier)
        # Short training schedule for the probing classifier.
        config_classifier['max_epoch'] = 15
        config_classifier['epoch_size'] = 1
        config['classifier'] = config_classifier
        clf = SplitClassifier(self.X, self.y, config)
        (devacc, testacc) = clf.run()
        logging.debug('Dev acc : {0} Test acc : {1} for PDTB\n'.format(devacc, testacc))
        return {'devacc': devacc, 'acc': testacc, 'ndev': len(self.data['valid'][0]), 'ntest': len(self.data['test'][0])}
class RRS(nn.Module):
    """Encoder-decoder wrapper pairing an EncoderModel and DecoderModel whose
    vocabulary sizes are taken from the dataloader's src/tgt tokenizers."""

    def __init__(self, encoder, decoder, dl, **kwargs):
        super().__init__()
        # Vocab sizes are injected into the configs from the dataset tokenizers.
        encoder.vocab_size = dl.dataset.src.tokenizer.vocab_size
        self.enc = EncoderModel(encoder)
        decoder.vocab_size = dl.dataset.tgt.tokenizer.vocab_size
        self.dec = DecoderModel(decoder)
        self.eval_func = evaluation
        # NOTE(review): reads `dl.dataset.tgt_tokenizer` while the vocab size
        # above uses `dl.dataset.tgt.tokenizer` — confirm both attributes exist.
        self.tokenizer = dl.dataset.tgt_tokenizer

    def forward(self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, encoder_outputs=None, encoder_attention_mask=None, epoch=None, iteration=None, **kwargs):
        """Run the encoder (unless precomputed outputs are given), then the decoder."""
        if torch.cuda.is_available():
            input_ids = input_ids.cuda()
            attention_mask = attention_mask.cuda()
        if (encoder_outputs is None):
            (encoder_outputs, encoder_attention_mask) = self.encode(input_ids, attention_mask, **kwargs)
        out = self.dec(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, encoder_attention_mask=encoder_attention_mask, **kwargs)
        return out

    def encode(self, input_ids, attention_mask, **kwargs):
        # Returns (last hidden states, the unchanged attention mask).
        encoder_outputs = self.enc(input_ids, attention_mask, return_dict=True)
        return (encoder_outputs.last_hidden_state, attention_mask)

    def __repr__(self):
        s = 'model: RRS\n'
        s += (('(enc):' + str(self.enc)) + '\n')
        s += (('(dec):' + str(self.dec)) + '\n')
        s += '{}\n'.format(get_n_params(self))
        return s
class LIRCMOP3(LIRCMOP1):
    """LIR-CMOP3 constrained multi-objective test problem: the two band
    constraints of the LIR-CMOP family plus a periodic sine constraint."""

    def __init__(self, number_of_variables: int=30):
        super(LIRCMOP3, self).__init__(number_of_variables)

    def number_of_constraints(self) -> int:
        return 3

    def evaluate_constraints(self, solution: FloatSolution) -> FloatSolution:
        """Fill solution.constraints in place and return the solution.

        Fix: `number_of_constraints` is a method here, so it must be called —
        the original `range(self.number_of_constraints)` passed the bound
        method object to range() and raised TypeError.
        """
        x = solution.variables
        constraints = [0.0 for _ in range(self.number_of_constraints())]
        a = 0.51
        b = 0.5
        c = 20.0
        # Feasible only when g1/g2 fall inside the (b, a) band.
        constraints[0] = ((a - self.g1(x)) * (self.g1(x) - b))
        constraints[1] = ((a - self.g2(x)) * (self.g2(x) - b))
        # Periodic sine constraint on the first decision variable.
        constraints[2] = (sin(((c * pi) * x[0])) - 0.5)
        solution.constraints = constraints
        return solution

    def name(self):
        return 'LIR-CMOP3'
def plot_log_csv(log_path):
    """Render a 2x2 grid (train acc, val acc, train loss, val loss vs iteration)
    from a training-log CSV and save it beside the log as 'log_plots.png'.

    NOTE(review): relies on genfromtxt's sanitized header names ('trainloss',
    'trainacc', 'validloss', 'validacc', 'iteration') — presumably the CSV
    headers contain separators that get stripped; confirm the log format.
    Rows may hold NaN where a quantity was not logged at that iteration.
    """
    (log_dir, _) = osp.split(log_path)
    dat = np.genfromtxt(log_path, names=True, delimiter=',', autostrip=True)
    # Each series is filtered to the rows where it was actually logged.
    train_loss = dat['trainloss']
    train_loss_sel = (~ np.isnan(train_loss))
    train_loss = train_loss[train_loss_sel]
    iter_train_loss = dat['iteration'][train_loss_sel]
    train_acc = dat['trainacc']
    train_acc_sel = (~ np.isnan(train_acc))
    train_acc = train_acc[train_acc_sel]
    iter_train_acc = dat['iteration'][train_acc_sel]
    val_loss = dat['validloss']
    val_loss_sel = (~ np.isnan(val_loss))
    val_loss = val_loss[val_loss_sel]
    iter_val_loss = dat['iteration'][val_loss_sel]
    mean_iu = dat['validacc']
    mean_iu_sel = (~ np.isnan(mean_iu))
    mean_iu = mean_iu[mean_iu_sel]
    iter_mean_iu = dat['iteration'][mean_iu_sel]
    (fig, ax) = plt.subplots(nrows=2, ncols=2)
    # Top row: accuracies; bottom row: losses.
    plt.subplot(2, 2, 1)
    plt.plot(iter_train_acc, train_acc, label='train')
    plt.ylabel('accuracy')
    plt.grid()
    plt.legend()
    plt.tight_layout()
    plt.subplot(2, 2, 2)
    plt.plot(iter_mean_iu, mean_iu, label='val')
    plt.grid()
    plt.legend()
    plt.tight_layout()
    plt.subplot(2, 2, 3)
    plt.plot(iter_train_loss, train_loss, label='train')
    plt.xlabel('iteration')
    plt.ylabel('loss')
    plt.grid()
    plt.legend()
    plt.tight_layout()
    plt.subplot(2, 2, 4)
    plt.plot(iter_val_loss, val_loss, label='val')
    plt.xlabel('iteration')
    plt.grid()
    plt.legend()
    plt.tight_layout()
    plt.savefig(osp.join(log_dir, 'log_plots.png'), bbox_inches='tight')
def parse_args():
    """Parse CLI arguments for training a segmentor: config path and input shape."""
    arg_parser = argparse.ArgumentParser(description='Train a segmentor')
    arg_parser.add_argument('config', help='train config file path')
    # Height/width of the input image, e.g. --shape 2048 1024.
    arg_parser.add_argument('--shape', type=int, nargs='+', default=[2048, 1024], help='input image size')
    return arg_parser.parse_args()
class _bound_learner(nn.Module):
    """Boundary learner: per-scale in-scale transformers over three backbone
    feature maps, then top-down cross-scale attention (3->2, then 2->1),
    producing refined features plus per-scale point maps."""

    def __init__(self, point_pred=1, hidden_features=128, im_num=2, ex_num=2):
        super().__init__()
        self.point_pred = point_pred
        # 1x1 convs project backbone stages (128/320/512 ch) to a common width.
        self.convolution_mapping_1 = nn.Conv2d(in_channels=128, out_channels=hidden_features, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=True)
        self.convolution_mapping_2 = nn.Conv2d(in_channels=320, out_channels=hidden_features, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=True)
        self.convolution_mapping_3 = nn.Conv2d(in_channels=512, out_channels=hidden_features, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=True)
        normalize_before = True
        # One in-scale transformer per feature scale.
        self.im_ex_boud1 = in_scale_transformer(point_pred_layers=1, num_encoder_layers=im_num, num_decoder_layers=ex_num, d_model=hidden_features, nhead=8, normalize_before=normalize_before)
        self.im_ex_boud2 = in_scale_transformer(point_pred_layers=1, num_encoder_layers=im_num, num_decoder_layers=ex_num, d_model=hidden_features, nhead=8, normalize_before=normalize_before)
        self.im_ex_boud3 = in_scale_transformer(point_pred_layers=1, num_encoder_layers=im_num, num_decoder_layers=ex_num, d_model=hidden_features, nhead=8, normalize_before=normalize_before)
        # Cross-scale attention modules for the top-down refinement.
        self.cross_attention_3_1 = xboundlearnerv2(hidden_features, 8)
        self.cross_attention_3_2 = xboundlearnerv2(hidden_features, 8)
        # NOTE(review): not used in forward() below — confirm whether
        # trans_out_conv is consumed elsewhere or is dead.
        self.trans_out_conv = nn.Conv2d((hidden_features * 2), 512, 1, 1)

    def forward(self, x):
        """x: list of 4 backbone feature maps; returns
        (refined feature list, point_maps_1, point_maps_2, point_maps_3)."""
        features_1 = x[1]
        features_2 = x[2]
        features_3 = x[3]
        # Project each scale to the shared hidden width.
        features_1 = self.convolution_mapping_1(features_1)
        features_2 = self.convolution_mapping_2(features_2)
        features_3 = self.convolution_mapping_3(features_3)
        (latent_tensor_1, features_encoded_1, point_maps_1) = self.im_ex_boud1(features_1)
        (latent_tensor_2, features_encoded_2, point_maps_2) = self.im_ex_boud2(features_2)
        (latent_tensor_3, features_encoded_3, point_maps_3) = self.im_ex_boud3(features_3)
        latent_tensor_1 = latent_tensor_1.permute(2, 0, 1)
        latent_tensor_2 = latent_tensor_2.permute(2, 0, 1)
        latent_tensor_3 = latent_tensor_3.permute(2, 0, 1)
        # Top-down refinement: scale 3 informs scale 2, which informs scale 1.
        features_encoded_2_2 = self.cross_attention_3_2(features_encoded_2, features_encoded_3, latent_tensor_2, latent_tensor_3)
        features_encoded_1_2 = self.cross_attention_3_1(features_encoded_1, features_encoded_2_2, latent_tensor_1, latent_tensor_2)
        # Scale 0 passes through untouched.
        features_stage2 = [x[0], features_encoded_1_2, features_encoded_2_2, features_encoded_3]
        return (features_stage2, point_maps_1, point_maps_2, point_maps_3)
class Policy4Toyota(tf.Module):
    """Actor-critic container: a stochastic policy network and a twin
    state-value network, each trained with its own Adam optimizer under a
    polynomial learning-rate decay schedule."""
    # Imports live on the class so the methods below can access TF via
    # self.tf / self.tfp (presumably so serialized/remote copies re-import
    # lazily — TODO confirm against how this class is distributed).
    import tensorflow as tf
    import tensorflow_probability as tfp
    tfd = tfp.distributions
    tfb = tfp.bijectors
    # Hide all GPUs from this process; everything runs on CPU.
    tf.config.experimental.set_visible_devices([], 'GPU')
    def __init__(self, args):
        super().__init__()
        self.args = args
        (obs_dim, act_dim) = (self.args.obs_dim, self.args.act_dim)
        (n_hiddens, n_units, hidden_activation) = (self.args.num_hidden_layers, self.args.num_hidden_units, self.args.hidden_activation)
        (value_model_cls, policy_model_cls) = (NAME2MODELCLS[self.args.value_model_cls], NAME2MODELCLS[self.args.policy_model_cls])
        # Policy head outputs 2*act_dim values: [mean, log_std] per action dim
        # (split along the last axis in compute_mode/_logits2dist).
        self.policy = policy_model_cls(obs_dim, n_hiddens, n_units, hidden_activation, (act_dim * 2), name='policy', output_activation=self.args.policy_out_activation)
        policy_lr_schedule = PolynomialDecay(*self.args.policy_lr_schedule)
        self.policy_optimizer = self.tf.keras.optimizers.Adam(policy_lr_schedule, name='adam_opt')
        # Twin state-value head with 2 outputs.
        self.vs = value_model_cls(obs_dim, n_hiddens, n_units, hidden_activation, 2, name='vs')
        value_lr_schedule = PolynomialDecay(*self.args.value_lr_schedule)
        self.value_optimizer = self.tf.keras.optimizers.Adam(value_lr_schedule, name='adam_opt')
        # Order matters: apply_gradients assumes (value, policy) ordering.
        self.models = (self.vs, self.policy)
        self.optimizers = (self.value_optimizer, self.policy_optimizer)
    def save_weights(self, save_dir, iteration):
        # Checkpoint models and optimizer state together, keyed by their names.
        model_pairs = [(model.name, model) for model in self.models]
        optimizer_pairs = [(optimizer._name, optimizer) for optimizer in self.optimizers]
        ckpt = self.tf.train.Checkpoint(**dict((model_pairs + optimizer_pairs)))
        ckpt.save(((save_dir + '/ckpt_ite') + str(iteration)))
    def load_weights(self, load_dir, iteration):
        model_pairs = [(model.name, model) for model in self.models]
        optimizer_pairs = [(optimizer._name, optimizer) for optimizer in self.optimizers]
        ckpt = self.tf.train.Checkpoint(**dict((model_pairs + optimizer_pairs)))
        # '-1' is the save-counter suffix tf.train.Checkpoint.save appends.
        ckpt.restore((((load_dir + '/ckpt_ite') + str(iteration)) + '-1'))
    def get_weights(self):
        # Per-model weight lists, ordered as self.models = (vs, policy).
        return [model.get_weights() for model in self.models]
    def set_weights(self, weights):
        # Inverse of get_weights: same (vs, policy) ordering expected.
        for (i, weight) in enumerate(weights):
            self.models[i].set_weights(weight)
    def apply_gradients(self, iteration, grads):
        # grads is one flat list: value-net gradients first, then the policy's.
        value_len = len(self.vs.trainable_weights)
        (value_grad, policy_grad) = (grads[:value_len], grads[value_len:])
        self.value_optimizer.apply_gradients(zip(value_grad, self.vs.trainable_weights))
        self.policy_optimizer.apply_gradients(zip(policy_grad, self.policy.trainable_weights))
    def compute_mode(self, obs):
        # Deterministic action: tanh-squashed mean scaled into the action range
        # (or the raw mean when no range is configured).
        logits = self.policy(obs)
        (mean, _) = self.tf.split(logits, num_or_size_splits=2, axis=(- 1))
        return ((self.args.action_range * self.tf.tanh(mean)) if (self.args.action_range is not None) else mean)
    def _logits2dist(self, logits):
        # Build a diagonal Gaussian from the [mean, log_std] halves; when an
        # action range is set, squash through Tanh then scale (Chain applies
        # bijectors right-to-left: Tanh first, then Affine).
        (mean, log_std) = self.tf.split(logits, num_or_size_splits=2, axis=(- 1))
        act_dist = self.tfd.MultivariateNormalDiag(mean, self.tf.exp(log_std))
        if (self.args.action_range is not None):
            act_dist = self.tfp.distributions.TransformedDistribution(distribution=act_dist, bijector=self.tfb.Chain([self.tfb.Affine(scale_identity_multiplier=self.args.action_range), self.tfb.Tanh()]))
        return act_dist
    def compute_action(self, obs):
        """Return (actions, log_probs); log_prob is the constant 0.0 in
        deterministic mode."""
        with self.tf.name_scope('compute_action') as scope:
            logits = self.policy(obs)
            if self.args.deterministic_policy:
                (mean, log_std) = self.tf.split(logits, num_or_size_splits=2, axis=(- 1))
                return (((self.args.action_range * self.tf.tanh(mean)) if (self.args.action_range is not None) else mean), 0.0)
            else:
                act_dist = self._logits2dist(logits)
                actions = act_dist.sample()
                logps = act_dist.log_prob(actions)
                return (actions, logps)
    def compute_vs(self, obs):
        # Raw twin state-value estimates for obs.
        with self.tf.name_scope('compute_vs') as scope:
            return self.vs(obs)
def compute_impact_geometry(a: Polygon, b: BaseGeometry) -> "tuple[np.ndarray, Point]":
    """Compute the contact normal and impact point between two overlapping shapes.

    Args:
        a: polygon whose centroid anchors the normal's orientation.
        b: the other geometry; must genuinely overlap `a` (touching-only
           contact is rejected by the assert below).

    Returns:
        (normal, impact_point): a unit 2-D normal oriented away from `a`'s
        centroid toward the impact point, and the impact point itself.
    """
    # A touching-only configuration has no overlap region to work with.
    assert (not a.touches(b))
    if a.contains(b):
        # b fully inside a: take b's centroid as the impact point and point
        # the normal from a's centroid toward it.
        impact_point = b.centroid
        r_ap = (np.array(impact_point.coords[0]) - np.array(a.centroid.coords[0]))
        normal = (r_ap / np.linalg.norm(r_ap))
    else:
        # Partial overlap: the impact point is the midpoint of the segment
        # between the boundary intersection points (assumes exactly two —
        # see the unpack below); the normal is perpendicular to that segment.
        intersecting_points = _find_intersection_points(a, b)
        impact_point = LineString(intersecting_points).interpolate(0.5, normalized=True)
        (first, second) = intersecting_points
        dxdy_surface = ((second[0] - first[0]), (second[1] - first[1]))
        normal = np.array([(- dxdy_surface[1]), dxdy_surface[0]])
        normal /= np.linalg.norm(normal)
        r_ap = (np.array(impact_point.coords[0]) - np.array(a.centroid.coords[0]))
        # Flip so the normal points outward from a's centroid.
        if (np.dot(r_ap, normal) < 0):
            normal *= (- 1)
    return (normal, impact_point)
class SimpleTokenizer(Tokenizer):
    """Regex-based tokenizer: emits runs of alphanumeric characters, or single
    non-whitespace characters, with their trailing whitespace and char spans."""
    ALPHA_NUM = '[\\p{L}\\p{N}\\p{M}]+'
    NON_WS = '[^\\p{Z}\\p{C}]'

    def __init__(self, **kwargs):
        pattern = '(%s)|(%s)' % (self.ALPHA_NUM, self.NON_WS)
        self._regexp = regex.compile(pattern, flags=((regex.IGNORECASE + regex.UNICODE) + regex.MULTILINE))
        # This tokenizer cannot annotate; warn if annotators were requested.
        if len(kwargs.get('annotators', {})) > 0:
            logger.warning('%s only tokenizes! Skipping annotators: %s' % (type(self).__name__, kwargs.get('annotators')))
        self.annotators = set()

    def tokenize(self, text):
        matches = list(self._regexp.finditer(text))
        total = len(matches)
        data = []
        for pos, match in enumerate(matches):
            token = match.group()
            span = match.span()
            ws_start = span[0]
            # Extend the "token + whitespace" slice up to the next match, or
            # just to the token's own end for the final match.
            if pos + 1 < total:
                ws_end = matches[pos + 1].span()[0]
            else:
                ws_end = span[1]
            data.append((token, text[ws_start:ws_end], span))
        return Tokens(data, self.annotators)
def _onnxruntime_checker():
onnxruntime_installed = (not (find_spec('onnxruntime') is None))
onnx_installed = (not (find_spec('onnx') is None))
return (onnxruntime_installed and onnx_installed) |
def load_svhn(data_dir, use_augmentation=False):
    """Download (if needed) and return the SVHN (train, test) dataset pair.

    NOTE(review): use_augmentation is accepted but currently has no effect —
    train and test share the same ToTensor-only transform; confirm intent.
    """
    to_tensor = transforms.Compose([transforms.ToTensor()])
    train_set = torchvision.datasets.SVHN(root=data_dir, split='train', download=True, transform=to_tensor)
    test_set = torchvision.datasets.SVHN(root=data_dir, split='test', download=True, transform=to_tensor)
    return (train_set, test_set)
def log_parameters(log_file, args, classes):
    """Serialize run parameters to a JSON log file.

    Plain args entries are copied (skipping any whose name starts with one of
    the keys in `classes`); each entry in `classes` contributes a nested dict
    of its own hyperparameters plus a '_name' identifier.
    """
    class_prefixes = list(classes.keys())
    log_params = {
        name: value
        for name, value in args.__dict__.items()
        if not any(name.startswith(prefix) for prefix in class_prefixes)
    }
    for name, cls in classes.items():
        if isinstance(cls, type):
            # Uninstantiated class: collect its parameters from args.
            params = get_all_parameters(cls, args)
            params['_name'] = getattr(args, name)
            log_params[name] = params
        else:
            # Instance: record its stored kwargs and fully-qualified type name.
            log_params[name] = getattr(cls, '__kwargs', dict())
            log_params[name]['_name'] = cls.__module__ + '.' + cls.__class__.__name__
    mkdir_p(os.path.dirname(log_file))
    with open(log_file, 'w') as f:
        json.dump(log_params, f, indent=2, sort_keys=True)
@torch.no_grad()
def n_step_guided_p_sample(model, x, cond, t, guide, scale=0.001, t_stopgrad=0, n_guide_steps=1, scale_grad_by_std=True):
    """Guided diffusion p-sample step: nudge x along the guide's gradient for
    n_guide_steps, then draw from the model's posterior.

    Fix: the decorator line was corrupted to `_grad()` (a NameError at import);
    restored to `@torch.no_grad()` — gradients are explicitly re-enabled only
    around the guide call below.

    Returns:
        (sample, y): the sampled next state and the guide's last objective value.
    """
    model_log_variance = extract(model.posterior_log_variance_clipped, t, x.shape)
    model_std = torch.exp((0.5 * model_log_variance))
    model_var = torch.exp(model_log_variance)
    for _ in range(n_guide_steps):
        # Guide gradients need autograd even though sampling itself does not.
        with torch.enable_grad():
            (y, grad) = guide.gradients(x, cond, t)
        if scale_grad_by_std:
            grad = (model_var * grad)
        # Stop guiding near the end of the reverse process.
        grad[(t < t_stopgrad)] = 0
        x = (x + (scale * grad))
        x = apply_conditioning(x, cond, model.action_dim)
    (model_mean, _, model_log_variance) = model.p_mean_variance(x=x, cond=cond, t=t)
    # No noise at t == 0 (final denoising step is deterministic).
    noise = torch.randn_like(x)
    noise[(t == 0)] = 0
    return ((model_mean + (model_std * noise)), y)
def subprocess_fn(rank, args, temp_dir):
    """Per-rank worker for (multi-)GPU metric evaluation: initializes
    torch.distributed when needed, loads the generator on this rank's GPU,
    and computes every metric in args.metrics. Only rank 0 prints/reports."""
    dnnlib.util.Logger(should_flush=True)
    # Init torch.distributed via a shared rendezvous file in temp_dir
    # (gloo backend on Windows, nccl elsewhere).
    if (args.num_gpus > 1):
        init_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init'))
        if (os.name == 'nt'):
            init_method = ('file:///' + init_file.replace('\\', '/'))
            torch.distributed.init_process_group(backend='gloo', init_method=init_method, rank=rank, world_size=args.num_gpus)
        else:
            init_method = f'file://{init_file}'
            torch.distributed.init_process_group(backend='nccl', init_method=init_method, rank=rank, world_size=args.num_gpus)
    # Stats collection syncs across devices only in the multi-GPU case.
    sync_device = (torch.device('cuda', rank) if (args.num_gpus > 1) else None)
    training_stats.init_multiprocessing(rank=rank, sync_device=sync_device)
    # Silence custom-op build output on non-primary/non-verbose ranks.
    if ((rank != 0) or (not args.verbose)):
        custom_ops.verbosity = 'none'
    # Disable TF32 so fp32 metric values are reproducible across GPUs.
    device = torch.device('cuda', rank)
    torch.backends.cuda.matmul.allow_tf32 = False
    torch.backends.cudnn.allow_tf32 = False
    conv2d_gradfix.enabled = True
    # Frozen copy of the generator on this rank's device.
    G = copy.deepcopy(args.G).eval().requires_grad_(False).to(device)
    if ((rank == 0) and args.verbose):
        z = torch.empty([1, G.z_dim], device=device)
        c = torch.empty([1, G.c_dim], device=device)
        misc.print_module_summary(G, [z, c])
    # Compute each requested metric; every rank participates, rank 0 reports.
    for metric in args.metrics:
        if ((rank == 0) and args.verbose):
            print(f'Calculating {metric}...')
        progress = metric_utils.ProgressMonitor(verbose=args.verbose)
        result_dict = metric_main.calc_metric(metric=metric, G=G, dataset_kwargs=args.dataset_kwargs, num_gpus=args.num_gpus, rank=rank, device=device, progress=progress)
        if (rank == 0):
            metric_main.report_metric(result_dict, run_dir=args.run_dir, snapshot_pkl=args.network_pkl)
        if ((rank == 0) and args.verbose):
            print()
    if ((rank == 0) and args.verbose):
        print('Exiting...')
def display_table(rows, positions):
def display_row(objects, positions):
line = ''
for i in range(len(objects)):
line += str(objects[i])
line = line[:positions[i]]
line += (' ' * (positions[i] - len(line)))
print(line)
for objects in rows:
display_row(objects, positions) |
class NezhaForNextSentencePrediction(metaclass=DummyObject):
    """Placeholder emitted when torch is unavailable: any instantiation is
    routed to requires_backends, which raises a helpful "install torch"
    error instead of an ImportError at module import time."""
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def spectral_worker(G):
    """Return the normalized-Laplacian eigenvalue histogram of graph G as a
    200-bin probability mass function over [-1e-05, 2]."""
    laplacian = nx.normalized_laplacian_matrix(G).todense()
    eigenvalues = eigvalsh(laplacian)
    counts, _ = np.histogram(eigenvalues, bins=200, range=((- 1e-05), 2), density=False)
    return counts / counts.sum()
def show_semantic_scholar_popup(show_popup: bool, user_id):
    """Persist the user's semantic-scholar popup preference in the users table."""
    connection = getDb()
    query = 'update users set show_semantic_scholar_popup = %s where user_id = %s'
    # closing() guarantees the cursor is released even if execute raises.
    with closing(connection.cursor()) as cursor:
        cursor.execute(query, (show_popup, user_id))
    connection.commit()
def show_heads():
    """Print an indexed table of every available head name."""
    names = heads.__all__
    table = {'No.': list(range(1, len(names) + 1)), 'Heads': names}
    print(tabulate(table, headers='keys'))
def test_digits_cosine_stochastic():
    """GraphCut with the stochastic optimizer must reproduce the recorded
    ranking/gains/subset for the digits dataset under cosine similarity."""
    selector = GraphCutSelection(100, 'cosine', optimizer='stochastic', random_state=0)
    selector.fit(X_digits)
    assert_array_equal(selector.ranking, digits_cosine_stochastic_ranking)
    assert_array_almost_equal(selector.gains, digits_cosine_stochastic_gains, 4)
    assert_array_almost_equal(selector.subset, X_digits[selector.ranking])
def unfreeze_model(model):
    """Unfreeze the last 20 layers (except BatchNorm) and recompile with a low LR."""
    trainable_tail = model.layers[-20:]
    for layer in trainable_tail:
        if isinstance(layer, layers.BatchNormalization):
            continue  # keep BN statistics frozen during fine-tuning
        layer.trainable = True
    model.compile(loss='categorical_crossentropy',
                  optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
                  metrics=['accuracy'])
def process_one_data_item(data_item):
    """For one scan item: query the 4 nearest SMPL vertices for each sampled
    point set and save the distances/indices to sample/sample2smpl.mat."""
    print(data_item)
    # Strip the trailing separator before splitting off the item name.
    _, item_name = os.path.split(data_item[:(- 1)])
    output_fd = os.path.join(output_data_dir, item_name)
    os.makedirs(output_fd, exist_ok=True)
    os.makedirs(os.path.join(output_fd, 'sample'), exist_ok=True)
    smpl = objio.load_obj_data(os.path.join(mesh_data_dir, item_name, 'smpl/smpl_mesh.obj'))
    pts_data = sio.loadmat(os.path.join(output_fd, 'sample/samples.mat'))
    kd_tree = scipy.spatial.KDTree(smpl['v'])
    # Same k=4 nearest-vertex query for every sampled point set; keys in the
    # output .mat mirror the input keys with dist_/idx_ prefixes.
    results = {}
    for key in ('surface_points_inside', 'surface_points_outside',
                'uniform_points_inside', 'uniform_points_outside'):
        dist, idx = kd_tree.query(pts_data[key], k=4)
        results['dist_' + key] = dist
        results['idx_' + key] = idx
    mat_fname = os.path.join(output_fd, 'sample/sample2smpl.mat')
    sio.savemat(mat_fname, results, do_compression=True)
def quniform(lower: float, upper: float, q: float) -> 'tune.sample.Float':
    """Thin wrapper over tune.quniform: sample uniformly in [lower, upper],
    quantized to integer multiples of q."""
    return tune.quniform(lower, upper, q)
def test_test_memoryview_from_buffer_nullptr():
    """A null buffer pointer is tolerated on Python 2 but must raise
    ValueError on Python 3 (the C++ side attempts the same construction)."""
    if env.PY2:
        m.test_memoryview_from_buffer_nullptr()
    else:
        with pytest.raises(ValueError):
            m.test_memoryview_from_buffer_nullptr()
class TestExampleConfigs():
    """Smoke tests: every example config must build an Environment and run a
    few short episodes without raising.

    Fix: the parametrize decorator line was corrupted to a bare
    `.parametrize(...)` (a SyntaxError); restored to
    `@pytest.mark.parametrize` (pytest is imported at the top of this file).
    """

    @pytest.mark.parametrize('config_name', list(_CONFIG_LEVELS.keys()))
    def testExampleConfigs(self, config_name):
        """Each level of each example config builds and steps cleanly."""
        config_module = importlib.import_module(('moog_demos.example_configs.' + config_name))
        for level in _CONFIG_LEVELS[config_name]:
            config = config_module.get_config(level)
            env = environment.Environment(**config)
            for _ in range(_NUM_EPISODES):
                env.reset()
                for _ in range(_NUM_STEPS):
                    env.step(action=env.action_space.random_action())

    def testMultiAgentExample(self):
        """The multi-agent cleanup example wraps and steps correctly."""
        config_name = 'multi_agent_example.configs.cleanup'
        config_module = importlib.import_module(config_name)
        config = config_module.get_config(None)
        # Agents and the controlled-agent name are consumed by the wrapper,
        # not by the base Environment constructor.
        agents = config.pop('agents')
        agent_name = config.pop('agent_name')
        multi_env = environment.Environment(**config)
        env = env_wrappers.MultiAgentEnvironment(environment=multi_env, agent_name=agent_name, **agents)
        for _ in range(_NUM_EPISODES):
            env.reset()
            for _ in range(_NUM_STEPS):
                action_space = env.action_space.action_spaces[agent_name]
                env.step(action=action_space.random_action())
def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
    """Drop-in replacement for F.conv2d that routes through the custom
    gradient-fix op when could_use_op(input) allows it, falling back to the
    stock implementation otherwise. Signature mirrors F.conv2d."""
    if could_use_op(input):
        return conv2d_gradfix(transpose=False, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=0, dilation=dilation, groups=groups).apply(input, weight, bias)
    return F.conv2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
class downSample_Generator(nn.Module):
    """Gated down-sampling conv block (GLU-style): a conv+InstanceNorm path
    modulated elementwise by a sigmoid-activated parallel gate path."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super(downSample_Generator, self).__init__()
        # Main feature branch.
        self.convLayer = nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                      kernel_size=kernel_size, stride=stride, padding=padding),
            nn.InstanceNorm2d(num_features=out_channels, affine=True))
        # Gate branch with identical geometry.
        self.convLayer_gates = nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                      kernel_size=kernel_size, stride=stride, padding=padding),
            nn.InstanceNorm2d(num_features=out_channels, affine=True))

    def forward(self, input):
        features = self.convLayer(input)
        gates = torch.sigmoid(self.convLayer_gates(input))
        return features * gates
class SpectralNormLoadStateDictPreHook(object):
    """load_state_dict pre-hook that migrates pre-version-1 spectral-norm
    checkpoints: reconstructs the right singular vector `<name>_v` (required
    by version 1+) from the tensors the old format saved."""
    def __init__(self, fn):
        # fn is the SpectralNorm instance whose state this hook migrates.
        self.fn = fn
    def __call__(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        fn = self.fn
        version = local_metadata.get('spectral_norm', {}).get((fn.name + '.version'), None)
        if ((version is None) or (version < 1)):
            with torch.no_grad():
                weight_orig = state_dict[((prefix + fn.name) + '_orig')]
                # Old checkpoints stored the normalized weight under the bare
                # name; pop it and recover sigma as the elementwise-ratio mean
                # (weight == weight_orig / sigma in the old format — see the
                # version-1 layout for the new keys).
                weight = state_dict.pop((prefix + fn.name))
                sigma = (weight_orig / weight).mean()
                weight_mat = fn.reshape_weight_to_matrix(weight_orig)
                u = state_dict[((prefix + fn.name) + '_u')]
                # Solve for v given u and sigma, and store it so version-1
                # loading finds the expected '<name>_v' entry.
                v = fn._solve_v_and_rescale(weight_mat, u, sigma)
                state_dict[((prefix + fn.name) + '_v')] = v
@mock.patch('requests.sessions.Session.request')
def test_get_data(mock_request):
    """get_data must page through the EIA API (two requests here) and return
    the concatenated rows.

    Fix: the decorator line was corrupted to a bare string expression
    `('requests.sessions.Session.request')`; restored to `@mock.patch(...)`,
    which is what supplies the `mock_request` argument.
    """
    session = eia.EIASession(api_key='DUMMY_KEY')
    mock_response = mock.Mock()
    # Two response pages: the session must issue a follow-up request.
    mock_response.json.side_effect = [RETURN_VALUE_1, RETURN_VALUE_2]
    mock_response.status_code = 200
    mock_request.return_value = mock_response
    data = session.get_data('fuel-type-data', start='2023-01-01', end='2023-01-01T23', length=16, params={'facets[respondent][]': 'AECI', 'facets[fueltype][]': 'COL'})
    assert (len(mock_request.call_args_list) == 2)
    assert (len(data) == 24)
def get_annotations(fn):
    """Load and parse the annotation file paired with document *fn*.

    The annotation filename is *fn* with its extension replaced by
    options.annsuffix (a module-level global).

    Fix: the 'rU' (universal newlines) open mode was removed in Python 3.11;
    plain 'r' text mode already performs newline translation.

    Returns:
        (textbounds, dict_of_entity, list_of_relns) with overlapping
        textbounds eliminated.
    """
    global options
    annfn = (path.splitext(fn)[0] + options.annsuffix)
    with open(annfn, 'r') as f:
        (textbounds, dict_of_entity, list_of_relns) = parse_textbounds(f)
    textbounds = eliminate_overlaps(textbounds, fn)
    return (textbounds, dict_of_entity, list_of_relns)
@META_ARCH_REGISTRY.register()
class PanopticFPN_baseline(PanopticFPN):
    """Panoptic FPN baseline for open-set segmentation: thing classes listed
    in cfg.DATASETS.UNSEEN_LABEL_SET are recorded as an unseen label set and
    an extra 'unknown' stuff class is appended to the dataset metadata.

    Fix: the registration decorator was corrupted to a bare
    `_ARCH_REGISTRY.register()` expression (NameError at import); restored to
    detectron2's `@META_ARCH_REGISTRY.register()`.
    """

    def __init__(self, cfg):
        # Resolve the unseen-class name list (if configured) to contiguous
        # thing-class ids before the parent constructor builds the heads.
        unseen_path = cfg.DATASETS.UNSEEN_LABEL_SET
        self.meta = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
        if (unseen_path != ''):
            meta = MetadataCatalog.get(cfg.DATASETS.TRAIN[0]).thing_classes
            meta = {e: i for (i, e) in enumerate(meta)}
            with open(unseen_path, 'r') as f:
                lines = [meta[e.replace('\n', '')] for e in f.readlines()]
            self.unseen_label_set = lines
            # Register the synthetic 'unknown' stuff class.
            self.meta.stuff_classes.append('unknown')
            self.meta.stuff_colors.append([20, 220, 60])
            self.meta.stuff_dataset_id_to_contiguous_id[201] = 54
        else:
            self.unseen_label_set = None
        super().__init__(cfg)
        self.unlabeled_region_on = cfg.MODEL.EOPSN.UNLABELED_REGION
        self.ignore_unlabeled_region = cfg.MODEL.EOPSN.IGNORE_UNLABELED_REGION

    def forward(self, batched_inputs):
        """Training: return the combined loss dict. Inference: return per-image
        dicts with 'sem_seg', 'instances' and (when combining is enabled)
        'panoptic_seg', resized back to original image sizes."""
        image_path = [x['file_name'] for x in batched_inputs]
        if self.training:
            flips = [x['flip'] for x in batched_inputs]
        else:
            flips = None
        # Normalize, pad to a common size, and run the backbone.
        images = [x['image'].to(self.device) for x in batched_inputs]
        images = [((x - self.pixel_mean) / self.pixel_std) for x in images]
        images = ImageList.from_tensors(images, self.backbone.size_divisibility)
        features = self.backbone(images.tensor)
        if ('proposals' in batched_inputs[0]):
            proposals = [x['proposals'].to(self.device) for x in batched_inputs]
            proposal_losses = {}
        if ('sem_seg' in batched_inputs[0]):
            gt_sem_seg = [x['sem_seg'].to(self.device) for x in batched_inputs]
            gt_sem_seg = ImageList.from_tensors(gt_sem_seg, self.backbone.size_divisibility, self.sem_seg_head.ignore_value).tensor
        else:
            gt_sem_seg = None
        (sem_seg_results, sem_seg_losses) = self.sem_seg_head(features, gt_sem_seg)
        if (('integral_sem_seg' in batched_inputs[0]) and self.training):
            gt_integral_sem_seg = [x['integral_sem_seg'].to(self.device) for x in batched_inputs]
        else:
            gt_integral_sem_seg = None
        if ('instances' in batched_inputs[0]):
            gt_instances = [x['instances'].to(self.device) for x in batched_inputs]
            # Optionally augment ground truth with cached pseudo-labels.
            if hasattr(self.roi_heads.box_predictor, 'add_pseudo_label'):
                gt_instances = self.roi_heads.box_predictor.add_pseudo_label(gt_instances, image_path, flips)
        else:
            gt_instances = None
        if self.proposal_generator:
            (proposals, proposal_losses) = self.proposal_generator(images, features, gt_instances, gt_integral_sem_seg)
        (detector_results, detector_losses) = self.roi_heads(images, features, proposals, gt_instances, gt_integral_sem_seg, image_path=image_path, flips=flips)
        if self.training:
            losses = {}
            losses.update(sem_seg_losses)
            losses.update({k: (v * self.instance_loss_weight) for (k, v) in detector_losses.items()})
            losses.update(proposal_losses)
            return losses
        # Inference: resize each result back to the original image size.
        processed_results = []
        for (sem_seg_result, detector_result, input_per_image, image_size) in zip(sem_seg_results, detector_results, batched_inputs, images.image_sizes):
            height = input_per_image.get('height', image_size[0])
            width = input_per_image.get('width', image_size[1])
            sem_seg_r = sem_seg_postprocess(sem_seg_result, image_size, height, width)
            detector_r = detector_postprocess(detector_result, height, width)
            processed_results.append({'sem_seg': sem_seg_r, 'instances': detector_r})
            if self.combine_on:
                panoptic_r = combine_semantic_and_instance_outputs(detector_r, sem_seg_r.argmax(dim=0), self.combine_overlap_threshold, self.combine_stuff_area_limit, self.combine_instances_confidence_threshold)
                processed_results[(- 1)]['panoptic_seg'] = panoptic_r
        return processed_results
def copy_to_quad_double_syspool(idx, vrblvl=0):
    """Copy the system at position idx into the quad double systems pool
    via phc job 609; returns the phc return value (0 on success)."""
    if vrblvl > 0:
        print('in copy_to_quad_double_syspool, idx :', idx)
    phc = get_phcfun()
    # phc's generic (int*, int*, double*) argument triple; only the first
    # slot (idx) carries meaningful input for job 609.
    apar = pointer(c_int32(idx))
    bpar = pointer(c_int32(0))
    cpar = pointer(c_double(0.0))
    verbose = c_int32(vrblvl)
    if vrblvl > 0:
        print('-> copy_to_quad_double_syspool calls phc', end='')
    retval = phc(609, apar, bpar, cpar, verbose)
    if vrblvl > 0:
        print(', return value :', retval)
    return retval
def group_bleu(inp_group, reference: str) -> dict:
    """Average sentence-level BLEU of each candidate in inp_group against
    a single reference.

    Improvements: the enumerate index was unused; the final manual dict
    rebuild is now a comprehension.

    Returns:
        dict mapping metric name ('bleu') to the mean score; empty dict when
        inp_group is empty.
    """
    scores = defaultdict(list)
    for candidate in inp_group:
        bleu_score = bleu_scorer.sentence_score(candidate, [reference])
        scores['bleu'].append(bleu_score.score)
    return {metric: statistics.mean(values) for metric, values in scores.items()}
class Mlp(nn.Module):
    """Two-layer perceptron: fc1 -> activation -> fc2 (no dropout).

    Hidden and output widths default to the input width when unspecified.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU):
        super().__init__()
        out_features = out_features if out_features else in_features
        hidden_features = hidden_features if hidden_features else in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)

    def forward(self, x):
        return self.fc2(self.act(self.fc1(x)))
def test_isotropic_eddington_selfconsist_dehnencore_sigmar_directint():
    """The Eddington DF of a cored Dehnen sphere must reproduce sigma_r from
    direct integration of the Jeans equation to within 0.1%."""
    dehnen_pot = potential.DehnenCoreSphericalPotential(amp=2.5, a=1.15)
    distfunc = eddingtondf(pot=dehnen_pot)
    check_sigmar_against_jeans_directint(distfunc, dehnen_pot, 0.001,
                                         rmin=(dehnen_pot._scale / 10.0),
                                         rmax=(dehnen_pot._scale * 10.0),
                                         bins=31)
    return None
def diapreresnet26(**kwargs):
    """DIA-PreResNet-26 factory: a 26-layer non-bottleneck variant; extra
    kwargs are forwarded to get_diapreresnet."""
    return get_diapreresnet(blocks=26, bottleneck=False, model_name='diapreresnet26', **kwargs)
def get_train_data(input_shape, output_dim):
    """Generate (observations, returns) arrays for a toy regression task.

    Supports scalar inputs (input_shape == (1,), sampled on [-pi, pi]) and
    planar inputs (input_shape == (2,), sampled on a [0,1] x [0,1] grid).

    Fix: any other input_shape previously fell through both branches and
    crashed with a confusing NameError on `obs`; it now raises ValueError.

    Returns:
        (observations, returns): stacked inputs and (-1, output_dim) labels.
    """
    if (input_shape == (1,)):
        data = np.linspace((- np.pi), np.pi, 1000)
        obs = [{'observations': [[x]], 'returns': [get_labels(input_shape, x, output_dim)]} for x in data]
    elif (input_shape == (2,)):
        x = np.linspace(0, 1, 100)
        y = np.linspace(0, 1, 10)
        data = np.dstack(np.meshgrid(x, y)).reshape((- 1), 2)
        obs = [{'observations': [x], 'returns': [get_labels(input_shape, x, output_dim)]} for x in data]
    else:
        raise ValueError('Unsupported input_shape: %r (expected (1,) or (2,))' % (input_shape,))
    observations = np.concatenate([p['observations'] for p in obs])
    returns = np.concatenate([p['returns'] for p in obs])
    returns = returns.reshape(((- 1), output_dim))
    return (observations, returns)
def get_parser():
    """Build the argument parser for speech-translation decoding.

    Fix: corrected the user-facing typo 'Incertion penalty' -> 'Insertion
    penalty' in the --penalty help text. All options are otherwise unchanged.

    Returns:
        A configargparse.ArgumentParser supporting up to three layered YAML
        config files (--config overridden by --config2, then --config3).
    """
    parser = configargparse.ArgumentParser(description='Translate text from speech using a speech translation model on one CPU or GPU', config_file_parser_class=configargparse.YAMLConfigFileParser, formatter_class=configargparse.ArgumentDefaultsHelpFormatter)
    parser.add('--config', is_config_file=True, help='Config file path')
    parser.add('--config2', is_config_file=True, help='Second config file path that overwrites the settings in `--config`')
    parser.add('--config3', is_config_file=True, help='Third config file path that overwrites the settings in `--config` and `--config2`')
    parser.add_argument('--ngpu', type=int, default=0, help='Number of GPUs')
    parser.add_argument('--dtype', choices=('float16', 'float32', 'float64'), default='float32', help='Float precision (only available in --api v2)')
    parser.add_argument('--backend', type=str, default='chainer', choices=['chainer', 'pytorch'], help='Backend library')
    parser.add_argument('--debugmode', type=int, default=1, help='Debugmode')
    parser.add_argument('--seed', type=int, default=1, help='Random seed')
    parser.add_argument('--verbose', '-V', type=int, default=1, help='Verbose option')
    parser.add_argument('--batchsize', type=int, default=1, help='Batch size for beam search (0: means no batch processing)')
    parser.add_argument('--preprocess-conf', type=str, default=None, help='The configuration file for the pre-processing')
    parser.add_argument('--api', default='v1', choices=['v1', 'v2'], help='Beam search APIs v1: Default API. It only supports the ASRInterface.recognize method and DefaultRNNLM. v2: Experimental API. It supports any models that implements ScorerInterface.')
    parser.add_argument('--trans-json', type=str, help='Filename of translation data (json)')
    parser.add_argument('--result-label', type=str, required=True, help='Filename of result label data (json)')
    parser.add_argument('--model', type=str, required=True, help='Model file parameters to read')
    parser.add_argument('--nbest', type=int, default=1, help='Output N-best hypotheses')
    parser.add_argument('--beam-size', type=int, default=1, help='Beam size')
    parser.add_argument('--penalty', type=float, default=0.0, help='Insertion penalty')
    parser.add_argument('--maxlenratio', type=float, default=0.0, help='Input length ratio to obtain max output length.\n If maxlenratio=0.0 (default), it uses a end-detect function\n to automatically find maximum hypothesis lengths')
    parser.add_argument('--minlenratio', type=float, default=0.0, help='Input length ratio to obtain min output length')
    parser.add_argument('--tgt-lang', default=False, type=str, help='target language ID (e.g., <en>, <de>, and <fr> etc.)')
    return parser
class DemoLoader(Dataset):
    """Single-image demo wrapper: loads one RGB image, resizes it to a square
    base_size, and applies an optional transform."""
    NUM_CLASS = 1

    def __init__(self, dataset_dir, transform=None, base_size=512, crop_size=480, suffix='.png'):
        super(DemoLoader, self).__init__()
        self.transform = transform
        # Despite the plural name, this holds a single image path.
        self.images = dataset_dir
        self.base_size = base_size
        self.crop_size = crop_size
        self.suffix = suffix

    def _demo_sync_transform(self, img):
        # Resize to base_size x base_size, then convert to an ndarray.
        resized = img.resize((self.base_size, self.base_size), Image.BILINEAR)
        return np.array(resized)

    def img_preprocess(self):
        image = Image.open(self.images).convert('RGB')
        image = self._demo_sync_transform(image)
        if self.transform is not None:
            image = self.transform(image)
        return image
class DreamBoothDataset(Dataset):
def __init__(self, instance_data_root, instance_prompt, tokenizer, class_data_root=None, class_prompt=None, size=512, center_crop=False):
self.size = size
self.center_crop = center_crop
self.tokenizer = tokenizer
self.instance_data_root = Path(instance_data_root)
if (not self.instance_data_root.exists()):
raise ValueError("Instance images root doesn't exists.")
self.instance_images_path = list(Path(instance_data_root).iterdir())
self.num_instance_images = len(self.instance_images_path)
self.instance_prompt = instance_prompt
self._length = self.num_instance_images
if (class_data_root is not None):
self.class_data_root = Path(class_data_root)
self.class_data_root.mkdir(parents=True, exist_ok=True)
self.class_images_path = list(self.class_data_root.iterdir())
self.num_class_images = len(self.class_images_path)
self._length = max(self.num_class_images, self.num_instance_images)
self.class_prompt = class_prompt
else:
self.class_data_root = None
self.image_transforms = transforms.Compose([transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), (transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])
def __len__(self):
return self._length
def __getitem__(self, index):
example = {}
instance_image = Image.open(self.instance_images_path[(index % self.num_instance_images)])
if (not (instance_image.mode == 'RGB')):
instance_image = instance_image.convert('RGB')
example['instance_images'] = self.image_transforms(instance_image)
example['instance_prompt_ids'] = self.tokenizer(self.instance_prompt, padding='do_not_pad', truncation=True, max_length=self.tokenizer.model_max_length).input_ids
if self.class_data_root:
class_image = Image.open(self.class_images_path[(index % self.num_class_images)])
if (not (class_image.mode == 'RGB')):
class_image = class_image.convert('RGB')
example['class_images'] = self.image_transforms(class_image)
example['class_prompt_ids'] = self.tokenizer(self.class_prompt, padding='do_not_pad', truncation=True, max_length=self.tokenizer.model_max_length).input_ids
return example |
def float_to_float16(tensor):
    """Clamp finite values of `tensor` into float16's representable range and
    cast to float16.

    Finite magnitudes above 65504 are clamped to +/-65504; nonzero magnitudes
    below the smallest subnormal (~5.96e-08) are raised to +/-5.96e-08.
    Zeros and infinities pass through unchanged. Note: mutates `tensor`
    in place before casting.
    """
    tiny = 5.96e-08
    huge = 65504.0
    pos_inf = float('inf')
    # Same mask/assignment sequence as sequential clamping; each assignment
    # cannot create values matched by a later mask.
    tensor[(tensor > huge) & (tensor < pos_inf)] = huge
    tensor[(tensor < tiny) & (tensor > 0)] = tiny
    tensor[(tensor > -tiny) & (tensor < 0)] = -tiny
    tensor[(tensor < -huge) & (tensor > -pos_inf)] = -huge
    return np.float16(tensor)
def hdbscan(feat, min_samples=10):
    """Cluster `feat` with HDBSCAN and return per-sample labels (-1 = noise)."""
    # Local import (renamed to avoid shadowing this function's own name).
    import hdbscan as hdbscan_lib
    clusterer = hdbscan_lib.HDBSCAN(min_cluster_size=min_samples)
    return clusterer.fit_predict(feat)
class GraphRewriter(object):
def __init__(self, input_graph, mode, quantized_input_range, fallback_quantization_range=None):
self.input_graph = input_graph
self.nodes_map = self.create_nodes_map(input_graph)
self.output_graph = None
self.mode = mode
self.final_node_renames = {}
if quantized_input_range:
self.input_range = (quantized_input_range[0], quantized_input_range[1])
if (self.input_range[0] >= self.input_range[1]):
raise ValueError(('Invalid quantized_input_range: [%s,%s]' % self.input_range))
if (self.mode != 'eightbit'):
raise ValueError('quantized_input_range can only be specified in eightbit mode')
else:
self.input_range = None
if fallback_quantization_range:
self.fallback_quantization_range = [fallback_quantization_range[0], fallback_quantization_range[1]]
if (self.fallback_quantization_range[0] >= self.fallback_quantization_range[1]):
raise ValueError(('Invalid fallback_quantization_range: [%s,%s]' % self.fallback_quantization_range))
if (self.mode != 'eightbit'):
raise ValueError('fallback_quantization_range can only be specified in eightbit mode')
else:
self.fallback_quantization_range = None
self.state = None
def create_nodes_map(self, graph):
nodes_map = {}
for node in graph.node:
if (node.name not in nodes_map.keys()):
nodes_map[node.name] = node
else:
raise ValueError('Duplicate node names detected.')
return nodes_map
    def rewrite(self, output_node_names):
        """Rewrite the input graph according to self.mode.

        Args:
            output_node_names: names of the outputs that must stay reachable.

        Returns:
            The rewritten GraphDef (on an unknown mode, prints an error and
            returns the empty/partial accumulator).
        """
        self.output_graph = graph_pb2.GraphDef()
        output_nodes = [self.nodes_map[output_node_name] for output_node_name in output_node_names]
        if (self.mode == 'round'):
            self.already_visited = {}
            for output_node in output_nodes:
                self.round_nodes_recursively(output_node)
        elif (self.mode == 'quantize'):
            self.already_visited = {}
            self.already_quantized = {}
            for output_node in output_nodes:
                self.quantize_nodes_recursively(output_node)
        elif (self.mode == 'eightbit'):
            # Strip training-only nodes first, then re-resolve the outputs
            # against the pruned graph.
            self.set_input_graph(graph_util.remove_training_nodes(self.input_graph))
            output_nodes = [self.nodes_map[output_node_name] for output_node_name in output_node_names]
            self.state = EightbitizeRecursionState(already_visited={}, output_node_stack=[], merged_with_fake_quant={})
            for output_node in output_nodes:
                self.eightbitize_nodes_recursively(output_node)
            self.state = None
            if self.input_range:
                # Materialize the user-specified input range as const nodes.
                self.add_output_graph_node(create_constant_node('quantized_input_min_value', self.input_range[0], dtypes.float32, []))
                self.add_output_graph_node(create_constant_node('quantized_input_max_value', self.input_range[1], dtypes.float32, []))
            if self.fallback_quantization_range:
                self.add_output_graph_node(create_constant_node('fallback_quantization_min_value', self.fallback_quantization_range[0], dtypes.float32, []))
                self.add_output_graph_node(create_constant_node('fallback_quantization_max_value', self.fallback_quantization_range[1], dtypes.float32, []))
            # NOTE(review): dead-node removal only happens when redundant
            # quantization stripping is enabled — confirm this is intended.
            if FLAGS.strip_redundant_quantization:
                self.output_graph = self.remove_redundant_quantization(self.output_graph)
                self.remove_dead_nodes(output_node_names)
            self.apply_final_node_renames()
        elif (self.mode == 'weights'):
            self.output_graph = self.quantize_weights(self.input_graph, b'MIN_COMBINED')
            self.remove_dead_nodes(output_node_names)
        elif (self.mode == 'weights_rounded'):
            self.output_graph = self.quantize_weights(self.input_graph, self.mode)
            self.remove_dead_nodes(output_node_names)
        else:
            print((('Bad mode - ' + self.mode) + '.'))
        return self.output_graph
def round_nodes_recursively(self, current_node):
if self.already_visited[current_node.name]:
return
self.already_visited[current_node.name] = True
for input_node_name in current_node.input:
input_node_name = node_name_from_input(input_node_name)
input_node = self.nodes_map[input_node_name]
self.round_nodes_recursively(input_node)
nodes_to_quantize = ['Conv2D', 'BiasAdd', 'MatMul']
if any(((current_node.op in s) for s in nodes_to_quantize)):
new_node = node_def_pb2.NodeDef()
new_node.CopyFrom(current_node)
new_node.name = (current_node.name + '_original')
self.add_output_graph_node(new_node)
levels = (1 << FLAGS.bitdepth)
constant_name = (current_node.name + '_round_depth')
constant_tensor = constant_op.constant(levels, dtype=dtypes.int32, name=constant_name)
constant_node = constant_tensor.op.node_def
self.add_output_graph_node(constant_node)
quantize_node = node_def_pb2.NodeDef()
quantize_node.op = 'RoundToSteps'
quantize_node.name = current_node.name
quantize_node.input.extend([(current_node.name + '_original')])
quantize_node.input.extend([constant_node.name])
self.add_output_graph_node(quantize_node)
else:
new_node = node_def_pb2.NodeDef()
new_node.CopyFrom(current_node)
self.add_output_graph_node(new_node)
def quantize_nodes_recursively(self, current_node):
if self.already_visited[current_node.name]:
return
self.already_visited[current_node.name] = True
for input_node_name in current_node.input:
input_node_name = node_name_from_input(input_node_name)
input_node = self.nodes_map[input_node_name]
self.quantize_nodes_recursively(input_node)
nodes_to_quantize = ['Conv2D', 'BiasAdd', 'MatMul']
if any(((current_node.op in s) for s in nodes_to_quantize)):
for input_name in current_node.input:
input_name = node_name_from_input(input_name)
input_node = self.nodes_map[input_name]
self.quantize_node(input_node)
self.quantize_node(current_node)
else:
new_node = node_def_pb2.NodeDef()
new_node.CopyFrom(current_node)
self.add_output_graph_node(new_node)
def quantize_node(self, input_node):
    """Wrap `input_node` in a quantize/dequantize chain, keeping its name.

    The original op is re-emitted as `<name>_original`; its output is
    flattened to 1-D, reduced to scalar min/max, quantized to quint8 and
    immediately dequantized under the original name, so downstream
    consumers are unaffected.
    """
    input_name = input_node.name
    # Rewrite each node at most once, even when reached via several consumers.
    if (input_name in self.already_quantized):
        return
    self.already_quantized[input_name] = True
    original_input_name = (input_name + '_original')
    reshape_name = (input_name + '_reshape')
    reshape_dims_name = (input_name + '_reshape_dims')
    max_name = (input_name + '_max')
    min_name = (input_name + '_min')
    dims_name = (input_name + '_dims')
    quantize_name = (input_name + '_quantize')
    # The Dequantize node takes over the original name.
    dequantize_name = input_name
    original_input_node = node_def_pb2.NodeDef()
    original_input_node.CopyFrom(input_node)
    original_input_node.name = original_input_name
    self.add_output_graph_node(original_input_node)
    # Flatten to 1-D so Min/Max reduce over every element.
    reshape_dims_node = create_constant_node(reshape_dims_name, (- 1), dtypes.int32, [1])
    self.add_output_graph_node(reshape_dims_node)
    reshape_node = create_node('Reshape', reshape_name, [original_input_name, reshape_dims_name])
    set_attr_dtype(reshape_node, 'T', dtypes.float32)
    self.add_output_graph_node(reshape_node)
    dims_node = create_constant_node(dims_name, 0, dtypes.int32, [1])
    self.add_output_graph_node(dims_node)
    max_node = create_node('Max', max_name, [reshape_name, dims_name])
    set_attr_dtype(max_node, 'T', dtypes.float32)
    set_attr_bool(max_node, 'keep_dims', False)
    self.add_output_graph_node(max_node)
    min_node = create_node('Min', min_name, [reshape_name, dims_name])
    set_attr_dtype(min_node, 'T', dtypes.float32)
    set_attr_bool(min_node, 'keep_dims', False)
    self.add_output_graph_node(min_node)
    quantize_node = create_node('Quantize', quantize_name, [original_input_name, min_name, max_name])
    set_attr_dtype(quantize_node, 'T', dtypes.quint8)
    set_attr_string(quantize_node, 'mode', b'MIN_FIRST')
    self.add_output_graph_node(quantize_node)
    dequantize_node = create_node('Dequantize', dequantize_name, [quantize_name, min_name, max_name])
    set_attr_dtype(dequantize_node, 'T', dtypes.quint8)
    set_attr_string(dequantize_node, 'mode', b'MIN_FIRST')
    self.add_output_graph_node(dequantize_node)
def should_merge_with_fake_quant_node(self):
    """Return True when the node currently being emitted feeds, as input 0,
    a FakeQuantWithMinMaxVars consumer on the traversal stack."""
    stack = self.state.output_node_stack
    if not stack:
        return False
    # Stack entries are (consumer_node, input_index, quantize_input_flag).
    consumer, input_index, _ = stack[-1]
    return input_index == 0 and consumer.op in ['FakeQuantWithMinMaxVars']
def should_quantize_const(self, node):
    """Return True when the Const `node` feeds an input slot that the
    current consumer wants quantized; asserts the constant is float32."""
    stack = self.state.output_node_stack
    if not stack:
        return False
    # Stack entries are (consumer_node, input_index, quantize_input_flag).
    _, _, wants_quantized_input = stack[-1]
    if not wants_quantized_input:
        return False
    dtype = dtypes.as_dtype(node.attr['dtype'].type)
    assert dtype == dtypes.float32, ('Failed to quantized constant %s of type %s' % (node.name, dtype))
    return True
def eightbitize_nodes_recursively(self, current_node):
    """Depth-first rewrite of the graph into eight-bit quantized ops.

    Visits inputs before consumers, pushing (consumer, input_index,
    quantize_input) onto `self.state.output_node_stack` around each child
    visit so helpers can inspect the consuming context.  Each supported op
    is dispatched to its eightbitize_* method; anything else is copied
    through unchanged.  Raises if a FakeQuant* merge invariant is violated.
    """
    if (current_node.name in self.state.already_visited):
        if (self.should_merge_with_fake_quant_node() or (current_node.name in self.state.merged_with_fake_quant)):
            # NOTE(review): ValueError does not %-format its arguments; the
            # message will surface with the raw format string.
            raise ValueError('Unsupported graph structure: output of node %s is processed by a FakeQuant* node and should have no other outputs.', current_node.name)
        return
    self.state.already_visited[current_node.name] = True
    for (i, input_node_name) in enumerate(current_node.input):
        # Decide whether this particular input slot needs a quantized feed.
        quantize_input = False
        if (current_node.op in ('MatMul', 'Conv2D', 'BiasAdd', 'MaxPool', 'AvgPool', 'Relu', 'Relu6', 'BatchNormWithGlobalNormalization')):
            quantize_input = True
        elif ((current_node.op == 'Concat') and (i > 0)):
            # Input 0 of Concat is the axis; only the value inputs quantize.
            quantize_input = (dtypes.as_dtype(current_node.attr['T'].type) == dtypes.float32)
        elif ((current_node.op == 'Reshape') and (i == 0)):
            # Input 1 of Reshape is the shape; only the data input quantizes.
            quantize_input = (dtypes.as_dtype(current_node.attr['T'].type) == dtypes.float32)
        # Expose the consuming context to helpers while visiting the child.
        self.state.output_node_stack.append((current_node, i, quantize_input))
        input_node_name = node_name_from_input(input_node_name)
        input_node = self.nodes_map[input_node_name]
        self.eightbitize_nodes_recursively(input_node)
        self.state.output_node_stack.pop()
    # Dispatch on op type; unsupported ops fall through to a plain copy.
    if (current_node.op == 'MatMul'):
        self.eightbitize_mat_mul_node(current_node)
    elif (current_node.op == 'Conv2D'):
        self.eightbitize_conv_node(current_node)
    elif (current_node.op == 'BiasAdd'):
        self.eightbitize_bias_add_node(current_node)
    elif ((current_node.op == 'MaxPool') or (current_node.op == 'AvgPool')):
        self.eightbitize_single_input_tensor_node(current_node, self.add_pool_function)
    elif ((current_node.op == 'Relu') or (current_node.op == 'Relu6')):
        self.eightbitize_single_input_tensor_node(current_node, self.add_relu_function)
    elif ((current_node.op == 'Concat') and (dtypes.as_dtype(current_node.attr['T'].type) == dtypes.float32)):
        self.eightbitize_concat_node(current_node)
    elif (current_node.op == 'BatchNormWithGlobalNormalization'):
        self.eightbitize_batch_norm_node(current_node)
    elif ((current_node.op == 'Reshape') and (dtypes.as_dtype(current_node.attr['T'].type) == dtypes.float32)):
        self.eightbitize_reshape_node(current_node)
    elif (self.input_range and (current_node.op in ('Placeholder', 'PlaceholderV2'))):
        self.eightbitize_placeholder_node(current_node)
    elif (current_node.op == 'FakeQuantWithMinMaxVars'):
        # FakeQuant nodes are merged into their producers; emit nothing here.
        pass
    elif (current_node.op == 'Const'):
        if self.should_quantize_const(current_node):
            for n in quantize_weight_eightbit(current_node, b'MIN_FIRST'):
                self.add_output_graph_node(n)
        else:
            new_node = node_def_pb2.NodeDef()
            new_node.CopyFrom(current_node)
            self.add_output_graph_node(new_node)
    else:
        new_node = node_def_pb2.NodeDef()
        new_node.CopyFrom(current_node)
        self.add_output_graph_node(new_node)
    # A FakeQuant consumer must have been absorbed by the dispatch above.
    if (self.should_merge_with_fake_quant_node() and (current_node.name not in self.state.merged_with_fake_quant)):
        raise ValueError(('FakeQuant* node %s failed to merge with node %s of type %s' % (self.state.output_node_stack[(- 1)][0], current_node.name, current_node.op)))
def add_eightbit_prologue_nodes(self, original_node):
    """Quantize every input of `original_node`.

    Returns the input list for the quantized op: all quantized tensor
    names first, followed by the interleaved min/max output names of each
    input, in input order.
    """
    namespace_prefix = original_node.name + '_eightbit'
    reshape_dims_name, reduction_dims_name = self.add_common_quantization_nodes(namespace_prefix)
    quantized_names = []
    min_max_names = []
    for original_input_name in original_node.input:
        quantize_input_name, min_input_name, max_input_name = self.eightbitize_input_to_node(
            namespace_prefix, original_input_name, reshape_dims_name, reduction_dims_name)
        quantized_names.append(quantize_input_name)
        min_max_names.extend([min_input_name, max_input_name])
    # Quantized tensors first, then all of the min/max pairs.
    return quantized_names + min_max_names
def add_common_quantization_nodes(self, namespace_prefix):
    """Emit the shared reshape-dims (-1) and reduction-dims (0) constants
    used by the per-input quantization subgraphs; return their names."""
    reshape_dims_name = namespace_prefix + '_reshape_dims'
    reduction_dims_name = namespace_prefix + '_reduction_dims'
    self.add_output_graph_node(
        create_constant_node(reshape_dims_name, -1, dtypes.int32, [1]))
    self.add_output_graph_node(
        create_constant_node(reduction_dims_name, 0, dtypes.int32, [1]))
    return reshape_dims_name, reduction_dims_name
def eightbitize_input_to_node(self, namespace_prefix, original_input_name, reshape_dims_name, reduction_dims_name):
    """Emit the reshape/min/max/QuantizeV2 subgraph for one input tensor.

    Returns (quantized_tensor_name, min_output_name, max_output_name),
    where min/max are output ports :1 and :2 of the QuantizeV2 node.
    """
    unique_input_name = unique_node_name_from_input(original_input_name)
    reshape_input_name = ((namespace_prefix + '_reshape_') + unique_input_name)
    min_input_name = ((namespace_prefix + '_min_') + unique_input_name)
    max_input_name = ((namespace_prefix + '_max_') + unique_input_name)
    quantize_input_name = ((namespace_prefix + '_quantize_') + unique_input_name)
    # Flatten to 1-D so Min/Max reduce over every element of the input.
    reshape_input_node = create_node('Reshape', reshape_input_name, [original_input_name, reshape_dims_name])
    set_attr_dtype(reshape_input_node, 'T', dtypes.float32)
    self.add_output_graph_node(reshape_input_node)
    min_input_node = create_node('Min', min_input_name, [reshape_input_name, reduction_dims_name])
    set_attr_dtype(min_input_node, 'T', dtypes.float32)
    set_attr_bool(min_input_node, 'keep_dims', False)
    self.add_output_graph_node(min_input_node)
    max_input_node = create_node('Max', max_input_name, [reshape_input_name, reduction_dims_name])
    set_attr_dtype(max_input_node, 'T', dtypes.float32)
    set_attr_bool(max_input_node, 'keep_dims', False)
    self.add_output_graph_node(max_input_node)
    quantize_input_node = create_node('QuantizeV2', quantize_input_name, [original_input_name, min_input_name, max_input_name])
    set_attr_dtype(quantize_input_node, 'T', dtypes.quint8)
    set_attr_string(quantize_input_node, 'mode', b'MIN_FIRST')
    self.add_output_graph_node(quantize_input_node)
    # QuantizeV2 emits the (possibly adjusted) min/max on ports :1 and :2.
    min_output_name = (quantize_input_name + ':1')
    max_output_name = (quantize_input_name + ':2')
    return (quantize_input_name, min_output_name, max_output_name)
def add_quantize_down_nodes(self, original_node, quantized_output_name):
    """Requantize a qint32 result down to quint8; return the new node name.

    The requantization range comes from (in priority order): a merged
    FakeQuant node's min/max inputs, the global fallback range, or a
    freshly emitted RequantizationRange node.
    """
    quantized_outputs = [quantized_output_name, (quantized_output_name + ':1'), (quantized_output_name + ':2')]
    min_max_inputs = None
    if self.should_merge_with_fake_quant_node():
        # Reuse the FakeQuant node's explicit range and mark it merged.
        fake_quant_node = self.state.output_node_stack[(- 1)][0]
        min_max_inputs = [fake_quant_node.input[1], fake_quant_node.input[2]]
        assert (original_node.name not in self.state.merged_with_fake_quant)
        self.state.merged_with_fake_quant[original_node.name] = True
    elif self.fallback_quantization_range:
        min_max_inputs = ['fallback_quantization_min_value:0', 'fallback_quantization_max_value:0']
    else:
        # Compute the range dynamically from the qint32 output.
        requant_range_node = create_node('RequantizationRange', (original_node.name + '_eightbit_requant_range'), quantized_outputs)
        set_attr_dtype(requant_range_node, 'Tinput', dtypes.qint32)
        self.add_output_graph_node(requant_range_node)
        min_max_inputs = [(requant_range_node.name + ':0'), (requant_range_node.name + ':1')]
    requantize_node = create_node('Requantize', (original_node.name + '_eightbit_requantize'), (quantized_outputs + min_max_inputs))
    set_attr_dtype(requantize_node, 'Tinput', dtypes.qint32)
    set_attr_dtype(requantize_node, 'out_type', dtypes.quint8)
    self.add_output_graph_node(requantize_node)
    return requantize_node.name
def add_dequantize_result_node(self, quantized_output_name, original_node_name, min_tensor_index=1):
    """Emit the final Dequantize so float consumers see the original name.

    If the result is consumed by a FakeQuant node, the Dequantize instead
    takes over the FakeQuant node's name and its explicit min/max inputs.
    """
    min_max_inputs = [('%s:%s' % (quantized_output_name, min_tensor_index)), ('%s:%s' % (quantized_output_name, (min_tensor_index + 1)))]
    dequantize_name = original_node_name
    if self.should_merge_with_fake_quant_node():
        fake_quant_node = self.state.output_node_stack[(- 1)][0]
        if (original_node_name not in self.state.merged_with_fake_quant):
            min_max_inputs = [fake_quant_node.input[1], fake_quant_node.input[2]]
        self.state.merged_with_fake_quant[original_node_name] = True
        dequantize_name = fake_quant_node.name
    dequantize_node = create_node('Dequantize', dequantize_name, [quantized_output_name, min_max_inputs[0], min_max_inputs[1]])
    set_attr_dtype(dequantize_node, 'T', dtypes.quint8)
    set_attr_string(dequantize_node, 'mode', b'MIN_FIRST')
    self.add_output_graph_node(dequantize_node)
def eightbitize_mat_mul_node(self, original_node):
    """Replace a MatMul with QuantizedMatMul + requantize + dequantize."""
    quantized_mat_mul_name = (original_node.name + '_eightbit_quantized_mat_mul')
    all_input_names = self.add_eightbit_prologue_nodes(original_node)
    quantized_mat_mul_node = create_node('QuantizedMatMul', quantized_mat_mul_name, all_input_names)
    set_attr_dtype(quantized_mat_mul_node, 'T1', dtypes.quint8)
    set_attr_dtype(quantized_mat_mul_node, 'T2', dtypes.quint8)
    # The quantized kernel accumulates into 32-bit integers.
    set_attr_dtype(quantized_mat_mul_node, 'Toutput', dtypes.qint32)
    copy_attr(quantized_mat_mul_node, 'transpose_a', original_node.attr['transpose_a'])
    copy_attr(quantized_mat_mul_node, 'transpose_b', original_node.attr['transpose_b'])
    self.add_output_graph_node(quantized_mat_mul_node)
    quantize_down_name = self.add_quantize_down_nodes(original_node, quantized_mat_mul_name)
    self.add_dequantize_result_node(quantize_down_name, original_node.name)
def eightbitize_conv_node(self, original_node):
    """Replace a Conv2D with QuantizedConv2D + requantize + dequantize."""
    all_input_names = self.add_eightbit_prologue_nodes(original_node)
    quantized_conv_name = (original_node.name + '_eightbit_quantized_conv')
    quantized_conv_node = create_node('QuantizedConv2D', quantized_conv_name, all_input_names)
    copy_attr(quantized_conv_node, 'strides', original_node.attr['strides'])
    copy_attr(quantized_conv_node, 'padding', original_node.attr['padding'])
    set_attr_dtype(quantized_conv_node, 'Tinput', dtypes.quint8)
    set_attr_dtype(quantized_conv_node, 'Tfilter', dtypes.quint8)
    # The quantized kernel accumulates into 32-bit integers.
    set_attr_dtype(quantized_conv_node, 'out_type', dtypes.qint32)
    self.add_output_graph_node(quantized_conv_node)
    quantize_down_name = self.add_quantize_down_nodes(original_node, quantized_conv_name)
    self.add_dequantize_result_node(quantize_down_name, original_node.name)
def eightbitize_bias_add_node(self, original_node):
    """Replace a BiasAdd with QuantizedBiasAdd + requantize + dequantize."""
    quantized_bias_add_name = (original_node.name + '_eightbit_quantized_bias_add')
    all_input_names = self.add_eightbit_prologue_nodes(original_node)
    quantized_bias_add_node = create_node('QuantizedBiasAdd', quantized_bias_add_name, all_input_names)
    set_attr_dtype(quantized_bias_add_node, 'T1', dtypes.quint8)
    set_attr_dtype(quantized_bias_add_node, 'T2', dtypes.quint8)
    # The quantized kernel accumulates into 32-bit integers.
    set_attr_dtype(quantized_bias_add_node, 'out_type', dtypes.qint32)
    self.add_output_graph_node(quantized_bias_add_node)
    quantize_down_name = self.add_quantize_down_nodes(original_node, quantized_bias_add_name)
    self.add_dequantize_result_node(quantize_down_name, original_node.name)
def eightbitize_single_input_tensor_node(self, original_node, add_op_function):
    """Replace a single-input op with its `Quantized<Op>` counterpart.

    `add_op_function` fills in the op-specific attributes (pooling params
    or the relu input dtype).  These ops keep the quint8 range, so no
    requantize step is needed before the final dequantize.
    """
    quantized_op_name = (original_node.name + '_eightbit_quantized')
    quantized_op_type = ('Quantized' + original_node.op)
    all_input_names = self.add_eightbit_prologue_nodes(original_node)
    quantized_op_node = create_node(quantized_op_type, quantized_op_name, all_input_names)
    add_op_function(original_node, quantized_op_node)
    self.add_output_graph_node(quantized_op_node)
    self.add_dequantize_result_node(quantized_op_name, original_node.name)
def add_pool_function(self, original_node, quantized_op_node):
    """Copy pooling attributes onto the quantized pool node."""
    set_attr_dtype(quantized_op_node, 'T', dtypes.quint8)
    for attr_name in ('ksize', 'strides', 'padding'):
        copy_attr(quantized_op_node, attr_name, original_node.attr[attr_name])
def add_relu_function(self, unused_arg_node, quantized_op_node):
    """Set the input dtype on a quantized Relu/Relu6 node."""
    set_attr_dtype(quantized_op_node, 'Tinput', dtypes.quint8)
def eightbitize_concat_node(self, original_node):
    """Replace a Concat with QuantizedConcat over quantized value inputs.

    Input 0 of Concat is the axis and passes through unchanged; inputs
    1..N are each quantized, and the min/max names are grouped (all mins,
    then all maxes) as QuantizedConcat expects.
    """
    namespace_prefix = (original_node.name + '_eightbit')
    quantized_concat_name = (namespace_prefix + '_quantized_concat')
    (reshape_dims_name, reduction_dims_name) = self.add_common_quantization_nodes(namespace_prefix)
    # Input 0 is the concat axis, not a value tensor.
    shape_input_name = original_node.input[0]
    original_inputs = original_node.input[1:]
    input_names = []
    min_names = []
    max_names = []
    for original_input_name in original_inputs:
        (quantize_input_name, min_input_name, max_input_name) = self.eightbitize_input_to_node(namespace_prefix, original_input_name, reshape_dims_name, reduction_dims_name)
        input_names.append(quantize_input_name)
        min_names.append(min_input_name)
        max_names.append(max_input_name)
    all_input_names = [shape_input_name]
    all_input_names.extend(input_names)
    all_input_names.extend(min_names)
    all_input_names.extend(max_names)
    quantized_concat_node = create_node('QuantizedConcat', quantized_concat_name, all_input_names)
    set_attr_int(quantized_concat_node, 'N', len(original_inputs))
    set_attr_dtype(quantized_concat_node, 'T', dtypes.quint8)
    self.add_output_graph_node(quantized_concat_node)
    self.add_dequantize_result_node(quantized_concat_name, original_node.name)
def eightbitize_placeholder_node(self, current_node):
    """Turn a float Placeholder into a quint8 one plus a Dequantize.

    The quantized placeholder is emitted as `<name>_original_input`; the
    Dequantize (using the global quantized-input range constants) takes
    over the original name.  `final_node_renames` later restores the
    original placeholder name on the quantized input.
    """
    name = current_node.name
    output_node = node_def_pb2.NodeDef()
    output_node.CopyFrom(current_node)
    set_attr_dtype(output_node, 'dtype', dtypes.quint8)
    output_node.name += '_original_input'
    self.add_output_graph_node(output_node)
    dequantize_node = create_node('Dequantize', name, [output_node.name, 'quantized_input_min_value', 'quantized_input_max_value'])
    set_attr_dtype(dequantize_node, 'T', dtypes.quint8)
    set_attr_string(dequantize_node, 'mode', b'MIN_FIRST')
    self.add_output_graph_node(dequantize_node)
    # Swap the names back in the final pass: the quantized placeholder gets
    # the original name, and the dequantize gets a '_dequantize' suffix.
    self.final_node_renames[output_node.name] = name
    self.final_node_renames[dequantize_node.name] = (name + '_dequantize')
def eightbitize_reshape_node(self, original_node):
    """Replace a float Reshape with QuantizedReshape on a quantized input.

    Only input 0 (the data) is quantized; input 1 (the target shape)
    passes through unchanged.
    """
    namespace_prefix = (original_node.name + '_eightbit')
    quantized_reshape_name = (namespace_prefix + '_quantized_reshape')
    (reshape_dims_name, reduction_dims_name) = self.add_common_quantization_nodes(namespace_prefix)
    shape_input_name = original_node.input[1]
    (quantize_input_name, min_input_name, max_input_name) = self.eightbitize_input_to_node(namespace_prefix, original_node.input[0], reshape_dims_name, reduction_dims_name)
    quantized_reshape_node = create_node('QuantizedReshape', quantized_reshape_name, [quantize_input_name, shape_input_name, min_input_name, max_input_name])
    set_attr_dtype(quantized_reshape_node, 'T', dtypes.quint8)
    self.add_output_graph_node(quantized_reshape_node)
    self.add_dequantize_result_node(quantized_reshape_name, original_node.name)
def eightbitize_batch_norm_node(self, original_node):
    """Replace BatchNormWithGlobalNormalization with its quantized form.

    Each of the five inputs (input, mean, variance, beta, gamma) is
    quantized separately; the quantized op takes every tensor followed by
    its min/max pair, and the qint32 result is requantized and dequantized.
    """
    namespace_prefix = (original_node.name + '_eightbit')
    # Fixed input layout of BatchNormWithGlobalNormalization.
    original_input_name = original_node.input[0]
    original_mean_name = original_node.input[1]
    original_variance_name = original_node.input[2]
    original_beta_name = original_node.input[3]
    original_gamma_name = original_node.input[4]
    quantized_batch_norm_name = (namespace_prefix + '_quantized_batch_norm')
    (reshape_dims_name, reduction_dims_name) = self.add_common_quantization_nodes(namespace_prefix)
    (quantize_input_name, min_input_name, max_input_name) = self.eightbitize_input_to_node(namespace_prefix, original_input_name, reshape_dims_name, reduction_dims_name)
    (quantize_mean_name, min_mean_name, max_mean_name) = self.eightbitize_input_to_node(namespace_prefix, original_mean_name, reshape_dims_name, reduction_dims_name)
    (quantize_variance_name, min_variance_name, max_variance_name) = self.eightbitize_input_to_node(namespace_prefix, original_variance_name, reshape_dims_name, reduction_dims_name)
    (quantize_beta_name, min_beta_name, max_beta_name) = self.eightbitize_input_to_node(namespace_prefix, original_beta_name, reshape_dims_name, reduction_dims_name)
    (quantize_gamma_name, min_gamma_name, max_gamma_name) = self.eightbitize_input_to_node(namespace_prefix, original_gamma_name, reshape_dims_name, reduction_dims_name)
    quantized_batch_norm_node = create_node('QuantizedBatchNormWithGlobalNormalization', quantized_batch_norm_name, [quantize_input_name, min_input_name, max_input_name, quantize_mean_name, min_mean_name, max_mean_name, quantize_variance_name, min_variance_name, max_variance_name, quantize_beta_name, min_beta_name, max_beta_name, quantize_gamma_name, min_gamma_name, max_gamma_name])
    set_attr_dtype(quantized_batch_norm_node, 'Tinput', dtypes.quint8)
    set_attr_dtype(quantized_batch_norm_node, 'out_type', dtypes.qint32)
    copy_attr(quantized_batch_norm_node, 'scale_after_normalization', original_node.attr['scale_after_normalization'])
    copy_attr(quantized_batch_norm_node, 'variance_epsilon', original_node.attr['variance_epsilon'])
    self.add_output_graph_node(quantized_batch_norm_node)
    quantize_down_name = self.add_quantize_down_nodes(original_node, quantized_batch_norm_name)
    self.add_dequantize_result_node(quantize_down_name, original_node.name)
def add_output_graph_node(self, output_node):
    """Append `output_node` to the output GraphDef being built."""
    self.output_graph.node.append(output_node)
def remove_redundant_quantization(self, old_graph):
    """Collapse back-to-back Dequantize -> Quantize pairs in `old_graph`.

    Finds Quantize(V2) nodes fed directly by a Dequantize of the same type
    whose min/max derive from the same source, and reroutes consumers of
    the Quantize's tensor/min/max outputs straight to the Dequantize's
    inputs.  NOTE: the second loop rewrites `old_graph`'s nodes in place
    before copying them into the fresh output graph.
    """
    old_nodes_map = self.create_nodes_map(old_graph)
    self.output_graph = graph_pb2.GraphDef()
    inputs_to_rename = {}
    # Pass 1: find redundant Quantize nodes and record the rerouting.
    for node in old_graph.node:
        if (node.op not in ['Quantize', 'QuantizeV2']):
            continue
        dequantize_node_name = node_name_from_input(node.input[0])
        if (dequantize_node_name not in old_nodes_map):
            raise ValueError((((("Input node name '" + dequantize_node_name) + "' not found in node '") + node.name) + "'"))
        dequantize_node = old_nodes_map[dequantize_node_name]
        if (dequantize_node.op != 'Dequantize'):
            continue
        # Both halves must use the same quantized element type.
        if (node.attr['T'] != dequantize_node.attr['T']):
            continue
        min_node_name = node_name_from_input(node.input[1])
        max_node_name = node_name_from_input(node.input[2])
        min_node = old_nodes_map[min_node_name]
        max_node = old_nodes_map[max_node_name]
        is_min_right_type = (min_node.op in ['Min', 'Dequantize'])
        is_max_right_type = (max_node.op in ['Max', 'Dequantize'])
        if ((not is_min_right_type) or (not is_max_right_type)):
            print(("Didn't find expected types on inputs : %s, %s." % (min_node.op, max_node.op)))
            continue
        min_node_input_name = node_name_from_input(min_node.input[0])
        max_node_input_name = node_name_from_input(max_node.input[0])
        # The min and max must be computed from the same tensor, either
        # directly or through a Concat -> Min chain.
        is_same_input = False
        if (min_node_input_name == max_node_input_name):
            is_same_input = True
        else:
            first_min_node_input = old_nodes_map[min_node_input_name]
            if (first_min_node_input.op == 'Concat'):
                second_min_node_name = node_name_from_input(first_min_node_input.input[1])
                second_min_node = old_nodes_map[second_min_node_name]
                if (second_min_node.op == 'Min'):
                    second_min_node_input_name = node_name_from_input(second_min_node.input[0])
                    is_same_input = (second_min_node_input_name == max_node_input_name)
        if (not is_same_input):
            print(('Different min/max inputs: ' + min_node_input_name))
            continue
        # Redirect the Quantize's tensor/min/max outputs to the values the
        # preceding Dequantize consumed.
        dequantize_source_name = node_name_from_input(dequantize_node.input[0])
        node_tensor_name = ensure_tensor_name_has_port(node.name)
        min_tensor_name = (node.name + ':1')
        max_tensor_name = (node.name + ':2')
        inputs_to_rename[node_tensor_name] = dequantize_source_name
        inputs_to_rename[min_tensor_name] = dequantize_node.input[1]
        inputs_to_rename[max_tensor_name] = dequantize_node.input[2]
    # Pass 2: apply the rerouting (mutating old_graph nodes in place) and
    # copy every node into the output graph.
    for node in old_graph.node:
        for (index, input_full_name) in enumerate(node.input):
            input_name = ensure_tensor_name_has_port(input_full_name)
            if (input_name in inputs_to_rename):
                node.input[index] = inputs_to_rename[input_name]
        self.add_output_graph_node(node)
    return self.output_graph
def apply_final_node_renames(self):
    """Apply `self.final_node_renames` to node names and their references.

    Rebuilds the output graph, renaming each node and rewriting every
    input reference while preserving any ':port' suffix on the reference.
    """
    old_graph = self.output_graph
    self.output_graph = graph_pb2.GraphDef()
    for node in old_graph.node:
        node.name = self.final_node_renames.get(node.name, node.name)
        for (index, input_name) in enumerate(node.input):
            node_name = node_name_from_input(input_name)
            input_full_name = ensure_tensor_name_has_port(input_name)
            if (node_name in self.final_node_renames):
                # Keep the original ':port' suffix on the renamed reference.
                node.input[index] = ('%s%s' % (self.final_node_renames[node_name], input_full_name[len(node_name):]))
        self.add_output_graph_node(node)
    return self.output_graph
def remove_dead_nodes(self, output_names):
    """Prune the output graph to the subgraph reachable from `output_names`."""
    old_output_graph = self.output_graph
    self.output_graph = graph_util.extract_sub_graph(old_output_graph, output_names)
def quantize_weights(self, input_graph, quantization_mode):
    """Return a copy of `input_graph` with float32 Const weights quantized.

    `quantization_mode` may be given as str or bytes: 'weights_rounded'
    rounds weight values in place, while 'MIN_COMBINED'/'MIN_FIRST'
    replace each float constant with an eight-bit quantized equivalent.
    Raises ValueError for any other mode.  (Previously 'weights_rounded'
    was compared as str but the eight-bit modes only as bytes, so a str
    'MIN_FIRST' was wrongly rejected.)
    """
    # Normalize to str for comparison; downstream eight-bit quantization
    # expects the bytes form.
    if isinstance(quantization_mode, bytes):
        mode_str = quantization_mode.decode('utf-8')
    else:
        mode_str = quantization_mode
    output_graph = graph_pb2.GraphDef()
    for input_node in input_graph.node:
        # Only float32 constants are quantized; everything else is copied.
        should_quantize = False
        if (input_node.op == 'Const'):
            dtype = dtypes.as_dtype(input_node.attr['dtype'].type)
            if (dtype == dtypes.float32):
                should_quantize = True
        if should_quantize:
            if (mode_str == 'weights_rounded'):
                output_graph.node.extend(quantize_weight_rounded(input_node))
            elif (mode_str in ('MIN_COMBINED', 'MIN_FIRST')):
                output_graph.node.extend(quantize_weight_eightbit(input_node, mode_str.encode('utf-8')))
            else:
                raise ValueError(('Unsupported quantization mode %s.' % quantization_mode))
        else:
            output_node = node_def_pb2.NodeDef()
            output_node.CopyFrom(input_node)
            output_graph.node.extend([output_node])
    return output_graph
def set_input_graph(self, new_input_graph):
    """Replace the input graph and rebuild the name -> node lookup map."""
    self.input_graph = new_input_graph
    self.nodes_map = self.create_nodes_map(self.input_graph)
class DeepImage(nn.Module):
    """Image component: a (optionally pretrained) ResNet or small CNN backbone,
    optionally followed by an MLP head.

    Args:
        pretrained: use a torchvision ResNet backbone (True) or a small
            scratch CNN (False).
        resnet_architecture: 18, 34 or 50.
        freeze_n: number of leading backbone layers to freeze (max 8).
        head_hidden_dims: hidden sizes of the optional MLP head; the first
            entry must equal the backbone output dimension.
        head_*: MLP head hyperparameters, forwarded to MLP.
    """

    def __init__(self, pretrained: bool=True, resnet_architecture: int=18, freeze_n: int=6, head_hidden_dims: Optional[List[int]]=None, head_activation: str='relu', head_dropout: float=0.1, head_batchnorm: bool=False, head_batchnorm_last: bool=False, head_linear_first: bool=False):
        super(DeepImage, self).__init__()
        self.pretrained = pretrained
        self.resnet_architecture = resnet_architecture
        self.freeze_n = freeze_n
        self.head_hidden_dims = head_hidden_dims
        self.head_activation = head_activation
        self.head_dropout = head_dropout
        self.head_batchnorm = head_batchnorm
        self.head_batchnorm_last = head_batchnorm_last
        self.head_linear_first = head_linear_first
        if pretrained:
            vision_model = self.select_resnet_architecture(resnet_architecture)
            # Drop the final fc layer; keep everything up to the global pool.
            backbone_layers = list(vision_model.children())[:(- 1)]
            self.backbone = self._build_backbone(backbone_layers, freeze_n)
        else:
            self.backbone = self._conv_nn()
        # NOTE(review): resnet50 ends with 2048 channels, not 512 — confirm
        # whether this hard-coded value is correct for that architecture.
        self.output_dim = 512
        if (self.head_hidden_dims is not None):
            assert (self.head_hidden_dims[0] == self.output_dim), 'The output dimension from the backbone ({}) is not consistent with the expected input dimension ({}) of the fc-head'.format(self.output_dim, self.head_hidden_dims[0])
            self.imagehead = MLP(head_hidden_dims, head_activation, head_dropout, head_batchnorm, head_batchnorm_last, head_linear_first)
            self.output_dim = head_hidden_dims[(- 1)]

    def forward(self, x: Tensor) -> Tensor:
        """Run the backbone, flatten, and apply the head if configured."""
        x = self.backbone(x)
        x = x.view(x.size(0), (- 1))
        if (self.head_hidden_dims is not None):
            out = self.imagehead(x)
            return out
        else:
            return x

    @staticmethod
    def select_resnet_architecture(resnet_architecture: int):
        """Return the pretrained torchvision ResNet for the given depth.

        Declared @staticmethod: it was previously defined without `self`
        but called as an instance method, which raised a TypeError.
        """
        if (resnet_architecture == 18):
            return models.resnet18(pretrained=True)
        elif (resnet_architecture == 34):
            return models.resnet34(pretrained=True)
        elif (resnet_architecture == 50):
            return models.resnet50(pretrained=True)
        # Fail loudly instead of silently returning None.
        raise ValueError("'resnet_architecture' must be one of 18, 34 or 50")

    def _conv_nn(self):
        """Small scratch CNN ending in a 512-channel adaptive average pool."""
        return nn.Sequential(conv_layer(3, 64, 3), conv_layer(64, 128, 1, maxpool=False), conv_layer(128, 256, 1, maxpool=False), conv_layer(256, 512, 1, maxpool=False, adaptiveavgpool=True))

    def _build_backbone(self, backbone_layers, freeze_n):
        """Freeze the first `freeze_n` layers and return the full Sequential."""
        if (freeze_n > 8):
            raise ValueError("'freeze_n' must be less than or equal to 8 for resnet architectures")
        frozen_layers = []
        trainable_layers = backbone_layers[freeze_n:]
        for layer in backbone_layers[:freeze_n]:
            for param in layer.parameters():
                param.requires_grad = False
            frozen_layers.append(layer)
        trainable_and_frozen_layers = (frozen_layers + trainable_layers)
        return nn.Sequential(*trainable_and_frozen_layers)
class EfficientNetSqueezeExciteLayer(nn.Module):
    """Squeeze-and-excitation block: global-pool the feature map, pass it
    through a channel bottleneck, and use the sigmoid output to gate the
    input channels multiplicatively."""

    def __init__(self, config: EfficientNetConfig, in_dim: int, expand_dim: int, expand: bool=False):
        super().__init__()
        self.dim = expand_dim if expand else in_dim
        # Bottleneck width, never below one channel.
        self.dim_se = max(1, int(in_dim * config.squeeze_expansion_ratio))
        self.squeeze = nn.AdaptiveAvgPool2d(output_size=1)
        self.reduce = nn.Conv2d(in_channels=self.dim, out_channels=self.dim_se, kernel_size=1, padding='same')
        self.expand = nn.Conv2d(in_channels=self.dim_se, out_channels=self.dim, kernel_size=1, padding='same')
        self.act_reduce = ACT2FN[config.hidden_act]
        self.act_expand = nn.Sigmoid()

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        residual = hidden_states
        # Squeeze: global average pool to 1x1, then reduce channels.
        pooled = self.act_reduce(self.reduce(self.squeeze(hidden_states)))
        # Excite: restore the channel count and squash to (0, 1).
        gate = self.act_expand(self.expand(pooled))
        # Gate the original activations channel-wise.
        return torch.mul(residual, gate)
# NOTE(review): the decorator line was garbled in the source
# ("_task('speech_commands')"); reconstructed as fairseq's task
# registration decorator — confirm against the original file.
@register_task('speech_commands')
class SpeechCommandsTask(FairseqTask):
    """Fairseq task for Speech Commands keyword classification."""

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the argument parser."""
        parser.add_argument('data', metavar='FILE', help='file prefix for data')
        parser.add_argument('--num-classes', type=int, default=(- 1), help='number of classes or regression targets')
        parser.add_argument('--regression-target', action='store_true', default=False)
        parser.add_argument('--no-shuffle', action='store_true', default=False)
        parser.add_argument('--shorten-method', default='none', choices=['none', 'truncate', 'random_crop'], help='if not none, shorten sequences that exceed --tokens-per-sample')
        parser.add_argument('--shorten-data-split-list', default='', help='comma-separated list of dataset splits to apply shortening to, e.g., "train,valid" (default: all dataset splits)')
        parser.add_argument('--sc-all-classes', action='store_true', default=False)
        parser.add_argument('--sc-dropped-rate', type=float, default=0.0)
        parser.add_argument('--mfcc', action='store_true', default=False)

    def __init__(self, args):
        super().__init__(args)
        if (not hasattr(args, 'max_positions')):
            self._max_positions = (args.max_source_positions, args.max_target_positions)
        else:
            self._max_positions = args.max_positions
        # Side effect: downstream components read tokens_per_sample off args.
        args.tokens_per_sample = self._max_positions

    @classmethod
    def load_dictionary(cls, filename):
        """This task has no dictionary (raw audio in, class label out)."""
        raise NotImplementedError

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Construct the task from parsed command-line arguments."""
        return SpeechCommandsTask(args)

    def load_dataset(self, split, combine=False, **kwargs):
        """Load (and cache) one split of the Speech Commands dataset."""
        dataset = SpeechCommandsDataset(partition=split, length=16000, mfcc=self.args.mfcc, sr=1, dropped_rate=self.args.sc_dropped_rate, path=self.args.data, all_classes=self.args.sc_all_classes)
        logger.info('Loaded {0} with #samples: {1}'.format(split, len(dataset)))
        self.datasets[split] = dataset
        return self.datasets[split]

    def build_model(self, args):
        from fairseq import models
        model = models.build_model(args, self)
        return model

    def max_positions(self):
        return self._max_positions

    def target_dictionary(self):
        # NOTE(review): fairseq tasks usually expose this as a @property;
        # left as a plain method to preserve existing call sites.
        return None
def sub_UNK(sent, word_dict):
    """Replace every whitespace-separated token of `sent` that is not a key
    of `word_dict` with the literal '<UNK>' marker."""
    tokens = [(w if w in word_dict else '<UNK>') for w in sent.split()]
    return ' '.join(tokens)
def solve(pols, tasks=0, mvfocus=0, precision='d', checkin=True, dictionary_output=False, verbose_level=0):
    """Blackbox solve of the square Laurent polynomial system in `pols`.

    Args:
        pols: list of string representations of the polynomials.
        tasks: number of tasks, 0 for no multitasking.
        mvfocus: nonzero focuses the solver on polyhedral homotopies.
        precision: 'd' (double), 'dd' (double double) or 'qd' (quad double).
        checkin: validate the input system before solving.
        dictionary_output: return solutions as a list of dictionaries.
        verbose_level: verbosity passed through to the solver calls.

    Returns the list of solutions (or dictionaries), or None on invalid
    input; prints a message and returns None for an unsupported precision.
    """
    if checkin:
        errmsg = 'The blackbox solver accepts only square systems,'
        if (not solve_checkin(pols, errmsg)):
            return None
        if (tasks < 0):
            print('The number of tasks must be a nonnegative integer.')
            return None
    # One (set, solve, get, clear) quadruple per precision level; the call
    # sequence is identical across precisions, so dispatch on the functions
    # instead of duplicating the three branches.
    dispatch = {
        'd': (set_double_Laurent_system, solve_double_Laurent_system,
              get_double_solutions, clear_double_solutions),
        'dd': (set_double_double_Laurent_system, solve_double_double_Laurent_system,
               get_double_double_solutions, clear_double_double_solutions),
        'qd': (set_quad_double_Laurent_system, solve_quad_double_Laurent_system,
               get_quad_double_solutions, clear_quad_double_solutions),
    }
    if precision not in dispatch:
        print('wrong level of precision, use d, dd, or qd')
        return None
    set_system, solve_system, get_solutions, clear_solutions = dispatch[precision]
    set_system(len(pols), pols, vrblvl=verbose_level)
    # nbr (solution count) and roco (root count info) were unused in the
    # original as well; kept for documentation of the solver's return.
    (nbr, roco) = solve_system(nbtasks=tasks, mvfocus=mvfocus, vrblvl=verbose_level)
    sols = get_solutions(vrblvl=verbose_level)
    clear_solutions(vrblvl=verbose_level)
    return formdictlist(sols) if dictionary_output else sols
def _process_image_files(name, filenames, synsets, labels, humans, bboxes, num_shards):
    """Shard the image list across FLAGS.num_threads worker threads and
    write all images to TFRecord shards.

    Args:
        name: dataset split name (e.g. 'train').
        filenames / synsets / labels / humans / bboxes: parallel lists of
            per-image metadata (asserted to be the same length).
        num_shards: total number of output shards.
    """
    assert (len(filenames) == len(synsets))
    assert (len(filenames) == len(labels))
    assert (len(filenames) == len(humans))
    assert (len(filenames) == len(bboxes))
    # np.int was removed in NumPy 1.24; the builtin int is the documented
    # replacement.
    spacing = np.linspace(0, len(filenames), (FLAGS.num_threads + 1)).astype(int)
    # Contiguous [start, end) index ranges, one per worker thread.
    ranges = []
    for i in xrange((len(spacing) - 1)):
        ranges.append([spacing[i], spacing[(i + 1)]])
    print(('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges)))
    sys.stdout.flush()
    # Coordinator waits for all threads to terminate.
    coord = tf.train.Coordinator()
    # One shared coder instance for TensorFlow-based image decoding.
    coder = ImageCoder()
    threads = []
    for thread_index in xrange(len(ranges)):
        args = (coder, thread_index, ranges, name, filenames, synsets, labels, humans, bboxes, num_shards)
        t = threading.Thread(target=_process_image_files_batch, args=args)
        t.start()
        threads.append(t)
    coord.join(threads)
    print(('%s: Finished writing all %d images in data set.' % (datetime.now(), len(filenames))))
    sys.stdout.flush()
def _malfunction_prob(rate: float) -> float:
if (rate <= 0):
return 0.0
else:
return (1 - np.exp((- rate))) |
def zero_shot_prompt(example: Example) -> str:
    """Create a zero-shot prompt for a single example.

    Appends the fixed answer-format instruction (from the Scalable
    Oversight prompt style) to the base prompt.  The original docstring
    line was garbled/unterminated and has been rewritten.
    """
    prompt = base_prompt(example)
    prompt += f'''
Format your response as follows: "The correct answer is (insert answer here)"'''
    return prompt
class DenseDecoder(tools.Module):
    """MLP decoder head that maps features to an Independent Normal or
    Bernoulli distribution over a tensor of shape `shape`."""

    def __init__(self, shape, layers, units, dist='normal', act=tf.nn.elu):
        self._shape = shape
        self._layers = layers
        self._units = units
        self._dist = dist
        self._act = act

    def __call__(self, features):
        hidden = features
        # Hidden stack; layer variables are cached by name via self.get.
        for index in range(self._layers):
            hidden = self.get(f'h{index}', tfkl.Dense, self._units, self._act)(hidden)
        # Linear output layer producing one value per element of shape.
        hidden = self.get('hout', tfkl.Dense, np.prod(self._shape))(hidden)
        # Restore the event shape after the batch dimensions of features.
        event_shape = tf.concat([tf.shape(features)[:(- 1)], self._shape], 0)
        hidden = tf.reshape(hidden, event_shape)
        if (self._dist == 'normal'):
            return tfd.Independent(tfd.Normal(hidden, 1), len(self._shape))
        if (self._dist == 'binary'):
            return tfd.Independent(tfd.Bernoulli(hidden), len(self._shape))
        raise NotImplementedError(self._dist)
def print_progress(prefix, start_time, urls_counter, domain_blacklist_counter, extention_blacklist_counter, short_url_counter, malformed_url_counter, duplicate_url_counter):
    """Print a one-line ' | '-separated progress summary of URL-filter counters."""
    fields = [
        prefix,
        'time elapsed (s): {:.2f}'.format(time.time() - start_time),
        'number of urls: {}'.format(urls_counter),
        'domain blacklisted: {}'.format(domain_blacklist_counter),
        'extention blacklisted: {}'.format(extention_blacklist_counter),
        'short urls (<=8): {}'.format(short_url_counter),
        'malformed urls: {}'.format(malformed_url_counter),
        'duplicate urls: {}'.format(duplicate_url_counter),
    ]
    print(' | '.join(fields), flush=True)
def hydra_conf_load_from_checkpoint(chkpt_file, cfg):
    """Restore a Hydra-configured module from a checkpoint file.

    Top-level sub-configs that declare a Hydra `_target_` are instantiated
    eagerly and passed as constructor kwargs; all other top-level keys are
    forwarded via a masked copy of the config. The concrete module class is
    discovered by instantiating `cfg` once and taking its type.

    Args:
      chkpt_file: checkpoint path accepted by the class's load_from_checkpoint
        (presumably a PyTorch Lightning module -- confirm).
      cfg: an OmegaConf config whose top level describes the module.

    Returns:
      The module instance restored from the checkpoint (mapped to CPU storage).
    """
    instance_args = dict()
    cfg_mask = list()
    for k in cfg.keys():
        # Eagerly build nested configs with a Hydra target ...
        if (OmegaConf.is_dict(cfg[k]) and ('_target_' in cfg[k])):
            instance_args[k] = hydra.utils.instantiate(cfg[k])
        else:
            # ... and remember plain keys to forward through masked_copy.
            cfg_mask += [k]
    # NOTE(review): this instantiates the full module once only to discover its
    # class, then discards it -- potentially expensive; confirm intended.
    ModuleType = type(hydra.utils.instantiate(cfg))
    return ModuleType.load_from_checkpoint(chkpt_file, map_location=(lambda storage, loc: storage), **OmegaConf.masked_copy(cfg, cfg_mask), **instance_args)
def extract_pattern(message, pattern):
    """Return the first occurrence of `pattern` in `message`, stripped, or None.

    Matching uses re.DOTALL. Mirrors re.findall semantics: when the pattern
    contains a single capturing group, the group's text (not the whole match)
    is what gets returned.
    """
    found = re.findall(pattern, message, re.DOTALL)
    if not found:
        return None
    return found[0].strip()
def torchify_buffer(buffer_):
    """Recursively convert numpy arrays inside a (possibly nested) buffer to tensors.

    None passes through unchanged, tensors are returned as-is, numpy arrays are
    wrapped with torch.from_numpy (zero-copy), and namedtuple-like containers
    are rebuilt from their converted fields; plain tuples come back as plain
    tuples.
    """
    if buffer_ is None:
        return None
    if isinstance(buffer_, torch.Tensor):
        return buffer_
    if isinstance(buffer_, np.ndarray):
        return torch.from_numpy(buffer_)
    converted = tuple(torchify_buffer(element) for element in buffer_)
    if type(buffer_) is tuple:
        return converted
    # namedtuple-style containers take their fields as positional args
    return type(buffer_)(*converted)
class ContextAttentionEncoder(nn.Module):
    """Context-attention sub-layer followed by a single-layer perceptron.

    When `with_addnorm` is True, each sub-layer is wrapped transformer-style in
    a residual add-and-norm; otherwise the two sub-layers are applied plainly
    in sequence.
    """

    def __init__(self, input_dim: int, dropout: float, with_addnorm: bool, activation: str):
        super(ContextAttentionEncoder, self).__init__()
        self.with_addnorm = with_addnorm
        self.attn = ContextAttention(input_dim, dropout)
        if with_addnorm:
            # One AddNorm wrapper per sub-layer (attention and SLP).
            self.attn_addnorm = AddNorm(input_dim, dropout)
            self.slp_addnorm = AddNorm(input_dim, dropout)
        # Without add-norm the SLP takes over normalization (last flag) --
        # presumably; confirm against SLP's signature.
        self.slp = SLP(input_dim, dropout, activation, (not with_addnorm))

    def forward(self, X: Tensor) -> Tensor:
        """Encode X; output has the same feature dimension as the input."""
        if self.with_addnorm:
            # AddNorm applies the given sub-layer with a residual connection.
            x = self.attn_addnorm(X, self.attn)
            out = self.slp_addnorm(x, self.slp)
        else:
            out = self.slp(self.attn(X))
        return out
class RandomForestRegressorAlgorithm(SklearnTreesEnsembleRegressorAlgorithm):
    """Random Forest regressor wrapper grown incrementally via warm_start.

    Trees are added `trees_in_step` at a time for up to `max_steps` rounds;
    the step/early-stopping loop itself lives in the ensemble base class.
    """
    algorithm_name = 'Random Forest'
    algorithm_short_name = 'Random Forest'

    def __init__(self, params):
        super(RandomForestRegressorAlgorithm, self).__init__(params)
        logger.debug('RandomForestRegressorAlgorithm.__init__')
        self.library_version = sklearn.__version__
        # `regression_additional` is a module-level defaults dict (defined
        # elsewhere in this file).
        self.trees_in_step = regression_additional.get('trees_in_step', 5)
        self.max_steps = regression_additional.get('max_steps', 3)
        self.early_stopping_rounds = regression_additional.get('early_stopping_rounds', 50)
        # warm_start=True lets successive fit() calls add trees instead of
        # retraining from scratch.
        # NOTE(review): criterion default 'mse' was renamed 'squared_error' in
        # sklearn >= 1.0 and removed in 1.2 -- confirm the pinned version.
        self.model = RandomForestRegressor(n_estimators=self.trees_in_step, criterion=params.get('criterion', 'mse'), max_features=params.get('max_features', 0.8), max_depth=params.get('max_depth', 6), min_samples_split=params.get('min_samples_split', 4), min_samples_leaf=params.get('min_samples_leaf', 1), warm_start=True, n_jobs=params.get('n_jobs', (- 1)), random_state=params.get('seed', 1))
        # An explicit per-run param overrides the regression_additional default.
        self.max_steps = self.params.get('max_steps', self.max_steps)

    def file_extension(self):
        # Extension used when persisting this model to disk.
        return 'random_forest'
class UNetMidBlock3DCrossAttn(nn.Module):
    """Middle block of a 3D UNet: resnet, then num_layers x (cross-attention, resnet).

    Channel count stays `in_channels` throughout; cross-attention conditions on
    `encoder_hidden_states` of width `cross_attention_dim`.
    """

    def __init__(self, in_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_groups: int=32, resnet_pre_norm: bool=True, attn_num_head_channels=1, output_scale_factor=1.0, cross_attention_dim=1280, dual_cross_attention=False, use_linear_projection=False, upcast_attention=False):
        super().__init__()
        self.has_cross_attention = True
        self.attn_num_head_channels = attn_num_head_channels
        # Fall back to a group count that divides typical channel widths.
        resnet_groups = (resnet_groups if (resnet_groups is not None) else min((in_channels // 4), 32))
        # One leading resnet, then one (attention, resnet) pair per layer,
        # giving len(resnets) == len(attentions) + 1.
        resnets = [ResnetBlock3D(in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm)]
        attentions = []
        for _ in range(num_layers):
            if dual_cross_attention:
                # Dual cross-attention is not supported in the 3D variant.
                raise NotImplementedError
            # Head count is fixed; head dim derived as channels // heads.
            attentions.append(Transformer3DModel(attn_num_head_channels, (in_channels // attn_num_head_channels), in_channels=in_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention))
            resnets.append(ResnetBlock3D(in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm))
        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

    def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None, cross_attention_kwargs=None):
        """Run resnet, then alternating attention/resnet; shape is preserved.

        NOTE(review): `attention_mask` and `cross_attention_kwargs` are
        accepted but never forwarded to the attention call -- confirm intended.
        """
        hidden_states = self.resnets[0](hidden_states, temb)
        for (attn, resnet) in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states).sample
            hidden_states = resnet(hidden_states, temb)
        return hidden_states
class Edge_NRI(nn.Module):
    """NRI-style edge update over a fully connected graph of `num_atoms` nodes.

    Edge states are updated from (a) messages built from sender/receiver node
    features via `w_node2edge` and (b) a self-evolution MLP on the previous
    edge state; a scalar value per edge is read out via `w_edge2value`.
    """

    def __init__(self, in_channels, w_node2edge, num_atoms, device, dropout=0.0):
        super(Edge_NRI, self).__init__()
        self.dropout = nn.Dropout(dropout)
        # Shared node->edge message network (built by the caller).
        self.w_node2edge = w_node2edge
        # Edge -> scalar readout head.
        self.w_edge2value = nn.Sequential(nn.Linear(in_channels, (in_channels // 2)), nn.ReLU(), nn.Linear((in_channels // 2), 1))
        # Edge self-evolution MLP (same width in and out).
        self.edge_self_evolve = nn.Sequential(nn.Linear(in_channels, (in_channels // 2)), nn.ReLU(), nn.Linear((in_channels // 2), in_channels))
        self.num_atoms = num_atoms
        self.device = device
        self.layer_norm = nn.LayerNorm(in_channels, elementwise_affine=False)
        utils.init_network_weights(self.w_edge2value)
        utils.init_network_weights(self.edge_self_evolve)
        (self.rel_send, self.rel_rec) = self.rel_rec_compute()

    def rel_rec_compute(self):
        """One-hot sender/receiver incidence matrices for the fully connected graph.

        Returns:
          (rel_send, rel_rec): float tensors of shape
          [num_atoms**2, num_atoms] on self.device.
        """
        fully_connected = np.ones([self.num_atoms, self.num_atoms])
        # np.where on an all-ones matrix enumerates every (sender, receiver)
        # pair in row-major order.
        rel_send = np.array(utils.encode_onehot(np.where(fully_connected)[0]), dtype=np.float32)
        rel_rec = np.array(utils.encode_onehot(np.where(fully_connected)[1]), dtype=np.float32)
        rel_send = torch.FloatTensor(rel_send).to(self.device)
        rel_rec = torch.FloatTensor(rel_rec).to(self.device)
        return (rel_send, rel_rec)

    def forward(self, node_inputs, edges_input, num_atoms):
        """Update edge states and compute per-edge scalar values.

        Returns:
          (edges_z, edge_2_value): updated edge states (flattened) and the
          non-negative scalar value per edge.
        """
        node_feature_num = node_inputs.shape[1]
        edge_feature_num = edges_input.shape[(- 1)]
        node_inputs = node_inputs.view((- 1), num_atoms, node_feature_num)
        # Gather sender/receiver node features for every directed edge.
        senders = torch.matmul(self.rel_send, node_inputs)
        receivers = torch.matmul(self.rel_rec, node_inputs)
        edges = torch.cat([senders, receivers], dim=(- 1))
        edges_from_node = F.gelu(self.w_node2edge(edges))
        # Normalize previous edge state before self-evolution.
        edges_input = self.layer_norm(edges_input)
        edges_self = self.edge_self_evolve(edges_input)
        edges_self = edges_self.view((- 1), (num_atoms * num_atoms), edge_feature_num)
        # Combine node-derived messages with self-evolved edge state.
        edges_z = self.dropout((edges_from_node + edges_self))
        edge_2_value = torch.squeeze(F.relu(self.w_edge2value(edges_z)), dim=(- 1))
        # NOTE(review): reshaping by node_feature_num here only works if node
        # and edge feature widths coincide -- should this be edge_feature_num?
        edges_z = edges_z.view((- 1), node_feature_num)
        return (edges_z, edge_2_value)
class ADAINResnetBlock(nn.Module):
    """Residual block whose normalizations are ADAIN layers driven by a style code z.

    Main path: ADAIN -> act -> 3x3 conv (input_nc -> hidden_nc) -> ADAIN -> act
    -> 3x3 conv (hidden_nc -> output_nc). The shortcut is a learned 1x1 conv
    (with its own ADAIN) when channel counts differ or when explicitly requested.
    """

    def __init__(self, input_nc, output_nc, hidden_nc, feature_nc, nonlinearity=nn.LeakyReLU(), use_spect=False, use_coord=False, learned_shortcut=False):
        super(ADAINResnetBlock, self).__init__()
        # A learned shortcut is forced whenever channel counts change.
        self.learned_shortcut = ((input_nc != output_nc) or learned_shortcut)
        self.actvn = nonlinearity
        hidden_nc = (min(input_nc, output_nc) if (hidden_nc is None) else hidden_nc)
        # spectral_norm here is a conditional wrapper: applied only if use_spect.
        self.conv_0 = spectral_norm(nn.Conv2d(input_nc, hidden_nc, kernel_size=3, stride=1, padding=1), use_spect)
        self.conv_1 = spectral_norm(nn.Conv2d(hidden_nc, output_nc, kernel_size=3, stride=1, padding=1), use_spect)
        if self.learned_shortcut:
            self.conv_s = spectral_norm(nn.Conv2d(input_nc, output_nc, kernel_size=1, bias=False), use_spect)
        # ADAIN layers are conditioned on a feature vector of width feature_nc.
        self.norm_0 = ADAIN(input_nc, feature_nc)
        self.norm_1 = ADAIN(hidden_nc, feature_nc)
        if self.learned_shortcut:
            self.norm_s = ADAIN(input_nc, feature_nc)
        # NOTE(review): `use_coord` is accepted but never used -- confirm.

    def forward(self, x, z):
        """x: input feature map; z: style/feature code consumed by the ADAIN layers."""
        x_s = self.shortcut(x, z)
        dx = self.conv_0(self.actvn(self.norm_0(x, z)))
        dx = self.conv_1(self.actvn(self.norm_1(dx, z)))
        out = (x_s + dx)
        return out

    def shortcut(self, x, z):
        # Identity unless a learned (normalized 1x1-conv) shortcut is needed.
        if self.learned_shortcut:
            x_s = self.conv_s(self.norm_s(x, z))
        else:
            x_s = x
        return x_s
class GcnInfomax(nn.Module):
    """Deep Graph Infomax-style loss head for drug graph embeddings.

    Projects local (per-node) features and global (per-graph) embeddings with
    small feed-forward nets and computes a JSD local-global mutual-information
    loss, plus an optional adversarial prior-matching term weighted by `gamma`.
    """

    def __init__(self, args: Namespace, gamma=0.1):
        super(GcnInfomax, self).__init__()
        self.args = args
        self.gamma = gamma              # weight of the prior-matching term
        self.prior = args.prior         # whether to add the prior discriminator
        self.features_dim = args.hidden_size
        self.embedding_dim = args.gcn_hidden3
        self.local_d = FF_local(args, self.features_dim)    # local projection
        self.global_d = FF_global(args, self.embedding_dim) # global projection
        if self.prior:
            self.prior_d = PriorDiscriminator(self.embedding_dim)

    def forward(self, embeddings, features, adj_tensor, num_drugs):
        """Return the total loss: local-global JSD MI loss (+ optional prior term)."""
        g_enc = self.global_d(embeddings)
        l_enc = self.local_d(features)
        measure = 'JSD'
        local_global_loss = local_global_drug_loss_(self.args, l_enc, g_enc, adj_tensor, num_drugs, measure)
        eps = 1e-05  # numerical floor inside the logs below
        if self.prior:
            # Adversarial term pushing embeddings toward a uniform prior:
            # discriminator should score uniform noise high, embeddings low.
            prior = torch.rand_like(embeddings)
            term_a = torch.log((self.prior_d(prior) + eps)).mean()
            term_b = torch.log(((1.0 - self.prior_d(embeddings)) + eps)).mean()
            PRIOR = ((- (term_a + term_b)) * self.gamma)
        else:
            PRIOR = 0
        return (local_global_loss + PRIOR)
def main():
    """Entry point for (possibly distributed) object-detection inference.

    Parses CLI args, merges them into the global config, builds the model,
    restores a checkpoint, and runs `inference` over every test dataset,
    synchronizing across processes when launched with torch.distributed.
    """
    parser = argparse.ArgumentParser(description='PyTorch Object Detection Inference')
    parser.add_argument('--config-file', default='/private/home/fmassa/github/detectron.pytorch_v2/configs/e2e_faster_rcnn_R_50_C4_1x_caffe2.yaml', metavar='FILE', help='path to config file')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--ckpt', help='The path to the checkpoint for test, default is the latest checkpoint.', default=None)
    parser.add_argument('opts', help='Modify config options using the command-line', default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()
    # WORLD_SIZE is set by torch.distributed.launch; >1 means multi-GPU run.
    num_gpus = (int(os.environ['WORLD_SIZE']) if ('WORLD_SIZE' in os.environ) else 1)
    distributed = (num_gpus > 1)
    if distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend='nccl', init_method='env://')
        synchronize()
    # Config precedence: file < command-line opts; then frozen read-only.
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    save_dir = ''
    logger = setup_logger('maskrcnn_benchmark', save_dir, get_rank())
    logger.info('Using {} GPUs'.format(num_gpus))
    logger.info(cfg)
    logger.info('Collecting env info (might take some time)')
    logger.info(('\n' + collect_env_info()))
    model = build_detection_model(cfg)
    model.to(cfg.MODEL.DEVICE)
    # Mixed precision via apex amp when DTYPE is float16.
    use_mixed_precision = (cfg.DTYPE == 'float16')
    amp_handle = amp.init(enabled=use_mixed_precision, verbose=cfg.AMP_VERBOSE)
    output_dir = cfg.OUTPUT_DIR
    checkpointer = DetectronCheckpointer(cfg, model, save_dir=output_dir)
    # Explicit --ckpt wins; otherwise load the latest checkpoint in output_dir.
    ckpt = (cfg.MODEL.WEIGHT if (args.ckpt is None) else args.ckpt)
    _ = checkpointer.load(ckpt, use_latest=(args.ckpt is None))
    # Evaluation types depend on which heads the model config enables.
    iou_types = ('bbox',)
    if (cfg.MODEL.MASK_ON and (not cfg.MODEL.KE_ON)):
        iou_types = (iou_types + ('segm',))
    if cfg.MODEL.KEYPOINT_ON:
        iou_types = (iou_types + ('keypoints',))
    if cfg.MODEL.KE_ON:
        iou_types = (iou_types + ('kes',))
    output_folders = ([None] * len(cfg.DATASETS.TEST))
    dataset_names = cfg.DATASETS.TEST
    if cfg.OUTPUT_DIR:
        # One inference output folder per test dataset.
        for (idx, dataset_name) in enumerate(dataset_names):
            output_folder = os.path.join(cfg.OUTPUT_DIR, 'inference', dataset_name)
            mkdir(output_folder)
            output_folders[idx] = output_folder
    data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)
    rec_type = cfg.MODEL.ALIGN.PREDICTOR
    for (output_folder, dataset_name, data_loader_val) in zip(output_folders, dataset_names, data_loaders_val):
        inference(model, data_loader_val, dataset_name=dataset_name, iou_types=iou_types, rec_type=rec_type, box_only=(False if (cfg.MODEL.FCOS_ON or cfg.MODEL.RETINANET_ON) else cfg.MODEL.RPN_ONLY), device=cfg.MODEL.DEVICE, expected_results=cfg.TEST.EXPECTED_RESULTS, expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL, output_folder=output_folder)
        synchronize()
def save_model(state, output_path):
    """Persist the final training state to disk.

    The filename encodes epoch, validation loss and validation dice score,
    which are read from `state` (a dict also containing model/optimizer
    states to be serialized by torch.save).

    Args:
      state: dict with at least 'epoch', 'val_loss' and 'val_dice_score'.
      output_path: directory in which to write the checkpoint.
    """
    save_path = os.path.join(output_path, f"final_epoch_{state['epoch']}_val_loss_{state['val_loss']}_dice_{state['val_dice_score']}.pth")
    # Fixed: log message previously read 'Saving last to<path>' with no space.
    logger.info(f'Saving last to {save_path}')
    torch.save(state, save_path)
def convMeanpool(inplanes, outplanes):
    """3x3 convolution followed by 2x2 mean pooling (stride 2), as one Sequential."""
    return nn.Sequential(
        conv3x3(inplanes, outplanes),
        nn.AvgPool2d(kernel_size=2, stride=2),
    )
# NOTE(review): bare call immediately before `class DAEL` -- this looks like a
# decorator that lost its '@' (i.e. `@_REGISTRY.register()`); as written the
# class is never registered. Confirm against the trainer-registry usage.
_REGISTRY.register()
class DAEL(TrainerXU):
    """Domain Adaptive Ensemble Learning trainer.

    Trains a shared feature extractor F plus one expert classifier per source
    domain (E). Labeled batches supervise each domain's own expert plus a
    cross-domain consistency term; unlabeled batches are pseudo-labeled by the
    most confident expert and trained against the expert-ensemble average.
    """

    def __init__(self, cfg):
        super().__init__(cfg)
        n_domain = cfg.DATALOADER.TRAIN_X.N_DOMAIN
        batch_size = cfg.DATALOADER.TRAIN_X.BATCH_SIZE
        if (n_domain <= 0):
            # Fall back to the number of source domains the data manager found.
            n_domain = self.dm.num_source_domains
        # Labeled batches are domain-balanced: split_batch examples per domain.
        self.split_batch = (batch_size // n_domain)
        self.n_domain = n_domain
        self.weight_u = cfg.TRAINER.DAEL.WEIGHT_U    # weight of unlabeled loss
        self.conf_thre = cfg.TRAINER.DAEL.CONF_THRE  # pseudo-label confidence threshold

    def check_cfg(self, cfg):
        # DAEL requires domain-balanced labeled sampling, an independent
        # unlabeled loader, and at least one strong transform (second view).
        assert (cfg.DATALOADER.TRAIN_X.SAMPLER == 'RandomDomainSampler')
        assert (not cfg.DATALOADER.TRAIN_U.SAME_AS_X)
        assert (len(cfg.TRAINER.DAEL.STRONG_TRANSFORMS) > 0)

    def build_data_loader(self):
        """Build loaders producing two views per image: default + strong augmentation."""
        cfg = self.cfg
        tfm_train = build_transform(cfg, is_train=True)
        custom_tfm_train = [tfm_train]
        choices = cfg.TRAINER.DAEL.STRONG_TRANSFORMS
        tfm_train_strong = build_transform(cfg, is_train=True, choices=choices)
        custom_tfm_train += [tfm_train_strong]
        self.dm = DataManager(self.cfg, custom_tfm_train=custom_tfm_train)
        self.train_loader_x = self.dm.train_loader_x
        self.train_loader_u = self.dm.train_loader_u
        self.val_loader = self.dm.val_loader
        self.test_loader = self.dm.test_loader
        self.num_classes = self.dm.num_classes

    def build_model(self):
        """Build and register the shared backbone F and per-domain experts E."""
        cfg = self.cfg
        print('Building F')
        # Third arg presumably num_classes=0, i.e. feature extractor only -- confirm.
        self.F = SimpleNet(cfg, cfg.MODEL, 0)
        self.F.to(self.device)
        print('# params: {:,}'.format(count_num_param(self.F)))
        self.optim_F = build_optimizer(self.F, cfg.OPTIM)
        self.sched_F = build_lr_scheduler(self.optim_F, cfg.OPTIM)
        self.register_model('F', self.F, self.optim_F, self.sched_F)
        fdim = self.F.fdim
        print('Building E')
        self.E = Experts(self.dm.num_source_domains, fdim, self.num_classes)
        self.E.to(self.device)
        print('# params: {:,}'.format(count_num_param(self.E)))
        self.optim_E = build_optimizer(self.E, cfg.OPTIM)
        self.sched_E = build_lr_scheduler(self.optim_E, cfg.OPTIM)
        self.register_model('E', self.E, self.optim_E, self.sched_E)

    def forward_backward(self, batch_x, batch_u):
        """One optimization step over a labeled batch and an unlabeled batch.

        Losses:
          loss_x  -- each domain's expert vs. its own labeled data (cross entropy);
          loss_cr -- consistency: mean of *other* experts on the strong view
                     should match the domain expert's prediction;
          loss_u  -- ensemble on strong unlabeled view vs. confident pseudo-labels.
        """
        parsed_data = self.parse_batch_train(batch_x, batch_u)
        (input_x, input_x2, label_x, domain_x, input_u, input_u2) = parsed_data
        # Split the domain-balanced batch into per-domain chunks.
        input_x = torch.split(input_x, self.split_batch, 0)
        input_x2 = torch.split(input_x2, self.split_batch, 0)
        label_x = torch.split(label_x, self.split_batch, 0)
        domain_x = torch.split(domain_x, self.split_batch, 0)
        # Each chunk is single-domain, so its first element gives the domain id.
        domain_x = [d[0].item() for d in domain_x]
        # Pseudo-label the unlabeled weak view with no gradient flow.
        with torch.no_grad():
            feat_u = self.F(input_u)
            pred_u = []
            for k in range(self.dm.num_source_domains):
                pred_uk = self.E(k, feat_u)
                pred_uk = pred_uk.unsqueeze(1)
                pred_u.append(pred_uk)
            pred_u = torch.cat(pred_u, 1)  # (batch, n_experts, n_classes)
            # Most confident class per expert, then most confident expert.
            (experts_max_p, experts_max_idx) = pred_u.max(2)
            (max_expert_p, max_expert_idx) = experts_max_p.max(1)
            pseudo_label_u = []
            for (i, experts_label) in zip(max_expert_idx, experts_max_idx):
                pseudo_label_u.append(experts_label[i])
            pseudo_label_u = torch.stack(pseudo_label_u, 0)
            pseudo_label_u = create_onehot(pseudo_label_u, self.num_classes)
            pseudo_label_u = pseudo_label_u.to(self.device)
            # Mask out pseudo-labels below the confidence threshold.
            label_u_mask = (max_expert_p >= self.conf_thre).float()
        loss_x = 0
        loss_cr = 0
        acc_x = 0
        feat_x = [self.F(x) for x in input_x]
        feat_x2 = [self.F(x) for x in input_x2]
        feat_u2 = self.F(input_u2)
        for (feat_xi, feat_x2i, label_xi, i) in zip(feat_x, feat_x2, label_x, domain_x):
            cr_s = [j for j in domain_x if (j != i)]  # the *other* domains
            # Supervised loss for domain i's own expert (soft cross entropy).
            pred_xi = self.E(i, feat_xi)
            loss_x += ((- label_xi) * torch.log((pred_xi + 1e-05))).sum(1).mean()
            expert_label_xi = pred_xi.detach()
            acc_x += compute_accuracy(pred_xi.detach(), label_xi.max(1)[1])[0].item()
            # Consistency: mean prediction of other experts on the strong view
            # should match this domain's expert (MSE).
            cr_pred = []
            for j in cr_s:
                pred_j = self.E(j, feat_x2i)
                pred_j = pred_j.unsqueeze(1)
                cr_pred.append(pred_j)
            cr_pred = torch.cat(cr_pred, 1)
            cr_pred = cr_pred.mean(1)
            loss_cr += ((cr_pred - expert_label_xi) ** 2).sum(1).mean()
        loss_x /= self.n_domain
        loss_cr /= self.n_domain
        acc_x /= self.n_domain
        # Unsupervised loss: ensemble mean on the strong unlabeled view vs.
        # the (masked) pseudo-labels.
        pred_u = []
        for k in range(self.dm.num_source_domains):
            pred_uk = self.E(k, feat_u2)
            pred_uk = pred_uk.unsqueeze(1)
            pred_u.append(pred_uk)
        pred_u = torch.cat(pred_u, 1)
        pred_u = pred_u.mean(1)
        l_u = ((- pseudo_label_u) * torch.log((pred_u + 1e-05))).sum(1)
        loss_u = (l_u * label_u_mask).mean()
        loss = 0
        loss += loss_x
        loss += loss_cr
        loss += (loss_u * self.weight_u)
        self.model_backward_and_update(loss)
        loss_summary = {'loss_x': loss_x.item(), 'acc_x': acc_x, 'loss_cr': loss_cr.item(), 'loss_u': loss_u.item()}
        if ((self.batch_idx + 1) == self.num_batches):
            # End of epoch: advance the LR schedulers.
            self.update_lr()
        return loss_summary

    def parse_batch_train(self, batch_x, batch_u):
        """Move batch tensors to the device; one-hot encode labeled targets.

        'img'/'img2' are the default/strong views produced by build_data_loader.
        """
        input_x = batch_x['img']
        input_x2 = batch_x['img2']
        label_x = batch_x['label']
        domain_x = batch_x['domain']
        input_u = batch_u['img']
        input_u2 = batch_u['img2']
        label_x = create_onehot(label_x, self.num_classes)
        input_x = input_x.to(self.device)
        input_x2 = input_x2.to(self.device)
        label_x = label_x.to(self.device)
        input_u = input_u.to(self.device)
        input_u2 = input_u2.to(self.device)
        return (input_x, input_x2, label_x, domain_x, input_u, input_u2)

    def model_inference(self, input):
        """Inference: average the predictions of all domain experts."""
        f = self.F(input)
        p = []
        for k in range(self.dm.num_source_domains):
            p_k = self.E(k, f)
            p_k = p_k.unsqueeze(1)
            p.append(p_k)
        p = torch.cat(p, 1)
        p = p.mean(1)
        return p
class ShortestPathFollowerCompat():
    """Utility that outputs the next action to follow the shortest path to a goal.

    Uses the simulator's geodesic-path API when available ('geodesic_path'
    mode); otherwise falls back to a greedy rotate-and-probe search over all
    headings ('greedy' mode).

    Fix vs. original: `mode` was defined twice as plain methods -- the second
    definition shadowed the first and `self.mode == 'geodesic_path'` compared
    a bound method to a string (always False), silently forcing the greedy
    branch. Restored the evidently intended @property / @mode.setter pair.
    """

    def __init__(self, sim: HabitatSim, goal_radius: float, return_one_hot: bool=True):
        assert (getattr(sim, 'geodesic_distance', None) is not None), '{} must have a method called geodesic_distance'.format(type(sim).__name__)
        self._sim = sim
        # Slightly under a forward step so probe moves never overshoot.
        self._max_delta = (sim.habitat_config.FORWARD_STEP_SIZE - EPSILON)
        self._goal_radius = goal_radius
        self._step_size = sim.habitat_config.FORWARD_STEP_SIZE
        # Prefer the exact geodesic path when the simulator exposes it.
        self._mode = ('geodesic_path' if (getattr(sim, 'get_straight_shortest_path_points', None) is not None) else 'greedy')
        self._return_one_hot = return_one_hot

    def _get_return_value(self, action) -> Union[int, np.array]:
        # Encode the action as one-hot when the caller asked for it.
        if self._return_one_hot:
            return action_to_one_hot(action)
        return action

    def get_next_action(self, goal_pos: np.array) -> Optional[Union[int, np.array]]:
        """Return the next action along the shortest path, or None if at the goal."""
        if (self._sim.geodesic_distance(self._sim.get_agent_state().position, goal_pos) <= self._goal_radius):
            return None
        max_grad_dir = self._est_max_grad_dir(goal_pos)
        if (max_grad_dir is None):
            return self._get_return_value(HabitatSimActions.MOVE_FORWARD)
        return self._step_along_grad(max_grad_dir)

    def _step_along_grad(self, grad_dir: np.quaternion) -> Union[int, np.array]:
        """Move forward if roughly facing grad_dir, else turn toward it."""
        current_state = self._sim.get_agent_state()
        alpha = angle_between_quaternions(grad_dir, current_state.rotation)
        if (alpha <= (np.deg2rad(self._sim.habitat_config.TURN_ANGLE) + EPSILON)):
            return self._get_return_value(HabitatSimActions.MOVE_FORWARD)
        # Probe a left turn to decide which direction reduces the angle,
        # then restore the agent's state.
        sim_action = HabitatSimActions.TURN_LEFT
        self._sim.step(sim_action)
        best_turn = (HabitatSimActions.TURN_LEFT if (angle_between_quaternions(grad_dir, self._sim.get_agent_state().rotation) < alpha) else HabitatSimActions.TURN_RIGHT)
        self._reset_agent_state(current_state)
        return self._get_return_value(best_turn)

    def _reset_agent_state(self, state: habitat_sim.AgentState) -> None:
        self._sim.set_agent_state(state.position, state.rotation, reset_sensors=False)

    def _geo_dist(self, goal_pos: np.array) -> float:
        return self._sim.geodesic_distance(self._sim.get_agent_state().position, goal_pos)

    def _est_max_grad_dir(self, goal_pos: np.array) -> np.array:
        """Estimate the heading (as a quaternion) of steepest geodesic descent."""
        current_state = self._sim.get_agent_state()
        current_pos = current_state.position
        if (self.mode == 'geodesic_path'):
            points = self._sim.get_straight_shortest_path_points(self._sim.get_agent_state().position, goal_pos)
            if (len(points) < 2):
                return None
            # Heading toward the next path waypoint, nudged off the exact
            # forward axis to keep the rotation well-defined.
            max_grad_dir = quaternion_from_two_vectors(self._sim.forward_vector, ((points[1] - points[0]) + (EPSILON * np.cross(self._sim.up_vector, self._sim.forward_vector))))
            max_grad_dir.x = 0
            max_grad_dir = np.normalized(max_grad_dir)
        else:
            # Greedy fallback: rotate through all headings, probe a forward
            # step at each, and keep the heading with the largest distance drop.
            current_rotation = self._sim.get_agent_state().rotation
            current_dist = self._geo_dist(goal_pos)
            best_geodesic_delta = ((- 2) * self._max_delta)
            best_rotation = current_rotation
            for _ in range(0, 360, self._sim.habitat_config.TURN_ANGLE):
                sim_action = HabitatSimActions.MOVE_FORWARD
                self._sim.step(sim_action)
                new_delta = (current_dist - self._geo_dist(goal_pos))
                if (new_delta > best_geodesic_delta):
                    best_rotation = self._sim.get_agent_state().rotation
                    best_geodesic_delta = new_delta
                # Near-optimal progress already achieved: stop probing early.
                if np.isclose(best_geodesic_delta, self._max_delta, rtol=(1 - np.cos(np.deg2rad(self._sim.habitat_config.TURN_ANGLE)))):
                    break
                # Undo the forward probe (keep heading), then turn to the next heading.
                self._sim.set_agent_state(current_pos, self._sim.get_agent_state().rotation, reset_sensors=False)
                sim_action = HabitatSimActions.TURN_LEFT
                self._sim.step(sim_action)
            self._reset_agent_state(current_state)
            max_grad_dir = best_rotation
        return max_grad_dir

    @property
    def mode(self):
        """Current follower mode: 'geodesic_path' or 'greedy'."""
        return self._mode

    @mode.setter
    def mode(self, new_mode: str):
        """Switch mode; 'geodesic_path' requires the straight-path API."""
        assert (new_mode in {'geodesic_path', 'greedy'})
        if (new_mode == 'geodesic_path'):
            assert (getattr(self._sim, 'get_straight_shortest_path_points', None) is not None)
        self._mode = new_mode
# NOTE(review): bare `_grad()` call right before a sampling function -- this
# looks like a decorator that lost its '@' (likely `@torch.no_grad()`); as
# written it calls an undefined name at import time. Confirm and restore.
_grad()
def make_convolutional_sample(model, batch_size, vanilla=False, custom_steps=None, eta=1.0):
    """Sample a batch from a latent diffusion model and decode it to image space.

    Args:
      model: latent diffusion model exposing ema_scope / decode_first_stage,
        with model.model.diffusion_model providing in_channels and image_size.
      batch_size: number of samples to draw.
      vanilla: if True use plain ancestral sampling (convsample); else DDIM.
      custom_steps: number of DDIM steps (used when vanilla is False).
      eta: DDIM eta parameter (0 = deterministic sampling).

    Returns:
      dict with 'sample' (decoded images), 'time' (sampling seconds) and
      'throughput' (samples per second).
    """
    log = dict()
    # Latent shape: (batch, channels, height, width) from the diffusion UNet.
    shape = [batch_size, model.model.diffusion_model.in_channels, model.model.diffusion_model.image_size, model.model.diffusion_model.image_size]
    # Sample under the EMA weights.
    with model.ema_scope('Plotting'):
        t0 = time.time()
        if vanilla:
            # NOTE(review): progrow is computed but never logged -- confirm.
            (sample, progrow) = convsample(model, shape, make_prog_row=True)
        else:
            (sample, intermediates) = convsample_ddim(model, steps=custom_steps, shape=shape, eta=eta)
        t1 = time.time()
    # Decode latents back to pixel space (timing covers sampling only).
    x_sample = model.decode_first_stage(sample)
    log['sample'] = x_sample
    log['time'] = (t1 - t0)
    log['throughput'] = (sample.shape[0] / (t1 - t0))
    print(f"Throughput for this batch: {log['throughput']}")
    return log
class FasterRcnnBoxCoderTest(tf.test.TestCase):
    """TF1 graph-mode unit tests for FasterRcnnBoxCoder encode/decode.

    Boxes use [ymin, xmin, ymax, xmax]; expected values are precomputed
    relative codes (ty, tx, th, tw), optionally multiplied by scale factors.
    """

    def test_get_correct_relative_codes_after_encoding(self):
        # Unscaled round-trip: encode two boxes against two anchors.
        boxes = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]]
        anchors = [[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]]
        expected_rel_codes = [[(- 0.5), (- 0.416666), (- 0.405465), (- 0.182321)], [(- 0.083333), (- 0.222222), (- 0.693147), (- 1.098612)]]
        boxes = box_list.BoxList(tf.constant(boxes))
        anchors = box_list.BoxList(tf.constant(anchors))
        coder = faster_rcnn_box_coder.FasterRcnnBoxCoder()
        rel_codes = coder.encode(boxes, anchors)
        with self.test_session() as sess:
            (rel_codes_out,) = sess.run([rel_codes])
            self.assertAllClose(rel_codes_out, expected_rel_codes)

    def test_get_correct_relative_codes_after_encoding_with_scaling(self):
        # Same encoding with per-coordinate scale factors applied to the codes.
        boxes = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]]
        anchors = [[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]]
        scale_factors = [2, 3, 4, 5]
        expected_rel_codes = [[(- 1.0), (- 1.25), (- 1.62186), (- 0.911608)], [(- 0.166667), (- 0.666667), (- 2.772588), (- 5.493062)]]
        boxes = box_list.BoxList(tf.constant(boxes))
        anchors = box_list.BoxList(tf.constant(anchors))
        coder = faster_rcnn_box_coder.FasterRcnnBoxCoder(scale_factors=scale_factors)
        rel_codes = coder.encode(boxes, anchors)
        with self.test_session() as sess:
            (rel_codes_out,) = sess.run([rel_codes])
            self.assertAllClose(rel_codes_out, expected_rel_codes)

    def test_get_correct_boxes_after_decoding(self):
        # Decoding the codes from the first test must recover the boxes.
        anchors = [[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]]
        rel_codes = [[(- 0.5), (- 0.416666), (- 0.405465), (- 0.182321)], [(- 0.083333), (- 0.222222), (- 0.693147), (- 1.098612)]]
        expected_boxes = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]]
        anchors = box_list.BoxList(tf.constant(anchors))
        coder = faster_rcnn_box_coder.FasterRcnnBoxCoder()
        boxes = coder.decode(rel_codes, anchors)
        with self.test_session() as sess:
            (boxes_out,) = sess.run([boxes.get()])
            self.assertAllClose(boxes_out, expected_boxes)

    def test_get_correct_boxes_after_decoding_with_scaling(self):
        # Scaled decode must invert the scaled encode.
        anchors = [[15.0, 12.0, 30.0, 18.0], [0.1, 0.0, 0.7, 0.9]]
        rel_codes = [[(- 1.0), (- 1.25), (- 1.62186), (- 0.911608)], [(- 0.166667), (- 0.666667), (- 2.772588), (- 5.493062)]]
        scale_factors = [2, 3, 4, 5]
        expected_boxes = [[10.0, 10.0, 20.0, 15.0], [0.2, 0.1, 0.5, 0.4]]
        anchors = box_list.BoxList(tf.constant(anchors))
        coder = faster_rcnn_box_coder.FasterRcnnBoxCoder(scale_factors=scale_factors)
        boxes = coder.decode(rel_codes, anchors)
        with self.test_session() as sess:
            (boxes_out,) = sess.run([boxes.get()])
            self.assertAllClose(boxes_out, expected_boxes)

    def test_very_small_Width_nan_after_encoding(self):
        # Near-zero box width must still produce finite (non-NaN) codes.
        boxes = [[10.0, 10.0, 10.0000001, 20.0]]
        anchors = [[15.0, 12.0, 30.0, 18.0]]
        expected_rel_codes = [[(- 0.833333), 0.0, (- 21.128731), 0.510826]]
        boxes = box_list.BoxList(tf.constant(boxes))
        anchors = box_list.BoxList(tf.constant(anchors))
        coder = faster_rcnn_box_coder.FasterRcnnBoxCoder()
        rel_codes = coder.encode(boxes, anchors)
        with self.test_session() as sess:
            (rel_codes_out,) = sess.run([rel_codes])
            self.assertAllClose(rel_codes_out, expected_rel_codes)
# NOTE(review): bare call immediately before `class NormedConv2d` -- this looks
# like a registry decorator that lost its '@' (e.g. `@MODELS.register_module(
# name='NormedConv2d')`); as written it calls an undefined name. Confirm.
_module(name='NormedConv2d')
class NormedConv2d(nn.Conv2d):
    """Conv2d with L2-normalized weights and inputs, scaled by a temperature.

    Both the kernel (per output channel, or per input channel depending on
    `norm_over_kernel`) and the input feature map (over the channel dim) are
    divided by their norms before the convolution; the normalized input is
    multiplied by `tempearture` (sic -- name kept for checkpoint/config
    compatibility).
    """

    def __init__(self, *args, tempearture: float=20, power: int=1.0, eps: float=1e-06, norm_over_kernel: bool=False, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.tempearture = tempearture        # scale applied to normalized input
        self.power = power                    # exponent applied to the norms
        self.norm_over_kernel = norm_over_kernel  # normalize over full kernel vs dim 1
        self.eps = eps                        # numerical floor for the divisions

    def forward(self, x: Tensor) -> Tensor:
        if (not self.norm_over_kernel):
            # Normalize the weight along the input-channel dimension only.
            weight_ = (self.weight / (self.weight.norm(dim=1, keepdim=True).pow(self.power) + self.eps))
        else:
            # Normalize each output filter over its entire (in, kh, kw) kernel.
            weight_ = (self.weight / (self.weight.view(self.weight.size(0), (- 1)).norm(dim=1, keepdim=True).pow(self.power)[(..., None, None)] + self.eps))
        # Normalize the input over channels, then apply the temperature.
        x_ = (x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps))
        x_ = (x_ * self.tempearture)
        if hasattr(self, 'conv2d_forward'):
            # torch < 1.8 exposes the internal conv entry point as conv2d_forward.
            x_ = self.conv2d_forward(x_, weight_)
        else:
            # Fixed: the original compared version *strings* ('1.10' < '1.8'
            # lexicographically), sending torch 1.10-1.13 down the wrong
            # branch. Compare parsed (major, minor) numbers instead.
            try:
                parts = torch.__version__.split('+')[0].split('.')[:2]
                ge_1_8 = ((int(parts[0]), int(parts[1])) >= (1, 8))
            except (ValueError, IndexError):
                # Non-numeric dev versions: assume a modern torch.
                ge_1_8 = True
            if ge_1_8:
                x_ = self._conv_forward(x_, weight_, self.bias)
            else:
                x_ = self._conv_forward(x_, weight_)
        return x_
def get_flat(sent):
    """Flatten per-token scope annotations into one label per token.

    Each token contributes the last element of its last scope entry, or 'O'
    (outside) when the token carries no scopes.
    """
    return [
        (token.scope[(- 1)][(- 1)] if token.scope else 'O')
        for token in sent.tokens
    ]
def build_attr_dict(attr_triples):
    """Group (entity, attribute, value) triples into {entity: {attribute: value}}.

    Later triples with the same (entity, attribute) overwrite earlier ones.
    """
    grouped = dict()
    for (entity, attribute, value) in attr_triples:
        grouped.setdefault(entity, dict())[attribute] = value
    return grouped
class WhiteSpaceTokenizer(object):
    """Whitespace tokenizer with a fixed vocabulary loaded from a word-count file.

    Special tokens are inserted first (ids 0..6, then any extras from
    special_token_dict), followed by the most frequent words up to vocab_size.
    Exposes word2id/id2word maps, per-word unigram probabilities (word2prob),
    and <token_type>_id attributes for every special token.
    """

    def __init__(self, word_count_path, vocab_size, pad_token='<pad>', bos_token='<s>', eos_token='</s>', unk_token='<unk>', sep_token='<sep>', cls_token='<cls>', mask_token='<mask>', special_token_dict={}):
        # NOTE(review): special_token_dict uses a mutable default argument --
        # harmless while it is only read, but fragile; confirm before reuse.
        self.pad_token = pad_token
        self.bos_token = bos_token
        self.eos_token = eos_token
        self.unk_token = unk_token
        self.sep_token = sep_token
        self.cls_token = cls_token
        self.mask_token = mask_token
        self.word2id = {}
        self.word2prob = {}
        self.init_vocab(word_count_path, vocab_size, special_token_dict)
        # Inverse mapping id -> word.
        self.id2word = {}
        for (k, v) in self.word2id.items():
            self.id2word[v] = k
        # Expose pad_token_id, bos_token_id, ... as attributes.
        for token_type in ['pad_token', 'bos_token', 'eos_token', 'unk_token', 'sep_token', 'cls_token', 'mask_token']:
            token = getattr(self, token_type)
            setattr(self, f'{token_type}_id', self.word2id[token])
        for (token_type, token) in special_token_dict.items():
            setattr(self, f'{token_type}_id', self.word2id[token])

    def __len__(self):
        return len(self.word2id)

    def init_vocab(self, word_count_path, vocab_size, special_token_dict):
        """Populate word2id (specials first, then frequent words) and word2prob.

        The word-count file is tab-separated "token<TAB>count", one per line,
        assumed sorted by descending frequency.
        """
        # Special tokens claim the lowest ids, in declaration order.
        for token in [self.pad_token, self.bos_token, self.eos_token, self.unk_token, self.sep_token, self.cls_token, self.mask_token]:
            self.word2id[token] = len(self.word2id)
        for (token_type, token) in special_token_dict.items():
            self.word2id[token] = len(self.word2id)
        word_count = {}
        with open(word_count_path, 'r', encoding='utf-8') as f:
            lines = f.readlines()[:vocab_size]
            for line in lines:
                # Stop once the vocabulary (including specials) is full.
                if (len(self.word2id) == vocab_size):
                    break
                (token, count) = line.split('\t')
                if (token not in self.word2id):
                    self.word2id[token] = len(self.word2id)
                if (token not in word_count):
                    word_count[token] = float(count)
        # Unigram probabilities are relative to the *kept* words only.
        total_word_count = sum(list(word_count.values()))
        for (word, count) in word_count.items():
            self.word2prob[word] = (count / total_word_count)

    def convert_tokens_to_string(self, tokens):
        sent = ' '.join(tokens)
        return sent

    def convert_string_to_tokens(self, sent):
        # Split strictly on single spaces; empty string yields no tokens.
        if (len(sent) == 0):
            return []
        tokens = sent.split(' ')
        return tokens

    def convert_tokens_to_ids(self, tokens, bos_and_eos=False, add_eos=False, add_cls=False):
        """Map tokens to ids, optionally wrapping with bos/eos and/or cls.

        Unknown tokens map to unk; an empty token list returns [] untouched
        (no special tokens are added).
        """
        ids = []
        if (len(tokens) == 0):
            return ids
        if bos_and_eos:
            tokens = (([self.bos_token] + tokens) + [self.eos_token])
        elif add_eos:
            tokens = (tokens + [self.eos_token])
        if add_cls:
            tokens = ([self.cls_token] + tokens)
        for token in tokens:
            if (token in self.word2id):
                token_id = self.word2id[token]
            else:
                token_id = self.word2id[self.unk_token]
            ids.append(token_id)
        return ids

    def convert_ids_to_tokens(self, ids, trim_bos=False, trim_pad=False, trim_from_eos=False, trim_after_eos=False):
        """Map ids back to tokens with optional trimming.

        trim_from_eos stops *before* emitting eos; trim_after_eos emits eos
        and then stops.
        """
        tokens = []
        for i in ids:
            if (trim_bos and (i == self.bos_token_id)):
                continue
            if (trim_pad and (i == self.pad_token_id)):
                continue
            if (trim_from_eos and (i == self.eos_token_id)):
                break
            tokens.append(self.id2word[i])
            if (trim_after_eos and (i == self.eos_token_id)):
                break
        return tokens

    def convert_batch_ids_to_tensor(self, batch_ids):
        """Right-pad a batch of id lists to equal length; return a LongTensor."""
        batch_lens = [len(ids) for ids in batch_ids]
        max_len = max(batch_lens)
        padded_batch_ids = [(ids + ([self.pad_token_id] * (max_len - len(ids)))) for ids in batch_ids]
        batch_tensor = torch.LongTensor(padded_batch_ids)
        return batch_tensor
class UniSpeechConfig(PretrainedConfig):
    """Configuration for a UniSpeech model.

    Stores transformer dimensions, the convolutional feature-extractor layout
    (conv_dim/conv_stride/conv_kernel must be parallel lists), SpecAugment
    masking settings, quantizer/contrastive-pretraining settings, and CTC
    fine-tuning options.
    """
    model_type = 'unispeech'

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-05, feat_extract_norm='group', feat_extract_activation='gelu', conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction='mean', ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, replace_prob=0.5, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        # Transformer encoder dimensions and regularization.
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        # Convolutional feature extractor: the three lists below are parallel
        # (one entry per conv layer) and validated further down.
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        # The conv layout lists must agree in length.
        if ((len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers)):
            raise ValueError(f'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.')
        # SpecAugment-style time/feature masking (fine-tuning regularization).
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # Quantizer / contrastive pretraining settings.
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # CTC fine-tuning options.
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # UniSpeech-specific: probability of replacing features with quantized ones.
        self.replace_prob = replace_prob
class LinearGrad(autograd.Function):
    """Custom autograd linear op: output = input @ weight.T + bias.

    Functionally equivalent to torch.nn.functional.linear with an explicit
    hand-written backward -- a template for modified-gradient experiments.

    Fix vs. original: `forward`/`backward` lacked @staticmethod (the
    first parameter named `context` shows the decorators were stripped);
    modern PyTorch rejects legacy non-static autograd Functions at
    `.apply` time.
    """

    @staticmethod
    def forward(context, input, weight, bias=None):
        # Stash tensors for backward; save_for_backward tolerates None.
        context.save_for_backward(input, weight, bias)
        output = torch.nn.functional.linear(input, weight, bias)
        return output

    @staticmethod
    def backward(context, grad_output):
        (input, weight, bias) = context.saved_tensors
        grad_input = grad_weight = grad_bias = None
        if context.needs_input_grad[0]:
            # d(out)/d(input): (N, out) @ (out, in) -> (N, in)
            grad_input = grad_output.mm(weight)
        if context.needs_input_grad[1]:
            # d(out)/d(weight): (out, N) @ (N, in) -> (out, in)
            grad_weight = grad_output.t().mm(input)
        if ((bias is not None) and context.needs_input_grad[2]):
            # Bias gradient sums over the batch dimension.
            grad_bias = grad_output.sum(0).squeeze(0)
        return (grad_input, grad_weight, grad_bias)
class BasisModel(tf.keras.layers.Layer):
    """Radial-basis attention layer: evaluates Gaussian basis functions at given
    positions and mixes them with per-example weights.

    `nfunctions` Gaussian bumps with centers evenly spaced on [0, 1.01] and a
    shared width `scale` are evaluated at the query positions; the result is
    the matrix product of the basis values with the supplied weights.
    """

    def __init__(self, dimensions, nfunctions, scale, **kwarg):
        super(BasisModel, self).__init__(name='attention', **kwarg)
        # NOTE(review): `dimensions` is accepted but never used -- confirm.
        self._degree = nfunctions  # number of basis functions
        self.scale = scale         # Gaussian width (variance-like) parameter

    def build(self, input_shape):
        # Fixed centers; slightly past 1.0 so the last bump covers the boundary.
        self.centers = np.linspace(0.0, 1.01, self._degree, dtype=np.float32)
        self.centers = tf.convert_to_tensor(self.centers)

    def call(self, inputs, training=None):
        """inputs = (weights, weights_std, positions); returns (result, zeros).

        NOTE(review): weights_std (inputs[1]) is accepted but unused, and the
        second return value is a zeros placeholder -- confirm intended.
        """
        weights = tf.transpose(inputs[0], perm=[0, 2, 1])
        weights_std = inputs[1]
        positions = inputs[2]
        basis_funcs = self.compute_basis_values(positions)
        result = tf.linalg.matmul(basis_funcs, weights)
        return (result, tf.zeros_like(result))

    def get_config(self):
        # Fixed: previously called super(TopDownAttention, ...) (an undefined,
        # copy-pasted class name) and read nonexistent self.units -- any call
        # raised. Serialize this layer's actual constructor state instead.
        config = super(BasisModel, self).get_config()
        config.update({'nfunctions': self._degree, 'scale': self.scale})
        return config

    def compute_basis_values(self, x):
        """Evaluate all Gaussian basis functions at positions x."""
        centers = tf.tile(tf.expand_dims(self.centers, 0), [tf.shape(x)[1], 1])
        x = tf.expand_dims(x, 2)
        # Unnormalized Gaussian bumps: exp(-(x - c)^2 / (2 * scale)).
        funcs = tf.exp((- (tf.math.pow((x - centers), 2) / (2.0 * self.scale))))
        return funcs
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.