code stringlengths 101 5.91M |
|---|
def adapt_bn(data, model, cfg):
    """Adapt BatchNorm running statistics by forwarding `data` through `model`.

    The model's BN layers are reconfigured (stats kept, not reset), then the
    batch is pushed through `cfg.ITER` times so the running estimates settle.
    Returns the model switched to eval mode.
    """
    model = bn_helper.configure_model(
        model, eps=1e-05, momentum=0.1, reset_stats=False, no_stats=False
    )
    # Each forward pass nudges the BN running mean/var toward the new data.
    for _step in range(cfg.ITER):
        model(**data)
    print('Adaptation Done ...')
    model.eval()
    return model
class MixedInt8TestTraining(BaseMixedInt8Test):
    """Checks that LoRA adapters on an int8-quantized OPT model receive gradients."""

    def setUp(self):
        self.model_name = 'facebook/opt-350m'
        super().setUp()

    def test_training(self):
        # int8 training needs bitsandbytes >= 0.37.0; silently skip otherwise.
        installed = version.parse(importlib_metadata.version('bitsandbytes'))
        if installed < version.parse('0.37.0'):
            return
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True, device_map='auto')
        # Freeze the base model; cast 1-D params (norms/biases) to fp32 for stability.
        for param in model.parameters():
            param.requires_grad = False
            if param.ndim == 1:
                param.data = param.data.to(torch.float32)
        # Inject trainable LoRA adapters into every OPT attention projection.
        for _, module in model.named_modules():
            if 'OPTAttention' in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)
        batch = self.tokenizer('Test batch ', return_tensors='pt').to(0)
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()
        # Only the LoRA adapters should have gradients; embeddings stay frozen.
        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
# NOTE(review): the bare expression below is almost certainly a mangled CLI
# decorator (e.g. `@click.command('generate-codes')` or a registry call) that
# lost its `@` and prefix — confirm against the original source.
('generate-codes')
def main(dataset: str, output: str, model: str, shards: SplitIndices=None, batch_size: int=None, splits: List[str]=None, profile_batch_id: int=None, use_gpu: bool=True):
    """Generate latent codes for `dataset` with a trained model, writing to `output`."""
    import torch
    from viewformer.utils.torch import load_model
    # Fall back to CPU when the caller disables the GPU or none is present.
    device = ('cpu' if ((not use_gpu) or (torch.cuda.device_count() == 0)) else 'cuda')
    device = torch.device(device)
    model = load_model(model)
    # NOTE(review): `profile_batch_id` is accepted but unused here — confirm intent.
    transformer = LatentCodeTransformer(model, batch_size=batch_size, device=device)
    transform_dataset(dataset, output, transformer, splits=splits, shards=shards)
def validate_flags_or_throw(bert_config):
    """Validate FLAGS consistency against `bert_config`; raise ValueError if invalid."""
    if not (FLAGS.do_train or FLAGS.do_predict):
        raise ValueError('At least one of `do_train` or `do_predict` must be True.')
    if FLAGS.do_train and not FLAGS.train_file:
        raise ValueError('If `do_train` is True, then `train_file` must be specified.')
    if FLAGS.do_predict and not FLAGS.predict_file:
        raise ValueError('If `do_predict` is True, then `predict_file` must be specified.')
    # The model cannot attend beyond the positions it was trained with.
    if FLAGS.max_seq_length > bert_config.max_position_embeddings:
        raise ValueError('Cannot use sequence length %d because the BERT model was only trained up to sequence length %d' % (FLAGS.max_seq_length, bert_config.max_position_embeddings))
    # Need room for the query plus [CLS]/[SEP] separators.
    if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:
        raise ValueError('The max_seq_length (%d) must be greater than max_query_length (%d) + 3' % (FLAGS.max_seq_length, FLAGS.max_query_length))
class UniformFofModel(LogitModel):
    """Choice model: a friend-of-friend candidate is picked uniformly at random
    when any exist, otherwise any candidate uniformly. No free parameters."""
    def __init__(self, model_id, D=None, vvv=False):
        # Degenerate bounds (1, 1): the single "parameter" is fixed, nothing to fit.
        LogitModel.__init__(self, model_id, bounds=((1, 1),), D=D, vvv=vvv)
        self.model_type = 'uniform_fof'
        self.model_short = 'uf'
        # has: candidate is a friend-of-friend (fof count > 0).
        self.D['has'] = (self.D.fof > 0)
        # choose: 1 iff the chosen candidate (y == 1) was a friend-of-friend.
        self.D['choose'] = (1 * ((self.D['has'] & self.D.y) == 1))
    def individual_likelihood(self, theta):
        """Per-choice-set likelihood: y / n_fof when any fof exists, else 1 / n.

        `theta` is ignored — the model has no free parameters.
        """
        # NOTE(review): nested-dict aggregation (column -> {name: func}) was
        # removed in pandas >= 0.25 — confirm the pinned pandas version.
        DFg = self.D.groupby('choice_id', as_index=False).agg({'has': {'n': len, 'n_fof': np.sum}, 'choose': {'y': max}})
        return np.where(DFg.has.n_fof, (DFg.choose.y / DFg.has.n_fof), (1.0 / DFg.has.n))
    def grad(self, theta=None, w=None):
        # No free parameters, so the gradient is identically zero.
        return np.array([0])
class DecoderType(Enum):
    """Available video-decoding backends."""

    PYAV = 'pyav'
    TORCHVISION = 'torchvision'
    FRAME = 'frame'
    DUMB = 'dumb'
class SpotPaths():
    """Index of videos and their temporal event annotations.

    Holds per-video metadata (path, frame count, fps, size) plus flattened
    per-event tensors so a video's events can be sliced in O(1) by index.
    """

    @classmethod
    def from_path(cls, data_path: str, path_prefix: str='', dataset: 'SpotDatasets'=None) -> 'SpotPaths':
        """Build from a JSON annotation file; directory input is not supported yet.

        `dataset` defaults to SpotDatasets.TENNIS when None.
        Raises FileNotFoundError when `data_path` does not exist.
        """
        if dataset is None:
            dataset = SpotDatasets.TENNIS
        if g_pathmgr.isfile(data_path):
            if Path(data_path).suffix == '.json':
                # BUGFIX: forward `dataset` (it was silently dropped before).
                return cls.from_json(data_path, path_prefix, dataset)
            raise NotImplementedError
        elif g_pathmgr.isdir(data_path):
            # BUGFIX: the exception was instantiated but never raised.
            raise NotImplementedError
        else:
            raise FileNotFoundError(f'{data_path} not found.')

    @classmethod
    def from_json(cls, json_file: str, path_prefix: str='', dataset: 'SpotDatasets'=None) -> 'SpotPaths':
        """Parse a JSON list of per-video annotation dicts into a SpotPaths."""
        if dataset is None:
            dataset = SpotDatasets.TENNIS
        assert g_pathmgr.exists(json_file), f'{json_file} not found.'
        # Close the handle deterministically (it was left to the GC before).
        with open(json_file) as fh:
            json_content = json.load(fh)
        label_map = LABELS_SPOT_DATASETS[SpotDatasets(dataset)]
        paths_and_annotations = []
        for content in json_content:
            new_content = copy.copy(content)
            new_content['events'] = [process_event(event, label_map) for event in new_content['events']]
            # Normalize numeric fields that may arrive as strings.
            for key in ('num_frames', 'height', 'width', 'num_events'):
                new_content[key] = int(new_content[key])
            paths_and_annotations.append(new_content)
        return cls(paths_and_annotations, path_prefix)

    def __init__(self, annotations: 'list[dict[str, Any]]', path_prefix: 'str | Path'='') -> None:
        """Store annotations and materialize the flattened lookup tensors."""
        self._annotations = annotations
        self._path_prefix = Path(path_prefix)
        self._serialize_annotations()

    def _serialize_annotations(self):
        """Flatten the per-video dicts into arrays/tensors for indexed access."""
        # np.bytes_ replaces np.string_, which was removed in NumPy 2.0.
        self._video_paths = np.array([video_content['video'] for video_content in self._annotations]).astype(np.bytes_)
        self._num_frames_per_video = torch.tensor([video_content['num_frames'] for video_content in self._annotations], dtype=torch.int32)
        self._cum_num_frames_per_video = self._num_frames_per_video.cumsum(0)
        # uint8 caps fps at 255, which is fine for video frame rates.
        self._fps_per_video = torch.tensor([int(video_content['fps']) for video_content in self._annotations], dtype=torch.uint8)
        self._height_per_video = torch.tensor([int(video_content['height']) for video_content in self._annotations], dtype=torch.int32)
        self._width_per_video = torch.tensor([int(video_content['width']) for video_content in self._annotations], dtype=torch.int32)
        # BUGFIX: this tensor was assigned twice (int32, then int16); keep the
        # int32 version so videos with > 32767 events do not overflow.
        self._num_events_per_video = torch.tensor([int(video_content['num_events']) for video_content in self._annotations], dtype=torch.int32)
        # Per-video [start, end) ranges into the flattened event tensors below.
        self._end_event_video_idx = self._num_events_per_video.to(dtype=torch.int32).cumsum(dim=0)
        self._start_event_video_idx = torch.roll(self._end_event_video_idx, 1)
        if self._start_event_video_idx.numel() > 0:  # guard the empty-dataset case
            self._start_event_video_idx[0] = 0
        self._events_video = torch.tensor([video_idx for (video_idx, video_content) in enumerate(self._annotations) for _ in range(len(video_content['events']))], dtype=torch.int32)
        self._label_event_per_video = torch.tensor([event_content['label'] for video_content in self._annotations for event_content in video_content['events']], dtype=torch.uint8)
        self._frame_event_per_video = torch.tensor([event_content['frame'] for video_content in self._annotations for event_content in video_content['events']])
        self._comment_event_per_video = np.array([event_content['comment'] for video_content in self._annotations for event_content in video_content['events']]).astype(np.bytes_)

    def __getitem__(self, index: int) -> 'dict[str, Any]':
        """Return path, name, frame count, and the event slices for video `index`."""
        video_event_start_idx = self._start_event_video_idx[index]
        video_event_end_idx = self._end_event_video_idx[index]
        return {
            'video_path': (self._path_prefix / self._video_paths[index].decode()),
            'video_name': self._video_paths[index].decode(),
            'num_frames': self._num_frames_per_video[index].item(),
            'events': {
                'label': self._label_event_per_video[video_event_start_idx:video_event_end_idx],
                'frame': self._frame_event_per_video[video_event_start_idx:video_event_end_idx],
                'comment': self._comment_event_per_video[video_event_start_idx:video_event_end_idx],
            },
        }

    def num_frames_per_video(self) -> torch.Tensor:
        """Per-video frame counts (int32 tensor)."""
        return self._num_frames_per_video

    def cum_num_frames_per_video(self) -> torch.Tensor:
        """Cumulative per-video frame counts."""
        return self._cum_num_frames_per_video

    def number_of_frames(self) -> int:
        """Total number of frames across all videos."""
        return int(self._num_frames_per_video.sum())

    def num_videos(self) -> int:
        return len(self._video_paths)

    @property
    def path_prefix(self) -> Path:
        """Root directory prepended to each relative video path."""
        return self._path_prefix

    @path_prefix.setter
    def path_prefix(self, value: 'str | Path'):
        # Restores the mangled `_prefix.setter` line as a proper property setter.
        self._path_prefix = Path(value)

    def global_rank(self):
        return get_global_rank()

    def world_size(self):
        """Distributed world size (correctly-spelled API)."""
        return get_world_size()

    def worlf_size(self):
        # Backward-compatible alias for the original (typo'd) method name.
        return self.world_size()

    def __len__(self) -> int:
        return len(self._annotations)
def is_valid_size_dict(size_dict):
    """Return True iff `size_dict` is a dict whose key set matches one of
    the allowed key sets in VALID_SIZE_DICT_KEYS."""
    if not isinstance(size_dict, dict):
        return False
    keys = set(size_dict)
    return any(keys == allowed_keys for allowed_keys in VALID_SIZE_DICT_KEYS)
class Verification(object):
    """CPU-vs-GPU verification and profiling harness for GNNAdvisor's SAG kernel."""

    def __init__(self, dim, row_pointers, column_index, degrees, partPtr, part2Node, partSize, dimWorker, warpPerBlock):
        self.row_pointers = row_pointers
        self.column_index = column_index
        self.degrees = degrees
        self.partPtr = partPtr
        self.part2Node = part2Node
        self.warpPerBlock = warpPerBlock
        self.partSize = partSize
        self.dimWorker = dimWorker
        # CSR row-pointer array holds num_nodes + 1 entries.
        self.num_nodes = len(row_pointers) - 1
        self.test_embedding = dim
        self.output_embedding = dim
        # All-ones inputs make mismatches easy to spot.
        self.X = torch.ones(self.num_nodes, self.test_embedding)
        self.W = torch.ones(self.test_embedding, self.output_embedding)
        self.result = None
        self.result_ref = None

    def reference(self, column_index, val, num_nodes):
        """Compute the CPU reference result via sparse matrix multiplication."""
        print('# Compute reference on CPU')
        self.result_ref = spmm(torch.tensor(column_index, dtype=torch.int64), torch.FloatTensor(val), num_nodes, num_nodes, self.X)

    def compute(self):
        """Run the GPU SAG kernel and store its result."""
        print('# Compute result on GPU')
        device_X = self.X.cuda()
        self.result = GNNA.SAG(device_X, self.row_pointers, self.column_index, self.degrees, self.partPtr, self.part2Node, self.partSize, self.dimWorker, self.warpPerBlock)

    def compare(self):
        """Compare GPU output with the CPU reference (< 0.01% mismatch tolerated)."""
        if self.result_ref is None or self.result is None:
            raise ValueError('MUST compute result and result reference (CPU) first!!')
        matches = torch.eq(self.result_ref, self.result.cpu())
        match_count = torch.sum(matches)
        if (1 - (match_count / self.result_ref.numel())) < 0.0001:
            print('# Verification PASSED')
        else:
            print('# Verification FAILED')

    def profile_spmm(self, round=1):
        """Benchmark the SAG kernel: 10 warm-up launches, then `round` timed ones."""
        device_X = self.X.cuda()
        print('SpMM profiling size: N: {}, N: {}, K: {}'.format(device_X.size(0), device_X.size(0), device_X.size(1)))
        for _ in range(10):  # warm-up
            self.result = GNNA.SAG(device_X, self.row_pointers, self.column_index, self.degrees, self.partPtr, self.part2Node, self.partSize, self.dimWorker, self.warpPerBlock)
        torch.cuda.synchronize()
        start = time.perf_counter()
        for _ in tqdm(range(round)):
            self.result = GNNA.SAG(device_X, self.row_pointers, self.column_index, self.degrees, self.partPtr, self.part2Node, self.partSize, self.dimWorker, self.warpPerBlock)
        torch.cuda.synchronize()
        dur = time.perf_counter() - start
        print('=> SpMM profiling avg (ms): {:.3f}'.format((dur * 1000.0) / round))
        print()
class ParallelModeOptimization(Optimization):
    """Non-tunable pass that records a parallel-mode config and, when data
    parallelism is active, registers a DDP wrapper on the model context."""

    def __init__(self):
        # name='parallel_mode', group='parallel_mode', is_tunable=False
        super().__init__('parallel_mode', 'parallel_mode', False)

    def tune(self, model_context, config=None, strategy=None, apply_transform=True, time_limit=None):
        """Apply the transform (optionally) and report success with the config."""
        if apply_transform:
            model_context = self.transform(model_context, config)
        return (True, config, model_context)

    def transform(self, model_context, config=None):
        """Attach the config; enable sync-BN and the DDP wrapper when data-parallel size > 1."""
        model_context.parallel_mode_config = config
        for name, size in config[0]:
            if name == 'data' and size > 1:
                set_sync_bn_pg(model_context.model, parallel_group('data'))
                model_context.add_wrapper('ddp', ParallelModeOptimization.apply_wrapper, 'data', is_pre_wrapper=False)
                break
        return model_context

    def apply_wrapper(model_context, wrapper_name, wrapper_config):
        """Wrap the model in DDP (invoked as a plain callback, not a bound method)."""
        if wrapper_name != 'ddp':
            return
        if parallel_group_size('data') == world_size() and torch.cuda.is_available():
            device = torch.device(type='cuda', index=local_rank())
            materialize_modules_to_device(model_context.model, device)
            model_context.model = DDP(model_context.model, process_group=parallel_group(wrapper_config), find_unused_parameters=model_context.find_unused_parameters)
def _partitioned_variable_assign(partitioned_var, new_value):
    """Split `new_value` along the partition axis and assign each piece to the
    matching partition; returns a single grouped assignment op."""
    # A single partition is trivially "partitioned" along axis 0.
    axis = 0 if len(partitioned_var) == 1 else _determine_partitioned_axis(partitioned_var)
    partition_sizes = np.array([part.get_shape()[axis] for part in partitioned_var])
    split_values = array_ops.split(new_value, ops.convert_to_tensor(partition_sizes, dtype=np.int32), axis=axis)
    op_list = [_variable_assign(part, value) for part, value in zip(partitioned_var, split_values)]
    return control_flow_ops.group(*op_list, name=partitioned_var.name + '_group_assign')
def master_only(func):
    """Decorator that executes `func` only on the master (rank 0) process.

    On non-zero ranks the wrapped call is a no-op returning None.
    BUGFIX: the bare `(func)` line was a mangled `@functools.wraps(func)`;
    restore it so the wrapped function keeps its name/docstring.
    """
    import functools  # local import keeps this block self-contained

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        rank, _ = get_dist_info()
        if rank == 0:
            return func(*args, **kwargs)
    return wrapper
class parsingNet(torch.nn.Module):
    """Ultra-Fast lane detection network: ResNet backbone + row-anchor classifier,
    with an optional auxiliary segmentation branch used during training."""
    def __init__(self, size=(288, 800), pretrained=True, backbone='50', cls_dim=(37, 10, 4), use_aux=False):
        # cls_dim = (griding_num + 1, number of row anchors, number of lanes).
        super(parsingNet, self).__init__()
        self.size = size
        self.w = size[0]
        self.h = size[1]
        self.cls_dim = cls_dim
        self.use_aux = use_aux
        # Flattened output size of the classification head.
        self.total_dim = np.prod(cls_dim)
        self.model = resnet(backbone, pretrained=pretrained)
        if self.use_aux:
            # Auxiliary heads take stage-2/3/4 features; channel counts differ
            # between basic-block (18/34) and bottleneck (50+) backbones.
            self.aux_header2 = torch.nn.Sequential((conv_bn_relu(128, 128, kernel_size=3, stride=1, padding=1) if (backbone in ['34', '18']) else conv_bn_relu(512, 128, kernel_size=3, stride=1, padding=1)), conv_bn_relu(128, 128, 3, padding=1), conv_bn_relu(128, 128, 3, padding=1), conv_bn_relu(128, 128, 3, padding=1))
            self.aux_header3 = torch.nn.Sequential((conv_bn_relu(256, 128, kernel_size=3, stride=1, padding=1) if (backbone in ['34', '18']) else conv_bn_relu(1024, 128, kernel_size=3, stride=1, padding=1)), conv_bn_relu(128, 128, 3, padding=1), conv_bn_relu(128, 128, 3, padding=1))
            self.aux_header4 = torch.nn.Sequential((conv_bn_relu(512, 128, kernel_size=3, stride=1, padding=1) if (backbone in ['34', '18']) else conv_bn_relu(2048, 128, kernel_size=3, stride=1, padding=1)), conv_bn_relu(128, 128, 3, padding=1))
            # Dilated convs fuse the 3 aux feature maps into (num_lanes + 1) seg classes.
            self.aux_combine = torch.nn.Sequential(conv_bn_relu(384, 256, 3, padding=2, dilation=2), conv_bn_relu(256, 128, 3, padding=2, dilation=2), conv_bn_relu(128, 128, 3, padding=2, dilation=2), conv_bn_relu(128, 128, 3, padding=4, dilation=4), torch.nn.Conv2d(128, (cls_dim[(- 1)] + 1), 1))
            initialize_weights(self.aux_header2, self.aux_header3, self.aux_header4, self.aux_combine)
        # Group classification head over pooled backbone features (1800 = 8 * 9 * 25).
        self.cls = torch.nn.Sequential(torch.nn.Linear(1800, 2048), torch.nn.ReLU(), torch.nn.Linear(2048, self.total_dim))
        self.pool = (torch.nn.Conv2d(512, 8, 1) if (backbone in ['34', '18']) else torch.nn.Conv2d(2048, 8, 1))
    def forward(self, x):
        """Return group_cls (and aux_seg when use_aux); group_cls has shape (-1, *cls_dim)."""
        (x2, x3, fea) = self.model(x)
        if self.use_aux:
            # Upsample deeper stages to stage-2 resolution before concatenation.
            x2 = self.aux_header2(x2)
            x3 = self.aux_header3(x3)
            x3 = torch.nn.functional.interpolate(x3, scale_factor=2, mode='bilinear')
            x4 = self.aux_header4(fea)
            x4 = torch.nn.functional.interpolate(x4, scale_factor=4, mode='bilinear')
            aux_seg = torch.cat([x2, x3, x4], dim=1)
            aux_seg = self.aux_combine(aux_seg)
        else:
            aux_seg = None
        fea = self.pool(fea).view((- 1), 1800)
        group_cls = self.cls(fea).view((- 1), *self.cls_dim)
        if self.use_aux:
            return (group_cls, aux_seg)
        return group_cls
class ConvBertForTokenClassification(metaclass=DummyObject):
    """Import-time placeholder that raises a helpful error when torch is missing."""

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Raises with an installation hint instead of an opaque ImportError.
        requires_backends(self, ['torch'])
def create_dummy_object(name, type='pt'):
    """Render the dummy-object source for `name` using the backend `type` template.

    ALL-CAPS names become constants, all-lowercase names become functions,
    and everything else becomes a (possibly pretrained-style) class.
    """
    _pretrained = ['ConfigForCausalLM', 'ForConditionalGeneration', 'ForMaskedLM', 'ForMultipleChoice', 'ForQuestionAnswering', 'ForSequenceClassification', 'ForTokenClassification', 'Model', 'Tokenizer']
    assert (type in ['pt', 'tf', 'sentencepiece', 'tokenizers', 'flax'])
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    if name.islower():
        return DUMMY_FUNCTION[type].format(name)
    # Class: pick the pretrained template when the name carries a pretrained marker.
    if any(marker in name for marker in _pretrained):
        return DUMMY_PRETRAINED_CLASS[type].format(name)
    return DUMMY_CLASS[type].format(name)
def dump_all_entities(examples, out_path, id2text: dict):
    """Collect the unique head/tail entities from `examples` and dump them as JSON.

    Args:
        examples: iterable of dicts with head_id/head/relation/tail_id/tail keys.
        out_path: destination JSON file path.
        id2text: maps entity id -> textual description.
    """
    id2entity = {}
    relations = set()
    for ex in examples:
        head_id = ex['head_id']
        relations.add(ex['relation'])
        # First occurrence wins; later duplicates are ignored.
        if head_id not in id2entity:
            id2entity[head_id] = {'entity_id': head_id, 'entity': ex['head'], 'entity_desc': id2text[head_id]}
        tail_id = ex['tail_id']
        if tail_id not in id2entity:
            id2entity[tail_id] = {'entity_id': tail_id, 'entity': ex['tail'], 'entity_desc': id2text[tail_id]}
    print('Get {} entities, {} relations in total'.format(len(id2entity), len(relations)))
    # BUGFIX: use a context manager so the output file is flushed and closed.
    with open(out_path, 'w', encoding='utf-8') as writer:
        json.dump(list(id2entity.values()), writer, ensure_ascii=False, indent=4)
class ResNet18WithEmbeddingHead(nn.Module):
    """ResNet-18 backbone with two heads: a class logit head and an embedding head."""

    def __init__(self, num_classes, emb_dim=128, pretrained=True):
        super(ResNet18WithEmbeddingHead, self).__init__()
        self.model_resnet = models.resnet18(pretrained=pretrained)
        self.num_ftrs = self.model_resnet.fc.in_features
        # Drop the stock classifier so the backbone emits raw features.
        self.model_resnet.fc = nn.Identity()
        self.fc_classes = nn.Linear(self.num_ftrs, num_classes)
        self.fc_emb = nn.Sequential(
            nn.Linear(self.num_ftrs, 2048),
            nn.ReLU(inplace=True),
            nn.Linear(2048, emb_dim),
        )

    def forward(self, x):
        """Return (class_logits, embedding, backbone_features)."""
        features = self.model_resnet(x)
        logits = self.fc_classes(features)
        embedding = self.fc_emb(features)
        return (logits, embedding, features)

    def adapt(self, num_classes):
        """Swap in a fresh, untrained classifier head for a new class count."""
        self.fc_classes = nn.Linear(self.num_ftrs, num_classes)
def add_model_ema_configs(_C):
    """Attach default MODEL_EMA settings to the config node `_C` (mutated in place)."""
    ema = type(_C)()  # same node type as the parent config
    ema.ENABLED = False
    ema.DECAY = 0.999
    ema.DEVICE = ''  # empty string: follow the model's device
    ema.USE_EMA_WEIGHTS_FOR_EVAL_ONLY = False
    ema.YOLOX = False
    _C.MODEL_EMA = ema
class DenseBlock(nn.ModuleDict):
    """DenseNet block: each layer consumes the concatenation of all prior feature maps."""

    _version = 2

    def __init__(self, num_layers, num_input_features, bn_size, growth_rate, norm_layer=nn.ReLU, drop_rate=0.0, memory_efficient=False):
        super(DenseBlock, self).__init__()
        for index in range(num_layers):
            # Layer i sees the block input plus i * growth_rate accumulated channels.
            in_features = num_input_features + index * growth_rate
            self.add_module(
                'denselayer%d' % (index + 1),
                DenseLayer(in_features, growth_rate=growth_rate, bn_size=bn_size, norm_layer=norm_layer, drop_rate=drop_rate, memory_efficient=memory_efficient),
            )

    def forward(self, init_features):
        """Run layers sequentially, feeding each the list of all features so far."""
        features = [init_features]
        for _name, layer in self.items():
            features.append(layer(features))
        return torch.cat(features, 1)
def fwd2bwd(fwd_flow, fwd_flow_conf):
    """Invert a dense forward optical-flow field into a backward flow field.

    Each source pixel (i, j) splats its negated flow onto the rounded, clipped
    target pixel it maps to; repeated hits on a target are running-averaged.

    Args:
        fwd_flow: forward flow of shape (1, 2, H, W); channel 0 = x-shift,
            channel 1 = y-shift.
        fwd_flow_conf: unused; kept for interface compatibility.

    Returns:
        Backward flow as a (1, 2, H, W) numpy array.
    """
    # Cleanup: removed an unused scipy import and four dead coordinate/value
    # accumulator lists that were filled but never read.
    (_, _, h, w) = fwd_flow.shape
    bwd_flow = np.zeros((1, 2, h, w))
    # flags counts hits per target cell (per channel) to support averaging.
    flags = np.zeros((1, 2, h, w))
    for i in range(h):
        for j in range(w):
            shift_x = fwd_flow[0, 0, i, j]
            shift_y = fwd_flow[0, 1, i, j]
            # Round the target location and clip it into the image bounds.
            target_x = int(np.around(np.clip(j + shift_x, 0, w - 1)))
            target_y = int(np.around(np.clip(np.round(i + shift_y), 0, h - 1)))
            for channel, shift in ((0, shift_x), (1, shift_y)):
                bwd_flow[0, channel, target_y, target_x] += -shift
                flags[0, channel, target_y, target_x] += 1
                # Running average on collision, then reset the count to 1.
                if flags[0, channel, target_y, target_x] > 1:
                    bwd_flow[0, channel, target_y, target_x] /= flags[0, channel, target_y, target_x]
                    flags[0, channel, target_y, target_x] = 1
    return bwd_flow
# NOTE(review): the call below is almost certainly a mangled registry decorator
# (e.g. `@NECKS.register_module()` from mmdet) — confirm against the original source.
_module()
class YOLOXPAFPN(BaseModule):
    """Path-Aggregation FPN neck used by YOLOX: top-down fusion followed by a
    bottom-up path, with 1x1 output convs unifying channel counts."""
    def __init__(self, in_channels, out_channels, num_csp_blocks=3, use_depthwise=False, upsample_cfg=dict(scale_factor=2, mode='nearest'), conv_cfg=None, norm_cfg=dict(type='BN', momentum=0.03, eps=0.001), act_cfg=dict(type='Swish'), init_cfg=dict(type='Kaiming', layer='Conv2d', a=math.sqrt(5), distribution='uniform', mode='fan_in', nonlinearity='leaky_relu')):
        super(YOLOXPAFPN, self).__init__(init_cfg)
        self.in_channels = in_channels
        self.out_channels = out_channels
        conv = (DepthwiseSeparableConvModule if use_depthwise else ConvModule)
        self.upsample = nn.Upsample(**upsample_cfg)
        # Top-down path: reduce channels of the deeper feature, upsample, fuse.
        self.reduce_layers = nn.ModuleList()
        self.top_down_blocks = nn.ModuleList()
        for idx in range((len(in_channels) - 1), 0, (- 1)):
            self.reduce_layers.append(ConvModule(in_channels[idx], in_channels[(idx - 1)], 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg))
            self.top_down_blocks.append(CSPLayer((in_channels[(idx - 1)] * 2), in_channels[(idx - 1)], num_blocks=num_csp_blocks, add_identity=False, use_depthwise=use_depthwise, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg))
        # Bottom-up path: stride-2 downsample, then CSP fusion.
        self.downsamples = nn.ModuleList()
        self.bottom_up_blocks = nn.ModuleList()
        for idx in range((len(in_channels) - 1)):
            self.downsamples.append(conv(in_channels[idx], in_channels[idx], 3, stride=2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg))
            self.bottom_up_blocks.append(CSPLayer((in_channels[idx] * 2), in_channels[(idx + 1)], num_blocks=num_csp_blocks, add_identity=False, use_depthwise=use_depthwise, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg))
        # 1x1 convs bring every scale to `out_channels`.
        self.out_convs = nn.ModuleList()
        for i in range(len(in_channels)):
            self.out_convs.append(ConvModule(in_channels[i], out_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg))
    def forward(self, inputs):
        """Fuse multi-scale `inputs` (shallow -> deep); returns a tuple of feature maps."""
        assert (len(inputs) == len(self.in_channels))
        # Top-down: start from the deepest feature and work toward the shallowest.
        inner_outs = [inputs[(- 1)]]
        for idx in range((len(self.in_channels) - 1), 0, (- 1)):
            feat_heigh = inner_outs[0]
            feat_low = inputs[(idx - 1)]
            feat_heigh = self.reduce_layers[((len(self.in_channels) - 1) - idx)](feat_heigh)
            inner_outs[0] = feat_heigh
            upsample_feat = self.upsample(feat_heigh)
            inner_out = self.top_down_blocks[((len(self.in_channels) - 1) - idx)](torch.cat([upsample_feat, feat_low], 1))
            inner_outs.insert(0, inner_out)
        # Bottom-up: re-aggregate from the shallowest fused feature downward.
        outs = [inner_outs[0]]
        for idx in range((len(self.in_channels) - 1)):
            feat_low = outs[(- 1)]
            feat_height = inner_outs[(idx + 1)]
            downsample_feat = self.downsamples[idx](feat_low)
            out = self.bottom_up_blocks[idx](torch.cat([downsample_feat, feat_height], 1))
            outs.append(out)
        for (idx, conv) in enumerate(self.out_convs):
            outs[idx] = conv(outs[idx])
        return tuple(outs)
class Node():
    """A network node (IPv6 address + optional reverse-DNS name) of a given NodeType."""

    type = NodeType.UNKNOWN
    addr = ''
    name = None
    error = False

    def __init__(self, type, addr=addr):
        self.type = type
        self.addr = addr
        # Best-effort reverse DNS; leave name as None when the lookup fails.
        try:
            self.name = socket.gethostbyaddr(addr)[0]
        except socket.error:
            self.name = None

    def __eq__(self, other):
        # Compare packed IPv6 forms so textually different spellings still match.
        pack = lambda address: socket.inet_pton(socket.AF_INET6, address)
        if isinstance(other, str):
            if self.type == NodeType.UNKNOWN:
                return False
            return pack(self.addr) == pack(other)
        if self.type != other.type:
            return False
        return pack(self.addr) == pack(other.addr)

    def __str__(self):
        if self.type == NodeType.UNKNOWN:
            return '*'
        if self.name:
            return '{} ({} / {})'.format(self.name, self.addr, str(self.type))
        return '{} ({})'.format(self.addr, str(self.type))

    def __repr__(self):
        return '<Node: {}>'.format(self.__str__())
class DatasetConfig():
    """Lightweight holder for a dataset's file pattern and split sizes."""

    def __init__(self, file_pattern, split_sizes):
        # Glob/shard pattern locating the data files.
        self.file_pattern = file_pattern
        # Per-split example counts (structure defined by the caller).
        self.split_sizes = split_sizes
class MixtureOfLaplaceNLLLoss(nn.Module):
    """Negative log-likelihood for a mixture of Laplace components, with optional
    joint (per-scene) aggregation via a CSR pointer."""
    def __init__(self, eps: float=1e-06, reduction: str='mean') -> None:
        super(MixtureOfLaplaceNLLLoss, self).__init__()
        self.reduction = reduction
        # Per-component NLL; the mixture reduction happens in forward().
        self.nll_loss = LaplaceNLLLoss(eps=eps, reduction='none')
    def forward(self, pred: torch.Tensor, target: torch.Tensor, prob: torch.Tensor, mask: torch.Tensor, ptr: Optional[torch.Tensor]=None, joint: bool=False) -> torch.Tensor:
        """Compute the mixture NLL.

        pred holds per-component predictions; prob the mixture logits; mask
        selects valid timesteps. With joint=True, per-element NLLs are summed
        across agents (globally, or per scene via the CSR `ptr`).
        """
        # Broadcast target across the component dimension.
        nll = self.nll_loss(pred=pred, target=target.unsqueeze(1))
        # Zero out invalid timesteps, then sum over time and coordinate dims.
        nll = (nll * mask.view((- 1), 1, target.size((- 2)), 1)).sum(dim=((- 2), (- 1)))
        if joint:
            if (ptr is None):
                # Single scene: aggregate all rows jointly.
                nll = nll.sum(dim=0, keepdim=True)
            else:
                # Batched scenes: per-scene segment sum over the CSR pointer.
                nll = segment_csr(src=nll, indptr=ptr, reduce='sum')
        else:
            pass
        # Mixture NLL: -logsumexp(log_pi - component_nll) over components.
        log_pi = F.log_softmax(prob, dim=(- 1))
        loss = (- torch.logsumexp((log_pi - nll), dim=(- 1)))
        if (self.reduction == 'mean'):
            return loss.mean()
        elif (self.reduction == 'sum'):
            return loss.sum()
        elif (self.reduction == 'none'):
            return loss
        else:
            raise ValueError('{} is not a valid value for reduction'.format(self.reduction))
def sanitize_html(txt: 'Union[str, TokenWithId]') -> str:
    """Escape angle brackets so `txt` renders literally inside HTML.

    BUGFIX: the previous replacements ('<' -> '<', '>' -> '>') were no-ops —
    almost certainly HTML entities lost to de-escaping in transit; restore
    real escaping to '&lt;' / '&gt;'.
    """
    if isinstance(txt, TokenWithId):
        txt = txt.token
    return txt.replace('<', '&lt;').replace('>', '&gt;')
class _VIPSReader():
has_levels = True
def __init__(self, path: str, mpp: Optional[float]=None, *, cache_kw: Optional[Dict[(str, Any)]]=None, ignore_missing_mpp: bool=False, pad_missing: bool=True, loaded_image: Optional['vips.Image']=None, use_bounds: bool=False, transforms: Optional[List[int]]=None) -> None:
self.path = path
self.pad_missing = pad_missing
self.cache_kw = (cache_kw if cache_kw else {})
self.loaded_downsample_levels = {}
if (loaded_image is None):
loaded_image = vips.Image.new_from_file(path)
self.vips_loader = loaded_image.get('vips-loader')
self.transforms = transforms
self.properties = {}
for field in loaded_image.get_fields():
self.properties.update({field: loaded_image.get(field)})
self.dimensions = self._detect_dimensions()
if (mpp is not None):
log.debug(f'Setting MPP to {mpp}')
self.properties[OPS_MPP_X] = mpp
elif (OPS_MPP_X not in self.properties.keys()):
log.debug('Microns-Per-Pixel (MPP) not found, Searching EXIF')
mpp = detect_mpp(path, loaded_image)
if (mpp is not None):
self.properties[OPS_MPP_X] = mpp
elif ignore_missing_mpp:
self.properties[OPS_MPP_X] = DEFAULT_JPG_MPP
log.debug(f'Could not detect microns-per-pixel; using default {DEFAULT_JPG_MPP}')
else:
raise errors.SlideMissingMPPError(f'Could not detect microns-per-pixel for slide: {path}')
if isinstance(use_bounds, (list, tuple, np.ndarray)):
self.bounds = tuple(use_bounds)
elif (use_bounds and (OPS_BOUNDS_X in self.properties)):
self.bounds = (int(self.properties[OPS_BOUNDS_X]), int(self.properties[OPS_BOUNDS_Y]), int(self.properties[OPS_BOUNDS_WIDTH]), int(self.properties[OPS_BOUNDS_HEIGHT]))
else:
self.bounds = None
if (self.bounds is not None):
self.dimensions = (self.bounds[2], self.bounds[3])
self._load_levels(loaded_image)
def mpp(self):
return self.properties[OPS_MPP_X]
def has_mpp(self):
return ((OPS_MPP_X in self.properties) and (self.properties[OPS_MPP_X] is not None))
def _detect_dimensions(self) -> Tuple[(int, int)]:
return (int(self.properties[OPS_WIDTH]), int(self.properties[OPS_HEIGHT]))
def _load_levels(self, vips_image: Optional['vips.Image']):
if (vips_image is None):
vips_image = vips.Image.new_from_file(self.path)
if (OPS_LEVEL_COUNT in self.properties):
self.level_count = int(self.properties[OPS_LEVEL_COUNT])
self.levels = []
for lev in range(self.level_count):
width = int(vips_image.get(OPS_LEVEL_WIDTH(lev)))
height = int(vips_image.get(OPS_LEVEL_HEIGHT(lev)))
downsample = float(vips_image.get(OPS_LEVEL_DOWNSAMPLE(lev)))
self.levels += [{'dimensions': (width, height), 'width': width, 'height': height, 'downsample': downsample, 'level': lev}]
elif (('n-pages' in self.properties) and (OPS_LEVEL_COUNT not in self.properties)):
log.debug('Attempting to read non-standard multi-page TIFF')
self.level_count = min((int(self.properties['n-pages']) - 3), 1)
self.levels = []
for lev in range(self.level_count):
temp_img = vips.Image.new_from_file(self.path, page=lev)
width = int(temp_img.get('width'))
height = int(temp_img.get('height'))
downsample = float((int(self.properties[OPS_WIDTH]) / width))
self.levels += [{'dimensions': (width, height), 'width': width, 'height': height, 'downsample': downsample, 'level': lev}]
self.levels = sorted(self.levels, key=(lambda x: x['width']), reverse=True)
else:
self.level_count = 1
self.levels = [{'dimensions': self.dimensions, 'width': int(self.properties[OPS_WIDTH]), 'height': int(self.properties[OPS_HEIGHT]), 'downsample': 1, 'level': 0}]
if (self.bounds is not None):
for lev in range(self.level_count):
self.levels[lev]['width'] = int(np.round((self.bounds[2] / self.levels[lev]['downsample'])))
self.levels[lev]['height'] = int(np.round((self.bounds[3] / self.levels[lev]['downsample'])))
self.levels[lev]['dimensions'] = (self.levels[lev]['width'], self.levels[lev]['height'])
if (self.transforms is not None):
for transform in self.transforms:
if (transform in (ROTATE_90_CLOCKWISE, ROTATE_270_CLOCKWISE)):
for lev in range(self.level_count):
(self.levels[lev]['width'], self.levels[lev]['height']) = (self.levels[lev]['height'], self.levels[lev]['width'])
self.levels[lev]['dimensions'] = (self.levels[lev]['width'], self.levels[lev]['height'])
self.dimensions = (self.dimensions[1], self.dimensions[0])
self.level_downsamples = [lev['downsample'] for lev in self.levels]
self.level_dimensions = [lev['dimensions'] for lev in self.levels]
def _load_downsample_level(self, level: int) -> 'vips.Image':
image = self.read_level(level=level)
if self.cache_kw:
image = image.tilecache(**self.cache_kw)
self.loaded_downsample_levels.update({level: image})
return image
def best_level_for_downsample(self, downsample: float) -> int:
max_downsample = 0
for d in self.level_downsamples:
if (d < downsample):
max_downsample = d
try:
max_level = self.level_downsamples.index(max_downsample)
except Exception:
log.debug(f'Error attempting to read level {max_downsample}')
return 0
return max_level
def get_downsampled_image(self, level: int) -> 'vips.Image':
if (level in range(len(self.levels))):
if (level in self.loaded_downsample_levels):
return self.loaded_downsample_levels[level]
else:
return self._load_downsample_level(level)
else:
return False
def coord_to_raw(self, x, y):
if (self.transforms is not None):
for transform in self.transforms[::(- 1)]:
if (transform == ROTATE_90_CLOCKWISE):
(x, y) = (y, (self.dimensions[0] - x))
if (transform == ROTATE_180_CLOCKWISE):
(x, y) = ((self.dimensions[0] - x), (self.dimensions[1] - y))
if (transform == ROTATE_270_CLOCKWISE):
(x, y) = ((self.dimensions[1] - y), x)
if (transform == FLIP_HORIZONTAL):
x = (self.dimensions[0] - x)
if (transform == FLIP_VERTICAL):
y = (self.dimensions[1] - y)
if (self.bounds is not None):
x += self.bounds[0]
y += self.bounds[1]
return (x, y)
def raw_to_coord(self, x, y):
if (self.bounds is not None):
x -= self.bounds[0]
y -= self.bounds[1]
if (self.transforms is not None):
for transform in self.transforms:
if (transform == ROTATE_90_CLOCKWISE):
(x, y) = ((self.dimensions[0] - y), x)
if (transform == ROTATE_180_CLOCKWISE):
(x, y) = ((self.dimensions[0] - x), (self.dimensions[1] - y))
if (transform == ROTATE_270_CLOCKWISE):
(x, y) = (y, (self.dimensions[1] - x))
if (transform == FLIP_HORIZONTAL):
x = (self.dimensions[0] - x)
if (transform == FLIP_VERTICAL):
y = (self.dimensions[1] - y)
return (x, y)
def bound_and_transform(self, image: vips.Image, level: int) -> vips.Image:
if (self.bounds is not None):
ds = self.level_downsamples[level]
crop_bounds = (int(np.round((self.bounds[0] / ds))), int(np.round((self.bounds[1] / ds))), int(np.round((self.bounds[2] / ds))), int(np.round((self.bounds[3] / ds))))
image = image.crop(*crop_bounds)
if (self.transforms is not None):
for transform in self.transforms:
if (transform == ROTATE_90_CLOCKWISE):
image = image.rot90()
if (transform == ROTATE_180_CLOCKWISE):
image = image.rot180()
if (transform == ROTATE_270_CLOCKWISE):
image = image.rot270()
if (transform == FLIP_HORIZONTAL):
image = image.fliphor()
if (transform == FLIP_VERTICAL):
image = image.flipver()
return image
def read_level(self, fail: bool=True, access=vips.enums.Access.RANDOM, to_numpy: bool=False, level: Optional[int]=None, **kwargs) -> Union[(vips.Image, np.ndarray)]:
if ((self.properties['vips-loader'] == 'tiffload') and (level is not None)):
kwargs['page'] = self.levels[level]['level']
elif (level is not None):
kwargs['level'] = level
image = vips.Image.new_from_file(self.path, fail=fail, access=access, **kwargs)
image = self.bound_and_transform(image, level=level)
if to_numpy:
return vips2numpy(image)
else:
return image
def read_region(self, base_level_dim: Tuple[(int, int)], downsample_level: int, extract_size: Tuple[(int, int)], convert: Optional[str]=None, flatten: bool=False, resize_factor: Optional[float]=None, pad_missing: Optional[bool]=None) -> 'vips.Image':
(base_level_x, base_level_y) = base_level_dim
(extract_width, extract_height) = extract_size
downsample_factor = self.level_downsamples[downsample_level]
downsample_x = int((base_level_x / downsample_factor))
downsample_y = int((base_level_y / downsample_factor))
image = self.get_downsampled_image(downsample_level)
crop_args = (downsample_x, downsample_y, extract_width, extract_height)
if (((pad_missing is not None) and pad_missing) or ((pad_missing is None) and self.pad_missing)):
region = vips_padded_crop(image, *crop_args)
else:
region = image.crop(*crop_args)
if (flatten and (region.bands == 4)):
region = region.flatten()
if (resize_factor is not None):
region = region.resize(resize_factor)
if (convert and (convert.lower() in ('jpg', 'jpeg'))):
return vips2jpg(region)
elif (convert and (convert.lower() == 'png')):
return vips2png(region)
elif (convert == 'numpy'):
return vips2numpy(region)
else:
return region
def read_from_pyramid(self, top_left: Tuple[(int, int)], window_size: Tuple[(int, int)], target_size: Tuple[(int, int)], convert: Optional[str]=None, flatten: bool=False, pad_missing: Optional[bool]=None) -> 'vips.Image':
    """Read a window from the best pyramid level and resize it to ``target_size``.

    Args:
        top_left: (x, y) of the window origin in base-level coordinates.
        window_size: (width, height) of the window in base-level coordinates.
        target_size: Desired output (width, height).
        convert: Optional output format: 'jpg'/'jpeg', 'png', or 'numpy'.
        flatten: Drop the alpha channel when the image has 4 bands.
        pad_missing: Pad out-of-bounds reads; None defers to ``self.pad_missing``.
    """
    # Overall downsample needed to map the window onto the target size.
    target_downsample = window_size[0] / target_size[0]
    ds_level = self.best_level_for_downsample(target_downsample)
    image = self.get_downsampled_image(ds_level)
    # Bridge the gap between the chosen level's downsample and the target's.
    image = image.resize(self.level_downsamples[ds_level] / target_downsample)
    crop_x = int(top_left[0] / target_downsample)
    crop_y = int(top_left[1] / target_downsample)
    crop_w = min(target_size[0], image.width)
    crop_h = min(target_size[1], image.height)
    use_padding = self.pad_missing if pad_missing is None else pad_missing
    if use_padding:
        image = vips_padded_crop(image, crop_x, crop_y, crop_w, crop_h)
    else:
        image = image.crop(crop_x, crop_y, crop_w, crop_h)
    if flatten and image.bands == 4:
        image = image.flatten()
    if convert and convert.lower() in ('jpg', 'jpeg'):
        return vips2jpg(image)
    if convert and convert.lower() == 'png':
        return vips2png(image)
    if convert == 'numpy':
        return vips2numpy(image)
    return image
def thumbnail(self, width: int=512, fail: bool=True, access=vips.enums.Access.RANDOM, **kwargs) -> np.ndarray:
    """Return a thumbnail of the slide as a numpy array.

    Uses the fast vips thumbnail path when possible; falls back to reading a
    whole level for Leica slides, tiff-loaded slides, or slides with bounds
    or transforms applied (the fast path would ignore those).
    """
    needs_full_read = (
        (OPS_VENDOR in self.properties and self.properties[OPS_VENDOR] == 'leica')
        or self.vips_loader == 'tiffload'
        or bool(self.bounds)
        or bool(self.transforms)
    )
    if needs_full_read:
        thumb = self.read_level(fail=fail, access=access, **kwargs)
    else:
        thumb = vips.Image.thumbnail(self.path, width)
    try:
        return vips2numpy(thumb)
    except vips.error.Error as e:
        raise sf.errors.SlideLoadError(f'Error loading slide thumbnail: {e}')
class MBartForSequenceClassification(metaclass=DummyObject):
    # Placeholder emitted when torch is not installed; any attempt to
    # instantiate it raises an informative ImportError via requires_backends.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def validate() -> float:
    """Run up to ``config.val_steps`` validation batches and log the mean loss.

    Fixes over the previous version: the return annotation said ``-> None``
    although a float is returned, and the loop variable shadowed the builtin
    ``input``.

    Returns:
        Mean validation loss over the batches processed.
    """
    val_losses = []
    batch = None
    for i_val_step, batch in enumerate(cutpaste_val_loader, start=1):
        loss = val_step(batch)
        val_losses.append(loss.item())
        if i_val_step >= config.val_steps:
            break
    validation_loss = np.mean(val_losses)
    log_msg = f'Validation loss on normal samples: {validation_loss}'
    print(log_msg)
    log({'val/val_loss': validation_loss}, config)
    # Log example images from the last batch (original / cutpaste / scar).
    log({'val/original': batch[0], 'val/cutpaste': batch[1], 'val/scar': batch[2]}, config)
    return validation_loss
class TFAlbertForMaskedLM():
    """Placeholder used when TensorFlow is unavailable; both the constructor
    and ``from_pretrained`` raise an informative error via ``requires_tf``."""
    def __init__(self, *args, **kwargs):
        requires_tf(self)
    # Restored @classmethod: upstream dummy objects declare from_pretrained as
    # a classmethod so it can be called on the class itself; without the
    # decorator, TFAlbertForMaskedLM.from_pretrained(x) mis-binds arguments.
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_tf(cls)
def parse_affine_component(component, line, line_buffer):
    """Parse a Kaldi affine component and spill its [weights | bias] matrix
    to a temp file.

    Args:
        component: Component name (unused; kept for interface compatibility).
        line: The header line; must contain '<LinearParams>'.
        line_buffer: Iterator over the remaining component lines.

    Returns:
        Dict of '<Key>' -> value pairs from the header, plus '<Matrix>'
        mapped to the temp-file path holding the concatenated matrix.
    """
    import os
    assert ('<LinearParams>' in line)
    pairs = dict(re.findall('(<\\w+>) ([\\w.-]+)', line))
    weights = parse_weights(line_buffer)
    bias = parse_bias(next(line_buffer))
    # One row per output: linear weights with the bias appended as a column.
    matrix = np.concatenate([weights, bias.T], axis=1)
    fd, filename = tempfile.mkstemp(dir=tmpdir)
    # Write through the descriptor returned by mkstemp so it gets closed;
    # the previous version leaked that fd by opening the path separately.
    with os.fdopen(fd, 'w') as f:
        f.write('[ ')
        np.savetxt(f, matrix)
        f.write(' ]')
    pairs['<Matrix>'] = filename
    return pairs
def ReadStats(pron_stats_handle):
    """Read pronunciation statistics from an open file handle.

    Each non-empty line is ``count word phone [phone ...]``. Lines with fewer
    than two fields raise ValueError.

    Args:
        pron_stats_handle: Open text handle over the stats file.

    Returns:
        defaultdict mapping word -> list of (phones, count), each list sorted
        by count in ascending order.
    """
    stats = defaultdict(list)
    # Iterate the handle directly instead of materializing via readlines().
    for line in pron_stats_handle:
        splits = line.strip().split()
        if not splits:
            continue
        if len(splits) < 2:
            raise ValueError((('Invalid format of line ' + line) + ' in stats file.'))
        count = float(splits[0])
        word = splits[1]
        phones = ' '.join(splits[2:])
        stats[word].append((phones, count))
    for entry in stats.values():
        entry.sort(key=(lambda x: x[1]))
    return stats
class HTMLParser(_HTMLParser):
    def clean(self, html):
        """Normalize markup before parsing.

        NOTE(review): the previous version contained no-op replacements such
        as ``html.replace('<!', '<!')`` — apparently the ``&lt;`` entities
        were unescaped when the source was exported. Restored the intended
        round-trip: escape every ``<!``, then un-escape the known-good
        doctype and comment openers.
        """
        html = decode_utf8(html)
        # Ensure self-closing tags carry exactly one space before '/>'.
        html = html.replace('/>', ' />')
        html = html.replace('  />', ' />')
        # Neutralize '<!' sequences, then restore legitimate ones.
        html = html.replace('<!', '&lt;!')
        html = html.replace('&lt;!DOCTYPE', '<!DOCTYPE')
        html = html.replace('&lt;!doctype', '<!doctype')
        html = html.replace('&lt;!--', '<!--')
        return html
def is_short(w):
    """Return True when ``w`` ends in a short syllable and has no vowels
    anywhere before that final three-character tail."""
    tail, head = w[-3:], w[:-3]
    return is_short_syllable(tail) and not any(ch in VOWELS for ch in head)
class VeRi(BaseImageDataset):
    """VeRi-776 vehicle re-identification dataset with viewpoint annotations.

    Loads train/query/gallery image lists and attaches a viewpoint id to each
    image from the keypoint annotation files.
    """
    dataset_dir = 'VeRi'

    def __init__(self, root='', verbose=True, **kwargs):
        super(VeRi, self).__init__()
        self.dataset_dir = osp.join(root, self.dataset_dir)
        self.train_dir = osp.join(self.dataset_dir, 'image_train')
        self.query_dir = osp.join(self.dataset_dir, 'image_query')
        self.gallery_dir = osp.join(self.dataset_dir, 'image_test')
        self._check_before_run()
        # Map image basename -> viewpoint id for the training split.
        path_train = 'datasets/keypoint_train.txt'
        with open(path_train, 'r') as txt:
            lines = txt.readlines()
        self.image_map_view_train = {}
        for (img_idx, img_info) in enumerate(lines):
            content = img_info.split(' ')
            viewid = int(content[(- 1)])
            self.image_map_view_train[osp.basename(content[0])] = viewid
        # Same mapping for the test split (covers query and gallery images).
        path_test = 'datasets/keypoint_test.txt'
        with open(path_test, 'r') as txt:
            lines = txt.readlines()
        self.image_map_view_test = {}
        for (img_idx, img_info) in enumerate(lines):
            content = img_info.split(' ')
            viewid = int(content[(- 1)])
            self.image_map_view_test[osp.basename(content[0])] = viewid
        train = self._process_dir(self.train_dir, relabel=True)
        query = self._process_dir(self.query_dir, relabel=False)
        gallery = self._process_dir(self.gallery_dir, relabel=False)
        if verbose:
            print('=> VeRi-776 loaded')
            self.print_dataset_statistics(train, query, gallery)
        self.train = train
        self.query = query
        self.gallery = gallery
        (self.num_train_pids, self.num_train_imgs, self.num_train_cams, self.num_train_vids) = self.get_imagedata_info(self.train)
        (self.num_query_pids, self.num_query_imgs, self.num_query_cams, self.num_query_vids) = self.get_imagedata_info(self.query)
        (self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams, self.num_gallery_vids) = self.get_imagedata_info(self.gallery)

    def _check_before_run(self):
        """Raise RuntimeError if any expected dataset directory is missing."""
        if (not osp.exists(self.dataset_dir)):
            raise RuntimeError("'{}' is not available".format(self.dataset_dir))
        if (not osp.exists(self.train_dir)):
            raise RuntimeError("'{}' is not available".format(self.train_dir))
        if (not osp.exists(self.query_dir)):
            raise RuntimeError("'{}' is not available".format(self.query_dir))
        if (not osp.exists(self.gallery_dir)):
            raise RuntimeError("'{}' is not available".format(self.gallery_dir))

    def _process_dir(self, dir_path, relabel=False):
        """Build a list of (img_path, pid, camid, viewid) tuples for a split.

        Images without a viewpoint annotation in either map are skipped
        (counted and reported).
        """
        img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
        pattern = re.compile('([-\\d]+)_c(\\d+)')
        pid_container = set()
        for img_path in img_paths:
            (pid, _) = map(int, pattern.search(img_path).groups())
            if (pid == (- 1)):
                # pid == -1 marks junk images; ignore them.
                continue
            pid_container.add(pid)
        pid2label = {pid: label for (label, pid) in enumerate(pid_container)}
        view_container = set()
        dataset = []
        count = 0
        for img_path in img_paths:
            (pid, camid) = map(int, pattern.search(img_path).groups())
            if (pid == (- 1)):
                continue
            assert (0 <= pid <= 776)
            assert (1 <= camid <= 20)
            camid -= 1
            if relabel:
                pid = pid2label[pid]
            if (osp.basename(img_path) not in self.image_map_view_train.keys()):
                try:
                    viewid = self.image_map_view_test[osp.basename(img_path)]
                except KeyError:
                    # Narrowed from a bare `except:` — only a missing
                    # annotation should skip the image, not e.g. KeyboardInterrupt.
                    count += 1
                    continue
            else:
                viewid = self.image_map_view_train[osp.basename(img_path)]
            view_container.add(viewid)
            dataset.append((img_path, pid, camid, viewid))
        print(view_container, 'view_container')
        print(count, 'samples without viewpoint annotations')
        return dataset
def get_coord_map(FILENAME):
    """Map each pixel of an image's object-segmentation mask to its
    annotation label.

    Derives the segmentation (.png) and annotation (.xml) paths from the
    JPEG path, zeroes out the 255 'void' pixels, and indexes the parsed
    label array by the segmentation mask.
    """
    seg_path = FILENAME.replace('JPEGImages', 'SegmentationObject').replace('.jpg', '.png')
    xml_path = FILENAME.replace('JPEGImages', 'Annotations').replace('.jpg', '.xml')
    mask = read_lab(seg_path)
    mask[(mask == 255)] = 0  # 255 marks 'void'/boundary pixels
    labels = np.int32(parse_xml(xml_path))
    mask = np.int32(mask)
    return labels[mask]
class UploadCommand(setuptools.Command):
    """`setup.py upload` helper: build sdist/wheel, upload via twine, push a git tag."""
    description = 'Build and publish the package.'
    user_options = []

    # Restored @staticmethod: without it `self.status(msg)` passes `self` as
    # `s` plus the message, raising TypeError (status accepts one argument).
    @staticmethod
    def status(s):
        """Print s in bold."""
        print('\x1b[1m{0}\x1b[0m'.format(s))

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        try:
            here = os.path.abspath(os.path.dirname(__file__))
            self.status('Removing previous builds...')
            shutil.rmtree(os.path.join(here, 'dist'))
        except OSError:
            # No previous build to remove.
            pass
        self.status('Building Source and Wheel (universal) distribution...')
        os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
        self.status('Uploading the package to PyPI via Twine...')
        os.system('twine upload dist/*')
        self.status('Pushing git tags...')
        os.system('git tag v{0}'.format(get_version()))
        os.system('git push --tags')
        sys.exit()
def dropout_train_res(model, train_mode=True):
    """Set the ``dropout_train`` flag on every submodule that defines one."""
    flagged = (m for m in model.modules() if hasattr(m, 'dropout_train'))
    for module in flagged:
        module.dropout_train = train_mode
_cache()
# NOTE(review): the bare `_cache()` call above is almost certainly a decorator
# that lost its '@' (i.e. `@_cache()` applied to the function below) — confirm
# against the original source before relying on caching behavior.
def _get_gpu_extra_compile_args():
    # Extra nvcc compile args: none when CUDA is available (arch can be
    # detected); otherwise fall back to an explicit compute_60 target.
    if torch.cuda.is_available():
        return []
    else:
        return ['-arch=compute_60']
def test_tune_hyperparam_randomsearch(df_iris: pd.DataFrame) -> None:
    """Randomized-search tuning via run_cross_validation must match an
    equivalent hand-built sklearn RandomizedSearchCV pipeline."""
    # Restrict to two classes so plain 'accuracy' applies to a binary SVC.
    df_iris = df_iris[df_iris['species'].isin(['versicolor', 'virginica'])]
    X = ['sepal_length', 'sepal_width', 'petal_length']
    y = 'species'
    X_types = {'continuous': X}
    sk_X = df_iris[X].values
    sk_y = df_iris[y].values
    scoring = 'accuracy'
    # Seed before EACH run so both branches draw identical CV splits and
    # random-search samples; do not reorder these statements.
    np.random.seed(42)
    cv_outer = RepeatedKFold(n_splits=2, n_repeats=1)
    cv_inner = RepeatedKFold(n_splits=2, n_repeats=1)
    model_params = {'svm__C': [0.01, 0.001]}
    search_params = {'kind': 'random', 'n_iter': 2, 'cv': cv_inner}
    (actual, actual_estimator) = run_cross_validation(X=X, y=y, data=df_iris, X_types=X_types, model='svm', model_params=model_params, search_params=search_params, problem_type='classification', cv=cv_outer, scoring=[scoring], return_estimator='final')
    np.random.seed(42)
    cv_outer = RepeatedKFold(n_splits=2, n_repeats=1)
    cv_inner = RepeatedKFold(n_splits=2, n_repeats=1)
    clf = make_pipeline(SVC())
    gs = RandomizedSearchCV(clf, {'svc__C': [0.01, 0.001]}, cv=cv_inner, n_iter=2)
    expected = cross_validate(gs, sk_X, sk_y, cv=cv_outer, scoring=[scoring])
    # run_cross_validation adds 5 bookkeeping columns beyond sklearn's output.
    assert (len(actual.columns) == (len(expected) + 5))
    assert (len(actual['test_accuracy']) == len(expected['test_accuracy']))
    assert all(((a == b) for (a, b) in zip(actual['test_accuracy'], expected['test_accuracy'])))
    # Compare the final fitted estimators (last pipeline step) as well.
    clf1 = actual_estimator.best_estimator_.steps[(- 1)][1]
    clf2 = clone(gs).fit(sk_X, sk_y).best_estimator_.steps[(- 1)][1]
    compare_models(clf1, clf2)
class FakeEasyDLClient(object):
    """Test double for the EasyDL client; all calls are no-ops."""
    def get_task(self):
        # Stand-in for fetching a task; intentionally does nothing.
        pass
    def report_task_result(self):
        # Stand-in for reporting a result; intentionally does nothing.
        pass
class NN(Base):
    """Small conv network with selectable topology ('shallow', 'resnet', or
    'densenet'), ending in a zero-initialized output conv (unless testing).
    """
    def __init__(self, args, c_in, c_out, height, width, nn_type, kernel=3):
        super().__init__()
        Conv2dAct = Conv2dReLU
        n_channels = args.n_channels
        if (nn_type == 'shallow'):
            if (args.network1x1 == 'standard'):
                conv1x1 = Conv2dAct(n_channels, n_channels, kernel_size=1, stride=1, padding=0, bias=False)
            else:
                # Previously an unrecognized setting fell through to a
                # NameError on `conv1x1`; fail fast with a clear message.
                raise ValueError('Unknown network1x1 option: {}'.format(args.network1x1))
            layers = [Conv2dAct(c_in, n_channels, kernel, padding=1), conv1x1]
            layers += [torch.nn.Conv2d(n_channels, c_out, kernel, padding=1)]
        elif (nn_type == 'resnet'):
            layers = [Conv2dAct(c_in, n_channels, kernel, padding=1), ResidualBlock(n_channels, kernel, Conv2dAct), ResidualBlock(n_channels, kernel, Conv2dAct)]
            layers += [torch.nn.Conv2d(n_channels, c_out, kernel, padding=1)]
        elif (nn_type == 'densenet'):
            layers = [DenseBlock(args=args, n_inputs=c_in, n_outputs=(n_channels + c_in), kernel=kernel, Conv2dAct=Conv2dAct)]
            layers += [torch.nn.Conv2d((n_channels + c_in), c_out, kernel, padding=1)]
        else:
            raise ValueError
        self.nn = torch.nn.Sequential(*layers)
        if (not UNIT_TESTING):
            # Zero-init the final conv so the block initially contributes
            # nothing (common in flow/residual architectures).
            self.nn[(- 1)].weight.data.zero_()
            self.nn[(- 1)].bias.data.zero_()

    def forward(self, x):
        return self.nn(x)
class FabiansUNet(SegmentationNetwork):
    """Residual-encoder U-Net (encoder: residual blocks, decoder: plain convs)."""
    # Reference VRAM figures for picking 2D/3D configurations (set externally).
    use_this_for_2D_configuration = .0
    use_this_for_3D_configuration = .0
    default_blocks_per_stage_encoder = (1, 2, 3, 4, 4, 4, 4, 4, 4, 4, 4)
    default_blocks_per_stage_decoder = (1, 1, 1, 1, 1, 1, 1, 1, 1, 1)
    default_min_batch_size = 2

    def __init__(self, input_channels, base_num_features, num_blocks_per_stage_encoder, feat_map_mul_on_downscale, pool_op_kernel_sizes, conv_kernel_sizes, props, num_classes, num_blocks_per_stage_decoder, deep_supervision=False, upscale_logits=False, max_features=512, initializer=None, block=BasicResidualBlock, props_decoder=None):
        super().__init__()
        self.conv_op = props['conv_op']
        self.num_classes = num_classes
        self.encoder = ResidualUNetEncoder(input_channels, base_num_features, num_blocks_per_stage_encoder, feat_map_mul_on_downscale, pool_op_kernel_sizes, conv_kernel_sizes, props, default_return_skips=True, max_num_features=max_features, block=block)
        # Dropout is disabled in the decoder (and any later users of props).
        props['dropout_op_kwargs']['p'] = 0
        if (props_decoder is None):
            props_decoder = props
        self.decoder = PlainConvUNetDecoder(self.encoder, num_classes, num_blocks_per_stage_decoder, props_decoder, deep_supervision, upscale_logits)
        if (initializer is not None):
            self.apply(initializer)

    def forward(self, x):
        skips = self.encoder(x)
        return self.decoder(skips)

    # Restored @staticmethod: the method takes no `self`, so calling it on an
    # instance would mis-bind the first argument.
    @staticmethod
    def compute_approx_vram_consumption(patch_size, base_num_features, max_num_features, num_modalities, num_classes, pool_op_kernel_sizes, num_conv_per_stage_encoder, num_conv_per_stage_decoder, feat_map_mul_on_downscale, batch_size):
        """Sum the encoder and decoder VRAM estimates for a configuration."""
        enc = ResidualUNetEncoder.compute_approx_vram_consumption(patch_size, base_num_features, max_num_features, num_modalities, pool_op_kernel_sizes, num_conv_per_stage_encoder, feat_map_mul_on_downscale, batch_size)
        dec = PlainConvUNetDecoder.compute_approx_vram_consumption(patch_size, base_num_features, max_num_features, num_classes, pool_op_kernel_sizes, num_conv_per_stage_decoder, feat_map_mul_on_downscale, batch_size)
        return (enc + dec)
def test_ce_loss():
    """CrossEntropyLoss config checks: use_mask+use_sigmoid must be rejected;
    class-weighted and unweighted softmax losses produce the expected values."""
    from mmdet.models import build_loss
    # use_mask and use_sigmoid are mutually exclusive.
    with pytest.raises(AssertionError):
        loss_cfg = dict(type='CrossEntropyLoss', use_mask=True, use_sigmoid=True, loss_weight=1.0)
        build_loss(loss_cfg)
    # With class weights [0.8, 0.2]: logits (100, -100), label 1 ->
    # CE = 200 scaled by the class-1 weight 0.2 = 40.
    loss_cls_cfg = dict(type='CrossEntropyLoss', use_sigmoid=False, class_weight=[0.8, 0.2], loss_weight=1.0)
    loss_cls = build_loss(loss_cls_cfg)
    fake_pred = torch.Tensor([[100, (- 100)]])
    fake_label = torch.Tensor([1]).long()
    assert torch.allclose(loss_cls(fake_pred, fake_label), torch.tensor(40.0))
    # Without class weights the same logits give the full CE of 200.
    loss_cls_cfg = dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)
    loss_cls = build_loss(loss_cls_cfg)
    assert torch.allclose(loss_cls(fake_pred, fake_label), torch.tensor(200.0))
def check(P):
    """Assert that ``P`` reports zero global shots; returns True on success."""
    assert P.num_shots_global == 0
    return True
class MaxLengthCriteria(metaclass=DummyObject):
    # Placeholder emitted when torch is not installed; any attempt to
    # instantiate it raises an informative ImportError via requires_backends.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class PositionalLabel():
    """An ordered sequence of labeled sections, each a ``(label, (start, end))``
    pair. Positions may be sample offsets or seconds.

    Fixes over the previous version: ``deserialize`` regained its
    ``@staticmethod`` decorator (it takes no ``self``, so calling it on an
    instance previously mis-bound arguments), and locals no longer shadow the
    builtin ``range``.
    """

    def __init__(self, labeled_sections: List[Tuple[(str, Tuple[(float, float)])]]):
        if not labeled_sections:
            raise ValueError('Sections must be specified.')
        if any(section_range is None for (_label, section_range) in labeled_sections):
            raise ValueError('Range must be specified.')
        self.labeled_sections = labeled_sections
        # Ordered labels and their space-joined concatenation.
        self.labels = [word for (word, _range) in labeled_sections]
        self.label = ' '.join(self.labels)

    def convert_range_to_seconds(self, original_sample_rate: int) -> 'PositionalLabel':
        """Return a copy with sample offsets divided by ``original_sample_rate``."""
        return PositionalLabel([(word, ((start / original_sample_rate), (end / original_sample_rate))) for (word, (start, end)) in self.labeled_sections])

    def with_corrected_labels(self, correction: Callable[([str], str)]) -> 'PositionalLabel':
        """Return a copy with ``correction`` applied to every label."""
        return PositionalLabel([(correction(section), section_range) for (section, section_range) in self.labeled_sections])

    def serialize(self) -> str:
        """Serialize to one ``label|start|end`` line per section."""
        return '\n'.join('{}|{}|{}'.format(label, start, end) for (label, (start, end)) in self.labeled_sections)

    @staticmethod
    def deserialize(serialized: str) -> 'PositionalLabel':
        """Parse the ``label|start|end`` line format produced by serialize()."""
        return PositionalLabel([(label, (float(start), float(end))) for (label, start, end) in (item.split('|') for item in serialized.splitlines())])
class AtariPreprocessing(object):
    """Standard Atari preprocessing wrapper: frame skipping with max-pooling
    over the last two frames, grayscale conversion, and resizing to a square
    screen. Mirrors the Dopamine-style preprocessing pipeline.

    NOTE(review): in the upstream version, observation_space / action_space /
    reward_range / metadata are @property accessors; the decorators appear to
    have been stripped here, so they must be *called* — confirm against callers.
    """
    def __init__(self, environment, frame_skip=4, terminal_on_life_loss=False, screen_size=84):
        if (frame_skip <= 0):
            raise ValueError('Frame skip should be strictly positive, got {}'.format(frame_skip))
        if (screen_size <= 0):
            raise ValueError('Target screen size should be strictly positive, got {}'.format(screen_size))
        self.environment = environment
        self.terminal_on_life_loss = terminal_on_life_loss
        self.frame_skip = frame_skip
        self.screen_size = screen_size
        obs_dims = self.environment.observation_space
        # Two buffers so the last two raw frames can be max-pooled to remove
        # Atari sprite flicker.
        self.screen_buffer = [np.empty((obs_dims.shape[0], obs_dims.shape[1]), dtype=np.uint8), np.empty((obs_dims.shape[0], obs_dims.shape[1]), dtype=np.uint8)]
        self.game_over = False
        self.lives = 0
    def observation_space(self):
        # Single-channel uint8 square screen.
        return Box(low=0, high=255, shape=(self.screen_size, self.screen_size, 1), dtype=np.uint8)
    def action_space(self):
        return self.environment.action_space
    def reward_range(self):
        return self.environment.reward_range
    def metadata(self):
        return self.environment.metadata
    def reset(self):
        """Reset the underlying env and return the initial processed frame."""
        self.environment.reset()
        self.lives = self.environment.ale.lives()
        self._fetch_grayscale_observation(self.screen_buffer[0])
        self.screen_buffer[1].fill(0)
        return self._pool_and_resize()
    def render(self, mode):
        return self.environment.render(mode)
    def step(self, action):
        """Repeat `action` for frame_skip frames, accumulating reward.

        Only the last two frames are captured (for pooling); terminates early
        on game over or, optionally, on life loss.
        """
        accumulated_reward = 0.0
        for time_step in range(self.frame_skip):
            (_, reward, game_over, info) = self.environment.step(action)
            accumulated_reward += reward
            if self.terminal_on_life_loss:
                new_lives = self.environment.ale.lives()
                is_terminal = (game_over or (new_lives < self.lives))
                self.lives = new_lives
            else:
                is_terminal = game_over
            if is_terminal:
                break
            elif (time_step >= (self.frame_skip - 2)):
                # Capture only the final two frames of the skip window.
                t = (time_step - (self.frame_skip - 2))
                self._fetch_grayscale_observation(self.screen_buffer[t])
        observation = self._pool_and_resize()
        self.game_over = game_over
        return (observation, accumulated_reward, is_terminal, info)
    def close(self):
        self.environment.close()
    def _fetch_grayscale_observation(self, output):
        # Fills `output` in place with the ALE grayscale screen.
        self.environment.ale.getScreenGrayscale(output)
        return output
    def _pool_and_resize(self):
        """Max-pool the two buffered frames and resize to the target screen."""
        if (self.frame_skip > 1):
            np.maximum(self.screen_buffer[0], self.screen_buffer[1], out=self.screen_buffer[0])
        transformed_image = cv2.resize(self.screen_buffer[0], (self.screen_size, self.screen_size), interpolation=cv2.INTER_AREA)
        int_image = np.asarray(transformed_image, dtype=np.uint8)
        # Add a trailing channel axis: (H, W) -> (H, W, 1).
        return np.expand_dims(int_image, axis=2)
def create_tmp_dir_multi_session():
    """Recreate the temp dir and copy the multi-session contrasts dataset
    into it, excluding its .git directory."""
    skip_git = shutil.ignore_patterns(str(path_data_multi_sessions_contrasts_source / '.git'))
    remove_tmp_dir()
    Path(path_temp).mkdir()
    source = Path(path_data_multi_sessions_contrasts_source)
    if source.exists():
        shutil.copytree(path_data_multi_sessions_contrasts_source, path_data_multi_sessions_contrasts_tmp, ignore=skip_git)
def main_upper(x_minus, x_plus, y_minus, y_plus, plot=False, num=0, print_info=True):
    """Upper bound for the 4th orthant, obtained by reflecting the x-interval
    through the origin and reusing the 3rd-orthant lower bound."""
    if print_info:
        print('4th orthant upper: using third.main_lower function')
    # Reflect the x-interval: [x_minus, x_plus] -> [-x_plus, -x_minus].
    reflected_minus, reflected_plus = (- x_plus), (- x_minus)
    (a, b, c) = third.main_lower(reflected_minus, reflected_plus, y_minus, y_plus, print_info=print_info)
    # Undo the reflection on the plane coefficients.
    b, c = (- b), (- c)
    if plot:
        utils.plot_surface(x_minus[num], x_plus[num], y_minus[num], y_plus[num], a[num], b[num], c[num])
    return (a.detach(), b.detach(), c.detach())
def _load_groundtruth(filepath):
    """Parse a ViPER-style XML ground-truth file.

    Fixes over the previous version: inner loops no longer shadow the outer
    loop variable ``e``, and the assert message typo 'definedin' is corrected.

    Returns:
        Dict with keys 'BallPos' (list of (start, end, x, y)), 'BallShot'
        (list of (start, end, bool)), 'PlayerInteractingID' (list of
        (start, end, id)), and 'Person' (dict person_id -> list of
        (start, end, height, width, x, y)).
    """
    assert os.path.isfile(filepath)
    xmldoc = minidom.parse(filepath)
    file_items = xmldoc.getElementsByTagName('file')
    assert (len(file_items) == 1)
    num_frames = None
    for attr in file_items[0].getElementsByTagName('attribute'):
        if (attr.attributes['name'].value == 'NUMFRAMES'):
            values = attr.getElementsByTagName('data:dvalue')
            assert (len(values) == 1)
            num_frames = values[0].attributes['value'].value
            break
    assert num_frames is not None, 'NUMFRAMES not defined in XML file.'
    print(('Number of frames = ' + str(num_frames)))
    gt = {}
    gt['BallPos'] = []
    gt['BallShot'] = []
    gt['PlayerInteractingID'] = []
    gt['Person'] = defaultdict(list)
    for obj in xmldoc.getElementsByTagName('object'):
        assert ('name' in obj.attributes)
        if (obj.attributes['name'].value == 'BALL'):
            for elem in obj.getElementsByTagName('attribute'):
                attr_name = elem.attributes['name'].value
                if (attr_name == 'BallPos'):
                    for point in elem.getElementsByTagName('data:point'):
                        assert ('framespan' in point.attributes)
                        assert ('x' in point.attributes)
                        assert ('y' in point.attributes)
                        framespan = point.attributes['framespan'].value
                        x = int(point.attributes['x'].value)
                        y = int(point.attributes['y'].value)
                        (start_frame, end_frame) = _parse_framespan(framespan)
                        gt['BallPos'].append((start_frame, end_frame, x, y))
                elif (attr_name == 'BallShot'):
                    for bval in elem.getElementsByTagName('data:bvalue'):
                        assert ('framespan' in bval.attributes)
                        assert ('value' in bval.attributes)
                        framespan = bval.attributes['framespan'].value
                        value = (bval.attributes['value'].value == 'true')
                        (start_frame, end_frame) = _parse_framespan(framespan)
                        gt['BallShot'].append((start_frame, end_frame, value))
                elif (attr_name == 'PlayerInteractingID'):
                    for dval in elem.getElementsByTagName('data:dvalue'):
                        assert ('framespan' in dval.attributes)
                        assert ('value' in dval.attributes)
                        framespan = dval.attributes['framespan'].value
                        value = int(dval.attributes['value'].value)
                        (start_frame, end_frame) = _parse_framespan(framespan)
                        gt['PlayerInteractingID'].append((start_frame, end_frame, value))
                else:
                    assert False, ('Unexpected attribute: ' + attr_name)
        elif (obj.attributes['name'].value == 'Person'):
            person_id = obj.attributes['id'].value
            for elem in obj.getElementsByTagName('attribute'):
                if (elem.attributes['name'].value == 'LOCATION'):
                    for bbox in elem.getElementsByTagName('data:bbox'):
                        assert ('framespan' in bbox.attributes)
                        assert ('height' in bbox.attributes)
                        assert ('width' in bbox.attributes)
                        assert ('x' in bbox.attributes)
                        assert ('y' in bbox.attributes)
                        framespan = bbox.attributes['framespan'].value
                        height = int(bbox.attributes['height'].value)
                        width = int(bbox.attributes['width'].value)
                        x = int(bbox.attributes['x'].value)
                        y = int(bbox.attributes['y'].value)
                        (start_frame, end_frame) = _parse_framespan(framespan)
                        gt['Person'][person_id].append((start_frame, end_frame, height, width, x, y))
                else:
                    assert False, ('Unexpected attribute: ' + elem.attributes['name'].value)
    return gt
def test_retina_head_forward():
    """Smoke test: the exported RetinaNet head must validate under ONNX Runtime."""
    model = retinanet_config()
    base = 128
    # One random feature map per FPN stride, halving spatially each level.
    feats = [
        torch.rand(1, model.in_channels, base // (2 ** (lvl + 2)), base // (2 ** (lvl + 2)))
        for lvl in range(len(model.anchor_generator.strides))
    ]
    wrapped = WrapFunction(model.forward)
    ort_validate(wrapped, feats)
def _attempt_creation(entity_name, type_name, type_dict, provided_kwargs, additional_kwargs):
    """Instantiate the registered type named ``type_name`` from ``type_dict``.

    Raises KeyError with a readable message when the name is unregistered.
    """
    if type_name not in type_dict:
        raise KeyError(f"{entity_name.title()} '{type_name}' is not registered")
    factory = type_dict[type_name]
    return factory(**additional_kwargs, **_remove_type_key(provided_kwargs))
class IBNResUnit(nn.Module):
    """IBN residual unit: bottleneck body plus identity (or 1x1 projection)
    shortcut, followed by ReLU."""
    def __init__(self, in_channels, out_channels, stride, conv1_ibn):
        super(IBNResUnit, self).__init__()
        # A projection shortcut is needed whenever the output shape differs.
        self.resize_identity = (in_channels != out_channels) or (stride != 1)
        self.body = IBNResBottleneck(in_channels=in_channels, out_channels=out_channels, stride=stride, conv1_ibn=conv1_ibn)
        if self.resize_identity:
            self.identity_conv = conv1x1_block(in_channels=in_channels, out_channels=out_channels, stride=stride, activation=None)
        self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        shortcut = self.identity_conv(x) if self.resize_identity else x
        out = self.body(x)
        out = out + shortcut
        return self.activ(out)
class CodeGenMacroMathjax(CodeGenMathjax):
    """Mathjax code generator that wraps symbols and equations in clickable
    \\idlabel / \\eqlabel macros carrying JSON onclick payloads for the UI."""
    def __init__(self):
        super().__init__(ParserTypeEnum.MACROMATHJAX)
    def init_type(self, type_walker, func_name):
        super().init_type(type_walker, func_name)
        self.code_frame.pre_str = self.pre_str
        self.code_frame.post_str = self.post_str
    def convert_content(self, symbol):
        # Strip TeX escapes/backticks and escape quotes so the symbol can be
        # embedded in a JS string inside the JSON payload.
        return symbol.replace('\\', '').replace('`', '').replace('"', '\\\\"').replace("'", "\\\\'")
    def visit_assignment(self, node, **kwargs):
        # Collect the (deduplicated) symbols used by this assignment plus the
        # assigned name itself; they become the clickable-equation payload.
        sym_list = []
        for sym in node.symbols:
            sym_list.append("'{}'".format(self.convert_content(filter_subscript(sym))))
        sym_list = list(set(sym_list))
        sym_list.append("'{}'".format(self.convert_content(node.left[0].get_main_id())))
        if (node.right[0].node_type == IRNodeType.Optimize):
            content = self.visit(node.right[0], **kwargs)
        else:
            # visiting_lhs marks identifiers on the left side as definitions.
            self.visiting_lhs = True
            lhs_list = []
            for cur_index in range(len(node.left)):
                lhs_list.append(self.visit(node.left[cur_index], **kwargs))
            self.visiting_lhs = False
            rhs_list = []
            for cur_index in range(len(node.right)):
                rhs_list.append(self.visit(node.right[cur_index], **kwargs))
            content = ((','.join(lhs_list) + ' & = ') + ','.join(rhs_list))
        # Raw source text travels base64-encoded inside the onclick payload.
        json = '{{"onclick":"event.stopPropagation(); onClickEq(this, \'{}\', [{}], false, [], [], \'{}\');"}}'.format(self.func_name, ', '.join(sym_list), base64.urlsafe_b64encode(node.raw_text.encode('utf-8')).decode('utf-8'))
        content = (((content + '\\\\') + '\\eqlabel{{ {} }}{{}}'.format(json)) + '\n')
        self.code_frame.expr += content
        self.code_frame.expr_dict[node.raw_text] = content
        return content
    def visit_optimize(self, node, **kwargs):
        # Only standalone optimize nodes (not inside an assignment) get their
        # own eqlabel; assignments already label the whole equation.
        assign_node = node.get_ancestor(IRNodeType.Assignment)
        content = super(CodeGenMacroMathjax, self).visit_optimize(node, **kwargs)
        if (not assign_node):
            sym_list = []
            for sym in node.symbols:
                sym_list.append("'{}'".format(self.convert_content(filter_subscript(sym))))
            sym_list = list(set(sym_list))
            json = '{{"onclick":"event.stopPropagation(); onClickEq(this, \'{}\', [{}], false, [], [], \'{}\', false);"}}'.format(self.func_name, ', '.join(sym_list), base64.urlsafe_b64encode(node.raw_text.encode('utf-8')).decode('utf-8'))
            content = (((content + '\\\\') + '\\eqlabel{{ {} }}{{}}'.format(json)) + '\n')
            self.code_frame.expr += content
            self.code_frame.expr_dict[node.raw_text] = content
        return content
    def visit_local_func(self, node, **kwargs):
        # Render a local function definition: name, parameter list, body
        # expressions, optional "where" parameter definitions.
        self.local_func_parsing = True
        self.visiting_func_name = True
        func_name = self.visit(node.name, **kwargs)
        self.visiting_func_name = False
        self.local_func_name = self.convert_content(filter_subscript(node.name.get_name()))
        params_str = ''
        local_param_list = []
        if (len(node.params) > 0):
            for index in range(len(node.params)):
                local_param_list.append("'{}'".format(self.convert_content(filter_subscript(node.params[index].get_name()))))
                params_str += self.visit(node.params[index], **kwargs)
                if (index < (len(node.params) - 1)):
                    params_str += (node.separators[index] + '')
        if (node.def_type == LocalFuncDefType.LocalFuncDefParenthesis):
            def_params = (('\\left( ' + params_str) + ' \\right)')
        else:
            def_params = (('\\left[ ' + params_str) + ' \\right]')
        expr_list = []
        for cur_index in range(len(node.expr)):
            expr_list.append(self.visit(node.expr[cur_index], **kwargs))
        content = (((func_name + def_params) + ' & = ') + ', '.join(expr_list))
        sym_list = []
        for sym in node.symbols:
            sym_list.append("'{}'".format(self.convert_content(filter_subscript(sym))))
        sym_list = list(set(sym_list))
        sym_list.append("'{}'".format(self.convert_content(node.name.get_main_id())))
        json = '{{"onclick":"event.stopPropagation(); onClickEq(this, \'{}\', [{}], true, \'{}\', [{}], \'{}\');"}}'.format(self.func_name, ', '.join(sym_list), self.local_func_name, ', '.join(local_param_list), base64.urlsafe_b64encode(node.raw_text.encode('utf-8')).decode('utf-8'))
        saved_content = (((content + '\\\\') + '\\eqlabel{{ {} }}{{}}'.format(json)) + '\n')
        self.code_frame.expr += (saved_content + '\n')
        self.code_frame.expr_dict[node.raw_text] = saved_content
        if (len(node.defs) > 0):
            par_list = []
            for par in node.defs:
                par_list.append(self.visit(par, **kwargs))
            content += (' \\text{{ where }} ' + ', '.join(par_list))
        self.local_func_parsing = False
        # NOTE(review): this immediately re-enables the flag cleared on the
        # previous line, so visit_id keeps treating identifiers as local-func
        # context after the definition ends — looks like leftover/debug code;
        # confirm the intended final value against the original source.
        self.local_func_parsing = True
        return content
    def visit_id(self, node, **kwargs):
        # Wrap an identifier in an \idlabel macro with a clickable payload;
        # subscripts are rendered separately and appended.
        id_str = '{}-{}'.format(self.func_name, self.convert_content(node.get_name()))
        sub_str = ''
        if node.contain_subscript():
            subs_list = []
            for subs in node.subs:
                subs_list.append(self.convert_unicode(subs))
            sub_str = (('_{' + ','.join(subs_list)) + '}')
            content = self.convert_unicode(node.main_id)
        else:
            content = self.convert_unicode(node.get_name())
        if ('is_sub' in kwargs):
            # Subscript position: no clickable wrapper.
            return (content + sub_str)
        use_type = 'use'
        if (self.visiting_lhs or self.visiting_func_name):
            use_type = 'def'
        local_param = False
        local_func_name = ''
        if self.local_func_parsing:
            local_param = self.is_local_param(node.get_name())
            local_func_name = self.local_func_name
        json = '{{"onclick":"event.stopPropagation(); onClickSymbol(this, \'{}\', \'{}\', \'{}\', {}, \'{}\')", "id":"{}", "sym":"{}", "func":"{}", "localFunc":"{}", "type":"{}", "case":"equation"}}'.format(self.convert_content(node.get_main_id()), self.func_name, use_type, ('true' if local_param else 'false'), local_func_name, id_str, self.convert_content(node.get_main_id()), self.func_name, local_func_name, use_type)
        content = '\\idlabel{{ {} }}{{ {{{}}} }}'.format(json, content)
        return (content + sub_str)
def round_robin_strategy(num_tasks, last_task=None):
    """Cyclically pick the next task index; start at 0 when nothing has run yet."""
    return 0 if last_task is None else (last_task + 1) % num_tasks
class Sudoku(Environment[State]):
    """JAX Sudoku environment: boards come from a puzzle database, actions are
    (row, col, digit) triples, and episodes end on an invalid move or when no
    legal actions remain."""
    def __init__(self, generator: Optional[Generator]=None, reward_fn: Optional[RewardFn]=None, viewer: Optional[Viewer[State]]=None):
        # Only load the puzzle database when no generator is supplied; the
        # `generator or DatabaseGenerator(...)` below short-circuits, so
        # `database` is never evaluated when a generator was passed in.
        if (generator is None):
            file_path = os.path.dirname(os.path.abspath(__file__))
            database_file = DATABASES['mixed']
            database = jnp.load(os.path.join(file_path, 'data', database_file))
        self._generator = (generator or DatabaseGenerator(database=database))
        self._reward_fn = (reward_fn or SparseRewardFn())
        self._viewer = (viewer or SudokuViewer())
    def __repr__(self) -> str:
        return f'Sudoku(grid_size={BOARD_WIDTH}x{BOARD_WIDTH})'
    def reset(self, key: chex.PRNGKey) -> Tuple[(State, TimeStep[Observation])]:
        """Sample a fresh puzzle and return the initial state/timestep."""
        state = self._generator(key)
        obs = Observation(board=state.board, action_mask=state.action_mask)
        timestep = restart(observation=obs)
        return (state, timestep)
    def step(self, state: State, action: chex.Array) -> Tuple[(State, TimeStep[Observation])]:
        """Apply a (row, col, digit) action; terminate on an invalid move or
        when the updated board admits no further legal actions."""
        invalid = (~ state.action_mask[tuple(action)])
        updated_board = apply_action(action=action, board=state.board)
        updated_action_mask = get_action_mask(board=updated_board)
        next_state = State(board=updated_board, action_mask=updated_action_mask, key=state.key)
        no_actions_available = (~ jnp.any(updated_action_mask))
        done = (invalid | no_actions_available)
        reward = self._reward_fn(state=state, new_state=next_state, action=action, done=done)
        observation = Observation(board=updated_board, action_mask=updated_action_mask)
        # jit-compatible branch between terminal and transition timesteps.
        timestep = jax.lax.cond(done, termination, transition, reward, observation)
        return (next_state, timestep)
    def observation_spec(self) -> specs.Spec[Observation]:
        # Board cells hold -1 (empty) .. BOARD_WIDTH; the action mask is a
        # boolean (row, col, digit) cube.
        board = specs.BoundedArray(shape=(BOARD_WIDTH, BOARD_WIDTH), minimum=(- 1), maximum=BOARD_WIDTH, dtype=jnp.int32, name='board')
        action_mask = specs.BoundedArray(shape=(BOARD_WIDTH, BOARD_WIDTH, BOARD_WIDTH), dtype=bool, minimum=False, maximum=True, name='action_mask')
        return specs.Spec(Observation, 'ObservationSpec', board=board, action_mask=action_mask)
    def action_spec(self) -> specs.MultiDiscreteArray:
        # Actions are (row, col, digit) indices.
        return specs.MultiDiscreteArray(num_values=jnp.array([BOARD_WIDTH, BOARD_WIDTH, BOARD_WIDTH]), name='action', dtype=jnp.int32)
    def render(self, state: State) -> Any:
        return self._viewer.render(state=state)
    def animate(self, states: Sequence[State], interval: int=200, save_path: Optional[str]=None) -> matplotlib.animation.FuncAnimation:
        return self._viewer.animate(states=states, interval=interval, save_path=save_path)
def iou(box1, box2):
    """Intersection-over-union of two boxes (via the area/intersect helpers)."""
    first = np.asarray(box1, np.float32)
    second = np.asarray(box2, np.float32)
    overlap = area(intersect(first, second))
    union = area(first) + area(second) - overlap
    return overlap / union
class TestQuantization(unittest.TestCase):
    """End-to-end smoke test of language-model quantization on dummy data."""
    def setUp(self):
        # Silence all logging during the test.
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    # Restored decorator: the previous version held a bare tuple expression
    # `((not torch.cuda.is_available()), 'test requires a GPU')` — a
    # @unittest.skipIf whose '@' was lost, leaving the test unskipped on CPU.
    @unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU')
    def test_quantization(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_quantization') as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                _quantize_language_model(data_dir, 'transformer_lm')
def staticTFTest(data, gt_data):
    """Check measured static transforms against ground truth.

    Returns (True, '') when every couple matches within 1e-5 on both the
    translation and rotation components; otherwise (False, message).
    """
    tol = 1e-05
    for couple in gt_data.keys():
        measured = data[couple]
        if measured is None:
            return (False, ('Tf is None for couple %s' % '->'.join(couple)))
        expected = gt_data[couple]
        trans_diff = abs(np.array(measured[0]) - np.array(expected[0]))
        rot_diff = abs(np.array(measured[1]) - np.array(expected[1]))
        if any(trans_diff > tol) or any(rot_diff > tol):
            return (False, ('Tf is changed for couple %s' % '->'.join(couple)))
    return (True, '')
def metadata2dict(filename, header, key_index=0):
    """Read a CSV file into a dict of row-dicts.

    Each row becomes {header[i]: field[i]}, keyed by the (whitespace- and
    quote-stripped) field at column ``key_index``.
    """
    result = {}
    with open(filename, 'rt') as handle:
        for raw_row in csv.reader(handle):
            row = [field.strip().strip('"') for field in raw_row]
            result[row[key_index]] = dict(zip(header, row))
    return result
class TestTemplatePointwiseAttention(unittest.TestCase):
    """Shape test: the template pointwise attention update must match z's shape."""
    def test_shape(self):
        batch_size = consts.batch_size
        n_seq = consts.n_seq
        c_t = consts.c_t
        c_z = consts.c_z
        # Arbitrary hidden dim / head count, chosen odd-sized to expose
        # accidental shape coupling.
        c = 26
        no_heads = 13
        n_res = consts.n_res
        # NOTE(review): inf = .0 (zero) looks suspicious — attention-mask
        # "infinity" values are normally large (e.g. 1e9); confirm against
        # the original test before relying on masked behavior here.
        inf = .0
        tpa = TemplatePointwiseAttention(c_t, c_z, c, no_heads, inf=inf)
        # t: per-template pair embeddings; z: pair representation to update.
        t = torch.rand((batch_size, n_seq, n_res, n_res, c_t))
        z = torch.rand((batch_size, n_res, n_res, c_z))
        z_update = tpa(t, z, chunk_size=None)
        self.assertTrue((z_update.shape == z.shape))
def strong_aug_pixel(p=0.5):
    """Build a strong pixel-level albumentations augmentation pipeline.

    Composes noise, compression, blur, sharpening/contrast and hue/saturation
    transforms; *p* is the probability of applying the whole pipeline.
    """
    print('[DATA]: strong aug pixel')
    # Imported lazily so albumentations is only required when this aug is used.
    from albumentations import Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, HueSaturationValue, MultiplicativeNoise, IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, RandomBrightnessContrast, IAAPiecewiseAffine, IAASharpen, IAAEmboss, Flip, OneOf, Compose, JpegCompression, CLAHE
    return Compose([OneOf([MultiplicativeNoise(multiplier=[0.5, 1.5], per_channel=True), JpegCompression(quality_lower=39, quality_upper=80)], p=0.2), OneOf([IAAAdditiveGaussianNoise(), GaussNoise()], p=0.2), OneOf([MotionBlur(p=0.2), MedianBlur(blur_limit=3, p=0.1), Blur(blur_limit=3, p=0.1)], p=0.2), OneOf([CLAHE(clip_limit=2), IAASharpen(), IAAEmboss(), RandomBrightnessContrast()], p=0.3), HueSaturationValue(p=0.3)], p=p)
def create_inception_graph(pth):
    """Load a frozen Inception GraphDef from *pth* into the default TF graph.

    Imported ops are namespaced under 'FID_Inception_Net' (used for FID
    computation). Uses TF1-style GraphDef/gfile APIs.
    """
    with tf.gfile.FastGFile(pth, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='FID_Inception_Net')
def categorical_gru_policy_tf_ppo_benchmarks():
    """Run the categorical-GRU-policy benchmark over the state-based env set."""
    iterate_experiments(categorical_gru_policy, STATE_ENV_SET, seeds=_seeds)
class DataInjector(object):
    """Load trained weights from a Caffe model and inject them into a graph.

    Weights are read either through pycaffe (preferred when available) or by
    parsing the serialized NetParameter protobuf directly.
    """

    def __init__(self, def_path, data_path):
        # Path to the model definition (.prototxt).
        self.def_path = def_path
        # Path to the trained weights (.caffemodel).
        self.data_path = data_path
        # True when weights were loaded via protobuf (affects shape fixups).
        self.did_use_pb = False
        # List of (layer_name, blobs) pairs, filled by load().
        self.params = None
        self.load()

    def load(self):
        """Load parameters via pycaffe when present, else via protobuf."""
        if has_pycaffe():
            self.load_using_caffe()
        else:
            self.load_using_pb()

    def load_using_caffe(self):
        caffe = get_caffe_resolver().caffe
        net = caffe.Net(self.def_path, self.data_path, caffe.TEST)
        data = (lambda blob: blob.data)
        # NOTE(review): under Python 3 `map` is a one-shot iterator; this works
        # because each entry is consumed exactly once in __call__ -- confirm if
        # params are ever iterated twice.
        self.params = [(k, map(data, v)) for (k, v) in net.params.items()]

    def load_using_pb(self):
        data = get_caffe_resolver().NetParameter()
        # Fix: close the weights file deterministically instead of leaking the
        # handle opened inline.
        with open(self.data_path, 'rb') as weights_file:
            data.MergeFromString(weights_file.read())
        pair = (lambda layer: (layer.name, self.normalize_pb_data(layer)))
        # Older Caffe protos use `layers`; newer ones use `layer`.
        layers = (data.layers or data.layer)
        self.params = [pair(layer) for layer in layers if layer.blobs]
        self.did_use_pb = True

    def normalize_pb_data(self, layer):
        """Convert a layer's blobs to 4-D float32 arrays (c_o, c_i, h, w)."""
        transformed = []
        for blob in layer.blobs:
            if len(blob.shape.dim):
                dims = blob.shape.dim
                # Left-pad missing dimensions with 1 so we always get 4 values.
                (c_o, c_i, h, w) = map(int, (([1] * (4 - len(dims))) + list(dims)))
            else:
                # Legacy blobs carry explicit num/channels/height/width fields.
                c_o = blob.num
                c_i = blob.channels
                h = blob.height
                w = blob.width
            data = np.array(blob.data, dtype=np.float32).reshape(c_o, c_i, h, w)
            transformed.append(data)
        return transformed

    def adjust_parameters(self, node, data):
        """Squeeze singleton dims introduced by the protobuf loading path."""
        if (not self.did_use_pb):
            return data
        data = list(data)
        # Biases (index 1) come back 4-D from the pb loader; squeeze them.
        squeeze_indices = [1]
        if (node.kind == NodeKind.InnerProduct):
            # Fully-connected weights are effectively 2-D as well.
            squeeze_indices.append(0)
        for idx in squeeze_indices:
            data[idx] = np.squeeze(data[idx])
        return data

    def __call__(self, graph):
        """Attach loaded parameters to matching nodes of *graph*; return it."""
        for (layer_name, data) in self.params:
            if (layer_name in graph):
                node = graph.get_node(layer_name)
                node.data = self.adjust_parameters(node, data)
            else:
                print_stderr(('Ignoring parameters for non-existent layer: %s' % layer_name))
        return graph
def get_dueling_dqn_agent(network, environment=None, states=None, actions=None, max_episode_timesteps=None, batch_size=32, learning_rate=0.0001, horizon=1, discount=0.99, memory=200000, device='gpu'):
    """Create a Tensorforce dueling-DQN agent.

    If *environment* is given, the agent's state/action specs come from it;
    otherwise explicit *states* and *actions* specs must be supplied.
    Returns the created Agent.
    """
    # Shared keyword arguments -- the two branches differed only in how the
    # state/action specs are provided.
    common = dict(agent='dueling_dqn', max_episode_timesteps=max_episode_timesteps, network=network, config=dict(device=device), memory=memory, batch_size=batch_size, learning_rate=learning_rate, horizon=horizon, discount=discount, parallel_interactions=10)
    # Fix: compare against None with `is not None`, not `!=`.
    if environment is not None:
        agent = Agent.create(environment=environment, **common)
    else:
        agent = Agent.create(states=states, actions=actions, **common)
    return agent
class TFXLMRobertaForCausalLM(metaclass=DummyObject):
    """Placeholder that raises a helpful error when TensorFlow is missing."""
    # Backends required for the real implementation.
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        # Raises an ImportError-style message unless the 'tf' backend exists.
        requires_backends(self, ['tf'])
def save_dict(log_path, dic, name):
    """Persist an argparse-style namespace both as JSON and as readable text.

    Writes <log_path>/<name>.json (JSON dump of vars(dic)) and
    <log_path>/<name>.txt with one 'key = value' line per entry.
    """
    json_path = os.path.join(log_path, ('%s.json' % name))
    # Fix: context managers close the files even if dumping raises.
    with open(json_path, 'w') as json_file:
        json.dump(vars(dic), json_file)
    txt_path = os.path.join(log_path, ('%s.txt' % name))
    args_str = [('%s = %s' % (key, value)) for (key, value) in vars(dic).items()]
    with open(txt_path, 'w') as txt_file:
        txt_file.write('\n'.join(args_str))
class dsRLA_MobileNetV2(nn.Module):
    """MobileNetV2 backbone augmented with a recurrent layer-aggregation (RLA)
    hidden state `h` that is updated after every inverted-residual block and
    passed through depthwise-separable recurrent convs at stage boundaries.
    """
    def __init__(self, num_classes: int=1000, width_mult: float=1.0, rla_channel: int=32, inverted_residual_setting: Optional[List[List[int]]]=None, round_nearest: int=8, block: Optional[Callable[(..., nn.Module)]]=None, norm_layer: Optional[Callable[(..., nn.Module)]]=None, ECA=False) -> None:
        super(dsRLA_MobileNetV2, self).__init__()
        if (block is None):
            block = InvertedResidual
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        input_channel = 32
        last_channel = 1280
        if (inverted_residual_setting is None):
            # Standard MobileNetV2 config: (expand_ratio t, channels c, repeats n, stride s).
            inverted_residual_setting = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1]]
        if ((len(inverted_residual_setting) == 0) or (len(inverted_residual_setting[0]) != 4)):
            raise ValueError('inverted_residual_setting should be non-empty or a 4-element list, got {}'.format(inverted_residual_setting))
        input_channel = _make_divisible((input_channel * width_mult), round_nearest)
        self.last_channel = _make_divisible((last_channel * max(1.0, width_mult)), round_nearest)
        self.conv1 = ConvBNReLU(3, input_channel, stride=2, norm_layer=norm_layer)
        # Indices of stages that start a new "cell" (stride-2 boundaries, plus stage 0).
        self.newcell = [0]
        for i in range(1, len(inverted_residual_setting)):
            if (inverted_residual_setting[i][3] == 2):
                self.newcell.append(i)
        num_stages = len(inverted_residual_setting)
        stages = ([None] * num_stages)
        stage_bns = ([None] * num_stages)
        conv_outs = ([None] * num_stages)
        # NOTE(review): list-multiplication repeats the SAME module object, so all
        # cells share one recurrent_dsconv's parameters -- confirm this is intended.
        recurrent_dsconvs = ([recurrent_dsconv(rla_channel, rla_channel, rla_channel)] * len(self.newcell))
        j = 0
        for (t, c, n, s) in inverted_residual_setting:
            output_channel = _make_divisible((c * width_mult), round_nearest)
            stages[j] = []
            # One BN per block in the stage, applied to the RLA state h.
            stage_bns[j] = nn.ModuleList([norm_layer(rla_channel) for _ in range(n)])
            conv_outs[j] = conv_out(output_channel, rla_channel)
            for i in range(n):
                if ECA:
                    # Smaller ECA kernel for the narrower early stages.
                    if (c < 96):
                        ECA_ksize = 1
                    else:
                        ECA_ksize = 3
                else:
                    ECA_ksize = None
                # Only the first block of a stage uses the configured stride.
                stride = (s if (i == 0) else 1)
                stages[j].append(block(input_channel, output_channel, stride, expand_ratio=t, rla_channel=rla_channel, norm_layer=norm_layer, ECA_ksize=ECA_ksize))
                input_channel = output_channel
            stages[j] = nn.ModuleList(stages[j])
            j += 1
        self.stages = nn.ModuleList(stages)
        self.conv_outs = nn.ModuleList(conv_outs)
        self.recurrent_dsconvs = nn.ModuleList(recurrent_dsconvs)
        self.stage_bns = nn.ModuleList(stage_bns)
        self.rla_channel = rla_channel
        # When True, h is allocated on CPU (used for FLOPs counting).
        self.flops = False
        self.conv2 = ConvBNReLU((input_channel + rla_channel), self.last_channel, kernel_size=1, norm_layer=norm_layer)
        self.bn2 = norm_layer(rla_channel)
        self.relu = nn.ReLU6(inplace=True)
        self.tanh = nn.Tanh()
        self.classifier = nn.Sequential(nn.Dropout(0.2), nn.Linear(self.last_channel, num_classes))
        # Standard MobileNetV2 weight initialization.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if (m.bias is not None):
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)
    def _forward_impl(self, x: Tensor) -> Tensor:
        x = self.conv1(x)
        (batch, _, height, width) = x.size()
        if self.flops:
            h = torch.zeros(batch, self.rla_channel, height, width)
        else:
            # NOTE(review): device is hardcoded to 'cuda'; breaks CPU inference
            # unless self.flops is set -- consider x.device. Left as-is.
            h = torch.zeros(batch, self.rla_channel, height, width, device='cuda')
        j = 0
        k = (- 1)
        for (stage, bns, conv_out) in zip(self.stages, self.stage_bns, self.conv_outs):
            # Advance to the next (shared) recurrent conv at each cell boundary.
            if (j in self.newcell):
                k += 1
                recurrent_dsconv = self.recurrent_dsconvs[k]
            for (layer, bn) in zip(stage, bns):
                # Each block returns the main path x, a residual summary y, and h.
                (x, y, h) = layer(x, h)
                y_out = conv_out(y)
                h = (h + y_out)
                h = bn(h)
                h = self.tanh(h)
            h = recurrent_dsconv(h)
            j += 1
        h = self.bn2(h)
        h = self.relu(h)
        # Concatenate the aggregated state with the backbone features for the head.
        x = torch.cat((x, h), dim=1)
        x = self.conv2(x)
        x = nn.functional.adaptive_avg_pool2d(x, (1, 1)).reshape(x.shape[0], (- 1))
        x = self.classifier(x)
        return x
    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)
class TFBaseModelOutputWithPooling(ModelOutput):
    """Base TF model output that also carries a pooled representation."""
    # Hidden states of the last layer, shape (batch, seq_len, hidden).
    last_hidden_state: tf.Tensor = None
    # Pooled (e.g. [CLS]) representation, shape (batch, hidden).
    pooler_output: tf.Tensor = None
    # Per-layer hidden states, present when output_hidden_states=True.
    hidden_states: Optional[Tuple[tf.Tensor]] = None
    # Per-layer attention weights, present when output_attentions=True.
    attentions: Optional[Tuple[tf.Tensor]] = None
def resnet14_cub(num_classes=200, **kwargs):
    """ResNet-14 configured for CUB-200-2011 (200 bird classes by default)."""
    return get_resnet(num_classes=num_classes, blocks=14, model_name='resnet14_cub', **kwargs)
def get_reward_stats(lst):
    """Return (min, max, mean) over a list of reward tensors."""
    stacked = torch.stack(lst)
    return (stacked.min(), stacked.max(), stacked.mean())
def get_sent_paragraph(span):
    """Return the paragraph associated with the sentence equal to *span*.

    Looks up *span* among the document's sentences and returns the paragraph
    at the matching index; returns 0 when no sentence matches.
    """
    doc = span.doc
    paragraphs = doc._.paragraphs
    for sent_idx, sentence in enumerate(doc._.sentences):
        if sentence == span:
            return paragraphs[sent_idx]
    return 0
class SpatialDropout2D(KerasLayer):
    """Keras-style spatial dropout: drops entire 2D feature maps with prob *p*."""
    def __init__(self, p=0.5, dim_ordering='th', input_shape=None, **kwargs):
        # First positional arg (jvalue) is None; input_shape is normalized to a list.
        super(SpatialDropout2D, self).__init__(None, float(p), dim_ordering, (list(input_shape) if input_shape else None), **kwargs)
class PPO():
    """Proximal Policy Optimization for continuous actions with a clipped
    surrogate objective; keeps a frozen `policy_old` for sampling.
    """
    def __init__(self, state_dim, action_dim, action_std, lr, betas, gamma, K_epochs, eps_clip):
        self.lr = lr
        self.betas = betas
        # Discount factor for returns.
        self.gamma = gamma
        # PPO clipping range epsilon.
        self.eps_clip = eps_clip
        # Number of optimization epochs per update.
        self.K_epochs = K_epochs
        self.policy = ActorCritic(state_dim, action_dim, action_std).to(device)
        self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=lr, betas=betas)
        # Behavior policy used for acting; synced after each update.
        self.policy_old = ActorCritic(state_dim, action_dim, action_std).to(device)
        self.policy_old.load_state_dict(self.policy.state_dict())
        self.MseLoss = nn.MSELoss()
    def select_action(self, state, memory):
        """Sample an action from the old policy; transitions recorded in memory."""
        state = torch.FloatTensor(state.reshape(1, (- 1))).to(device)
        return self.policy_old.act(state, memory).cpu().data.numpy().flatten()
    def update(self, memory):
        """Run K_epochs of clipped-surrogate PPO on the collected rollout."""
        # Monte-Carlo discounted returns, computed backwards; reset at terminals.
        rewards = []
        discounted_reward = 0
        for (reward, is_terminal) in zip(reversed(memory.rewards), reversed(memory.is_terminals)):
            if is_terminal:
                discounted_reward = 0
            discounted_reward = (reward + (self.gamma * discounted_reward))
            rewards.insert(0, discounted_reward)
        # Normalize returns for stability.
        rewards = torch.tensor(rewards).to(device)
        rewards = ((rewards - rewards.mean()) / (rewards.std() + 1e-05))
        old_states = torch.squeeze(torch.stack(memory.states).to(device), 1).detach()
        old_actions = torch.squeeze(torch.stack(memory.actions).to(device), 1).detach()
        old_logprobs = torch.squeeze(torch.stack(memory.logprobs), 1).to(device).detach()
        for _ in range(self.K_epochs):
            (logprobs, state_values, dist_entropy) = self.policy.evaluate(old_states, old_actions)
            # Importance ratio pi_theta / pi_theta_old.
            ratios = torch.exp((logprobs - old_logprobs.detach()))
            advantages = (rewards - state_values.detach())
            surr1 = (ratios * advantages)
            surr2 = (torch.clamp(ratios, (1 - self.eps_clip), (1 + self.eps_clip)) * advantages)
            # Clipped policy loss + value loss - entropy bonus.
            loss = (((- torch.min(surr1, surr2)) + (0.5 * self.MseLoss(state_values, rewards))) - (0.01 * dist_entropy))
            self.optimizer.zero_grad()
            loss.mean().backward()
            self.optimizer.step()
        # Sync the behavior policy with the freshly optimized weights.
        self.policy_old.load_state_dict(self.policy.state_dict())
def get_cs(e_0=100, z=74):
    """Build a bremsstrahlung cross-section function for element *z*.

    Loads the tabulated cross-section grid from data_path/cs/ and returns a
    callable (e_g, u) -> scaled cross-section, where e_0 is the electron
    energy scale and u a fractional energy. Values interpolated on a
    (log10 electron energy) x (photon fraction) spline.
    """
    # First grid file row: electron energies; second row: photon-fraction axis.
    with open(os.path.join(data_path, 'cs/grid.csv'), 'r') as csvfile:
        r = csv.reader(csvfile, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)
        t = next(r)
        e_e = np.array([float(a) for a in t[0].split(',')])
        log_e_e = np.log10(e_e)
        t = next(r)
        k = np.array([float(a) for a in t[0].split(',')])
    t = []
    # Per-element table of cross-section values over the grid.
    with open(os.path.join(data_path, ('cs/%d.csv' % z)), 'r') as csvfile:
        r = csv.reader(csvfile, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)
        for row in r:
            t.append([float(a) for a in row[0].split(',')])
    t = np.array(t)
    # Cubic in log-energy, linear in photon fraction.
    scaled = interpolate.RectBivariateSpline(log_e_e, k, t, kx=3, ky=1)
    m_electron = 511
    z2 = (z * z)
    # Kinematic prefactor times the interpolated scaled cross-section.
    # NOTE(review): formula assumes energies in keV (m_electron = 511) -- confirm units.
    return (lambda e_g, u: ((((((u * e_0) + m_electron) ** 2) * z2) / (((u * e_0) * e_g) * ((u * e_0) + (2 * m_electron)))) * scaled(np.log10((u * e_0)), (e_g / (u * e_0)))))
def process_one_img(file, im):
    """Normalize an image: ensure RGB, downscale in place to fit 400x400,
    save it as '<file>.jpg', and delete the original file.
    """
    # The two original branches were identical except for the RGB conversion;
    # convert first when needed, then run the shared path once.
    if im.mode != 'RGB':
        im = im.convert('RGB')
    # NOTE(review): Image.ANTIALIAS was removed in Pillow >= 10; Image.LANCZOS
    # is the drop-in replacement if the Pillow version is upgraded.
    im.thumbnail((400, 400), Image.ANTIALIAS)
    im.save(file + '.jpg')
    os.remove(file)
def kronecker(matrix1, matrix2):
    """Kronecker product of two 2-D tensors.

    For matrix1 of shape (m, n) and matrix2 of shape (p, q) returns the
    (m*p, n*q) Kronecker product. Replaces the deprecated torch.ger-based
    construction with plain broadcasting; the result is identical.
    """
    (m, n) = matrix1.size()
    (p, q) = matrix2.size()
    # (m,1,n,1) * (1,p,1,q) -> (m,p,n,q), then flatten block-wise.
    product = matrix1.unsqueeze(1).unsqueeze(3) * matrix2.unsqueeze(0).unsqueeze(2)
    return product.reshape(m * p, n * q)
def test_digits_cosine_naive_object():
    """Mixture of submodular selectors (naive greedy, cosine) must reproduce
    the precomputed ranking, gains, and subset on the digits data.
    """
    model1 = FacilityLocationSelection(100)
    model2 = GraphCutSelection(100)
    # Weighted mixture: 1.0 * facility-location + 0.3 * graph-cut.
    model = MixtureSelection(100, [model1, model2], [1.0, 0.3], metric='cosine', optimizer=NaiveGreedy(random_state=0))
    model.fit(X_digits)
    assert_array_equal(model.ranking, digits_cosine_ranking)
    assert_array_almost_equal(model.gains, digits_cosine_gains, 4)
    assert_array_almost_equal(model.subset, X_digits[model.ranking])
def writeWorldDescr(output):
    """Emit the CxxTest RealWorldDescription::_worldName definition.

    With --no-static-init only a declaration is written; otherwise the name
    is statically initialized to "cxxtest".
    """
    if options.noStaticInit:
        line = 'const char* CxxTest::RealWorldDescription::_worldName;\n'
    else:
        line = 'const char* CxxTest::RealWorldDescription::_worldName = "cxxtest";\n'
    output.write(line)
def get_num_frames(video_frame_dir):
    """Count frames in a directory of numerically named .jpg files.

    Returns highest frame index + 1 (frames are assumed 0-based); returns 0
    when the directory holds no .jpg files.
    """
    frame_ids = [int(os.path.splitext(name)[0])
                 for name in os.listdir(video_frame_dir)
                 if name.endswith('.jpg')]
    return max(frame_ids, default=-1) + 1
def get_feat_dim(feat_dir):
    """Return the feature dimensionality of a Kaldi data directory.

    Runs `feat-to-dim` on the directory's feats.scp; returns 0 when no
    feature directory is given.
    """
    if (feat_dir is None):
        return 0
    stdout_val = get_command_stdout('feat-to-dim --print-args=false scp:{data}/feats.scp -'.format(data=feat_dir))
    feat_dim = int(stdout_val)
    return feat_dim
def wResUnit(data, num_filter, stride, dilate, projection, bottle_neck, dropout=0, momentum=0.9, eps=1e-05, use_global_stats=False, name=None, lr_mult=1, reuse=None, **kwargs):
    """Pre-activation (wide) residual unit with optional bottleneck.

    Applies BN+ReLU first, then either a 1x1 -> 3x3 -> 1x1 bottleneck or a
    3x3 -> 3x3 basic path, and adds a shortcut (1x1 projection when
    *projection* is set, identity otherwise). Optional dropout between convs.
    Returns the summed symbol.
    """
    assert (name is not None)
    # Pre-activation: the shared BN+ReLU feeds both the residual path and the projection.
    x = BNRelu(data, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(('bn' + name) + '_branch2a'), lr_mult=lr_mult, reuse=reuse)
    if projection:
        shortcut = Conv(x, num_filter=num_filter, kernel=(1, 1), stride=((stride,) * 2), pad=(0, 0), no_bias=True, name=(('res' + name) + '_branch1'), lr_mult=lr_mult, reuse=reuse)
    else:
        shortcut = data
    if bottle_neck:
        # 1x1 reduce (to num_filter/4) -> 3x3 (num_filter/2, strided/dilated) -> 1x1 expand.
        x = Conv(x, num_filter=int((num_filter / 4.0)), kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True, name=(('res' + name) + '_branch2a'), lr_mult=lr_mult, reuse=reuse)
        x = BNRelu(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(('bn' + name) + '_branch2b1'), lr_mult=lr_mult, reuse=reuse)
        if (dropout > 0):
            x = Drop(x, p=dropout)
        x = Conv(x, num_filter=int((num_filter / 2.0)), kernel=(3, 3), stride=((stride,) * 2), pad=((dilate,) * 2), dilate=((dilate,) * 2), no_bias=True, name=(('res' + name) + '_branch2b1'), lr_mult=lr_mult, reuse=reuse)
        x = BNRelu(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(('bn' + name) + '_branch2b2'), lr_mult=lr_mult, reuse=reuse)
        if (dropout > 0):
            x = Drop(x, p=dropout)
        x = Conv(x, num_filter=num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), no_bias=True, name=(('res' + name) + '_branch2b2'), lr_mult=lr_mult, reuse=reuse)
    else:
        # Basic path: first conv may override width/dilation via kwargs.
        mid_filter = kwargs.get('mid_filter', num_filter)
        fst_dilate = kwargs.get('fst_dilate', dilate)
        x = Conv(x, num_filter=mid_filter, kernel=(3, 3), stride=((stride,) * 2), pad=((fst_dilate,) * 2), dilate=((fst_dilate,) * 2), no_bias=True, name=(('res' + name) + '_branch2a'), lr_mult=lr_mult, reuse=reuse)
        x = BNRelu(x, fix_gamma=False, momentum=momentum, eps=eps, use_global_stats=use_global_stats, name=(('bn' + name) + '_branch2b1'), lr_mult=lr_mult, reuse=reuse)
        x = Conv(x, num_filter=num_filter, kernel=(3, 3), stride=(1, 1), pad=((dilate,) * 2), dilate=((dilate,) * 2), no_bias=True, name=(('res' + name) + '_branch2b1'), lr_mult=lr_mult, reuse=reuse)
    x = (x + shortcut)
    return x
# NOTE(review): the next line looks like a stripped decorator -- presumably
# something like @register_function('min'); as written it is a bare call.
_function('min')
class AutogradMin(AutogradFunction):
    """Autograd support for min over encrypted tensors (full or per-dim)."""
    def forward(ctx, *args, **kwargs):
        # Accept either min(input) or min(input, dim), mirroring torch.min.
        assert (len(args) >= 1)
        if (len(args) == 1):
            (input,) = args
            dim = kwargs.get('dim', None)
        else:
            assert (len(args) == 2)
            assert ('dim' not in kwargs)
            (input, dim) = args
        keepdim = kwargs.get('keepdim', False)
        one_hot = kwargs.get('one_hot', True)
        if (dim is None):
            # Full reduction: select the minimum via a one-hot argmin mask.
            argmin = input.argmin(one_hot=one_hot)
            min = input.mul(argmin).sum()
        else:
            (min, argmin) = input.min(dim, keepdim=keepdim, one_hot=one_hot)
        ctx.save_multiple_for_backward((dim, keepdim, argmin, one_hot))
        if (dim is None):
            return min
        else:
            # Indices are not differentiable.
            ctx.mark_non_differentiable(argmin)
            return (min, argmin)
    def backward(ctx, grad_output):
        (dim, keepdim, argmin, one_hot) = ctx.saved_tensors
        # Private (non-one-hot) indexing is unsupported, so backward needs one-hot.
        assert one_hot, 'cannot backpropagate through min layer that does notuse one-hot representation because private indexing is unsupported'
        if (len(argmin.size()) == 0):
            # Scalar reduction: gradient flows straight through.
            return grad_output
        if ((not keepdim) and (dim is not None)):
            grad_output = grad_output.unsqueeze(dim)
        # Route the gradient only to the argmin positions.
        return grad_output.mul(argmin)
# NOTE(review): the next line looks like a stripped decorator -- presumably
# @pytest.mark.parametrize(...); as written it is a bare attribute access chain.
.parametrize('a_val, b_val, x_val, y_val, vector', [(1.0, 1.0, 1.0, 1.0, [10.0, 20.0]), (5.0, 10.0, (- 2.0), 5.0, [0.0, (- 1.0)]), (0.0, 0.0, 1.1, 0.02, [0.0, 0.0]), ((- 2.2), (- 1.5), (- 12.3), 34.8, [2.2, 5.3]), ((- 1.5), 0.0, (- 0.002), 4.93, [0.1, (- 0.02)])])
def test_hessian_vector_product_2x2_non_diagonal(a_val, b_val, x_val, y_val, vector):
    """Hessian-vector product must match an explicitly computed Hessian for a
    polynomial with non-zero mixed second derivatives.
    """
    obs = [torch.tensor([a_val]), torch.tensor([b_val])]
    vector = torch.tensor([vector])
    x = torch.tensor([x_val], requires_grad=True)
    y = torch.tensor([y_val], requires_grad=True)
    def f():
        # a*x^3 + b*y^3 + x^2*y + y^2*x -- cross terms make the Hessian non-diagonal.
        (a, b) = (obs[0], obs[1])
        kl = ((((a * (x ** 3)) + (b * (y ** 3))) + ((x ** 2) * y)) + ((y ** 2) * x))
        return kl
    expected_hessian = compute_hessian(f(), [x, y])
    expected_hvp = torch.mm(vector, expected_hessian).detach()
    f_Ax = _build_hessian_vector_product(f, [x, y])
    hvp = f_Ax(vector[0]).detach()
    assert np.allclose(hvp, expected_hvp)
def insert(text, vocab, n_max_tokens=3):
    """Augment *text* by inserting 1..n_max_tokens random tokens from *vocab*.

    Each token is inserted at a random position before an existing token
    (never appended at the very end, matching the original behavior).
    Returns the augmented text. Fix: empty/whitespace-only input previously
    crashed with ValueError from randint(0, -1); it is now returned unchanged.
    """
    tokens = text.split()
    if not tokens:
        return text
    n_insert_token = random.randint(1, n_max_tokens)
    for _ in range(n_insert_token):
        insert_token_idx = random.randint(0, (len(tokens) - 1))
        tokens.insert(insert_token_idx, random.choice(vocab))
    return ' '.join(tokens)
class FeatureLabelPreprocessing(Preprocessing):
    """Preprocessing that applies separate transformers to features and labels."""
    def __init__(self, feature_transformer, label_transformer, bigdl_type='float'):
        super(FeatureLabelPreprocessing, self).__init__(bigdl_type, feature_transformer, label_transformer)
# NOTE(review): the next line looks like a stripped decorator -- presumably
# @pytest.mark.parametrize('t', ...); as written it is a bare attribute chain.
.parametrize('t', [t0, t1, t2, t3, t4])
def test_sort(t):
    """Validate a ray's hit list: deduplicate near-equal hits in place, then
    assert no face is hit twice at well-separated distances.

    Each entry of *t* is (distance, face_id, flag) or None. Pass 1 collapses
    hits at (nearly) the same distance; pass 2 raises when the same face
    reappears within a small lookahead window.
    """
    print()
    o = []
    ray_len = len(t)
    # --- Pass 1: merge hits whose distances differ by < 1e-06. -------------
    j = 0
    while (j < (len(t) - 1)):
        offset = 1
        if (t[j] is None):
            j += 1
            continue
        dn = t[j][0]
        clear_self = False
        # Scan forward over the run of near-equal distances.
        while (((j + offset) < ray_len) and ((t[(j + offset)] is None) or (abs((t[(j + offset)][0] - dn)) < 1e-06))):
            if ((t[(j + offset)] is not None) and (t[j][1] == t[(j + offset)][1])):
                if (t[j][2] != t[(j + offset)][2]):
                    # Same face, opposite flags at the same distance: both cancel.
                    clear_self = True
                t[(j + offset)] = None
            offset += 1
        if clear_self:
            t[j] = None
        j += 1
    # --- Pass 2: look ahead for duplicate faces at distinct distances. -----
    j = 0
    has_error = False
    while (j < len(t)):
        if (t[j] is None):
            j += 1
            continue
        (d, f, f2) = t[j]
        offset = 1
        dn = d
        real_offset = 1
        # Examine up to 3 real (non-None) follow-up hits, or any within 1e-09.
        while (((j + offset) < len(t)) and ((real_offset < 3) or (t[(j + offset)] is None) or (abs((t[(j + offset)][0] - dn)) < 1e-09))):
            if (t[(j + offset)] is not None):
                if (t[(j + offset)][1] == f):
                    # Same face seen again: record the pair for inspection.
                    o.append(t[j])
                    o.append(t[(j + offset)])
                    print(f)
                    print(t[(j + offset)][1])
                    if (offset == 1):
                        j += 1
                    else:
                        t[(j + offset)] = None
                    break
                dn = t[(j + offset)][0]
                real_offset += 1
            offset += 1
        else:
            if (real_offset > 1):
                has_error = True
                print(f'error here ro:{real_offset}')
        j += 1
    if has_error:
        raise RuntimeError('Error on the ray')
    print(len(t))
    print(len(o))
class Logger():
    """Minimal append-only text logger writing to <basedir>/log.txt."""

    def __init__(self, basedir):
        # Every message is appended to a single file inside basedir.
        self.logfile = os.path.join(basedir, 'log.txt')

    def log(self, msg, out=False):
        """Append *msg* plus a newline; echo to stdout when *out* is True."""
        with open(self.logfile, 'a+') as handle:
            handle.write(msg + '\n')
        if out:
            print(msg)

    def logo(self, msg):
        """Log *msg* (stringified) and always echo it to stdout."""
        self.log(str(msg), True)
def ILP_protocol_w_compression(reference_summary: str, sent_units: List[str], compression: List[dict], min_word_limit=30, max_word_limit=40, step=3):
    """Set up (and debug-print) an ILP for extractive selection with compression.

    NOTE(review): this looks like unfinished scaffolding -- `y_tok` is created
    but never constrained, `compression`/`min_word_limit`/`step` are unused,
    the objective maximizes cp.sum over a list of *strings* (presumably meant
    to be y_tok), and the function ends with exit(). Documented, not fixed.
    """
    print('Compression')
    constraint_list = []
    # Unique lowercased reference tokens define the token indicator variables.
    ref_toks = reference_summary.split(' ')
    ref_toks = [x.lower() for x in ref_toks]
    ref_toks_set = list(set(ref_toks))
    uniq_tok_num = len(ref_toks_set)
    # Binary indicator per unique reference token (currently unconstrained).
    y_tok = cp.Variable(shape=uniq_tok_num, boolean=True)
    len_doc = len(sent_units)
    # Binary indicator per candidate sentence.
    sent_var = cp.Variable(shape=len_doc, boolean=True)
    # NOTE(review): len(x) over a string counts characters, not words -- confirm.
    len_of_each_sentence = cp.Constant([len(x) for x in sent_units])
    length_constraint_sents = (sent_var * len_of_each_sentence)
    constraint_list.append((length_constraint_sents <= max_word_limit))
    obj = cp.Maximize(cp.sum(ref_toks))
    prob = cp.Problem(obj, constraints=constraint_list)
    print(prob)
    prob.solve()
    print(prob.status)
    print(obj.value)
    print(y_tok.value)
    print(sent_var.value)
    # NOTE(review): exit() kills the interpreter -- debugging artifact.
    exit()
class TFAutoModelForQuestionAnswering(object):
    """Factory class: dispatches to the TF QA model matching a config type.

    Not directly instantiable; use from_config() or from_pretrained().
    NOTE(review): the bare expressions before from_config/from_pretrained look
    like stripped decorators (docstring helpers and @classmethod); both methods
    take `cls` and were presumably classmethods in the original.
    """
    def __init__(self):
        raise EnvironmentError('TFAutoModelForQuestionAnswering is designed to be instantiated using the `TFAutoModelForQuestionAnswering.from_pretrained(pretrained_model_name_or_path)` or `TFAutoModelForQuestionAnswering.from_config(config)` methods.')
    _list_option_in_docstrings(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, use_model_types=False)
    def from_config(cls, config):
        # Instantiate (without pretrained weights) from a config object.
        if (type(config) in TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()):
            return TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING[type(config)](config)
        raise ValueError('Unrecognized configuration class {} for this kind of TFAutoModel: {}.\nModel type should be one of {}.'.format(config.__class__, cls.__name__, ', '.join((c.__name__ for c in TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()))))
    _list_option_in_docstrings(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING)
    _start_docstrings('Instantiate one of the model classes of the library---with a question answering head---from a pretrained model.', TF_AUTO_MODEL_PRETRAINED_DOCSTRING)
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        # Resolve the config first (unless one was passed in kwargs), then
        # dispatch to the concrete class's from_pretrained.
        config = kwargs.pop('config', None)
        if (not isinstance(config, PretrainedConfig)):
            (config, kwargs) = AutoConfig.from_pretrained(pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs)
        if (type(config) in TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()):
            return TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING[type(config)].from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
        raise ValueError('Unrecognized configuration class {} for this kind of TFAutoModel: {}.\nModel type should be one of {}.'.format(config.__class__, cls.__name__, ', '.join((c.__name__ for c in TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()))))
def _mark_lines(linewavs, wavemin, wavemax, thisax, lams, spec):
    """Draw short vertical tick marks below the spectrum at given line wavelengths.

    For each wavelength in *linewavs*, finds the nearest point in *lams*, takes
    the local minimum of *spec* around it, and plots a tick just below it on
    *thisax*. wavemin/wavemax are currently unused. Returns None.
    """
    ylims = thisax.get_ylim()
    yspan = (ylims[1] - ylims[0])
    for linewav in linewavs:
        # Index of the spectrum sample closest to this line's wavelength.
        spindx = numpy.argmin(numpy.fabs((linewav - lams)))
        # Local minimum over a 5-sample window so the tick sits under the flux.
        ylevel = numpy.nanmin(spec[(spindx - 2):(spindx + 3)])
        # _LAMBDASUB is a module-level wavelength offset applied to the x axis.
        thisax.plot([(linewav - _LAMBDASUB), (linewav - _LAMBDASUB)], [(ylevel - (0.35 * yspan)), (ylevel - (0.1 * yspan))], 'k-', zorder=0)
    return None
class Regularizer(object):
    """Gradient/weight regularizer for Theano parameters.

    Supports L1/L2 gradient penalties and max-norm / unit-L2 / Frobenius
    weight constraints. Parameters whose names match *ignored_prefixes*
    are exempt.
    """

    def __init__(self, l1=0.0, l2=0.0, maxnorm=0.0, l2norm=False, frobnorm=False, ignored_prefixes=()):
        # Fix: explicit assignments replace the `__dict__.update(locals())`
        # trick (which also stored a self-referential `self` attribute), and
        # the mutable default argument `[]` becomes an immutable tuple.
        self.l1 = l1
        self.l2 = l2
        self.maxnorm = maxnorm
        self.l2norm = l2norm
        self.frobnorm = frobnorm
        self.ignored_prefixes = set(ignored_prefixes)

    def max_norm(self, p, maxnorm):
        """Rescale columns of p so their L2 norm does not exceed maxnorm."""
        if (maxnorm > 0):
            norms = T.sqrt(T.sum(T.sqr(p), axis=0))
            desired = T.clip(norms, 0, maxnorm)
            p = (p * (desired / (1e-07 + norms)))
        return p

    def l2_norm(self, p):
        # Note: `l2norm` here is the module-level helper function, not the
        # boolean attribute self.l2norm.
        return (p / l2norm(p, axis=0))

    def frob_norm(self, p, nrows):
        """Rescale p to a fixed Frobenius norm of sqrt(nrows)."""
        return ((p / T.sqrt(T.sum(T.sqr(p)))) * T.sqrt(nrows))

    def ignored(self, p):
        """True when the parameter's name matches one of the ignored prefixes."""
        return ((p.name is not None) and (any((p.name.startswith(pre) for pre in self.ignored_prefixes)) or any(((('/' + pre) in p.name) for pre in self.ignored_prefixes))))

    def gradient_regularize(self, p, g):
        """Add L2 (weight-decay) and L1 (sign) penalties to gradient g."""
        if self.ignored(p):
            return g
        if (self.l2 != 0):
            g += (p * self.l2)
        if (self.l1 != 0):
            g += (T.sgn(p) * self.l1)
        return g

    def weight_regularize(self, p):
        """Apply max-norm, unit-L2, and Frobenius constraints to weights p."""
        if self.ignored(p):
            return p
        p = self.max_norm(p, self.maxnorm)
        if self.l2norm:
            p = self.l2_norm(p)
        if (self.frobnorm > 0):
            p = self.frob_norm(p, self.frobnorm)
        return p
class VOC12ImageDataset(Dataset):
    """PASCAL VOC 2012 image dataset with optional resize/rescale/normalize/
    flip/crop augmentation; yields {'name': str, 'img': array}.
    """
    def __init__(self, img_name_list_path, voc12_root, resize_long=None, rescale=None, img_normal=TorchvisionNormalize(), hor_flip=False, crop_size=None, crop_method=None, to_torch=True):
        # List of (encoded) image names to serve.
        self.img_name_list = load_img_name_list(img_name_list_path)
        self.voc12_root = voc12_root
        # (min, max) target for the longer image side, applied first.
        self.resize_long = resize_long
        # Random scale range applied after resize.
        self.rescale = rescale
        self.crop_size = crop_size
        self.img_normal = img_normal
        self.hor_flip = hor_flip
        # 'random' crop or deterministic top-left crop.
        self.crop_method = crop_method
        # When True, transpose HWC -> CHW for torch consumption.
        self.to_torch = to_torch
    def __len__(self):
        return len(self.img_name_list)
    def __getitem__(self, idx):
        name = self.img_name_list[idx]
        name_str = decode_int_filename(name)
        img = np.asarray(imageio.imread(get_img_path(name_str, self.voc12_root)))
        # Augmentations applied in fixed order: resize, rescale, normalize,
        # flip, crop, layout conversion.
        if self.resize_long:
            img = imutils.random_resize_long(img, self.resize_long[0], self.resize_long[1])
        if self.rescale:
            img = imutils.random_scale(img, scale_range=self.rescale, order=3)
        if self.img_normal:
            img = self.img_normal(img)
        if self.hor_flip:
            img = imutils.random_lr_flip(img)
        if self.crop_size:
            if (self.crop_method == 'random'):
                img = imutils.random_crop(img, self.crop_size, 0)
            else:
                img = imutils.top_left_crop(img, self.crop_size, 0)
        if self.to_torch:
            img = imutils.HWC_to_CHW(img)
        return {'name': name_str, 'img': img}
class Caffe2Tracer():
    """Wrap a detectron2 model so it can be exported to Caffe2, ONNX, or
    TorchScript via a Caffe2-compatible traceable meta-architecture.
    """
    def __init__(self, cfg, model, inputs):
        assert isinstance(cfg, CN), cfg
        assert isinstance(model, torch.nn.Module), type(model)
        # Ensure the config carries the export-specific keys.
        if ('EXPORT_CAFFE2' not in cfg):
            cfg = add_export_config(cfg)
        self.cfg = cfg
        self.model = model
        # Sample inputs used for tracing.
        self.inputs = inputs
    def _get_traceable(self):
        """Return (traceable model wrapper, caffe2-format inputs)."""
        C2MetaArch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[self.cfg.MODEL.META_ARCHITECTURE]
        # Deep-copy so export-time modifications don't touch the original model.
        traceable_model = C2MetaArch(self.cfg, copy.deepcopy(self.model))
        traceable_inputs = traceable_model.get_caffe2_inputs(self.inputs)
        return (traceable_model, traceable_inputs)
    def export_caffe2(self):
        """Export to Caffe2 (predict_net + init_net) wrapped in a Caffe2Model."""
        (model, inputs) = self._get_traceable()
        (predict_net, init_net) = export_caffe2_detection_model(model, inputs)
        return Caffe2Model(predict_net, init_net)
    def export_onnx(self):
        """Export to an ONNX model via tracing."""
        (model, inputs) = self._get_traceable()
        return export_onnx_model_impl(model, (inputs,))
    def export_torchscript(self):
        """Export to TorchScript via torch.jit.trace (no gradients needed)."""
        (model, inputs) = self._get_traceable()
        logger = logging.getLogger(__name__)
        logger.info('Tracing the model with torch.jit.trace ...')
        with torch.no_grad():
            return torch.jit.trace(model, (inputs,), optimize=True)
class Seq2SeqSequenceClassifierOutput(ModelOutput):
    """Output container for sequence classification with an encoder-decoder model."""
    # Classification loss, present when labels are provided.
    loss: Optional[torch.FloatTensor] = None
    # Classification scores, shape (batch, num_labels).
    logits: torch.FloatTensor = None
    # Cached key/value states for fast decoding.
    past_key_values: Optional[List[torch.FloatTensor]] = None
    # Decoder hidden states per layer (when requested).
    decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Decoder attention weights per layer (when requested).
    decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    # Final encoder hidden state.
    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
    # Encoder hidden states per layer (when requested).
    encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Encoder attention weights per layer (when requested).
    encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
def test_register_model() -> None:
    """Registering a model makes it retrievable; resetting the register
    makes subsequent lookups raise ValueError.
    """
    register_model('dt', classification_cls=DecisionTreeClassifier, regression_cls=DecisionTreeRegressor)
    classification = get_model('dt', 'classification')
    regression = get_model('dt', 'regression')
    assert isinstance(classification, DecisionTreeClassifier)
    assert isinstance(regression, DecisionTreeRegressor)
    # After a reset, 'dt' must no longer resolve.
    reset_model_register()
    with pytest.raises(ValueError, match='The specified model '):
        classification = get_model('dt', 'classification')
def ewc_loss(params: Params, model: nn.Module, grads=None):
    """Elastic Weight Consolidation penalty: sum_i F_i * (p_i - p_i*)^2.

    Reads per-parameter '<name>_mean' and '<name>_fisher' buffers registered
    on *model*. Returns (loss, grads); grads are computed only when a truthy
    *grads* is passed in. Falls back to a zero loss when the buffers are
    absent (e.g. before the first consolidation step).
    """
    try:
        losses = []
        for (n, p) in model.named_parameters():
            # Buffer names replace dots so they are valid attribute names.
            n = n.replace('.', '__')
            mean = getattr(model, '{}_mean'.format(n))
            fisher = getattr(model, '{}_fisher'.format(n))
            losses.append((fisher * ((p - mean) ** 2)).sum())
        loss = ((model.lamda / 2) * sum(losses))
        if grads:
            loss.backward()
            grads = get_grads(params, model, loss)
            return (loss, grads)
        else:
            return (loss, None)
    except AttributeError:
        # No consolidated statistics registered yet -- no EWC penalty.
        print('exception')
        return (torch.zeros(1).to(params.device), grads)
# NOTE(review): the bare '_model' below looks like a stripped '@register_model'
# decorator from timm; as written it is a no-op name reference.
_model
def tf_efficientnetv2_m_in21ft1k(pretrained=False, **kwargs):
    """EfficientNetV2-M, TF weights pretrained on ImageNet-21k then fine-tuned
    on ImageNet-1k.
    """
    # TF-ported weights require TF's BatchNorm epsilon and 'same' padding to
    # reproduce the original outputs.
    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
    kwargs['pad_type'] = 'same'
    model = _gen_efficientnetv2_m('tf_efficientnetv2_m_in21ft1k', pretrained=pretrained, **kwargs)
    return model
def get_plane_params_in_global(planes, camera_info):
    """Convert per-camera plane parameters into world-frame plane parameters.

    Args:
        planes: (N, 3) array of plane parameter points in camera coordinates.
        camera_info: dict with 'position' (3-vector) and 'rotation'
            (a quaternion accepted by quaternion.as_rotation_matrix).
    Returns:
        (N, 3) array of plane parameters in world coordinates.
    """
    tran = camera_info['position']
    rot = camera_info['rotation']
    start = (np.ones((len(planes), 3)) * tran)
    # Flip Y/Z axes to go from the camera convention to the world convention.
    end = (planes * np.array([1, (- 1), (- 1)]))
    # Fix: the rotation was missing its matrix-multiply operator (syntax error
    # in the original line); rotate the points, then translate.
    end = ((quaternion.as_rotation_matrix(rot) @ end.T).T + tran)
    a = end
    b = (end - start)
    # Project each world point onto its ray direction to recover the plane offset.
    planes_world = (((a * b).sum(axis=1) / (np.linalg.norm(b, axis=1) ** 2)).reshape((- 1), 1) * b)
    return planes_world
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.