code stringlengths 101 5.91M |
|---|
class Director:
    """Coordinates envoys (shard holders) and federated-learning experiments.

    Keeps a registry of connected envoys (shards), a registry of submitted
    experiments, and one queue per envoy used to hand experiment names out
    to collaborators.
    """

    def __init__(self, *, tls: bool = True,
                 root_certificate: Union[Path, str] = None,
                 private_key: Union[Path, str] = None,
                 certificate: Union[Path, str] = None,
                 sample_shape: list = None,
                 target_shape: list = None,
                 review_plan_callback: Union[None, Callable] = None,
                 envoy_health_check_period: int = 60,
                 install_requirements: bool = False) -> None:
        """Initialize the Director.

        Args:
            tls: Whether aggregators are started with TLS enabled.
            root_certificate / private_key / certificate: TLS material
                forwarded to each experiment's aggregator.
            sample_shape / target_shape: Expected dataset shapes; envoys
                whose shard reports different shapes are rejected.
            review_plan_callback: Optional hook that may reject a plan
                before the experiment runs.
            envoy_health_check_period: Heartbeat period (seconds).
            install_requirements: Forwarded to experiment start-up.
        """
        self.sample_shape, self.target_shape = sample_shape, target_shape
        self._shard_registry = {}
        self.tls = tls
        self.root_certificate = root_certificate
        self.private_key = private_key
        self.certificate = certificate
        self.experiments_registry = ExperimentsRegistry()
        self.col_exp_queues = defaultdict(asyncio.Queue)
        self.col_exp = {}
        self.review_plan_callback = review_plan_callback
        self.envoy_health_check_period = envoy_health_check_period
        self.install_requirements = install_requirements

    def _caller_has_access(self, experiment_name: str, caller: str) -> bool:
        """Return True when the experiment exists and caller is among its users."""
        return (experiment_name in self.experiments_registry
                and caller in self.experiments_registry[experiment_name].users)

    def acknowledge_shard(self, shard_info: dict) -> bool:
        """Register an envoy's shard if its dataset shapes match; return acceptance."""
        if (self.sample_shape != shard_info['sample_shape']
                or self.target_shape != shard_info['target_shape']):
            logger.info('Request was not accepted')
            return False
        logger.info('Request was accepted')
        self._shard_registry[shard_info['node_info']['name']] = {
            'shard_info': shard_info,
            'is_online': True,
            'is_experiment_running': False,
            # An envoy is considered alive for two health-check periods.
            'valid_duration': 2 * self.envoy_health_check_period,
            'last_updated': time.time(),
        }
        return True

    async def set_new_experiment(self, *, experiment_name: str, sender_name: str,
                                 tensor_dict: dict, collaborator_names: Iterable[str],
                                 experiment_archive_path: Path) -> bool:
        """Create an Experiment from the submitted archive and queue it for execution."""
        experiment = Experiment(
            name=experiment_name,
            archive_path=experiment_archive_path,
            collaborators=list(collaborator_names),
            users=[sender_name],
            sender=sender_name,
            init_tensor_dict=tensor_dict,
        )
        self.experiments_registry.add(experiment)
        return True

    async def get_experiment_status(self, experiment_name: str, caller: str):
        """Return the experiment's status, or None if unknown/forbidden."""
        if not self._caller_has_access(experiment_name, caller):
            logger.error('No experiment data in the stash')
            return None
        return self.experiments_registry[experiment_name].status

    def get_trained_model(self, experiment_name: str, caller: str, model_type: str):
        """Return the 'best' or 'last' aggregated tensor dict, or None on any failure."""
        if not self._caller_has_access(experiment_name, caller):
            logger.error('No experiment data in the stash')
            return None
        aggregator = self.experiments_registry[experiment_name].aggregator
        if aggregator.last_tensor_dict is None:
            logger.error('Aggregator have no aggregated model to return')
            return None
        if model_type == 'best':
            return aggregator.best_tensor_dict
        if model_type == 'last':
            return aggregator.last_tensor_dict
        logger.error('Unknown model type required.')
        return None

    def get_experiment_data(self, experiment_name: str) -> Path:
        """Return the path to the experiment's workspace archive."""
        return self.experiments_registry[experiment_name].archive_path

    async def wait_experiment(self, envoy_name: str) -> str:
        """Block until an experiment is available for the envoy and return its name."""
        experiment_name = self.col_exp.get(envoy_name)
        if experiment_name and experiment_name in self.experiments_registry:
            experiment = self.experiments_registry[experiment_name]
            # Keep the envoy on its current experiment while rounds remain.
            if experiment.aggregator.round_number < experiment.aggregator.rounds_to_train:
                return experiment_name
        self.col_exp[envoy_name] = None
        queue = self.col_exp_queues[envoy_name]
        experiment_name = await queue.get()
        self.col_exp[envoy_name] = experiment_name
        return experiment_name

    def get_dataset_info(self):
        """Return the (sample_shape, target_shape) pair expected from envoys."""
        return (self.sample_shape, self.target_shape)

    def get_registered_shards(self) -> list:
        """Return the shard_info dicts of all registered envoys."""
        return [shard_status['shard_info'] for shard_status in self._shard_registry.values()]

    async def stream_metrics(self, experiment_name: str, caller: str):
        """Async-generate metric dicts from the experiment's aggregator.

        Yields None while waiting so callers can keep the stream alive;
        terminates once the aggregator has sent quit jobs and drained its queue.
        """
        if not self._caller_has_access(experiment_name, caller):
            raise Exception(f'No experiment name "{experiment_name}" in experiments list, or caller "{caller}" does not have access to this experiment')
        while not self.experiments_registry[experiment_name].aggregator:
            await asyncio.sleep(1)
        aggregator = self.experiments_registry[experiment_name].aggregator
        while True:
            if not aggregator.metric_queue.empty():
                yield aggregator.metric_queue.get()
                continue
            if aggregator.all_quit_jobs_sent() and aggregator.metric_queue.empty():
                return
            yield None

    def remove_experiment_data(self, experiment_name: str, caller: str):
        """Drop the experiment from the registry if the caller owns it."""
        if self._caller_has_access(experiment_name, caller):
            self.experiments_registry.remove(experiment_name)

    def set_experiment_failed(self, *, experiment_name: str, collaborator_name: str):
        """Stop the experiment's aggregator and mark the experiment FAILED."""
        if experiment_name not in self.experiments_registry:
            return
        aggregator = self.experiments_registry[experiment_name].aggregator
        aggregator.stop(failed_collaborator=collaborator_name)
        self.experiments_registry[experiment_name].status = Status.FAILED

    def update_envoy_status(self, *, envoy_name: str,
                            is_experiment_running: bool,
                            cuda_devices_status: list = None) -> int:
        """Record an envoy heartbeat; return the health-check period (seconds).

        Raises:
            ShardNotFoundError: if the envoy was never registered.
        """
        shard_info = self._shard_registry.get(envoy_name)
        if not shard_info:
            raise ShardNotFoundError(f'Unknown shard {envoy_name}')
        # BUG FIX: original used `shard_info['is_online']: True` (a bare
        # annotation, i.e. a no-op) so envoys were never marked online here.
        shard_info['is_online'] = True
        shard_info['is_experiment_running'] = is_experiment_running
        shard_info['valid_duration'] = 2 * self.envoy_health_check_period
        shard_info['last_updated'] = time.time()
        if cuda_devices_status is not None:
            devices = shard_info['shard_info']['node_info']['cuda_devices']
            for i, status in enumerate(cuda_devices_status):
                devices[i] = status
        return self.envoy_health_check_period

    def get_envoys(self) -> list:
        """Return envoy records with refreshed liveness and current experiment."""
        logger.info(f'Shard registry: {self._shard_registry}')
        for envoy_info in self._shard_registry.values():
            envoy_info['is_online'] = time.time() < (
                envoy_info.get('last_updated', 0) + envoy_info.get('valid_duration', 0))
            envoy_name = envoy_info['shard_info']['node_info']['name']
            # .get avoids a KeyError for envoys that never received an experiment.
            envoy_info['experiment_name'] = self.col_exp.get(envoy_name)
        # Materialize to honor the declared list return type.
        return list(self._shard_registry.values())

    def get_experiments_list(self, caller: str) -> list:
        """Summarize the caller's experiments (status, progress, task count)."""
        experiments = self.experiments_registry.get_user_experiments(caller)
        result = []
        for exp in experiments:
            exp_data = {
                'name': exp.name,
                'status': exp.status,
                'collaborators_amount': len(exp.collaborators),
            }
            progress = _get_experiment_progress(exp)
            if progress is not None:
                exp_data['progress'] = progress
            if exp.aggregator:
                tasks_amount = len({task['function'] for task in exp.aggregator.assigner.tasks.values()})
                exp_data['tasks_amount'] = tasks_amount
            result.append(exp_data)
        return result

    def get_experiment_description(self, caller: str, name: str) -> dict:
        """Return a detailed description dict, or {} if unknown/forbidden."""
        exp = self.experiments_registry.get(name)
        if not exp or caller not in exp.users:
            return {}
        progress = _get_experiment_progress(exp)
        model_statuses = _get_model_download_statuses(exp)
        tasks = _get_experiment_tasks(exp)
        collaborators = _get_experiment_collaborators(exp)
        result = {
            'name': name,
            'status': exp.status,
            'current_round': exp.aggregator.round_number,
            'total_rounds': exp.aggregator.rounds_to_train,
            'download_statuses': {
                'models': model_statuses,
                'logs': [{'name': 'aggregator', 'status': 'ready'}],
            },
            'collaborators': collaborators,
            'tasks': tasks,
            'progress': progress,
        }
        return result

    async def start_experiment_execution_loop(self):
        """Run experiments from the registry forever, one at a time."""
        loop = asyncio.get_event_loop()
        while True:
            async with self.experiments_registry.get_next_experiment() as experiment:
                if self.review_plan_callback:
                    # Give the manager a chance to veto the plan before start.
                    if not await experiment.review_experiment(self.review_plan_callback):
                        logger.info(f'"{experiment.name}" Plan was rejected by the Director manager.')
                        continue
                run_aggregator_future = loop.create_task(experiment.start(
                    root_certificate=self.root_certificate,
                    certificate=self.certificate,
                    private_key=self.private_key,
                    tls=self.tls,
                    install_requirements=self.install_requirements,
                ))
                for col_name in experiment.collaborators:
                    queue = self.col_exp_queues[col_name]
                    await queue.put(experiment.name)
                await run_aggregator_future
class Conv1x1(nn.Module):
    """Pointwise (1x1) convolution followed by a norm layer and ReLU."""

    def __init__(self, in_channels, out_channels, bn_norm, stride=1, groups=1):
        super(Conv1x1, self).__init__()
        # Bias is disabled because the following norm layer absorbs it.
        self.conv = nn.Conv2d(in_channels, out_channels, 1, stride=stride,
                              padding=0, bias=False, groups=groups)
        self.bn = get_norm(bn_norm, out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Apply conv -> norm -> ReLU and return the result."""
        return self.relu(self.bn(self.conv(x)))
def random_rotation():
    """Return a uniformly random 3x3 rotation matrix as a numpy array.

    Euler angles are drawn via my_rand: phi, psi in [0, 2*pi) and
    theta in [0, pi), then combined in the standard z-x-z convention.
    """
    # NOTE: the original bound a local named `range` (shadowing the builtin)
    # that always equaled 1; the sampled intervals are unchanged here.
    phi = my_rand(0, 2 * math.pi)
    theta = my_rand(0, math.pi)
    psi = my_rand(0, 2 * math.pi)
    cph, sph = math.cos(phi), math.sin(phi)
    cth, sth = math.cos(theta), math.sin(theta)
    cps, sps = math.cos(psi), math.sin(psi)
    return np.array([
        [cps * cph - cth * sph * sps,
         cps * sph + cth * cph * sps,
         sps * sth],
        [-sps * cph - cth * sph * cps,
         -sps * sph + cth * cph * cps,
         cps * sth],
        [sth * sph,
         -sth * cph,
         cth],
    ])
@pytest.mark.parametrize('seed', range(3))
@pytest.mark.parametrize('monotonic_cst', (MonotonicConstraint.NO_CST, MonotonicConstraint.POS, MonotonicConstraint.NEG))
def test_nodes_values(monotonic_cst, seed):
    """Grow a tree under a monotonic constraint and check node/leaf values.

    NOTE(review): the decorators were garbled to bare `.parametrize` in the
    original — restored as `@pytest.mark.parametrize` here.
    """
    rng = np.random.RandomState(seed)
    n_samples = 1000
    n_features = 1
    X_binned = rng.randint(0, 255, size=(n_samples, n_features), dtype=np.uint8)
    X_binned = np.asfortranarray(X_binned)
    gradients = rng.normal(size=n_samples).astype(G_H_DTYPE)
    # Constant hessians (shape 1) trigger the optimized code path.
    hessians = np.ones(shape=1, dtype=G_H_DTYPE)
    grower = TreeGrower(X_binned, gradients, hessians,
                        monotonic_cst=[monotonic_cst], shrinkage=0.1)
    grower.grow()
    # Undo shrinkage so leaf values compare directly against the bounds.
    for leave in grower.finalized_leaves:
        leave.value /= grower.shrinkage
    predictor = grower.make_predictor(
        binning_thresholds=np.zeros((X_binned.shape[1], X_binned.max() + 1)))
    assert_children_values_monotonic(predictor, monotonic_cst)
    assert_children_values_bounded(grower, monotonic_cst)
    assert_leaves_values_monotonic(predictor, monotonic_cst)
def generate_content(index: int, task_id: str, base_filename: str, num_files: int) -> str:
    """Build the text content for file *index* of a chained file-reading task.

    The first file announces the task id, intermediate files point to the
    next file, and the final file instructs the agent to write and shut down.
    """
    next_pointer = f'Read the file {base_filename}{index + 1}.txt'
    if index == 1:
        return f'This task_id is {task_id}\n' + next_pointer
    if index != num_files:
        return next_pointer
    return 'Write the task_id into the file output.txt\nShutdown'
def eval_success(result_file) -> list:
    """Return the 'success' column of a results CSV as a plain Python list."""
    results = pd.read_csv(result_file)
    return results['success'].tolist()
def train(model, x_train, y_train, batch_size, optimizer):
model.train()
total_loss = 0
for idx in DataLoader(range(y_train.size(0)), batch_size, shuffle=True):
optimizer.zero_grad()
loss = F.cross_entropy(model(x_train[idx]), y_train[idx])
loss.backward()
optimizer.step()
total_loss += (float(loss) * idx.numel())
return (total_loss / y_train.size(0)) |
def get_train_val_split(train_dataset, val_split=0.2):
    """Split a dataset into train/val subsets, stratified per class.

    For each class, a random val_split fraction of its indices goes to the
    validation set; the rest stay in training. Both returned datasets are
    deep copies of the input, subsampled via subsample_dataset.
    """
    val_dataset = deepcopy(train_dataset)
    train_dataset = deepcopy(train_dataset)
    train_classes = np.unique(train_dataset.targets)
    train_idxs = []
    val_idxs = []
    for cls in train_classes:
        cls_idxs = np.where(train_dataset.targets == cls)[0]
        v_ = np.random.choice(cls_idxs, replace=False,
                              size=(int(val_split * len(cls_idxs)),))
        # Set membership is O(1); the original scanned the numpy array for
        # every candidate index (accidental O(n*m) per class).
        chosen = set(v_.tolist())
        t_ = [x for x in cls_idxs if x not in chosen]
        train_idxs.extend(t_)
        val_idxs.extend(v_)
    train_dataset = subsample_dataset(train_dataset, train_idxs)
    val_dataset = subsample_dataset(val_dataset, val_idxs)
    return (train_dataset, val_dataset)
class SineLR(lr_scheduler._LRScheduler):
    """LR scheduler oscillating sinusoidally between lr_min and lr_max.

    The rate follows half a sine period over `step_size` calls to get_lr,
    then resets, producing repeated lr_min -> lr_max -> lr_min arcs.
    """
    def __init__(self, optimizer, lr_min, lr_max, step_size):
        self.lr_min = lr_min
        self.lr_max = lr_max
        self.step_size = step_size
        self.iteration = 0
        # last_epoch=-1 makes the base class initialize lrs immediately
        # (which also triggers one get_lr call during construction).
        super().__init__(optimizer, (- 1))
    def get_lr(self):
        # sin argument sweeps 0..pi as iteration goes 0..step_size.
        lr = (self.lr_min + ((self.lr_max - self.lr_min) * sin(((self.iteration / self.step_size) * pi))))
        # NOTE(review): get_lr mutates state (iteration counter) as a side
        # effect; it is assumed to be called exactly once per step.
        self.iteration += 1
        if (self.iteration == self.step_size):
            self.iteration = 0
        # Same lr for every param group (base_lr values are ignored).
        return [lr for base_lr in self.base_lrs]
class EvalBoxes():
    """Container of evaluation boxes, keyed by sample token.

    NOTE(review): the `@property` / `@classmethod` decorators were stripped
    in the original (e.g. `set(self.sample_tokens)` would have iterated a
    bound method) — restored here.
    """

    def __init__(self):
        # sample_token -> list of boxes.
        self.boxes = defaultdict(list)

    def __repr__(self):
        return 'EvalBoxes with {} boxes across {} samples'.format(len(self.all), len(self.sample_tokens))

    def __getitem__(self, item) -> List[EvalBoxType]:
        return self.boxes[item]

    def __eq__(self, other):
        """Equal iff both hold the same boxes, in order, per sample token."""
        if (not (set(self.sample_tokens) == set(other.sample_tokens))):
            return False
        for token in self.sample_tokens:
            if (not (len(self[token]) == len(other[token]))):
                return False
            for (box1, box2) in zip(self[token], other[token]):
                if (box1 != box2):
                    return False
        return True

    def __len__(self):
        return len(self.boxes)

    @property
    def all(self) -> List[EvalBoxType]:
        """All boxes across all samples, concatenated in token order."""
        ab = []
        for sample_token in self.sample_tokens:
            ab.extend(self[sample_token])
        return ab

    @property
    def sample_tokens(self) -> List[str]:
        """The sample tokens currently holding boxes, in insertion order."""
        return list(self.boxes.keys())

    def add_boxes(self, sample_token: str, boxes: List[EvalBoxType]) -> None:
        """Append *boxes* to the given sample token's list."""
        self.boxes[sample_token].extend(boxes)

    def serialize(self) -> dict:
        """Serialize to token -> list of serialized boxes."""
        return {key: [box.serialize() for box in boxes] for (key, boxes) in self.boxes.items()}

    @classmethod
    def deserialize(cls, content: dict, box_cls):
        """Rebuild an EvalBoxes from serialize() output using box_cls.deserialize."""
        eb = cls()
        for (sample_token, boxes) in content.items():
            eb.add_boxes(sample_token, [box_cls.deserialize(box) for box in boxes])
        return eb
def build_roi_heads(cfg, in_channels):
    """Assemble the ROI heads enabled in *cfg*; [] when none apply.

    RetinaNet is single-stage, so no ROI heads are built in that mode.
    """
    if cfg.MODEL.RETINANET_ON:
        return []
    heads = []
    if not cfg.MODEL.RPN_ONLY:
        heads.append(('box', build_roi_box_head(cfg, in_channels)))
    if cfg.MODEL.MASK_ON:
        heads.append(('mask', build_roi_mask_head(cfg, in_channels)))
    if cfg.MODEL.KEYPOINT_ON:
        heads.append(('keypoint', build_roi_keypoint_head(cfg, in_channels)))
    # Wrap in a combined module only when at least one head was built.
    if heads:
        heads = CombinedROIHeads(cfg, heads)
    return heads
def check_nsp(dist, attr, value):
    """Validate declared namespace packages against the distribution contents."""
    ns_packages = value
    assert_string_list(dist, attr, ns_packages)
    for nsp in ns_packages:
        # Every declared namespace package must actually ship something.
        if not dist.has_contents_for(nsp):
            raise DistutilsSetupError(('Distribution contains no modules or packages for ' + ('namespace package %r' % nsp)))
        parent = nsp.rpartition('.')[0]
        # A child namespace implies its parent should be declared too.
        if parent and parent not in ns_packages:
            distutils.log.warn('WARNING: %r is declared as a package namespace, but %r is not: please correct this in setup.py', nsp, parent)
def set_default_fp_sort(ebits, sbits, ctx=None):
    """Set the module-wide default floating-point sort sizes.

    `ctx` is accepted for API symmetry with sibling setters but unused here.
    """
    global _dflt_fpsort_ebits, _dflt_fpsort_sbits
    _dflt_fpsort_ebits, _dflt_fpsort_sbits = ebits, sbits
def is_image_file(filename):
    """Return True if *filename* ends with a known image extension (case-insensitive)."""
    lowered = filename.lower()
    for ext in IMG_EXTENSIONS:
        if lowered.endswith(ext):
            return True
    return False
class _CountryNameDict(LazyDict):
    """Lazily-populated mapping of ISO-3166 country code -> country name."""

    def _fill(self):
        """Parse iso3166.tab (UTF-8, '#' comments, 'CODE<ws>Name' rows) into self.data."""
        mapping = {}
        zone_tab = open_resource('iso3166.tab')
        try:
            for raw_line in zone_tab.readlines():
                text = raw_line.decode('UTF-8')
                if text.startswith('#'):
                    continue
                code, name = text.split(None, 1)
                mapping[code] = name.strip()
            self.data = mapping
        finally:
            zone_tab.close()
def register_Ns3Icmpv4L4Protocol_methods(root_module, cls):
    """Register constructors, methods and attributes of ns3::Icmpv4L4Protocol
    on the binding class (generated pybindgen registration code).
    """
    # Constructors: copy constructor and default constructor.
    cls.add_constructor([param('ns3::Icmpv4L4Protocol const &', 'arg0')])
    cls.add_constructor([])
    # Down-target callbacks (IPv4 and IPv6 variants).
    cls.add_method('GetDownTarget', 'ns3::IpL4Protocol::DownTargetCallback', [], is_const=True, is_virtual=True)
    cls.add_method('GetDownTarget6', 'ns3::IpL4Protocol::DownTargetCallback6', [], is_const=True, is_virtual=True)
    cls.add_method('GetProtocolNumber', 'int', [], is_const=True, is_virtual=True)
    cls.add_method('GetStaticProtocolNumber', 'uint16_t', [], is_static=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Receive is overloaded for IPv4 and IPv6 headers.
    cls.add_method('Receive', 'ns3::IpL4Protocol::RxStatus', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::Ipv4Interface >', 'incomingInterface')], is_virtual=True)
    cls.add_method('Receive', 'ns3::IpL4Protocol::RxStatus', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv6Header const &', 'header'), param('ns3::Ptr< ns3::Ipv6Interface >', 'incomingInterface')], is_virtual=True)
    # ICMP error-message senders.
    cls.add_method('SendDestUnreachFragNeeded', 'void', [param('ns3::Ipv4Header', 'header'), param('ns3::Ptr< ns3::Packet const >', 'orgData'), param('uint16_t', 'nextHopMtu')])
    cls.add_method('SendDestUnreachPort', 'void', [param('ns3::Ipv4Header', 'header'), param('ns3::Ptr< ns3::Packet const >', 'orgData')])
    cls.add_method('SendTimeExceededTtl', 'void', [param('ns3::Ipv4Header', 'header'), param('ns3::Ptr< ns3::Packet const >', 'orgData'), param('bool', 'isFragment')])
    cls.add_method('SetDownTarget', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::Ipv4Address, ns3::Ipv4Address, unsigned char, ns3::Ptr< ns3::Ipv4Route >, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True)
    cls.add_method('SetDownTarget6', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::Ipv6Address, ns3::Ipv6Address, unsigned char, ns3::Ptr< ns3::Ipv6Route >, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], is_virtual=True)
    cls.add_method('SetNode', 'void', [param('ns3::Ptr< ns3::Node >', 'node')])
    cls.add_static_attribute('PROT_NUMBER', 'uint8_t const', is_const=True)
    # Protected/private overrides from Object lifecycle.
    cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True)
    cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True)
    return
def test_method_jit():
    """A JIT-compiled method should add the instance's offset elementwise."""
    data = np.random.rand(20)
    instance = MyTestClass(10)
    expected = data + 10
    assert np.allclose(instance.method_jit(data), expected)
class TwoAFCDataset(Dataset):
    """Two-alternative-forced-choice dataset: (reference, left, right, p, id).

    Rows come from `<root_dir>/data.csv`, filtered to entries with at least
    6 votes and to the requested split.
    """

    def __init__(self, root_dir: str, split: str = 'train', load_size: int = 224,
                 interpolation: transforms.InterpolationMode = transforms.InterpolationMode.BICUBIC,
                 preprocess: str = 'DEFAULT', **kwargs):
        self.root_dir = root_dir
        self.csv = pd.read_csv(os.path.join(self.root_dir, 'data.csv'))
        # Keep only judgments with enough human votes.
        self.csv = self.csv[(self.csv['votes'] >= 6)]
        self.split = split
        self.load_size = load_size
        self.interpolation = interpolation
        self.preprocess_fn = get_preprocess_fn(preprocess, self.load_size, self.interpolation)
        if self.split in ('train', 'val'):
            self.csv = self.csv[(self.csv['split'] == split)]
        elif split == 'test_imagenet':
            self.csv = self.csv[(self.csv['split'] == 'test')]
            self.csv = self.csv[(self.csv['is_imagenet'] == True)]
        elif split == 'test_no_imagenet':
            self.csv = self.csv[(self.csv['split'] == 'test')]
            self.csv = self.csv[(self.csv['is_imagenet'] == False)]
        else:
            raise ValueError(f'Invalid split: {split}')

    def __len__(self):
        return len(self.csv)

    def __getitem__(self, idx):
        # Columns are addressed positionally: 0=id, 2=p, 4=ref, 5=left, 6=right.
        sample_id = self.csv.iloc[idx, 0]
        p = self.csv.iloc[idx, 2].astype(np.float32)

        def _load_image(col):
            path = os.path.join(self.root_dir, self.csv.iloc[idx, col])
            return self.preprocess_fn(Image.open(path))

        img_ref = _load_image(4)
        img_left = _load_image(5)
        img_right = _load_image(6)
        return (img_ref, img_left, img_right, p, sample_id)
@pytest.mark.parametrize('sampling_strategy, sampling_method', [({10: 10}, 'under-sampling'), ({10: 10}, 'over-sampling'), ([10], 'clean-sampling')])
def test_sampling_strategy_class_target_unknown(sampling_strategy, sampling_method):
    """check_sampling_strategy must reject target classes absent from y.

    NOTE(review): the decorator was garbled to a bare `.parametrize` in the
    original — restored as `@pytest.mark.parametrize` here.
    """
    y = np.array([1] * 50 + [2] * 100 + [3] * 25)
    with pytest.raises(ValueError, match='are not present in the data.'):
        check_sampling_strategy(sampling_strategy, y, sampling_method)
def load_hparam_str(hp_str):
    """Parse an hparam YAML string by round-tripping it through a temp file.

    HParam only accepts a path, so the string is written to a uniquely named
    temporary file which is always removed, even if parsing raises. The
    original used a fixed filename ('temp-restore.yaml'), which raced between
    concurrent callers and leaked the file on error.
    """
    import tempfile  # local import: no top-level import block is visible in this chunk
    tmp = tempfile.NamedTemporaryFile('w', suffix='.yaml', delete=False)
    try:
        tmp.write(hp_str)
        tmp.close()  # flush/close before HParam reads it (required on Windows)
        return HParam(tmp.name)
    finally:
        tmp.close()
        os.remove(tmp.name)
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=None):
    """Move *net* to GPU(s) if requested, then initialize its weights.

    Args:
        net: the network to initialize (returned, possibly wrapped in
            DataParallel when several GPU ids are given).
        init_type / init_gain: forwarded to init_weights.
        gpu_ids: list of CUDA device ids; empty/None keeps the net on CPU.
            (The original used a mutable default `[]`; `None` is equivalent
            for callers and avoids the shared-default pitfall.)
    """
    gpu_ids = gpu_ids or []
    if gpu_ids:
        assert torch.cuda.is_available()
        net.to(gpu_ids[0])
        if len(gpu_ids) > 1:
            net = torch.nn.DataParallel(net, gpu_ids)
    init_weights(net, init_type, init_gain=init_gain)
    return net
@dataclass
class DataTrainingArguments:
    """Arguments controlling which data is used for training and evaluation.

    NOTE(review): the `@dataclass` decorator was stripped in the original —
    without it the `field(...)` objects are plain class attributes and
    `__post_init__` never runs. Restored here.
    """
    dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
    validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'})
    train_ref_file: Optional[str] = field(default=None, metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'})
    validation_ref_file: Optional[str] = field(default=None, metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'})
    overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
    validation_split_percentage: Optional[int] = field(default=5, metadata={'help': "The percentage of the train set used as validation set in case there's no validation split"})
    max_seq_length: Optional[int] = field(default=None, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated. Default to the max input length of the model.'})
    preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
    mlm_probability: float = field(default=0.15, metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'})
    pad_to_max_length: bool = field(default=False, metadata={'help': 'Whether to pad all samples to `max_seq_length`. If False, will pad the samples dynamically when batching to the maximum length in the batch.'})

    def __post_init__(self):
        """Validate that a data source was given and file extensions are supported."""
        if self.dataset_name is None and self.train_file is None and self.validation_file is None:
            raise ValueError('Need either a dataset name or a training/validation file.')
        if self.train_file is not None:
            extension = self.train_file.split('.')[-1]
            assert extension in ['csv', 'json', 'txt'], '`train_file` should be a csv, a json or a txt file.'
        if self.validation_file is not None:
            extension = self.validation_file.split('.')[-1]
            assert extension in ['csv', 'json', 'txt'], '`validation_file` should be a csv, a json or a txt file.'
@dataclass
class Stats:
    """Per-step training statistics bundle.

    NOTE(review): the original decorator line was garbled to a bare
    `.dataclass`; restored here as a plain stdlib dataclass. It may
    originally have been e.g. `flax.struct.dataclass` — confirm against
    the upstream project before relying on pytree behavior.
    """
    loss: float
    losses: float
    weight_l2: float
    psnr: float
    psnrs: float
    grad_norm: float
    grad_abs_max: float
    grad_norm_clipped: float
class Tracker():
    """Tracks per-epoch loss breakdowns and logs them to file + stdout.

    Writes a human-readable table via `logging`, persists the raw loss
    history to `log.pth`, and appends timing info to `time.txt`.
    """
    def __init__(self, log_dir, n_train_batch):
        # log_dir is expected to be a pathlib.Path (uses the `/` operator below).
        self.log_dir = log_dir
        self.n_train_batch = n_train_batch
        # loss name -> list of per-epoch averaged values.
        self.loss = defaultdict(list)
        logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S', handlers=[logging.FileHandler((log_dir / 'log.txt')), logging.StreamHandler(sys.stdout)])
        # Print the column header only once, on the first log() call.
        self.print_header = True
    def log(self, epoch, loss_breakdown, epoch_time, time_elapsed):
        """Record one epoch: average each loss over the train batches and emit a log row."""
        for (loss_name, loss_val) in loss_breakdown.items():
            # loss_val is assumed to be summed over batches — TODO confirm with callers.
            self.loss[loss_name].append((loss_val / self.n_train_batch))
        if self.print_header:
            logging.info(((' ' * 7) + ' '.join((f'{key:>12}' for key in sorted(self.loss)))))
            self.print_header = False
        # One row per epoch; columns sorted by loss name to match the header.
        logging.info((f'[{epoch:4}] ' + ' '.join((f'{val[(- 1)]:12.4f}' for (_, val) in sorted(self.loss.items())))))
        torch.save(self.loss, str((self.log_dir / 'log.pth')))
        with (self.log_dir / 'time.txt').open('a') as f:
            print(epoch, epoch_time, time_elapsed, file=f)
def test_numpytype_int32_parameter():
    """Parsing the string form of a NumpyType must round-trip to an equal string."""
    original = NumpyType('int32', {'__array__': 'Something'})
    round_tripped = parser.parse(str(original))
    assert str(round_tripped) == str(original)
def getattribute_from_module(module, attr):
    """Resolve *attr* on *module*; tuples are resolved elementwise.

    Falls back to looking the attribute up on the top-level `transformers`
    module when it is missing from *module*. None passes through unchanged.
    """
    if attr is None:
        return None
    if isinstance(attr, tuple):
        return tuple(getattribute_from_module(module, member) for member in attr)
    try:
        return getattr(module, attr)
    except AttributeError:
        # Not on this module — retry against the transformers root package.
        transformers_module = importlib.import_module('transformers')
        return getattribute_from_module(transformers_module, attr)
class BinarizedF(Function):
def forward(ctx, input, threshold):
ctx.save_for_backward(input, threshold)
a = torch.ones_like(input).cuda()
b = torch.zeros_like(input).cuda()
output = torch.where((input >= threshold), a, b)
return output
def backward(ctx, grad_output):
(input, threshold) = ctx.saved_tensors
grad_input = grad_weight = None
if ctx.needs_input_grad[0]:
grad_input = (0.2 * grad_output)
if ctx.needs_input_grad[1]:
grad_weight = (- grad_output)
return (grad_input, grad_weight) |
def show_progress(iterable, total=None, desc=None, silent=False, start_delay=10):
    """Wrap *iterable* in a ShowProgress reporter (thin factory wrapper).

    Arguments are forwarded positionally to ShowProgress; see that class
    for their semantics.
    """
    return ShowProgress(iterable, total, desc, silent, start_delay)
def parse_keras_history(logs):
    """Normalize Keras training logs into (logs_dict, per-epoch rows, final row).

    Accepts either a Keras History object (with .history/.epoch) or a list of
    per-epoch metric dicts. Metric keys are renamed: 'val_x' -> 'Validation X',
    other non-epoch keys -> 'Train X', and 'epoch' -> 'Epoch'.
    """
    if hasattr(logs, 'history'):
        # History object path: require .epoch to align rows.
        if not hasattr(logs, 'epoch'):
            return (None, [], {})
        logs.history['epoch'] = logs.epoch
        logs = logs.history
    else:
        # List-of-dicts path: pivot into key -> list-of-values.
        logs = {log_key: [entry[log_key] for entry in logs] for log_key in logs[0]}
    lines = []
    for row_index in range(len(logs['epoch'])):
        values = {}
        for key, series in logs.items():
            value = series[row_index]
            if key.startswith('val_'):
                key = 'validation_' + key[4:]
            elif key != 'epoch':
                key = 'train_' + key
            pretty_name = ' '.join(part.capitalize() for part in key.split('_'))
            values[pretty_name] = value
        lines.append(values)
    eval_results = lines[-1]
    return (logs, lines, eval_results)
def load_tf2_state_dict_in_pytorch_model(pt_model, tf_state_dict, allow_missing_keys=False, output_loading_info=False):
    """Load a dict of TF 2.0 weights into a PyTorch model.

    TF weight names are converted to PyTorch names, transposed where the
    layouts differ, and fed to `load_state_dict(strict=False)`. Missing /
    unexpected keys are logged (filtered by the model's ignore patterns).
    Returns the model, plus a loading-info dict when requested.
    """
    import torch
    # pt name -> tensor that will be fed to load_state_dict.
    new_pt_params_dict = {}
    current_pt_params_dict = dict(pt_model.named_parameters())
    # If the PT model is a bare base model, TF names carrying the base-model
    # prefix must have it stripped to match.
    start_prefix_to_remove = ''
    if (not any((s.startswith(pt_model.base_model_prefix) for s in current_pt_params_dict.keys()))):
        start_prefix_to_remove = (pt_model.base_model_prefix + '.')
    # Build pt-name -> (tf weight, needs-transpose) map.
    tf_weights_map = {}
    for (name, tf_weight) in tf_state_dict.items():
        (pt_name, transpose) = convert_tf_weight_name_to_pt_weight_name(name, start_prefix_to_remove=start_prefix_to_remove, tf_weight_shape=tf_weight.shape)
        tf_weights_map[pt_name] = (tf_weight, transpose)
    all_tf_weights = set(tf_weights_map.keys())
    # data_ptr map lets tied PT parameters reuse the already-converted tensor.
    loaded_pt_weights_data_ptr = {}
    missing_keys_pt = []
    for (pt_weight_name, pt_weight) in current_pt_params_dict.items():
        if (pt_weight.data_ptr() in loaded_pt_weights_data_ptr):
            # Tied weight: alias the tensor loaded for its twin.
            new_pt_params_dict[pt_weight_name] = loaded_pt_weights_data_ptr[pt_weight.data_ptr()]
            continue
        if (pt_weight_name not in tf_weights_map):
            if allow_missing_keys:
                missing_keys_pt.append(pt_weight_name)
                continue
            raise AttributeError(f'{pt_weight_name} not found in TF 2.0 model')
        (array, transpose) = tf_weights_map[pt_weight_name]
        # Reorder axes to the PT layout if needed (e.g. conv/linear kernels).
        array = apply_transpose(transpose, array, pt_weight.shape, pt_to_tf=False)
        if numpy.isscalar(array):
            array = numpy.array(array)
        if ((not is_torch_tensor(array)) and (not is_numpy_array(array))):
            # Still a TF tensor: materialize to numpy first.
            array = array.numpy()
        if is_numpy_array(array):
            array = torch.from_numpy(array)
        new_pt_params_dict[pt_weight_name] = array
        loaded_pt_weights_data_ptr[pt_weight.data_ptr()] = array
        all_tf_weights.discard(pt_weight_name)
    (missing_keys, unexpected_keys) = pt_model.load_state_dict(new_pt_params_dict, strict=False)
    missing_keys += missing_keys_pt
    # Drop keys the model declares as expected-to-be-missing/unexpected.
    if (pt_model._keys_to_ignore_on_load_missing is not None):
        for pat in pt_model._keys_to_ignore_on_load_missing:
            missing_keys = [k for k in missing_keys if (re.search(pat, k) is None)]
    if (pt_model._keys_to_ignore_on_load_unexpected is not None):
        for pat in pt_model._keys_to_ignore_on_load_unexpected:
            unexpected_keys = [k for k in unexpected_keys if (re.search(pat, k) is None)]
    if (len(unexpected_keys) > 0):
        logger.warning(f'''Some weights of the TF 2.0 model were not used when initializing the PyTorch model {pt_model.__class__.__name__}: {unexpected_keys}
- This IS expected if you are initializing {pt_model.__class__.__name__} from a TF 2.0 model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a TFBertForPreTraining model).
- This IS NOT expected if you are initializing {pt_model.__class__.__name__} from a TF 2.0 model that you expect to be exactly identical (e.g. initializing a BertForSequenceClassification model from a TFBertForSequenceClassification model).''')
    else:
        logger.warning(f'''All TF 2.0 model weights were used when initializing {pt_model.__class__.__name__}.
''')
    if (len(missing_keys) > 0):
        logger.warning(f'''Some weights of {pt_model.__class__.__name__} were not initialized from the TF 2.0 model and are newly initialized: {missing_keys}
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.''')
    else:
        logger.warning(f'''All the weights of {pt_model.__class__.__name__} were initialized from the TF 2.0 model.
If your task is similar to the task the model of the checkpoint was trained on, you can already use {pt_model.__class__.__name__} for predictions without further training.''')
    logger.info(f'Weights or buffers not loaded from TF 2.0 model: {all_tf_weights}')
    if output_loading_info:
        loading_info = {'missing_keys': missing_keys, 'unexpected_keys': unexpected_keys}
        return (pt_model, loading_info)
    return pt_model
class CommonMetricPrinter(EventWriter):
    """EventWriter that formats training metrics into a multi-line log record.

    Pulls iteration timings, losses, lr and accuracy-like metrics from the
    global event storage and emits one formatted block per write() call.
    """
    def __init__(self, yaml, max_iter):
        # max_iter: iterations per epoch (used for ETA and iter-within-epoch).
        self.max_iter = max_iter
        # yaml: path of the config file; its basename is shown in each log line.
        self.yaml = yaml
        logger = logging.getLogger('Training')
        logger.setLevel(logging.DEBUG)
        # Avoid double-printing through the root logger's handlers.
        logger.propagate = False
        plain_formatter = logging.Formatter('[%(asctime)s] %(message)s', datefmt='%m-%d %H:%M:%S')
        self.logger = setup_logging(self.yaml, logger, local_plain_formatter=plain_formatter)
    def write(self, epoch, max_epoch, **kwargs):
        """Format and emit the current iteration's metrics."""
        storage = get_event_storage()
        iteration = storage.iter
        (data_time, time, metrics) = (None, None, {})
        eta_string = 'N/A'
        try:
            # Smoothed timings; KeyError until enough iterations are recorded.
            data_time = storage.history('data_time').avg(20)
            time = storage.history('time').global_avg()
            if (max_epoch is not None):
                # Epoch mode: ETA over all remaining iterations of all epochs.
                eta_iter = (((max_epoch * self.max_iter) - iteration) - 1)
                iteration = (iteration % self.max_iter)
            else:
                eta_iter = (self.max_iter - iteration)
            eta_seconds = (time * eta_iter)
            eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
            # Collect accuracy-style scalars for the optional metrics line.
            for (k, v) in storage.latest().items():
                if ('acc' in k):
                    metrics[k] = v
        except KeyError:
            # Timing histories not available yet — leave defaults.
            pass
        try:
            lr = '{:.6f}'.format(storage.history('lr').latest())
        except KeyError:
            lr = 'N/A'
        if torch.cuda.is_available():
            max_mem_mb = ((torch.cuda.max_memory_allocated() / 1024.0) / 1024.0)
        else:
            max_mem_mb = None
        # Per-loss medians, excluding the aggregate 'total_loss' entry.
        losses = ['{}: {:.4f}'.format(k, v.median(20)) for (k, v) in storage.histories().items() if (('loss' in k) and ('total' not in k))]
        # Only total_loss present -> the per-loss list would be redundant.
        skip_losses = (len(losses) == 1)
        lines = '|-[{yaml}]-{epoch}[iter: {iter}/{max_iter}]-[lr: {lr}]-[eta: {eta}]\n |-[{memory}]-[{time}]-[{data_time}] \n |-[total loss: {total_loss}]{losses}\n'.format(yaml=(self.yaml.split('/')[(- 1)] + '.yaml'), eta=eta_string, iter=(iteration + 1), epoch=('' if (epoch is None) else '[epoch: {}/{}]-'.format(epoch, max_epoch)), max_iter=self.max_iter, lr=lr, memory=('max_mem: {:.0f}M'.format(max_mem_mb) if (max_mem_mb is not None) else ''), time=('iter_time: {:.4f}'.format(time) if (time is not None) else 'iter_time: N/A'), data_time=('data_time: {:.4f}'.format(data_time) if (data_time is not None) else ''), total_loss='{:.4f}'.format(storage.histories()['total_loss'].median(20)), losses=('-[losses]-[{}]'.format(' '.join(losses)) if (not skip_losses) else ''))
        if len(metrics):
            lines += ' {metrics}'.format(metrics=('|' + ''.join(['-[{}: {:.4f}]'.format(k, v) for (k, v) in metrics.items()])))
        else:
            # Strip the trailing newline when there is no metrics line.
            lines = lines[:(- 1)]
        logging_rank(lines, self.logger)
_module()  # NOTE(review): looks like a stripped registry decorator (e.g. `@COMPONENTS.register_module()`) — confirm against upstream
class IndexNetEncoder(nn.Module):
    """IndexNet matting encoder: a MobileNetV2-style stack of inverted
    residual stages where strided downsampling is replaced by learned index
    blocks plus 2x2 average pooling, so each stage's decoder index map can be
    handed to the decoder for guided upsampling.

    Args:
        in_channels (int): Channels of the input image/feature.
        out_stride (int): Output stride, 16 or 32.
        width_mult (float): Channel width multiplier applied to every stage.
        index_mode (str): 'holistic', 'o2o' or 'm2o' index-block flavour.
        aspp (bool): Use an ASPP head instead of a plain 1x1 conv.
        norm_cfg (dict): Normalization layer config.
        freeze_bn (bool): Put all BatchNorm layers into eval mode.
        use_nonlinear (bool): Forwarded to the index blocks.
        use_context (bool): Forwarded to the index blocks.
    """
    def __init__(self, in_channels, out_stride=32, width_mult=1, index_mode='m2o', aspp=True, norm_cfg=dict(type='BN'), freeze_bn=False, use_nonlinear=True, use_context=True):
        super().__init__()
        if (out_stride not in [16, 32]):
            raise ValueError(f'out_stride must 16 or 32, got {out_stride}')
        self.out_stride = out_stride
        self.width_mult = width_mult
        # Select the index-block implementation from the requested mode.
        if (index_mode == 'holistic'):
            index_block = HolisticIndexBlock
        elif (index_mode in ('o2o', 'm2o')):
            index_block = partial(DepthwiseIndexBlock, mode=index_mode)
        else:
            raise NameError('Unknown index block mode {}'.format(index_mode))
        initial_channels = 32
        # Each entry: [expand_ratio, in_channels, out_channels, num_blocks, stride, dilation].
        inverted_residual_setting = [[1, initial_channels, 16, 1, 1, 1], [6, 16, 24, 2, 2, 1], [6, 24, 32, 3, 2, 1], [6, 32, 64, 4, 2, 1], [6, 64, 96, 3, 1, 1], [6, 96, 160, 3, 2, 1], [6, 160, 320, 1, 1, 1]]
        # Scale all stage widths by the multiplier.
        initial_channels = int((initial_channels * width_mult))
        for layer_setting in inverted_residual_setting:
            layer_setting[1] = int((layer_setting[1] * self.width_mult))
            layer_setting[2] = int((layer_setting[2] * self.width_mult))
        if (out_stride == 32):
            self.downsampled_layers = [0, 2, 3, 4, 6]
        else:
            # out_stride == 16: skip the final downsampling and dilate the
            # last two stages instead.
            self.downsampled_layers = [0, 2, 3, 4]
            inverted_residual_setting[5][5] = 2
            inverted_residual_setting[6][5] = 2
        self.layers = nn.ModuleList([ConvModule(in_channels, initial_channels, 3, padding=1, norm_cfg=norm_cfg, act_cfg=dict(type='ReLU6'))])
        for layer_setting in inverted_residual_setting:
            self.layers.append(self._make_layer(layer_setting, norm_cfg))
        if freeze_bn:
            self.freeze_bn()
        # One index block per downsampling stage, sized by that stage's input width.
        self.index_layers = nn.ModuleList()
        for layer in self.downsampled_layers:
            self.index_layers.append(index_block(inverted_residual_setting[layer][1], norm_cfg, use_context, use_nonlinear))
        self.avg_pool = nn.AvgPool2d(2, stride=2)
        if aspp:
            dilation = ((2, 4, 8) if (out_stride == 32) else (6, 12, 18))
            self.dconv = ASPP((320 * self.width_mult), 160, mid_channels=int((256 * self.width_mult)), dilations=dilation, norm_cfg=norm_cfg, act_cfg=dict(type='ReLU6'), separable_conv=True)
        else:
            self.dconv = ConvModule((320 * self.width_mult), 160, 1, norm_cfg=norm_cfg, act_cfg=dict(type='ReLU6'))
        self.out_channels = 160
    def _make_layer(self, layer_setting, norm_cfg):
        """Build one stage of inverted residual blocks.

        All blocks are created with stride 1 — spatial downsampling is done
        by the index blocks + avg pooling in forward(), not here.
        """
        (expand_ratio, in_channels, out_channels, num_blocks, stride, dilation) = layer_setting
        # The first block of a nominally strided stage uses a halved dilation.
        dilation0 = (max((dilation // 2), 1) if (stride == 2) else dilation)
        layers = [InvertedResidual(in_channels, out_channels, 1, dilation0, expand_ratio, norm_cfg)]
        in_channels = out_channels
        for _ in range(1, num_blocks):
            layers.append(InvertedResidual(in_channels, out_channels, 1, dilation, expand_ratio, norm_cfg, use_res_connect=True))
        return nn.Sequential(*layers)
    def freeze_bn(self):
        """Put every (Sync)BatchNorm module into eval mode."""
        for m in self.modules():
            if isinstance(m, (nn.BatchNorm2d, SyncBatchNorm)):
                m.eval()
    def init_weights(self, pretrained=None):
        """Load weights from a checkpoint path, or initialise in place
        (xavier for convs, constant 1 for BatchNorm)."""
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif (pretrained is None):
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    xavier_init(m)
                elif isinstance(m, nn.BatchNorm2d):
                    constant_init(m, 1)
    def forward(self, x):
        """Run the encoder.

        Returns:
            dict with 'out' (final features after self.dconv), 'shortcuts'
            (per-stage features for the decoder) and 'dec_idx_feat_list'
            (decoder index maps; None for stages that do not downsample).
        """
        dec_idx_feat_list = list()
        shortcuts = list()
        for (i, layer) in enumerate(self.layers):
            x = layer(x)
            if (i in self.downsampled_layers):
                (enc_idx_feat, dec_idx_feat) = self.index_layers[self.downsampled_layers.index(i)](x)
                # Gate features with the encoder index map, then downsample;
                # the x4 rescales the 2x2 average pool (sum-pool equivalent).
                x = (enc_idx_feat * x)
                shortcuts.append(x)
                dec_idx_feat_list.append(dec_idx_feat)
                x = (4 * self.avg_pool(x))
            elif (i != 7):
                # Last stage (i == 7) contributes no shortcut.
                shortcuts.append(x)
                dec_idx_feat_list.append(None)
        x = self.dconv(x)
        return {'out': x, 'shortcuts': shortcuts, 'dec_idx_feat_list': dec_idx_feat_list}
def Welchs_t_test(sample, full, alpha=0.05, axis=0, equal_var=False):
    """Two-sided t-test (Welch's when ``equal_var=False``) of `sample` vs `full`.

    Both inputs are xarray-like: they expose ``.values``, index along the
    time axis via ``[]``, and `full` has a ``.time`` coordinate — assumed
    shape is (time, ...space); TODO confirm with callers.

    Args:
        sample: ensemble subset, time along `axis`.
        full: full ensemble, time along `axis`.
        alpha: significance level used to build the mask.
        axis: time axis index (also used to take the first time slice).
        equal_var: forwarded to scipy; False selects Welch's test.

    Returns:
        (T, pval, mask_sig): flattened t-statistics, p-values, and a boolean
        mask that is True where the difference is NOT significant
        (pval > alpha) or where the first sample slice is exactly zero.
    """
    # Bug fix: `np.warnings` was an accidental alias removed in NumPy 1.25;
    # use the stdlib `warnings` module directly.
    import warnings
    warnings.filterwarnings('ignore')
    # Cells that are exactly zero in the first sample slice are forced
    # "not significant" below (e.g. masked/land points).
    mask = (sample[axis] == 0.0).values
    n_space = full[axis].size
    # Flatten all spatial dimensions so the test runs per grid cell.
    npfull = np.reshape(full.values, (full.time.size, n_space))
    npsample = np.reshape(sample.values, (sample.shape[axis], n_space))
    (T, pval) = scipy.stats.ttest_ind(npsample, npfull, axis=0, equal_var=equal_var, nan_policy='omit')
    pval = np.array(np.reshape(pval, n_space))
    T = np.reshape(T, n_space)
    mask_sig = (pval > alpha)
    mask_sig[mask] = True
    return (T, pval, mask_sig)
class actor(nn.Module):
    """MLP policy network for DDPG+HER: maps concatenated (observation, goal)
    vectors to actions bounded in [-action_max, action_max] via tanh scaling.
    """

    def __init__(self, env_params):
        super(actor, self).__init__()
        self.max_action = env_params['action_max']
        input_dim = env_params['obs'] + env_params['goal']
        # Three hidden layers of 256 units, then a linear action head.
        self.fc1 = nn.Linear(input_dim, 256)
        self.fc2 = nn.Linear(256, 256)
        self.fc3 = nn.Linear(256, 256)
        self.action_out = nn.Linear(256, env_params['action'])

    def forward(self, x):
        """Return bounded actions for a batch of (obs, goal) inputs."""
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        hidden = F.relu(self.fc3(hidden))
        # tanh squashes to (-1, 1); scale to the environment's action range.
        return self.max_action * torch.tanh(self.action_out(hidden))
class SimpleClient(CachingClient):
    """Toy client that "completes" a prompt by echoing its last n tokens.

    Useful for exercising the request/cache plumbing without a real model.
    """

    def __init__(self, cache_config: CacheConfig):
        super().__init__(cache_config=cache_config)

    def make_request(self, request: Request) -> RequestResult:
        """Serve a request for engine 'model1' through the cache.

        Raises:
            ValueError: if the requested engine is not 'model1'.
        """
        raw_request = {'engine': request.model_engine, 'prompt': request.prompt, 'n': request.num_completions}
        if (request.model_engine == 'model1'):

            def do_it():
                return self.invoke_model1(raw_request)

            cache_key = CachingClient.make_cache_key(raw_request, request)
            (response, cached) = self.cache.get(cache_key, wrap_request_time(do_it))
            # One single-token Sequence per completion; top_logprobs carries
            # the whole completion->logprob map for inspection.
            completions = [Sequence(text=text, logprob=logprob, tokens=[Token(text=text, logprob=logprob, top_logprobs=response['completions'])]) for (text, logprob) in response['completions'].items()]
        else:
            raise ValueError(f'Invalid model: {request.model}')
        # Bug fix: propagate the cache-hit flag computed above instead of
        # hard-coding cached=False.
        # NOTE(review): request_time=0 ignores the timing recorded by
        # wrap_request_time — presumably response['request_time'] was meant;
        # confirm against the RequestResult contract before changing.
        return RequestResult(success=True, cached=cached, request_time=0, request_datetime=response.get('request_datetime'), completions=completions, embedding=[])

    def invoke_model1(self, raw_request: Dict) -> Dict:
        """Return the last n prompt tokens, reversed, with logprobs 0, -1, -2, ..."""
        prompt_tokens: List[str] = SimpleTokenizer.tokenize_by_space(raw_request['prompt'])
        choices = reversed(prompt_tokens[(- raw_request['n']):])
        response = {'completions': dict(((text, (- i)) for (i, text) in enumerate(choices)))}
        return response
(('%s.visualize_utils.mmcv.imshow' % __name__))  # NOTE(review): appears to be a stripped `@mock.patch(...)` decorator — confirm upstream
(('%s.visualize_utils.mmcv.imwrite' % __name__))  # NOTE(review): same — would bind mock_imwrite/mock_imshow below
def test_imshow_text_char_boundary(mock_imshow, mock_imwrite):
    """imshow_text_char_boundary should both display and write the image
    exactly once (mmcv.imshow/imwrite are mocked)."""
    img = './tests/data/test_img1.jpg'
    # One quad per text instance / boundary, two character quads with chars.
    text_quads = [[0, 0, 1, 0, 1, 1, 0, 1]]
    boundaries = [[0, 0, 1, 0, 1, 1, 0, 1]]
    char_quads = [[[0, 0, 1, 0, 1, 1, 0, 1], [0, 0, 1, 0, 1, 1, 0, 1]]]
    chars = [['a', 'b']]
    # NOTE(review): a one-element tuple, not a bool — still truthy, but
    # `show = True` was probably intended; confirm.
    show = (True,)
    out_file = tempfile.NamedTemporaryFile().name
    visualize_utils.imshow_text_char_boundary(img, text_quads, boundaries, char_quads, chars, show=show, out_file=out_file)
    mock_imwrite.assert_called_once()
    mock_imshow.assert_called_once()
def get_home_dir():
    """Return the benchmark home directory.

    Honors the AUTO_MM_BENCH_HOME environment variable; defaults to
    ``~/.auto_mm_bench``. The result is user-expanded.
    """
    default_location = os.path.join('~', '.auto_mm_bench')
    return os.path.expanduser(os.environ.get('AUTO_MM_BENCH_HOME', default_location))
class Composer():
    """YAML composer stage: builds a tree of representation nodes
    (scalar/sequence/mapping) from parser events, resolving anchors/aliases
    and implicit tags along the way."""
    def __init__(self):
        # anchor name -> already-composed node, for alias resolution
        self.anchors = {}
    def check_node(self):
        """Return True if there is another document (root node) in the stream."""
        # Drop the StreamStartEvent if it is the first event in the stream.
        if self.check_event(StreamStartEvent):
            self.get_event()
        return (not self.check_event(StreamEndEvent))
    def get_node(self):
        """Compose and return the next document's root node, or None at stream end."""
        if (not self.check_event(StreamEndEvent)):
            return self.compose_document()
    def get_single_node(self):
        """Compose the root node of a stream expected to hold one document.

        Raises:
            ComposerError: if a second document is found in the stream.
        """
        # Drop the StreamStartEvent.
        self.get_event()
        document = None
        if (not self.check_event(StreamEndEvent)):
            document = self.compose_document()
        # Ensure the stream contains no more documents.
        if (not self.check_event(StreamEndEvent)):
            event = self.get_event()
            raise ComposerError('expected a single document in the stream', document.start_mark, 'but found another document', event.start_mark)
        # Drop the StreamEndEvent.
        self.get_event()
        return document
    def compose_document(self):
        """Compose one document; anchors are per-document and reset at the end."""
        # Drop the DocumentStartEvent.
        self.get_event()
        node = self.compose_node(None, None)
        # Drop the DocumentEndEvent.
        self.get_event()
        self.anchors = {}
        return node
    def compose_node(self, parent, index):
        """Compose a node of any kind under `parent` at `index` (used for
        tag resolution); aliases are looked up in self.anchors."""
        if self.check_event(AliasEvent):
            event = self.get_event()
            anchor = event.anchor
            if (anchor not in self.anchors):
                raise ComposerError(None, None, ('found undefined alias %r' % anchor), event.start_mark)
            return self.anchors[anchor]
        event = self.peek_event()
        anchor = event.anchor
        if (anchor is not None):
            if (anchor in self.anchors):
                raise ComposerError(('found duplicate anchor %r; first occurrence' % anchor), self.anchors[anchor].start_mark, 'second occurrence', event.start_mark)
        self.descend_resolver(parent, index)
        if self.check_event(ScalarEvent):
            node = self.compose_scalar_node(anchor)
        elif self.check_event(SequenceStartEvent):
            node = self.compose_sequence_node(anchor)
        elif self.check_event(MappingStartEvent):
            node = self.compose_mapping_node(anchor)
        self.ascend_resolver()
        return node
    def compose_scalar_node(self, anchor):
        """Build a ScalarNode from the next event, resolving implicit tags."""
        event = self.get_event()
        tag = event.tag
        if ((tag is None) or (tag == '!')):
            tag = self.resolve(ScalarNode, event.value, event.implicit)
        node = ScalarNode(tag, event.value, event.start_mark, event.end_mark, style=event.style)
        if (anchor is not None):
            self.anchors[anchor] = node
        return node
    def compose_sequence_node(self, anchor):
        """Build a SequenceNode, composing children until SequenceEndEvent."""
        start_event = self.get_event()
        tag = start_event.tag
        if ((tag is None) or (tag == '!')):
            tag = self.resolve(SequenceNode, None, start_event.implicit)
        node = SequenceNode(tag, [], start_event.start_mark, None, flow_style=start_event.flow_style)
        if (anchor is not None):
            # Register the (still-empty) node first so children may alias it.
            self.anchors[anchor] = node
        index = 0
        while (not self.check_event(SequenceEndEvent)):
            node.value.append(self.compose_node(node, index))
            index += 1
        end_event = self.get_event()
        node.end_mark = end_event.end_mark
        return node
    def compose_mapping_node(self, anchor):
        """Build a MappingNode, composing key/value pairs until MappingEndEvent."""
        start_event = self.get_event()
        tag = start_event.tag
        if ((tag is None) or (tag == '!')):
            tag = self.resolve(MappingNode, None, start_event.implicit)
        node = MappingNode(tag, [], start_event.start_mark, None, flow_style=start_event.flow_style)
        if (anchor is not None):
            # Register before composing children so they may alias this node.
            self.anchors[anchor] = node
        while (not self.check_event(MappingEndEvent)):
            item_key = self.compose_node(node, None)
            item_value = self.compose_node(node, item_key)
            node.value.append((item_key, item_value))
        end_event = self.get_event()
        node.end_mark = end_event.end_mark
        return node
def load_data_normalised(root_path):
    """Load the train/validation/test splits and standardise all three with
    the per-feature mean/std computed over train+validation combined.

    Returns:
        (train, validate, test) as z-scored arrays.
    """
    (train, validate, test) = load_data(root_path)
    # Statistics come from train+validation only, never from the test split.
    reference = np.vstack((train, validate))
    mu = reference.mean(axis=0)
    sigma = reference.std(axis=0)
    train = (train - mu) / sigma
    validate = (validate - mu) / sigma
    test = (test - mu) / sigma
    return (train, validate, test)
class ResidueReductionMap(Morphism):
    """Reduction morphism from a p-adic ring R onto a residue ring/field k of
    cardinality q^n, sending x to its residue to precision n."""
    def _create_(R, k):
        """Factory building the map as an element of Hom(R, k).

        NOTE(review): written as an unbound factory (no self/cls) — upstream
        this is presumably a @staticmethod; confirm the decorator wasn't dropped.

        Raises:
            RuntimeError: if card(k) is not a power of card(residue field of R).
        """
        if R.is_field():
            # Over a field the reduction is only partially defined
            # (elements of negative valuation have no residue).
            from sage.categories.sets_with_partial_maps import SetsWithPartialMaps
            cat = SetsWithPartialMaps()
        else:
            from sage.categories.rings import Rings
            cat = Rings()
        from sage.categories.homset import Hom
        kfield = R.residue_field()
        N = k.cardinality()
        q = kfield.cardinality()
        # Precision n such that card(k) == q^n.
        n = N.exact_log(q)
        if (N != (q ** n)):
            raise RuntimeError('N must be a power of q')
        H = Hom(R, k, cat)
        f = H.__make_element_class__(ResidueReductionMap)(H)
        f._n = n
        # _field records whether the codomain is the residue field itself.
        if (kfield is k):
            f._field = True
        else:
            f._field = False
        return f
    def is_surjective(self):
        """Reduction onto the residue ring is always surjective."""
        return True
    def is_injective(self):
        """Distinct elements can share a residue, so never injective."""
        return False
    def _call_(self, x):
        """Apply the map: residue of x to precision _n."""
        return x.residue(self._n, field=self._field, check_prec=self._field)
    def section(self):
        """A right inverse: the lifting map from k back to R."""
        return ResidueLiftingMap._create_(self.codomain(), self.domain())
    def _repr_type(self):
        return 'Reduction'
    def _richcmp_(self, other, op):
        """Maps compare equal iff they share domain and codomain."""
        if (type(self) is not type(other)):
            return NotImplemented
        return richcmp((self.domain(), self.codomain()), (other.domain(), other.codomain()), op)
def test_horizon_0_180_days(tmp_path: pathlib.Path):
    """Labeler with a 0-to-180-day time horizon over events of code 2.

    Builds (event, expected-label) pairs — True/False where a label can be
    computed, 'duplicate'/'out of range' markers otherwise — and delegates
    the actual checking to the shared run_test_for_labeler harness.
    """
    time_horizon = TimeHorizon(datetime.timedelta(days=0), datetime.timedelta(days=180))
    labeler = DummyLabeler([2], time_horizon)
    events_with_labels: EventsWithLabels = [(event((2015, 1, 3), 2, None), 'duplicate'), (event((2015, 1, 3), 1, None), 'duplicate'), (event((2015, 1, 3), 3, None), True), (event((2015, 10, 5), 1, None), False), (event((2018, 1, 3), 2, None), True), (event((2018, 3, 3), 1, None), True), (event((2018, 5, 3), 2, None), True), (event((2018, 5, 3, 11), 1, None), False), (event((2018, 5, 4), 1, None), False), (event((2018, 12, 4), 1, None), 'out of range')]
    run_test_for_labeler(labeler, events_with_labels, help_text='test_horizon_0_180_days')
def box_viz(df: pd.DataFrame, x: str, plot_width: int, plot_height: int, box: Box, y: Optional[str]=None, ttl_grps: Optional[int]=None) -> Panel:
    """Render a box plot of `x` (optionally grouped or binned by `y`) as a
    bokeh Panel.

    Args:
        df: pre-aggregated frame — assumes columns 'grp', 'q1', 'q2', 'q3',
            'lw', 'uw' and 'otlrs' (per-group outlier lists); TODO confirm
            schema against the caller.
        x: variable being plotted.
        plot_width: figure width in pixels.
        plot_height: figure height in pixels.
        box: box styling config (fill color).
        y: optional second variable; with ttl_grps it is categorical grouping,
           without it numeric binning.
        ttl_grps: total number of categorical groups (before truncation).
    """
    if (y and ttl_grps):
        width = 0.7
        grp_cnt_stats = {f'{x}_ttl': ttl_grps, f'{x}_shw': len(df)}
        title = (_make_title(grp_cnt_stats, x, y) if ttl_grps else f'{y} by {x}')
    elif y:
        # Numeric y: groups are interval bins; keep the bin endpoints for the
        # custom tick labels below, then stringify for the categorical x-range.
        (width, title) = (0.93, f'{y} by {x}')
        endpts = ([grp.left for grp in df['grp']] + [df['grp'][(len(df) - 1)].right])
        df['grp'] = df['grp'].astype(str)
    else:
        (width, title) = (0.7, f'{x}')
        # Whisker cap x-extents for the single-box case.
        (df['x0'], df['x1']) = ((df.index + 0.2), (df.index + 0.8))
    fig = figure(plot_width=plot_width, plot_height=plot_height, title=title, toolbar_location=None, x_range=df['grp'])
    # Lower whisker cap, lower tail, the two box halves, upper tail, upper cap.
    low = fig.segment(x0='x0', y0='lw', x1='x1', y1='lw', line_color='black', source=df)
    ltail = fig.segment(x0='grp', y0='lw', x1='grp', y1='q1', line_color='black', source=df)
    lbox = fig.vbar(x='grp', width=width, top='q2', bottom='q1', fill_color=box.color, line_color='black', source=df)
    ubox = fig.vbar(x='grp', width=width, top='q3', bottom='q2', fill_color=box.color, line_color='black', source=df)
    utail = fig.segment(x0='grp', y0='uw', x1='grp', y1='q3', line_color='black', source=df)
    upw = fig.segment(x0='x0', y0='uw', x1='x1', y1='uw', line_color='black', source=df)
    # Replace missing outlier lists with empty lists so iteration is uniform.
    df.loc[(df['otlrs'].isna(), 'otlrs')] = pd.Series(([[]] * df['otlrs'].isna().sum()), dtype=np.float64).values
    otlrs = [otl for otls in df['otlrs'] for otl in otls]
    if otlrs:
        # One circle per outlier, repeated per its group position.
        gps = [grp for (grp, ols) in zip(df['grp'], df['otlrs']) for _ in range(len(ols))]
        circ = fig.circle(x=gps, y=otlrs, size=3, line_color='black', color='black', fill_alpha=0.6)
        fig.add_tools(HoverTool(renderers=[circ], tooltips=[('Outlier', '')]))
    tooltips = [('Upper Whisker', ''), ('Upper Quartile', ''), ('Median', ''), ('Lower Quartile', ''), ('Lower Whisker', '')]
    if y:
        lbl = (f'{x}' if ttl_grps else 'Bin')
        tooltips.insert(0, (lbl, ''))
    fig.add_tools(HoverTool(renderers=[upw, utail, ubox, lbox, ltail, low], tooltips=tooltips))
    tweak_figure(fig, 'box')
    if (y is None):
        fig.xaxis.major_tick_line_color = None
        fig.xaxis.major_label_text_font_size = '0pt'
    fig.xaxis.axis_label = (x if (y is not None) else None)
    fig.yaxis.axis_label = (x if (y is None) else y)
    # Extend the y-axis range to cover outliers, if any.
    minw = (min(otlrs) if otlrs else np.nan)
    maxw = (max(otlrs) if otlrs else np.nan)
    _format_axis(fig, min(df['lw'].min(), minw), max(df['uw'].max(), maxw), 'y')
    if (y and (not ttl_grps)):
        # Numeric binning: round bin endpoints for tick labels, increasing the
        # precision until the displayed ticks are unique.
        round_to = (- len(str(max([abs(int(ept)) for ept in endpts]))))
        ticks = np.round(endpts, round_to)
        nticks = ((len(df) // 5) + 1)
        show_ticks = [ticks[i] for i in range(len(ticks)) if ((i % nticks) == 0)]
        while (len(set(show_ticks)) != len(show_ticks)):
            round_to += 1
            ticks = np.round(endpts, round_to)
            show_ticks = [ticks[i] for i in range(len(ticks)) if ((i % nticks) == 0)]
        ticks = [(int(tick) if tick.is_integer() else tick) for tick in ticks]
        ticks = _format_ticks(ticks)
        fig.xaxis.ticker = list(range((len(df) + 1)))
        # Show only every `nticks`-th label client-side.
        fig.xaxis.formatter = FuncTickFormatter(args={'vals': ticks, 'mod': nticks}, code='\n    if (index % mod == 0) return vals[index];\n    return "";\n    ')
        tweak_figure(fig, 'boxnum')
        fig.xaxis.major_label_text_font_size = '10pt'
    return Panel(child=row(fig), title='Box Plot')
class DataGenerationMethod(str, Enum):
    """What kind of test data to generate: schema-conforming (positive) or
    deliberately non-conforming (negative)."""

    positive = 'positive'
    negative = 'negative'

    # Bug fix: these three take `cls` but were missing @classmethod, so
    # e.g. `DataGenerationMethod.default()` raised TypeError (the decorators
    # were evidently dropped). Forward annotations are quoted so the class
    # body evaluates without `from __future__ import annotations`.
    @classmethod
    def default(cls) -> 'DataGenerationMethod':
        """Method used when none is specified explicitly."""
        return cls.positive

    @classmethod
    def all(cls) -> 'list[DataGenerationMethod]':
        """All defined generation methods."""
        return list(DataGenerationMethod)

    def as_short_name(self) -> str:
        """One-letter tag for display: 'P' or 'N'."""
        return {DataGenerationMethod.positive: 'P', DataGenerationMethod.negative: 'N'}[self]

    def is_negative(self) -> bool:
        return (self == DataGenerationMethod.negative)

    @classmethod
    def ensure_list(cls, value: 'DataGenerationMethodInput') -> 'list[DataGenerationMethod]':
        """Normalise a single method or an iterable of methods to a list."""
        if isinstance(value, DataGenerationMethod):
            return [value]
        return list(value)
def op_t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_mpipe():
    """Experiment preset: stateless t5-3b with tied LM heads.

    Returns:
        dict of model/config options consumed by the experiment runner.
    """
    # Flags that must be set explicitly on the model config object.
    explicit_overrides = {
        'return_dict': False,
        'use_cache': False,
        'output_only': True,
        'output_attentions': False,
        'precomputed_masks': False,
        'output_hidden_states': False,
    }
    return {
        'model_type': 'new_t5_stateless',
        'model_name_or_path': 't5-3b',
        'do_lower_case': False,
        'output_past': False,
        'output_attentions': False,
        'output_hidden_states': False,
        'do_resize_token_embedding': True,
        'explicitly_set_dict': explicit_overrides,
        'stateless_tied': True,
    }
def _vgg(arch: str, cfg: str, batch_norm: bool, pretrained: bool, progress: bool, **kwargs: Any) -> VGG:
    """Construct a VGG variant, optionally loading pretrained weights.

    Args:
        arch: key into `model_urls` for the pretrained checkpoint.
        cfg: key into `cfgs` selecting the layer configuration.
        batch_norm: insert BatchNorm after each conv layer.
        pretrained: download and load checkpoint weights.
        progress: show a download progress bar.
        **kwargs: forwarded to the VGG constructor.
    """
    if pretrained:
        # Checkpoint weights will overwrite everything; skip random init.
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)
    if not pretrained:
        return model
    state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
    model.load_state_dict(state_dict, strict=False)
    return model
def test_functional_exceptions(variable_x):
    """Each malformed sn.Functional construction must raise TypeError."""
    x = variable_x
    with pytest.raises(TypeError):
        # Invalid first argument (expects a name, not a variable).
        f = sn.Functional(x)
    with pytest.raises(TypeError):
        # Invalid second argument type.
        ft = sn.Functional('ft', (2 * [10]))
    with pytest.raises(TypeError):
        # Invalid third argument type.
        ft = sn.Functional('ft', x, 'tanh')
    with pytest.raises(TypeError):
        # Invalid fourth argument type.
        ft = sn.Functional('ft', x, (2 * [10]), 12)
def make_sdfg(make_tmp_local: bool):
    """Build an FPGA-transformed SDFG for instrumentation testing.

    The dataflow has two independent "+1" maps feeding an "after" map through
    transients tmp0/tmp1, plus an unrelated squaring map, so that kernel
    instrumentation can be exercised.

    Args:
        make_tmp_local: if True, place the tmp0/tmp1 transients in FPGA-local
            storage instead of the default.

    Raises:
        RuntimeError: if no FPGA kernel state is found after transformation.
    """
    sdfg = dace.SDFG('instrumentation_test')
    sdfg.add_array('in0', (16,), dace.float32)
    sdfg.add_array('in1', (16,), dace.float32)
    sdfg.add_array('in2', (16,), dace.float32)
    sdfg.add_array('tmp0', (16,), dace.float32, transient=True)
    sdfg.add_array('tmp1', (16,), dace.float32, transient=True)
    sdfg.add_array('out0', (16,), dace.float32)
    sdfg.add_array('out1', (16,), dace.float32)
    state = sdfg.add_state('instrumentation_test')
    in0 = state.add_read('in0')
    in1 = state.add_read('in1')
    tmp0 = state.add_access('tmp0')
    tmp1 = state.add_access('tmp1')
    out0 = state.add_write('out0')
    # Left branch: tmp0[i] = in0[i] + 1
    (entry_left, exit_left) = state.add_map('left_map', {'i': '0:16'})
    tasklet_left = state.add_tasklet('left_tasklet', {'_in'}, {'_tmp'}, '_tmp = _in + 1')
    state.add_memlet_path(in0, entry_left, tasklet_left, dst_conn='_in', memlet=dace.Memlet('in0[i]'))
    state.add_memlet_path(tasklet_left, exit_left, tmp0, src_conn='_tmp', memlet=dace.Memlet('tmp0[i]'))
    # Right branch: tmp1[i] = in1[i] + 1
    (entry_right, exit_right) = state.add_map('right_map', {'i': '0:16'})
    tasklet_right = state.add_tasklet('right_tasklet', {'_in'}, {'_tmp'}, '_tmp = _in + 1')
    state.add_memlet_path(in1, entry_right, tasklet_right, dst_conn='_in', memlet=dace.Memlet('in1[i]'))
    state.add_memlet_path(tasklet_right, exit_right, tmp1, src_conn='_tmp', memlet=dace.Memlet('tmp1[i]'))
    # Join: out0[i] = 2 * (tmp0[i] + tmp1[i])
    (entry_after, exit_after) = state.add_map('after_map', {'i': '0:16'})
    tasklet_after = state.add_tasklet('after_tasklet', {'_tmp0', '_tmp1'}, {'_c'}, '_c = 2 * (_tmp0 + _tmp1)')
    state.add_memlet_path(tmp0, entry_after, tasklet_after, dst_conn='_tmp0', memlet=dace.Memlet('tmp0[i]'))
    state.add_memlet_path(tmp1, entry_after, tasklet_after, dst_conn='_tmp1', memlet=dace.Memlet('tmp1[i]'))
    state.add_memlet_path(tasklet_after, exit_after, out0, src_conn='_c', memlet=dace.Memlet('out0[i]'))
    # Independent component: out1[i] = in2[i] ** 2
    in2 = state.add_read('in2')
    out1 = state.add_write('out1')
    (entry_extra, exit_extra) = state.add_map('extra_map', {'i': '0:16'})
    tasklet_extra = state.add_tasklet('extra_tasklet', {'_in'}, {'_out'}, '_out = _in * _in')
    state.add_memlet_path(in2, entry_extra, tasklet_extra, dst_conn='_in', memlet=dace.Memlet('in2[i]'))
    state.add_memlet_path(tasklet_extra, exit_extra, out1, src_conn='_out', memlet=dace.Memlet('out1[i]'))
    # Offload everything to an FPGA kernel and inline the nested SDFG.
    assert (sdfg.apply_transformations(FPGATransformSDFG) == 1)
    assert (sdfg.apply_transformations(InlineSDFG) == 1)
    if make_tmp_local:
        made_local = 0
        for (name, desc) in sdfg.arrays.items():
            if ('tmp' in name):
                desc.storage = dace.StorageType.FPGA_Local
                made_local += 1
        assert (made_local == 2)
    # Enable FPGA instrumentation on the (single) FPGA kernel state.
    for s in sdfg.states():
        if is_fpga_kernel(sdfg, s):
            s.instrument = dace.InstrumentationType.FPGA
            break
    else:
        raise RuntimeError('FPGA state was not found.')
    return sdfg
def GetRndWalkRestart_PNGraph(Graph, JumpProb, JumpNId, RwrNIdH):
    """Thin SWIG wrapper delegating to the native _snap implementation.

    Presumably runs a random walk with restart on a PNGraph, restarting at
    node JumpNId with probability JumpProb and writing per-node scores into
    RwrNIdH — TODO confirm against the SNAP C++ documentation.
    """
    return _snap.GetRndWalkRestart_PNGraph(Graph, JumpProb, JumpNId, RwrNIdH)
class SqueezeBertTokenizer(PreTrainedTokenizer):
    """WordPiece tokenizer for SqueezeBERT (same pipeline as BERT: optional
    basic tokenization, then WordPiece on each resulting token)."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        """Load the vocabulary file and configure the two tokenization stages.

        Raises:
            ValueError: if `vocab_file` does not exist.
        """
        super().__init__(do_lower_case=do_lower_case, do_basic_tokenize=do_basic_tokenize, never_split=never_split, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        if (not os.path.isfile(vocab_file)):
            raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = SqueezeBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.vocab = load_vocab(vocab_file)
        # Reverse mapping id -> token for decoding.
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for (tok, ids) in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
    # NOTE(review): upstream defines do_lower_case and vocab_size as
    # @property — these look like stripped decorators; confirm.
    def do_lower_case(self):
        return self.basic_tokenizer.do_lower_case
    def vocab_size(self):
        return len(self.vocab)
    def get_vocab(self):
        """Full vocabulary (base vocab plus tokens added after loading)."""
        return dict(self.vocab, **self.added_tokens_encoder)
    def _tokenize(self, text):
        """Split text via basic tokenization (unless disabled), then WordPiece."""
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
                # Protected tokens bypass WordPiece entirely.
                if (token in self.basic_tokenizer.never_split):
                    split_tokens.append(token)
                else:
                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens
    def _convert_token_to_id(self, token):
        """Map a token to its vocabulary id (unk id for unknown tokens)."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))
    def _convert_id_to_token(self, index):
        """Map a vocabulary id back to its token (unk token for unknown ids)."""
        return self.ids_to_tokens.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens):
        """Join tokens into text, merging WordPiece '##' continuations."""
        out_string = ' '.join(tokens).replace(' ##', '').strip()
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Add special tokens: [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere,
        for sequences as laid out by build_inputs_with_special_tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if (token_ids_1 is not None):
            return (((([1] + ([0] * len(token_ids_0))) + [1]) + ([0] * len(token_ids_1))) + [1])
        return (([1] + ([0] * len(token_ids_0))) + [1])
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Segment ids: 0 for `[CLS] A [SEP]`, 1 for the optional `B [SEP]`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write the vocabulary, one token per line in id order, to
        `save_directory` (or directly to that path if it is not a directory).

        Returns:
            1-tuple with the written file path.
        """
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        else:
            vocab_file = (((filename_prefix + '-') if filename_prefix else '') + save_directory)
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for (token, token_index) in sorted(self.vocab.items(), key=(lambda kv: kv[1])):
                # Line number must equal token id; warn if ids have gaps.
                if (index != token_index):
                    logger.warning(f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive. Please check that the vocabulary is not corrupted!')
                    index = token_index
                writer.write((token + '\n'))
                index += 1
        return (vocab_file,)
def make_sdfg(dtype):
    """Build a minimal SDFG wrapping an MPI broadcast of array x (length n)
    from the rank given in `root`.

    Args:
        dtype: dace element type of the broadcast buffer.
    """
    n = dace.symbol('n')
    sdfg = dace.SDFG('mpi_bcast')
    state = sdfg.add_state('dataflow')
    sdfg.add_array('x', [n], dtype, transient=False)
    sdfg.add_array('root', [1], dace.dtypes.int32, transient=False)
    # x is both input and output of the in-place broadcast.
    x = state.add_access('x')
    xout = state.add_access('x')
    root = state.add_access('root')
    bcast_node = mpi.nodes.bcast.Bcast('bcast')
    state.add_memlet_path(x, bcast_node, dst_conn='_inbuffer', memlet=Memlet.simple(x, '0:n', num_accesses=n))
    state.add_memlet_path(root, bcast_node, dst_conn='_root', memlet=Memlet.simple(root, '0:1', num_accesses=1))
    # NOTE(review): output memlet covers 0:n but declares num_accesses=1
    # (input uses n) — looks inconsistent; confirm intended access count.
    state.add_memlet_path(bcast_node, xout, src_conn='_outbuffer', memlet=Memlet.simple(xout, '0:n', num_accesses=1))
    return sdfg
def self_attention(x, channels, sn=False, scope='self_attention'):
    """SAGAN-style self-attention block (TF1 graph mode).

    Args:
        x: input feature map, NHWC with `channels` channels.
        channels: channel count of x (and of the output).
        sn: apply spectral normalization inside the 1x1 convs.
        scope: variable scope name.

    Returns:
        x + gamma * attention(x), same shape as x; gamma is a learned scalar
        initialised to 0 so the block starts as an identity.
    """
    with tf.variable_scope(scope):
        # Query/key projections are reduced to channels//8 to cut cost.
        f = conv(x, (channels // 8), kernel=1, stride=1, sn=sn, scope='f_conv')
        g = conv(x, (channels // 8), kernel=1, stride=1, sn=sn, scope='g_conv')
        h = conv(x, channels, kernel=1, stride=1, sn=sn, scope='h_conv')
        # Attention map over all spatial positions: softmax(g . f^T).
        s = tf.matmul(hw_flatten(g), hw_flatten(f), transpose_b=True)
        beta = tf.nn.softmax(s)
        o = tf.matmul(beta, hw_flatten(h))
        gamma = tf.get_variable('gamma', [1], initializer=tf.constant_initializer(0.0))
        o = tf.reshape(o, shape=x.shape)
        x = ((gamma * o) + x)
    return x
def get_final_weights(weights, lora_module_list, cache):
    """Linearly combine cached LoRA state dicts.

    Args:
        weights: per-module mixing coefficients, aligned with lora_module_list.
        lora_module_list: ids of the LoRA modules to combine.
        cache: id -> state dict; all entries share the same keys.

    Returns:
        dict mapping each key to sum_i weights[i] * cache[module_i][key].
    """
    keys = cache[lora_module_list[0]].keys()
    return {
        key: sum((coeff * cache[module_id][key]) for (coeff, module_id) in zip(weights, lora_module_list))
        for key in keys
    }
def conv2d_gradfix(transpose, weight_shape, stride, padding, output_padding, dilation, groups):
    """Return a custom autograd Function implementing conv2d (or
    conv_transpose2d) with hand-written gradients, memoized per parameter
    combination in conv2d_gradfix_cache.

    NOTE(review): the Function methods below lack @staticmethod — upstream
    (stylegan2-style gradfix) declares them static; confirm the decorators
    were not dropped.
    """
    ndim = 2
    weight_shape = tuple(weight_shape)
    stride = ensure_tuple(stride, ndim)
    padding = ensure_tuple(padding, ndim)
    output_padding = ensure_tuple(output_padding, ndim)
    dilation = ensure_tuple(dilation, ndim)
    # One cached Function class per unique parameter combination.
    key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups)
    if (key in conv2d_gradfix_cache):
        return conv2d_gradfix_cache[key]
    common_kwargs = dict(stride=stride, padding=padding, dilation=dilation, groups=groups)
    def calc_output_padding(input_shape, output_shape):
        # Output padding needed so the transposed conv used in the backward
        # pass reproduces exactly the forward input's spatial size.
        if transpose:
            return [0, 0]
        return [(((input_shape[(i + 2)] - ((output_shape[(i + 2)] - 1) * stride[i])) - (1 - (2 * padding[i]))) - (dilation[i] * (weight_shape[(i + 2)] - 1))) for i in range(ndim)]
    class Conv2d(autograd.Function):
        def forward(ctx, input, weight, bias):
            if (not transpose):
                out = F.conv2d(input=input, weight=weight, bias=bias, **common_kwargs)
            else:
                out = F.conv_transpose2d(input=input, weight=weight, bias=bias, output_padding=output_padding, **common_kwargs)
            ctx.save_for_backward(input, weight)
            return out
        def backward(ctx, grad_output):
            (input, weight) = ctx.saved_tensors
            (grad_input, grad_weight, grad_bias) = (None, None, None)
            if ctx.needs_input_grad[0]:
                # Gradient w.r.t. input is the "opposite" convolution.
                p = calc_output_padding(input_shape=input.shape, output_shape=grad_output.shape)
                grad_input = conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs).apply(grad_output, weight, None)
            if (ctx.needs_input_grad[1] and (not weight_gradients_disabled)):
                grad_weight = Conv2dGradWeight.apply(grad_output, input)
            if ctx.needs_input_grad[2]:
                # Bias gradient: sum over batch and spatial dims.
                grad_bias = grad_output.sum((0, 2, 3))
            return (grad_input, grad_weight, grad_bias)
    class Conv2dGradWeight(autograd.Function):
        def forward(ctx, grad_output, input):
            # Call the cuDNN weight-gradient kernel directly via the JIT
            # operator registry.
            op = torch._C._jit_get_operation(('aten::cudnn_convolution_backward_weight' if (not transpose) else 'aten::cudnn_convolution_transpose_backward_weight'))
            flags = [torch.backends.cudnn.benchmark, torch.backends.cudnn.deterministic, torch.backends.cudnn.allow_tf32]
            grad_weight = op(weight_shape, grad_output, input, padding, stride, dilation, groups, *flags)
            ctx.save_for_backward(grad_output, input)
            return grad_weight
        def backward(ctx, grad_grad_weight):
            # Second-order gradients, expressed through Conv2d itself.
            (grad_output, input) = ctx.saved_tensors
            (grad_grad_output, grad_grad_input) = (None, None)
            if ctx.needs_input_grad[0]:
                grad_grad_output = Conv2d.apply(input, grad_grad_weight, None)
            if ctx.needs_input_grad[1]:
                p = calc_output_padding(input_shape=input.shape, output_shape=grad_output.shape)
                grad_grad_input = conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs).apply(grad_output, grad_grad_weight, None)
            return (grad_grad_output, grad_grad_input)
    conv2d_gradfix_cache[key] = Conv2d
    return Conv2d
(repr=False, eq=False, frozen=True)  # NOTE(review): stripped decorator — almost certainly `@dataclasses.dataclass(repr=False, eq=False, frozen=True)`; as written this line is a syntax error, confirm upstream
class FunctionCounts(object):
    """Immutable collection of (count, function-name) pairs from a profiler
    run, with set-like arithmetic and a table-style repr."""
    # Underlying sorted (count, name) tuples.
    _data: Tuple[(FunctionCount, ...)]
    # Whether counts are inclusive (include callees) or exclusive.
    inclusive: bool
    # Elide the middle rows of long tables in __repr__.
    truncate_rows: bool = True
    # Display width override; falls back to torch's print options.
    _linewidth: Optional[int] = None
    def __iter__(self) -> Generator[(FunctionCount, None, None)]:
        for i in self._data:
            (yield i)
    def __len__(self) -> int:
        return len(self._data)
    def __getitem__(self, item: Any) -> 'Union[FunctionCount, FunctionCounts]':
        """Index -> single FunctionCount; slice -> new FunctionCounts."""
        data: Union[(FunctionCount, Tuple[(FunctionCount, ...)])] = self._data[item]
        return (FunctionCounts(cast(Tuple[(FunctionCount, ...)], data), self.inclusive, truncate_rows=False) if isinstance(data, tuple) else data)
    def __repr__(self) -> str:
        """Right-aligned count column, truncated function names, optional
        middle elision and (for exclusive counts) a total line."""
        count_len = 0
        for (c, _) in self:
            # Extra column for the minus sign of negative counts.
            count_len = max(count_len, (len(str(c)) + int((c < 0))))
        lines = []
        linewidth = (self._linewidth or torch._tensor_str.PRINT_OPTS.linewidth)
        fn_str_len = max(((linewidth - count_len) - 4), 40)
        for (c, fn) in self:
            if (len(fn) > fn_str_len):
                # Keep both ends of an over-long name, eliding the middle.
                left_len = int(((fn_str_len - 5) // 2))
                fn = ((fn[:left_len] + ' ... ') + fn[(- ((fn_str_len - left_len) - 5)):])
            lines.append(f'  {c:>{count_len}}  {fn}')
        if (self.truncate_rows and (len(lines) > 18)):
            lines = ((lines[:9] + ['...'.rjust((count_len + 2))]) + lines[(- 9):])
        if (not self.inclusive):
            lines.extend(['', f'Total: {self.sum()}'])
        return '\n'.join(([super().__repr__()] + lines))
    def __add__(self, other) -> 'FunctionCounts':
        return self._merge(other, (lambda c: c))
    def __sub__(self, other) -> 'FunctionCounts':
        return self._merge(other, (lambda c: (- c)))
    def __mul__(self, other: Union[(int, float)]) -> 'FunctionCounts':
        return self._from_dict({fn: int((c * other)) for (c, fn) in self._data}, self.inclusive)
    def transform(self, map_fn: Callable[([str], str)]) -> 'FunctionCounts':
        """Rename functions via map_fn, merging counts that collide."""
        counts: DefaultDict[(str, int)] = collections.defaultdict(int)
        for (c, fn) in self._data:
            counts[map_fn(fn)] += c
        return self._from_dict(counts, self.inclusive)
    def filter(self, filter_fn: Callable[([str], bool)]) -> 'FunctionCounts':
        """Keep only entries whose function name satisfies filter_fn."""
        return FunctionCounts(tuple((i for i in self if filter_fn(i.function))), self.inclusive)
    def sum(self) -> int:
        return sum((c for (c, _) in self))
    def denoise(self) -> 'FunctionCounts':
        """Drop the CPython dict-lookup noise entries."""
        return self.filter((lambda fn: ('dictobject.c:lookdict_unicode' not in fn)))
    def _merge(self, second, merge_fn: Callable[([int], int)]) -> 'FunctionCounts':
        assert (self.inclusive == second.inclusive), 'Cannot merge inclusive and exclusive counts.'
        counts: DefaultDict[(str, int)] = collections.defaultdict(int)
        for (c, fn) in self:
            counts[fn] += c
        for (c, fn) in second:
            counts[fn] += merge_fn(c)
        return self._from_dict(counts, self.inclusive)
    # NOTE(review): no self/cls parameter but called as self._from_dict(...)
    # above — upstream this is a @staticmethod; confirm the decorator was
    # not dropped (as written, self would be passed as `counts`).
    def _from_dict(counts: Dict[(str, int)], inclusive: bool) -> 'FunctionCounts':
        flat_counts = (FunctionCount(c, fn) for (fn, c) in counts.items() if c)
        return FunctionCounts(tuple(sorted(flat_counts, reverse=True)), inclusive)
def store(self):
    """Run the wrapped store behaviour, then persist SRCDIR into the extra
    lock file inside the variant directory."""
    # Delegate to the original (pre-patch) store implementation first.
    old1(self)
    lock_path = os.path.join(self.variant_dir, EXTRA_LOCK)
    extra_env = ConfigSet.ConfigSet()
    extra_env.SRCDIR = self.srcnode.abspath()
    extra_env.store(lock_path)
def streams(patch, params):
    """Run the retina (center crop) and fovea (2x downsample) siamese streams
    on a patch and concatenate their outputs along the channel dimension."""
    # Negative padding of -16 on every side crops 16 pixels from each border.
    cropped = F.pad(patch, ((- 16),) * 4)
    retina_out = siam_stream(cropped, params, 'retina')
    fovea_out = siam_stream(F.avg_pool2d(patch, 2, 2), params, 'fovea')
    return torch.cat([retina_out, fovea_out], dim=1)
class StatementCheckedTestSuiteFitnessFunction(TestSuiteFitnessFunction):
    """Minimising fitness based on checked (backward-sliced) statement
    coverage of a whole test suite."""

    def compute_fitness(self, individual) -> float:
        """Number of known lines NOT covered by checked execution (0 = optimal)."""
        merged_trace = self._merged_trace(individual)
        existing = self._executor.tracer.get_subject_properties().existing_lines
        return len(existing) - len(merged_trace.checked_lines)

    def compute_is_covered(self, individual) -> bool:
        """True iff checked statement coverage is complete for the suite."""
        merged_trace = self._merged_trace(individual)
        subject_properties = self._executor.tracer.get_subject_properties()
        return compute_checked_coverage_statement_fitness_is_covered(merged_trace, subject_properties)

    def is_maximisation_function(self) -> bool:
        # Lower is better: we count uncovered lines.
        return False

    def _merged_trace(self, individual):
        """Execute the suite chromosome and merge all execution traces."""
        return analyze_results(self._run_test_suite_chromosome(individual))
class MultiHeadedAttention(nn.Module):
    """Multi-head self-attention: projects the input to Q/K/V, splits into
    `head` heads, applies scaled attention, and re-projects the merged result."""

    def __init__(self, d_model, head, p=0.1):
        super().__init__()
        self.query_embedding = nn.Linear(d_model, d_model)
        self.value_embedding = nn.Linear(d_model, d_model)
        self.key_embedding = nn.Linear(d_model, d_model)
        self.output_linear = nn.Linear(d_model, d_model)
        self.attention = Attention(p=p)
        self.head = head

    def forward(self, x):
        batch, length, channels = x.size()
        per_head = channels // self.head

        def split_heads(t):
            # (b, n, c) -> (b, head, n, c // head)
            return t.view(batch, length, self.head, per_head).permute(0, 2, 1, 3)

        query = split_heads(self.query_embedding(x))
        key = split_heads(self.key_embedding(x))
        value = split_heads(self.value_embedding(x))
        attended, _ = self.attention(query, key, value)
        # Merge heads back into the channel dimension.
        attended = attended.permute(0, 2, 1, 3).contiguous().view(batch, length, channels)
        return self.output_linear(attended)
class OperationInfo():
    """Record describing one traced operation: where it is bound in the AST,
    where it is used, and how much runtime it has accumulated."""

    def __init__(self, bound_name, op_name, ast_node, position, perf_hints):
        self.bound_name = bound_name
        self.op_name = op_name
        self.ast_node = ast_node
        self.position = position
        self.perf_hints = perf_hints
        self.usages = []       # usage sites; filled later via set_usages()
        self.runtime_us = 0    # accumulated runtime in microseconds

    def set_usages(self, usages):
        """Replace the recorded usage sites for this operation."""
        self.usages = usages

    def add_to_runtime_us(self, runtime_us):
        """Accumulate *runtime_us* microseconds onto this operation's total."""
        self.runtime_us += runtime_us
# NOTE(review): the bare call below looks like a decorator whose '@'/prefix was
# stripped during extraction (dace's ufunc tests wrap functions in a
# compare-against-numpy decorator) — confirm against upstream before running.
_numpy_output(check_dtype=True)
def test_ufunc_arccos_c(A: dace.complex64[10]):
    # Element-wise arccos over a complex64 vector; the harness is expected to
    # compare the compiled result (including dtype) against plain numpy.
    return np.arccos(A)
# NOTE(review): the line below looks like a decorator whose '@register_' prefix
# was stripped during extraction (fairseq registers criteria via
# @register_criterion('sentence_prediction', dataclass=...)) — confirm upstream.
_criterion('sentence_prediction', dataclass=SentencePredictionConfig)
class SentencePredictionCriterion(FairseqCriterion):
    """Criterion for sentence-level prediction: NLL for classification or MSE
    for regression, selected by cfg.regression_target."""

    def __init__(self, cfg: SentencePredictionConfig, task):
        super().__init__(task)
        self.classification_head_name = cfg.classification_head_name
        self.regression_target = cfg.regression_target

    def forward(self, model, sample, reduce=True):
        """Compute the loss for *sample*.

        Returns (loss, sample_size, logging_output); sample_size is the number
        of targets and is used downstream for gradient normalization.
        """
        assert (hasattr(model, 'classification_heads') and (self.classification_head_name in model.classification_heads)), 'model must provide sentence classification head for --criterion=sentence_prediction'
        (logits, _) = model(**sample['net_input'], features_only=True, classification_head_name=self.classification_head_name)
        targets = model.get_targets(sample, [logits]).view((- 1))
        sample_size = targets.numel()
        if (not self.regression_target):
            # Classification: summed NLL over float32 log-softmax outputs.
            lprobs = F.log_softmax(logits, dim=(- 1), dtype=torch.float32)
            loss = F.nll_loss(lprobs, targets, reduction='sum')
        else:
            # Regression: summed squared error against float targets.
            logits = logits.view((- 1)).float()
            targets = targets.float()
            loss = F.mse_loss(logits, targets, reduction='sum')
        logging_output = {'loss': loss.data, 'ntokens': sample['ntokens'], 'nsentences': sample_size, 'sample_size': sample_size}
        if (not self.regression_target):
            preds = logits.argmax(dim=1)
            logging_output['ncorrect'] = (preds == targets).sum()
        return (loss, sample_size, logging_output)

    # NOTE(review): takes no 'self'; upstream fairseq marks this (and
    # logging_outputs_can_be_summed below) @staticmethod — the decorators
    # appear stripped here. Call on the class until confirmed.
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate per-worker logging outputs into scalar metrics (loss in bits)."""
        loss_sum = sum((log.get('loss', 0) for log in logging_outputs))
        ntokens = sum((log.get('ntokens', 0) for log in logging_outputs))
        nsentences = sum((log.get('nsentences', 0) for log in logging_outputs))
        sample_size = sum((log.get('sample_size', 0) for log in logging_outputs))
        # Dividing by log(2) converts the loss from nats to bits.
        metrics.log_scalar('loss', ((loss_sum / sample_size) / math.log(2)), sample_size, round=3)
        if (sample_size != ntokens):
            metrics.log_scalar('nll_loss', ((loss_sum / ntokens) / math.log(2)), ntokens, round=3)
        if ((len(logging_outputs) > 0) and ('ncorrect' in logging_outputs[0])):
            ncorrect = sum((log.get('ncorrect', 0) for log in logging_outputs))
            metrics.log_scalar('accuracy', ((100.0 * ncorrect) / nsentences), nsentences, round=1)

    def logging_outputs_can_be_summed() -> bool:
        # All logged quantities are plain sums, so cross-worker reduction is a sum.
        return True
def return_rel_docs_for_dict(labels: dict, dpr_dict: dict):
    """Filter *dpr_dict* down to the relevant documents listed in *labels*.

    For each query id, keep only the (doc, score) pairs whose doc appears in
    that query's relevance list and whose score is truthy.

    BUG FIX: the original checked `label_keys.sort() == dpr_keys.sort()`, but
    list.sort() returns None, so the assertion compared None == None and always
    passed. Compare sorted copies of the key sets instead. Also removed the
    leftover debug print.
    """
    assert sorted(labels) == sorted(dpr_dict), 'labels and dpr_dict must cover the same query ids'
    filtered_dict = {}
    for query_id, rel_docs in labels.items():
        filtered_dict[query_id] = {}
        for doc in rel_docs:
            score = dpr_dict[query_id].get(doc)
            # Keep only docs that are present with a truthy score, matching the
            # original truthiness check.
            if score:
                filtered_dict[query_id][doc] = score
    return filtered_dict
def test_initialize_object_binary_policy(digraph_with_object_policy):
    """_initialize_binary_policy must reject a digraph configured with an
    object (non-binary) policy by raising ValueError."""
    with pytest.raises(ValueError):
        digraph_with_object_policy._initialize_binary_policy()
def get_datapoints(base='/ssd_scratch/cvit/aditya1/processed_vlog_dataset_copy', min_landmark_files=3):
    """Collect the usable video-segment paths (without the '.mp4' suffix).

    A segment is kept when its name (last four path components) appears in
    base/valid_folders.json AND its sibling directory holds strictly more than
    *min_landmark_files* '*_landmarks.npz' files.

    GENERALIZATION: the dataset root and the landmark-count threshold were
    hard-coded; they are now defaulted parameters, preserving the original
    call signature `get_datapoints()`.
    """
    valid_videos_json_path = os.path.join(base, 'valid_folders.json')

    def get_name(path):
        # Last four path components identify a segment inside the dataset tree.
        return '/'.join(path.split('/')[-4:])

    with open(valid_videos_json_path) as r:
        valid_videos = json.load(r)
    video_segments = glob(base + '/*/*/*/*.mp4')

    def is_good_video(path):
        name = get_name(path)
        landmark_dir = path.split('.')[0]
        return (name in valid_videos) and (len(glob(landmark_dir + '/*_landmarks.npz')) > min_landmark_files)

    return [x.replace('.mp4', '') for x in video_segments if is_good_video(x)]
def test_points2polygon():
    """points2polygon must reject invalid input and round-trip 8 flat coords
    (x1, y1, ..., x4, y4) given as either an ndarray or a plain list."""
    with pytest.raises(AssertionError):
        # Non-sequence input is rejected.
        points = 2
        utils.points2polygon(points)
    with pytest.raises(AssertionError):
        # Odd number of coordinates is rejected.
        points = [1, 2, 3, 4, 5, 6, 7]
        utils.points2polygon(points)
    with pytest.raises(AssertionError):
        # 6 coordinates (3 points) are rejected too — presumably quadrilaterals
        # are required; confirm against utils.points2polygon.
        points = [1, 2, 3, 4, 5, 6]
        utils.points2polygon(points)
    points = np.array([1, 2, 3, 4, 5, 6, 7, 8])
    poly = utils.points2polygon(points)
    i = 0
    # The exterior ring repeats the first vertex at the end; skip the closing point.
    for coord in poly.exterior.coords[:(- 1)]:
        assert (coord[0] == points[i])
        assert (coord[1] == points[(i + 1)])
        i += 2
    # Same check with a plain Python list as input.
    points = [1, 2, 3, 4, 5, 6, 7, 8]
    poly = utils.points2polygon(points)
    i = 0
    for coord in poly.exterior.coords[:(- 1)]:
        assert (coord[0] == points[i])
        assert (coord[1] == points[(i + 1)])
        i += 2
def get_suffix_path(current_path, levels=1):
    """Return the trailing (levels + 1) components of *current_path*, relative
    to the ancestor obtained by stripping that many directory levels."""
    ancestor = current_path
    for _ in range(levels + 1):
        ancestor = os.path.dirname(ancestor)
    return os.path.relpath(current_path, ancestor)
def test_merge():
    """Merging one empty ExecutionTrace into another must leave it empty."""
    target, other = ExecutionTrace(), ExecutionTrace()
    target.merge(other)
    assert target == ExecutionTrace()
def multi_func(member_check):
    """Adapt a (name, column) predicate into a single-argument column function."""
    def wrapper(column):
        return member_check(column.name, column)
    return wrapper
def lengths_to_encoder_padding_mask(lengths, batch_first: bool=False):
    """Build a boolean padding mask from per-sequence lengths.

    A position is True when its index is strictly greater than the sequence
    length (matching the original comparison). Returns (mask, max_length);
    the mask is transposed to time-major unless batch_first is set.
    """
    max_len = torch.max(lengths).item()
    batch = lengths.size(0)
    positions = torch.arange(max_len).to(lengths.device).view(1, max_len).expand(batch, -1)
    mask = positions > lengths.view(batch, 1).expand(-1, max_len)
    if batch_first:
        return (mask, max_len)
    return (mask.t(), max_len)
class BetaSobolev(ProcessingPlasmaProperty):
    """Plasma property computing the Sobolev escape probability beta for each line."""

    outputs = ('beta_sobolev',)
    latex_name = ('\\beta_{\\textrm{sobolev}}',)

    def calculate(self, tau_sobolevs):
        # Reuse the previous iteration's values as initial frame contents when
        # available; otherwise start from zeros.
        if (getattr(self, 'beta_sobolev', None) is None):
            initial = 0.0
        else:
            initial = self.beta_sobolev
        beta_sobolev = pd.DataFrame(initial, index=tau_sobolevs.index, columns=tau_sobolevs.columns)
        # The helper fills the DataFrame in place through the raveled views.
        self.calculate_beta_sobolev(tau_sobolevs.values.ravel(), beta_sobolev.values.ravel())
        return beta_sobolev

    # NOTE(review): the bare parenthesized line below looks like a numba
    # decorator whose '@njit' prefix was stripped during extraction (the
    # prange() in the body requires numba's parallel jit) — confirm upstream.
    (nopython=True, parallel=True)
    def calculate_beta_sobolev(tau_sobolevs, beta_sobolevs):
        # beta = (1 - exp(-tau)) / tau, with asymptotic forms at both extremes
        # to avoid overflow/underflow.
        for i in prange(len(tau_sobolevs)):
            if (tau_sobolevs[i] > 1000.0):
                beta_sobolevs[i] = (tau_sobolevs[i] ** (- 1))
            elif (tau_sobolevs[i] < 0.0001):
                beta_sobolevs[i] = (1 - (0.5 * tau_sobolevs[i]))
            else:
                beta_sobolevs[i] = ((1 - np.exp((- tau_sobolevs[i]))) / tau_sobolevs[i])
        return beta_sobolevs
class InputFeatures(object):
    """Plain container for the features of one (example, doc-span) pair fed to
    a span-prediction QA model; every constructor argument is stored verbatim
    as an attribute of the same name."""

    def __init__(self, unique_id, example_index, doc_span_index, tokens, token_to_orig_map, token_is_max_context, input_ids, input_mask, segment_ids, cls_index, p_mask, paragraph_len, start_position=None, end_position=None, is_impossible=None):
        # Capture all arguments and mirror them onto the instance.
        params = dict(locals())
        del params['self']
        self.__dict__.update(params)
class RandomCrop(object):
    """Randomly crop an (image, target) pair to *size*, padding first if requested.

    BUG FIX: get_params takes no `self` but was invoked as
    `self.get_params(img, self.size)`, which passes the instance as `img` and
    raises TypeError. Restored the @staticmethod decorator (torchvision defines
    RandomCrop.get_params as a staticmethod).
    """

    def __init__(self, size, padding=0):
        # A single number means a square crop.
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size
        self.padding = padding

    @staticmethod
    def get_params(img, output_size):
        """Pick a random (i, j, th, tw) crop rectangle for *img*."""
        (w, h) = img.size
        (th, tw) = output_size
        if ((w == tw) and (h == th)):
            # Crop covers the whole image: no randomness needed.
            return (0, 0, h, w)
        i = random.randint(0, (h - th))
        j = random.randint(0, (w - tw))
        return (i, j, th, tw)

    def __call__(self, img, target):
        if (self.padding > 0):
            img = F.pad(img, self.padding)
            target = F.pad(target, self.padding)
        # Apply the SAME crop to image and target so they stay aligned.
        (i, j, h, w) = self.get_params(img, self.size)
        return (F.crop(img, i, j, h, w), F.crop(target, i, j, h, w))

    def __repr__(self):
        return (self.__class__.__name__ + '(size={0})'.format(self.size))
def conv2d(input_, output_dim, ks=7, s=2, stddev=0.02, padding='SAME', name='conv2d'):
    """Thin wrapper around slim.conv2d: ks x ks convolution with stride s,
    no activation and no bias; weights use a truncated-normal initializer with
    the given stddev. The variable scope *name* namespaces the created variables."""
    with tf.variable_scope(name):
        return slim.conv2d(input_, output_dim, ks, s, padding=padding, activation_fn=None, weights_initializer=tf.truncated_normal_initializer(stddev=stddev), biases_initializer=None)
class Plotter():
    """Background policy visualizer: a daemon worker thread rolls out a policy
    in *env*, receiving policy updates and demo requests through a queue."""

    # Registry of every Plotter ever constructed (see get_plotters).
    __plotters = []

    def __init__(self, env, policy, sess=None, graph=None, rollout=default_rollout):
        Plotter.__plotters.append(self)
        self.env = env
        self.policy = policy
        # Fall back to the default TF session/graph when none are supplied.
        self.sess = (tf.compat.v1.get_default_session() if (sess is None) else sess)
        self.graph = (tf.compat.v1.get_default_graph() if (graph is None) else graph)
        self.rollout = rollout
        self.worker_thread = Thread(target=self._start_worker, daemon=True)
        self.queue = Queue()
        if ('Darwin' in platform.platform()):
            # On macOS, run one rollout on the main thread — presumably because
            # rendering off the main thread is unsupported there; confirm.
            self.rollout(env, policy, max_path_length=np.inf, animated=True, speedup=5)

    def _start_worker(self):
        """Worker loop: drain the queue, apply UPDATE/DEMO/STOP messages, and
        keep replaying demos with the latest parameters while idle."""
        env = None
        policy = None
        max_length = None
        initial_rollout = True
        try:
            with self.sess.as_default(), self.sess.graph.as_default():
                while True:
                    msgs = {}
                    if initial_rollout:
                        # Block until the first message, then drain whatever queued up.
                        msg = self.queue.get()
                        msgs[msg.op] = msg
                        while (not self.queue.empty()):
                            msg = self.queue.get()
                            msgs[msg.op] = msg
                    else:
                        # Non-blocking drain: if no new message, fall through and
                        # replay the demo with the last known parameters.
                        while (not self.queue.empty()):
                            msg = self.queue.get_nowait()
                            msgs[msg.op] = msg
                    if (Op.STOP in msgs):
                        self.queue.task_done()
                        break
                    if (Op.UPDATE in msgs):
                        (env, policy) = msgs[Op.UPDATE].args
                        self.queue.task_done()
                    if (Op.DEMO in msgs):
                        (param_values, max_length) = msgs[Op.DEMO].args
                        policy.set_param_values(param_values)
                        initial_rollout = False
                        self.rollout(env, policy, max_path_length=max_length, animated=True, speedup=5)
                        self.queue.task_done()
                    elif max_length:
                        # No fresh DEMO: keep animating with stale parameters.
                        self.rollout(env, policy, max_path_length=max_length, animated=True, speedup=5)
        except KeyboardInterrupt:
            pass

    def close(self):
        """Drain pending messages, send STOP, and join the worker thread."""
        if self.worker_thread.is_alive():
            while (not self.queue.empty()):
                self.queue.get()
                self.queue.task_done()
            self.queue.put(Message(op=Op.STOP, args=None, kwargs=None))
            self.queue.join()
            self.worker_thread.join()

    # NOTE(review): takes no 'self' — upstream garage defines this as a
    # @staticmethod; the decorator appears stripped. Call it on the class,
    # not an instance, until confirmed.
    def get_plotters():
        return Plotter.__plotters

    def start(self):
        """Launch the worker thread, queue the initial UPDATE, and register
        close() to run at interpreter exit."""
        if (not self.worker_thread.is_alive()):
            tf.compat.v1.get_variable_scope().reuse_variables()
            self.worker_thread.start()
            self.queue.put(Message(op=Op.UPDATE, args=(self.env, self.policy), kwargs=None))
            atexit.register(self.close)

    def update_plot(self, policy, max_length=np.inf):
        """Queue a DEMO carrying the policy's current parameter values
        (silently ignored if the worker thread is not running)."""
        if self.worker_thread.is_alive():
            self.queue.put(Message(op=Op.DEMO, args=(policy.get_param_values(), max_length), kwargs=None))
def test_generation_sort_by_len(file_factory, trained_model):
    """Generating with sort_by_len=True must produce rows whose x sequences are
    in non-increasing length order."""
    with file_factory() as results_file:
        (trained_model_pickle, model_type) = trained_model
        log = invoke_wfp_script('generate', model_pickle=trained_model_pickle.name, data_path=DATA_PATH, output_pickle=results_file.name, sort_by_len=True, iter_lim=0)
        results = pd.read_pickle(results_file.name)
        prev_len = None
        # Each row's sequence must be no longer than the previous row's.
        for (_, row) in results.iterrows():
            assert ((prev_len is None) or (len(row.x) <= prev_len))
            prev_len = len(row.x)
def save_svg(state: State, filename: Union[(str, Path)], *, color_theme: Optional[Literal[('light', 'dark')]]=None, scale: Optional[float]=None) -> None:
    """Render *state* to an SVG file.

    MinAtar environments render themselves; every other environment goes
    through a Visualizer instance. *filename* must end in '.svg'.
    """
    assert str(filename).endswith('.svg')
    if not state.env_id.startswith('minatar'):
        visualizer = Visualizer(color_theme=color_theme, scale=scale)
        visualizer.get_dwg(states=state).saveas(filename)
    else:
        state.save_svg(filename=filename)
class SelectPolicy():
    """Abstract selection strategy: decides which PromptNode the fuzzer works on next."""

    def __init__(self, fuzzer: GPTFuzzer):
        # Back-reference to the owning fuzzer (gives subclasses access to its pool).
        self.fuzzer = fuzzer

    def select(self) -> PromptNode:
        """Return the next prompt node to mutate; subclasses must override."""
        raise NotImplementedError('SelectPolicy must implement select method.')

    def update(self, prompt_nodes: 'list[PromptNode]'):
        """Feedback hook called after evaluating mutated nodes; default is a no-op."""
        pass
def MkdirFileLock(*args, **kwds):
    """Factory for a mkdir-based file lock; forwards to mkdirlockfile.MkdirLockFile
    through the deprecation helper."""
    # Imported lazily to avoid the submodule import at package load time.
    from . import mkdirlockfile
    return _fl_helper(mkdirlockfile.MkdirLockFile, 'lockfile.mkdirlockfile', *args, **kwds)
class Highway(torch.nn.Module):
    """Highway network: per layer, y = gate * x + (1 - gate) * relu(proj(x)).

    Each layer is a single Linear projecting to 2*input_dim; the output is
    chunked into the transform half and the gate half.

    FIX: replaced the deprecated `F.sigmoid` with `torch.sigmoid` (same
    computation, supported spelling in current PyTorch).
    """

    def __init__(self, input_dim: int, num_layers: int=1):
        super(Highway, self).__init__()
        self.input_dim = input_dim
        self.layers = nn.ModuleList([nn.Linear(input_dim, (input_dim * 2)) for _ in range(num_layers)])
        self.activation = nn.ReLU()
        self.reset_parameters()

    def reset_parameters(self):
        for layer in self.layers:
            # The second half of the bias feeds the gate; initializing it to 1
            # biases the gate toward carrying the input through early in training.
            nn.init.constant_(layer.bias[self.input_dim:], 1)
            nn.init.constant_(layer.bias[:self.input_dim], 0)
            nn.init.xavier_normal_(layer.weight)

    def forward(self, x: torch.Tensor):
        for layer in self.layers:
            projection = layer(x)
            (proj_x, gate) = projection.chunk(2, dim=(- 1))
            proj_x = self.activation(proj_x)
            gate = torch.sigmoid(gate)
            x = ((gate * x) + ((1 - gate) * proj_x))
        return x
class RL2PPO(RL2):
    """RL^2 trained with PPO: a thin configuration wrapper over RL2 that pins
    the surrogate-clip policy-gradient loss and the first-order optimizer."""

    def __init__(self, rl2_max_path_length, meta_batch_size, task_sampler, env_spec, policy, baseline, scope=None, max_path_length=500, discount=0.99, gae_lambda=1, center_adv=True, positive_adv=False, fixed_horizon=False, lr_clip_range=0.01, max_kl_step=0.01, optimizer_args=None, policy_ent_coeff=0.0, use_softplus_entropy=False, use_neg_logli_entropy=False, stop_entropy_gradient=False, entropy_method='no_entropy', flatten_input=True, meta_evaluator=None, n_epochs_per_eval=10, name='PPO'):
        # Avoid a mutable default argument for the optimizer settings.
        if (optimizer_args is None):
            optimizer_args = dict()
        # Everything else is forwarded untouched; only pg_loss and optimizer are fixed here.
        super().__init__(rl2_max_path_length=rl2_max_path_length, meta_batch_size=meta_batch_size, task_sampler=task_sampler, env_spec=env_spec, policy=policy, baseline=baseline, scope=scope, max_path_length=max_path_length, discount=discount, gae_lambda=gae_lambda, center_adv=center_adv, positive_adv=positive_adv, fixed_horizon=fixed_horizon, pg_loss='surrogate_clip', lr_clip_range=lr_clip_range, max_kl_step=max_kl_step, optimizer=FirstOrderOptimizer, optimizer_args=optimizer_args, policy_ent_coeff=policy_ent_coeff, use_softplus_entropy=use_softplus_entropy, use_neg_logli_entropy=use_neg_logli_entropy, stop_entropy_gradient=stop_entropy_gradient, entropy_method=entropy_method, flatten_input=flatten_input, meta_evaluator=meta_evaluator, n_epochs_per_eval=n_epochs_per_eval, name=name)
def compute_num_params(G0, growth_factor, T0, D, levels):
    """Total parameter count across all hash-table levels: each level's table
    size (derived from its grid size) times D, force-aligned, summed."""
    def level_params(level):
        grid = compute_grid_size(G0, growth_factor, T0, level)
        table = compute_table_size(grid, T0)
        return force_align(table * D)
    return sum(level_params(level) for level in range(levels))
class SawyerAssemblyV1Policy(Policy):
    """Scripted policy for the Sawyer assembly task: pick up the wrench,
    carry it above the peg, and lower it on."""

    # NOTE(review): the bare name below looks like a decorator whose '@' prefix
    # was stripped during extraction (upstream metaworld decorates _parse_obs
    # with '@assert_fully_parsed' and marks _parse_obs/_desired_pos/_grab_effort
    # as @staticmethod) — confirm against upstream before calling on instances.
    _fully_parsed
    def _parse_obs(obs):
        # Observation layout: hand xyz, wrench xyz, then peg xyz after 3 unused slots.
        return {'hand_pos': obs[:3], 'wrench_pos': obs[3:6], 'peg_pos': obs[9:], 'unused_info': obs[6:9]}

    def get_action(self, obs):
        """Map an observation to a 4-dim action: P-controlled xyz delta toward
        the current waypoint plus a scalar gripper effort."""
        o_d = self._parse_obs(obs)
        action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
        action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=10.0)
        action['grab_effort'] = self._grab_effort(o_d)
        return action.array

    def _desired_pos(o_d):
        """Waypoint logic: hover over wrench -> descend -> lift -> carry -> lower onto peg."""
        pos_curr = o_d['hand_pos']
        pos_wrench = (o_d['wrench_pos'] + np.array([0.01, 0.0, 0.0]))
        pos_peg = (o_d['peg_pos'] + np.array([0.07, 0.0, 0.15]))
        if (np.linalg.norm((pos_curr[:2] - pos_wrench[:2])) > 0.02):
            # Not yet horizontally above the wrench: approach from 0.1 above it.
            return (pos_wrench + np.array([0.0, 0.0, 0.1]))
        elif (np.linalg.norm((pos_curr[:2] - pos_peg[:2])) <= 0.02):
            # Horizontally aligned with the peg: lower the wrench onto it.
            return (pos_peg + np.array([0.0, 0.0, (- 0.07)]))
        elif (abs((pos_curr[2] - pos_wrench[2])) > 0.05):
            # Above the wrench but too high: descend to grasp height.
            return (pos_wrench + np.array([0.0, 0.0, 0.03]))
        elif (abs((pos_curr[2] - pos_peg[2])) > 0.04):
            # Holding the wrench: rise to the carrying height first.
            return np.array([pos_curr[0], pos_curr[1], pos_peg[2]])
        else:
            # Carry the wrench horizontally toward the peg.
            return pos_peg

    def _grab_effort(o_d):
        """Open on approach (0.0), squeeze near the wrench (0.6), release at the end (-1.0)."""
        pos_curr = o_d['hand_pos']
        pos_wrench = (o_d['wrench_pos'] + np.array([0.01, 0.0, 0.0]))
        pos_peg = (o_d['peg_pos'] + np.array([0.07, 0.0, 0.15]))
        if ((np.linalg.norm((pos_curr[:2] - pos_wrench[:2])) > 0.02) or (abs((pos_curr[2] - pos_wrench[2])) > 0.1)):
            return 0.0
        elif (np.linalg.norm((pos_curr[:2] - pos_peg[:2])) > 0.01):
            return 0.6
        else:
            return (- 1.0)
# NOTE(review): the two leading '.parametrize(...)' lines look like
# '@pytest.mark.parametrize' decorators whose prefix was stripped during
# extraction — confirm against upstream.
.parametrize('action_dist, estimated_rewards_by_reg_model, description_1', valid_input_of_create_estimator_inputs)
.parametrize('metric, ground_truth_policy_value, description_2', valid_input_of_evaluation_performance_of_estimators)
def test_meta_evaluate_performance_of_estimators_using_valid_input_data(action_dist, estimated_rewards_by_reg_model, description_1: str, metric, ground_truth_policy_value, description_2: str, synthetic_bandit_feedback: BanditFeedback) -> None:
    """evaluate_performance_of_estimators must reproduce hand-computed
    relative-ee / squared-error values for the ipw and ipw3 estimators, both
    as a dict and via summarize_estimators_comparison."""
    # Expected metric values, computed by hand from the mocked policy value.
    if (metric == 'relative-ee'):
        eval_metric_ope_dict = {'ipw': np.abs((((mock_policy_value + ipw.eps) - ground_truth_policy_value) / ground_truth_policy_value)), 'ipw3': np.abs((((mock_policy_value + ipw3.eps) - ground_truth_policy_value) / ground_truth_policy_value))}
    else:
        eval_metric_ope_dict = {'ipw': (((mock_policy_value + ipw.eps) - ground_truth_policy_value) ** 2), 'ipw3': (((mock_policy_value + ipw3.eps) - ground_truth_policy_value) ** 2)}
    ope_ = OffPolicyEvaluation(bandit_feedback=synthetic_bandit_feedback, ope_estimators=[ipw, ipw3])
    performance = ope_.evaluate_performance_of_estimators(ground_truth_policy_value=ground_truth_policy_value, estimated_rewards_by_reg_model=estimated_rewards_by_reg_model, action_dist=action_dist, metric=metric)
    for (k, v) in performance.items():
        assert (k in eval_metric_ope_dict), 'Invalid key of performance response'
        assert (v == eval_metric_ope_dict[k]), 'Invalid value of performance response'
    performance_df = ope_.summarize_estimators_comparison(ground_truth_policy_value=ground_truth_policy_value, estimated_rewards_by_reg_model=estimated_rewards_by_reg_model, action_dist=action_dist, metric=metric)
    # NOTE(review): the tuple below never attaches the trailing message to the
    # assertion — this looks like a mangled 'assert ..., msg' statement from
    # extraction; assert_frame_equal still raises on mismatch, so the check holds.
    (assert_frame_equal(performance_df, pd.DataFrame(eval_metric_ope_dict, index=[metric]).T), 'Invalid summarization (performance)')
class MatchFirst(ParseExpression):
    """Composite parser that tries each alternative in order and returns the
    first successful match (pyparsing's '|' operator)."""

    def __init__(self, exprs, savelist=False):
        super(MatchFirst, self).__init__(exprs, savelist)
        # May match empty input iff at least one alternative can; with no
        # alternatives it trivially matches empty.
        if self.exprs:
            self.mayReturnEmpty = any((e.mayReturnEmpty for e in self.exprs))
        else:
            self.mayReturnEmpty = True

    def streamline(self):
        """Optimize the expression tree; in compat mode, mirror the
        alternatives' saveAsList setting."""
        super(MatchFirst, self).streamline()
        if __compat__.collect_all_And_tokens:
            self.saveAsList = any((e.saveAsList for e in self.exprs))
        return self

    def parseImpl(self, instring, loc, doActions=True):
        """Try each alternative at *loc*; if all fail, re-raise the exception
        that reached furthest into the input."""
        maxExcLoc = (- 1)
        maxException = None
        for e in self.exprs:
            try:
                ret = e._parse(instring, loc, doActions)
                return ret
            except ParseException as err:
                # Keep the deepest failure so the final error is the most informative.
                if (err.loc > maxExcLoc):
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError:
                # Ran off the end of the input while trying this alternative.
                if (len(instring) > maxExcLoc):
                    maxException = ParseException(instring, len(instring), e.errmsg, self)
                    maxExcLoc = len(instring)
        else:
            # for/else: no alternative returned — report the best failure.
            if (maxException is not None):
                # Use this expression's message rather than the child's.
                maxException.msg = self.errmsg
                raise maxException
            else:
                raise ParseException(instring, loc, 'no defined alternatives to match', self)

    def __ior__(self, other):
        # expr |= other  -->  append another alternative (strings are promoted).
        if isinstance(other, basestring):
            other = self._literalStringClass(other)
        return self.append(other)

    def __str__(self):
        if hasattr(self, 'name'):
            return self.name
        # Lazily build and cache the '{a | b | ...}' representation.
        if (self.strRepr is None):
            self.strRepr = (('{' + ' | '.join((_ustr(e) for e in self.exprs))) + '}')
        return self.strRepr

    def checkRecursion(self, parseElementList):
        # Detect left-recursion through any of the alternatives.
        subRecCheckList = (parseElementList[:] + [self])
        for e in self.exprs:
            e.checkRecursion(subRecCheckList)

    def _setResultsName(self, name, listAllMatches=False):
        # Warn when naming an alternation containing And: it may return a
        # single token now but the full token list in a future release.
        if ((not __compat__.collect_all_And_tokens) and __diag__.warn_multiple_tokens_in_named_alternation):
            if any((isinstance(e, And) for e in self.exprs)):
                warnings.warn('{0}: setting results name {1!r} on {2} expression may only return a single token for an And alternative, in future will return the full list of tokens'.format('warn_multiple_tokens_in_named_alternation', name, type(self).__name__), stacklevel=3)
        return super(MatchFirst, self)._setResultsName(name, listAllMatches)
def read_csv_Raed(path):
    """Load Raed's CSV, index it by a yearly DatetimeIndex named 'time', and
    drop the bookkeeping columns ('Unnamed: 0' index dump and 'Year').

    FIX: `DataFrame.drop('Year', 1)` relied on the positional `axis` argument,
    which was deprecated in pandas 1.x and removed in pandas 2.0; use the
    explicit `columns=` keyword instead.
    """
    frame = pd.read_csv(path)
    frame = frame.drop('Unnamed: 0', axis=1)
    # One timestamp per row: January 1st of that row's Year.
    frame.index = pd.to_datetime([f'{y}-01-01' for y in frame.Year])
    frame.index.name = 'time'
    return frame.drop(columns='Year')
def api_test(env: Env, num: int=100, use_key=True):
    """Run the full API conformance suite: the single-env checks followed by
    the batched checks, each over *num* iterations."""
    for check in (api_test_single, api_test_batch):
        check(env, num, use_key)
def disable_text_training(cfg):
    """Return a deep copy of *cfg* with the MultimodalTextModel frozen: no
    trainable layers, updates disabled, a single zero-lr epoch, and categorical
    features converted to text. The input config is left untouched."""
    new_cfg = copy.deepcopy(cfg)
    search_space = new_cfg['models']['MultimodalTextModel']['search_space']
    search_space['model.num_trainable_layers'] = 0
    search_space['model._disable_update'] = True
    search_space['optimization.num_train_epochs'] = 1
    search_space['preprocessing.categorical.convert_to_text'] = True
    search_space['optimization.lr'] = 0.0
    return new_cfg
class BaseTokenizer():
    """Vocabulary-backed tokenizer base class.

    Reserves the low ids (0-3 by default) for [UNK]/[PAD]/[CLS]/[SEP] and maps
    the supplied tokens to consecutive ids from *starting_index* upward.
    Subclasses must implement text_to_tokens / tokens_to_text.
    """

    def __init__(self, tokens: List[str], starting_index=None, init_token='[CLS]', eos_token='[SEP]', pad_token='[PAD]', unk_token='[UNK]'):
        if (starting_index is None):
            starting_index = 4
        self.pad_token = pad_token
        self.bos_token = init_token
        self.eos_token = eos_token
        self.unk_token = unk_token
        # id -> token; specials occupy ids 0..3 below the regular vocabulary.
        self.i2s = {(i + starting_index): c for (i, c) in enumerate(tokens)}
        self.i2s[0] = self.unk_token
        self.i2s[1] = self.pad_token
        self.i2s[2] = self.bos_token
        self.i2s[3] = self.eos_token
        # token -> id, the inverse mapping.
        self.s2i = {c: i for (i, c) in self.i2s.items()}
        self.pad_token_id = self.s2i[self.pad_token]
        self.bos_token_id = self.s2i[self.bos_token]
        self.eos_token_id = self.s2i[self.eos_token]
        self.unk_token_id = self.s2i[self.unk_token]

    def __len__(self):
        # Vocabulary size including the special tokens.
        return len(self.i2s)

    def vocab(self):
        """All known tokens, specials included."""
        return list(self.i2s.values())

    def text_to_tokens(self, text: str) -> List[str]:
        raise NotImplementedError()

    def tokens_to_text(self, tokens: List[str]) -> str:
        raise NotImplementedError()

    def tokenize(self, text: str, bos=False, eos=False):
        """Convert *text* to a list of ids, optionally wrapped in BOS/EOS.

        NOTE(review): tokens missing from the vocabulary raise KeyError here
        rather than mapping to unk_token_id — confirm whether that is intended.
        """
        tokens = [self.s2i[c] for c in self.text_to_tokens(text)]
        if bos:
            tokens.insert(0, self.bos_token_id)
        if eos:
            tokens.append(self.eos_token_id)
        return tokens

    def detokenize(self, tokens: List[int]):
        """Inverse of tokenize: strip a leading BOS and trailing EOS, cut at the
        first PAD, and join the remaining tokens back into text."""
        if (len(tokens) == 0):
            return ''
        if (tokens[0] == self.bos_token_id):
            tokens = tokens[1:]
        if (tokens[(- 1)] == self.eos_token_id):
            tokens = tokens[:(- 1)]
        try:
            padding_index = tokens.index(self.pad_token_id)
            tokens = tokens[:padding_index]
        except ValueError:
            # No padding present — keep the full sequence.
            pass
        return self.tokens_to_text([self.i2s[t] for t in tokens])

    def __call__(self, texts: (List[str] | torch.Tensor), is_tokenized=False, device=None):
        """Batch-tokenize *texts* into zero-padded id/mask/position tensors.

        The returned 'attention_mask' is inverted before returning, so True
        marks PADDING positions (key_padding_mask convention).
        """
        if (not is_tokenized):
            all_tokens = [self.tokenize(text) for text in texts]
        else:
            all_tokens = texts.tolist()
        tokens_batch = zero_pad_collator([{'tokens_ids': torch.tensor(tokens, dtype=torch.long, device=device), 'attention_mask': torch.ones(len(tokens), dtype=torch.bool, device=device), 'positions': torch.arange(0, len(tokens), dtype=torch.int, device=device)} for tokens in all_tokens])
        # Flip: ones marked real tokens above; consumers expect True == padding.
        tokens_batch['attention_mask'] = torch.logical_not(tokens_batch['attention_mask'])
        return tokens_batch
def nag(opfunc, x, config, state=None):
    """One step of Nesterov Accelerated Gradient (Torch optim style).

    opfunc(x) must return (f(x), df/dx). *x* is updated in place; per-step
    state ('dfdx' velocity, 'evalCounter', optional 'deltaParameters') lives in
    *state* (falls back to *config*). Returns (x, fx).

    FIXES:
    - The per-parameter learning-rate branch guarded buffer creation with
      `'deltaParameters' in state`, which is inverted — on the first step the
      buffer was never created and the following copy_ raised KeyError.
    - The removed positional `Tensor.add_(Number, Tensor)` overload is replaced
      with the supported `add_(tensor, alpha=...)` keyword form (same math).
    """
    if ((config is None) and (state is None)):
        raise ValueError('nag requires a dictionary to retain state between iterations')
    state = (state if (state is not None) else config)
    lr = config.get('learningRate', 0.001)
    lrd = config.get('learningRateDecay', 0)
    wd = config.get('weightDecay', 0)
    mom = config.get('momentum', 0.9)
    damp = config.get('dampening', mom)  # kept for config parity; not used below
    lrs = config.get('learningRates', None)
    state['evalCounter'] = state.get('evalCounter', 0)
    if (mom <= 0):
        raise ValueError('Momentum must be positive for Nesterov Accelerated Gradient')
    # Nesterov look-ahead: evaluate the gradient at x + mom * velocity.
    if ('dfdx' in state):
        x.add_(state['dfdx'], alpha=mom)
    (fx, dfdx) = opfunc(x)
    if (wd != 0):
        # L2 weight decay folded into the gradient.
        dfdx.add_(x, alpha=wd)
    # Learning rate annealed by the number of evaluations so far.
    clr = (lr / (1 + (state['evalCounter'] * lrd)))
    if ('dfdx' not in state):
        state['dfdx'] = torch.zeros_like(dfdx)
    else:
        state['dfdx'].mul_(mom)
    if (lrs is not None):
        # Per-parameter learning rates: scale the gradient element-wise.
        if ('deltaParameters' not in state):  # FIX: was `in`, so the buffer was never created
            state['deltaParameters'] = torch.empty_like(dfdx)
        state['deltaParameters'].copy_(lrs).mul_(dfdx)
        x.add_(state['deltaParameters'], alpha=(- clr))
        state['dfdx'].add_(state['deltaParameters'], alpha=(- clr))
    else:
        x.add_(dfdx, alpha=(- clr))
        state['dfdx'].add_(dfdx, alpha=(- clr))
    state['evalCounter'] += 1
    return (x, fx)
class ResNet(tf.keras.Model):
    """Functional-API ResNet classifier; *depth* selects the block type and the
    per-stage layer counts from _MODEL_CONFIG, optionally restoring weights
    from a checkpoint directory."""

    # depth -> {block function (residual vs bottleneck), blocks per stage}
    _MODEL_CONFIG = {10: {'block': residual_block, 'layers': [1, 1, 1, 1]}, 14: {'block': bottleneck_block, 'layers': [1, 1, 1, 1]}, 18: {'block': residual_block, 'layers': [2, 2, 2, 2]}, 26: {'block': bottleneck_block, 'layers': [2, 2, 2, 2]}, 34: {'block': residual_block, 'layers': [3, 4, 6, 3]}, 50: {'block': bottleneck_block, 'layers': [3, 4, 6, 3]}, 101: {'block': bottleneck_block, 'layers': [3, 4, 23, 3]}, 152: {'block': bottleneck_block, 'layers': [3, 8, 36, 3]}, 200: {'block': bottleneck_block, 'layers': [3, 24, 36, 3]}}

    def __init__(self, input_shape, depth, checkpoint=None, normalization_op_params=None):
        input_layer = tf.keras.Input(shape=input_shape, name='resnet_input')
        outputs = resnet_fn(input_layer, block_fn=ResNet._MODEL_CONFIG[depth]['block'], layers=ResNet._MODEL_CONFIG[depth]['layers'], normalization_op_params=normalization_op_params)
        super(ResNet, self).__init__(inputs=[input_layer], outputs=outputs, name=('resnet_' + str(depth)))
        if checkpoint:
            # Restore the newest checkpoint and insist every saved weight matched.
            latest_checkpoint = tf.train.latest_checkpoint(checkpoint)
            self.load_weights(latest_checkpoint).assert_consumed()
            logging.info('Initialized weights from {}'.format(latest_checkpoint))
        else:
            logging.warning('Proceeding with random initialization!')
def sn_dense(inputs, units, name='sn_dense'):
    """Fully-connected layer whose weight matrix is passed through
    spectral_normalize before the matmul; bias starts at zero. Variables are
    namespaced under the scope *name*."""
    with tf.variable_scope(name) as scope:
        weight = tf.get_variable('w', [inputs.get_shape()[1], units], tf.float32, initializer=DENSE_KERNEL_INITIALIZER)
        bias = tf.get_variable('b', [units], initializer=tf.zeros_initializer())
        return (tf.matmul(inputs, spectral_normalize(weight)) + bias)
class RefLion(MixinWeightDecayFused, RefSolver):
    """Reference Lion optimizer: keeps a momentum buffer and a saturating step
    counter per parameter key, delegating the actual update to _update_lion."""

    def __init__(self, lr, beta1, beta2):
        super().__init__()
        self.lr = _f(lr)
        self.beta1 = _f(beta1)
        self.beta2 = _f(beta2)
        self.m = {}  # per-key momentum buffers
        self.t = {}  # per-key step counters

    def _set_state_impl(self, key, param):
        # Fresh state: zero momentum, step count zero.
        self.m[key] = np.zeros_like(param)
        self.t[key] = 0

    def _update_impl(self, key, p, g):
        # Saturate the counter at int32 max instead of overflowing.
        step_cap = np.iinfo(np.int32).max
        self.t[key] = min(self.t[key] + 1, step_cap)
        _update_lion(p, g, self.m[key], self.lr, self.beta1, self.beta2, self.weight_decay_rate)
class UNet(nn.Module):
    """Standard U-Net: four downsampling stages whose intermediate outputs feed
    four upsampling stages as skip connections."""

    def __init__(self, n_channels, n_classes, bilinear=False):
        super(UNet, self).__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.bilinear = bilinear
        self.in_conv = UNetConvBlock(self.n_channels, 64)
        self.Down1 = Down(64, 128)
        self.Down2 = Down(128, 256)
        self.Down3 = Down(256, 512)
        self.Down4 = Down(512, 512)
        # Each Up block consumes the concatenation of the upsampled tensor
        # and its matching skip connection.
        self.Up1 = Up(512 + 512, 256, self.bilinear)
        self.Up2 = Up(256 + 256, 128, self.bilinear)
        self.Up3 = Up(128 + 128, 64, self.bilinear)
        self.Up4 = Up(64 + 64, 64, self.bilinear)
        self.out_conv = OutConv(64, n_classes)

    def forward(self, x):
        skips = []
        out = self.in_conv(x)
        # Contracting path: remember each stage's input as a skip connection.
        for down in (self.Down1, self.Down2, self.Down3, self.Down4):
            skips.append(out)
            out = down(out)
        # Expanding path: consume the skips in reverse order.
        for up, skip in zip((self.Up1, self.Up2, self.Up3, self.Up4), reversed(skips)):
            out = up(out, skip)
        return self.out_conv(out)
def test_bucket_deletion():
    """Smoke test: a Storage object backed by both S3 and GCS stores can be
    created from a local source and deleted without error."""
    print('Running test_bucket_deletion')
    storage_1 = storage.Storage(name=TEST_BUCKET_NAME, source=LOCAL_SOURCE_PATH)
    storage_1.add_store(StoreType.S3)
    storage_1.add_store(StoreType.GCS)
    # delete() is expected to remove the bucket from every attached store.
    storage_1.delete()
class GradientsInputs(VanillaGradients):
    """Gradients*Inputs attribution: element-wise product of the vanilla
    gradients with the (float32-cast) input images.

    FIX: restored @staticmethod — the method takes no `self`, so an instance
    call would misbind `images`; upstream tf-explain defines it as a
    staticmethod overriding VanillaGradients.compute_gradients.
    """

    @staticmethod
    def compute_gradients(images, model, class_index):
        gradients = VanillaGradients.compute_gradients(images, model, class_index)
        inputs = tf.cast(images, tf.float32)
        return tf.multiply(inputs, gradients)
def visualize(base_path, test_dataset, plot_dir, batch_size=4):
    """Render ground-truth head boxes over the test images and save the plots
    as JPEGs in *plot_dir*."""
    device = torch.device('cuda')
    dataset = HeadDataset(test_dataset, base_path, dataset_param={}, train=False)
    batch_iterator = iter(data.DataLoader(dataset, batch_size, shuffle=False, num_workers=4, collate_fn=coco_collate))
    for (ind, (images, targets)) in enumerate(tqdm(batch_iterator)):
        images = list((img.to(device) for img in images))
        # Scale back to uint8 pixel values for plotting.
        np_images = [(ims.cpu().numpy() * 255.0).astype(np.uint8) for ims in images]
        gt_boxes = [gt['boxes'].numpy().astype(np.float64) for gt in targets]
        for (np_im, gt_box) in zip(np_images, gt_boxes):
            plot_images = plot_ims(np_im, [], gt_box)
            # NOTE(review): the filename uses only the batch index, so images
            # within a batch overwrite each other and only the last one survives
            # — confirm whether per-image filenames were intended.
            imsave(osp.join(plot_dir, (str(ind) + '.jpg')), plot_images)
class OpenAIGPTTokenizerFast():
    """Placeholder that raises a helpful error when the 'tokenizers' backend is
    unavailable (transformers-style dummy object)."""

    def __init__(self, *args, **kwargs):
        requires_tokenizers(self)

    # NOTE(review): upstream transformers defines from_pretrained as a
    # @classmethod on its dummy objects; the decorator appears stripped here.
    def from_pretrained(self, *args, **kwargs):
        requires_tokenizers(self)
class TestBMUF(unittest.TestCase):
    """Spawn one training process per rank and verify BMUF keeps the two ranks'
    results (almost) identical under different sync schedules."""

    def bmuf_process(self, args, iterations):
        """Run single_gpu_training on every rank in spawned processes and
        compare the results collected in a shared Manager dict."""
        processes = []
        results = Manager().dict()
        ctx = torch.multiprocessing.get_context('spawn')
        for rank in range(args.distributed_world_size):
            p = ctx.Process(target=single_gpu_training, args=(args, rank, iterations, results))
            p.start()
            processes.append(p)
        for p in processes:
            p.join()
        # Exactly one result per rank is expected (world size 2 in these tests).
        assert (len(results) == 2)
        self.assertAlmostEqual(results[0], results[1])

    def test_bmuf_sync(self):
        # Single iteration: a global sync happens immediately.
        args = setup_args()
        iterations = 1
        self.bmuf_process(args, iterations)

    def test_warmup_sync(self):
        # Run exactly through the warmup phase so the warmup sync fires.
        args = setup_args()
        args.warmup_iterations = 20
        iterations = 20
        self.bmuf_process(args, iterations)

    def test_warmup_sync_bmuf_sync(self):
        # Warmup sync followed by one BMUF global sync (20 warmup + 5 = 25).
        args = setup_args()
        args.warmup_iterations = 20
        args.global_sync_iter = 5
        iterations = 25
        self.bmuf_process(args, iterations)

    def assertAlmostEqual(self, t1, t2):
        # Tensor-aware override of unittest's scalar assertAlmostEqual:
        # same shape and max absolute difference below 1e-4.
        self.assertEqual(t1.size(), t2.size(), 'size mismatch')
        self.assertLess((t1 - t2).abs().max(), 0.0001)
def parse_match_formulas(match_parse):
    """Convert a MatchParse's label->terms mapping into a list of formula atoms:
    Equals(label_formula, term) for non-entity terms, plus range constraints
    (pi >= measure for Angle entities, 180 >= label for Div terms).

    FIX: dict.iteritems() is Python-2-only and raises AttributeError on
    Python 3; .items() iterates the same pairs on both versions.
    """
    assert isinstance(match_parse, MatchParse)
    match_atoms = []
    for (label, terms) in match_parse.match_dict.items():
        for term in terms:
            assert isinstance(term, FormulaNode)
            if issubtype(term.return_type, 'entity'):
                # Entities yield no equality; an Angle adds pi >= MeasureOf(angle).
                if (term.signature.id == 'Angle'):
                    res = FormulaNode(signatures['Ge'], [FormulaNode(signatures['Pi'], []), FormulaNode(signatures['MeasureOf'], [term])])
                    match_atoms.append(res)
                continue
            left_term = prefix_to_formula(expression_parser.parse_prefix(label))
            atom = FormulaNode(signatures['Equals'], [left_term, term])
            match_atoms.append(atom)
            if (term.signature.id == 'Div'):
                # Divided measures are additionally bounded above by 180.
                res = FormulaNode(signatures['Ge'], [180, left_term])
                match_atoms.append(res)
    return match_atoms
class ASTNode():
    """Node in a production-based abstract syntax tree."""

    def __init__(self, nb=None, depth=None, children=None):
        # The constructor's 'nb' argument is exposed as the node id.
        self.id = nb
        self.depth = depth
        self.children = children
        # Grammar production applied at this node; assigned after construction.
        self.production = None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.