code stringlengths 101 5.91M |
|---|
def resolve_input_config(args, model_config=None, model=None):
    """Resolve preprocessing configuration (input size, interpolation,
    mean/std, fill color) from CLI args, an explicit model config dict,
    or ``model.config``.

    Precedence per field: explicit ``args`` value > ``model_config`` entry >
    hard-coded default.

    Args:
        args: argparse Namespace or dict of user options.
        model_config: optional dict of model defaults; if falsy, taken from
            ``model.config`` when available.
        model: optional model object carrying a ``config`` attribute.

    Returns:
        dict with keys 'input_size', 'interpolation', 'mean', 'std',
        'fill_color'.
    """
    if not isinstance(args, dict):
        args = vars(args)
    input_config = {}
    if not model_config and model is not None and hasattr(model, 'config'):
        model_config = model.config
    # BUG FIX: the original raised TypeError ("argument of type 'NoneType'
    # is not iterable") when neither a model_config nor a model with a
    # config attribute was supplied.
    if model_config is None:
        model_config = {}

    # Resolve input size as (channels, height, width).
    in_chans = 3
    input_size = (in_chans, 512, 512)
    if 'input_size' in model_config:
        input_size = tuple(model_config['input_size'])
    elif 'image_size' in model_config:
        input_size = (in_chans,) + tuple(model_config['image_size'])
    assert isinstance(input_size, tuple) and len(input_size) == 3
    input_config['input_size'] = input_size

    # Resolve resize interpolation mode.
    input_config['interpolation'] = 'bicubic'
    if 'interpolation' in args and args['interpolation']:
        input_config['interpolation'] = args['interpolation']
    elif 'interpolation' in model_config:
        input_config['interpolation'] = model_config['interpolation']

    # Resolve normalization mean; a single value is broadcast to all channels.
    input_config['mean'] = IMAGENET_DEFAULT_MEAN
    if 'mean' in args and args['mean'] is not None:
        mean = tuple(args['mean'])
        if len(mean) == 1:
            mean = mean * in_chans
        else:
            assert len(mean) == in_chans
        input_config['mean'] = mean
    elif 'mean' in model_config:
        input_config['mean'] = model_config['mean']

    # Resolve normalization std; a single value is broadcast to all channels.
    input_config['std'] = IMAGENET_DEFAULT_STD
    if 'std' in args and args['std'] is not None:
        std = tuple(args['std'])
        if len(std) == 1:
            std = std * in_chans
        else:
            assert len(std) == in_chans
        input_config['std'] = std
    elif 'std' in model_config:
        input_config['std'] = model_config['std']

    # Resolve padding fill color.
    input_config['fill_color'] = 'mean'
    if 'fill_color' in args and args['fill_color'] is not None:
        input_config['fill_color'] = args['fill_color']
    elif 'fill_color' in model_config:
        input_config['fill_color'] = model_config['fill_color']
    return input_config
def local_errors(ignore=False):
    """Push a fresh error list onto the error stack and yield it; on exit,
    release collected errors (ignoring them when ``ignore`` is true).

    NOTE(review): generator function — presumably wrapped with
    ``contextlib.contextmanager`` at its original definition site; confirm.
    """
    collected = []
    error_stack.append(collected)
    try:
        yield collected
    finally:
        release_errors(ignore=ignore)
def main(top_block_cls=tx_rx_hier_functionality_check, options=None):
    """Run the flowgraph until Enter is pressed or SIGINT/SIGTERM arrives."""
    tb = top_block_cls()

    def shutdown(sig=None, frame=None):
        # Stop the flowgraph cleanly before exiting on a signal.
        tb.stop()
        tb.wait()
        sys.exit(0)

    for signum in (signal.SIGINT, signal.SIGTERM):
        signal.signal(signum, shutdown)

    tb.start()
    try:
        input('Press Enter to quit: ')
    except EOFError:
        pass
    tb.stop()
    tb.wait()
class _AtrousSpatialPyramidPoolingModule(nn.Module):
def __init__(self, in_dim, reduction_dim=256, output_stride=16, rates=(6, 12, 18)):
super(_AtrousSpatialPyramidPoolingModule, self).__init__()
print('output_stride = ', output_stride)
if (output_stride == 8):
rates = [(2 * r) for r in rates]
elif (output_stride == 4):
rates = [(4 * r) for r in rates]
elif (output_stride == 16):
pass
elif (output_stride == 32):
rates = [(r // 2) for r in rates]
else:
raise 'output stride of {} not supported'.format(output_stride)
self.features = []
self.features.append(nn.Sequential(nn.Conv2d(in_dim, reduction_dim, kernel_size=1, bias=False), Norm2d(reduction_dim), nn.ReLU(inplace=True)))
for r in rates:
self.features.append(nn.Sequential(nn.Conv2d(in_dim, reduction_dim, kernel_size=3, dilation=r, padding=r, bias=False), Norm2d(reduction_dim), nn.ReLU(inplace=True)))
self.features = torch.nn.ModuleList(self.features)
self.img_pooling = nn.AdaptiveAvgPool2d(1)
self.img_conv = nn.Sequential(nn.Conv2d(in_dim, 256, kernel_size=1, bias=False), Norm2d(256), nn.ReLU(inplace=True))
def forward(self, x):
x_size = x.size()
img_features = self.img_pooling(x)
img_features = self.img_conv(img_features)
img_features = Upsample(img_features, x_size[2:])
out = img_features
for f in self.features:
y = f(x)
out = torch.cat((out, y), 1)
return out |
class ResNet50LatencyTable(LatencyTable):
    """Latency/FLOP lookup for ResNet50-style network configs.

    Latency prediction is unimplemented in this subclass; only analytic
    FLOP counting from a network-config dict is provided.
    """

    def query(self, **kwargs):
        raise NotImplementedError

    def predict_network_latency(self, net, image_size):
        raise NotImplementedError

    def predict_network_latency_given_config(self, net_config, image_size):
        raise NotImplementedError

    @staticmethod
    def count_flops_given_config(net_config, image_size=224):
        """Return the MFLOPs of the network described by ``net_config``.

        FIX: marked @staticmethod — the original ``def`` had no ``self``
        parameter, so calling it on an instance bound the instance to
        ``net_config``. Class-level calls behave exactly as before.

        Args:
            net_config: dict with 'input_stem', 'blocks' and 'classifier' keys.
            image_size: square input resolution, default 224.
        """
        flops = 0
        # Input stem convolutions (unwrap non-ConvLayer wrappers first).
        for layer_config in net_config['input_stem']:
            if layer_config['name'] != 'ConvLayer':
                layer_config = layer_config['conv']
            in_channel = layer_config['in_channels']
            out_channel = layer_config['out_channels']
            out_image_size = int((image_size - 1) / layer_config['stride'] + 1)
            flops += count_conv_flop(out_image_size, in_channel, out_channel,
                                     layer_config['kernel_size'], layer_config.get('groups', 1))
            image_size = out_image_size
        # Stride-2 downsampling between the stem and the residual blocks.
        image_size = int((image_size - 1) / 2 + 1)
        # Bottleneck blocks: 1x1 reduce, kxk grouped conv, 1x1 expand, plus a
        # 1x1 projection on the shortcut when shape changes.
        for block_config in net_config['blocks']:
            in_channel = block_config['in_channels']
            out_channel = block_config['out_channels']
            out_image_size = int((image_size - 1) / block_config['stride'] + 1)
            mid_channel = (block_config['mid_channels']
                           if block_config['mid_channels'] is not None
                           else round(out_channel * block_config['expand_ratio']))
            mid_channel = make_divisible(mid_channel, MyNetwork.CHANNEL_DIVISIBLE)
            flops += count_conv_flop(image_size, in_channel, mid_channel, 1, 1)
            flops += count_conv_flop(out_image_size, mid_channel, mid_channel,
                                     block_config['kernel_size'], block_config['groups'])
            flops += count_conv_flop(out_image_size, mid_channel, out_channel, 1, 1)
            if block_config['stride'] == 1 and in_channel == out_channel:
                pass  # identity shortcut, no extra FLOPs
            else:
                flops += count_conv_flop(out_image_size, in_channel, out_channel, 1, 1)
            image_size = out_image_size
        # Final classifier (fully connected layer, counted as 1x1 conv on 1x1 map).
        flops += count_conv_flop(1, net_config['classifier']['in_features'],
                                 net_config['classifier']['out_features'], 1, 1)
        return flops / 1000000.0
class TestEntity(unittest.TestCase):
    """Equality and de-duplication semantics of Entity subclasses."""

    def setUp(self):
        ner = ExtractionMethod.NER
        spelling = ExtractionMethod.SPELLING
        self.entity1 = StringEntity(0.95, ner, 'some string')
        self.entity2 = StringEntity(0.95, ner, 'some string')
        self.entity3 = StringEntity(0.95, spelling, 'some string')
        self.entity4 = PersonEntity(0.95, spelling, 'some string')
        self.entity5 = StringEntity(1, ner, '94301')
        self.entity6 = ZipCodeEntity(1, ner, '94301')

    def test_equality_with_same_attributes(self):
        # Same class and attributes compare equal.
        self.assertEqual(self.entity1, self.entity2)

    def test_inequality_with_different_attributes(self):
        # Different extraction method breaks equality.
        self.assertNotEqual(self.entity1, self.entity3)

    def test_inequality_with_different_classes1(self):
        self.assertNotEqual(self.entity3, self.entity4)

    def test_inequality_with_different_classes2(self):
        self.assertNotEqual(self.entity5, self.entity6)

    def test_unique(self):
        all_entities = [self.entity1, self.entity2, self.entity3,
                        self.entity4, self.entity5, self.entity6]
        self.assertEqual(Entity.unique(all_entities), [self.entity1, self.entity5])

    def test_unique_with_different_values_but_same_user_utt_value(self):
        zip_ent = ZipCodeEntity(0.8, ExtractionMethod.NER, '94301', '94301', span={'end': 5})
        loc_ent = LocationEntity(0.7, ExtractionMethod.NER, '94301', (('ZipCode', '94301'),), span={'end': 5})
        # The higher-scoring zip entity wins.
        self.assertEqual(Entity.unique([zip_ent, loc_ent]), [zip_ent])

    def test_unique_with_different_values_but_same_user_utt_value_order_swap(self):
        loc_ent = LocationEntity(0.7, ExtractionMethod.NER, '94301', (('ZipCode', '94301'),), span={'end': 5})
        zip_ent = ZipCodeEntity(0.8, ExtractionMethod.NER, '94301', '94301', span={'end': 5})
        # Input order must not change the winner.
        self.assertEqual(Entity.unique([loc_ent, zip_ent]), [zip_ent])

    def test_unique_with_different_values_but_same_user_utt_value_different_span(self):
        loc_ent = LocationEntity(0.7, ExtractionMethod.NER, '94301', (('ZipCode', '94301'),), span={'end': 5})
        zip_ent = ZipCodeEntity(0.8, ExtractionMethod.NER, '94301', '94301', span={'start': 5, 'end': 10})
        self.assertEqual(Entity.unique([loc_ent, zip_ent]), [zip_ent])

    def test_unique_by_value_wtih_all_unique_items(self):
        pair = [LocationEntity(0.7, ExtractionMethod.NER, user_utt_value='94301', normalized_value=(('ZipCode', '94301'),), span={'end': 5}),
                ZipCodeEntity(0.8, ExtractionMethod.NER, user_utt_value='94301', normalized_value='94301', span={'start': 5, 'end': 10})]
        self.assertEqual(Entity.unique_by_value(pair), pair)

    def test_unique_by_value_wtih_nonunique_items(self):
        pair = [StringEntity(0.6, ExtractionMethod.NER, user_utt_value='94301', span={'start': 5, 'end': 10}),
                ZipCodeEntity(0.8, ExtractionMethod.NER, user_utt_value='94301', normalized_value='94301', span={'start': 5, 'end': 10})]
        self.assertEqual(Entity.unique_by_value(pair), [pair[1]])

    def test_unique_by_user_utt_value_wtih_unique_items(self):
        pair = [ZipCodeEntity(0.6, ExtractionMethod.NER, user_utt_value='24307', normalized_value='24307', span={'start': 5, 'end': 10}),
                ZipCodeEntity(0.8, ExtractionMethod.NER, user_utt_value='94301', normalized_value='94301', span={'start': 5, 'end': 10})]
        self.assertEqual(Entity.unique_by_user_utt_value(pair), pair)

    def test_unique_by_user_utterance_value_with_nonunique_items(self):
        pair = [LocationEntity(0.7, ExtractionMethod.NER, user_utt_value='94301', normalized_value=(('ZipCode', '94301'),), span={'end': 5}),
                ZipCodeEntity(0.8, ExtractionMethod.NER, user_utt_value='94301', normalized_value='94301', span={'start': 5, 'end': 10})]
        self.assertEqual(Entity.unique_by_user_utt_value(pair), [pair[1]])
class SamplerTestCase(unittest.TestCase):
    """Smoke test for TrainingSampler iteration."""

    def test_training_sampler(self):
        # NOTE(review): some TrainingSampler implementations yield indices
        # indefinitely — confirm this loop terminates before relying on it.
        indices = TrainingSampler(5)
        for idx in indices:
            print(idx)
class Normal(base.Prior):
    """Gaussian prior parameterized by mean and variance."""

    def __init__(self, mean, var):
        self.mean = mean
        self.var = var
        # Second moment: E[x^2] = var + mean^2.
        self.rho = self.var + self.mean ** 2

    def iter_v(self, a):
        # Updated variance given precision increment a: 1 / (a + 1/var).
        return 1.0 / (a + 1.0 / self.var)

    def eval_i(self, a):
        quadratic = self.rho * a + self.mean ** 2 / self.var
        return 0.5 * quadratic - 0.5 * np.log(1 + a * self.var)

    def eval_rho(self, rho_prev):
        # The prior's second moment is independent of the previous estimate.
        return self.rho
def parse_argsV2():
    """Parse command-line arguments for SaliencySegmentation training/eval.

    Returns:
        argparse.Namespace with the parsed options.
    """
    def _str2bool(v):
        # BUG FIX: the original used ``type=bool`` for --show_image_summary,
        # under which any non-empty string (including 'False') parsed as True.
        if isinstance(v, bool):
            return v
        if v.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        if v.lower() in ('no', 'false', 'f', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected.')

    parser = argparse.ArgumentParser(description='SaliencySegmentation')
    parser.add_argument('--config', '-c', required=True, type=str)
    parser.add_argument('--num_workers', dest='num_workers', help='num_workers', default=4, type=int)
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--print_freq', dest='print_freq', help='Frequency of statistics printing', default=1, type=int)
    parser.add_argument('--loadepoch', dest='loadepoch', help='epoch to load model', default=None, type=str)
    parser.add_argument('--task', dest='task', help='task in <train, eval>', default='train', type=str)
    parser.add_argument('--pretrained', dest='pretrained', help='load pretrained weights for PWCNet', default='weights/pwc_net.pth.tar', type=str)
    parser.add_argument('--wts', '-w', dest='wts', help='weights file to resume training', default=None, type=str)
    parser.add_argument('--show_image_summary', dest='show_image_summary', help='load the best model', default=False, type=_str2bool)
    args = parser.parse_args()
    return args
def calc_derv4gp(netD, conditional_strategy, real_data, fake_data, real_labels, device):
    """WGAN-GP gradient penalty: E[(||grad_x D(x_hat)||_2 - 1)^2] over random
    per-sample interpolates x_hat between real and fake data.

    Args:
        netD: discriminator; output unpacking depends on conditional_strategy.
        conditional_strategy: one of 'ContraGAN', 'Proxy_NCA_GAN',
            'NT_Xent_GAN', 'ProjGAN', 'no', 'ACGAN'.
        real_data, fake_data: (B, C, H, W) tensors.
        real_labels: labels forwarded to netD.
        device: target device for all tensors.

    Returns:
        Scalar tensor with the mean gradient penalty.

    Raises:
        NotImplementedError: for unknown conditional strategies.
    """
    batch_size, c, h, w = real_data.shape
    # One interpolation coefficient per sample, broadcast over C, H, W.
    alpha = torch.rand(batch_size, 1)
    alpha = alpha.expand(batch_size, real_data.nelement() // batch_size).contiguous().view(batch_size, c, h, w)
    alpha = alpha.to(device)
    real_data = real_data.to(device)
    # BUG FIX: the original only moved real_data; the interpolation below
    # fails if fake_data lives on a different device.
    fake_data = fake_data.to(device)
    interpolates = alpha * real_data + (1 - alpha) * fake_data
    # Modernized: detach + requires_grad_ replaces the deprecated
    # autograd.Variable wrapper, making interpolates a fresh leaf tensor.
    interpolates = interpolates.to(device).detach().requires_grad_(True)
    if conditional_strategy in ['ContraGAN', 'Proxy_NCA_GAN', 'NT_Xent_GAN']:
        _, _, disc_interpolates = netD(interpolates, real_labels)
    elif conditional_strategy in ['ProjGAN', 'no']:
        disc_interpolates = netD(interpolates, real_labels)
    elif conditional_strategy == 'ACGAN':
        _, disc_interpolates = netD(interpolates, real_labels)
    else:
        raise NotImplementedError
    gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
                              grad_outputs=torch.ones(disc_interpolates.size()).to(device),
                              create_graph=True, retain_graph=True, only_inputs=True)[0]
    gradients = gradients.view(gradients.size(0), -1)
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
    return gradient_penalty
class _REGISTRY_KEYS_NT(NamedTuple):
    """Canonical registry field names used during data setup.

    Immutable mapping of semantic roles to the string keys under which each
    field is stored in the registry; intended to be instantiated once and
    shared as a module-level constant.
    """
    X_KEY: str = 'X'
    BATCH_KEY: str = 'batch'
    LABELS_KEY: str = 'labels'
    PROTEIN_EXP_KEY: str = 'proteins'
    # Extra user-supplied covariates.
    CAT_COVS_KEY: str = 'extra_categorical_covs'
    CONT_COVS_KEY: str = 'extra_continuous_covs'
    INDICES_KEY: str = 'ind_x'
    SIZE_FACTOR_KEY: str = 'size_factor'
    # Keys used by minified (latent-only) datasets.
    MINIFY_TYPE_KEY: str = 'minify_type'
    LATENT_QZM_KEY: str = 'latent_qzm'
    LATENT_QZV_KEY: str = 'latent_qzv'
    OBSERVED_LIB_SIZE: str = 'observed_lib_size'
def unpack_traced_args_and_kwargs(*traced_args, **traced_kwargs):
    """Strip tracing wrappers: return the underlying ``_data`` payloads of
    all positional and keyword arguments as a (list, dict) pair."""
    plain_args = [wrapper._data for wrapper in traced_args]
    plain_kwargs = {name: wrapper._data for name, wrapper in traced_kwargs.items()}
    return (plain_args, plain_kwargs)
def test_compile_tf_graph_enc_dec_simple_recurrent_step():
    """Compile a recurrent encoder-decoder RETURNN config in step-by-step mode."""
    tmp_dir = tempfile.mkdtemp()
    config_path = os.path.join(tmp_dir, 'returnn.config')
    with open(config_path, 'wt') as config_file:
        config_file.write(rec_encoder_decoder_simple_config)
    run('tools/compile_tf_graph.py',
        '--output_file', os.path.join(tmp_dir, 'graph.metatxt'),
        '--rec_step_by_step', 'output',
        config_path)
class MockIntentClassifier(MockProcessingUnitMixin, IntentClassifier):
    """Test double for IntentClassifier: records fitting, never classifies."""

    def fit(self, dataset):
        # Only mark the unit as fitted; the dataset itself is ignored.
        self.fitted = True
        return self

    def get_intent(self, text, intents_filter):
        # Always abstains (no intent recognized).
        return None

    def get_intents(self, text):
        # Always returns an empty ranking.
        return []
def match_subj_with_event(verb_text, verb_index, subj_text, subj_index, sent, is_gold):
    """Attach a subject entity to its verb's event as the A0 (agent) argument.

    Does nothing when the event is missing, already has an arg0, the entity
    cannot be matched, or the entity already fills arg1/am-loc/am-tmp.
    """
    event = match_event(verb_text, verb_index, sent, is_gold)
    if event is None or event.arg0 is not None:
        return
    entity = match_entity(subj_text, subj_index, sent, is_gold)
    if entity is None:
        return
    mention = (entity.mention_str, entity.mention_id)
    # Skip if the entity already occupies another argument slot of the event.
    for occupied in (event.arg1, event.amloc, event.amtmp):
        if occupied is not None and occupied == mention:
            return
    event.arg0 = mention
    entity.add_predicate((event.mention_str, event.mention_id), 'A0')
class XLMRobertaForCausalLM(metaclass=DummyObject):
    """Auto-generated placeholder standing in for the real model class when
    the torch backend is unavailable; any instantiation raises via
    ``requires_backends``."""
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def profile(x, ops, n=100, device=None):
    """Benchmark parameter count, GFLOPs and forward/backward time of one or
    more ops on input ``x``, printing one table row per op.

    Args:
        x: input tensor (grad-enabled for the backward timing).
        ops: a module/callable or a list of them.
        n: number of timed iterations to average over.
        device: target device; defaults to cuda:0 when available, else CPU.
    """
    device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    x = x.to(device)
    x.requires_grad = True
    print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '')
    print(f'''
{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}''')
    for m in (ops if isinstance(ops, list) else [ops]):
        m = m.to(device) if hasattr(m, 'to') else m
        # Match the op's dtype to a half-precision input.
        m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m
        dtf, dtb, t = 0.0, 0.0, [0.0, 0.0, 0.0]
        try:
            # BUG FIX: the original divided by ``.0`` (i.e. 0.0), which always
            # raised ZeroDivisionError and silently reported 0 GFLOPS.
            # MACs -> GFLOPs: divide by 1e9 and multiply by 2.
            flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1e9 * 2
        except Exception:
            flops = 0
        for _ in range(n):
            t[0] = time_synchronized()
            y = m(x)
            t[1] = time_synchronized()
            try:
                _ = y.sum().backward()
                t[2] = time_synchronized()
            except Exception:
                # Ops with no grad path report NaN backward time.
                t[2] = float('nan')
            dtf += (t[1] - t[0]) * 1000 / n
            dtb += (t[2] - t[1]) * 1000 / n
        s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list'
        s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list'
        p = sum(param.numel() for param in m.parameters()) if isinstance(m, nn.Module) else 0
        print(f'{p:12}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}')
class ETHZ(BenchmarkDataset):
    """ETHZ benchmark dataset: downloads Swiss seismic waveforms and metadata
    via the ETH FDSN web service, or uses a pre-processed repository copy.

    NOTE(review): this copy of the file is damaged — the ``citation`` string in
    __init__ and the ``query`` f-string in _download_ethz_events_xml are
    truncated mid-literal, and several methods appear to have lost their
    decorators (@classmethod/@property/@staticmethod). Restore from the
    upstream source before use.
    """

    def __init__(self, **kwargs):
        # NOTE(review): string literal truncated here — full citation text lost.
        citation = 'Each individual network has its own DOI. From publicly available data:\nCH:
        seisbench.logger.warning('Check available storage and memory before downloading and general use of ETHZ dataset. Dataset size: waveforms.hdf5 ~22Gb, metadata.csv ~13Mb')
        # FDSN client is created lazily (see ``client`` below).
        self._client = None
        super().__init__(citation=citation, repository_lookup=True, **kwargs)

    # NOTE(review): takes ``cls`` — presumably @classmethod upstream; decorator missing here.
    def _fdsn_client(cls):
        # Factory for the ETH FDSN web-service client.
        return Client('ETH')

    # NOTE(review): accessed as ``self.client`` (no call) below — presumably @property.
    def client(self):
        # Create the FDSN client on first access and cache it.
        if (self._client is None):
            self._client = self._fdsn_client()
        return self._client

    def _download_dataset(self, writer, time_before=60, time_after=60, **kwargs):
        """Download raw waveforms per catalog event and emit traces via ``writer``.

        time_before/time_after: seconds of waveform kept before the earliest
        and after the latest pick of each station group.
        """
        seisbench.logger.info('No pre-processed version of ETHZ dataset found. Download and conversion of raw data will now be performed. This may take a while.')
        writer.data_format = {'dimension_order': 'CW', 'component_order': 'ZNE', 'measurement': 'velocity', 'unit': 'counts', 'instrument_response': 'not restituted'}
        inv = self.client.get_stations(includerestricted=False)
        inventory_mapper = InventoryMapper(inv)
        # Use the cached QuakeML catalog when available, otherwise fetch it.
        if (self.path / 'ethz_events.xml').exists():
            seisbench.logger.info('Reading quakeml event catalog from cache.')
            catalog = obspy.read_events(str((self.path / 'ethz_events.xml')), format='QUAKEML')
        else:
            catalog = self._download_ethz_events_xml()
        # Counters for stations missing from the inventory / with no data.
        self.not_in_inv_catches = 0
        self.no_data_catches = 0
        for event in catalog:
            (origin, mag, fm, event_params) = self._get_event_params(event)
            seisbench.logger.info(f'Downloading {event.resource_id}')
            # Group picks by (network, station, location) so each station's
            # waveform is fetched once per event.
            station_groups = defaultdict(list)
            for pick in event.picks:
                if (pick.phase_hint is None):
                    continue
                station_groups[waveform_id_to_network_station_location(pick.waveform_id.id)].append(pick)
            for picks in station_groups.values():
                try:
                    trace_params = self._get_trace_params(picks[0], inventory_mapper, event_params)
                except KeyError as e:
                    # Station not present in the inventory — skip it.
                    self.not_in_inv_catches += 1
                    seisbench.logger.debug(e)
                    continue
                # Window spans all picks of the group plus the margins.
                t_start = (min((pick.time for pick in picks)) - time_before)
                t_end = (max((pick.time for pick in picks)) + time_after)
                try:
                    waveforms = self.client.get_waveforms(network=trace_params['station_network_code'], station=trace_params['station_code'], location='*', channel=f"{trace_params['trace_channel']}*", starttime=t_start, endtime=t_end)
                except FDSNNoDataException as e:
                    seisbench.logger.debug(e)
                    self.no_data_catches += 1
                    continue
                rotate_stream_to_zne(waveforms, inv)
                if (len(waveforms) == 0):
                    seisbench.logger.debug(f"Found no waveforms for {waveform_id_to_network_station_location(picks[0].waveform_id.id)} in event {event_params['source_id']}")
                    continue
                # Enforce a single sampling rate across all components.
                sampling_rate = waveforms[0].stats.sampling_rate
                if any(((trace.stats.sampling_rate != sampling_rate) for trace in waveforms)):
                    seisbench.logger.warning(f'Found inconsistent sampling rates for {waveform_id_to_network_station_location(picks[0].waveform_id.id)} in event {event}.Resampling traces to common sampling rate.')
                    waveforms.resample(sampling_rate)
                trace_params['trace_name'] = f"{event_params['source_id']}_{waveform_id_to_network_station_location(picks[0].waveform_id.id)}"
                stream = waveforms.slice(t_start, t_end)
                (actual_t_start, data, completeness) = stream_to_array(stream, component_order=writer.data_format['component_order'])
                # Scale down completeness when the returned array is shorter
                # than the requested window.
                if ((int(((t_end - t_start) * sampling_rate)) + 1) > data.shape[1]):
                    completeness *= (data.shape[1] / (int(((t_end - t_start) * sampling_rate)) + 1))
                trace_params['trace_sampling_rate_hz'] = sampling_rate
                trace_params['trace_completeness'] = completeness
                trace_params['trace_has_spikes'] = trace_has_spikes(data)
                trace_params['trace_start_time'] = str(actual_t_start)
                # Per-phase pick annotations (arrival sample, status, polarity).
                for pick in picks:
                    sample = ((pick.time - actual_t_start) * sampling_rate)
                    trace_params[f'trace_{pick.phase_hint}_arrival_sample'] = int(sample)
                    trace_params[f'trace_{pick.phase_hint}_status'] = pick.evaluation_mode
                    if (pick.polarity is None):
                        trace_params[f'trace_{pick.phase_hint}_polarity'] = 'undecidable'
                    else:
                        trace_params[f'trace_{pick.phase_hint}_polarity'] = pick.polarity
                writer.add_trace({**event_params, **trace_params}, data)

    def _download_ethz_events_xml(self, starttime=obspy.UTCDateTime(2013, 1, 1), endtime=obspy.UTCDateTime(2021, 1, 1), minmagnitude=1.5):
        """Fetch the ETHZ event catalog (with arrivals) and cache it as QuakeML."""
        # NOTE(review): f-string literal truncated here — the FDSN query URL is lost.
        query = f'
        resp = requests.get(query)
        # First column of each (non-header) text-format line is the event id.
        ev_ids = [line.decode(sys.stdout.encoding).split('|')[0] for line in resp._content.splitlines()[1:]]
        catalog = obspy.Catalog(events=[])
        with tqdm(desc='Downloading quakeml event meta from FDSNWS', total=len(ev_ids)) as pbar:
            for ev_id in ev_ids:
                catalog += self.client.get_events(eventid=ev_id, includearrivals=True)
                pbar.update()
        catalog.write(str((self.path / 'ethz_events.xml')), format='QUAKEML')
        return catalog

    # NOTE(review): no ``self`` but called as ``self._get_event_params(event)`` — presumably @staticmethod.
    def _get_event_params(event):
        """Extract source metadata (and train/dev/test split) from an event."""
        origin = event.preferred_origin()
        mag = event.preferred_magnitude()
        fm = event.preferred_focal_mechanism()
        # Resource ids ending in '/1' are not unique — generate a random id instead.
        if (str(event.resource_id).split('/')[(- 1)] == '1'):
            chars = (string.ascii_lowercase + string.digits)
            source_id = ('sb_id_' + ''.join((random.choice(chars) for _ in range(6))))
        else:
            source_id = str(event.resource_id).split('/')[(- 1)]
        event_params = {'source_id': source_id, 'source_origin_time': str(origin.time), 'source_origin_uncertainty_sec': origin.time_errors['uncertainty'], 'source_latitude_deg': origin.latitude, 'source_latitude_uncertainty_km': origin.latitude_errors['uncertainty'], 'source_longitude_deg': origin.longitude, 'source_longitude_uncertainty_km': origin.longitude_errors['uncertainty'], 'source_depth_km': (origin.depth / 1000.0), 'source_depth_uncertainty_km': (origin.depth_errors['uncertainty'] / 1000.0)}
        # Chronological split boundaries (string comparison on ISO timestamps).
        if (str(origin.time) < '2019-01-08'):
            split = 'train'
        elif (str(origin.time) < '2019-09-04'):
            split = 'dev'
        else:
            split = 'test'
        event_params['split'] = split
        if (mag is not None):
            event_params['source_magnitude'] = mag.mag
            event_params['source_magnitude_uncertainty'] = mag.mag_errors['uncertainty']
            event_params['source_magnitude_type'] = mag.magnitude_type
            event_params['source_magnitude_author'] = mag.creation_info.agency_id
        if (fm is not None):
            # Principal axes may be absent — fall through silently.
            try:
                (t_axis, p_axis, n_axis) = (fm.principal_axes.t_axis, fm.principal_axes.p_axis, fm.principal_axes.n_axis)
                event_params['source_focal_mechanism_t_azimuth'] = t_axis.azimuth
                event_params['source_focal_mechanism_t_plunge'] = t_axis.plunge
                event_params['source_focal_mechanism_t_length'] = t_axis.length
                event_params['source_focal_mechanism_p_azimuth'] = p_axis.azimuth
                event_params['source_focal_mechanism_p_plunge'] = p_axis.plunge
                event_params['source_focal_mechanism_p_length'] = p_axis.length
                event_params['source_focal_mechanism_n_azimuth'] = n_axis.azimuth
                event_params['source_focal_mechanism_n_plunge'] = n_axis.plunge
                event_params['source_focal_mechanism_n_length'] = n_axis.length
            except AttributeError:
                pass
        return (origin, mag, fm, event_params)

    # NOTE(review): no ``self`` but called as ``self._get_trace_params(...)`` — presumably @staticmethod.
    def _get_trace_params(pick, inventory, event_params):
        """Build station metadata for a pick, including the back azimuth when
        station coordinates are known. Raises KeyError when the station is not
        in the inventory (handled by the caller)."""
        net = pick.waveform_id.network_code
        sta = pick.waveform_id.station_code
        (lat, lon, elev) = inventory.get_station_location(network=net, station=sta)
        if (not np.isnan((lat * lon))):
            back_azimuth = gps2dist_azimuth(event_params['source_latitude_deg'], event_params['source_longitude_deg'], lat, lon)[2]
        else:
            back_azimuth = np.nan
        trace_params = {'path_back_azimuth_deg': back_azimuth, 'station_network_code': net, 'station_code': sta, 'trace_channel': pick.waveform_id.channel_code[:2], 'station_location_code': pick.waveform_id.location_code, 'station_latitude_deg': lat, 'station_longitude_deg': lon, 'station_elevation_m': elev}
        return trace_params
class Model(nn.Module):
    """1-D U-Net-style audio super-resolution network.

    Encoder blocks halve the time axis; the bottleneck and decoder blocks
    upscale by ``scale`` with PixelUpscale; skip connections concatenate
    matching encoder features; a global residual adds the input at the end.
    """

    def __init__(self, mono=False, scale=2, odd_length=False, pad_type='zero', dropout=0.0, batchnorm=False):
        super(Model, self).__init__()
        # batchnorm and dropout are mutually exclusive in this architecture.
        assert (not (batchnorm and dropout))
        self.scale = scale
        if mono:
            n_input_ch = 1
            n_output_ch = 1
        else:
            n_input_ch = 2
            n_output_ch = 2
        # Channel / kernel-size schedules for encoder, bottleneck, decoder.
        ch_down_net = [n_input_ch, 128, 256, 512, 512]
        ker_down_net = [65, 33, 17, 9]
        ch_down_bottle = 512
        ker_down_bottle = 9
        ch_up_bottle = 512
        ker_up_bottle = 9
        ch_up_net = [512, 512, 256, 128, n_output_ch]
        ker_up_net = [17, 33, 65, 9]
        activation = nn.ReLU(inplace=True)
        # Encoder: halving conv blocks, optionally batch-normalized.
        self.down_net = nn.ModuleList()
        for i in range(len(ker_down_net)):
            down_block = nn.ModuleList()
            down_block.append(conv1d_halve(ch_down_net[i], ch_down_net[(i + 1)], ker_down_net[i], pad_type=pad_type))
            if batchnorm:
                down_block.append(nn.BatchNorm1d(ch_down_net[(i + 1)]))
            down_block.append(activation)
            down_block = nn.Sequential(*down_block)
            self.down_net.append(down_block)
        # Bottleneck: one more halving conv, then a same-size conv expanding
        # channels by ``scale`` so PixelUpscale can fold them into time.
        bottleneck = nn.ModuleList()
        bottleneck.append(conv1d_halve(ch_down_net[(- 1)], ch_down_bottle, ker_down_bottle, pad_type=pad_type))
        if (dropout > 0):
            bottleneck.append(nn.Dropout(dropout))
        bottleneck.append(activation)
        bottleneck.append(conv1d_same(ch_down_bottle, (ch_up_bottle * scale), ker_up_bottle, pad_type=pad_type))
        if (dropout > 0):
            bottleneck.append(nn.Dropout(dropout))
        bottleneck.append(activation)
        bottleneck.append(PixelUpscale(self.scale, odd_output=odd_length))
        self.bottleneck = nn.Sequential(*bottleneck)
        # Decoder: input channels doubled by the skip concatenation in forward().
        self.up_net = nn.ModuleList()
        for i in range(len(ker_up_net)):
            n_ch_in = (ch_up_net[i] * 2)
            n_ch_out = (ch_up_net[(i + 1)] * self.scale)
            up_block = nn.ModuleList()
            up_block.append(conv1d_same(n_ch_in, n_ch_out, ker_up_net[i], pad_type=pad_type))
            if (dropout > 0):
                up_block.append(nn.Dropout(dropout))
            if (i < (len(ker_up_net) - 1)):
                # No activation after the last block — it emits the output signal.
                up_block.append(activation)
            up_block.append(PixelUpscale(self.scale, odd_output=odd_length))
            up_block = nn.Sequential(*up_block)
            self.up_net.append(up_block)

    def forward(self, x):
        # Encoder pass, caching each block's output for the skip connections.
        y = x.clone()
        res = []
        for down_block in self.down_net:
            y = down_block(y)
            res.append(y)
        y = self.bottleneck(y)
        # Decoder pass: concatenate the matching encoder feature (reverse order).
        for (i, up_block) in enumerate(self.up_net):
            y = torch.cat((y, res[((- i) - 1)]), dim=1)
            y = up_block(y)
        # Global residual connection.
        # NOTE(review): requires y and x to share shape — depends on scale /
        # input-length combination; confirm with upstream usage.
        y = (y + x)
        return y
def sudo():
    """Return a privilege-elevation context for the current platform:
    'runas' options on Windows, a 'sudo' prefix for non-root POSIX users,
    and an empty option set when already running as root."""
    if IS_WINDOWS:
        return with_options({'runas': True})
    if os.geteuid() == 0:
        # Already root — nothing to elevate.
        return with_options({})
    return prefix('sudo')
class deeplab_xception_transfer_basemodel_synBN(deeplab_xception_synBN.DeepLabv3_plus):
    """DeepLabv3+ (synchronized BN) extended with a target-domain graph
    reasoning head: featuremap->graph projection, three graph convolutions,
    graph->featuremap back-projection, and a 1x1 skip convolution."""

    def __init__(self, nInputChannels=3, n_classes=7, os=16, input_channels=256, hidden_layers=128, out_channels=256):
        super(deeplab_xception_transfer_basemodel_synBN, self).__init__(nInputChannels=nInputChannels, n_classes=n_classes, os=os)
        # Target-domain graph reasoning pipeline (one node per class).
        self.target_featuremap_2_graph = gcn.Featuremaps_to_Graph(input_channels=input_channels, hidden_layers=hidden_layers, nodes=n_classes)
        self.target_graph_conv1 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        self.target_graph_conv2 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        self.target_graph_conv3 = gcn.GraphConvolution(hidden_layers, hidden_layers)
        self.target_graph_2_fea = gcn.Graph_to_Featuremaps(input_channels=input_channels, output_channels=out_channels, hidden_layers=hidden_layers, nodes=n_classes)
        # 1x1 projection applied to decoder features before fusing the graph branch.
        self.target_skip_conv = nn.Sequential(*[nn.Conv2d(input_channels, input_channels, kernel_size=1), nn.ReLU(True)])

    def load_source_model(self, state_dict):
        """Load a source-domain checkpoint, remapping generic 'graph' parameter
        names to their 'source_graph' counterparts.

        NOTE(review): ``new_state_dict[name] = 0`` stores only a dummy value to
        track visited names for the missing-key report at the end; also the
        parameter is copied twice (inside the try and again unconditionally
        after it) — the second ``copy_`` looks redundant and can raise
        uncaught; confirm against upstream.
        """
        own_state = self.state_dict()
        new_state_dict = OrderedDict()
        for (name, param) in state_dict.items():
            # Strip DataParallel prefixes.
            name = name.replace('module.', '')
            # Remap plain 'graph' parameters to the source branch.
            if (('graph' in name) and ('source' not in name) and ('target' not in name)):
                if ('featuremap_2_graph' in name):
                    name = name.replace('featuremap_2_graph', 'source_featuremap_2_graph')
                else:
                    name = name.replace('graph', 'source_graph')
            new_state_dict[name] = 0
            if (name not in own_state):
                # BatchNorm bookkeeping buffers are silently skipped.
                if ('num_batch' in name):
                    continue
                print('unexpected key "{}" in state_dict'.format(name))
                continue
            if isinstance(param, Parameter):
                # Unwrap nn.Parameter to its raw tensor before copying.
                param = param.data
            try:
                own_state[name].copy_(param)
            except:
                print('While copying the parameter named {}, whose dimensions in the model are {} and whose dimensions in the checkpoint are {}, ...'.format(name, own_state[name].size(), param.size()))
                continue
            own_state[name].copy_(param)
        # Report model parameters that received no checkpoint value.
        missing = (set(own_state.keys()) - set(new_state_dict.keys()))
        if (len(missing) > 0):
            print('missing keys in state_dict: "{}"'.format(missing))

    def get_target_parameter(self):
        # Split parameters into (target + semantic head) vs. all others.
        l = []
        other = []
        for (name, k) in self.named_parameters():
            if (('target' in name) or ('semantic' in name)):
                l.append(k)
            else:
                other.append(k)
        return (l, other)

    def get_semantic_parameter(self):
        # Parameters belonging to the semantic head only.
        l = []
        for (name, k) in self.named_parameters():
            if ('semantic' in name):
                l.append(k)
        return l

    def get_source_parameter(self):
        # Parameters belonging to the source-domain branch only.
        l = []
        for (name, k) in self.named_parameters():
            if ('source' in name):
                l.append(k)
        return l

    def forward(self, input, adj1_target=None, adj2_source=None, adj3_transfer=None):
        """ASPP + decoder forward pass, followed by target-domain graph
        reasoning fused back into the decoder features; the semantic output is
        upsampled to the input resolution."""
        (x, low_level_features) = self.xception_features(input)
        # ASPP branches plus global-average-pooling context.
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.aspp4(x)
        x5 = self.global_avg_pool(x)
        x5 = F.upsample(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
        x = torch.cat((x1, x2, x3, x4, x5), dim=1)
        x = self.concat_projection_conv1(x)
        x = self.concat_projection_bn1(x)
        x = self.relu(x)
        # Fuse with projected low-level features at their spatial resolution.
        x = F.upsample(x, size=low_level_features.size()[2:], mode='bilinear', align_corners=True)
        low_level_features = self.feature_projection_conv1(low_level_features)
        low_level_features = self.feature_projection_bn1(low_level_features)
        low_level_features = self.relu(low_level_features)
        x = torch.cat((x, low_level_features), dim=1)
        x = self.decoder(x)
        # Graph reasoning on the decoder output (adj1_target = target adjacency).
        graph = self.target_featuremap_2_graph(x)
        graph = self.target_graph_conv1.forward(graph, adj=adj1_target, relu=True)
        graph = self.target_graph_conv2.forward(graph, adj=adj1_target, relu=True)
        graph = self.target_graph_conv3.forward(graph, adj=adj1_target, relu=True)
        graph = self.target_graph_2_fea.forward(graph, x)
        # Residual fusion of skip-projected features with the graph branch.
        x = self.target_skip_conv(x)
        x = (x + graph)
        x = self.semantic(x)
        x = F.upsample(x, size=input.size()[2:], mode='bilinear', align_corners=True)
        return x
def run(args):
    """Evaluate CAM pseudo-labels against VOC ground truth and print IoU/mIoU."""
    dataset = VOCSemanticSegmentationDataset(split=args.chainer_eval_set, data_dir=args.voc12_root)
    labels = [dataset.get_example_by_keys(i, (1,))[0] for i in range(len(dataset))]
    preds = []
    for img_id in dataset.ids:
        cam_path = os.path.join(args.cam_out_dir, img_id + '.npy')
        cam_dict = np.load(cam_path, allow_pickle=True).item()
        # Prepend a background plane scored at the evaluation threshold.
        scored = np.pad(cam_dict['high_res'], ((1, 0), (0, 0), (0, 0)),
                        mode='constant', constant_values=args.cam_eval_thres)
        # Class keys shifted by one so index 0 maps to background.
        keys = np.pad(cam_dict['keys'] + 1, (1, 0), mode='constant')
        cls_labels = keys[np.argmax(scored, axis=0)]
        preds.append(cls_labels.copy())
    confusion = calc_semantic_segmentation_confusion(preds, labels)
    # Per-class IoU = TP / (GT + Pred - TP).
    gt_totals = confusion.sum(axis=1)
    pred_totals = confusion.sum(axis=0)
    true_pos = np.diag(confusion)
    iou = true_pos / (gt_totals + pred_totals - true_pos)
    print({'iou': iou, 'miou': np.nanmean(iou)})
class Wide_ResNet(nn.Module):
    """Wide-ResNet with depth = 6n+4 and width multiplier ``widen_factor``.

    Three groups of ``wide_basic`` blocks (strides 1, 2, 2) preceded by a
    3x3 stem conv and followed by BN+ReLU, 7x7 average pooling, and a
    linear classifier.
    """

    def __init__(self, depth, widen_factor, dropout_rate, num_classes):
        super(Wide_ResNet, self).__init__()
        self.in_planes = 16
        assert (depth - 4) % 6 == 0, 'Wide-resnet depth should be 6n+4'
        # FIX: integer division — the original produced a float block count
        # that every _wide_layer call had to truncate with int() again.
        n = (depth - 4) // 6
        k = widen_factor
        print('| Wide-Resnet %dx%d' % (depth, k))
        nStages = [16, 16 * k, 32 * k, 64 * k]
        self.conv1 = conv3x3(3, nStages[0])
        self.layer1 = self._wide_layer(wide_basic, nStages[1], n, dropout_rate, stride=1)
        self.layer2 = self._wide_layer(wide_basic, nStages[2], n, dropout_rate, stride=2)
        self.layer3 = self._wide_layer(wide_basic, nStages[3], n, dropout_rate, stride=2)
        self.bn1 = nn.BatchNorm2d(nStages[3], momentum=0.1)
        # NOTE(review): in_features hard-coded to 2560 rather than nStages[3];
        # this only matches specific (widen_factor, input-size) combinations —
        # confirm against the training input resolution.
        self.linear = nn.Linear(2560, num_classes)

    def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
        """Stack ``num_blocks`` blocks; only the first uses ``stride``."""
        strides = [stride] + [1] * (int(num_blocks) - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, dropout_rate, stride))
            self.in_planes = planes
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = F.relu(self.bn1(out))
        # 7x7 average pooling, then flatten for the classifier.
        out = F.avg_pool2d(out, 7)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def register_types(module):
    """Register ns-3 propagation-module types with a pybindgen module.

    Auto-generated-style binding registration: declares enums, classes,
    template instantiations and type aliases in dependency order (base
    classes before subclasses), then recurses into nested C++ namespaces.
    Ordering matters — do not reorder these calls.
    """
    root_module = module.get_root()
    # Module-level enums.
    module.add_enum('EnvironmentType', ['UrbanEnvironment', 'SubUrbanEnvironment', 'OpenAreasEnvironment'])
    module.add_enum('CitySize', ['SmallCity', 'MediumCity', 'LargeCity'])
    # Core attribute/callback infrastructure from ns.core.
    module.add_class('AttributeConstructionList', import_from_module='ns.core')
    module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
    typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator', u'ns3::AttributeConstructionList::CIterator')
    typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator*', u'ns3::AttributeConstructionList::CIterator*')
    typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator&', u'ns3::AttributeConstructionList::CIterator&')
    module.add_class('CallbackBase', import_from_module='ns.core')
    # DefaultDeleter<T> template instantiations used by SimpleRefCount below.
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeChecker'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeValue'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation'])
    module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor'])
    module.add_class('Hasher', import_from_module='ns.core')
    module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
    module.add_class('ObjectDeleter', import_from_module='ns.core')
    module.add_class('PropagationCache', template_parameters=['ns3::JakesProcess'])
    # SimpleRefCount base for ns3::Object (intrusive reference counting).
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('TimeWithUnit', import_from_module='ns.core')
    module.add_class('TypeId', import_from_module='ns.core')
    module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
    module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
    typehandlers.add_type_alias(u'uint32_t', u'ns3::TypeId::hash_t')
    typehandlers.add_type_alias(u'uint32_t*', u'ns3::TypeId::hash_t*')
    typehandlers.add_type_alias(u'uint32_t&', u'ns3::TypeId::hash_t&')
    module.add_class('Vector2D', import_from_module='ns.core')
    module.add_class('Vector3D', import_from_module='ns.core')
    module.add_class('empty', import_from_module='ns.core')
    module.add_class('int64x64_t', import_from_module='ns.core')
    module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
    module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
    # Propagation-model class hierarchy (this module's own types).
    module.add_class('PropagationDelayModel', parent=root_module['ns3::Object'])
    module.add_class('PropagationLossModel', parent=root_module['ns3::Object'])
    module.add_class('RandomPropagationDelayModel', parent=root_module['ns3::PropagationDelayModel'])
    module.add_class('RandomPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('RandomVariableStream', import_from_module='ns.core', parent=root_module['ns3::Object'])
    module.add_class('RangePropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('SequentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    # SimpleRefCount<T, ns3::empty, DefaultDeleter<T>> instantiations.
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
    module.add_class('ThreeLogDistancePropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('Time', import_from_module='ns.core')
    module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Time )', u'ns3::Time::TracedCallback')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Time )*', u'ns3::Time::TracedCallback*')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Time )&', u'ns3::Time::TracedCallback&')
    root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
    module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    module.add_class('TriangularRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('TwoRayGroundPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('UniformRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('WeibullRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('ZetaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('ZipfRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('ConstantRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('ConstantSpeedPropagationDelayModel', parent=root_module['ns3::PropagationDelayModel'])
    module.add_class('Cost231PropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('DeterministicRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('EmpiricalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor'])
    module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('ErlangRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('ExponentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('FixedRssLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('FriisPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('GammaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('ItuR1411LosPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('ItuR1411NlosOverRooftopPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('JakesProcess', parent=root_module['ns3::Object'])
    module.add_class('JakesPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('Kun2600MhzPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('LogDistancePropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('LogNormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('MatrixPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('MobilityModel', import_from_module='ns.mobility', parent=root_module['ns3::Object'])
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::MobilityModel const > )', u'ns3::MobilityModel::TracedCallback')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::MobilityModel const > )*', u'ns3::MobilityModel::TracedCallback*')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::MobilityModel const > )&', u'ns3::MobilityModel::TracedCallback&')
    module.add_class('NakagamiPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('NormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('OkumuraHataPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
    module.add_class('ParetoRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
    module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('Vector2DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('Vector2DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('Vector3DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
    module.add_class('Vector3DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['ns3::ObjectBase *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<const ns3::MobilityModel>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
    # Convenience aliases: Vector == Vector3D (and its Value/Checker types).
    typehandlers.add_type_alias(u'ns3::Vector3D', u'ns3::Vector')
    typehandlers.add_type_alias(u'ns3::Vector3D*', u'ns3::Vector*')
    typehandlers.add_type_alias(u'ns3::Vector3D&', u'ns3::Vector&')
    module.add_typedef(root_module['ns3::Vector3D'], 'Vector')
    typehandlers.add_type_alias(u'ns3::Vector3DValue', u'ns3::VectorValue')
    typehandlers.add_type_alias(u'ns3::Vector3DValue*', u'ns3::VectorValue*')
    typehandlers.add_type_alias(u'ns3::Vector3DValue&', u'ns3::VectorValue&')
    module.add_typedef(root_module['ns3::Vector3DValue'], 'VectorValue')
    typehandlers.add_type_alias(u'ns3::Vector3DChecker', u'ns3::VectorChecker')
    typehandlers.add_type_alias(u'ns3::Vector3DChecker*', u'ns3::VectorChecker*')
    typehandlers.add_type_alias(u'ns3::Vector3DChecker&', u'ns3::VectorChecker&')
    module.add_typedef(root_module['ns3::Vector3DChecker'], 'VectorChecker')
    # Recurse into nested C++ namespaces.
    nested_module = module.add_cpp_namespace('FatalImpl')
    register_types_ns3_FatalImpl(nested_module)
    nested_module = module.add_cpp_namespace('Hash')
    register_types_ns3_Hash(nested_module)
    nested_module = module.add_cpp_namespace('TracedValueCallback')
    register_types_ns3_TracedValueCallback(nested_module)
def get_output_dir(module):
    """Return the absolute tracktor output directory for *module*, creating it if needed.

    The directory is ``<ROOT_DIR>/output/tracktor/<module>``.
    """
    outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'output', 'tracktor', module))
    # exist_ok=True avoids the check-then-create race of the previous
    # os.path.exists() + os.makedirs() pair (another process could create
    # the directory between the two calls and trigger FileExistsError).
    os.makedirs(outdir, exist_ok=True)
    return outdir
.parametrize('clear_buffer, clear_no_need_grad', [(False, False), (True, False), (False, True)])
def test_intermediate_outputs(clear_buffer, clear_no_need_grad):
    # forward_all() must yield identical values for an intermediate variable
    # whether or not it is marked persistent, under all buffer-clearing modes.
    rng = np.random.RandomState(311)
    nn.prefer_cached_array(False)  # disable cached arrays so clearing takes effect
    x = nn.Variable.from_numpy_array(rng.randn(2, 10))
    # Graph 1: plain intermediate h1, requested explicitly as an output.
    h1 = (x + 1)
    y1 = (h1 + 1)
    # Graph 2: same computation, but h2 is additionally marked persistent.
    h2 = (x + 1)
    h2.persistent = True
    y2 = (h2 + 1)
    nn.forward_all([h1, y1], clear_buffer=clear_buffer, clear_no_need_grad=clear_no_need_grad)
    nn.forward_all([h2, y2], clear_buffer=clear_buffer, clear_no_need_grad=clear_no_need_grad)
    # Both intermediate and final outputs must match across the two graphs.
    assert_allclose(h1.d, h2.d)
    assert_allclose(y1.d, y2.d)
    nn.prefer_cached_array(True)  # restore the global default for other tests
def main(argv):
    """Entry point: parse CLI options and an optional config file, load the
    requested agent and environment classes dynamically, then run the
    agent/environment interaction loop.

    Args:
        argv: argument list excluding the program name (as for getopt).
    """
    # Built-in defaults for every recognised option.
    default_options = {}
    default_options['agent'] = 'mc_aixi_ctw'
    default_options['agent-horizon'] = 5
    default_options['ct-depth'] = 30
    default_options['environment'] = 'coin_flip'
    default_options['exploration'] = 0.0
    default_options['explore-decay'] = 1.0
    default_options['learning-period'] = 0
    default_options['mc-simulations'] = 300
    default_options['profile'] = False
    default_options['terminate-age'] = 0
    default_options['verbose'] = False
    # Command-line options override both defaults and config-file values.
    command_line_options = {}
    try:
        (opts, args) = getopt.gnu_getopt(argv, 'd:e:h:l:m:o:pr:t:vx:', ['explore-decay=', 'environment=', 'agent-horizon=', 'learning-period=', 'mc-simulations=', 'option', 'profile', 'terminate-age=', 'ct-depth=', 'verbose', 'exploration='])
        for (opt, arg) in opts:
            if (opt == '--help'):
                usage()
            if (opt in ('-d', '--explore-decay')):
                command_line_options['explore-decay'] = float(arg)
                continue
            if (opt in ('-e', '--environment')):
                command_line_options['environment'] = str(arg)
                continue
            if (opt in ('-h', '--agent-horizon')):
                command_line_options['agent-horizon'] = int(arg)
                continue
            if (opt in ('-l', '--learning-period')):
                command_line_options['learning-period'] = int(arg)
                continue
            if (opt in ('-m', '--mc-simulations')):
                command_line_options['mc-simulations'] = int(arg)
                continue
            if (opt in ('-o', '--option')):
                parts = arg.split('=')
                if (len(parts) > 1):
                    key = parts[0].strip()
                    # Re-join so values containing '=' survive intact.
                    value = '='.join(parts[1:])
                    command_line_options[key] = value
                else:
                    # BUG FIX: the second message string was formatted with
                    # `% str(arg)` despite containing no conversion specifier,
                    # which raised TypeError instead of printing the hint.
                    print(("Extra option '-o %s' is invalid. " % str(arg)) + "This needs to be in '-o key=value' format.")
                    usage()
                continue
            if (opt in ('-p', '--profile')):
                command_line_options['profile'] = True
                continue
            if (opt in ('-r', '--terminate-age')):
                command_line_options['terminate-age'] = int(arg)
                continue
            if (opt in ('-t', '--ct-depth')):
                command_line_options['ct-depth'] = int(arg)
                continue
            if (opt in ('-v', '--verbose')):
                command_line_options['verbose'] = True
                continue
            if (opt in ('-x', '--exploration')):
                command_line_options['exploration'] = float(arg)
                continue
    except getopt.GetoptError:
        # usage() is expected to terminate the program on invalid options.
        usage()
    if (len(args) > 0):
        # A positional argument names a configuration file.
        filename = args[0]
        if (not os.path.exists(filename)):
            print(("Expected argument '%s' to be a configuration filename." % str(filename)))
            usage()
        # Close the file deterministically instead of leaking the handle.
        with open(filename, 'r') as config_file:
            config_contents = config_file.read()
        # RawConfigParser requires a section header; synthesise one if absent.
        if (config_contents.find('[environment]') == (- 1)):
            config_contents = (('[environment]' + os.linesep) + config_contents)
        # BUG FIX: `StringIO.StringIO` is the Python 2 module name; this file
        # already uses the Python 3 `configparser`, so it raised NameError.
        # io.StringIO works on both interpreters.
        import io
        config_stringio = io.StringIO(config_contents)
        config = configparser.RawConfigParser(default_options)
        # read_file() supersedes readfp(), which was removed in Python 3.12.
        config.read_file(config_stringio)
        options = dict(config.items('environment'))
    else:
        options = default_options
    options.update(command_line_options)
    verbose = bool(options.get('verbose', False))
    if verbose:
        for (option_name, option_value) in list(options.items()):
            print(("OPTION: '%s' = '%s'" % (str(option_name), str(option_value))))
        # Header line for the per-cycle log emitted by the interaction loop.
        message = ('cycle, observation, reward, action, explored, ' + 'explore_rate, total reward, average reward, time, model size')
        print(message)
    # --- Dynamically import the agent class. ---
    agent_name = options['agent']
    if (agent_name.count('.') == 0):
        # Bare names resolve inside the bundled agents package.
        agent_package_name = ('pyaixi.agents.' + agent_name)
    else:
        agent_package_name = agent_name
    try:
        agent_module = __import__(agent_package_name, globals(), locals(), [agent_name], 0)
    except Exception as e:
        sys.stderr.write((("ERROR: loading agent module '%s' caused error '%s'. Exiting." % (str(agent_name), str(e))) + os.linesep))
        sys.exit(1)
    # Pick the first class in the module that directly subclasses Agent.
    agent_class = None
    agent_classname = ''
    for name in dir(agent_module):
        obj = getattr(agent_module, name)
        if (inspect.isclass(obj) and ('Agent' in [cls.__name__ for cls in obj.__bases__])):
            agent_class = obj
            agent_classname = name
            break
    if (agent_class is None):
        sys.stderr.write(((("ERROR: agent module '%s' does not contain " % str(agent_name)) + ("a valid AIXI agent subclass. (Got '%s' instead.) Exiting." % str(agent_classname))) + os.linesep))
        sys.exit(1)
    # --- Dynamically import the environment class (same scheme as agents). ---
    environment_name = options['environment']
    if (environment_name.count('.') == 0):
        environment_package_name = ('pyaixi.environments.' + environment_name)
    else:
        environment_package_name = environment_name
    try:
        environment_module = __import__(environment_package_name, globals(), locals(), [environment_name], 0)
    except Exception as e:
        sys.stderr.write((("ERROR: loading environment module '%s' caused error '%s'. Exiting." % (str(environment_name), str(e))) + os.linesep))
        sys.exit(1)
    environment_class = None
    environment_classname = ''
    for (name, obj) in inspect.getmembers(environment_module):
        if (hasattr(obj, '__bases__') and ('Environment' in [cls.__name__ for cls in obj.__bases__])):
            environment_class = obj
            environment_classname = name
            break
    if (environment_class is None):
        sys.stderr.write(((("ERROR: environment module '%s' does not contain " % str(environment_name)) + ("a valid AIXI environment subclass. (Got '%s' instead.) Exiting." % str(environment_classname))) + os.linesep))
        sys.exit(1)
    # Instantiate the environment, then expose its dimensions to the agent.
    environment = environment_class(options=options)
    options['action-bits'] = environment.action_bits()
    options['observation-bits'] = environment.observation_bits()
    options['percept-bits'] = environment.percept_bits()
    options['reward-bits'] = environment.reward_bits()
    options['max-action'] = environment.maximum_action()
    options['max-observation'] = environment.maximum_observation()
    options['max-reward'] = environment.maximum_reward()
    agent = agent_class(environment=environment, options=options)
    # Run either under the profiler or directly.
    if bool(options.get('profile', False)):
        profile.runctx('interaction_loop(agent = agent, environment = environment, options = options)', globals(), locals())
    else:
        interaction_loop(agent=agent, environment=environment, options=options)
def reduce_monos(lrtoks):
    """Collapse each unary 'NOT' with its following operand, mutating in place.

    Each 'NOT' token is replaced by ``eval_mon_op(['NOT', operand])`` and the
    operand token is removed from the list. Returns None.
    """
    idx = 0
    while idx < len(lrtoks):
        if lrtoks[idx] == 'NOT':
            # Evaluate the unary operator against its right-hand operand,
            # then drop the consumed operand from the token list.
            lrtoks[idx] = eval_mon_op([lrtoks[idx], lrtoks[idx + 1]])
            del lrtoks[idx + 1]
        idx += 1
class title(html_tag):
    """HTML <title> element whose string content is exposed as a ``text`` property."""

    def _get_text(self):
        # Gather every string child and join them into a single unicode value.
        string_children = self.get(basestring)
        return u''.join(string_children)

    def _set_text(self, text):
        # Drop all existing children and replace them with the new text node.
        self.clear()
        self.add(text)

    text = property(_get_text, _set_text)
class DeconvolutionDataGrad(LinearDataGrad):
    """Data-gradient operator backed by a Deconvolution primitive.

    Delegates the linear-gradient machinery to LinearDataGrad while holding
    the concrete deconvolution function with the given geometry parameters.
    """

    def __init__(self, ctx, base_axis=1, pad=None, stride=None, dilation=None, group=1, channel_last=False, output_padding=None):
        super().__init__(ctx)
        # The underlying linear map whose data gradient this class computes.
        self._linear = _F.Deconvolution(
            ctx, base_axis, pad, stride, dilation, group, channel_last, output_padding)
.parametrize('clf', [SAGClassifier(loss='log', max_iter=20, verbose=0, random_state=0), SAGAClassifier(loss='log', max_iter=20, verbose=0, random_state=0), PySAGClassifier(loss='log', max_iter=20, random_state=0)])
def test_auto_stepsize(clf, bin_train_data):
    # With the automatic step size, each SAG-family solver should separate
    # the (easy) binary training set perfectly within 20 iterations.
    (X_bin, y_bin) = bin_train_data
    clf.fit(X_bin, y_bin)
    assert (clf.score(X_bin, y_bin) == 1.0)
def get_parents(phrases, p1_idx, p2_idx):
    """Return the shared ancestor chain of two phrases.

    Walks the parent trajectory of phrase ``p1_idx`` and returns its suffix
    starting at the first ancestor that also appears in the trajectory of
    phrase ``p2_idx``. Returns an empty list when the phrases share no
    ancestor (the original code raised NameError in that case by slicing
    with a stale/unbound loop variable).
    """
    parents1 = get_parent_trajectory(phrases, p1_idx)
    parents2 = get_parent_trajectory(phrases, p2_idx)
    for i, parent in enumerate(parents1):
        if parent in parents2:
            # Everything from the closest common ancestor upward is shared.
            return parents1[i:]
    return []
def _get_clipfn(size, signed=True):
    """Return a function clamping values to the integer range of ``size`` bytes.

    ``signed`` selects the signed vs. unsigned range bounds.
    """
    upper = _get_maxval(size, signed)
    lower = _get_minval(size, signed)

    def _clip(val):
        # Clamp from above first, then from below.
        return builtin_max(min(val, upper), lower)

    return _clip
def add_economic_dynamics(model, config):
    """Attach the economic-dynamics constraints (DICE-style growth model) to *model*.

    Each inner function defines one equation over a time index and is
    registered via ``add_constraint``. Variables live on ``model``;
    exogenous parameters/series come from ``config``.
    """
    def ygrosseq(model, time):
        # Gross output: Cobb-Douglas in labour L (scaled to billions) and
        # capital K, with total factor productivity al(t) and share gama.
        return (model.YGROSS[time] == ((config.al(time) * ((config.L[time] / 1000) ** (1 - config.gama))) * (model.K[time] ** config.gama)))
    add_constraint(model, ygrosseq)
    def yneteq(model, time):
        # Net output after climate damages (DAMFRAC is the damage fraction).
        return (model.YNET[time] == (model.YGROSS[time] * (1 - model.DAMFRAC[time])))
    add_constraint(model, yneteq)
    def yyeq(model, time):
        # Output available for consumption/investment after abatement cost.
        return (model.Y[time] == (model.YNET[time] - model.ABATECOST[time]))
    add_constraint(model, yyeq)
    def cc(model, time):
        # Consumption = output minus investment.
        return (model.C[time] == (model.Y[time] - model.I[time]))
    add_constraint(model, cc)
    def cpce(model, time):
        # Per-capita consumption (factor 1000 presumably converts units
        # between C and L scales — confirm against config definitions).
        return (model.CPC[time] == ((1000 * model.C[time]) / config.L[time]))
    add_constraint(model, cpce)
    def seq(model, time):
        # Investment follows the savings rate S applied to output.
        return (model.I[time] == (model.S[time] * model.Y[time]))
    add_constraint(model, seq)
    def kkeq(model, time):
        # Capital accumulation with depreciation dk over a tstep-year period;
        # no constraint is generated for the final period.
        if (time < config.numPeriods):
            return (model.K[(time + 1)] <= ((((1 - config.dk) ** config.tstep) * model.K[time]) + (config.tstep * model.I[time])))
        return Constraint.Skip
    add_constraint(model, kkeq)
    def rieq(model, time):
        # Real interest rate implied by the Ramsey rule (pure time preference
        # prstp, elasticity of marginal utility elasmu).
        if (time < config.numPeriods):
            return (model.RI[time] == (((1 + config.prstp) * ((model.CPC[(time + 1)] / model.CPC[time]) ** (config.elasmu / config.tstep))) - 1))
        return Constraint.Skip
    add_constraint(model, rieq)
def test_sdca_smooth_hinge_l1_only(bin_train_data):
    """SDCA with a pure-L1 penalty and smooth-hinge loss should fit the binary set perfectly."""
    X, y = bin_train_data
    estimator = SDCAClassifier(
        alpha=0.5, l1_ratio=1.0, loss='smooth_hinge',
        tol=0.01, max_iter=200, random_state=0)
    estimator.fit(X, y)
    assert estimator.score(X, y) == 1.0
def load_candidate_from_stream_with_score(f):
    """Parse a tab-separated candidate run from an iterable of lines.

    Each line is ``qid \\t pid \\t rank \\t score`` (extra columns ignored).

    Args:
        f: iterable of strings (e.g. an open file).

    Returns:
        dict mapping qid (int) to a list of ``(pid, rank, score)`` tuples
        in file order.

    Raises:
        IOError: if a line cannot be parsed.
    """
    qid_to_ranked_candidate_passages = {}
    for raw_line in f:
        fields = raw_line.strip().split('\t')
        # Catch only parse failures — the original bare `except:` also
        # swallowed KeyboardInterrupt/SystemExit.
        try:
            qid = int(fields[0])
            pid = int(fields[1])
            rank = int(fields[2])
            score = float(fields[3])
        except (ValueError, IndexError) as exc:
            raise IOError('"%s" is not valid format' % fields) from exc
        qid_to_ranked_candidate_passages.setdefault(qid, []).append((pid, rank, score))
    return qid_to_ranked_candidate_passages
def _impl(array, axis, keepdims, mask_identity, highlevel, behavior, attrs):
    """Internal worker for the argmin reduction.

    Normalises ``axis``, unwraps the high-level array to a low-level layout,
    applies the ArgMin reducer, and re-wraps the result according to
    ``highlevel``.
    """
    axis = regularize_axis(axis)
    with HighLevelContext(behavior=behavior, attrs=attrs) as ctx:
        # allow_record=False: records have no meaningful argmin;
        # primitive_policy='error': reject bare scalars outright.
        layout = ctx.unwrap(array, allow_record=False, primitive_policy='error')
    reducer = ak._reducers.ArgMin()
    # mask_identity controls whether empty reductions become missing values.
    out = ak._do.reduce(layout, reducer, axis=axis, mask=mask_identity, keepdims=keepdims, behavior=ctx.behavior)
    return ctx.wrap(out, highlevel=highlevel, allow_other=True)
class PMMNet(object):
    """SWIG-generated proxy for the SNAP multimodal network smart pointer (PMMNet).

    Every method is a thin delegate to the corresponding ``_snap.PMMNet_*``
    C-extension function; this wrapper is auto-generated and should not be
    edited by hand. Instances are created via the static ``New()`` factory —
    the constructor is deliberately disabled.
    """
    # SWIG memory-ownership flag for the underlying C++ object.
    thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        # Direct construction is disallowed; use PMMNet.New() instead.
        raise AttributeError('No constructor defined')
    __repr__ = _swig_repr
    def New():
        return _snap.PMMNet_New()
    New = staticmethod(New)
    __swig_destroy__ = _snap.delete_PMMNet
    def Save(self, SOut):
        return _snap.PMMNet_Save(self, SOut)
    def __deref__(self):
        return _snap.PMMNet___deref__(self)
    def __ref__(self):
        return _snap.PMMNet___ref__(self)
    def __call__(self):
        return _snap.PMMNet___call__(self)
    def Empty(self):
        return _snap.PMMNet_Empty(self)
    def Clr(self):
        return _snap.PMMNet_Clr(self)
    def GetRefs(self):
        return _snap.PMMNet_GetRefs(self)
    CRef = _swig_property(_snap.PMMNet_CRef_get)
    # --- Mode/cross network management ---
    def AddModeNet(self, ModeName):
        return _snap.PMMNet_AddModeNet(self, ModeName)
    def DelModeNet(self, *args):
        return _snap.PMMNet_DelModeNet(self, *args)
    def AddCrossNet(self, *args):
        return _snap.PMMNet_AddCrossNet(self, *args)
    def DelCrossNet(self, *args):
        return _snap.PMMNet_DelCrossNet(self, *args)
    def Load(self, SIn):
        return _snap.PMMNet_Load(self, SIn)
    # --- Name/id lookups ---
    def GetModeId(self, ModeName):
        return _snap.PMMNet_GetModeId(self, ModeName)
    def GetModeName(self, ModeId):
        return _snap.PMMNet_GetModeName(self, ModeId)
    def GetCrossId(self, CrossName):
        return _snap.PMMNet_GetCrossId(self, CrossName)
    def GetCrossName(self, CrossId):
        return _snap.PMMNet_GetCrossName(self, CrossId)
    def GetModeNetByName(self, ModeName):
        return _snap.PMMNet_GetModeNetByName(self, ModeName)
    def GetModeNetById(self, ModeId):
        return _snap.PMMNet_GetModeNetById(self, ModeId)
    def GetCrossNetByName(self, CrossName):
        return _snap.PMMNet_GetCrossNetByName(self, CrossName)
    def GetCrossNetById(self, CrossId):
        return _snap.PMMNet_GetCrossNetById(self, CrossId)
    # --- Iterators over mode/cross networks ---
    def GetCrossNetI(self, *args):
        return _snap.PMMNet_GetCrossNetI(self, *args)
    def BegCrossNetI(self, *args):
        return _snap.PMMNet_BegCrossNetI(self, *args)
    def EndCrossNetI(self, *args):
        return _snap.PMMNet_EndCrossNetI(self, *args)
    def GetModeNetI(self, *args):
        return _snap.PMMNet_GetModeNetI(self, *args)
    def BegModeNetI(self, *args):
        return _snap.PMMNet_BegModeNetI(self, *args)
    def EndModeNetI(self, *args):
        return _snap.PMMNet_EndModeNetI(self, *args)
    def GetModeNets(self):
        return _snap.PMMNet_GetModeNets(self)
    def GetCrossNets(self):
        return _snap.PMMNet_GetCrossNets(self)
    # --- Subgraph extraction and conversion to flat networks ---
    def GetSubgraphByCrossNet(self, CrossNetTypes):
        return _snap.PMMNet_GetSubgraphByCrossNet(self, CrossNetTypes)
    def GetSubgraphByModeNet(self, ModeNetTypes):
        return _snap.PMMNet_GetSubgraphByModeNet(self, ModeNetTypes)
    def ToNetwork(self, CrossNetTypes, NodeAttrMap, EdgeAttrMap):
        return _snap.PMMNet_ToNetwork(self, CrossNetTypes, NodeAttrMap, EdgeAttrMap)
    def ToNetwork2(self, CrossNetTypes, NodeAttrMap, EdgeAttrMap):
        return _snap.PMMNet_ToNetwork2(self, CrossNetTypes, NodeAttrMap, EdgeAttrMap)
    def ToNetworkMP(self, CrossNetNames):
        return _snap.PMMNet_ToNetworkMP(self, CrossNetNames)
def build_resnext():
    """Build a Kinetics-pretrained ResNeXt-101 3D backbone, on GPU, in eval mode.

    Loads the checkpoint from ``pretrained/resnext-101-kinetics.pth``,
    stripping the DataParallel ``module.`` prefix from state-dict keys.
    """
    net = resnext.resnet101(num_classes=400, shortcut_type='B', cardinality=32,
                            sample_size=112, sample_duration=16, last_fc=False)
    net = net.cuda()
    checkpoint_path = 'pretrained/resnext-101-kinetics.pth'
    assert os.path.exists(checkpoint_path)
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    # Keep only DataParallel-prefixed entries, dropping the 7-char 'module.' prefix.
    stripped = OrderedDict(
        (key[7:], value)
        for key, value in checkpoint['state_dict'].items()
        if 'module' in key
    )
    net.load_state_dict(stripped)
    net.eval()
    return net
class StubRpcAgent:
    """Minimal stand-in for an RPC agent: only knows the world size and its workers."""

    def __init__(self, world_size):
        # Number of participating RPC workers.
        self.world_size = world_size

    def get_worker_infos(self):
        # One WorkerInfo per rank, named via the shared worker_name helper.
        infos = set()
        for rank in range(self.world_size):
            infos.add(rpc.WorkerInfo(name=worker_name(rank), id=rank))
        return infos
class _MyCustomMapDatasetThrowingExceptionAtItem(MapDatasetBase):
    """Two-item map dataset whose non-zero items always raise, for error-path tests."""

    def __init__(self):
        super().__init__(data_types={'data': {'shape': (None, 3)}})

    def __len__(self):
        return 2

    def __getitem__(self, item):
        # Only index 0 succeeds; every other index simulates a read failure.
        if item != 0:
            raise _MyCustomMapDatasetException('test exception at getitem')
        return {'data': numpy.zeros((5, 3))}
def pwdist_means_only(M1, M2=None, symmetric=False, device=None):
    """Pairwise Euclidean distance matrix between the rows of M1 and M2.

    When ``M2`` is omitted (or ``symmetric`` is set), distances are computed
    within ``M1`` itself. If ``device`` is given, the result is moved there.
    """
    if M2 is None or symmetric:
        M2 = M1
    distances = torch.cdist(M1, M2)
    if device:
        distances = distances.to(device)
    return distances
def compile_extension(temp_dir, install=False, verbose=True):
    """Run tuning_setup.py to build (or develop-install) the extension in *temp_dir*.

    The build script receives the source directory and extension name through
    environment variables. Output is captured; when ``verbose`` the completed
    process object is printed.
    """
    build_env = dict(os.environ)
    build_env['TUNING_SOURCE_DIR'] = str(temp_dir)
    build_env['TUNING_EXTENSION_NAME'] = str(temp_dir.stem)
    mode = 'develop' if install else 'build'
    result = subprocess.run(
        [sys.executable, 'tuning_setup.py', mode],
        cwd=temp_dir, env=build_env, capture_output=True)
    if verbose:
        print(result)
    print('Done installing' if install else 'Done compiling')
class Partition1(nn.Module):
# Traced torch scope paths (pipeline-partitioning tracer output) for every
# submodule owned by this partition, in execution order; __init__ binds the
# idx-th entry to attribute `l_{idx}`.
LAYER_SCOPES = ['VisionTransformer/ModuleList[blocks]/Block[2]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[2]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[3]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[3]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[3]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[4]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[4]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[4]/Mlp[mlp]/Dropout[drop]', 
'VisionTransformer/ModuleList[blocks]/Block[4]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[5]/LayerNorm[norm1]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Attention[attn]/Linear[qkv]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Attention[attn]/Dropout[attn_drop]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Attention[attn]/Linear[proj]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Attention[attn]/Dropout[proj_drop]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Identity[drop_path]', 'VisionTransformer/ModuleList[blocks]/Block[5]/LayerNorm[norm2]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/Linear[fc1]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/GELU[act]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/Linear[fc2]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Mlp[mlp]/Dropout[drop]', 'VisionTransformer/ModuleList[blocks]/Block[5]/Identity[drop_path]']
# Scope paths of free parameters/buffers assigned to this partition (none here).
TENSORS = []
def __init__(self, layers, tensors, device='cuda:1'):
    # Bind every traced layer as a submodule named l_{idx}; order must match
    # LAYER_SCOPES because forward() refers to layers positionally.
    super().__init__()
    for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
        self.add_module(f'l_{idx}', layers[layer_scope])
    # Register any partition-owned tensors: parameters as p_{i}, buffers as b_{i}.
    b = p = 0
    for tensor_scope in self.TENSORS:
        tensor = tensors[tensor_scope]
        if isinstance(tensor, nn.Parameter):
            self.register_parameter(f'p_{p}', tensor)
            p += 1
        else:
            self.register_buffer(f'b_{b}', tensor)
            b += 1
    self.device = torch.device(device)
    # Number/shape of flattened positional inputs expected by forward().
    self.input_structure = [1, 1]
    # Map from generated attribute names back to the original model's
    # qualified submodule names (used by the state_dict helpers below).
    self.lookup = {'l_0': 'blocks.2.mlp.drop', 'l_1': 'blocks.2.drop_path', 'l_2': 'blocks.3.norm1', 'l_3': 'blocks.3.attn.qkv', 'l_4': 'blocks.3.attn.attn_drop', 'l_5': 'blocks.3.attn.proj', 'l_6': 'blocks.3.attn.proj_drop', 'l_7': 'blocks.3.drop_path', 'l_8': 'blocks.3.norm2', 'l_9': 'blocks.3.mlp.fc1', 'l_10': 'blocks.3.mlp.act', 'l_11': 'blocks.3.mlp.drop', 'l_12': 'blocks.3.mlp.fc2', 'l_13': 'blocks.3.mlp.drop', 'l_14': 'blocks.3.drop_path', 'l_15': 'blocks.4.norm1', 'l_16': 'blocks.4.attn.qkv', 'l_17': 'blocks.4.attn.attn_drop', 'l_18': 'blocks.4.attn.proj', 'l_19': 'blocks.4.attn.proj_drop', 'l_20': 'blocks.4.drop_path', 'l_21': 'blocks.4.norm2', 'l_22': 'blocks.4.mlp.fc1', 'l_23': 'blocks.4.mlp.act', 'l_24': 'blocks.4.mlp.drop', 'l_25': 'blocks.4.mlp.fc2', 'l_26': 'blocks.4.mlp.drop', 'l_27': 'blocks.4.drop_path', 'l_28': 'blocks.5.norm1', 'l_29': 'blocks.5.attn.qkv', 'l_30': 'blocks.5.attn.attn_drop', 'l_31': 'blocks.5.attn.proj', 'l_32': 'blocks.5.attn.proj_drop', 'l_33': 'blocks.5.drop_path', 'l_34': 'blocks.5.norm2', 'l_35': 'blocks.5.mlp.fc1', 'l_36': 'blocks.5.mlp.act', 'l_37': 'blocks.5.mlp.drop', 'l_38': 'blocks.5.mlp.fc2', 'l_39': 'blocks.5.mlp.drop', 'l_40': 'blocks.5.drop_path'}
    self.to(self.device)
def forward(self, *args):
    """Run this pipeline partition: the tail of ViT block 2 plus blocks 3-5.

    Inputs (flattened by the pipeline runtime): x0 = residual stream entering
    block 2's residual add, x1 = block 2's MLP hidden activation. Returns the
    residual stream after block 5 as a 1-tuple.

    BUG FIX: the generated source had lost the `@` matrix-multiply operator
    in all six attention expressions (e.g. `(t_1 t_6)`), which is a syntax
    error; restored as `q @ k^T` / `attn @ v`.
    """
    (x0, x1) = unflatten(args, self.input_structure)
    # --- finish block 2: mlp dropout -> drop_path -> residual add ---
    t_0 = self.l_0(x1)
    t_0 = self.l_1(t_0)
    t_0 = (x0 + t_0)
    # --- block 3: multi-head self-attention ---
    t_1 = self.l_2(t_0)                      # norm1
    t_2 = t_1.shape
    t_3 = t_2[0]                             # batch
    t_4 = t_2[1]                             # tokens
    t_2 = t_2[2]                             # embed dim
    t_1 = self.l_3(t_1)                      # qkv projection
    t_5 = (t_2 // 16)                        # per-head dim (16 heads)
    t_5 = t_1.reshape(t_3, t_4, 3, 16, t_5)
    t_5 = t_5.permute(2, 0, 3, 1, 4)
    t_1 = t_5[0]                             # q
    t_6 = t_5[1]                             # k
    t_5 = t_5[2]                             # v
    t_6 = t_6.transpose((- 2), (- 1))
    t_6 = (t_1 @ t_6)                        # q @ k^T
    t_6 = (t_6 * 0.125)                      # softmax scale
    t_6 = t_6.softmax(dim=(- 1))
    t_6 = self.l_4(t_6)                      # attn_drop
    t_5 = (t_6 @ t_5)                        # attn @ v
    t_5 = t_5.transpose(1, 2)
    t_2 = t_5.reshape(t_3, t_4, t_2)
    t_2 = self.l_5(t_2)                      # proj
    t_2 = self.l_6(t_2)                      # proj_drop
    t_2 = self.l_7(t_2)                      # drop_path
    t_2 = (t_0 + t_2)                        # residual
    # --- block 3: MLP ---
    t_0 = self.l_8(t_2)                      # norm2
    t_0 = self.l_9(t_0)                      # fc1
    t_0 = self.l_10(t_0)                     # act
    t_0 = self.l_11(t_0)                     # drop
    t_0 = self.l_12(t_0)                     # fc2
    t_0 = self.l_13(t_0)                     # drop
    t_0 = self.l_14(t_0)                     # drop_path
    t_0 = (t_2 + t_0)                        # residual
    # --- block 4: multi-head self-attention ---
    t_2 = self.l_15(t_0)
    t_4 = t_2.shape
    t_3 = t_4[0]
    t_5 = t_4[1]
    t_4 = t_4[2]
    t_2 = self.l_16(t_2)
    t_6 = (t_4 // 16)
    t_6 = t_2.reshape(t_3, t_5, 3, 16, t_6)
    t_6 = t_6.permute(2, 0, 3, 1, 4)
    t_2 = t_6[0]
    t_1 = t_6[1]
    t_6 = t_6[2]
    t_1 = t_1.transpose((- 2), (- 1))
    t_1 = (t_2 @ t_1)                        # q @ k^T
    t_1 = (t_1 * 0.125)
    t_1 = t_1.softmax(dim=(- 1))
    t_1 = self.l_17(t_1)
    t_6 = (t_1 @ t_6)                        # attn @ v
    t_6 = t_6.transpose(1, 2)
    t_4 = t_6.reshape(t_3, t_5, t_4)
    t_4 = self.l_18(t_4)
    t_4 = self.l_19(t_4)
    t_4 = self.l_20(t_4)
    t_4 = (t_0 + t_4)
    # --- block 4: MLP ---
    t_0 = self.l_21(t_4)
    t_0 = self.l_22(t_0)
    t_0 = self.l_23(t_0)
    t_0 = self.l_24(t_0)
    t_0 = self.l_25(t_0)
    t_0 = self.l_26(t_0)
    t_0 = self.l_27(t_0)
    t_0 = (t_4 + t_0)
    # --- block 5: multi-head self-attention ---
    t_4 = self.l_28(t_0)
    t_5 = t_4.shape
    t_3 = t_5[0]
    t_6 = t_5[1]
    t_5 = t_5[2]
    t_4 = self.l_29(t_4)
    t_1 = (t_5 // 16)
    t_1 = t_4.reshape(t_3, t_6, 3, 16, t_1)
    t_1 = t_1.permute(2, 0, 3, 1, 4)
    t_4 = t_1[0]
    t_2 = t_1[1]
    t_1 = t_1[2]
    t_2 = t_2.transpose((- 2), (- 1))
    t_2 = (t_4 @ t_2)                        # q @ k^T
    t_2 = (t_2 * 0.125)
    t_2 = t_2.softmax(dim=(- 1))
    t_2 = self.l_30(t_2)
    t_1 = (t_2 @ t_1)                        # attn @ v
    t_1 = t_1.transpose(1, 2)
    t_5 = t_1.reshape(t_3, t_6, t_5)
    t_5 = self.l_31(t_5)
    t_5 = self.l_32(t_5)
    t_5 = self.l_33(t_5)
    t_5 = (t_0 + t_5)
    # --- block 5: MLP ---
    t_0 = self.l_34(t_5)
    t_0 = self.l_35(t_0)
    t_0 = self.l_36(t_0)
    t_0 = self.l_37(t_0)
    t_0 = self.l_38(t_0)
    t_0 = self.l_39(t_0)
    t_0 = self.l_40(t_0)
    t_0 = (t_5 + t_0)
    return (t_0,)
# The following methods deliberately shadow nn.Module's versions and delegate
# to module-level helper functions of the same name, which remap the generated
# l_{idx}/p_{i}/b_{i} names back to the original model names via self.lookup.
def state_dict(self, *args, **kwargs):
    return state_dict(self, *args, **kwargs)
def load_state_dict(self, state):
    return load_state_dict(self, state)
def named_parameters(self, recurse=True):
    # Yields parameters under their original (pre-partition) names.
    return named_parameters(self, recurse=recurse)
def named_buffers(self, recurse=True):
    return named_buffers(self, recurse=recurse)
def cpu(self):
    return cpu(self)
def cuda(self, device=None):
    return cuda(self, device=device)
def to(self, *args, **kwargs):
    # All device/dtype moves route through the framework's `to` helper.
    return to(self, *args, **kwargs)
def ngrams_for_evaluation(sequence, max_n, predict_first=False):
    """Yield (token, context) pairs for language-model evaluation.

    The context is a tuple of the most recent preceding tokens, capped at
    max_n - 1 entries at yield time. When predict_first is False the first
    token only seeds the context and is never yielded as a prediction
    target.

    Raises:
        ValueError: if max_n < 1 (raised on first iteration).
    """
    if max_n <= 0:
        raise ValueError('Max N must be >=1')
    tokens = iter(sequence)
    context = [] if predict_first else [next(tokens)]
    for current in tokens:
        if len(context) == max_n:
            context.pop(0)
        yield current, tuple(context)
        context.append(current)
def preemption_setup(config):
    """Prepare `config` to resume a preempted run identified by config.tolerance.id.

    Reads previously recorded hydra run dirs from <logdir>/<id>/hydra.txt
    (newest last), points training at the most recent `last.ckpt` found,
    restores the wandb run id when possible, and appends the current run dir
    for future resumes. Returns the (possibly modified) config.
    """
    if (config.tolerance.id is None):
        return config
    resume_dir = os.path.join(get_original_cwd(), config.tolerance.logdir, str(config.tolerance.id))
    if os.path.exists(resume_dir):
        print(f'Resuming from {resume_dir}')
        with open(os.path.join(resume_dir, 'hydra.txt'), 'r') as f:
            hydra_paths = [p.rstrip('\n') for p in f.readlines()]
        checkpoint_path = None
        hydra_path = None
        # Walk run dirs newest-first and take the first one with a last.ckpt.
        for hydra_path in reversed(hydra_paths):
            last_path = os.path.join(hydra_path, 'checkpoints', 'last.ckpt')
            if os.path.exists(last_path):
                print('\tFound checkpoint at', last_path)
                # BUG FIX: checkpoint_path was never assigned in the original,
                # so the "no checkpoint" message printed even after a hit.
                checkpoint_path = last_path
                config.train.ckpt = checkpoint_path
                config.train.pretrained_model_path = None
                config.train.pretrained_model_state_hook._name_ = None
                break
        if (checkpoint_path is None):
            print('\tNo suitable checkpoint found, starting from scratch')
        # Resume the wandb run recorded in the last examined run dir, if any.
        # (Guard also fixes a NameError when hydra.txt was empty.)
        if (hydra_path is not None) and os.path.exists(os.path.join(hydra_path, 'wandb')):
            run_info = [e for e in os.listdir(os.path.join(hydra_path, 'wandb')) if e.startswith('run-')][0]
            run_id = run_info.split('-')[(- 1)]
            try:
                config.wandb.id = run_id
            except AttributeError:
                # Config schema without a wandb section: skip silently.
                pass
    os.makedirs(resume_dir, exist_ok=True)
    with open(os.path.join(resume_dir, 'hydra.txt'), 'a') as f:
        f.write((os.getcwd() + '\n'))
    return config
def typeset_term_tables(fd, table):
    """Scatter term names into categories by name tag and typeset each table.

    Tags are checked in order and the first match wins; terms matching no tag
    go to the 'basic' table. Writes a page break at the end.
    """
    # (substring tag, destination category index)
    scattab = [('_st_', 2), ('_sd_', 0), ('_adj_', 0), ('_tl_', 1), ('_ul_', 1), ('_th', 2), ('_eth', 2), ('_of_', 2), ('de_', 3)]
    new_tabs = [[], [], [], []]
    # Iterate the dict directly instead of six.iterkeys (Python 3 idiom).
    for term_name in table:
        for (term_tag, tab_id) in scattab:
            if (term_tag in term_name):
                new_tabs[tab_id].append(term_name)
                break
    # Everything not claimed by a tag is 'basic'. Iterating `table` keeps a
    # deterministic order (the original list(set-difference) was arbitrary).
    tagged = set().union(*new_tabs)
    basic_keys = [key for key in table if key not in tagged]
    typeset_term_table(fd, basic_keys, table, 'basic')
    typeset_term_table(fd, new_tabs[0], table, 'sensitivity')
    typeset_term_table(fd, new_tabs[1], table, 'large deformation')
    typeset_term_table(fd, new_tabs[2], table, 'special')
    typeset_term_table(fd, new_tabs[3], table, 'multi-linear')
    fd.write(newpage)
# NOTE(review): this class was preceded by a bare `_class` token — almost
# certainly a class decorator whose `@...` prefix was lost in transit
# (e.g. `@persistence.persistent_class` in the EDM codebase). The bare name
# raises NameError at import time, so it is removed here; restore the
# original decorator if it can be confirmed.
class VELoss():
    """Variance-Exploding (VE) diffusion training loss.

    sigma is sampled log-uniformly in [sigma_min, sigma_max] per image and
    the squared denoising error is weighted by 1/sigma^2.
    """

    def __init__(self, sigma_min=0.02, sigma_max=100):
        self.sigma_min = sigma_min
        self.sigma_max = sigma_max

    def __call__(self, net, images, labels, augment_pipe=None):
        """Return the per-pixel weighted loss tensor (same shape as images)."""
        # Log-uniform noise level per sample, broadcastable over CHW.
        rnd_uniform = torch.rand([images.shape[0], 1, 1, 1], device=images.device)
        sigma = (self.sigma_min * ((self.sigma_max / self.sigma_min) ** rnd_uniform))
        weight = (1 / (sigma ** 2))
        (y, augment_labels) = (augment_pipe(images) if (augment_pipe is not None) else (images, None))
        n = (torch.randn_like(y) * sigma)
        D_yn = net((y + n), sigma, labels, augment_labels=augment_labels)
        loss = (weight * ((D_yn - y) ** 2))
        return loss
class mumps_struc_c_x(ctypes.Structure):
    # Partial ctypes mirror of the MUMPS solver's Fortran control structure:
    # the leading scalar fields and the 40-slot icntl array are mapped
    # explicitly, and `aux` pads out the remainder of the native struct.
    # NOTE(review): mumps_int and AUX_LENGTH are presumably defined earlier
    # in this module to match the linked MUMPS build — confirm sizes.
    _fields_ = [('sym', mumps_int), ('par', mumps_int), ('job', mumps_int), ('comm_fortran', mumps_int), ('icntl', (mumps_int * 40)), ('aux', (ctypes.c_uint8 * AUX_LENGTH))]
def test_sample_hiddens():
    """Sampled hidden activations should average to the mean activations."""
    rng = np.random.RandomState(0)
    X = Xdigits[:100]
    rbm1 = BernoulliRBM(n_components=2, batch_size=5, n_iter=5, random_state=42)
    rbm1.fit(X)
    expected = rbm1._mean_hiddens(X[0])
    draws = [rbm1._sample_hiddens(X[0], rng) for _ in range(100)]
    observed = np.mean(draws, axis=0)
    assert_almost_equal(expected, observed, decimal=1)
class KitModel(nn.Module):
    """Converter-generated (MMdnn-style) ResNet stage-5 head with bbox/cls outputs.

    BUG FIX: the three layer-builder helpers below are defined without `self`
    but were missing their `@staticmethod` decorators; calling them as
    `self.__conv(2, name=...)` therefore raised
    `TypeError: got multiple values for argument 'name'`. The decorators are
    restored.
    """

    def __init__(self, weight_file):
        super(KitModel, self).__init__()
        # Generated code keeps the loaded weights in a module-level dict so
        # the static builders can reach them.
        global __weights_dict
        __weights_dict = load_weights(weight_file)
        self.res5a_branch1 = self.__conv(2, name='res5a_branch1', in_channels=1024, out_channels=2048, kernel_size=(1, 1), stride=(2, 2), groups=1, bias=False)
        self.res5a_branch2a = self.__conv(2, name='res5a_branch2a', in_channels=1024, out_channels=512, kernel_size=(1, 1), stride=(2, 2), groups=1, bias=False)
        self.bn5a_branch1 = self.__batch_normalization(2, 'bn5a_branch1', num_features=2048, eps=9.e-06, momentum=0.0)
        self.bn5a_branch2a = self.__batch_normalization(2, 'bn5a_branch2a', num_features=512, eps=9.e-06, momentum=0.0)
        self.res5a_branch2b = self.__conv(2, name='res5a_branch2b', in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
        self.bn5a_branch2b = self.__batch_normalization(2, 'bn5a_branch2b', num_features=512, eps=9.e-06, momentum=0.0)
        self.res5a_branch2c = self.__conv(2, name='res5a_branch2c', in_channels=512, out_channels=2048, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.bn5a_branch2c = self.__batch_normalization(2, 'bn5a_branch2c', num_features=2048, eps=9.e-06, momentum=0.0)
        self.res5b_branch2a = self.__conv(2, name='res5b_branch2a', in_channels=2048, out_channels=512, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.bn5b_branch2a = self.__batch_normalization(2, 'bn5b_branch2a', num_features=512, eps=9.e-06, momentum=0.0)
        self.res5b_branch2b = self.__conv(2, name='res5b_branch2b', in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
        self.bn5b_branch2b = self.__batch_normalization(2, 'bn5b_branch2b', num_features=512, eps=9.e-06, momentum=0.0)
        self.res5b_branch2c = self.__conv(2, name='res5b_branch2c', in_channels=512, out_channels=2048, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.bn5b_branch2c = self.__batch_normalization(2, 'bn5b_branch2c', num_features=2048, eps=9.e-06, momentum=0.0)
        self.res5c_branch2a = self.__conv(2, name='res5c_branch2a', in_channels=2048, out_channels=512, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.bn5c_branch2a = self.__batch_normalization(2, 'bn5c_branch2a', num_features=512, eps=9.e-06, momentum=0.0)
        self.res5c_branch2b = self.__conv(2, name='res5c_branch2b', in_channels=512, out_channels=512, kernel_size=(3, 3), stride=(1, 1), groups=1, bias=False)
        self.bn5c_branch2b = self.__batch_normalization(2, 'bn5c_branch2b', num_features=512, eps=9.e-06, momentum=0.0)
        self.res5c_branch2c = self.__conv(2, name='res5c_branch2c', in_channels=512, out_channels=2048, kernel_size=(1, 1), stride=(1, 1), groups=1, bias=False)
        self.bn5c_branch2c = self.__batch_normalization(2, 'bn5c_branch2c', num_features=2048, eps=9.e-06, momentum=0.0)
        self.bbox_pred_1 = self.__dense(name='bbox_pred_1', in_features=2048, out_features=8, bias=True)
        self.cls_score_1 = self.__dense(name='cls_score_1', in_features=2048, out_features=2, bias=True)

    def forward(self, x):
        """Three bottleneck units (dilated 3x3 convs), global pool, two heads."""
        res5a_branch1 = self.res5a_branch1(x)
        res5a_branch2a = self.res5a_branch2a(x)
        bn5a_branch1 = self.bn5a_branch1(res5a_branch1)
        bn5a_branch2a = self.bn5a_branch2a(res5a_branch2a)
        res5a_branch2a_relu = F.relu(bn5a_branch2a)
        # 3x3 convs are applied functionally to override padding/dilation to 2.
        res5a_branch2b = F.conv2d(res5a_branch2a_relu, weight=self.res5a_branch2b.weight, bias=self.res5a_branch2b.bias, stride=self.res5a_branch2b.stride, padding=(2, 2), dilation=2, groups=self.res5a_branch2b.groups)
        bn5a_branch2b = self.bn5a_branch2b(res5a_branch2b)
        res5a_branch2b_relu = F.relu(bn5a_branch2b)
        res5a_branch2c = self.res5a_branch2c(res5a_branch2b_relu)
        bn5a_branch2c = self.bn5a_branch2c(res5a_branch2c)
        res5a = (bn5a_branch1 + bn5a_branch2c)
        res5a_relu = F.relu(res5a)
        res5b_branch2a = self.res5b_branch2a(res5a_relu)
        bn5b_branch2a = self.bn5b_branch2a(res5b_branch2a)
        res5b_branch2a_relu = F.relu(bn5b_branch2a)
        res5b_branch2b = F.conv2d(res5b_branch2a_relu, weight=self.res5b_branch2b.weight, bias=self.res5b_branch2b.bias, stride=self.res5b_branch2b.stride, padding=(2, 2), dilation=2, groups=self.res5b_branch2b.groups)
        bn5b_branch2b = self.bn5b_branch2b(res5b_branch2b)
        res5b_branch2b_relu = F.relu(bn5b_branch2b)
        res5b_branch2c = self.res5b_branch2c(res5b_branch2b_relu)
        bn5b_branch2c = self.bn5b_branch2c(res5b_branch2c)
        res5b = (res5a_relu + bn5b_branch2c)
        res5b_relu = F.relu(res5b)
        res5c_branch2a = self.res5c_branch2a(res5b_relu)
        bn5c_branch2a = self.bn5c_branch2a(res5c_branch2a)
        res5c_branch2a_relu = F.relu(bn5c_branch2a)
        res5c_branch2b = F.conv2d(res5c_branch2a_relu, weight=self.res5c_branch2b.weight, bias=self.res5c_branch2b.bias, stride=self.res5c_branch2b.stride, padding=(2, 2), dilation=2, groups=self.res5c_branch2b.groups)
        bn5c_branch2b = self.bn5c_branch2b(res5c_branch2b)
        res5c_branch2b_relu = F.relu(bn5c_branch2b)
        res5c_branch2c = self.res5c_branch2c(res5c_branch2b_relu)
        bn5c_branch2c = self.bn5c_branch2c(res5c_branch2c)
        res5c = (res5b_relu + bn5c_branch2c)
        res5c_relu = F.relu(res5c)
        pool5 = F.avg_pool2d(res5c_relu, kernel_size=(7, 7), stride=(1, 1), padding=(0,), ceil_mode=False, count_include_pad=False)
        bbox_pred_0 = pool5.view(pool5.size(0), (- 1))
        cls_score_0 = pool5.view(pool5.size(0), (- 1))
        bbox_pred_1 = self.bbox_pred_1(bbox_pred_0)
        cls_score_1 = self.cls_score_1(cls_score_0)
        cls_prob = F.softmax(cls_score_1, dim=1)
        return (bbox_pred_1, cls_prob, cls_score_1)

    @staticmethod
    def __batch_normalization(dim, name, **kwargs):
        """Build a BatchNorm{1,2,3}d layer and load weights from __weights_dict[name]."""
        if ((dim == 0) or (dim == 1)):
            layer = nn.BatchNorm1d(**kwargs)
        elif (dim == 2):
            layer = nn.BatchNorm2d(**kwargs)
        elif (dim == 3):
            layer = nn.BatchNorm3d(**kwargs)
        else:
            raise NotImplementedError()
        if ('scale' in __weights_dict[name]):
            layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['scale']))
        else:
            layer.weight.data.fill_(1)
        if ('bias' in __weights_dict[name]):
            layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
        else:
            layer.bias.data.fill_(0)
        layer.state_dict()['running_mean'].copy_(torch.from_numpy(__weights_dict[name]['mean']))
        layer.state_dict()['running_var'].copy_(torch.from_numpy(__weights_dict[name]['var']))
        return layer

    @staticmethod
    def __conv(dim, name, **kwargs):
        """Build a Conv{1,2,3}d layer and load weights from __weights_dict[name]."""
        print(name)
        print(kwargs)
        if (dim == 1):
            layer = nn.Conv1d(**kwargs)
        elif (dim == 2):
            layer = nn.Conv2d(**kwargs)
        elif (dim == 3):
            layer = nn.Conv3d(**kwargs)
        else:
            raise NotImplementedError()
        layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['weights']))
        if ('bias' in __weights_dict[name]):
            layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
        return layer

    @staticmethod
    def __dense(name, **kwargs):
        """Build a Linear layer and load weights from __weights_dict[name]."""
        layer = nn.Linear(**kwargs)
        layer.state_dict()['weight'].copy_(torch.from_numpy(__weights_dict[name]['weights']))
        if ('bias' in __weights_dict[name]):
            layer.state_dict()['bias'].copy_(torch.from_numpy(__weights_dict[name]['bias']))
        return layer
def preprocess_all():
    """Run preprocessing for every registered dataset, announcing each one."""
    for name, dataset in datasets.items():
        print('Preprocessing ' + name)
        dataset.preprocess(CACHE_LOCATION, OUTPUT_LOCATION)
def dtype_from_name(name):
    """Translate a dtype name string into the matching core.TYPE_* constant.

    Args:
        name: dtype name such as 'bit', 'fp32', 'int8', ..., 'uint64'.

    Returns:
        The corresponding core.TYPE_* constant, or None if `name` is not
        recognized.
    """
    # Table-driven lookup replaces the 11-branch if/elif chain; attribute
    # names are resolved lazily so unknown names never touch `core`.
    attr = {
        'bit': 'TYPE_BIT',
        'fp32': 'TYPE_FP32',
        'fp64': 'TYPE_FP64',
        'int8': 'TYPE_INT8',
        'int16': 'TYPE_INT16',
        'int32': 'TYPE_INT32',
        'int64': 'TYPE_INT64',
        'uint8': 'TYPE_UINT8',
        'uint16': 'TYPE_UINT16',
        'uint32': 'TYPE_UINT32',
        'uint64': 'TYPE_UINT64',
    }.get(name)
    return getattr(core, attr) if attr is not None else None
def get_reference_score(aligner, input_text, context, aligner_type, remove_stopwords):
    """Score input_text against context with the given aligner.

    Args:
        aligner: object exposing get_score(input_text=, context=, remove_stopwords=).
        input_text: a single reference string, or a list of references whose
            scores are averaged.
        context: the text to score against.
        aligner_type: 'bleurt' aligners take their arguments in
            (context, candidate) order; all others take (candidate, context).
        remove_stopwords: forwarded to the aligner.

    Returns:
        A single score, or the mean score over the list of references.
    """
    def _score_one(ref):
        # Shared argument-ordering logic (was duplicated across both branches).
        if (aligner_type == 'bleurt'):
            i, c = context, ref
        else:
            i, c = ref, context
        return aligner.get_score(input_text=i, context=c, remove_stopwords=remove_stopwords)

    if isinstance(input_text, list):
        return np.array([_score_one(ref) for ref in input_text]).mean()
    return _score_one(input_text)
class NpDataset(Dataset):
    """Episode-based replay dataset backed by a single numpy archive.

    Episodes are delimited by the `done` flag; get_batch samples batch_size
    episodes and a random horizon+1-length window from each. New episodes can
    be appended and the whole buffer periodically saved as .npz.
    """

    def _init_params_to_attrs(self, params):
        # Framework hook: copy configuration onto the instance.
        self._input_file = params.input_file
        self._output_file = params.output_file
        self._batch_size = params.batch_size
        self._horizon = params.horizon
        self._save_every_n_steps = params.save_every_n_steps
        # Keys stored per step; env_spec names plus bookkeeping columns.
        self._all_names = (self._env_spec.names + ['done', 'rollout_timestep'])
        self._last_save = 0
        self._data_len = 0

    def _init_setup(self):
        (self._datadict, self._split_indices) = self._load_np()

    def _load_np(self):
        """Load the archive (if any) and split each key into per-episode arrays."""
        local_dict = AttrDict()
        if (self._input_file is None):
            # No input file: start with an empty buffer per key.
            for key in self._all_names:
                local_dict[key] = []
            split_indices = np.array([])
        else:
            logger.debug(('Loading ' + self._input_file))
            # mmap_mode='r' avoids loading the whole archive into memory.
            datadict = np.load(self._input_file, mmap_mode='r', allow_pickle=True)
            self._data_len = len(datadict['done'])
            # Episode boundaries: index just past each done=True step.
            split_indices = (np.where(datadict['done'])[0] + 1)
            # Drop a trailing boundary at the very end (would create an empty split).
            if (0 < self._data_len == split_indices[(- 1)]):
                split_indices = np.delete(split_indices, (- 1))
            for key in self._all_names:
                assert (key in datadict), f'{key} not in np file'
                assert (len(datadict[key]) == self._data_len)
                local_dict[key] = np.split(datadict[key], split_indices)
            logger.debug('Dataset length: {}'.format(self._data_len))
        return (local_dict, split_indices)

    def hor_chunk(self, array):
        # Random window of horizon+1 consecutive steps from one episode.
        # NOTE(review): assumes episodes are at least horizon+1 long — confirm.
        rg = max(((len(array) - self._horizon) - 1), 1)
        idx = np.random.choice(rg)
        return array[idx:((idx + self._horizon) + 1)]

    def get_batch(self, indices=None, torch_device=None):
        """Sample a batch of horizon windows; returns (inputs, outputs) AttrDicts."""
        assert (indices is None)
        num_eps = len(self._datadict.done)
        # Sample distinct episodes, then a random window within each.
        indices = np.random.choice(num_eps, self._batch_size, replace=False)
        sampled_datadict = self._datadict.leaf_apply((lambda list_of_arr: np.stack([self.hor_chunk(list_of_arr[i]) for i in indices])))
        inputs = AttrDict()
        outputs = AttrDict()
        for key in self._env_spec.observation_names:
            inputs[key] = sampled_datadict[key]
        for key in self._env_spec.action_names:
            inputs[key] = sampled_datadict[key]
        for key in self._env_spec.output_observation_names:
            outputs[key] = sampled_datadict[key]
        outputs.done = sampled_datadict.done.astype(bool)
        if (torch_device is not None):
            for d in (inputs, outputs):
                d.leaf_modify((lambda x: torch.from_numpy(x).to(torch_device)))
        return (inputs, outputs)

    def add_episode(self, obs, goal, action, done):
        """Append one episode's arrays and save periodically."""
        for oname in self._env_spec.observation_names:
            self._datadict[oname].append(obs[oname].copy())
        for aname in self._env_spec.action_names:
            self._datadict[aname].append(action[aname].copy())
        for gname in self._env_spec.goal_names:
            self._datadict[gname].append(goal[gname].copy())
        self._split_indices = np.append(self._split_indices, self._data_len)
        self._datadict.done.append(done)
        self._datadict.rollout_timestep.append(np.arange(len(done)))
        self._data_len += len(done)
        if ((self._data_len - self._last_save) >= self._save_every_n_steps):
            print('SAVING:', self._data_len)
            self.save()
            self._last_save = self._data_len

    def save(self):
        """Concatenate per-episode arrays and write a compressed .npz archive."""
        save_dict = {}
        for key in self._all_names:
            save_dict[key] = np.concatenate(self._datadict[key])
        path = os.path.join(self._file_manager.exp_dir, self._output_file)
        np.savez_compressed(path, **save_dict)

    def __len__(self):
        return self._data_len
def braid_from_piecewise(strands):
    """Compute the braid corresponding to piecewise-linear strand paths.

    Each strand is a list of (time, re, im) samples with times in [0, 1].
    The strands are resampled onto a common set of break times, crossings
    between consecutive time slices are detected and oriented by the sign of
    the imaginary parts at the crossing, and the result is returned as an
    element of the braid group on len(strands) strands (Sage).
    """
    L = strands
    # First break time after 0 across all strands.
    i = min((val[1][0] for val in L))
    # totalpoints[j] holds strand j's (re, im) at each common break time.
    totalpoints = [[[a[0][1], a[0][2]]] for a in L]
    indices = [1 for a in range(len(L))]
    while (i < 1):
        for (j, val) in enumerate(L):
            if (val[indices[j]][0] > i):
                # Strand j has no sample exactly at time i: interpolate linearly
                # between its surrounding samples.
                xauxr = val[(indices[j] - 1)][1]
                xauxi = val[(indices[j] - 1)][2]
                yauxr = val[indices[j]][1]
                yauxi = val[indices[j]][2]
                aaux = val[(indices[j] - 1)][0]
                baux = val[indices[j]][0]
                interpolar = (xauxr + (((yauxr - xauxr) * (i - aaux)) / (baux - aaux)))
                interpolai = (xauxi + (((yauxi - xauxi) * (i - aaux)) / (baux - aaux)))
                totalpoints[j].append([interpolar, interpolai])
            else:
                # Sample coincides with break time i: take it and advance.
                totalpoints[j].append([val[indices[j]][1], val[indices[j]][2]])
                indices[j] = (indices[j] + 1)
        i = min((val[indices[k]][0] for (k, val) in enumerate(L)))
    # Close with each strand's final point (time 1).
    for (j, val) in enumerate(L):
        totalpoints[j].append([val[(- 1)][1], val[(- 1)][2]])
    braid = []
    G = SymmetricGroup(len(totalpoints))
    def sgn(x, y):
        # Comparison sign used to orient a crossing.
        if (x < y):
            return 1
        if (x > y):
            return (- 1)
        return 0
    for i in range((len(totalpoints[0]) - 1)):
        # Real/imag pairs at the start (l1) and end (l2) of this time slice,
        # sorted by start position so strand order is canonical.
        l1 = [totalpoints[j][i] for j in range(len(L))]
        l2 = [totalpoints[j][(i + 1)] for j in range(len(L))]
        M = [[l1[s], l2[s]] for s in range(len(l1))]
        M.sort()
        l1 = [a[0] for a in M]
        l2 = [a[1] for a in M]
        cruces = []
        for (j, l2j) in enumerate(l2):
            l1j = l1[j]
            for k in range(j):
                if (l2j < l2[k]):
                    # Strands k and j swap real-part order in this slice:
                    # record crossing time fraction t and its sign s.
                    t = ((l1j[0] - l1[k][0]) / ((l2[k][0] - l2j[0]) + (l1j[0] - l1[k][0])))
                    s = sgn(((l1[k][1] * (1 - t)) + (t * l2[k][1])), ((l1j[1] * (1 - t)) + (t * l2j[1])))
                    cruces.append([t, k, j, s])
        if cruces:
            cruces.sort()
            # P tracks the running permutation of strand positions as
            # crossings within the slice are consumed in time order.
            P = G(Permutation([]))
            while cruces:
                # Process all crossings happening at the same fraction t together.
                crucesl = [c for c in cruces if (c[0] == cruces[0][0])]
                crossesl = [((P((c[2] + 1)) - P((c[1] + 1))), c[1], c[2], c[3]) for c in crucesl]
                cruces = cruces[len(crucesl):]
                while crossesl:
                    crossesl.sort()
                    c = crossesl.pop(0)
                    # Signed generator index = sign * lower of the two positions.
                    braid.append((c[3] * min(map(P, [(c[1] + 1), (c[2] + 1)]))))
                    P = (G(Permutation([((c[1] + 1), (c[2] + 1))])) * P)
                    crossesl = [((P((cr[2] + 1)) - P((cr[1] + 1))), cr[1], cr[2], cr[3]) for cr in crossesl]
    B = BraidGroup(len(L))
    return B(braid)
# NOTE(review): the two decorators here arrived garbled — the line below was a
# bare `_utils.test(...)` statement (its `@` prefix, and possibly a leading
# `test` in the module name, were lost; likely `@test_utils.test(...)`), and
# `store` had no decorator. `ti.loop_config(serialize=True)` and bit-packed
# field stores only make sense inside a Taichi kernel, so `@ti.kernel` is
# restored; confirm the configuration decorator's real name.
@_utils.test(require=ti.extension.quant_basic, arch=[ti.cpu, ti.cuda, ti.vulkan], exclude=[vk_on_mac, cuda_on_windows], debug=True)
def test_quant_store_fusion(capfd):
    """Two quantized int16 fields packed into one 32-bit word: stores must fuse
    so each loop iteration prints both freshly stored values."""
    x = ti.field(dtype=ti.types.quant.int(16, True))
    y = ti.field(dtype=ti.types.quant.int(16, True))
    v = ti.BitpackedFields(max_num_bits=32)
    v.place(x, y)
    ti.root.dense(ti.i, 10).place(v)

    @ti.kernel
    def store():
        ti.loop_config(serialize=True)
        for i in range(10):
            x[i] = i
            y[i] = (i + 1)
            print(x[i], y[i])

    store()
    ti.sync()
    (out, err) = capfd.readouterr()
    expected_out = '0 1\n1 2\n2 3\n3 4\n4 5\n5 6\n6 7\n7 8\n8 9\n9 10\n'
    assert ((out == expected_out) and (err == ''))
def test_nonlocal_failure():
    """Registering a non-local type a second time across modules must fail."""
    import pybind11_cross_module_tests as cross
    with pytest.raises(RuntimeError) as err:
        cross.register_nonlocal()
    expected = 'generic_type: type "NonLocalType" is already registered!'
    assert str(err.value) == expected
class QueryOnTrilineGradQuery(PythonFunction):
    """Backward-of-backward function: gradient of a triline feature query
    w.r.t. the query coordinates, dispatched to a CUDA extension.

    Inputs: [0] grad_output, [1] query, [2] feature. Output: [0] grad_query
    (same shape as query).
    """

    def __init__(self, ctx, min_, max_, boundary_check=False):
        super(QueryOnTrilineGradQuery, self).__init__(ctx)
        # Coordinate bounds of the triline grid.
        self._min = min_
        self._max = max_
        self._boundary_check = boundary_check

    def name(self):
        return self.__class__.__name__

    def min_outputs(self):
        return 1

    def setup_impl(self, inputs, outputs):
        # grad_query mirrors the query's shape.
        query = inputs[1]
        outputs[0].reset_shape(query.shape, True)

    def forward_impl(self, inputs, outputs):
        """Compute grad_query on the GPU via the raw-pointer CUDA kernel."""
        grad_query = outputs[0]
        grad_output = inputs[0]
        query = inputs[1]
        feature = inputs[2]
        # All leading query dims are flattened into a batch of B points.
        batch_sizes = query.shape[:(- 1)]
        G = feature.shape[1]
        B = np.prod(batch_sizes)
        D = feature.shape[(- 1)]
        grad_query_ptr = grad_query.data.data_ptr(np.float32, self.ctx)
        grad_output_ptr = grad_output.data.data_ptr(np.float32, self.ctx)
        query_ptr = query.data.data_ptr(np.float32, self.ctx)
        feature_ptr = feature.data.data_ptr(np.float32, self.ctx)
        # B*D*3 = total launch size (3 spatial axes of the triline).
        cosine_triline_feature_cuda.grad_query(((B * D) * 3), grad_query_ptr, grad_output_ptr, query_ptr, feature_ptr, G, D, self._min, self._max, self._boundary_check, False)

    def backward_impl(self, inputs, outputs, propagate_down, accum):
        """Second-order terms: propagate grad_grad_query into grad_output
        and feature (query itself gets no second-order term here)."""
        grad_query = outputs[0]
        grad_output = inputs[0]
        query = inputs[1]
        feature = inputs[2]
        batch_sizes = query.shape[:(- 1)]
        G = feature.shape[1]
        B = np.prod(batch_sizes)
        D = feature.shape[(- 1)]
        grad_grad_query_ptr = grad_query.grad.data_ptr(np.float32, self.ctx)
        grad_output_ptr = grad_output.data.data_ptr(np.float32, self.ctx)
        query_ptr = query.data.data_ptr(np.float32, self.ctx)
        feature_ptr = feature.data.data_ptr(np.float32, self.ctx)
        grad_grad_output_ptr = grad_output.grad.data_ptr(np.float32, self.ctx)
        grad_query_ptr = query.grad.data_ptr(np.float32, self.ctx)
        grad_feature_ptr = feature.grad.data_ptr(np.float32, self.ctx)
        if propagate_down[0]:
            cosine_triline_feature_cuda.grad_query_grad_grad_output(((B * D) * 3), grad_grad_output_ptr, grad_grad_query_ptr, query_ptr, feature_ptr, G, D, self._min, self._max, self._boundary_check, accum[0])
        if propagate_down[2]:
            cosine_triline_feature_cuda.grad_query_grad_feature(((B * D) * 3), grad_feature_ptr, grad_grad_query_ptr, grad_output_ptr, query_ptr, G, D, self._min, self._max, self._boundary_check, accum[2])

    def grad_depends_output_data(self, i, o):
        return False

    def grad_depends_input_data(self, i, j):
        # Declares which input data each input's gradient depends on,
        # mirroring the pointer usage in backward_impl.
        if ((i == 0) and ((j == 1) or (j == 2))):
            return True
        if (i == 1):
            return True
        if ((i == 2) and ((j == 0) or (j == 1))):
            return True
        return False
class ExprVisitor(GenericVisitor):
    """Evaluates expression ASTs against an interpreter's current values.

    Parameter index 0 refers to the output value; indices >= 1 refer to the
    input values (1-based).

    BUG FIX: `_apply_method_name` was an instance method without `self` but
    is called as `self._apply_method_name(...)`, which raised
    `TypeError: _apply_method_name() takes 1 positional argument but 2 were
    given`; it is now a @staticmethod.
    """
    _interp: Interpreter
    _in_values: List[Any]
    _out_value: Any
    # Evaluation tables for unary/binary operators.
    _unary_dispatch_table: ClassVar[Dict[(UnaryOperator, Callable[([Any], Any)])]] = {UnaryOperator.NOT: (lambda x: (not x)), UnaryOperator.NEG: (lambda x: (- x))}
    _binary_dispatch_table: ClassVar[Dict[(BinaryOperator, Callable[([Any, Any], Any)])]] = {BinaryOperator.ADD: (lambda x, y: (x + y)), BinaryOperator.SUB: (lambda x, y: (x - y)), BinaryOperator.MUL: (lambda x, y: (x * y)), BinaryOperator.DIV: (lambda x, y: (x / y)), BinaryOperator.MOD: (lambda x, y: (x % y)), BinaryOperator.EQ: (lambda x, y: (x == y)), BinaryOperator.NE: (lambda x, y: (x != y)), BinaryOperator.LT: (lambda x, y: (x < y)), BinaryOperator.LE: (lambda x, y: (x <= y)), BinaryOperator.GT: (lambda x, y: (x > y)), BinaryOperator.GE: (lambda x, y: (x >= y)), BinaryOperator.AND: (lambda x, y: (x and y)), BinaryOperator.OR: (lambda x, y: (x or y)), BinaryOperator.IMPLY: (lambda x, y: ((not x) or y))}

    def __init__(self, interp: Interpreter, in_values: List[Any], out_value: Any):
        self._interp = interp
        self._in_values = in_values
        self._out_value = out_value

    def visit_const_expr(self, const_expr: ConstExpr):
        return const_expr.value

    def visit_param_expr(self, param_expr: ParamExpr):
        # Index 0 = output value; 1..n = input values (1-based).
        if (param_expr.index == 0):
            return self._out_value
        else:
            return self._in_values[(param_expr.index - 1)]

    def visit_unary_expr(self, unary_expr: UnaryExpr):
        arg = self.visit(unary_expr.operand)
        return self._unary_dispatch_table[unary_expr.operator](arg)

    def visit_binary_expr(self, binary_expr: BinaryExpr):
        # Both operands are evaluated eagerly, so AND/OR do not short-circuit
        # across subtrees.
        larg = self.visit(binary_expr.lhs)
        rarg = self.visit(binary_expr.rhs)
        return self._binary_dispatch_table[binary_expr.operator](larg, rarg)

    def visit_cond_expr(self, cond_expr: CondExpr):
        cond_arg = self.visit(cond_expr.condition)
        if cond_arg:
            return self.visit(cond_expr.true_value)
        else:
            return self.visit(cond_expr.false_value)

    def visit_property_expr(self, prop_expr: PropertyExpr):
        # Properties dispatch to `apply_<name>` methods on the interpreter.
        arg = self.visit(prop_expr.operand)
        method_name = self._apply_method_name(prop_expr.name)
        method = getattr(self._interp, method_name, None)
        if (method is None):
            raise ValueError('Cannot find the required apply method: {}'.format(method_name))
        return method(arg)

    @staticmethod
    def _apply_method_name(name):
        return ('apply_' + name)
def test_multi_objective_gradients_losses_same():
    # `multi_cdv` and `losses` are module-level fixtures defined elsewhere in
    # this file. A zero (3, 1) gradient matrix is presumably degenerate /
    # shape-mismatched for this fixture, so get_descent_vector must raise
    # ValueError — confirm against get_descent_vector's contract.
    with pytest.raises(ValueError):
        multi_cdv.get_descent_vector(losses, np.zeros(shape=(3, 1)))
def include_kernels_h(specification):
    """Generate awkward-cpp/include/awkward/kernels.h from the kernel spec.

    Emits one `EXPORT_SYMBOL ERROR <name>(args...);` C declaration per kernel
    specialization inside an `extern "C"` block. The exact whitespace of the
    f-string below is part of the emitted file — do not reformat.
    """
    print('Generating awkward-cpp/include/awkward/kernels.h...')
    with open(os.path.join(CURRENT_DIR, '..', 'awkward-cpp', 'include', 'awkward', 'kernels.h'), 'w') as header:
        header.write(f'''// AUTO GENERATED ON {reproducible_datetime()}
// DO NOT EDIT BY HAND!
//
// To regenerate file, run
//
// python dev/generate-kernel-signatures.py
//
// (It is usually run as part of pip install . or localbuild.py.)
#ifndef AWKWARD_KERNELS_H_
#define AWKWARD_KERNELS_H_
#include "awkward/common.h"
extern "C" {{
''')
        for spec in specification['kernels']:
            for childfunc in spec['specializations']:
                # ERROR is the kernels' common return type (see common.h).
                header.write(((' ' * 2) + 'EXPORT_SYMBOL ERROR\n'))
                header.write((((' ' * 2) + childfunc['name']) + '(\n'))
                for (i, arg) in enumerate(childfunc['args']):
                    header.write(((((' ' * 4) + type_to_ctype(arg['type'])) + ' ') + arg['name']))
                    # Last argument closes the declaration; others continue it.
                    if (i == (len(childfunc['args']) - 1)):
                        header.write(');\n')
                    else:
                        header.write(',\n')
                header.write('\n')
        header.write('}\n\n#endif // AWKWARD_KERNELS_H_\n')
    print('    awkward-cpp/include/awkward/kernels.h.')
# NOTE(review): this arrived as a bare `_task('bitod_nlg')` statement — a
# class decorator whose `@` prefix (and likely a leading `register` in the
# name, i.e. `@register_task('bitod_nlg')`) was lost in transit; restored as
# a decorator so the class is registered under its task name. Confirm the
# callable's real name against the project's task registry.
@_task('bitod_nlg')
class BiTODNLG(BiTOD):
    """BiTOD natural-language-generation task: trains on the 'rg' (response
    generation) target and is evaluated with cased BLEU."""

    def __init__(self, name, args):
        super().__init__(name, args)
        self._metrics = ['casedbleu']

    def get_splits(self, root, **kwargs):
        kwargs['train_target'] = 'rg'
        kwargs['e2e_evaluation'] = self.args.e2e_dialogue_evaluation
        return E2EDialogueDataset.return_splits(path=root, make_example=self._make_example, **kwargs)
class ConcatChannels(nn.Module):
    """Append `channels` x (input-channel-count) zero channels to the input.

    Output has C * (1 + channels) channels: the original tensor followed by
    zeros of the same dtype/device.
    """

    def __init__(self, channels):
        super(ConcatChannels, self).__init__()
        self.channels = int(channels)

    def forward(self, x):
        zeros = torch.zeros(x.size()).type_as(x.data)
        padding = zeros.repeat(1, self.channels, 1, 1)
        return torch.cat((x, Variable(padding)), dim=1)
def test_get_default_graph_def_by_name():
    # Traces three modules into named proto graphs, then checks that graphs
    # retrieved by name reproduce the direct module outputs.
    module_creators = [ModuleCreator(TSTNetNormal(), [(4, 3, 32, 32), (4, 3, 32, 32)]), ModuleCreator(ResUnit(16), [(4, 3, 32, 32)]), ModuleCreator(NestedTestNet(), [(4, 3, 32, 32), (4, 3, 32, 32)])]
    network_names = ['network1', 'network2', 'network3']
    # Phase 1: build each module's graph under a distinct graph name.
    for (module_creator, network_name) in zip(module_creators, network_names):
        module = module_creator.module
        proto_variable_inputs = [nn.ProtoVariable(shape) for shape in module_creator.input_shape]
        with nn.graph_def.graph_name(network_name):
            outputs = module(*proto_variable_inputs)
    # Phase 2: fetch each graph by name and compare against a direct call.
    for (module_creator, network_name) in zip(module_creators, network_names):
        variable_inputs = module_creator.get_variable_inputs()
        g = nn.graph_def.get_default_graph(network_name)
        outputs = g(*variable_inputs)
        ref_outputs = module_creator.module(*variable_inputs)
        forward_variable_and_check_equal(outputs, ref_outputs)
def generate_scale_factor(rng):
    """Draw a random downscale factor: 1 for draws below 0.7, 2 for draws in
    [0.7, 0.8], and 4 above 0.8.

    Args:
        rng: random source exposing uniform(low, high).
    """
    draw = rng.uniform(0, 1)
    if draw < 0.7:
        return 1
    if draw <= 0.8:
        return 2
    return 4
def main():
    """Parse command-line arguments for the Caffe2 ImageNet trainer and run Train.

    BUG FIX: the help text for --resnext_width_per_group was a copy-paste of
    the --resnext_num_groups help ("The cardinality of resnext").
    """
    parser = argparse.ArgumentParser(description='Caffe2: ImageNet Trainer')
    # Data locations and format.
    parser.add_argument('--train_data', type=str, default=None, required=True, help="Path to training data (or 'null' to simulate)")
    parser.add_argument('--num_layers', type=int, default=50, help='The number of layers in ResNe(X)t model')
    parser.add_argument('--resnext_num_groups', type=int, default=1, help='The cardinality of resnext')
    parser.add_argument('--resnext_width_per_group', type=int, default=64, help='The width of each group of resnext')
    parser.add_argument('--test_data', type=str, default=None, help='Path to test data')
    parser.add_argument('--image_mean_per_channel', type=float, nargs='+', help='The per channel mean for the images')
    parser.add_argument('--image_std_per_channel', type=float, nargs='+', help='The per channel standard deviation for the images')
    parser.add_argument('--test_epoch_size', type=int, default=50000, help='Number of test images')
    parser.add_argument('--db_type', type=str, default='lmdb', help='Database type (such as lmdb or leveldb)')
    # Device selection.
    parser.add_argument('--gpus', type=str, help='Comma separated list of GPU devices to use')
    parser.add_argument('--num_gpus', type=int, default=1, help='Number of GPU devices (instead of --gpus)')
    parser.add_argument('--num_channels', type=int, default=3, help='Number of color channels')
    parser.add_argument('--image_size', type=int, default=224, help='Input image size (to crop to)')
    parser.add_argument('--num_labels', type=int, default=1000, help='Number of labels')
    # Optimization schedule.
    parser.add_argument('--batch_size', type=int, default=32, help='Batch size, total over all GPUs')
    parser.add_argument('--epoch_size', type=int, default=1500000, help='Number of images/epoch, total over all machines')
    parser.add_argument('--num_epochs', type=int, default=1000, help='Num epochs.')
    parser.add_argument('--base_learning_rate', type=float, default=0.1, help='Initial learning rate.')
    parser.add_argument('--weight_decay', type=float, default=0.0001, help='Weight decay (L2 regularization)')
    parser.add_argument('--cudnn_workspace_limit_mb', type=int, default=64, help='CuDNN workspace limit in MBs')
    # Distributed-run rendezvous options.
    parser.add_argument('--num_shards', type=int, default=1, help='Number of machines in distributed run')
    parser.add_argument('--shard_id', type=int, default=0, help='Shard id.')
    parser.add_argument('--run_id', type=str, help='Unique run identifier (e.g. uuid)')
    parser.add_argument('--redis_host', type=str, help='Host of Redis server (for rendezvous)')
    parser.add_argument('--redis_port', type=int, default=6379, help='Port of Redis server (for rendezvous)')
    parser.add_argument('--file_store_path', type=str, default='/tmp', help='Path to directory to use for rendezvous')
    # Checkpointing.
    parser.add_argument('--save_model_name', type=str, default='resnext_model', help='Save the trained model to a given name')
    parser.add_argument('--load_model_path', type=str, default=None, help='Load previously saved model to continue training')
    # Backend/precision switches.
    parser.add_argument('--use_cpu', action='store_true', help='Use CPU instead of GPU')
    parser.add_argument('--use_nccl', action='store_true', help='Use nccl for inter-GPU collectives')
    parser.add_argument('--use_ideep', type=bool, default=False, help='Use ideep')
    parser.add_argument('--dtype', default='float', choices=['float', 'float16'], help='Data type used for training')
    parser.add_argument('--float16_compute', action='store_true', help='Use float 16 compute, if available')
    parser.add_argument('--enable_tensor_core', action='store_true', help='Enable Tensor Core math for Conv and FC ops')
    parser.add_argument('--distributed_transport', type=str, default='tcp', help='Transport to use for distributed run [tcp|ibverbs]')
    parser.add_argument('--distributed_interfaces', type=str, default='', help='Network interfaces to use for distributed run')
    parser.add_argument('--first_iter_timeout', type=int, default=1200, help='Timeout (secs) of the first iteration (default: %(default)s)')
    parser.add_argument('--timeout', type=int, default=60, help='Timeout (secs) of each (except the first) iteration (default: %(default)s)')
    parser.add_argument('--model', default='resnext', const='resnext', nargs='?', choices=['shufflenet', 'resnext'], help='List of models which can be run')
    args = parser.parse_args()
    Train(args)
def _format(message, category, filename, lineno, line=None):
w = '{}: {}\n'.format(category.__name__, message)
return w |
class SetShuffleProduct(ShuffleProduct_abstract):
    """Shuffle product of two sets (iterables) of iterables.

    The elements are the shuffles of every pair ``(u, v)`` with ``u``
    drawn from ``l1`` and ``v`` drawn from ``l2``.
    """
    def __init__(self, l1, l2, element_constructor=None):
        """Initialize from two iterables of iterables.

        If no ``element_constructor`` is given, try to take it from the
        parent of the first element of ``l1``.
        """
        assert (isinstance(l1, Iterable) and isinstance(l2, Iterable))
        assert all((isinstance(elem, Iterable) for elem in l1))
        assert all((isinstance(elem, Iterable) for elem in l2))
        if (element_constructor is None):
            try:
                e = next(iter(l1))
                try:
                    element_constructor = e.parent()._element_constructor_
                except AttributeError:
                    # element has no parent constructor; leave as None
                    pass
            except StopIteration:
                # l1 is empty; nothing to inspect
                pass
        ShuffleProduct_abstract.__init__(self, list(l1), list(l2), element_constructor)
    def _repr_(self):
        """Return a string representation of this shuffle set product."""
        return ('Shuffle set product of: %s and %s' % (self._element_constructor_(self._l1), self._element_constructor_(self._l2)))
    def _ascii_art_(self):
        """Return an ascii-art representation."""
        from sage.typeset.ascii_art import ascii_art
        return (ascii_art('Set shuffle product of:') * ((ascii_art(self._l1) + ascii_art(' and ')) + ascii_art(self._l2)))
    def __iter__(self):
        """Iterate over the shuffles of every pair from ``l1 x l2``."""
        return itertools.chain.from_iterable((ShuffleProduct(*pair, element_constructor=self._element_constructor_) for pair in itertools.product(self._l1, self._l2)))
    def cardinality(self):
        """Return the total number of shuffles over all pairs.

        A pair of words of lengths ``m`` and ``n`` contributes
        ``binomial(m + n, n)`` shuffles.
        """
        def comp_binom(el1, el2):
            ll1 = Integer(len(el1))
            ll2 = Integer(len(el2))
            return (ll1 + ll2).binomial(ll2)
        return sum((comp_binom(el1, el2) for (el1, el2) in itertools.product(self._l1, self._l2)))
def collect_results_gpu(result_part, size):
    """Gather per-rank result lists onto rank 0 via all_gather.

    Each rank pickles its ``result_part`` into a CUDA uint8 tensor, pads
    it to the largest byte-length across ranks, and all-gathers.  Rank 0
    unpickles every part, interleaves them back into dataset order, and
    truncates to ``size``.  Non-zero ranks return None (implicitly).

    Args:
        result_part (list): this rank's results.
        size (int): total number of results across all ranks.

    Returns:
        list | None: ordered results on rank 0, None elsewhere.
    """
    (rank, world_size) = get_dist_info()
    # serialize this rank's results into a CUDA byte tensor
    part_tensor = torch.tensor(bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
    # gather each rank's byte length so everyone can pad to the maximum
    shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
    shape_list = [shape_tensor.clone() for _ in range(world_size)]
    dist.all_gather(shape_list, shape_tensor)
    shape_max = torch.tensor(shape_list).max()
    # pad with zeros so all_gather sees equally-sized tensors
    part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
    part_send[:shape_tensor[0]] = part_tensor
    part_recv_list = [part_tensor.new_zeros(shape_max) for _ in range(world_size)]
    dist.all_gather(part_recv_list, part_send)
    if (rank == 0):
        part_list = []
        for (recv, shape) in zip(part_recv_list, shape_list):
            # trim the zero padding before unpickling
            part_result = pickle.loads(recv[:shape[0]].cpu().numpy().tobytes())
            part_list.append(part_result)
        # results were sharded round-robin, so zip interleaves them
        # back into the original order
        ordered_results = []
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        # drop the tail items added by padded samplers
        ordered_results = ordered_results[:size]
        return ordered_results
def run_simulation(model, loader, device):
    """Run `model` over `loader` in inference mode and return its predictions."""
    model.eval()
    with torch.no_grad():
        return eval_model(model, loader, device, return_predictions=True)
def load_mumps_libraries():
    """Populate `mumps_libs` with the C entry points of the real ('dmumps')
    and complex ('zmumps') MUMPS solver libraries."""
    for solver in ('dmumps', 'zmumps'):
        mumps_libs[solver] = getattr(load_library(solver), solver + '_c')
def get_configs_from_args(args):
    """Build the run configuration from command-line arguments.

    ``args[0]`` is the program name and is dropped.  If the next argument
    is a config file it is loaded directly; if it is a directory, its
    'config_input.yaml' is loaded.  Any remaining arguments are joined
    and parsed as YAML lines that override/extend the loaded config.

    Args:
        args: full argv-style list (program name first).

    Returns:
        dict: the merged configuration.

    Raises:
        ValueError: if the result lacks the mandatory KEY_SCHEME entry.
    """
    config = {}
    args = args[1:].copy()  # drop program name; copy so caller's list is untouched
    if args:  # BUG FIX: an empty argument list made args[0] raise IndexError
        if os.path.isfile(args[0]):
            config.update(read_config_from_file(args[0]))
            args = args[1:]
        elif os.path.isdir(args[0]):
            config_path = os.path.join(args[0], 'config_input.yaml')
            config.update(read_config_from_file(config_path))
            args = args[1:]
    if (len(args) > 0):
        # remaining args are treated as individual YAML lines
        additional_configs = yaml.load('\n'.join(args), Loader=yaml_Loader)
        config.update(additional_configs)
    if (KEY_SCHEME not in config):
        raise ValueError(f'"{KEY_SCHEME}" is not in config!')
    return config
def fuse_conv_bn(module):
    """Recursively fuse adjacent Conv2d + BatchNorm pairs in `module`.

    Walks the child modules in registration order; whenever a batch-norm
    layer directly follows a Conv2d, the pair is replaced by a single
    fused convolution (via `_fuse_conv_bn`) and the norm layer becomes an
    Identity.  The module is modified in place and also returned.
    """
    last_conv = None
    last_conv_name = None
    for (name, child) in module.named_children():
        if isinstance(child, (nn.modules.batchnorm._BatchNorm, nn.SyncBatchNorm)):
            if (last_conv is None):
                # norm layer without a preceding conv: nothing to fuse
                continue
            fused_conv = _fuse_conv_bn(last_conv, child)
            module._modules[last_conv_name] = fused_conv
            # keep the module graph intact by replacing the BN with a no-op
            module._modules[name] = nn.Identity()
            last_conv = None
        elif isinstance(child, nn.Conv2d):
            # remember this conv; it may be fused with the next BN child
            last_conv = child
            last_conv_name = name
        else:
            # recurse into container modules
            fuse_conv_bn(child)
    return module
def standard_lane(offset=3.6, rm=STD_ROADMARK_BROKEN):
    """Create a Lane of width `offset` carrying the roadmark `rm`."""
    lane = Lane(a=offset)
    lane.add_roadmark(rm)
    return lane
class NAG(Optimizer):
    """Nesterov accelerated gradient optimizer (fairseq formulation).

    Weight decay is applied multiplicatively to the parameters, and the
    momentum buffer is rescaled by lr/lr_old so that learning-rate
    schedules interact correctly with the accumulated momentum.
    """

    def __init__(self, params, lr=required, momentum=0, weight_decay=0):
        # lr_old starts equal to lr; it tracks the previous step's lr so
        # the momentum buffer can be rescaled when the lr changes.
        defaults = dict(lr=lr, lr_old=lr, momentum=momentum, weight_decay=weight_decay)
        super(NAG, self).__init__(params, defaults)

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (callable, optional): re-evaluates the model and
                returns the loss.

        Returns:
            The closure's loss, or None if no closure was given.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            lr = group['lr']
            lr_old = group.get('lr_old', lr)
            lr_correct = lr / lr_old  # rescales momentum for lr changes
            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad.data
                param_state = self.state[p]
                if 'momentum_buffer' not in param_state:
                    param_state['momentum_buffer'] = d_p.clone().zero_()
                buf = param_state['momentum_buffer']
                if weight_decay != 0:
                    # multiplicative weight decay on the parameters
                    p.data.mul_(1 - lr * weight_decay)
                # BUG FIX: the positional `Tensor.add_(Number, Tensor)`
                # overload is deprecated and removed in recent PyTorch;
                # use the `alpha=` keyword form (numerically identical).
                p.data.add_(buf, alpha=momentum * momentum * lr_correct)
                p.data.add_(d_p, alpha=-(1 + momentum) * lr)
                buf.mul_(momentum * lr_correct).add_(d_p, alpha=-lr)
            group['lr_old'] = lr
        return loss
class UNetDiscriminatorAesrgan(nn.Module):
    """U-Net style discriminator with spectral norm and attention gating.

    The encoder (conv1-3) downsamples three times; a 1x1 gating conv
    produces the signal consumed by the attention blocks; the decoder
    concatenates attended skip features (cat_1-3) while shrinking
    channels back, ending in 1-channel per-pixel logits.

    Args:
        num_in_ch (int): number of input channels.
        num_feat (int): base feature width. Default 64.
        skip_connection (bool): accepted but unused in this body —
            NOTE(review): presumably kept for API compatibility; confirm.
    """
    def __init__(self, num_in_ch, num_feat=64, skip_connection=True):
        super(UNetDiscriminatorAesrgan, self).__init__()
        norm = spectral_norm
        self.conv0 = nn.Conv2d(num_in_ch, num_feat, kernel_size=3, stride=1, padding=1)
        # encoder: stride-2 convs halve spatial size, double channels
        self.conv1 = norm(nn.Conv2d(num_feat, (num_feat * 2), 3, 2, 1, bias=False))
        self.conv2 = norm(nn.Conv2d((num_feat * 2), (num_feat * 4), 3, 2, 1, bias=False))
        self.conv3 = norm(nn.Conv2d((num_feat * 4), (num_feat * 8), 3, 2, 1, bias=False))
        # NOTE(review): kernel_size=1 with padding=1 enlarges the gating
        # map by 2 pixels per side — confirm this is intentional.
        self.gating = norm(nn.Conv2d((num_feat * 8), (num_feat * 4), 1, 1, 1, bias=False))
        # attention blocks gate encoder skip features with the gating signal
        self.attn_1 = add_attn(x_channels=(num_feat * 4), g_channels=(num_feat * 4))
        self.attn_2 = add_attn(x_channels=(num_feat * 2), g_channels=(num_feat * 4))
        self.attn_3 = add_attn(x_channels=num_feat, g_channels=(num_feat * 4))
        # upsample + concat helpers for the decoder
        self.cat_1 = unetCat(dim_in=(num_feat * 8), dim_out=(num_feat * 4))
        self.cat_2 = unetCat(dim_in=(num_feat * 4), dim_out=(num_feat * 2))
        self.cat_3 = unetCat(dim_in=(num_feat * 2), dim_out=num_feat)
        # decoder convs shrink channels back toward num_feat
        self.conv4 = norm(nn.Conv2d((num_feat * 8), (num_feat * 4), 3, 1, 1, bias=False))
        self.conv5 = norm(nn.Conv2d((num_feat * 4), (num_feat * 2), 3, 1, 1, bias=False))
        self.conv6 = norm(nn.Conv2d((num_feat * 2), num_feat, 3, 1, 1, bias=False))
        self.conv7 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False))
        self.conv8 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False))
        self.conv9 = nn.Conv2d(num_feat, 1, 3, 1, 1)  # final 1-channel logits
    def forward(self, x):
        """Return per-pixel realness logits for the input batch `x`."""
        # encoder
        x0 = F.leaky_relu(self.conv0(x), negative_slope=0.2, inplace=True)
        x1 = F.leaky_relu(self.conv1(x0), negative_slope=0.2, inplace=True)
        x2 = F.leaky_relu(self.conv2(x1), negative_slope=0.2, inplace=True)
        x3 = F.leaky_relu(self.conv3(x2), negative_slope=0.2, inplace=True)
        # gating signal for the attention blocks
        gated = F.leaky_relu(self.gating(x3), negative_slope=0.2, inplace=True)
        attn1 = self.attn_1(x2, gated)
        attn2 = self.attn_2(x1, gated)
        attn3 = self.attn_3(x0, gated)
        # decoder with attended skip connections
        x3 = self.cat_1(attn1, x3)
        x4 = F.leaky_relu(self.conv4(x3), negative_slope=0.2, inplace=True)
        x4 = self.cat_2(attn2, x4)
        x5 = F.leaky_relu(self.conv5(x4), negative_slope=0.2, inplace=True)
        x5 = self.cat_3(attn3, x5)
        x6 = F.leaky_relu(self.conv6(x5), negative_slope=0.2, inplace=True)
        # extra refinement convs before the final projection
        out = F.leaky_relu(self.conv7(x6), negative_slope=0.2, inplace=True)
        out = F.leaky_relu(self.conv8(out), negative_slope=0.2, inplace=True)
        out = self.conv9(out)
        return out
def runShardedTrainLoop(opts, myTrainFun):
    """Drive multi-shard training in epoch chunks.

    State (shard id, checkpoint/pretrained model, epoch counters) is
    threaded between chunks via opts['temp_var'].  Returns the last
    chunk's first non-None shard result (carrying 'model' and 'metrics'),
    or None if every shard returned None.
    """
    start_epoch = 0
    pretrained_model = opts['model_param']['pretrained_model']
    if ((pretrained_model != '') and os.path.exists(pretrained_model)):
        # resume bookkeeping (epoch counter, lr, best metric) from the file
        (start_epoch, prev_checkpointed_lr, best_metric) = checkpoint.initialize_params_from_file(model=None, weights_file=pretrained_model, num_xpus=1, opts=opts, broadcast_computed_param=True, reset_epoch=opts['model_param']['reset_epoch'])
    log.info('start epoch: {}'.format(start_epoch))
    pretrained_model = (None if (pretrained_model == '') else pretrained_model)
    ret = None
    # NOTE(review): this overwrites the path computed just above, so the
    # pretrained model path is never forwarded through temp_var — confirm
    # whether resetting to '' here is intentional.
    pretrained_model = ''
    shard_results = []
    for epoch in range(start_epoch, opts['epoch_iter']['num_epochs'], opts['epoch_iter']['num_epochs_per_flow_schedule']):
        # after the first chunk, continue from the previous chunk's model
        checkpoint_model = (None if (epoch == start_epoch) else ret['model'])
        pretrained_model = (None if (epoch > start_epoch) else pretrained_model)
        shard_results = []
        for shard_id in range(opts['distributed']['num_shards']):
            # each shard reads its inputs from opts['temp_var']
            opts['temp_var']['shard_id'] = shard_id
            opts['temp_var']['pretrained_model'] = pretrained_model
            opts['temp_var']['checkpoint_model'] = checkpoint_model
            opts['temp_var']['epoch'] = epoch
            opts['temp_var']['start_epoch'] = start_epoch
            shard_ret = myTrainFun(opts)
            shard_results.append(shard_ret)
        ret = None
        # keep the first shard that produced a result; it supplies the
        # model and metrics for the next chunk
        for shard_ret in shard_results:
            if (shard_ret is not None):
                ret = shard_ret
                opts['temp_var']['metrics_output'] = ret['metrics']
                break
        log.info('ret is: {}'.format(str(ret)))
    return ret
def main():
    """Train an ensemble of KegNet generators for MNIST, then distill a student."""
    num_generators = 5
    dataset = 'mnist'
    teacher_path = f'../pretrained/{dataset}.pth.tar'
    out_dir = f'../out/{dataset}'
    # train each generator and collect the resulting model paths
    generator_paths = [
        train_generator(dataset, teacher_path, f'{out_dir}/generator-{idx}', idx)
        for idx in range(num_generators)
    ]
    data_dist = 'kegnet'
    option = 1
    seed = 0
    classifier_path = f'{out_dir}/classifier-{seed}'
    train_student(dataset, data_dist, classifier_path, seed, teacher_path, generator_paths, option)
class ConvSN3D(Conv3D):
    """3D convolution with spectral normalization of the kernel.

    The largest singular value of the flattened kernel is estimated with
    one power-iteration step per call and the kernel is divided by it
    (SN-GAN style), bounding the layer's spectral norm.
    """
    def build(self, input_shape):
        """Create `kernel`, optional `bias`, and the persistent power-iteration
        vector `u` (shape (1, filters), non-trainable)."""
        if (self.data_format == 'channels_first'):
            channel_axis = 1
        else:
            channel_axis = (- 1)
        if (input_shape[channel_axis] is None):
            raise ValueError('The channel dimension of the inputs should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]
        kernel_shape = (self.kernel_size + (input_dim, self.filters))
        self.kernel = self.add_weight(shape=kernel_shape, initializer=self.kernel_initializer, name='kernel', regularizer=self.kernel_regularizer, constraint=self.kernel_constraint)
        # u: running estimate of the dominant singular vector
        self.u = self.add_weight(shape=tuple([1, self.kernel.shape.as_list()[(- 1)]]), initializer=initializers.RandomNormal(0, 1), name='sn', trainable=False)
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.filters,), initializer=self.bias_initializer, name='bias', regularizer=self.bias_regularizer, constraint=self.bias_constraint)
        else:
            self.bias = None
        self.input_spec = InputSpec(ndim=(self.rank + 2), axes={channel_axis: input_dim})
        self.built = True
    def call(self, inputs, training=None):
        """Apply the spectrally-normalized convolution.

        In inference mode (`training` in {0, False}) `u` is left
        untouched; otherwise the updated `u` is assigned back before the
        convolution runs.
        """
        def _l2normalize(v, eps=1e-12):
            return (v / ((K.sum((v ** 2)) ** 0.5) + eps))
        def power_iteration(W, u):
            # one step: v = normalize(u W^T), u = normalize(v W)
            _u = u
            _v = _l2normalize(K.dot(_u, K.transpose(W)))
            _u = _l2normalize(K.dot(_v, W))
            return (_u, _v)
        W_shape = self.kernel.shape.as_list()
        # flatten kernel to 2-D: all dims but the last are collapsed
        W_reshaped = K.reshape(self.kernel, [(- 1), W_shape[(- 1)]])
        (_u, _v) = power_iteration(W_reshaped, self.u)
        # sigma ~= largest singular value: v W u^T
        sigma = K.dot(_v, W_reshaped)
        sigma = K.dot(sigma, K.transpose(_u))
        W_bar = (W_reshaped / sigma)
        if (training in {0, False}):
            W_bar = K.reshape(W_bar, W_shape)
        else:
            # persist the power-iteration state before using the kernel
            with tf.control_dependencies([self.u.assign(_u)]):
                W_bar = K.reshape(W_bar, W_shape)
        outputs = K.conv3d(inputs, W_bar, strides=self.strides, padding=self.padding, data_format=self.data_format, dilation_rate=self.dilation_rate)
        if self.use_bias:
            outputs = K.bias_add(outputs, self.bias, data_format=self.data_format)
        if (self.activation is not None):
            return self.activation(outputs)
        return outputs
def compare_commands_to_demonstrator(out_directory: str, parameters: Dict[(str, Union[(int, float, str, bool)])], loaded_policies: Iterable[policies.RvS], attribute_dicts: List[Dict[(str, Union[(int, float, str)])]], env: offline_env.OfflineEnv, goals: Union[(np.ndarray, List[np.ndarray])], goal_names: List[Union[(str, int, float)]], file_tag: str='Iter', title: Optional[str]=None, trajectory_samples: int=200, dynamic_demonstrator: bool=False, wandb_run: Optional[Run]=None) -> None:
    """Evaluate each policy on every goal and plot cumulative reward
    against a demonstrator baseline.

    One reward vector is collected per (policy, goal) pair.  When
    `dynamic_demonstrator` is True, an extra 'dynamic' episode is sampled
    per policy; otherwise the demonstrator's static reward vector is
    appended once as the baseline.
    """
    assert (len(goals) == len(goal_names))
    all_reward_vecs = []
    all_attribute_dicts = []
    for (loaded_policy, attribute_dict) in zip(loaded_policies, attribute_dicts):
        all_reward_vecs += step.evaluate_goals(loaded_policy, env, goals, trajectory_samples=trajectory_samples)
        # one attribute record per goal, tagged with the goal's name
        all_attribute_dicts += [{**attribute_dict, 'Goal': goal_name} for goal_name in goal_names]
        if dynamic_demonstrator:
            all_reward_vecs.append(step.sample_episode_performance(loaded_policy, env, parameters['env_name'], parameters['max_episode_steps'], traj_samples=trajectory_samples, kitchen_subtask='dynamic'))
            all_attribute_dicts.append({**attribute_dict, 'Goal': 'dynamic'})
    if (not dynamic_demonstrator):
        # static baseline: the demonstrator's recorded rewards
        all_reward_vecs.append(visualize.get_demonstrator_reward_vec(env))
        all_attribute_dicts.append({'Policy': 'Demonstrator'})
    visualize.visualize_cumulative_reward(all_reward_vecs, all_attribute_dicts, parameters, out_directory, x='Goal', file_tag=file_tag, title=title, wandb_run=wandb_run)
def main():
    """Generate every analysis figure in sequence."""
    figure_fns = (plot_collapse, plot_ETF, plot_WH_relation,
                  plot_residual, plot_train_acc, plot_test_acc)
    for plot_fn in figure_fns:
        plot_fn()
def val_generator(source_path, folder_list, batch_size):
    """Yield validation batches forever, reshuffling folder order each epoch.

    Full batches of `batch_size` are yielded first; a final smaller batch
    covers any remainder so every folder is seen exactly once per epoch.

    Args:
        source_path: root directory of the data.
        folder_list: folders to draw batches from.
        batch_size: number of folders per (full) batch.
    """
    print('Source path = ', source_path, '; batch size =', batch_size)
    while True:
        t = np.random.permutation(folder_list)
        num_batches = (len(folder_list) // batch_size)
        for batch in range(num_batches):
            (yield val_load_batch_images(source_path, folder_list, batch, batch_size, t))
        # BUG FIX: the original reassigned `batch_size` to the remainder
        # here, so from the second epoch on every "full" batch used the
        # remainder size.  Use a local variable instead.
        remainder = (len(folder_list) - (batch_size * num_batches))
        if remainder:
            (yield val_load_batch_images(source_path, folder_list, num_batches, remainder, t))
def read_pfm(file):
    """Read a PFM (Portable Float Map) image file.

    Args:
        file: path to the .pfm file.

    Returns:
        (data, scale): `data` is an (H, W, 3) float32 array for colour
        ('PF') files or (H, W) for greyscale ('Pf'); `scale` is the
        absolute scale factor from the header.

    Raises:
        Exception: if the header is not a valid PFM header.
    """
    # BUG FIX: the original left the file handle open on every exception
    # path; a context manager guarantees it is closed.
    with open(file, 'rb') as f:
        header = str(bytes.decode(f.readline().rstrip(), encoding='utf-8'))
        if (header == 'PF'):
            color = True
        elif (header == 'Pf'):
            color = False
        else:
            raise Exception('Not a PFM file.')
        # Dimensions line: "<width> <height>"; tolerate the two numbers
        # being split across two physical lines (some writers do this).
        pattern = '^(\\d+)\\s(\\d+)\\s$'
        temp_str = str(bytes.decode(f.readline(), encoding='utf-8'))
        dim_match = re.match(pattern, temp_str)
        if (not dim_match):
            temp_str += str(bytes.decode(f.readline(), encoding='utf-8'))
            dim_match = re.match(pattern, temp_str)
        if (not dim_match):
            raise Exception('Malformed PFM header: width, height cannot be found')
        (width, height) = map(int, dim_match.groups())
        scale = float(f.readline().rstrip())
        if (scale < 0):
            # a negative scale marks little-endian sample data
            endian = '<'
            scale = (- scale)
        else:
            endian = '>'
        data = np.fromfile(f, (endian + 'f'))
    shape = ((height, width, 3) if color else (height, width))
    data = np.reshape(data, shape)
    return (data, scale)
def get_dataset_names(data_name):
    """Map a (case-insensitive) data-pair identifier to its two dataset names.

    Raises an AssertionError for unknown identifiers.
    """
    pairs = {
        'image_pair_mnist': ['cifar10', 'mnist'],
        'image_pair_rotation': ['cifar10', 'cifar10-rotated'],
        'image_pair_flip': ['cifar10', 'cifar10-flipped'],
        'image_pair_mnist_sound': ['mnist', 'fdss'],
        'kinetics_sounds': ['kinetics-sounds-slowfast', 'kinetics-sounds-vggish'],
    }
    key = data_name.lower()
    assert (key in pairs), f'invalid data pair type: {key}'
    return pairs[key]
class ResultHandler(ScorerHandler):
    """HTTP handler that returns the current score as a JSON payload."""
    def get(self):
        # Serialize the scorer's result and send it as the response body.
        self.write(json.dumps(self.scorer.score()))
class UnboundSymbols(EnvTransform, SkipDeclarations):
    """Tree visitor that collects names unbound in their enclosing scope.

    Calling an instance on a node runs the transform and returns the set
    of unresolved names.
    """
    def __init__(self):
        CythonTransform.__init__(self, None)
        self.unbound = set()  # names that failed scope lookup
    def visit_NameNode(self, node):
        # record the name if the current environment cannot resolve it
        if (not self.current_env().lookup(node.name)):
            self.unbound.add(node.name)
        return node
    def __call__(self, node):
        super(UnboundSymbols, self).__call__(node)
        return self.unbound
class ConstantImportanceMetric(BaseImportanceMetric):
    """Importance metric with deterministic, constant scores (for tests).

    Channels of the two entry nodes are partitioned into SIMD-sized
    groups, and each group is scored as minus its smallest channel index,
    so scores decrease monotonically with channel position.
    """

    # Output-channel counts of the first/second entry node; must be set on
    # the class before the metric is used.
    first_num_oc = None
    second_num_oc = None
    simd = 1  # SIMD group width

    def __init__(self, **kwargs):
        pass

    @staticmethod
    def _simd_groups(num_oc):
        # Partition channel indices [0, num_oc) into consecutive groups of
        # at most `simd` channels (the last group may be smaller).
        simd = ConstantImportanceMetric.simd
        return [np.arange(i, min((i + simd), num_oc)) for i in range(0, num_oc, simd)]

    def get_entry_node_to_simd_score(self, entry_nodes: List[BaseNode]):
        """Return ({node: per-group scores}, {node: per-group channel indices}).

        Refactored: the original built each grouping expression four times
        inline; the shared logic now lives in `_simd_groups`.
        """
        grouped_indices = {entry_nodes[0]: self._simd_groups(ConstantImportanceMetric.first_num_oc),
                           entry_nodes[1]: self._simd_groups(ConstantImportanceMetric.second_num_oc)}
        entry_node_to_simd_score = {node: [(- np.min(group)) for group in groups]
                                    for (node, groups) in grouped_indices.items()}
        return (entry_node_to_simd_score, grouped_indices)
def make_graph_from_vectors(X, *, knn_edges, random_edges=0, virtual_vertices=0, deduplicate=True, directed=True, verbose=False, squared=True, **kwargs):
    """Build a GraphEmbedding from vectors via kNN (plus optional random) edges.

    Args:
        X: (num_vectors, dim) array of points.
        knn_edges: neighbors per vertex for the kNN edges.
        random_edges: extra random edges per vertex (0 disables).
        virtual_vertices: if nonzero, append this many k-means centroids
            as extra vertices before building edges.
        deduplicate: drop duplicate edges (undirected pairs are
            canonicalized by sorting endpoints).
        directed: whether the resulting graph is directed.
        verbose: print progress messages.
        squared: keep squared L2 distances (take sqrt when False).
        **kwargs: forwarded to GraphEmbedding.

    Returns:
        GraphEmbedding built from the collected edges, weighted by distance.
    """
    (num_vectors, vector_dim) = X.shape
    X = np.require(check_numpy(X), dtype=np.float32, requirements=['C_CONTIGUOUS'])
    if (virtual_vertices != 0):
        if verbose:
            print('Creating virtual vertices by k-means')
        # centroids become additional vertices appended after the data points
        X_clusters = KMeans(virtual_vertices).fit(X).cluster_centers_
        X = np.concatenate([X, X_clusters])
    if verbose:
        print('Searching for nearest neighbors')
    try:
        # prefer faiss (fast); +1 neighbor because each point finds itself
        from faiss import IndexFlatL2
        index = IndexFlatL2(vector_dim)
        index.add(X)
        (neighbor_distances, neighbor_indices) = index.search(X, (knn_edges + 1))
    except ImportError:
        warn('faiss not found, using slow knn instead')
        (neighbor_distances, neighbor_indices) = NearestNeighbors(n_neighbors=(knn_edges + 1)).fit(X).kneighbors(X)
    if verbose:
        print('Adding knn edges')
    (edges_from, edges_to, distances) = ([], [], [])
    for vertex_i in np.arange(num_vectors):
        for (neighbor_i, distance) in zip(neighbor_indices[vertex_i], neighbor_distances[vertex_i]):
            if (vertex_i == neighbor_i):
                # skip self-loops
                continue
            if (neighbor_i == (- 1)):
                # faiss pads missing neighbors with -1
                continue
            if (not squared):
                distance **= 0.5
            edges_from.append(vertex_i)
            edges_to.append(neighbor_i)
            distances.append(distance)
    if (random_edges != 0):
        if verbose:
            print('Adding random edges')
        random_from = np.random.randint(0, num_vectors, (num_vectors * random_edges))
        random_to = np.random.randint(0, num_vectors, (num_vectors * random_edges))
        for (vertex_i, neighbor_i) in zip(random_from, random_to):
            if (vertex_i != neighbor_i):
                # squared L2 distance, optionally rooted like the knn edges
                distance = np.sum(((X[vertex_i] - X[neighbor_i]) ** 2))
                if (not squared):
                    distance **= 0.5
                edges_from.append(vertex_i)
                edges_to.append(neighbor_i)
                distances.append(distance)
    if deduplicate:
        if verbose:
            print('Deduplicating edges')
        unique_edges_dict = {}
        for (from_i, to_i, distance) in zip(edges_from, edges_to, distances):
            edge_iijj = (int(from_i), int(to_i))
            if (not directed):
                # canonical order so (a, b) and (b, a) collapse together
                edge_iijj = tuple(sorted(edge_iijj))
            unique_edges_dict[edge_iijj] = distance
        (edges_iijj, distances) = zip(*unique_edges_dict.items())
        (edges_from, edges_to) = zip(*edges_iijj)
    (edges_from, edges_to, distances) = map(np.asanyarray, [edges_from, edges_to, distances])
    if verbose:
        print('Total edges: {}, mean edges per vertex: {}, mean distance: {}'.format(len(edges_from), (len(edges_from) / float(num_vectors)), np.mean(distances)))
    return GraphEmbedding(edges_from, edges_to, initial_weights=distances, directed=directed, **kwargs)
class RowStandardTableauTuples_all(RowStandardTableauTuples, DisjointUnionEnumeratedSets):
    """The set of all row standard tableau tuples, realized as the disjoint
    union of the fixed-shape sets over all partition tuples (as a facade)."""
    def __init__(self):
        RowStandardTableauTuples.__init__(self)
        from sage.combinat.partition_tuple import PartitionTuples
        DisjointUnionEnumeratedSets.__init__(self, Family(PartitionTuples(), RowStandardTableauTuples_shape), facade=True, keepkey=False)
    def _repr_(self):
        """Return a string representation of this set."""
        return 'Row standard tableau tuples'
    def an_element(self):
        """Return a particular element of the set."""
        return self.element_class(self, reversed([[range((2 ** (i - 1)), (2 ** i))] for i in range(1, 4)]))
def squad_convert_examples_to_features(examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training, threads=1):
    """Convert SQuAD examples to model features and a TensorDataset.

    Tokenization is parallelized across `threads` worker processes; every
    resulting feature is then tagged with its example index and a
    globally unique id.

    Args:
        examples: list of SQuAD example objects.
        tokenizer: tokenizer handed to the worker initializer.
        max_seq_length / doc_stride / max_query_length: tokenization limits.
        is_training (bool): include answer start/end positions when True.
        threads (int): worker process count (capped at cpu_count()).

    Returns:
        (features, dataset): the feature list and the TensorDataset built
        from it (evaluation datasets carry an example-index column instead
        of answer positions).
    """
    features = []
    threads = min(threads, cpu_count())
    with Pool(threads, initializer=squad_convert_example_to_features_init, initargs=(tokenizer,)) as p:
        annotate_ = partial(squad_convert_example_to_features, max_seq_length=max_seq_length, doc_stride=doc_stride, max_query_length=max_query_length, is_training=is_training)
        features = list(tqdm(p.imap(annotate_, examples, chunksize=32), total=len(examples), desc='convert squad examples to features'))
    new_features = []
    # BUG FIX: `unique_id` had no value (SyntaxError).  Use the
    # conventional SQuAD base id so feature ids never collide with
    # example indices.
    unique_id = 1000000000
    example_index = 0
    for example_features in tqdm(features, total=len(features), desc='add example index and unique id'):
        if (not example_features):
            continue
        for example_feature in example_features:
            example_feature.example_index = example_index
            example_feature.unique_id = unique_id
            new_features.append(example_feature)
            unique_id += 1
        example_index += 1
    features = new_features
    del new_features
    # pack all per-feature fields into tensors
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_masks = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)
    all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)
    all_is_impossible = torch.tensor([f.is_impossible for f in features], dtype=torch.float)
    if (not is_training):
        all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
        dataset = TensorDataset(all_input_ids, all_attention_masks, all_token_type_ids, all_example_index, all_cls_index, all_p_mask)
    else:
        all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
        all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
        dataset = TensorDataset(all_input_ids, all_attention_masks, all_token_type_ids, all_start_positions, all_end_positions, all_cls_index, all_p_mask, all_is_impossible)
    return (features, dataset)
class TestDSWrapper():
    """Tests for DSWrapper initialization (COCO json / geojson) and logging.

    Relies on module-level fixtures: data_path, base_ds, input_path,
    json_annotations_path, and their geojson counterparts.
    """
    def test_implicit_coco_initialization(self):
        """data_path alone should be enough to discover input and json annotations."""
        ds_wrapper = DSWrapper(data_path=data_path)
        assert (ds_wrapper.parent_folder == base_ds)
        assert (ds_wrapper.data_path == data_path), 'root datset should be equal'
        assert (ds_wrapper.data_input == input_path), 'DSWrapper should find data input path'
        assert (ds_wrapper.json_annotations == json_annotations_path), 'DSWrapper should find json annotations'
        assert (ds_wrapper.get_annotations() == [json_annotations_path])
    def test_explicit_coco_initialization(self):
        """Passing input and json annotation paths explicitly should behave the same."""
        ds_wrapper = DSWrapper(data_input=input_path, json_annotations=json_annotations_path)
        assert (ds_wrapper.parent_folder == base_ds)
        assert (ds_wrapper.data_path == data_path), 'root datset should be equal'
        assert (ds_wrapper.data_input == input_path), 'DSWrapper should find data input path'
        assert (ds_wrapper.json_annotations == json_annotations_path), 'DSWrapper should find json annotations'
        assert (ds_wrapper.get_annotations() == [json_annotations_path])
    def test_implicit_geojson_initialization(self):
        """data_path alone should also discover geojson annotations."""
        ds_wrapper = DSWrapper(data_path=data_path_geojson)
        assert (ds_wrapper.parent_folder == base_ds)
        assert (ds_wrapper.data_path == data_path_geojson), 'root datset should be equal'
        assert (ds_wrapper.data_input == input_path_geojson), 'DSWrapper should find data input path'
        assert (ds_wrapper.geojson_annotations == geojson_annotations_path), 'DSWrapper should find json annotations'
        assert (ds_wrapper.get_annotations() == [geojson_annotations_path])
    def test_explicit_geojson_initialization(self):
        """Passing input and geojson annotation paths explicitly should behave the same."""
        ds_wrapper = DSWrapper(data_input=input_path_geojson, geojson_annotations=geojson_annotations_path)
        assert (ds_wrapper.parent_folder == base_ds)
        assert (ds_wrapper.data_path == data_path_geojson), 'root datset should be equal'
        assert (ds_wrapper.data_input == input_path_geojson), 'DSWrapper should find data input path'
        assert (ds_wrapper.geojson_annotations == geojson_annotations_path), 'DSWrapper should find json annotations'
        assert (ds_wrapper.get_annotations() == [geojson_annotations_path])
    def test_log_parameters(self):
        """set_log_parameters should extend the base log params (ds_name)."""
        expected_log_params = {'ds_name': ds_name}
        expected_extended_log_params = {'ds_name': ds_name, 'test': 'test'}
        ds_wrapper = DSWrapper(data_path=data_path)
        assert (ds_wrapper.log_parameters() == expected_log_params), 'Base log parameters should be the same'
        ds_wrapper.set_log_parameters({'test': 'test'})
        assert (ds_wrapper.log_parameters() == expected_extended_log_params), 'Extended log parameters should be the same'
class SimpleExperiment():
    """Minimal in-memory experiment logger.

    Each metric is stored in its own single-column DataFrame indexed by
    the time point ('epoch' or 'step').
    """
    def __init__(self):
        # metric name -> DataFrame with one column named after the metric
        self.data = {}
    def log_hparams(self, params: dict[(str, Any)]) -> None:
        """Log hyper-parameters (no-op for this in-memory logger)."""
        # BUG FIX: this def had no body at all (SyntaxError); the
        # docstring now serves as an explicit empty implementation.
    def log_metrics(self, metrics: dict[(str, float)], step: Optional[int]=None) -> None:
        """Record metric values at a time point.

        The time point is popped from an 'epoch' or 'step' entry of
        `metrics` if present; otherwise the `step` argument is used.
        Tensor values are unwrapped to Python numbers.
        """
        def _handle_value(value):
            # unwrap 0-d tensors to plain Python numbers
            if isinstance(value, torch.Tensor):
                return value.item()
            return value
        if ('epoch' in metrics.keys()):
            time_point = metrics.pop('epoch')
            time_point_name = 'epoch'
        elif ('step' in metrics.keys()):
            time_point = metrics.pop('step')
            time_point_name = 'step'
        else:
            time_point = step
            time_point_name = 'step'
        for (metric, value) in metrics.items():
            if (metric not in self.data):
                # lazily create one DataFrame per metric
                self.data[metric] = pd.DataFrame(columns=[metric])
                self.data[metric].index.name = time_point_name
            self.data[metric].loc[(time_point, metric)] = _handle_value(value)
    def save(self) -> None:
        """Persist logged data (no-op for this in-memory logger)."""
        # BUG FIX: this def also had no body (SyntaxError).
class TFMobileBertForSequenceClassification():
    """Import-guard stub: any use calls `requires_tf`, which is expected to
    raise an informative error when TensorFlow is not installed."""
    def __init__(self, *args, **kwargs):
        requires_tf(self)
    def from_pretrained(self, *args, **kwargs):
        # NOTE(review): defined as an instance method although the real
        # class's from_pretrained is a classmethod — presumably only ever
        # invoked to trigger the TF error; confirm.
        requires_tf(self)
def word_normalise(words):
    """Normalise tokens: canonicalise month names and known replacement
    words, strip vocabulary patterns, then strip trailing punctuation."""
    normalised = []
    for token in words:
        lowered = token.lower()
        if lowered in months:
            token = months[lowered]
        # re-lower: the month substitution may have changed the token
        if token.lower() in replace_words:
            token = replace_words[token.lower()]
        for pattern in replace_vocab:
            token = re.sub(pattern, '', token)
        token = re.sub('[\\.\\,\\!\\?;\\/]', '', token)
        normalised.append(token)
    return normalised
def create_RepVGG_B2g4(last_stride, norm_type):
    """Build the RepVGG-B2g4 variant: width multiplier 2.5 (5x for the
    final stage) with the g4 grouped-convolution layer map."""
    return RepVGG(last_stride, norm_type, num_blocks=[4, 6, 16, 1], width_multiplier=[2.5, 2.5, 2.5, 5], override_groups_map=g4_map)
# BUG FIX: the decorator was truncated to a bare `.parametrize(...)`
# (invalid syntax); restore the `@pytest.mark` prefix.
@pytest.mark.parametrize('data_dict, name, source, type, hint, result', [pytest.param('full_spark_dataset', 'gender', None, None, None, ['user_id', 'item_id', 'timestamp', 'rating', 'category_id', 'feature1'], marks=pytest.mark.spark), pytest.param('full_spark_dataset', 'feature1', FeatureSource.ITEM_FEATURES, FeatureType.NUMERICAL, None, ['user_id', 'item_id', 'timestamp', 'gender'], marks=pytest.mark.spark), pytest.param('full_spark_dataset', None, FeatureSource.ITEM_FEATURES, None, None, ['user_id', 'item_id', 'timestamp', 'rating', 'gender'], marks=pytest.mark.spark), pytest.param('full_pandas_dataset', 'gender', None, None, None, ['user_id', 'item_id', 'timestamp', 'rating', 'category_id', 'feature1'], marks=pytest.mark.core), pytest.param('full_pandas_dataset', 'feature1', FeatureSource.ITEM_FEATURES, FeatureType.NUMERICAL, None, ['user_id', 'item_id', 'timestamp', 'gender'], marks=pytest.mark.core), pytest.param('full_pandas_dataset', None, FeatureSource.ITEM_FEATURES, None, None, ['user_id', 'item_id', 'timestamp', 'rating', 'gender'], marks=pytest.mark.core)])
def test_feature_schema_schema_drop(data_dict, name, source, type, hint, result, request):
    """feature_schema.drop should remove matching column(s), leaving `result`."""
    dataset = create_dataset(request.getfixturevalue(data_dict))
    assert (dataset.feature_schema.drop(column=name, feature_source=source, feature_type=type, feature_hint=hint).columns == result)
def cla1_adv_ll_clamp(input, target, class_freq):
    """Mean of each sample's target-class logit, clamped from below at the
    log of the target class frequency (frequencies floored at 1e-08)."""
    floor = class_freq[target].clamp(min=1e-08).log()
    picked = torch.gather(input, 1, target.unsqueeze(1)).squeeze(1)
    return torch.maximum(picked, floor).mean()
class miniImageNetMultiCrop(miniImageNet):
    """miniImageNet variant returning several random crops per image.

    Each __getitem__ yields `num_patch` independently augmented views of
    the same image, stacked into one tensor.
    """
    def __init__(self, root, mode, num_patch=9, image_sz=84):
        super().__init__(root, mode)
        self.num_patch = num_patch  # augmented views per sample
        # random crop/flip + per-channel normalization
        # NOTE(review): mean/std presumably are miniImageNet statistics — confirm
        self.transform = transforms.Compose([transforms.RandomResizedCrop(image_sz), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(np.array([0.4712, 0.4499, 0.4031]), np.array([0.2726, 0.2634, 0.2794]))])
    def __getitem__(self, index):
        """Return (patches, target) where patches has shape (num_patch, C, H, W)."""
        (path, target) = self.samples[index]
        sample = self.loader(path)
        patch_list = []
        for _ in range(self.num_patch):
            # each pass re-samples the random crop/flip
            patch_list.append(self.transform(sample))
        patch_list = torch.stack(patch_list, dim=0)
        return (patch_list, target)
def test_validate_series(df_urls: pd.DataFrame) -> None:
    """validate_url on a Series should flag exactly the well-formed URLs."""
    expected = pd.Series([False, True, True, False, False, False, True,
                          False, False, False, False, True, True])
    result = validate_url(df_urls['messy_url'])
    assert expected.equals(result)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.