def get_dummy_graph():
g = nx.DiGraph()
g.add_nodes_from(['kitchen', 'spoon', 'living room'])
g.add_edge('spoon', 'kitchen', type='in')
g.add_edge('kitchen', 'living room', type='connected')
g.add_edge('living room', 'kitchen', type='connected')
g.nodes['kitchen']['type'] = 'room'
g.nodes['living room']['type'] = 'room'
g.nodes['spoon']['type'] = 'object'
return g |
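# Illustrative usage of the fixture above (assumes networkx imported as nx, as in the snippet):
# query the toy scene graph for objects located "in" the kitchen.
g = get_dummy_graph()
objects_in_kitchen = [src for src, dst, data in g.in_edges('kitchen', data=True) if data['type'] == 'in']
print(objects_in_kitchen)        # ['spoon']
print(g.nodes['spoon']['type'])  # 'object'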
def test_push_pull_manifest_list_again(v22_protocol, basic_images, different_images, liveserver_session, app_reloader, data_model):
credentials = ('devtable', 'password')
options = ProtocolOptions()
blobs = {}
first_manifest = v22_protocol.build_schema2(basic_images, blobs, options)
second_manifest = v22_protocol.build_schema2(different_images, blobs, options)
builder = DockerSchema2ManifestListBuilder()
builder.add_manifest(first_manifest, 'amd64', 'linux')
builder.add_manifest(second_manifest, 'arm', 'linux')
manifestlist = builder.build()
v22_protocol.push_list(liveserver_session, 'devtable', 'newrepo', 'latest', manifestlist, [first_manifest, second_manifest], blobs, credentials=credentials, options=options)
options.skip_head_checks = True
v22_protocol.push_list(liveserver_session, 'devtable', 'newrepo', 'latest', manifestlist, [first_manifest, second_manifest], blobs, credentials=credentials, options=options)
v22_protocol.pull_list(liveserver_session, 'devtable', 'newrepo', 'latest', manifestlist, credentials=credentials, options=options) |
class LinearLRScheduler(Scheduler):
def __init__(self, optimizer: torch.optim.Optimizer, t_initial: int, lr_min_rate: float, warmup_t=0, warmup_lr_init=0.0, t_in_epochs=True, noise_range_t=None, noise_pct=0.67, noise_std=1.0, noise_seed=42, initialize=True) -> None:
super().__init__(optimizer, param_group_field='lr', noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, initialize=initialize)
self.t_initial = t_initial
self.lr_min_rate = lr_min_rate
self.warmup_t = warmup_t
self.warmup_lr_init = warmup_lr_init
self.t_in_epochs = t_in_epochs
if self.warmup_t:
self.warmup_steps = [((v - warmup_lr_init) / self.warmup_t) for v in self.base_values]
super().update_groups(self.warmup_lr_init)
else:
self.warmup_steps = [1 for _ in self.base_values]
def _get_lr(self, t):
if (t < self.warmup_t):
lrs = [(self.warmup_lr_init + (t * s)) for s in self.warmup_steps]
else:
t = (t - self.warmup_t)
total_t = (self.t_initial - self.warmup_t)
lrs = [(v - ((v - (v * self.lr_min_rate)) * (t / total_t))) for v in self.base_values]
return lrs
def get_epoch_values(self, epoch: int):
if self.t_in_epochs:
return self._get_lr(epoch)
else:
return None
def get_update_values(self, num_updates: int):
if (not self.t_in_epochs):
return self._get_lr(num_updates)
else:
return None |
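# Self-contained sketch of the schedule computed by _get_lr above: a linear warmup from
# warmup_lr_init to the base learning rate, then a linear decay toward base_lr * lr_min_rate.
# The numbers below are arbitrary illustrations, not project defaults.
base_lr, lr_min_rate = 0.001, 0.01
warmup_t, warmup_lr_init, t_initial = 5, 1e-06, 50

def lr_at(t):
    if t < warmup_t:
        step = (base_lr - warmup_lr_init) / warmup_t  # matches self.warmup_steps
        return warmup_lr_init + t * step
    frac = (t - warmup_t) / (t_initial - warmup_t)
    return base_lr - (base_lr - base_lr * lr_min_rate) * frac

for t in (0, 4, 5, 25, 49):
    print(t, lr_at(t))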
@torch.no_grad()
def convsample(model, shape, return_intermediates=True, verbose=True, make_prog_row=False):
if (not make_prog_row):
return model.p_sample_loop(None, shape, return_intermediates=return_intermediates, verbose=verbose)
else:
return model.progressive_denoising(None, shape, verbose=True) |
def _get_dataloader(data_length: int, dl2: bool, shuffle: bool, rs=None):
data_source = IterableWrapper(list(range(data_length)))
dp = data_source.sharding_filter()
if shuffle:
dp = dp.shuffle()
if dl2:
if (rs is None):
rs = DistributedReadingService()
dl = DataLoader2(dp, reading_service=rs)
else:
dp = dp.fullsync()
dl = DataLoader(dp)
return dl |
class BridgeTowerConfig(PretrainedConfig):
model_type = 'bridgetower'
def __init__(self, share_cross_modal_transformer_layers=True, hidden_act='gelu', hidden_size=768, initializer_factor=1, layer_norm_eps=1e-05, share_link_tower_layers=False, link_tower_type='add', num_attention_heads=12, num_hidden_layers=6, tie_word_embeddings=False, init_layernorm_from_vision_encoder=False, text_config=None, vision_config=None, **kwargs):
_ = kwargs.pop('text_config_dict', None)
_ = kwargs.pop('vision_config_dict', None)
super().__init__(**kwargs)
self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
self.hidden_act = hidden_act
self.hidden_size = hidden_size
self.initializer_factor = initializer_factor
self.layer_norm_eps = layer_norm_eps
self.share_link_tower_layers = share_link_tower_layers
self.link_tower_type = link_tower_type
self.num_attention_heads = num_attention_heads
self.num_hidden_layers = num_hidden_layers
self.tie_word_embeddings = tie_word_embeddings
self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
if (text_config is None):
text_config = {}
logger.info('`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.')
if (vision_config is None):
vision_config = {}
logger.info('`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.')
self.text_config = BridgeTowerTextConfig(**text_config)
self.vision_config = BridgeTowerVisionConfig(**vision_config)
@classmethod
def from_text_vision_configs(cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs):
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
def to_dict(self):
output = copy.deepcopy(self.__dict__)
output['text_config'] = self.text_config.to_dict()
output['vision_config'] = self.vision_config.to_dict()
output['model_type'] = self.__class__.model_type
return output |
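# Illustrative use of from_text_vision_configs; assumes the BridgeTower config classes
# shipped with Hugging Face transformers are importable (they are in recent releases).
from transformers import BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig

config = BridgeTowerConfig.from_text_vision_configs(BridgeTowerTextConfig(), BridgeTowerVisionConfig())
print(config.hidden_size)                 # 768, the default from the signature above
print(type(config.text_config).__name__)  # BridgeTowerTextConfig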
class Up(nn.Module):
def __init__(self, in_channels, out_channels, bilinear=True):
super().__init__()
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv = DoubleConv(in_channels, out_channels, (in_channels // 2))
else:
self.up = nn.ConvTranspose2d(in_channels, (in_channels // 2), kernel_size=2, stride=2)
self.conv = DoubleConv(in_channels, out_channels)
def forward(self, x1, x2):
x1 = self.up(x1)
diffY = (x2.size()[2] - x1.size()[2])
diffX = (x2.size()[3] - x1.size()[3])
x1 = F.pad(x1, [(diffX // 2), (diffX - (diffX // 2)), (diffY // 2), (diffY - (diffY // 2))])
x = torch.cat([x2, x1], dim=1)
return self.conv(x) |
class OpenALBuffer(OpenALObject):
_format_map = {(1, 8): al.AL_FORMAT_MONO8, (1, 16): al.AL_FORMAT_MONO16, (2, 8): al.AL_FORMAT_STEREO8, (2, 16): al.AL_FORMAT_STEREO16}
def __init__(self, al_name):
self.al_name = al_name
self.name = al_name.value
assert self.is_valid
@property
def is_valid(self):
self._check_error('Before validate buffer.')
if (self.al_name is None):
return False
valid = bool(al.alIsBuffer(self.al_name))
if (not valid):
al.alGetError()
return valid
def delete(self):
if ((self.al_name is not None) and self.is_valid):
al.alDeleteBuffers(1, ctypes.byref(self.al_name))
self._check_error('Error deleting buffer.')
self.al_name = None
def data(self, audio_data, audio_format):
assert self.is_valid
try:
al_format = self._format_map[(audio_format.channels, audio_format.sample_size)]
except KeyError:
raise MediaException(f"OpenAL does not support '{audio_format.sample_size}bit' audio.")
al.alBufferData(self.al_name, al_format, audio_data.pointer, audio_data.length, audio_format.sample_rate)
self._check_error('Failed to add data to buffer.') |
class Voxelization3D(chainer.Function):
def __init__(self, *, batch_size, pitch, origin, dimensions):
self.batch_size = batch_size
self.pitch = pitch
self.origin = origin
if (not (isinstance(dimensions, tuple) and (len(dimensions) == 3) and all((isinstance(d, int) for d in dimensions)))):
raise ValueError('dimensions must be a tuple of 3 integers')
self.dimensions = dimensions
def check_type_forward(self, in_types):
(values_type, points_type, batch_indices_type) = in_types[:3]
chainer.utils.type_check.expect((values_type.dtype == np.float32), (values_type.ndim == 2), (points_type.dtype == np.float32), (points_type.ndim == 2), (points_type.shape[1] == 3), (points_type.shape[0] == values_type.shape[0]), (batch_indices_type.dtype == np.int32), (batch_indices_type.ndim == 1), (batch_indices_type.shape[0] == values_type.shape[0])) |
def _new_root_model_state(component: ComponentType, schedule_render: Callable[([_LifeCycleStateId], None)]) -> _ModelState:
return _ModelState(parent=None, index=(- 1), key=None, model=Ref(), patch_path='', children_by_key={}, targets_by_event={}, life_cycle_state=_make_life_cycle_state(component, schedule_render)) |
def test_show_with_group_only(tester: CommandTester, poetry: Poetry, installed: Repository) -> None:
poetry.package.add_dependency(Factory.create_dependency('cachy', '^0.1.0'))
poetry.package.add_dependency(Factory.create_dependency('pendulum', '^2.0.0'))
poetry.package.add_dependency(Factory.create_dependency('pytest', '*', groups=['dev']))
cachy_010 = get_package('cachy', '0.1.0')
cachy_010.description = 'Cachy package'
pendulum_200 = get_package('pendulum', '2.0.0')
pendulum_200.description = 'Pendulum package'
pytest_373 = get_package('pytest', '3.7.3')
pytest_373.description = 'Pytest package'
installed.add_package(cachy_010)
installed.add_package(pendulum_200)
installed.add_package(pytest_373)
assert isinstance(poetry.locker, TestLocker)
poetry.locker.mock_lock_data({'package': [{'name': 'cachy', 'version': '0.1.0', 'description': 'Cachy package', 'optional': False, 'platform': '*', 'python-versions': '*', 'checksum': []}, {'name': 'pendulum', 'version': '2.0.0', 'description': 'Pendulum package', 'optional': False, 'platform': '*', 'python-versions': '*', 'checksum': []}, {'name': 'pytest', 'version': '3.7.3', 'description': 'Pytest package', 'optional': False, 'platform': '*', 'python-versions': '*', 'checksum': []}], 'metadata': {'python-versions': '*', 'platform': '*', 'content-hash': '', 'files': {'cachy': [], 'pendulum': [], 'pytest': []}}})
tester.execute('--only dev')
expected = 'pytest 3.7.3 Pytest package\n'
assert (tester.io.fetch_output() == expected) |
def compute_metric_for_each_image(metric_func):
def wrapper(D_ests, D_gts, masks, *nargs):
check_shape_for_metric_computation(D_ests, D_gts, masks)
bn = D_gts.shape[0]
results = []
for idx in range(bn):
cur_nargs = [(x[idx] if isinstance(x, (Tensor, Variable)) else x) for x in nargs]
if ((masks[idx].float().mean() / (D_gts[idx] > 0).float().mean()) < 0.1):
print('masks[idx].float().mean() too small, skip')
else:
ret = metric_func(D_ests[idx], D_gts[idx], masks[idx], *cur_nargs)
results.append(ret)
if (len(results) == 0):
print('masks[idx].float().mean() too small for all images in this batch, return 0')
return torch.tensor(0, dtype=torch.float32, device=D_gts.device)
else:
return torch.stack(results).mean()
return wrapper |
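# Hypothetical usage sketch: wrapping a per-image end-point-error metric so it is averaged
# over the batch while images with too little valid-mask coverage are skipped. Assumes torch
# is imported and check_shape_for_metric_computation comes from the same module; the metric
# body is illustrative only.
@compute_metric_for_each_image
def EPE_metric(D_est, D_gt, mask):
    return torch.mean(torch.abs(D_est[mask] - D_gt[mask]))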
class PlayServerDifficulty(Packet):
id = 13
to = 1
def __init__(self, difficulty: int, locked: bool) -> None:
super().__init__()
self.difficulty = difficulty
self.locked = locked
def encode(self) -> bytes:
return (Buffer.pack('B', self.difficulty) + Buffer.pack('?', self.locked)) |
class AudioSampleEntry(object):
channels = 0
sample_size = 0
sample_rate = 0
bitrate = 0
codec = None
codec_description = None
def __init__(self, atom, fileobj):
(ok, data) = atom.read(fileobj)
if (not ok):
raise ASEntryError(('too short %r atom' % atom.name))
fileobj = BytesIO(data)
r = BitReader(fileobj)
try:
r.skip((6 * 8))
r.skip((2 * 8))
r.skip((8 * 8))
self.channels = r.bits(16)
self.sample_size = r.bits(16)
r.skip((2 * 8))
r.skip((2 * 8))
self.sample_rate = (r.bits(32) >> 16)
except BitReaderError as e:
raise ASEntryError(e)
assert r.is_aligned()
try:
extra = Atom(fileobj)
except AtomError as e:
raise ASEntryError(e)
self.codec = atom.name.decode('latin-1')
self.codec_description = None
if ((atom.name == b'mp4a') and (extra.name == b'esds')):
self._parse_esds(extra, fileobj)
elif ((atom.name == b'alac') and (extra.name == b'alac')):
self._parse_alac(extra, fileobj)
elif ((atom.name == b'ac-3') and (extra.name == b'dac3')):
self._parse_dac3(extra, fileobj)
if (self.codec_description is None):
self.codec_description = self.codec.upper()
def _parse_dac3(self, atom, fileobj):
assert (atom.name == b'dac3')
(ok, data) = atom.read(fileobj)
if (not ok):
raise ASEntryError(('truncated %s atom' % atom.name))
fileobj = BytesIO(data)
r = BitReader(fileobj)
try:
r.skip(((2 + 5) + 3))
acmod = r.bits(3)
lfeon = r.bits(1)
bit_rate_code = r.bits(5)
r.skip(5)
except BitReaderError as e:
raise ASEntryError(e)
self.channels = ([2, 1, 2, 3, 3, 4, 4, 5][acmod] + lfeon)
try:
self.bitrate = ([32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 448, 512, 576, 640][bit_rate_code] * 1000)
except IndexError:
pass
def _parse_alac(self, atom, fileobj):
assert (atom.name == b'alac')
(ok, data) = atom.read(fileobj)
if (not ok):
raise ASEntryError(('truncated %s atom' % atom.name))
try:
(version, flags, data) = parse_full_atom(data)
except ValueError as e:
raise ASEntryError(e)
if (version != 0):
raise ASEntryError(('Unsupported version %d' % version))
fileobj = BytesIO(data)
r = BitReader(fileobj)
try:
r.skip(32)
compatibleVersion = r.bits(8)
if (compatibleVersion != 0):
return
self.sample_size = r.bits(8)
r.skip(((8 + 8) + 8))
self.channels = r.bits(8)
r.skip((16 + 32))
self.bitrate = r.bits(32)
self.sample_rate = r.bits(32)
except BitReaderError as e:
raise ASEntryError(e)
def _parse_esds(self, esds, fileobj):
assert (esds.name == b'esds')
(ok, data) = esds.read(fileobj)
if (not ok):
raise ASEntryError(('truncated %s atom' % esds.name))
try:
(version, flags, data) = parse_full_atom(data)
except ValueError as e:
raise ASEntryError(e)
if (version != 0):
raise ASEntryError(('Unsupported version %d' % version))
fileobj = BytesIO(data)
r = BitReader(fileobj)
try:
tag = r.bits(8)
if (tag != ES_Descriptor.TAG):
raise ASEntryError(('unexpected descriptor: %d' % tag))
assert r.is_aligned()
except BitReaderError as e:
raise ASEntryError(e)
try:
decSpecificInfo = ES_Descriptor.parse(fileobj)
except DescriptorError as e:
raise ASEntryError(e)
dec_conf_desc = decSpecificInfo.decConfigDescr
self.bitrate = dec_conf_desc.avgBitrate
self.codec += dec_conf_desc.codec_param
self.codec_description = dec_conf_desc.codec_desc
decSpecificInfo = dec_conf_desc.decSpecificInfo
if (decSpecificInfo is not None):
if (decSpecificInfo.channels != 0):
self.channels = decSpecificInfo.channels
if (decSpecificInfo.sample_rate != 0):
self.sample_rate = decSpecificInfo.sample_rate |
def symmetric_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1.0, low_counts_threshold=1e-08):
(xs, ys1, count_ys1) = one_sided_ema(xolds, yolds, low, high, n, decay_steps, low_counts_threshold=0)
(_, ys2, count_ys2) = one_sided_ema((- xolds[::(- 1)]), yolds[::(- 1)], (- high), (- low), n, decay_steps, low_counts_threshold=0)
ys2 = ys2[::(- 1)]
count_ys2 = count_ys2[::(- 1)]
count_ys = (count_ys1 + count_ys2)
ys = (((ys1 * count_ys1) + (ys2 * count_ys2)) / count_ys)
ys[(count_ys < low_counts_threshold)] = np.nan
return (xs, ys, count_ys) |
class NetworkAgent(Agent):
def __init__(self, dic_agent_conf, dic_traffic_env_conf, dic_path, cnt_round, best_round=None, bar_round=None, intersection_id='0'):
super(NetworkAgent, self).__init__(dic_agent_conf, dic_traffic_env_conf, dic_path, intersection_id=intersection_id)
self.num_actions = len(dic_traffic_env_conf['PHASE'][dic_traffic_env_conf['SIMULATOR_TYPE']])
self.num_phases = len(dic_traffic_env_conf['PHASE'][dic_traffic_env_conf['SIMULATOR_TYPE']])
self.num_lanes = np.sum(np.array(list(self.dic_traffic_env_conf['LANE_NUM'].values())))
self.memory = self.build_memory()
if (cnt_round == 0):
if os.listdir(self.dic_path['PATH_TO_MODEL']):
if self.dic_traffic_env_conf['ONE_MODEL']:
self.load_network('round_0')
else:
self.load_network('round_0_inter_{0}'.format(intersection_id))
else:
self.q_network = self.build_network()
self.q_network_bar = self.build_network_from_copy(self.q_network)
else:
try:
if best_round:
self.load_network('round_{0}_inter_{1}'.format(best_round, self.intersection_id))
if (bar_round and (bar_round != best_round) and (cnt_round > 10)):
self.load_network_bar('round_{0}_inter_{1}'.format(bar_round, self.intersection_id))
elif ('UPDATE_Q_BAR_EVERY_C_ROUND' in self.dic_agent_conf):
if self.dic_agent_conf['UPDATE_Q_BAR_EVERY_C_ROUND']:
self.load_network_bar('round_{0}'.format(max((((best_round - 1) // self.dic_agent_conf['UPDATE_Q_BAR_FREQ']) * self.dic_agent_conf['UPDATE_Q_BAR_FREQ']), 0), self.intersection_id))
else:
self.load_network_bar('round_{0}_inter_{1}'.format(max((best_round - self.dic_agent_conf['UPDATE_Q_BAR_FREQ']), 0), self.intersection_id))
else:
self.load_network_bar('round_{0}_inter_{1}'.format(max((best_round - self.dic_agent_conf['UPDATE_Q_BAR_FREQ']), 0), self.intersection_id))
elif self.dic_traffic_env_conf['ONE_MODEL']:
self.load_network('round_{0}'.format((cnt_round - 1), self.intersection_id))
if ('UPDATE_Q_BAR_EVERY_C_ROUND' in self.dic_agent_conf):
if self.dic_agent_conf['UPDATE_Q_BAR_EVERY_C_ROUND']:
self.load_network_bar('round_{0}'.format(max((((cnt_round - 1) // self.dic_agent_conf['UPDATE_Q_BAR_FREQ']) * self.dic_agent_conf['UPDATE_Q_BAR_FREQ']), 0)))
else:
self.load_network_bar('round_{0}'.format(max((cnt_round - self.dic_agent_conf['UPDATE_Q_BAR_FREQ']), 0)))
else:
self.load_network_bar('round_{0}'.format(max((cnt_round - self.dic_agent_conf['UPDATE_Q_BAR_FREQ']), 0)))
else:
self.load_network('round_{0}_inter_{1}'.format((cnt_round - 1), self.intersection_id))
if ('UPDATE_Q_BAR_EVERY_C_ROUND' in self.dic_agent_conf):
if self.dic_agent_conf['UPDATE_Q_BAR_EVERY_C_ROUND']:
self.load_network_bar('round_{0}_inter_{1}'.format(max((((cnt_round - 1) // self.dic_agent_conf['UPDATE_Q_BAR_FREQ']) * self.dic_agent_conf['UPDATE_Q_BAR_FREQ']), 0), self.intersection_id))
else:
self.load_network_bar('round_{0}_inter_{1}'.format(max((cnt_round - self.dic_agent_conf['UPDATE_Q_BAR_FREQ']), 0), self.intersection_id))
else:
self.load_network_bar('round_{0}_inter_{1}'.format(max((cnt_round - self.dic_agent_conf['UPDATE_Q_BAR_FREQ']), 0), self.intersection_id))
except:
print('fail to load network, current round: {0}'.format(cnt_round))
decayed_epsilon = (self.dic_agent_conf['EPSILON'] * pow(self.dic_agent_conf['EPSILON_DECAY'], cnt_round))
self.dic_agent_conf['EPSILON'] = max(decayed_epsilon, self.dic_agent_conf['MIN_EPSILON'])
def _unison_shuffled_copies(Xs, Y, sample_weight):
p = np.random.permutation(len(Y))
new_Xs = []
for x in Xs:
assert (len(x) == len(Y))
new_Xs.append(x[p])
return (new_Xs, Y[p], sample_weight[p])
def _cnn_network_structure(img_features):
conv1 = conv2d_bn(img_features, 1, filters=32, kernel_size=(8, 8), strides=(4, 4))
conv2 = conv2d_bn(conv1, 2, filters=16, kernel_size=(4, 4), strides=(2, 2))
img_flatten = Flatten()(conv2)
return img_flatten
def _shared_network_structure(state_features, dense_d):
hidden_1 = Dense(dense_d, activation='sigmoid', name='hidden_shared_1')(state_features)
return hidden_1
def _separate_network_structure(state_features, dense_d, num_actions, memo=''):
hidden_1 = Dense(dense_d, activation='sigmoid', name='hidden_separate_branch_{0}_1'.format(memo))(state_features)
q_values = Dense(num_actions, activation='linear', name='q_values_separate_branch_{0}'.format(memo))(hidden_1)
return q_values
def load_network(self, file_name, file_path=None):
if (file_path == None):
file_path = self.dic_path['PATH_TO_MODEL']
self.q_network = load_model(os.path.join(file_path, ('%s.h5' % file_name)), custom_objects={'Selector': Selector})
print(('succeed in loading model %s' % file_name))
def load_network_bar(self, file_name, file_path=None):
if (file_path == None):
file_path = self.dic_path['PATH_TO_MODEL']
self.q_network_bar = load_model(os.path.join(file_path, ('%s.h5' % file_name)), custom_objects={'Selector': Selector})
print(('succeed in loading model %s' % file_name))
def save_network(self, file_name):
self.q_network.save(os.path.join(self.dic_path['PATH_TO_MODEL'], ('%s.h5' % file_name)))
def save_network_bar(self, file_name):
self.q_network_bar.save(os.path.join(self.dic_path['PATH_TO_MODEL'], ('%s.h5' % file_name)))
def build_network(self):
raise NotImplementedError
def build_memory(self):
return []
def build_network_from_copy(self, network_copy):
network_structure = network_copy.to_json()
network_weights = network_copy.get_weights()
network = model_from_json(network_structure, custom_objects={'Selector': Selector})
network.set_weights(network_weights)
network.compile(optimizer=RMSprop(lr=self.dic_agent_conf['LEARNING_RATE']), loss=self.dic_agent_conf['LOSS_FUNCTION'])
return network
def prepare_Xs_Y(self, memory, dic_exp_conf):
ind_end = len(memory)
print('memory size before forget: {0}'.format(ind_end))
if (dic_exp_conf['PRETRAIN'] or dic_exp_conf['AGGREGATE']):
sample_slice = memory
else:
ind_sta = max(0, (ind_end - self.dic_agent_conf['MAX_MEMORY_LEN']))
memory_after_forget = memory[ind_sta:ind_end]
print('memory size after forget:', len(memory_after_forget))
sample_size = min(self.dic_agent_conf['SAMPLE_SIZE'], len(memory_after_forget))
sample_slice = random.sample(memory_after_forget, sample_size)
print('memory samples number:', sample_size)
dic_state_feature_arrays = {}
for feature_name in self.dic_traffic_env_conf['LIST_STATE_FEATURE']:
dic_state_feature_arrays[feature_name] = []
Y = []
for i in range(len(sample_slice)):
(state, action, next_state, reward, instant_reward, _, _) = sample_slice[i]
for feature_name in self.dic_traffic_env_conf['LIST_STATE_FEATURE']:
dic_state_feature_arrays[feature_name].append(state[feature_name])
_state = []
_next_state = []
for feature_name in self.dic_traffic_env_conf['LIST_STATE_FEATURE']:
_state.append([state[feature_name]])
_next_state.append([next_state[feature_name]])
target = self.q_network.predict(_state)
next_state_qvalues = self.q_network_bar.predict(_next_state)
if (self.dic_agent_conf['LOSS_FUNCTION'] == 'mean_squared_error'):
final_target = np.copy(target[0])
final_target[action] = ((reward / self.dic_agent_conf['NORMAL_FACTOR']) + (self.dic_agent_conf['GAMMA'] * np.max(next_state_qvalues[0])))
elif (self.dic_agent_conf['LOSS_FUNCTION'] == 'categorical_crossentropy'):
raise NotImplementedError
Y.append(final_target)
self.Xs = [np.array(dic_state_feature_arrays[feature_name]) for feature_name in self.dic_traffic_env_conf['LIST_STATE_FEATURE']]
self.Y = np.array(Y)
def convert_state_to_input(self, s):
if self.dic_traffic_env_conf['BINARY_PHASE_EXPANSION']:
inputs = []
for feature in self.dic_traffic_env_conf['LIST_STATE_FEATURE']:
if ('cur_phase' in feature):
inputs.append(np.array([self.dic_traffic_env_conf['PHASE'][self.dic_traffic_env_conf['SIMULATOR_TYPE']][s[feature][0]]]))
else:
inputs.append(np.array([s[feature]]))
return inputs
else:
return [np.array([s[feature]]) for feature in self.dic_traffic_env_conf['LIST_STATE_FEATURE']]
def choose_action(self, count, state):
state_input = self.convert_state_to_input(state)
q_values = self.q_network.predict(state_input)
if (random.random() <= self.dic_agent_conf['EPSILON']):
action = random.randrange(len(q_values[0]))
else:
action = np.argmax(q_values[0])
return action
def train_network(self, dic_exp_conf):
if (dic_exp_conf['PRETRAIN'] or dic_exp_conf['AGGREGATE']):
epochs = 1000
else:
epochs = self.dic_agent_conf['EPOCHS']
batch_size = min(self.dic_agent_conf['BATCH_SIZE'], len(self.Y))
early_stopping = EarlyStopping(monitor='val_loss', patience=self.dic_agent_conf['PATIENCE'], verbose=0, mode='min')
hist = self.q_network.fit(self.Xs, self.Y, batch_size=batch_size, epochs=epochs, shuffle=False, verbose=2, validation_split=0.3, callbacks=[early_stopping]) |
class TListWrapper(TestCase):
def test_empty(self):
wrapped = list_wrapper([])
self.assertEqual(wrapped, [])
def test_empty_song(self):
wrapped = list_wrapper([{}])
self.assertTrue((len(wrapped) == 1))
self.assertFalse(isinstance(wrapped[0], dict))
def test_none(self):
wrapped = list_wrapper([None, None])
self.assertTrue((len(wrapped) == 2))
self.assertEqual(wrapped, [None, None]) |
def convert_dog(data_root):
train_lst = (data_root + '/train_list.mat')
train_txt = (data_root + '/dog_train.txt')
info = scio.loadmat(train_lst)['file_list']
name_dict = {}
index = 0
for i in info:
name = i[0][0]
cate = name.split('/')[0]
if (cate in name_dict):
label = name_dict[cate]
else:
label = index
name_dict[cate] = index
index += 1
with open(train_txt, 'a') as f:
f.write(('%s %d\n' % (name, label)))
test_lst = (data_root + '/test_list.mat')
test_txt = (data_root + '/dog_test.txt')
info = scio.loadmat(test_lst)['file_list']
for i in info:
name = i[0][0]
cate = name.split('/')[0]
label = name_dict[cate]
with open(test_txt, 'a') as f:
f.write(('%s %d\n' % (name, label))) |
@attr.s(auto_attribs=True, frozen=False)
class FaultLocalization(object):
faultElements: Sequence[FaultElement]
def __repr__(self) -> str:
return self.toSpecifierStr()
def toSpecifierStr(self) -> str:
rstStr = [ele.toSpecifierStr() for ele in sorted(frozenset(self.faultElements), key=(lambda x: (type(x).__name__, x)))]
return ';'.join(rstStr) |
def infer_Trange(events_pred, events_gt):
if (len(events_gt) == 0):
raise ValueError('The gt events should contain at least one event')
if (len(events_pred) == 0):
return infer_Trange(events_gt, events_gt)
min_pred = min([x[0] for x in events_pred])
min_gt = min([x[0] for x in events_gt])
max_pred = max([x[1] for x in events_pred])
max_gt = max([x[1] for x in events_gt])
Trange = (min(min_pred, min_gt), max(max_pred, max_gt))
return Trange |
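# Small worked example: the inferred range spans the earliest start to the latest end over
# both predicted and ground-truth events; with no predictions it falls back to the gt range.
events_pred = [(2, 5), (10, 12)]
events_gt = [(1, 4)]
print(infer_Trange(events_pred, events_gt))  # (1, 12)
print(infer_Trange([], events_gt))           # (1, 4)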
class LocalVirtualSite(VirtualSite):
def __init__(self, p1: unit.Quantity, p2: unit.Quantity, p3: unit.Quantity, name: str, o_weights: List[float], x_weights: List[float], y_weights: List[float], orientations: List[Tuple[(int, ...)]]):
super().__init__(name=name, orientations=orientations)
self._p1 = p1.in_units_of(unit.nanometer)
self._p2 = p2.in_units_of(unit.nanometer)
self._p3 = p3.in_units_of(unit.nanometer)
self._o_weights = o_weights
self._x_weights = x_weights
self._y_weights = y_weights
def p1(self):
return self._p1
def p2(self):
return self._p2
def p3(self):
return self._p3
def to_dict(self):
vsite_dict = super().to_dict()
vsite_dict['p1'] = self._p1
vsite_dict['p2'] = self._p2
vsite_dict['p3'] = self._p3
vsite_dict['vsite_type'] = self.type
vsite_dict['o_weights'] = self._o_weights
vsite_dict['x_weights'] = self._x_weights
vsite_dict['y_weights'] = self._y_weights
return vsite_dict
@classmethod
def from_dict(cls, vsite_dict):
base_dict = deepcopy(vsite_dict)
assert (vsite_dict['vsite_type'] == 'LocalVirtualSite')
del base_dict['vsite_type']
return cls(**base_dict)
def local_frame_weights(self):
return (self._o_weights, self._x_weights, self._y_weights)
def local_frame_position(self):
return ([self._p1.value_in_unit(unit.nanometer), self._p2.value_in_unit(unit.nanometer), self._p3.value_in_unit(unit.nanometer)] * unit.nanometer)
def get_openmm_virtual_site(self, atoms: Tuple[(int, ...)]):
assert (len(atoms) == 3)
return self._openmm_virtual_site(atoms)
@property
def type(self) -> str:
return 'LocalVirtualSite' |
def insert_head_doc(docstring, head_doc=''):
if (len(head_doc) > 0):
return docstring.replace('one of the model classes of the library ', f'one of the model classes of the library (with a {head_doc} head) ')
return docstring.replace('one of the model classes of the library ', 'one of the base model classes of the library ') |
class TinyRV0Inst():
def __init__(self, bits):
self.bits = Bits32(bits)
def name(self):
if (self.bits == 19):
return 'nop'
elif (self.opcode == 51):
if (self.funct7 == 0):
if (self.funct3 == 0):
return 'add'
elif (self.funct3 == 1):
return 'sll'
elif (self.funct3 == 5):
return 'srl'
elif (self.funct3 == 7):
return 'and'
elif (self.opcode == 19):
if (self.funct3 == 0):
return 'addi'
elif (self.opcode == 35):
if (self.funct3 == 2):
return 'sw'
elif (self.opcode == 3):
if (self.funct3 == 2):
return 'lw'
elif (self.opcode == 99):
if (self.funct3 == 1):
return 'bne'
elif (self.opcode == 115):
if (self.funct3 == 1):
return 'csrw'
elif (self.funct3 == 2):
return 'csrr'
elif (self.bits == 0):
return '????'
raise AssertionError('Illegal instruction {}!'.format(self.bits))
@property
def opcode(self):
return self.bits[tinyrv0_field_slice_opcode]
def rd(self):
return self.bits[tinyrv0_field_slice_rd]
def rs1(self):
return self.bits[tinyrv0_field_slice_rs1]
def rs2(self):
return self.bits[tinyrv0_field_slice_rs2]
def shamt(self):
return self.bits[tinyrv0_field_slice_shamt]
def i_imm(self):
return self.bits[tinyrv0_field_slice_i_imm]
def s_imm(self):
imm = Bits12(0)
imm[0:5] = self.bits[tinyrv0_field_slice_s_imm0]
imm[5:12] = self.bits[tinyrv0_field_slice_s_imm1]
return imm
def b_imm(self):
imm = Bits13(0)
imm[1:5] = self.bits[tinyrv0_field_slice_b_imm0]
imm[5:11] = self.bits[tinyrv0_field_slice_b_imm1]
imm[11:12] = self.bits[tinyrv0_field_slice_b_imm2]
imm[12:13] = self.bits[tinyrv0_field_slice_b_imm3]
return imm
def csrnum(self):
return self.bits[tinyrv0_field_slice_i_imm]
@property
def funct7(self):
return self.bits[tinyrv0_field_slice_funct7]
@property
def funct3(self):
return self.bits[tinyrv0_field_slice_funct3]
def __str__(self):
return disassemble_inst(self.bits) |
def crawl_specific_attrs():
query_link = {}
log_step = 100
disease_info = {}
keys = ['', '', '', '', '', '', '']
query_list = json.load(open('./Doctor_GLM/WebCrawl/query_MSD.json'))
for (i, elem) in enumerate(tqdm(query_list)):
url = elem[0]
query = elem[1]
info = file_to_specific_attrs(keys, url)
if (len(info) != 0):
disease_info[query] = info
query_link[query] = url
if (((i + 1) % log_step) == 0):
with open('../disease_info_features_v2.json', 'w', encoding='utf-8') as f:
json.dump(disease_info, f, indent=4, ensure_ascii=False)
with open('../disease_info_features_v2.json', 'w', encoding='utf-8') as f:
json.dump(disease_info, f, indent=4, ensure_ascii=False) |
def check_finite_int(num_slices, num_rows):
num_slices = int(num_slices)
num_rows = int(num_rows)
if (not all(np.isfinite((num_slices, num_rows)))):
raise ValueError('num_slices and num_rows must be finite.')
if ((num_slices < 0) or (num_rows < 0)):
raise ValueError('num_slices and num_rows must be nonnegative (>= 0).')
return (num_slices, num_rows) |
def main():
global opt, model, netContent
opt = parser.parse_args()
print(opt)
cuda = opt.cuda
if cuda:
print("=> use gpu id: '{}'".format(opt.gpus))
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus
if (not torch.cuda.is_available()):
raise Exception('No GPU found or Wrong gpu id, please run without --cuda')
opt.seed = random.randint(1, 10000)
print('Random Seed: ', opt.seed)
torch.manual_seed(opt.seed)
if cuda:
torch.cuda.manual_seed(opt.seed)
cudnn.benchmark = True
print('===> Loading datasets')
data_list = glob.glob((opt.trainset + '*.h5'))
print('===> Building model')
model = _NetG()
discr = _NetD()
criterion = nn.MSELoss(size_average=True)
print('===> Setting GPU')
if cuda:
model = model.cuda()
discr = discr.cuda()
criterion = criterion.cuda()
if opt.resume:
if os.path.isfile(opt.resume):
print("=> loading checkpoint '{}'".format(opt.resume))
checkpoint = torch.load(opt.resume)
opt.start_epoch = (checkpoint['epoch'] + 1)
model.load_state_dict(checkpoint['model'].state_dict())
discr.load_state_dict(checkpoint['discr'].state_dict())
else:
print("=> no checkpoint found at '{}'".format(opt.resume))
if opt.pretrained:
if os.path.isfile(opt.pretrained):
print("=> loading model '{}'".format(opt.pretrained))
weights = torch.load(opt.pretrained)
model.load_state_dict(weights['model'].state_dict())
discr.load_state_dict(weights['discr'].state_dict())
else:
print("=> no model found at '{}'".format(opt.pretrained))
print('===> Setting Optimizer')
G_optimizer = optim.RMSprop(model.parameters(), lr=(opt.lr / 2))
D_optimizer = optim.RMSprop(discr.parameters(), lr=opt.lr)
print('===> Training')
MSE = []
GLOSS = []
for epoch in range(opt.start_epoch, (opt.nEpochs + 1)):
mse = 0
Gloss = 0
for data_name in data_list:
train_set = DatasetFromHdf5(data_name)
training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)
(a, b) = train(training_data_loader, G_optimizer, D_optimizer, model, discr, criterion, epoch)
mse += a
Gloss += b
mse = (mse / len(data_list))
Gloss = (Gloss / len(data_list))
MSE.append(format(mse))
GLOSS.append(format(Gloss))
save_checkpoint(model, discr, epoch)
print(mse)
file = open((((((('./checksample/mse_' + str(opt.noise_sigma)) + '_') + str(opt.nEpochs)) + '_') + str(opt.sigma)) + '.txt'), 'w')
for mse in MSE:
file.write((mse + '\n'))
file.close()
file = open((((((('./checksample/Gloss_' + str(opt.noise_sigma)) + '_') + str(opt.nEpochs)) + '_') + str(opt.sigma)) + '.txt'), 'w')
for g in GLOSS:
file.write((g + '\n'))
file.close() |
class Effect5386(BaseEffect):
type = 'passive'
@staticmethod
def handler(fit, ship, context, projectionRange, **kwargs):
fit.modules.filteredChargeBoost((lambda mod: mod.charge.requiresSkill('Missile Launcher Operation')), 'kineticDamage', ship.getModifiedItemAttr('shipBonusCC2'), skill='Caldari Cruiser', **kwargs) |
class MemcachedObjectStore(IObjectStore):
CONNECT_TIMEOUT = (10 * 60)
FETCH_TIMEOUT = (30 * 60)
MAX_ITEM_SIZE_BYTES = 1024 * 1024  # value elided in the source; 1 MiB (memcached's default max item size) assumed here
def __init__(self, storage_node_ips: Optional[List[str]]=None, port: Optional[int]=11212, connect_timeout: float=CONNECT_TIMEOUT, timeout: float=FETCH_TIMEOUT, noreply: bool=False, max_item_size_bytes: int=MAX_ITEM_SIZE_BYTES) -> None:
self.client_cache = {}
self.current_ip = None
self.SEPARATOR = '_'
self.port = port
self.storage_node_ips = (storage_node_ips or [])
for ip in self.storage_node_ips:
assert (self.SEPARATOR not in ip), f'IP address should not contain {self.SEPARATOR}'
self.hasher = RendezvousHash(nodes=self.storage_node_ips, seed=0)
self.connect_timeout = connect_timeout
self.timeout = timeout
self.noreply = noreply
self.max_item_size_bytes = max_item_size_bytes
logger.info(f'The storage node IPs: {self.storage_node_ips} with noreply: {self.noreply}')
super().__init__()
def put_many(self, objects: List[object], *args, **kwargs) -> List[Any]:
input = defaultdict(dict)
result = []
for obj in objects:
serialized_list = dump_into_chunks(obj, max_size_bytes=self.max_item_size_bytes)
uid = uuid.uuid4()
create_ref_ip = self._get_create_ref_ip(uid.__str__())
for (chunk_index, chunk) in enumerate(serialized_list):
ref = self._create_ref(uid, create_ref_ip, chunk_index)
input[create_ref_ip][ref] = chunk
return_ref = self._create_ref(uid, create_ref_ip, len(serialized_list))
result.append(return_ref)
for (create_ref_ip, ref_to_object) in input.items():
client = self._get_client_by_ip(create_ref_ip)
if client.set_many(ref_to_object, noreply=self.noreply):
raise RuntimeError('Unable to write some keys to cache')
return result
def put(self, obj: object, *args, **kwargs) -> Any:
serialized_list = dump_into_chunks(obj, max_size_bytes=self.max_item_size_bytes)
uid = uuid.uuid4()
create_ref_ip = self._get_create_ref_ip(uid.__str__())
client = self._get_client_by_ip(create_ref_ip)
for (chunk_index, chunk) in enumerate(serialized_list):
ref = self._create_ref(uid, create_ref_ip, chunk_index)
try:
if (not client.set(ref, chunk, noreply=self.noreply)):
raise RuntimeError(f'Unable to write {ref} to cache')
except BaseException as e:
raise RuntimeError(f'Received {e} while writing ref={ref} and obj size={len(chunk)}')
return self._create_ref(uid, create_ref_ip, len(serialized_list))
def get_many(self, refs: List[Any], *args, **kwargs) -> List[object]:
result = []
refs_per_ip = defaultdict((lambda : []))
chunks_by_refs = defaultdict((lambda : []))
start = time.monotonic()
for ref in refs:
(uid, ip, chunk_count) = ref.split(self.SEPARATOR)
chunk_count = int(chunk_count)
for chunk_index in range(chunk_count):
current_ref = self._create_ref(uid, ip, chunk_index)
refs_per_ip[ip].append(current_ref)
total_ref_count = 0
for (ip, current_refs) in refs_per_ip.items():
client = self._get_client_by_ip(ip)
cache_result = client.get_many(current_refs)
assert (len(cache_result) == len(current_refs)), f'Not all values were returned from cache from {ip} as {len(cache_result)} != {len(current_refs)}'
total_ref_count += len(cache_result)
chunks_by_refs.update(cache_result)
assert (len(chunks_by_refs) == total_ref_count), f"Total refs retrieved from memcached doesn't match as {len(chunks_by_refs)} != {total_ref_count}"
for ref in refs:
(uid, ip, chunk_count) = ref.split(self.SEPARATOR)
chunk_count = int(chunk_count)
deserialize_start = time.monotonic()
chunks = []
for chunk_index in range(chunk_count):
current_ref = self._create_ref(uid, ip, chunk_index)
chunk = chunks_by_refs[current_ref]
assert chunk, f'Serialized chunks were not present for ref={current_ref}'
chunks_by_refs.pop(current_ref)
chunks.append(chunk)
assert (chunk_count == len(chunks)), f'The chunk count must be equal for ref={ref}'
if (chunk_count == 1):
serialized = chunks[0]
else:
serialized = bytearray()
for chunk_index in range(chunk_count):
chunk = chunks[chunk_index]
serialized.extend(chunk)
chunks[chunk_index] = None
deserialized = cloudpickle.loads(serialized)
result.append(deserialized)
deserialize_end = time.monotonic()
logger.debug(f'The time taken to deserialize {len(serialized)} bytes and {chunk_count} chunks is: {(deserialize_end - deserialize_start)}')
end = time.monotonic()
logger.info(f'The total time taken to read all objects is: {(end - start)}')
assert (len(result) == len(refs)), f'The total number of refs must be equal as {len(result)} != {len(refs)}'
return result
def get(self, ref: Any, *args, **kwargs) -> object:
(uid, ip, chunk_count) = ref.split(self.SEPARATOR)
chunk_count = int(chunk_count)
client = self._get_client_by_ip(ip)
serialized = bytearray()
for chunk_index in range(chunk_count):
ref = self._create_ref(uid, ip, chunk_index)
chunk = client.get(ref)
serialized.extend(chunk)
return cloudpickle.loads(serialized)
def close(self) -> None:
for client in self.client_cache.values():
client.close()
self.client_cache.clear()
def _create_ref(self, uid, ip, chunk_index) -> str:
return f'{uid}{self.SEPARATOR}{ip}{self.SEPARATOR}{chunk_index}'
def _get_storage_node_ip(self, key: str):
storage_node_ip = self.hasher.get_node(key)
return storage_node_ip
def _get_create_ref_ip(self, uid: str):
if self.storage_node_ips:
create_ref_ip = self._get_storage_node_ip(uid)
else:
create_ref_ip = self._get_current_ip()
return create_ref_ip
def _get_client_by_ip(self, ip_address: str):
if (ip_address in self.client_cache):
return self.client_cache[ip_address]
base_client = Client((ip_address, self.port), connect_timeout=self.connect_timeout, timeout=self.timeout, no_delay=True)
client = RetryingClient(base_client, attempts=15, retry_delay=1, retry_for=[MemcacheUnexpectedCloseError, ConnectionRefusedError, ConnectionResetError, BrokenPipeError, TimeoutError])
self.client_cache[ip_address] = client
return client
def _get_current_ip(self):
if (self.current_ip is None):
self.current_ip = socket.gethostbyname(socket.gethostname())
return self.current_ip |
def register(name: str, cls: Type[BaseEnv], max_episode_steps=None, default_kwargs: dict=None):
if (name in REGISTERED_ENVS):
logger.warn(f'Env {name} already registered')
if (not issubclass(cls, BaseEnv)):
raise TypeError(f'Env {name} must inherit from BaseEnv')
REGISTERED_ENVS[name] = EnvSpec(name, cls, max_episode_steps=max_episode_steps, default_kwargs=default_kwargs) |
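# Hypothetical registration sketch; DummyEnv and the 200-step limit are illustrative, and
# BaseEnv / EnvSpec / REGISTERED_ENVS are assumed to come from the same module as register().
# register() only records the spec, it does not instantiate the environment.
class DummyEnv(BaseEnv):
    pass

register('Dummy-v0', DummyEnv, max_episode_steps=200)
print('Dummy-v0' in REGISTERED_ENVS)  # True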
@pytest.mark.parametrize('data', [[[], [0, 1, 2, 3, 4, 5]], [[None, None, None], [0, 1, 2, 3, 4, 5]], [[1, None, None], [1, 2, 3, 4, 5]], [[None, 4, None], [0, 1, 2, 3]], [[None, 4, 2], [0, 2]], [[3, 1, None], []]])
def test_slice(data):
(pars, expected) = data
a = Stream()
b = a.slice(*pars)
out = b.sink_to_list()
for i in range(6):
a.emit(i)
assert (out == expected) |
class ContentFormPetition(ContentFormGeneric):
title = forms.CharField(max_length=200)
publication_date = forms.DateField(required=False)
show_publication_date = SwitchField(required=False, label=_('Show publication date'))
paper_signatures = forms.IntegerField()
field_order = ('title', 'publication_date', 'show_publication_date', 'paper_signatures') |
def test_query_url_fail():
query = {'query_format': 'advanced', 'product': 'FOO'}
checkstr = 'does not appear to support'
exc = bugzilla.BugzillaError('FAKEERROR query_format', code=123)
bz = tests.mockbackend.make_bz(version='4.0.0', bug_search_args=None, bug_search_return=exc)
try:
bz.query(query)
except Exception as e:
assert (checkstr in str(e))
bz = tests.mockbackend.make_bz(version='5.1.0', bug_search_args=None, bug_search_return=exc)
try:
bz.query(query)
except Exception as e:
assert (checkstr not in str(e)) |
class Effect6368(BaseEffect):
type = 'passive'
@staticmethod
def handler(fit, src, context, projectionRange, **kwargs):
fit.modules.filteredItemBoost((lambda mod: (mod.item.group.name == 'Remote Shield Booster')), 'falloffEffectiveness', src.getModifiedItemAttr('falloffBonus'), **kwargs)
fit.modules.filteredItemBoost((lambda mod: (mod.item.group.name == 'Ancillary Remote Shield Booster')), 'falloffEffectiveness', src.getModifiedItemAttr('falloffBonus'), **kwargs) |
@pytest.mark.parametrize('manager', [MonadWideMarginsConfig], indirect=True)
def test_wide_margins(manager):
manager.test_window('one')
assert_dimensions(manager, 4, 4, 788, 588)
manager.test_window('two')
assert_focused(manager, 'two')
assert_dimensions(manager, 4, 304, 788, 288)
manager.c.layout.previous()
assert_focused(manager, 'one')
assert_dimensions(manager, 4, 4, 788, 292) |
class MemCreateExpression():
R: pybamm.Parameter
model: pybamm.BaseModel
def setup(self):
set_random_seed()
def mem_create_expression(self):
self.R = pybamm.Parameter('Particle radius [m]')
D = pybamm.Parameter('Diffusion coefficient [m2.s-1]')
j = pybamm.Parameter('Interfacial current density [A.m-2]')
F = pybamm.Parameter('Faraday constant [C.mol-1]')
c0 = pybamm.Parameter('Initial concentration [mol.m-3]')
self.model = pybamm.BaseModel()
c = pybamm.Variable('Concentration [mol.m-3]', domain='negative particle')
N = ((- D) * pybamm.grad(c))
dcdt = (- pybamm.div(N))
self.model.rhs = {c: dcdt}
lbc = pybamm.Scalar(0)
rbc = (((- j) / F) / D)
self.model.boundary_conditions = {c: {'left': (lbc, 'Neumann'), 'right': (rbc, 'Neumann')}}
self.model.initial_conditions = {c: c0}
self.model.variables = {'Concentration [mol.m-3]': c, 'Surface concentration [mol.m-3]': pybamm.surf(c), 'Flux [mol.m-2.s-1]': N}
return self.model |
def orig_function(inputs, outputs, mode=None, accept_inplace=False, name=None, profile=None, on_unused_input=None, output_keys=None, fgraph: Optional[FunctionGraph]=None) -> Function:
if profile:
t1 = time.perf_counter()
mode = pytensor.compile.mode.get_mode(mode)
inputs = list(map(convert_function_input, inputs))
if (outputs is not None):
if isinstance(outputs, (list, tuple)):
outputs = list(map(FunctionMaker.wrap_out, outputs))
else:
outputs = FunctionMaker.wrap_out(outputs)
defaults = [getattr(input, 'value', None) for input in inputs]
if isinstance(mode, (list, tuple)):
raise ValueError('We do not support the passing of multiple modes')
fn = None
try:
Maker = getattr(mode, 'function_maker', FunctionMaker)
m = Maker(inputs, outputs, mode, accept_inplace=accept_inplace, profile=profile, on_unused_input=on_unused_input, output_keys=output_keys, name=name, fgraph=fgraph)
with config.change_flags(compute_test_value='off'):
fn = m.create(defaults)
finally:
if (profile and fn):
t2 = time.perf_counter()
profile.compile_time += (t2 - t1)
profile.nb_nodes = len(fn.maker.fgraph.apply_nodes)
return fn |
def main():
args = parse_args()
cfg = mmcv.Config.fromfile(args.config)
if (args.options is not None):
cfg.merge_from_dict(args.options)
output_config = cfg.get('output_config', {})
output_config = merge_configs(output_config, dict(out=args.out))
eval_config = cfg.get('eval_config', {})
eval_config = merge_configs(eval_config, dict(metrics=args.eval))
eval_config = merge_configs(eval_config, args.eval_options)
assert (output_config or eval_config), 'Please specify at least one operation (save or eval the results) with the argument "--out" or "--eval"'
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.data.test.test_mode = True
if (cfg.test_cfg is None):
cfg.test_cfg = dict(average_clips=args.average_clips)
else:
cfg.test_cfg.average_clips = args.average_clips
if (args.launcher == 'none'):
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
dataset = build_dataset(cfg.data.test, dict(test_mode=True))
data_loader = build_dataloader(dataset, videos_per_gpu=1, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False)
model = mmcv.ConfigDict(type='VanillaTracker', backbone=cfg.model.backbone)
model.backbone.out_indices = cfg.test_cfg.out_indices
model.backbone.strides = cfg.test_cfg.strides
model.backbone.pretrained = args.checkpoint
model = build_model(model, train_cfg=None, test_cfg=cfg.test_cfg)
print(cfg.test_cfg)
fp16_cfg = cfg.get('fp16', None)
if (fp16_cfg is not None):
wrap_fp16_model(model)
if args.fuse_conv_bn:
model = fuse_conv_bn(model)
if (not distributed):
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader)
else:
model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False)
outputs = multi_gpu_test(model, data_loader, args.tmpdir, args.gpu_collect)
(rank, _) = get_dist_info()
if (rank == 0):
if output_config:
out = output_config['out']
print(f'\nwriting results to {out}')
dataset.dump_results(outputs, **output_config)
if eval_config:
eval_res = dataset.evaluate(outputs, **eval_config)
for (name, val) in eval_res.items():
print(f'{name}: {val:.04f}') |
def exec_random_walk(graphs, alias_method_j, alias_method_q, v, walk_length, amount_neighbours):
original_v = v
t0 = time()
initialLayer = 0
layer = initialLayer
path = deque()
path.append(v)
while (len(path) < walk_length):
r = random.random()
if (r < 0.3):
v = chooseNeighbor(v, graphs, alias_method_j, alias_method_q, layer)
path.append(v)
else:
r = random.random()
limiar_moveup = prob_moveup(amount_neighbours[layer][v])
if (r > limiar_moveup):
if (layer > initialLayer):
layer = (layer - 1)
elif (((layer + 1) in graphs) and (v in graphs[(layer + 1)])):
layer = (layer + 1)
t1 = time()
logging.info('RW - vertex {}. Time : {}s'.format(original_v, (t1 - t0)))
return path |
def list_logged_exceptions(log_records: list[logging.LogRecord], pattern: str='', types: (type[Any] | tuple[(type[Any], ...)])=Exception, log_level: int=logging.ERROR, del_log_records: bool=True) -> list[BaseException]:
found: list[BaseException] = []
compiled_pattern = re.compile(pattern)
for (index, record) in enumerate(log_records):
if ((record.levelno >= log_level) and record.exc_info):
error = record.exc_info[1]
if ((error is not None) and isinstance(error, types) and compiled_pattern.search(str(error))):
if del_log_records:
del log_records[(index - len(found))]
found.append(error)
return found |
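# Usage sketch: collect LogRecords with a small list handler, log an exception, then filter
# it back out. The 'demo' logger and ZeroDivisionError are illustrative; only the helper
# above and the standard library are used.
import logging

records: list[logging.LogRecord] = []

class _ListHandler(logging.Handler):
    def emit(self, record: logging.LogRecord) -> None:
        records.append(record)

log = logging.getLogger('demo')
log.addHandler(_ListHandler())
try:
    1 / 0
except ZeroDivisionError:
    log.exception('boom')

errors = list_logged_exceptions(records, pattern='division', types=ZeroDivisionError)
print(errors)        # [ZeroDivisionError('division by zero')]
print(len(records))  # 0, because matching records are deleted by default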
class Model(nn.Module):
def __init__(self, model_name, num_layers, input_dim, hidden_dim, output_dim, hidden_dim_multiplier, num_heads, normalization, dropout):
super().__init__()
normalization = NORMALIZATION[normalization]
self.input_linear = nn.Linear(in_features=input_dim, out_features=hidden_dim)
self.dropout = nn.Dropout(p=dropout)
self.act = nn.GELU()
self.residual_modules = nn.ModuleList()
for _ in range(num_layers):
for module in MODULES[model_name]:
residual_module = ResidualModuleWrapper(module=module, normalization=normalization, dim=hidden_dim, hidden_dim_multiplier=hidden_dim_multiplier, num_heads=num_heads, dropout=dropout)
self.residual_modules.append(residual_module)
self.output_normalization = normalization(hidden_dim)
self.output_linear = nn.Linear(in_features=hidden_dim, out_features=output_dim)
def forward(self, graph, x):
x = self.input_linear(x)
x = self.dropout(x)
x = self.act(x)
for residual_module in self.residual_modules:
x = residual_module(graph, x)
x = self.output_normalization(x)
x = self.output_linear(x).squeeze(1)
return x |
def BayesNet(args):
if (args.dataset == 'MNIST'):
net_name = MNIST_Net
elif ((args.dataset == 'CIFAR10') or (args.dataset == 'CIFAR100')):
net_name = CIFAR_Net
class OurNet(net_name):
def __init__(self, args):
super(OurNet, self).__init__(args)
if torch.cuda.is_available():
self.cuda()
self.sqrt_cov_prev = []
self.sqrt_cov_next = []
self.cov_weight = []
if (args.regu_type == 'LAST'):
(name, module) = list(self.named_children())[(- 1)]
shape = list(module.parameters())[0].shape
sqt_cov_pre = nn.Parameter(torch.eye(shape[1]), requires_grad=False)
sqt_cov_nex = nn.Parameter(torch.eye(shape[0]), requires_grad=False)
if torch.cuda.is_available():
sqt_cov_pre = sqt_cov_pre.cuda()
sqt_cov_nex = sqt_cov_nex.cuda()
self.sqrt_cov_prev.append(sqt_cov_pre)
self.sqrt_cov_next.append(sqt_cov_nex)
self.cov_weight.append(list(module.parameters())[0])
else:
for (name, module) in self.named_children():
shape = list(module.parameters())[0].shape
if (len(shape) == 4):
if ((args.regu_type == 'ALL') or (args.regu_type == 'CONV')):
sqt_cov_pre = nn.Parameter(torch.eye(((shape[1] * shape[2]) * shape[3])), requires_grad=False)
sqt_cov_nex = nn.Parameter(torch.eye(shape[0]), requires_grad=False)
if torch.cuda.is_available():
sqt_cov_pre = sqt_cov_pre.cuda()
sqt_cov_nex = sqt_cov_nex.cuda()
self.sqrt_cov_prev.append(sqt_cov_pre)
self.sqrt_cov_next.append(sqt_cov_nex)
self.cov_weight.append(list(module.parameters())[0].view(shape[0], (- 1)))
elif (len(shape) == 2):
if ((args.regu_type == 'ALL') or (args.regu_type == 'FC')):
sqt_cov_pre = nn.Parameter(torch.eye(shape[1]), requires_grad=False)
sqt_cov_nex = nn.Parameter(torch.eye(shape[0]), requires_grad=False)
if torch.cuda.is_available():
sqt_cov_pre = sqt_cov_pre.cuda()
sqt_cov_nex = sqt_cov_nex.cuda()
self.sqrt_cov_prev.append(sqt_cov_pre)
self.sqrt_cov_next.append(sqt_cov_nex)
self.cov_weight.append(list(module.parameters())[0])
def regularizer(self):
r = []
for i in range(len(self.cov_weight)):
r_sqrt = torch.mm(torch.mm(self.sqrt_cov_next[i], self.cov_weight[i]), self.sqrt_cov_prev[i])
r.append(torch.sum((r_sqrt * r_sqrt)))
return r
def _thresholding(self, sv, lower, upper):
uidx = (sv > upper)
lidx = (sv < lower)
sv[uidx] = upper
sv[lidx] = lower
return sv
def update_covs(self, lower, upper):
for i in range(len(self.cov_weight)):
cov_next = torch.mm(self.sqrt_cov_next[i], self.sqrt_cov_next[i].t())
cov_prev_weight = torch.mm(torch.mm(self.cov_weight[i].t(), cov_next), self.cov_weight[i])
(u, s, _) = torch.svd(cov_prev_weight.data)
s = (s.shape[0] / s)
s = self._thresholding(s, lower, upper)
s = torch.sqrt(s)
self.sqrt_cov_prev[i].data = torch.mm(torch.mm(u, torch.diag(s)), u.t())
for i in range(len(self.cov_weight)):
cov_prev = torch.mm(self.sqrt_cov_prev[i], self.sqrt_cov_prev[i].t())
cov_next_weight = torch.mm(torch.mm(self.cov_weight[i], cov_prev), self.cov_weight[i].t())
(u, s, _) = torch.svd(cov_next_weight.data)
s = (s.shape[0] / s)
s = self._thresholding(s, lower, upper)
s = torch.sqrt(s)
self.sqrt_cov_next[i].data = torch.mm(torch.mm(u, torch.diag(s)), u.t())
return OurNet(args) |
class AttentionUNet(nn.Module):
def __init__(self, in_ch, num_classes, base_ch=32, block='SingleConv', pool=True):
super().__init__()
num_block = 2
block = get_block(block)
self.inc = inconv(in_ch, base_ch, block=block)
self.down1 = down_block(base_ch, (2 * base_ch), num_block=num_block, block=block, pool=pool)
self.down2 = down_block((2 * base_ch), (4 * base_ch), num_block=num_block, block=block, pool=pool)
self.down3 = down_block((4 * base_ch), (8 * base_ch), num_block=num_block, block=block, pool=pool)
self.down4 = down_block((8 * base_ch), (16 * base_ch), num_block=num_block, block=block, pool=pool)
self.down5 = down_block((16 * base_ch), (32 * base_ch), num_block=num_block, block=block, pool=pool)
self.up1 = attention_up_block((16 * base_ch), (8 * base_ch), num_block=num_block, block=block)
self.up2 = attention_up_block((8 * base_ch), (4 * base_ch), num_block=num_block, block=block)
self.up3 = attention_up_block((4 * base_ch), (2 * base_ch), num_block=num_block, block=block)
self.up4 = attention_up_block((2 * base_ch), base_ch, num_block=num_block, block=block)
self.outc = nn.Conv2d(base_ch, num_classes, kernel_size=1)
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
out = self.up1(x5, x4)
out = self.up2(out, x3)
out = self.up3(out, x2)
out = self.up4(out, x1)
out = self.outc(out)
return out |
class CacheEvaluationListener(Listener):
def __init__(self):
smokesignal.on('evaluation_finished', self.on_evaluation_finished)
super().__init__()
def on_evaluation_finished(self, evaluation, dataset, predictor):
self.fname = _timestamped_filename(f'{dataset}-{predictor}-predictions')
evaluation.to_file(self.fname) |
def fix_overpassing_lines(lines, buses, distance_crs, tol=1):
lines_to_add = []
lines_to_split = []
lines_epsgmod = lines.to_crs(distance_crs)
buses_epsgmod = buses.to_crs(distance_crs)
tqdm_kwargs_substation_ids = dict(ascii=False, unit=' lines', total=lines.shape[0], desc='Verify lines overpassing nodes ')
for l in tqdm(lines.index, **tqdm_kwargs_substation_ids):
bus_in_tol_epsg = buses_epsgmod[(buses_epsgmod.geometry.distance(lines_epsgmod.geometry.loc[l]) <= tol)]
bus_in_tol_epsg = bus_in_tol_epsg[((bus_in_tol_epsg.geometry.distance(lines_epsgmod.geometry.loc[l].boundary.geoms[0]) > tol) | (bus_in_tol_epsg.geometry.distance(lines_epsgmod.geometry.loc[l].boundary.geoms[1]) > tol))]
if (not bus_in_tol_epsg.empty):
lines_to_split.append(l)
buses_locs = buses.geometry.loc[bus_in_tol_epsg.index]
new_geometries = _split_linestring_by_point(lines.geometry[l], buses_locs)
n_geoms = len(new_geometries)
df_append = gpd.GeoDataFrame(([lines.loc[l]] * n_geoms))
df_append['geometry'] = new_geometries
df_append['line_id'] = [(str(df_append['line_id'].iloc[0]) + f'_{id}') for id in range(n_geoms)]
lines_to_add.append(df_append)
if (not lines_to_add):
return (lines, buses)
df_to_add = gpd.GeoDataFrame(pd.concat(lines_to_add, ignore_index=True))
df_to_add.set_crs(lines.crs, inplace=True)
df_to_add.set_index((lines.index[(- 1)] + df_to_add.index), inplace=True)
df_to_add['length'] = df_to_add.to_crs(distance_crs).geometry.length
df_to_add = line_endings_to_bus_conversion(df_to_add)
lines.drop(lines_to_split, inplace=True)
lines = gpd.GeoDataFrame(pd.concat([lines, df_to_add], ignore_index=True).reset_index(drop=True), crs=lines.crs)
return (lines, buses) |
def Linf_PGD(x_in, y_true, net, steps, eps):
if (eps == 0):
return x_in
training = net.training
if training:
net.eval()
x_adv = x_in.clone().requires_grad_()
optimizer = Linf_SGD([x_adv], lr=0.007)
for _ in range(steps):
optimizer.zero_grad()
net.zero_grad()
(out, _) = net(x_adv)
loss = (- F.cross_entropy(out, y_true))
loss.backward()
optimizer.step()
diff = (x_adv - x_in)
diff.clamp_((- eps), eps)
x_adv.detach().copy_((diff + x_in).clamp_(0, 1))
net.zero_grad()
if training:
net.train()
return x_adv |
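# Minimal sketch of the same L-infinity attack idea using plain sign-gradient ascent instead
# of the project's custom Linf_SGD optimizer; the toy linear model, step size and epsilon are
# assumptions for illustration, not the original settings.
import torch
import torch.nn.functional as F

net = torch.nn.Linear(4, 3)
x_in = torch.rand(2, 4)
y_true = torch.tensor([0, 2])
eps, alpha, steps = 0.03, 0.007, 5

x_adv = x_in.clone()
for _ in range(steps):
    x_adv.requires_grad_(True)
    loss = F.cross_entropy(net(x_adv), y_true)
    grad, = torch.autograd.grad(loss, x_adv)
    with torch.no_grad():
        x_adv = x_adv + alpha * grad.sign()             # ascend the loss
        x_adv = x_in + (x_adv - x_in).clamp(-eps, eps)  # project into the L-inf ball
        x_adv = x_adv.clamp(0.0, 1.0)                   # keep a valid pixel range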
class TrainOptions():
def __init__(self):
self.parser = argparse.ArgumentParser()
req = self.parser.add_argument_group('Required')
req.add_argument('--name', required=True, help='Name of the experiment')
gen = self.parser.add_argument_group('General')
gen.add_argument('--time_to_run', type=int, default=np.inf, help='Total time to run in seconds. Used for training in environments with timing constraints')
gen.add_argument('--resume', dest='resume', default=False, action='store_true', help='Resume from checkpoint (use latest checkpoint by default)')
gen.add_argument('--num_workers', type=int, default=4, help='Number of processes used for data loading')
pin = gen.add_mutually_exclusive_group()
pin.add_argument('--pin_memory', dest='pin_memory', action='store_true')
pin.add_argument('--no_pin_memory', dest='pin_memory', action='store_false')
gen.set_defaults(pin_memory=True)
io = self.parser.add_argument_group('io')
io.add_argument('--log_dir', default='logs', help='Directory to store logs')
io.add_argument('--checkpoint', default=None, help='Path to checkpoint')
io.add_argument('--from_json', default=None, help='Load options from json file instead of the command line')
io.add_argument('--pretrained_checkpoint', default=config.PRE_MODEL_PATH, help='Load a pretrained checkpoint at the beginning of training')
train = self.parser.add_argument_group('Training Options')
train.add_argument('--num_epochs', type=int, default=4, help='Total number of training epochs')
train.add_argument('--lr', type=float, default=5e-05, help='Learning rate')
train.add_argument('--batch_size', type=int, default=32, help='Batch size')
train.add_argument('--summary_steps', type=int, default=2000, help='Summary saving frequency')
train.add_argument('--test_steps', type=int, default=1000, help='Testing frequency during training')
train.add_argument('--checkpoint_steps', type=int, default=2000, help='Checkpoint saving frequency')
train.add_argument('--img_res', type=int, default=224, help='Rescale bounding boxes to size [img_res, img_res] before feeding them in the network')
train.add_argument('--rot_factor', type=float, default=30, help='Random rotation in the range [-rot_factor, rot_factor]')
train.add_argument('--noise_factor', type=float, default=0.4, help='Randomly multiply pixel values with factor in the range [1-noise_factor, 1+noise_factor]')
train.add_argument('--scale_factor', type=float, default=0.25, help='Rescale bounding boxes by a factor of [1-scale_factor,1+scale_factor]')
train.add_argument('--ignore_3d', default=False, action='store_true', help='Ignore GT 3D data (for unpaired experiments)')
train.add_argument('--shape_loss_weight', default=0, type=float, help='Weight of per-vertex loss')
train.add_argument('--keypoint_loss_weight', default=5.0, type=float, help='Weight of 2D and 3D keypoint loss')
train.add_argument('--pose_loss_weight', default=1.0, type=float, help='Weight of SMPL pose loss')
train.add_argument('--beta_loss_weight', default=0.001, type=float, help='Weight of SMPL betas loss')
train.add_argument('--cam_loss_weight', default=0.0001, type=float, help='Weight of SMPL cam_t loss')
train.add_argument('--feat_loss_weight', default=0.1, type=float, help='Weight of feat mse loss')
train.add_argument('--openpose_train_weight', default=0.0, help='Weight for OpenPose keypoints during training')
train.add_argument('--gt_train_weight', default=1.0, help='Weight for GT keypoints during training')
train.add_argument('--tiny_pose_loss_weight', default=2.5, type=float, help='Weight of tiny pose 2d keypoint loss')
train.add_argument('--consistency_loss_weight', default=0.25, type=float, help='Weight of consistency loss')
train.add_argument('--ramp', default='up', type=str, help='Whether to ramp up the consistency loss weight')
train.add_argument('--kernel', default='dot', type=str, help='kernel for xent loss')
train.add_argument('--loss_consistency_type', default=0, type=int, help='type of consistency loss format')
train.add_argument('--input_img', default='orig', type=str, help='orig, lr, up')
train.add_argument('--max_queue_size', default=20000, type=int, help='max queue size')
train.add_argument('--sample_size', default=(8192 * 2), type=int, help='negative sampling size')
train.add_argument('--tau', default=0.1, type=float, help='temperature parameter')
shuffle_train = train.add_mutually_exclusive_group()
shuffle_train.add_argument('--shuffle_train', dest='shuffle_train', action='store_true', help='Shuffle training data')
shuffle_train.add_argument('--no_shuffle_train', dest='shuffle_train', action='store_false', help="Don't shuffle training data")
shuffle_train.set_defaults(shuffle_train=True)
return
def parse_args(self, manual_args=None):
self.args = self.parser.parse_args(manual_args)
if (self.args.from_json is not None):
path_to_json = os.path.abspath(self.args.from_json)
with open(path_to_json, 'r') as f:
json_args = json.load(f)
json_args = namedtuple('json_args', json_args.keys())(**json_args)
return json_args
else:
self.args.log_dir = os.path.join(os.path.abspath(self.args.log_dir), self.args.name)
self.args.summary_dir = os.path.join(self.args.log_dir, 'tensorboard')
if (not os.path.exists(self.args.log_dir)):
os.makedirs(self.args.log_dir)
self.args.checkpoint_dir = os.path.join(self.args.log_dir, 'checkpoints')
if (not os.path.exists(self.args.checkpoint_dir)):
os.makedirs(self.args.checkpoint_dir)
self.save_dump()
return self.args
def save_dump(self):
if (not os.path.exists(self.args.log_dir)):
os.makedirs(self.args.log_dir)
with open(os.path.join(self.args.log_dir, 'config.json'), 'w') as f:
json.dump(vars(self.args), f, indent=4)
return |
class QuadraticConstraint(Constraint):
Sense = ConstraintSense
def __init__(self, quadratic_program: Any, name: str, linear: Union[(ndarray, spmatrix, List[float], Dict[(Union[(str, int)], float)])], quadratic: Union[(ndarray, spmatrix, List[List[float]], Dict[(Tuple[(Union[(int, str)], Union[(int, str)])], float)])], sense: ConstraintSense, rhs: float) -> None:
super().__init__(quadratic_program, name, sense, rhs)
self._linear = LinearExpression(quadratic_program, linear)
self._quadratic = QuadraticExpression(quadratic_program, quadratic)
@property
def linear(self) -> LinearExpression:
return self._linear
@linear.setter
def linear(self, linear: Union[(ndarray, spmatrix, List[float], Dict[(Union[(str, int)], float)])]) -> None:
self._linear = LinearExpression(self.quadratic_program, linear)
@property
def quadratic(self) -> QuadraticExpression:
return self._quadratic
@quadratic.setter
def quadratic(self, quadratic: Union[(ndarray, spmatrix, List[List[float]], Dict[(Tuple[(Union[(int, str)], Union[(int, str)])], float)])]) -> None:
self._quadratic = QuadraticExpression(self.quadratic_program, quadratic)
def evaluate(self, x: Union[(ndarray, List, Dict[(Union[(int, str)], float)])]) -> float:
return (self.linear.evaluate(x) + self.quadratic.evaluate(x)) |
def train_detector(model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None):
logger = get_root_logger(cfg.log_level)
dataset = (dataset if isinstance(dataset, (list, tuple)) else [dataset])
if ('imgs_per_gpu' in cfg.data):
logger.warning('"imgs_per_gpu" is deprecated in MMDet V2.0. Please use "samples_per_gpu" instead')
if ('samples_per_gpu' in cfg.data):
logger.warning(f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and "samples_per_gpu"={cfg.data.samples_per_gpu}; "imgs_per_gpu"={cfg.data.imgs_per_gpu} is used in this experiment')
else:
logger.warning(f'Automatically set "samples_per_gpu"="imgs_per_gpu"={cfg.data.imgs_per_gpu} in this experiment')
cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu
data_loaders = [build_dataloader(ds, cfg.data.samples_per_gpu, cfg.data.workers_per_gpu, len(cfg.gpu_ids), dist=distributed, seed=cfg.seed) for ds in dataset]
optimizer = build_optimizer(model, cfg.optimizer)
if (cfg.optimizer_config.get('type', None) and (cfg.optimizer_config['type'] == 'DistOptimizerHook')):
if cfg.optimizer_config.get('use_fp16', False):
(model, optimizer) = apex.amp.initialize(model.cuda(), optimizer, opt_level='O1')
for m in model.modules():
if hasattr(m, 'fp16_enabled'):
m.fp16_enabled = True
if distributed:
find_unused_parameters = cfg.get('find_unused_parameters', False)
model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False, find_unused_parameters=find_unused_parameters)
else:
model = MMDataParallel(model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
if ('runner' not in cfg):
cfg.runner = {'type': 'EpochBasedRunner', 'max_epochs': cfg.total_epochs}
warnings.warn('config is now expected to have a `runner` section, please set `runner` in your config.', UserWarning)
elif ('total_epochs' in cfg):
assert (cfg.total_epochs == cfg.runner.max_epochs)
runner = build_runner(cfg.runner, default_args=dict(model=model, optimizer=optimizer, work_dir=cfg.work_dir, logger=logger, meta=meta))
runner.timestamp = timestamp
fp16_cfg = cfg.get('fp16', None)
if (fp16_cfg is not None):
optimizer_config = Fp16OptimizerHook(**cfg.optimizer_config, **fp16_cfg, distributed=distributed)
elif (distributed and ('type' not in cfg.optimizer_config)):
optimizer_config = OptimizerHook(**cfg.optimizer_config)
else:
optimizer_config = cfg.optimizer_config
runner.register_training_hooks(cfg.lr_config, optimizer_config, cfg.checkpoint_config, cfg.log_config, cfg.get('momentum_config', None))
if distributed:
if isinstance(runner, EpochBasedRunner):
runner.register_hook(DistSamplerSeedHook())
if validate:
val_samples_per_gpu = cfg.data.val.pop('samples_per_gpu', 1)
if (val_samples_per_gpu > 1):
cfg.data.val.pipeline = replace_ImageToTensor(cfg.data.val.pipeline)
val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
val_dataloader = build_dataloader(val_dataset, samples_per_gpu=val_samples_per_gpu, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False)
eval_cfg = cfg.get('evaluation', {})
eval_cfg['by_epoch'] = (cfg.runner['type'] != 'IterBasedRunner')
eval_hook = (DistEvalHook if distributed else EvalHook)
runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
if cfg.get('custom_hooks', None):
custom_hooks = cfg.custom_hooks
assert isinstance(custom_hooks, list), f'custom_hooks expect list type, but got {type(custom_hooks)}'
for hook_cfg in cfg.custom_hooks:
assert isinstance(hook_cfg, dict), f'Each item in custom_hooks expects dict type, but got {type(hook_cfg)}'
hook_cfg = hook_cfg.copy()
priority = hook_cfg.pop('priority', 'NORMAL')
hook = build_from_cfg(hook_cfg, HOOKS)
runner.register_hook(hook, priority=priority)
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner.run(data_loaders, cfg.workflow) |
@pytest.mark.parametrize('serializer', ['dask', 'pickle', 'disk'])
def test_multiple_deserializations(serializer):
data1 = bytearray(10)
proxy = proxy_object.asproxy(data1, serializers=(serializer,))
pxy = proxy._pxy_get()
data2 = proxy._pxy_deserialize()
assert (data1 == data2)
if (serializer == 'disk'):
file_path = pxy.obj[0]['disk-io-header']['path']
assert isinstance(file_path, SpillToDiskFile)
assert file_path.exists()
file_path = str(file_path)
data3 = pxy.deserialize()
assert (data1 == data3)
if (serializer == 'disk'):
assert (not os.path.exists(file_path)) |
class ListenbrainzSubmission(EventPlugin):
PLUGIN_ID = 'listenbrainz'
PLUGIN_NAME = _('ListenBrainz Submission')
PLUGIN_DESC = _('Submit listens to ListenBrainz.')
PLUGIN_ICON = Icons.NETWORK_WORKGROUP
def __init__(self):
self.__enabled = False
self.queue = ListenBrainzSubmitQueue()
queue_thread = threading.Thread(None, self.queue.run)
queue_thread.daemon = True
queue_thread.start()
self.start_time = 0
self.unpaused_time = 0
self.elapsed = 0
self.nowplaying = None
self.exclude = plugin_config.get('exclude')
def plugin_on_song_ended(self, song, stopped):
if ((song is None) or (not self.__enabled)):
return
if (self.unpaused_time > 0):
self.elapsed += (time.time() - self.unpaused_time)
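# Only submit listens that played for at least four minutes or more than half the
# track length, mirroring the usual ListenBrainz/scrobbling submission rule.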
if ((self.elapsed < (4 * 60)) and (self.elapsed <= (0.5 * song.get('~#length', 0)))):
return
print_d(('Checking against filter %s' % self.exclude))
if (self.exclude and Query(self.exclude).search(song)):
print_d(('Not submitting: %s' % song('~artist~title')))
return
self.queue.submit(song, self.start_time)
def song_excluded(self, song):
if (self.exclude and Query(self.exclude).search(song)):
print_d(f"{song('~artist~title')} is excluded by {self.exclude}")
return True
return False
def send_nowplaying(self, song):
if (not self.song_excluded(song)):
self.queue.set_nowplaying(song)
def plugin_on_song_started(self, song):
if (song is None):
return
self.start_time = int(time.time())
if app.player.paused:
self.unpaused_time = 0
else:
self.unpaused_time = time.time()
self.elapsed = 0
if (self.__enabled and (not app.player.paused)):
self.send_nowplaying(song)
else:
self.nowplaying = song
def plugin_on_paused(self):
if (self.unpaused_time > 0):
self.elapsed += (time.time() - self.unpaused_time)
self.unpaused_time = 0
def plugin_on_unpaused(self):
self.unpaused_time = time.time()
if (self.__enabled and self.nowplaying):
self.send_nowplaying(self.nowplaying)
self.nowplaying = None
def enabled(self):
self.__enabled = True
print_d('Plugin enabled - accepting new songs.')
def disabled(self):
self.__enabled = False
print_d('Plugin disabled - not accepting any new songs.')
def PluginPreferences(self, parent):
def changed(entry, key):
if entry.get_property('sensitive'):
plugin_config.set(key, entry.get_text())
box = Gtk.VBox(spacing=12)
table = Gtk.Table(n_rows=2, n_columns=2)
table.props.expand = False
table.set_col_spacings(6)
table.set_row_spacings(6)
labels = []
label_names = [_('User _token:')]
for (idx, name) in enumerate(label_names):
label = Gtk.Label(label=name)
label.set_alignment(0.0, 0.5)
label.set_use_underline(True)
table.attach(label, 0, 1, idx, (idx + 1), xoptions=(Gtk.AttachOptions.FILL | Gtk.AttachOptions.SHRINK))
labels.append(label)
row = 0
entry = UndoEntry()
entry.set_text(plugin_config.get('user_token'))
entry.connect('changed', changed, 'user_token')
table.attach(entry, 1, 2, row, (row + 1))
labels[row].set_mnemonic_widget(entry)
row += 1
box.pack_start(qltk.Frame(_('Account'), child=table), True, True, 0)
table = Gtk.Table(n_rows=5, n_columns=2)
table.props.expand = False
table.set_col_spacings(6)
table.set_row_spacings(6)
label_names = [_('_Artist pattern:'), _('_Title pattern:'), _('T_ags:'), _('Exclude _filter:')]
labels = []
for (idx, name) in enumerate(label_names):
label = Gtk.Label(label=name)
label.set_alignment(0.0, 0.5)
label.set_use_underline(True)
table.attach(label, 0, 1, idx, (idx + 1), xoptions=(Gtk.AttachOptions.FILL | Gtk.AttachOptions.SHRINK))
labels.append(label)
row = 0
entry = UndoEntry()
entry.set_text(plugin_config.get('artistpat'))
entry.connect('changed', changed, 'artistpat')
table.attach(entry, 1, 2, row, (row + 1))
entry.set_tooltip_text(_('The pattern used to format the artist name for submission. Leave blank for default.'))
labels[row].set_mnemonic_widget(entry)
row += 1
entry = UndoEntry()
entry.set_text(plugin_config.get('titlepat'))
entry.connect('changed', changed, 'titlepat')
table.attach(entry, 1, 2, row, (row + 1))
entry.set_tooltip_text(_('The pattern used to format the title for submission. Leave blank for default.'))
labels[row].set_mnemonic_widget(entry)
row += 1
entry = UndoEntry()
entry.set_text(plugin_config.get('tags'))
entry.connect('changed', changed, 'tags')
table.attach(entry, 1, 2, row, (row + 1))
entry.set_tooltip_text(_('List of tags to include in the submission. Comma-separated, use double-quotes if necessary.'))
labels[row].set_mnemonic_widget(entry)
row += 1
entry = ValidatingEntry(Query.validator)
entry.set_text(plugin_config.get('exclude'))
entry.set_tooltip_text(_('Songs matching this filter will not be submitted.'))
entry.connect('changed', changed, 'exclude')
table.attach(entry, 1, 2, row, (row + 1))
labels[row].set_mnemonic_widget(entry)
row += 1
offline = plugin_config.ConfigCheckButton(_("_Offline mode (don't submit anything)"), 'offline', populate=True)
table.attach(offline, 0, 2, row, (row + 1))
box.pack_start(qltk.Frame(_('Submission'), child=table), True, True, 0)
return box |
@register_bpe('gpt2')
class GPT2BPE(object):
@staticmethod
def add_args(parser):
parser.add_argument('--gpt2-encoder-json', type=str, default=DEFAULT_ENCODER_JSON, help='path to encoder.json')
parser.add_argument('--gpt2-vocab-bpe', type=str, default=DEFAULT_VOCAB_BPE, help='path to vocab.bpe')
def __init__(self, args):
encoder_json = file_utils.cached_path(getattr(args, 'gpt2_encoder_json', DEFAULT_ENCODER_JSON))
vocab_bpe = file_utils.cached_path(getattr(args, 'gpt2_vocab_bpe', DEFAULT_VOCAB_BPE))
self.bpe = get_encoder(encoder_json, vocab_bpe)
def encode(self, x: str) -> str:
return ' '.join(map(str, self.bpe.encode(x)))
def decode(self, x: str) -> str:
return self.bpe.decode(x.split())
def is_beginning_of_word(self, x: str) -> bool:
return self.decode(x).startswith(' ') |
class ModuleRenamesTransformer(MigrationTransformer):
def __init__(self, *args, **kwargs):
self.from_imports = []
MigrationTransformer.__init__(self, *args, **kwargs)
def do_lint(self, original_node, module):
if (module == 'window'):
self.lint(original_node, "The 'libqtile.window' has been moved to 'libqtile.backend.x11.window'.")
else:
self.lint(original_node, "The 'libqtile.command_*' modules have been moved to 'libqtile.command.*'.")
@m.leave(m.ImportAlias(name=m.Attribute(value=m.Name('libqtile'), attr=m.Name(m.MatchIfTrue((lambda x: (x in MODULE_MAP)))))))
def update_import_module_names(self, original_node, updated_node) -> cst.ImportAlias:
module = original_node.name.attr.value
self.do_lint(original_node, module)
new_module = MODULE_MAP[module]
return updated_node.with_changes(name=new_module)
@m.leave(m.ImportFrom(module=m.Attribute(value=m.Name('libqtile'), attr=m.Name(m.MatchIfTrue((lambda x: (x in MODULE_MAP)))))))
def update_import_from_module_names(self, original_node, updated_node) -> cst.ImportFrom:
module = original_node.module.attr.value
self.do_lint(original_node, module)
new_module = MODULE_MAP[module]
return updated_node.with_changes(module=new_module)
@m.leave(m.ImportFrom(module=m.Name('libqtile'), names=[m.ZeroOrMore(), m.ImportAlias(name=m.Name(m.MatchIfTrue((lambda x: (x in IMPORT_MAP))))), m.ZeroOrMore()]))
def tag_from_imports(self, original_node, _) -> cst.ImportFrom:
for name in original_node.names:
if (name.name.value in IMPORT_MAP):
self.lint(original_node, f'From libqtile import {name.name.value} is deprecated.')
self.from_imports.append(name.name.value)
return original_node |
@patch.object(StepsRunner, 'run_step_group')
def test_run_step_groups_sequence_with_failing_fail(mock_run_step_group):
mock_run_step_group.side_effect = [None, None, ValueError('arb'), KeyError('arb failure handler err')]
with pytest.raises(ValueError) as err:
StepsRunner(get_valid_test_pipeline(), Context()).run_step_groups(groups=['sg3', 'sg1', 'sg2', 'sg4'], success_group='arb success', failure_group='arb fail')
assert (str(err.value) == 'arb')
assert (mock_run_step_group.mock_calls == [call('sg3'), call('sg1'), call('sg2'), call('arb fail', raise_stop=True)]) |
def get_pairs(df, merge_col=['session_id', 'wcs_user_sk'], pair_col='i_category_id', output_col_1='category_id_1', output_col_2='category_id_2'):
pair_df = df.merge(df, on=merge_col, suffixes=['_t1', '_t2'], how='inner')
pair_df = pair_df[[f'{pair_col}_t1', f'{pair_col}_t2']]
pair_df = pair_df[(pair_df[f'{pair_col}_t1'] < pair_df[f'{pair_col}_t2'])].reset_index(drop=True)
pair_df.columns = [output_col_1, output_col_2]
return pair_df |
class LeguHashmap(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = (_root if _root else self)
self._read()
def _read(self):
self.header = self._root.HeaderT(self._io, self, self._root)
self.classes_info = ([None] * self.header.table_size)
for i in range(self.header.table_size):
self.classes_info[i] = self._root.ClassesInfoT(self._io, self, self._root)
self.nb_methods_info = self._io.read_u4le()
self.methods_info = ([None] * self.nb_methods_info)
for i in range(self.nb_methods_info):
self.methods_info[i] = self._root.MethodsInfoT(self._io, self, self._root)
class HeaderT(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = (_root if _root else self)
self._read()
def _read(self):
self.unknown = self._io.read_u4le()
self.table_size = self._io.read_u4le()
class ClassesInfoT(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = (_root if _root else self)
self._read()
def _read(self):
self.utf8_hash = self._io.read_u4le()
self.string_off = self._io.read_u4le()
self.methods_info_idx = self._io.read_u4le()
class MethodsInfoT(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = (_root if _root else self)
self._read()
def _read(self):
self.nb_methods = self._io.read_u4le()
self.packed_info = ([None] * self.nb_methods)
for i in range(self.nb_methods):
self.packed_info[i] = self._root.PackedMethodT(self._io, self, self._root)
class PackedMethodT(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = (_root if _root else self)
self._read()
def _read(self):
self.packed_code_off = self._io.read_u4le()
self.code_size = self._io.read_u4le()
self.code_off = self._io.read_u4le() |
class PreLoadedMapStyle():
def __init__(self, dir, transform, buffer_size=100):
dataset = torchvision.datasets.ImageFolder(dir, transform=transform)
self.size = len(dataset)
self.samples = [dataset[torch.randint(0, len(dataset), size=(1,)).item()] for i in range(buffer_size)]
def __len__(self):
return self.size
def __getitem__(self, idx):
return self.samples[(idx % len(self.samples))] |
def test_create(skip_qtbot):
widget = ChangeLogWidget({'1.0': 'Foo', '2.0': 'Bar'})
skip_qtbot.addWidget(widget)
assert (widget.select_version.count() == 2)
assert (widget.select_version.itemText(0) == '1.0')
assert (widget.select_version.itemText(1) == '2.0')
widget.select_version.setCurrentIndex(0)
qScroll0 = cast(QtWidgets.QScrollArea, widget.changelog.currentWidget())
qLabel0 = cast(QtWidgets.QLabel, qScroll0.widget())
assert (qLabel0.text() == 'Foo')
widget.select_version.setCurrentIndex(1)
qScroll1 = cast(QtWidgets.QScrollArea, widget.changelog.currentWidget())
qLabel1 = cast(QtWidgets.QLabel, qScroll1.widget())
assert (qLabel1.text() == 'Bar') |
def create_debug_lettered_tiles(**writer_kwargs):
writer_kwargs['lettered_grid'] = True
writer_kwargs['num_subtiles'] = (2, 2)
(init_kwargs, save_kwargs) = AWIPSTiledWriter.separate_init_kwargs(**writer_kwargs)
writer = AWIPSTiledWriter(**init_kwargs)
sector_id = save_kwargs['sector_id']
sector_info = writer.awips_sectors[sector_id]
(area_def, arr) = _create_debug_array(sector_info, save_kwargs['num_subtiles'])
now = datetime.utcnow()
product = xr.DataArray(da.from_array(arr, chunks='auto'), attrs=dict(name='debug_{}'.format(sector_id), platform_name='DEBUG', sensor='TILES', start_time=now, end_time=now, area=area_def, standard_name='toa_bidirectional_reflectance', units='1', valid_min=0, valid_max=255))
created_files = writer.save_dataset(product, **save_kwargs)
return created_files |
def _eval(train_pipeline: TrainPipelineSparseDist, it: Iterator[Batch]) -> Tuple[(float, float, float)]:
train_pipeline._model.eval()
device = train_pipeline._device
auroc = metrics.AUROC(compute_on_step=False).to(device)
accuracy = metrics.Accuracy(compute_on_step=False).to(device)
val_losses = []
step = 0
with torch.no_grad():
while True:
try:
(loss, logits, labels) = train_pipeline.progress(it)
val_losses.append(loss)
preds = torch.sigmoid(logits)
labels = labels.to(torch.int32)
auroc(preds, labels)
accuracy(preds, labels)
step += 1
except StopIteration:
break
auroc_result = auroc.compute().item()
accuracy_result = accuracy.compute().item()
bce_loss = torch.mean(torch.stack(val_losses))
return (auroc_result, accuracy_result, bce_loss) |
class Ffmpeg():
_RE_DURATION = re.compile(b'Duration: (\\d{2}):(\\d{2}):(\\d{2})\\.(\\d{2}),')
_RE_TIME = re.compile(b'time=(\\d{2}):(\\d{2}):(\\d{2})\\.(\\d{2})')
_RE_VERSION = re.compile(b'ffmpeg version (.+?) ')
CMD = None
priority = 0
streams = []
start_time = (0, 0)
output_filename = None
error_message = ''
def __init__(self, priority, plugin=None):
self.plugin = plugin
self.priority = priority
self.streams = []
self.start_time = (0, 0)
self.output_filename = None
self.error_message = ''
self.find()
@classmethod
def find(cls):
if (cls.CMD is not None):
return True
try:
if (os.name == 'nt'):
ffmpeg = (os.path.join(PKGDIR, 'lib', 'ffmpeg.exe') if is_executable(os.path.join(PKGDIR, 'lib', 'ffmpeg.exe')) else 'ffmpeg.exe')
else:
ffmpeg = 'ffmpeg'
cmd = (which(ffmpeg) or ffmpeg)
p = subprocess.Popen([cmd, '-version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = ((r.strip() if r else '') for r in p.communicate())
except OSError:
return False
m = cls._RE_VERSION.search(out)
if (m is not None):
cls.VERSION = m.group(1)
cls.CMD = cmd
return True
def found(self):
return (self.CMD is not None)
def add_stream(self, streams):
if isinstance(streams, list):
self.streams.extend(streams)
else:
self.streams.append(streams)
def set_start_time(self, start_time):
self.start_time = start_time
def set_output_filename(self, output_filename):
self.output_filename = output_filename
def run(self):
if ((self.CMD is None) or (self.output_filename is None)):
return False
maps = []
args = []
meta = []
for (i, stream) in enumerate(self.streams):
args.extend(['-i', stream[1]])
maps.extend(['-map', '{}:{}:0'.format(i, stream[0])])
if (stream[0] == 's'):
meta.extend(['-metadata:s:s:0:{}'.format(i), 'language={}'.format(stream[2])])
args.extend(maps)
args.extend(meta)
args.extend(['-y', '-vcodec', 'copy', '-acodec', 'copy', '-scodec', 'copy', '-ss', '00:{}:{}.00'.format(self.start_time[0], self.start_time[1]), '-sub_charenc', 'utf-8'])
call = (([self.CMD] + args) + [self.output_filename])
self.plugin.log_debug(('EXECUTE ' + ' '.join(call)))
p = subprocess.Popen(call, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
renice(p.pid, self.priority)
duration = self._find_duration(p)
if duration:
last_line = self._progress(p, duration)
else:
last_line = ''
(out, err) = ((r.strip() if r else '') for r in p.communicate())
if (err or p.returncode):
self.error_message = last_line
return False
else:
self.error_message = ''
return True
def _find_duration(self, process):
duration = 0
while True:
line = process.stderr.readline()
if (not line):
break
m = self._RE_DURATION.search(line)
if (m is not None):
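# Convert the captured HH:MM:SS.cc groups into a single duration in centiseconds.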
duration = sum(((int(v) * [((60 * 60) * 100), (60 * 100), 100, 1][i]) for (i, v) in enumerate(m.groups())))
break
return duration
def _progress(self, process, duration):
line = b''
last_line = b''
while True:
c = process.stderr.read(1)
if (not c):
break
elif (c == b'\r'):
last_line = line.strip(b'\r\n')
line = b''
m = self._RE_TIME.search(last_line)
if (m is not None):
current_time = sum(((int(v) * [((60 * 60) * 100), (60 * 100), 100, 1][i]) for (i, v) in enumerate(m.groups())))
if self.plugin:
progress = ((current_time * 100) // duration)
self.plugin.pyfile.set_progress(progress)
else:
line += c
continue
return to_str(last_line) |
class MockPsutil(ModuleType):
__version__ = '5.8.0'
@classmethod
def cpu_percent(cls):
return 2.6
@classmethod
def cpu_freq(cls):
class Freq():
def __init__(self):
self.current = 500.067
self.min = 400.0
self.max = 2800.0
return Freq() |
_config
def test_select_layout(manager):
layout = manager.c.layout
assert (layout.screen.info()['index'] == 0)
with pytest.raises(libqtile.command.client.SelectError, match='Item not available in object'):
layout.screen[0]
assert (layout.group.info()['name'] == 'a')
with pytest.raises(libqtile.command.client.SelectError, match='Item not available in object'):
layout.group['a'] |
def test_windows_sequence(runner, path_rgb_byte_tif):
result = runner.invoke(main_group, ['blocks', path_rgb_byte_tif, '--sequence'])
assert (result.exit_code == 0)
features = tuple(map(json.loads, result.output.splitlines()))
with rasterio.open(path_rgb_byte_tif) as src:
actual_first = features[0]
expected_first = next(src.block_windows())
bounds = src.window_bounds(expected_first[1])
(xmin, ymin, xmax, ymax) = transform_bounds(src.crs, 'EPSG:4326', *bounds)
coordinates = [[xmin, ymin], [xmin, ymax], [xmax, ymax], [xmax, ymin]]
assert np.array_equal(actual_first['geometry']['coordinates'][0], coordinates)
assert check_features_block_windows(features, src, bidx=1) |
def count_words(filename):
counter = collections.Counter()
with open(filename, 'r') as fd:
for line in fd:
words = line.strip().split()
counter.update(words)
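# Sort words by descending frequency, breaking ties alphabetically.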
count_pairs = sorted(counter.items(), key=(lambda x: ((- x[1]), x[0])))
(words, counts) = list(zip(*count_pairs))
return (words, counts) |
class complex():
def __init__(self, x: object, y: object=None) -> None:
pass
def __add__(self, n: complex) -> complex:
pass
def __radd__(self, n: float) -> complex:
pass
def __sub__(self, n: complex) -> complex:
pass
def __rsub__(self, n: float) -> complex:
pass
def __mul__(self, n: complex) -> complex:
pass
def __truediv__(self, n: complex) -> complex:
pass
def __neg__(self) -> complex:
pass |
def _timed_repartition(annotated_delta: DeltaAnnotated, destination_partition: Partition, repartition_type: RepartitionType, repartition_args: dict, max_records_per_output_file: int, enable_profiler: bool, read_kwargs_provider: Optional[ReadKwargsProvider], s3_table_writer_kwargs: Optional[Dict[(str, Any)]]=None, repartitioned_file_content_type: ContentType=ContentType.PARQUET, deltacat_storage=unimplemented_deltacat_storage, deltacat_storage_kwargs: Optional[Dict[(str, Any)]]=None, **kwargs) -> RepartitionResult:
if (deltacat_storage_kwargs is None):
deltacat_storage_kwargs = {}
task_id = get_current_ray_task_id()
worker_id = get_current_ray_worker_id()
with (memray.Tracker(f'repartition_{worker_id}_{task_id}.bin') if enable_profiler else nullcontext()):
tables: List[pa.Table] = deltacat_storage.download_delta(annotated_delta, storage_type=StorageType.LOCAL, file_reader_kwargs_provider=read_kwargs_provider)
if (repartition_type == RepartitionType.RANGE):
return repartition_range(tables=tables, destination_partition=destination_partition, repartition_args=repartition_args, max_records_per_output_file=max_records_per_output_file, s3_table_writer_kwargs=s3_table_writer_kwargs, repartitioned_file_content_type=repartitioned_file_content_type, deltacat_storage=deltacat_storage, deltacat_storage_kwargs=deltacat_storage_kwargs)
else:
raise NotImplementedError(f'Repartition type {repartition_type} is not supported.') |
_safe
def lookup_struct_class(constant_false):
if (CONST_FALSE_SIZE and constant_false and (constant_false[(- 1)] < CONST_FALSE_SIZE)):
n = CONST_FALSE_SIZE
pos = 0
for r in range(1, len(constant_false)):
pos += ncr(n, r)
r = len(constant_false)
last_idx = 0
for idx in constant_false:
pos += (ncr(n, r) - ncr(((n - idx) + last_idx), r))
n -= ((idx - last_idx) + 1)
r -= 1
last_idx = (idx + 1)
for (i, cls) in struct_class_iter:
if (i == pos):
return cls
return W_Struct |
def main():
args = parse_options()
global COLOR
COLOR = (args.color and sys.stdout.isatty())
if (args.sim and (not args.commit) and (not args.diff)):
sims = find_sims(args.sim, args.ignore)
if sims:
print(('%s: %s' % (yel('Similar symbols'), ', '.join(sims))))
else:
print(('%s: no similar symbols found' % yel('Similar symbols')))
sys.exit(0)
defined = {}
undefined = {}
if (args.commit or args.diff):
head = get_head()
commit_a = None
commit_b = None
if args.commit:
commit_a = (args.commit + '~')
commit_b = args.commit
elif args.diff:
split = args.diff.split('..')
commit_a = split[0]
commit_b = split[1]
undefined_a = {}
undefined_b = {}
reset(commit_a)
(undefined_a, _) = check_symbols(args.ignore)
reset(commit_b)
(undefined_b, defined) = check_symbols(args.ignore)
for symbol in sorted(undefined_b):
if (symbol not in undefined_a):
files = sorted(undefined_b.get(symbol))
undefined[symbol] = files
else:
files = sorted((undefined_b.get(symbol) - undefined_a.get(symbol)))
if files:
undefined[symbol] = files
reset(head)
else:
(undefined, defined) = check_symbols(args.ignore)
for symbol in sorted(undefined):
print(red(symbol))
files = sorted(undefined.get(symbol))
print(('%s: %s' % (yel('Referencing files'), ', '.join(files))))
sims = find_sims(symbol, args.ignore, defined)
sims_out = yel('Similar symbols')
if sims:
print(('%s: %s' % (sims_out, ', '.join(sims))))
else:
print(('%s: %s' % (sims_out, 'no similar symbols found')))
if args.find:
print(('%s:' % yel('Commits changing symbol')))
commits = find_commits(symbol, args.diff)
if commits:
for commit in commits:
commit = commit.split(' ', 1)
print(('\t- %s ("%s")' % (yel(commit[0]), commit[1])))
else:
print('\t- no commit found')
print() |
def t2_circuits(num_of_gates: Union[(List[int], np.array)], gate_time: float, qubits: List[int], n_echos: int=1, phase_alt_echo: bool=False) -> Tuple[(List[qiskit.QuantumCircuit], np.array)]:
if (n_echos < 1):
raise ValueError('Must be at least one echo')
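# Free-evolution time per circuit: each of the n_echos echo periods contains
# 2 * circ_length identity gates of duration gate_time.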
xdata = (((2 * gate_time) * np.array(num_of_gates)) * n_echos)
qr = qiskit.QuantumRegister((max(qubits) + 1))
cr = qiskit.ClassicalRegister(len(qubits))
circuits = []
for (circ_index, circ_length) in enumerate(num_of_gates):
circ = qiskit.QuantumCircuit(qr, cr)
circ.name = (('t2circuit_' + str(circ_index)) + '_0')
for (qind, qubit) in enumerate(qubits):
circ.append(U2Gate(0.0, 0.0), [qr[qubit]])
circ = pad_id_gates(circ, qr, qubit, circ_length)
circ.y(qr[qubit])
for echoid in range((n_echos - 1)):
circ = pad_id_gates(circ, qr, qubit, (2 * circ_length))
if (phase_alt_echo and (not (echoid % 2))):
circ.x(qr[qubit])
else:
circ.y(qr[qubit])
circ = pad_id_gates(circ, qr, qubit, circ_length)
circ.append(U2Gate(0.0, 0.0), [qr[qubit]])
circ.barrier(qr)
for (qind, qubit) in enumerate(qubits):
circ.measure(qr[qubit], cr[qind])
circuits.append(circ)
return (circuits, xdata) |
def generate_asts_for_modules(py_modules: list[StubSource], parse_only: bool, mypy_options: MypyOptions, verbose: bool) -> None:
if (not py_modules):
return
if verbose:
print(f'Processing {len(py_modules)} files...')
if parse_only:
for mod in py_modules:
parse_source_file(mod, mypy_options)
return
try:
res = build([module.source for module in py_modules], mypy_options)
except CompileError as e:
raise SystemExit(f'Critical error during semantic analysis: {e}') from e
for mod in py_modules:
mod.ast = res.graph[mod.module].tree
if (mod.runtime_all is None):
mod.runtime_all = res.manager.semantic_analyzer.export_map[mod.module] |
def main():
print('Generated using setters:')
x = PrettyTable(['City name', 'Area', 'Population', 'Annual Rainfall'])
x.title = 'Australian capital cities'
x.sortby = 'Population'
x.reversesort = True
x.int_format['Area'] = '04'
x.float_format = '6.1'
x.align['City name'] = 'l'
x.add_row(['Adelaide', 1295, 1158259, 600.5])
x.add_row(['Brisbane', 5905, 1857594, 1146.4])
x.add_row(['Darwin', 112, 120900, 1714.7])
x.add_row(['Hobart', 1357, 205556, 619.5])
x.add_row(['Sydney', 2058, 4336374, 1214.8])
x.add_row(['Melbourne', 1566, 3806092, 646.9])
x.add_row(['Perth', 5386, 1554769, 869.4])
print(x)
print()
print('Generated using constructor arguments:')
y = PrettyTable(['City name', 'Area', 'Population', 'Annual Rainfall'], title='Australian capital cities', sortby='Population', reversesort=True, int_format='04', float_format='6.1', max_width=12, min_width=4, align='c', valign='t')
y.align['City name'] = 'l'
y.add_row(['Adelaide', 1295, 1158259, 600.5])
y.add_row(['Brisbane', 5905, 1857594, 1146.4])
y.add_row(['Darwin', 112, 120900, 1714.7])
y.add_row(['Hobart', 1357, 205556, 619.5])
y.add_row(['Sydney', 2058, 4336374, 1214.8])
y.add_row(['Melbourne', 1566, 3806092, 646.9])
y.add_row(['Perth', 5386, 1554769, 869.4])
print(y) |
def to_functional(func: Callable) -> tf.keras.Model:
def wrapper(*args, **kwargs):
model = args[0]
if isinstance(model, tf.keras.Sequential):
_logger.info('Input model is a Sequential model. Converting to Functional model.')
model = tf.keras.Model(inputs=model.inputs, outputs=model.outputs)
args = ((model,) + args[1:])
return func(*args, **kwargs)
return wrapper |
class TrackerParams():
def set_default_values(self, default_vals: dict):
for (name, val) in default_vals.items():
if (not hasattr(self, name)):
setattr(self, name, val)
def get(self, name: str, *default):
if (len(default) > 1):
raise ValueError('Can only give one default value.')
if (not default):
return getattr(self, name)
return getattr(self, name, default[0])
def has(self, name: str):
return hasattr(self, name) |
class SE(object):
def __init__(self, params, batcher, prepare=None):
params = utils.dotdict(params)
params.usepytorch = (True if ('usepytorch' not in params) else params.usepytorch)
params.seed = (1111 if ('seed' not in params) else params.seed)
params.batch_size = (128 if ('batch_size' not in params) else params.batch_size)
params.nhid = (0 if ('nhid' not in params) else params.nhid)
params.kfold = (5 if ('kfold' not in params) else params.kfold)
if (('classifier' not in params) or (not params['classifier'])):
params.classifier = {'nhid': 0}
assert ('nhid' in params.classifier), 'Set number of hidden units in classifier config!!'
self.params = params
self.batcher = batcher
self.prepare = (prepare if prepare else (lambda x, y: None))
self.list_tasks = ['CR', 'MR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC', 'SICKRelatedness', 'SICKEntailment', 'STSBenchmark', 'SNLI', 'ImageCaptionRetrieval', 'STS12', 'STS13', 'STS14', 'STS15', 'STS16', 'Length', 'WordContent', 'Depth', 'TopConstituents', 'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber', 'OddManOut', 'CoordinationInversion', 'SICKRelatedness-finetune', 'STSBenchmark-finetune', 'STSBenchmark-fix']
def eval(self, name):
if isinstance(name, list):
self.results = {x: self.eval(x) for x in name}
return self.results
tpath = self.params.task_path
assert (name in self.list_tasks), ((str(name) + ' not in ') + str(self.list_tasks))
if (name == 'CR'):
self.evaluation = CREval((tpath + '/downstream/CR'), seed=self.params.seed)
elif (name == 'MR'):
self.evaluation = MREval((tpath + '/downstream/MR'), seed=self.params.seed)
elif (name == 'MPQA'):
self.evaluation = MPQAEval((tpath + '/downstream/MPQA'), seed=self.params.seed)
elif (name == 'SUBJ'):
self.evaluation = SUBJEval((tpath + '/downstream/SUBJ'), seed=self.params.seed)
elif (name == 'SST2'):
self.evaluation = SSTEval((tpath + '/downstream/SST/binary'), nclasses=2, seed=self.params.seed)
elif (name == 'SST5'):
self.evaluation = SSTEval((tpath + '/downstream/SST/fine'), nclasses=5, seed=self.params.seed)
elif (name == 'TREC'):
self.evaluation = TRECEval((tpath + '/downstream/TREC'), seed=self.params.seed)
elif (name == 'MRPC'):
self.evaluation = MRPCEval((tpath + '/downstream/MRPC'), seed=self.params.seed)
elif (name == 'SICKRelatedness'):
self.evaluation = SICKRelatednessEval((tpath + '/downstream/SICK'), seed=self.params.seed)
elif (name == 'STSBenchmark'):
self.evaluation = STSBenchmarkEval((tpath + '/downstream/STS/STSBenchmark'), seed=self.params.seed)
elif (name == 'STSBenchmark-fix'):
self.evaluation = STSBenchmarkEval((tpath + '/downstream/STS/STSBenchmark-fix'), seed=self.params.seed)
elif (name == 'STSBenchmark-finetune'):
self.evaluation = STSBenchmarkFinetune((tpath + '/downstream/STS/STSBenchmark'), seed=self.params.seed)
elif (name == 'SICKRelatedness-finetune'):
self.evaluation = SICKEval((tpath + '/downstream/SICK'), seed=self.params.seed)
elif (name == 'SICKEntailment'):
self.evaluation = SICKEntailmentEval((tpath + '/downstream/SICK'), seed=self.params.seed)
elif (name == 'SNLI'):
self.evaluation = SNLIEval((tpath + '/downstream/SNLI'), seed=self.params.seed)
elif (name in ['STS12', 'STS13', 'STS14', 'STS15', 'STS16']):
fpath = (name + '-en-test')
self.evaluation = eval((name + 'Eval'))(((tpath + '/downstream/STS/') + fpath), seed=self.params.seed)
elif (name == 'ImageCaptionRetrieval'):
self.evaluation = ImageCaptionRetrievalEval((tpath + '/downstream/COCO'), seed=self.params.seed)
elif (name == 'Length'):
self.evaluation = LengthEval((tpath + '/probing'), seed=self.params.seed)
elif (name == 'WordContent'):
self.evaluation = WordContentEval((tpath + '/probing'), seed=self.params.seed)
elif (name == 'Depth'):
self.evaluation = DepthEval((tpath + '/probing'), seed=self.params.seed)
elif (name == 'TopConstituents'):
self.evaluation = TopConstituentsEval((tpath + '/probing'), seed=self.params.seed)
elif (name == 'BigramShift'):
self.evaluation = BigramShiftEval((tpath + '/probing'), seed=self.params.seed)
elif (name == 'Tense'):
self.evaluation = TenseEval((tpath + '/probing'), seed=self.params.seed)
elif (name == 'SubjNumber'):
self.evaluation = SubjNumberEval((tpath + '/probing'), seed=self.params.seed)
elif (name == 'ObjNumber'):
self.evaluation = ObjNumberEval((tpath + '/probing'), seed=self.params.seed)
elif (name == 'OddManOut'):
self.evaluation = OddManOutEval((tpath + '/probing'), seed=self.params.seed)
elif (name == 'CoordinationInversion'):
self.evaluation = CoordinationInversionEval((tpath + '/probing'), seed=self.params.seed)
self.params.current_task = name
self.evaluation.do_prepare(self.params, self.prepare)
self.results = self.evaluation.run(self.params, self.batcher)
return self.results |
class BaseCrownBuilder(ABC, Generic[(LeafCr, DictCr, ListCr)]):
def build_empty_crown(self, as_list: bool) -> Union[(DictCr, ListCr)]:
if as_list:
return self._make_list_crown(current_path=(), paths_with_leaves=[])
return self._make_dict_crown(current_path=(), paths_with_leaves=[])
def build_crown(self, paths_to_leaves: PathsTo[LeafCr]) -> Union[(DictCr, ListCr)]:
paths_with_leaves = [PathWithLeaf(path, leaf) for (path, leaf) in paths_to_leaves.items()]
paths_with_leaves.sort(key=(lambda x: x.path))
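# Sorting by full path keeps sibling keys/indices contiguous, which the groupby calls
# in the crown-map helpers below rely on.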
return cast(Union[(DictCr, ListCr)], self._build_crown(paths_with_leaves, 0))
def _build_crown(self, paths_with_leaves: PathedLeaves[LeafCr], path_offset: int) -> Union[(LeafCr, DictCr, ListCr)]:
if (not paths_with_leaves):
raise ValueError
try:
first = paths_with_leaves[0].path[path_offset]
except IndexError:
if (len(paths_with_leaves) != 1):
raise ValueError
return paths_with_leaves[0].leaf
if isinstance(first, str):
return self._make_dict_crown(paths_with_leaves[0].path[:path_offset], paths_with_leaves)
if isinstance(first, int):
return self._make_list_crown(paths_with_leaves[0].path[:path_offset], paths_with_leaves)
raise RuntimeError
def _get_dict_crown_map(self, current_path: KeyPath, paths_with_leaves: PathedLeaves[LeafCr]) -> Mapping[(str, Union[(LeafCr, DictCr, ListCr)])]:
return {cast(str, key): self._build_crown(list(path_group), (len(current_path) + 1)) for (key, path_group) in groupby(paths_with_leaves, (lambda x: x.path[len(current_path)]))}
@abstractmethod
def _make_dict_crown(self, current_path: KeyPath, paths_with_leaves: PathedLeaves[LeafCr]) -> DictCr:
...
def _get_list_crown_map(self, current_path: KeyPath, paths_with_leaves: PathedLeaves[LeafCr]) -> Sequence[Union[(LeafCr, DictCr, ListCr)]]:
grouped_paths = [list(grouped_paths) for (key, grouped_paths) in groupby(paths_with_leaves, (lambda x: x.path[len(current_path)]))]
if (paths_with_leaves and (len(grouped_paths) != (cast(int, paths_with_leaves[(- 1)].path[len(current_path)]) + 1))):
raise ValueError(f'Found gaps in list mapping at {current_path}')
return [self._build_crown(path_group, (len(current_path) + 1)) for path_group in grouped_paths]
@abstractmethod
def _make_list_crown(self, current_path: KeyPath, paths_with_leaves: PathedLeaves[LeafCr]) -> ListCr:
... |
class PickupExporter():
def __init__(self, game: RandovaniaGame) -> None:
self.game = game
def create_details(self, original_index: PickupIndex, pickup_target: PickupTarget, visual_pickup: PickupEntry, model_pickup: PickupEntry, model_style: PickupModelStyle, name: str, description: str) -> ExportedPickupDetails:
raise NotImplementedError
def get_model(self, model_pickup: PickupEntry) -> PickupModel:
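# If the model belongs to a different game, look up an off-world replacement for this
# game, falling back to the database's default off-world model.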
if (self.game != model_pickup.model.game):
return PickupModel(model_pickup.model.game, model_pickup.offworld_models.get(self.game, default_database.pickup_database_for_game(self.game).default_offworld_model))
else:
return model_pickup.model
def export(self, original_index: PickupIndex, pickup_target: PickupTarget, visual_pickup: PickupEntry, model_style: PickupModelStyle) -> ExportedPickupDetails:
model_pickup = (pickup_target.pickup if (model_style == PickupModelStyle.ALL_VISIBLE) else visual_pickup)
if (model_style in {PickupModelStyle.ALL_VISIBLE, PickupModelStyle.HIDE_MODEL}):
name = pickup_target.pickup.name
description = _pickup_description(pickup_target.pickup)
else:
name = visual_pickup.name
description = ''
return self.create_details(original_index, pickup_target, visual_pickup, model_pickup, model_style, name, description) |
class SegformerFeatureExtractionTester(unittest.TestCase):
def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=30, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], reduce_labels=False):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.reduce_labels = reduce_labels
def prepare_feat_extract_dict(self):
return {'do_resize': self.do_resize, 'size': self.size, 'do_normalize': self.do_normalize, 'image_mean': self.image_mean, 'image_std': self.image_std, 'reduce_labels': self.reduce_labels} |
class TestYAMLFiles():
def test_filename_matches_reader_name(self):
import yaml
class IgnoreLoader(yaml.SafeLoader):
def _ignore_all_tags(self, tag_suffix, node):
return ((tag_suffix + ' ') + node.value)
IgnoreLoader.add_multi_constructor('', IgnoreLoader._ignore_all_tags)
from satpy._config import glob_config
from satpy.readers import read_reader_config
for reader_config in glob_config('readers/*.yaml'):
reader_fn = os.path.basename(reader_config)
reader_fn_name = os.path.splitext(reader_fn)[0]
reader_info = read_reader_config([reader_config], loader=IgnoreLoader)
assert (reader_fn_name == reader_info['name']), "Reader YAML filename doesn't match reader name in the YAML file."
def test_available_readers(self):
from satpy import available_readers
reader_names = available_readers()
assert (len(reader_names) > 0)
assert isinstance(reader_names[0], str)
assert ('viirs_sdr' in reader_names)
assert ('abi_l1b' in reader_names)
assert (reader_names == sorted(reader_names))
reader_infos = available_readers(as_dict=True)
assert (len(reader_names) == len(reader_infos))
assert isinstance(reader_infos[0], dict)
for reader_info in reader_infos:
assert ('name' in reader_info)
assert (reader_infos == sorted(reader_infos, key=(lambda reader_info: reader_info['name'])))
def test_available_readers_base_loader(self, monkeypatch):
import yaml
from satpy import available_readers
from satpy._config import glob_config
real_import = builtins.__import__  # keep a handle on the real import before it is monkeypatched
def patched_import_error(name, globals=None, locals=None, fromlist=(), level=0):
if (name in ('netcdf4',)):
raise ImportError(f'Mocked import error {name}')
return real_import(name, globals=globals, locals=locals, fromlist=fromlist, level=level)
monkeypatch.delitem(sys.modules, 'netcdf4', raising=False)
monkeypatch.setattr(builtins, '__import__', patched_import_error)
with pytest.raises(ImportError):
import netcdf4
reader_names = available_readers(yaml_loader=yaml.BaseLoader)
assert ('abi_l1b' in reader_names)
assert ('viirs_l1b' in reader_names)
assert (len(reader_names) == len(list(glob_config('readers/*.yaml')))) |
class CmdOOCLook(MuxAccountLookCommand):
key = 'look'
aliases = ['l', 'ls']
locks = 'cmd:all()'
help_category = 'General'
account_caller = True
def func(self):
if (_MULTISESSION_MODE < 2):
self.msg('You are out-of-character (OOC).\nUse |wic|n to get back into the game.')
return
self.msg(self.account.at_look(target=self.playable, session=self.session)) |
@pytest.mark.slow
@pytest.mark.pydicom
def test_metersetmap_agreement(loaded_dicom_dataset, logfile_delivery_data):
dicom_delivery_data = Delivery.from_dicom(loaded_dicom_dataset, FRACTION_GROUP)
dicom_metersetmap = dicom_delivery_data.metersetmap(grid_resolution=5)
logfile_metersetmap = logfile_delivery_data.metersetmap(grid_resolution=5)
diff = (logfile_metersetmap - dicom_metersetmap)
max_diff = np.max(np.abs(diff))
std_diff = np.std(diff)
try:
assert (max_diff < 4.1)
assert (std_diff < 0.4)
except AssertionError:
max_val = np.max([np.max(logfile_metersetmap), np.max(dicom_metersetmap)])
plt.figure()
plt.pcolormesh(dicom_metersetmap, vmin=0, vmax=max_val)
plt.colorbar()
plt.figure()
plt.pcolormesh(logfile_metersetmap, vmin=0, vmax=max_val)
plt.colorbar()
plt.figure()
plt.pcolormesh((logfile_metersetmap - dicom_metersetmap), vmin=(- max_diff), vmax=max_diff, cmap='bwr')
plt.colorbar()
plt.show()
raise |
@torch.no_grad()
def evaluate(data_loader, model, device):
criterion = torch.nn.CrossEntropyLoss()
metric_logger = utils.MetricLogger(delimiter=' ')
header = 'Test:'
print_freq = 10
model.eval()
for (images, target) in metric_logger.log_every(data_loader, print_freq, header):
images = images.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
with torch.cuda.amp.autocast():
output = model(images)
loss = criterion(output, target)
(acc1, acc5) = accuracy(output, target, topk=(1, 5))
batch_size = images.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
metric_logger.synchronize_between_processes()
print('* {top1.global_avg:.3f} {top5.global_avg:.3f} loss {losses.global_avg:.3f}'.format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()} |
@pytest.mark.skipif((not HAVE_DEPS_FOR_RESOURCE_ESTIMATES), reason='pyscf and/or jax not installed.')
def test_estimate():
n = 152
lam = 3071.8
L = 275
dE = 0.001
chi = 10
res = _compute_cost(n, lam, L, dE, chi, 20000, 3, 3, 3)
assert np.isclose(res[0], 1663687)
assert np.isclose(res[1], )
assert np.isclose(res[2], 438447)
res = _compute_cost(n, lam, L, dE, chi, res[0], 3, 3, 3)
assert np.isclose(res[0], 1663707)
assert np.isclose(res[1], )
assert np.isclose(res[2], 438452)
res = _compute_cost(n, lam, L, dE, chi, 20000, 3, 5, 1)
assert np.isclose(res[0], 907828)
assert np.isclose(res[1], )
assert np.isclose(res[2], 219526)
res = _compute_cost(n, lam, L, dE, chi, res[0], 3, 5, 1)
assert np.isclose(res[0], 907828)
assert np.isclose(res[1], )
assert np.isclose(res[2], 219526) |
class Ansible(InstanceModule):
AnsibleException = AnsibleException
_ansible
def __call__(self, module_name, module_args=None, check=True, become=False, **kwargs):
result = self._host.backend.run_ansible(module_name, module_args, check=check, become=become, **kwargs)
if result.get('failed', False):
raise AnsibleException(result)
return result
_ansible
def get_variables(self):
return self._host.backend.get_variables()
def __repr__(self):
return '<ansible>' |
class FighterInfo():
def __init__(self, itemID, amount=None, state=None, abilities=None):
self.itemID = itemID
self.amount = amount
self.state = state
self.abilities = abilities
@classmethod
def fromFighter(cls, fighter):
if (fighter is None):
return None
info = cls(itemID=fighter.itemID, amount=fighter.amount, state=fighter.active, abilities={fa.effectID: fa.active for fa in fighter.abilities})
return info
def toFighter(self):
item = Market.getInstance().getItem(self.itemID, eager=('attributes', 'group.category'))
try:
fighter = Fighter(item)
except ValueError:
pyfalog.warning('Invalid item: {}'.format(self.itemID))
return None
if (self.amount is not None):
fighter.amount = self.amount
if (self.state is not None):
fighter.active = self.state
if (self.abilities is not None):
for ability in fighter.abilities:
ability.active = self.abilities.get(ability.effectID, ability.active)
return fighter
def __repr__(self):
return makeReprStr(self, ['itemID', 'amount', 'state', 'abilities']) |
class Criterion(torch.nn.Module):
def __init__(self, opt):
super(Criterion, self).__init__()
self.par = opt
self.angular_margin = opt.loss_arcface_angular_margin
self.feature_scale = opt.loss_arcface_feature_scale
self.class_map = torch.nn.Parameter(torch.Tensor(opt.n_classes, opt.embed_dim))
stdv = (1.0 / np.sqrt(self.class_map.size(1)))
self.class_map.data.uniform_((- stdv), stdv)
self.name = 'arcface'
self.lr = opt.loss_arcface_lr
self.ALLOWED_MINING_OPS = ALLOWED_MINING_OPS
self.REQUIRES_BATCHMINER = REQUIRES_BATCHMINER
self.REQUIRES_OPTIM = REQUIRES_OPTIM
def forward(self, batch, labels, **kwargs):
(bs, labels) = (len(batch), labels.to(self.par.device))
class_map = torch.nn.functional.normalize(self.class_map, dim=1)
cos_similarity = batch.mm(class_map.T).clamp(min=1e-10, max=(1 - 1e-10))
pick = torch.zeros(bs, self.par.n_classes).bool().to(self.par.device)
pick[(torch.arange(bs), labels)] = 1
original_target_logit = cos_similarity[pick]
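# ArcFace-style margin: take the angle of the target logit, add the angular margin,
# and scale the adjusted logits by feature_scale before the cross-entropy.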
theta = torch.acos(original_target_logit)
marginal_target_logit = torch.cos((theta + self.angular_margin))
class_pred = (self.feature_scale * (cos_similarity + (marginal_target_logit - original_target_logit).unsqueeze(1)))
loss = torch.nn.CrossEntropyLoss()(class_pred, labels)
return loss |
def upload_files_to_zenodo(filepaths, title, author=None, use_sandbox=False, record_name=None):
filepaths = [pathlib.Path(filepath) for filepath in filepaths]
root_depositions_url = get_root_depositions_url(use_sandbox)
if (record_name is not None):
if use_sandbox:
raise ValueError('Cannot use sandbox when `record_name` is provided')
old_deposition_id = get_zenodo_record_id(record_name)
old_deposition_url = f'{root_depositions_url}/{old_deposition_id}'
new_version_url = f'{old_deposition_url}/actions/newversion'
r = zenodo_api_with_helpful_fallback(new_version_url, 'post')
deposition_id = int(r.json()['links']['latest_draft'].split('/')[(- 1)])
else:
r = zenodo_api_with_helpful_fallback(root_depositions_url, 'post', json={}, headers=HEADERS)
try:
response = r.json()
except json.JSONDecodeError:
raise ValueError(f'Unexpected response: {r.text}')
try:
deposition_id = response['id']
except KeyError:
raise ValueError(f'Unexpected response: {response}')
metadata = create_metadata(title, author)
set_metadata(metadata, deposition_id, use_sandbox=use_sandbox)
filenames = [path.name for path in filepaths]
delete_filenames_within_record(filenames, deposition_id, use_sandbox=use_sandbox)
upload_filepaths(filepaths, deposition_id, use_sandbox=use_sandbox)
publish_deposition(deposition_id, use_sandbox=use_sandbox)
return deposition_id |
def gpubdb_argparser():
args = get_gpubdb_argparser_commandline_args()
with open(args['config_file']) as fp:
args = yaml.safe_load(fp.read())
args = add_empty_config(args)
KEYS_TO_ENV_VAR_MAPPING = {'data_dir': os.environ.get('DATA_DIRECTORY'), 'output_dir': os.environ.get('OUTPUT_DIRECTORY', './'), 'sheet': os.environ.get('GOOGLE_SPREADSHEET_NAME'), 'tab': os.environ.get('GOOGLE_SPREADSHEET_TAB'), 'scheduler_file_path': os.environ.get('SCHEDULER_FILE'), 'benchmark_runner_include_sql': os.environ.get('RUNNER_INCLUDE_SQL')}
for key in args.keys():
if ((args.get(key) is None) and (key in KEYS_TO_ENV_VAR_MAPPING)):
args[key] = KEYS_TO_ENV_VAR_MAPPING[key]
return args |
def js_bridge(window):
window.load_html('<html><body>TEST</body></html>')
assert_js(window, 'get_int', 420)
assert_js(window, 'get_float', 3.141)
assert_js(window, 'get_string', 'test')
assert_js(window, 'get_object', {'key1': 'value', 'key2': 420})
assert_js(window, 'get_objectlike_string', '{"key1": "value", "key2": 420}')
assert_js(window, 'get_single_quote', "te'st")
assert_js(window, 'get_double_quote', 'te"st')
assert_js(window, 'echo', 'test', 'test')
assert_js(window, 'multiple', [1, 2, 3], 1, 2, 3) |
class HouseholderInverseMultiplier(nn.Module):
def __init__(self, group, dim, learnable):
super(HouseholderInverseMultiplier, self).__init__()
self.group = group
self.dim = dim
H_group = self.constructH(group)
self.H_inv = nn.Parameter(H_group.t().repeat((dim // group), 1, 1), requires_grad=learnable)
def constructH(self, group):
H = torch.ones(1, 1).cuda()
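# Sylvester construction of a normalized Hadamard-style matrix: repeatedly stack
# [[H, H], [H, -H]] / sqrt(2) until it is `group` x `group` (group must be a power of two).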
for i in range(int(math.log2(group))):
H = (torch.cat((torch.cat([H, H], 1), torch.cat([H, (- H)], 1)), 0) / math.sqrt(2))
assert (H.shape[0] == group)
return H
def forward(self, x):
x_shape2 = x.shape
x = x.reshape((- 1), x.shape[(- 1)])
x = x.reshape((- 1), (self.dim // self.group), self.group).transpose(0, 1)
x = torch.bmm(x, self.H_inv).transpose(0, 1)
x = x.reshape(x_shape2)
return x |
class VQLPIPSWithDiscriminator(nn.Module):
def __init__(self, disc_start, codebook_weight=1.0, pixelloss_weight=1.0, disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0, perceptual_weight=1.0, use_actnorm=False, disc_conditional=False, disc_ndf=64, disc_loss='hinge'):
super().__init__()
assert (disc_loss in ['hinge', 'vanilla'])
self.codebook_weight = codebook_weight
self.pixel_weight = pixelloss_weight
self.perceptual_loss = LPIPS().eval()
self.perceptual_weight = perceptual_weight
self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels, n_layers=disc_num_layers, use_actnorm=use_actnorm, ndf=disc_ndf).apply(weights_init)
self.discriminator_iter_start = disc_start
if (disc_loss == 'hinge'):
self.disc_loss = hinge_d_loss
elif (disc_loss == 'vanilla'):
self.disc_loss = vanilla_d_loss
else:
raise ValueError(f"Unknown GAN loss '{disc_loss}'.")
print(f'VQLPIPSWithDiscriminator running with {disc_loss} loss.')
self.disc_factor = disc_factor
self.discriminator_weight = disc_weight
self.disc_conditional = disc_conditional
def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
if (last_layer is not None):
nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
else:
nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]
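# Adaptive discriminator weight: balance the generator (GAN) gradient against the
# reconstruction gradient at the last layer, then clamp for numerical stability.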
d_weight = (torch.norm(nll_grads) / (torch.norm(g_grads) + 0.0001))
d_weight = torch.clamp(d_weight, 0.0, 10000.0).detach()
d_weight = (d_weight * self.discriminator_weight)
return d_weight
def forward(self, codebook_loss, inputs, reconstructions, optimizer_idx, global_step, last_layer=None, cond=None, split='train'):
rec_loss = torch.abs((inputs.contiguous() - reconstructions.contiguous()))
if (self.perceptual_weight > 0):
p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
rec_loss = (rec_loss + (self.perceptual_weight * p_loss))
else:
p_loss = torch.tensor([0.0])
nll_loss = rec_loss
nll_loss = torch.mean(nll_loss)
if (optimizer_idx == 0):
if (cond is None):
assert (not self.disc_conditional)
logits_fake = self.discriminator(reconstructions.contiguous())
else:
assert self.disc_conditional
logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1))
g_loss = (- torch.mean(logits_fake))
try:
d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
except RuntimeError:
assert (not self.training)
d_weight = torch.tensor(0.0)
disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
loss = ((nll_loss + ((d_weight * disc_factor) * g_loss)) + (self.codebook_weight * codebook_loss.mean()))
log = {'{}/total_loss'.format(split): loss.clone().detach().mean(), '{}/quant_loss'.format(split): codebook_loss.detach().mean(), '{}/nll_loss'.format(split): nll_loss.detach().mean(), '{}/rec_loss'.format(split): rec_loss.detach().mean(), '{}/p_loss'.format(split): p_loss.detach().mean(), '{}/d_weight'.format(split): d_weight.detach(), '{}/disc_factor'.format(split): torch.tensor(disc_factor), '{}/g_loss'.format(split): g_loss.detach().mean()}
return (loss, log)
if (optimizer_idx == 1):
if (cond is None):
logits_real = self.discriminator(inputs.contiguous().detach())
logits_fake = self.discriminator(reconstructions.contiguous().detach())
else:
logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))
logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))
disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
d_loss = (disc_factor * self.disc_loss(logits_real, logits_fake))
log = {'{}/disc_loss'.format(split): d_loss.clone().detach().mean(), '{}/logits_real'.format(split): logits_real.detach().mean(), '{}/logits_fake'.format(split): logits_fake.detach().mean()}
return (d_loss, log) |
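A hedged sketch of how the two-optimizer loss module above is typically driven from a VQGAN-style training step; the model API, the `decoder.conv_out.weight` attribute path and the step wiring are assumptions, not taken from the source:
loss_fn = VQLPIPSWithDiscriminator(disc_start=10000)

def training_step(model, batch, optimizer_idx, global_step):
    # model is a hypothetical VQ autoencoder returning (reconstructions, codebook_loss)
    reconstructions, codebook_loss = model(batch)
    # the adaptive weight needs a grad-enabled parameter from the decoder's last layer
    last_layer = model.decoder.conv_out.weight  # assumed attribute path
    loss, log = loss_fn(codebook_loss, batch, reconstructions,
                        optimizer_idx, global_step,
                        last_layer=last_layer, split='train')
    return loss, log
# optimizer_idx == 0 updates the autoencoder/generator, optimizer_idx == 1 the discriminator |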
def sync_execute_write_reqs(write_reqs: List[WriteReq], storage: StoragePlugin, memory_budget_bytes: int, rank: int, event_loop: asyncio.AbstractEventLoop) -> PendingIOWork:
return event_loop.run_until_complete(execute_write_reqs(write_reqs=write_reqs, storage=storage, memory_budget_bytes=memory_budget_bytes, rank=rank)) |
def parse_args():
parser = argparse.ArgumentParser(description='Pretrain a transformers model on unlabeled audio data')
parser.add_argument('--dataset_name', type=str, default=None, help='The name of the dataset to use (via the datasets library).')
parser.add_argument('--dataset_config_names', nargs='+', type=str, required=True, help='The configuration names of the dataset to use (via the datasets library).')
parser.add_argument('--dataset_split_names', nargs='+', type=str, required=True, help='The names of the training data set splits to use (via the datasets library).')
parser.add_argument('--preprocessing_num_workers', type=int, default=None, help='The number of processes to use for the preprocessing.')
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument('--preprocessing_only', action='store_true', help='Only run the preprocessing script to be cached for future use')
parser.add_argument('--cache_dir', type=str, default=None, help='Where do you want to store the pretrained models downloaded from huggingface.co')
parser.add_argument('--validation_split_percentage', type=int, default=1, help='Percentage of training data that should be used for validation if no validation is present in dataset.')
parser.add_argument('--logging_steps', type=int, default=500, help='Number of steps between each logging')
parser.add_argument('--saving_steps', type=int, default=500, help='Number of steps between each checkpoint save')
parser.add_argument('--audio_column_name', type=str, default='audio', help="Column in the dataset that contains speech file path. Defaults to 'audio'")
parser.add_argument('--model_name_or_path', type=str, help='Path to pretrained model or model identifier from huggingface.co/models.', required=True)
parser.add_argument('--config_name', type=str, default=None, help='Pretrained config name or path if not the same as model_name')
parser.add_argument('--train_cache_file_name', type=str, default=None, help='Path to the train cached file name')
parser.add_argument('--validation_cache_file_name', type=str, default=None, help='Path to the validation cached file name')
parser.add_argument('--per_device_train_batch_size', type=int, default=8, help='Batch size (per device) for the training dataloader.')
parser.add_argument('--per_device_eval_batch_size', type=int, default=8, help='Batch size (per device) for the evaluation dataloader.')
parser.add_argument('--learning_rate', type=float, default=5e-05, help='Initial learning rate (after the potential warmup period) to use.')
parser.add_argument('--weight_decay', type=float, default=0.0, help='Weight decay to use.')
parser.add_argument('--num_train_epochs', type=int, default=3, help='Total number of training epochs to perform.')
parser.add_argument('--max_train_steps', type=int, default=None, help='Total number of training steps to perform. If provided, overrides num_train_epochs.')
parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
parser.add_argument('--gradient_checkpointing', action='store_true', help='If True, use gradient checkpointing to save memory at the expense of slower backward pass.')
parser.add_argument('--lr_scheduler_type', type=SchedulerType, default='linear', help='The scheduler type to use.', choices=['linear', 'cosine', 'cosine_with_restarts', 'polynomial', 'constant', 'constant_with_warmup'])
parser.add_argument('--num_warmup_steps', type=int, default=0, help='Number of steps for the warmup in the lr scheduler.')
parser.add_argument('--output_dir', type=str, default=None, help='Where to store the final model.')
parser.add_argument('--seed', type=int, default=0, help='A seed for reproducible training.')
parser.add_argument('--max_gumbel_temperature', type=float, default=2.0, help='Maximum temperature for gumbel softmax.')
parser.add_argument('--min_gumbel_temperature', type=float, default=0.5, help='Minimum temperature for gumbel softmax.')
parser.add_argument('--gumbel_temperature_decay', type=float, default=0.999995, help='Decay of gumbel temperature during training.')
parser.add_argument('--max_duration_in_seconds', type=float, default=5.0, help='Filter out audio files that are longer than `max_duration_in_seconds` seconds')
parser.add_argument('--min_duration_in_seconds', type=float, default=3.0, help='Filter out audio files that are shorter than `min_duration_in_seconds` seconds')
parser.add_argument('--pad_to_multiple_of', type=int, default=None, help='If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.0 (Volta).')
parser.add_argument('--adam_beta1', type=float, default=0.9, help='Beta1 for AdamW optimizer')
parser.add_argument('--adam_beta2', type=float, default=0.999, help='Beta2 for AdamW optimizer')
parser.add_argument('--adam_epsilon', type=float, default=1e-08, help='Epsilon for AdamW optimizer')
parser.add_argument('--push_to_hub', action='store_true', help='Whether or not to push the model to the Hub.')
parser.add_argument('--hub_model_id', type=str, help='The name of the repository to keep in sync with the local `output_dir`.')
parser.add_argument('--hub_token', type=str, help='The token to use to push to the Model Hub.')
args = parser.parse_args()
if args.push_to_hub:
assert (args.output_dir is not None), 'Need an `output_dir` to create a repo when `--push_to_hub` is passed.'
if (args.output_dir is not None):
os.makedirs(args.output_dir, exist_ok=True)
return args |
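A hedged sketch of invoking the parser above programmatically; the script name, dataset identifiers and model id are illustrative only, and only the three required arguments are supplied so everything else falls back to its default:
import sys

sys.argv = ['pretrain.py',
            '--dataset_config_names', 'clean',
            '--dataset_split_names', 'train.100',
            '--model_name_or_path', 'facebook/wav2vec2-base']
args = parse_args()
print(args.per_device_train_batch_size)  # 8 (default) |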
class RW():
def __init__(self, addr, imagefd, logger, seek_lock):
self.addr = addr
self.seek_lock = seek_lock
self.imagefd = imagefd
self.logger = helpers.get_child_logger(logger, 'FS')
self.logger.debug('File for {0}'.format(addr))
def read(self, offset, length):
self.logger.debug('{0} reading {1} bytes from [{2}]'.format(self.addr, length, hex(offset)))
with self.seek_lock:
    self.imagefd.seek(offset)
    data = self.imagefd.read(length)
return data
def write(self, offset, data):
self.logger.debug('{0} writing {1} bytes to {2}'.format(self.addr, len(data), hex(offset)))
with self.seek_lock:
    self.imagefd.seek(offset)
    self.imagefd.write(data) |
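A hedged usage sketch for the RW helper above; the image path, peer address and logger name are illustrative, and it assumes helpers.get_child_logger accepts a standard logging.Logger:
import logging
import threading

seek_lock = threading.Lock()
log = logging.getLogger('image-server')        # hypothetical logger name
with open('disk.img', 'r+b') as imagefd:       # hypothetical backing image
    rw = RW(('127.0.0.1', 51337), imagefd, log, seek_lock)
    rw.write(0, b'\x00' * 512)                 # zero the first 512 bytes
    first_sector = rw.read(0, 512) |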
class Block(nn.Module):
def __init__(self, inplanes, planes, stride=1, dilation=1, start_with_relu=True, norm_layer=None, norm_kwargs=None):
super(Block, self).__init__()
norm_kwargs = (norm_kwargs if (norm_kwargs is not None) else {})
if isinstance(planes, (list, tuple)):
assert (len(planes) == 3)
else:
planes = ((planes,) * 3)
outplanes = planes[(- 1)]
if ((outplanes != inplanes) or (stride != 1)):
self.skip = nn.Sequential()
self.skip.add_module('conv1', nn.Conv2d(inplanes, outplanes, 1, stride=stride, bias=False))
self.skip.add_module('bn1', norm_layer(num_features=outplanes, **norm_kwargs))
else:
self.skip = None
rep = OrderedDict()
for i in range(3):
rep[('act%d' % (i + 1))] = nn.ReLU(inplace=True)
rep[('conv%d' % (i + 1))] = SeparableConv2d(inplanes, planes[i], 3, stride=(stride if (i == 2) else 1), dilation=dilation, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
rep[('bn%d' % (i + 1))] = norm_layer(planes[i], **norm_kwargs)
inplanes = planes[i]
if (not start_with_relu):
del rep['act1']
else:
rep['act1'] = nn.ReLU(inplace=False)
self.rep = nn.Sequential(rep)
def forward(self, x):
skip = x
if (self.skip is not None):
skip = self.skip(skip)
x = (self.rep(x) + skip)
return x |
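A hedged usage sketch for the Xception-style Block above; it relies on SeparableConv2d from the same module, and a normalization class must be passed explicitly since norm_layer has no working default:
import torch
import torch.nn as nn

block = Block(inplanes=64, planes=(128, 128, 128), stride=2,
              start_with_relu=False, norm_layer=nn.BatchNorm2d)
x = torch.randn(2, 64, 56, 56)
out = block(x)   # 128 output channels; stride 2 downsamples the spatial dims |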
def single_order():
order = {'orderNo': 'E4CACBXXXXX528384A20C930', 'orderCost': '127.19', 'quantity': '2', 'status': 'success', 'paidBy': 'online', 'paidTo': None, 'refundAmount': None, 'purchaseDate': '2013-11-13', 'name': 'Pankaj Kumar', 'email': '', 'city': 'Pune', 'state': 'Maharashtra', 'country': 'India', 'address': 'address', 'zipcode': '411027', 'phoneNo': '22222', 'attendee': [{'ticketName': 'Free Visit', 'ticketId': 'TKEFAJC', 'name': 'Pankaj Kumar', 'email': '', 'checkin': 'no', 'ticketNo': 'E4CACB-694', 'status': 'attending', 'details': {'Date': ''}}]}
return [order] |
class FusedBiasLeakyReLU(nn.Module):
def __init__(self, num_channels, negative_slope=0.2, scale=(2 ** 0.5)):
super(FusedBiasLeakyReLU, self).__init__()
self.bias = nn.Parameter(torch.zeros(num_channels))
self.negative_slope = negative_slope
self.scale = scale
def forward(self, input):
return fused_bias_leakyrelu(input, self.bias, self.negative_slope, self.scale) |
def iam_group(var):
(yield block('variable', 'name', {}))
(yield block('variable', 'path', {'default': '/'}))
group = (yield block('resource', 'aws_iam_group', var.name, {'name': var.name}))
(yield block('output', 'name', {'value': var.name}))
(yield block('output', 'resource', {'value': group})) |
class CsvWriterTest(Csv, WriterTest, TestCase):
@incontext()  # decorator name is an assumption; the source shows only the bare call arguments
def test_fields(self, context):
context.set_input_fields(['foo', 'bar'])
context.write_sync(('a', 'b'), ('c', 'd'))
context.stop()
assert (self.readlines() == ('foo,bar', 'a,b', 'c,d'))
@incontext(skip_header=False)
def test_fields_with_headers(self, context):
context.set_input_fields(['foo', 'bar'])
context.write_sync(('a', 'b'), ('c', 'd'))
context.stop()
assert (self.readlines() == ('foo,bar', 'a,b', 'c,d'))
@incontext(skip_header=True)
def test_fields_without_headers(self, context):
context.set_input_fields(['foo', 'bar'])
context.write_sync(('a', 'b'), ('c', 'd'))
context.stop()
assert (self.readlines() == ('a,b', 'c,d'))
@incontext()
def test_fields_from_type(self, context):
context.set_input_type(namedtuple('Point', 'x y'))
context.write_sync((1, 2), (3, 4))
context.stop()
assert (self.readlines() == ('x,y', '1,2', '3,4'))
@incontext()
def test_nofields_multiple_args(self, context):
context.write_sync(L1, L2, L3, L4)
context.stop()
assert (self.readlines() == ('a,hey', 'b,bee', 'c,see', 'd,dee'))
@incontext()
def test_nofields_multiple_args_length_mismatch(self, context):
with pytest.raises(TypeError):
context.write_sync((L1, L2), (L3,))
@incontext()
def test_nofields_empty_args(self, context):
context.write_sync(EMPTY, EMPTY, EMPTY)
context.stop()
assert (self.readlines() == ('', '', '')) |
class ResidualBlock(nn.Module):
def __init__(self, in_channel, out_channel, stride=1):
super(ResidualBlock, self).__init__()
self.in_channel = in_channel
self.out_channel = out_channel
self.stride = stride
self.res_bottleneck = nn.Sequential(nn.BatchNorm2d(in_channel), nn.ReLU(inplace=True), nn.Conv2d(in_channel, (out_channel // 4), 1, 1, bias=False), nn.BatchNorm2d((out_channel // 4)), nn.ReLU(inplace=True), nn.Conv2d((out_channel // 4), (out_channel // 4), 3, stride, padding=1, bias=False), nn.BatchNorm2d((out_channel // 4)), nn.ReLU(inplace=True), nn.Conv2d((out_channel // 4), out_channel, 1, 1, bias=False))
self.shortcut = nn.Conv2d(in_channel, out_channel, 1, stride, bias=False)
def forward(self, x):
res = x
out = self.res_bottleneck(x)
if ((self.in_channel != self.out_channel) or (self.stride != 1)):
res = self.shortcut(x)
out += res
return out |
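A brief usage sketch for the pre-activation bottleneck above; the 1x1 shortcut is applied whenever the channel count or stride changes:
import torch

block = ResidualBlock(in_channel=64, out_channel=256, stride=2)
x = torch.randn(1, 64, 32, 32)
out = block(x)   # -> torch.Size([1, 256, 16, 16]) |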
class AdvertiserViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = AdvertiserSerializer
lookup_field = 'slug'
def get_queryset(self):
if self.request.user.is_staff:
return Advertiser.objects.all()
return self.request.user.advertisers.all()
@action(detail=True, methods=['get'])
def report(self, request, slug=None):
advertiser = self.get_object()
start_date = parse_date_string(request.query_params.get('start_date'))
end_date = parse_date_string(request.query_params.get('end_date'))
if (not start_date):
start_date = (timezone.now() - timedelta(days=30))
queryset = AdImpression.objects.filter(advertisement__flight__campaign__advertiser=advertiser).filter(date__gte=start_date)
if end_date:
queryset = queryset.filter(date__lte=end_date)
advertiser_report = AdvertiserReport(queryset)
advertiser_report.generate()
flights = []
for flight in Flight.objects.filter(campaign__advertiser=advertiser):
flight_queryset = queryset.filter(advertisement__flight=flight)
report = AdvertiserReport(flight_queryset)
report.generate()
if report.total['views']:
flight_data = FlightSerializer(flight).data
flight_data['report'] = {'total': report.total, 'days': report.results}
flight_data['advertisements'] = []
for ad in flight.advertisements.all():
ad_queryset = queryset.filter(advertisement=ad)
ad_report = AdvertiserReport(ad_queryset)
ad_report.generate()
if ad_report.total['views']:
ad_data = AdvertisementSerializer(ad).data
ad_data['report'] = {'total': ad_report.total, 'days': ad_report.results}
flight_data['advertisements'].append(ad_data)
flights.append(flight_data)
return Response({'total': advertiser_report.total, 'days': advertiser_report.results, 'flights': flights}) |
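A hedged sketch of the standard DRF router wiring for the viewset above; the URL prefix and basename are illustrative, not taken from the source project:
from rest_framework import routers

router = routers.SimpleRouter()
router.register(r'advertisers', AdvertiserViewSet, basename='advertisers')
urlpatterns = router.urls
# with lookup_field = 'slug', the report action resolves to GET /advertisers/<slug>/report/ |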