code stringlengths 281 23.7M |
|---|
class Mpd2(base.ThreadPoolText):
    """Polling widget that displays the status of an MPD (Music Player
    Daemon) server.

    A python-mpd2 ``MPDClient`` is lazily (re)connected in ``connected()``;
    ``poll()`` fetches status + current song and renders them through
    ``formatter()`` using ``status_format`` (or ``idle_format`` when the
    player is stopped with an empty playlist).
    """

    # User-tunable options, merged onto the instance by add_defaults().
    defaults = [
        ('update_interval', 1, 'Interval of update widget'),
        ('host', 'localhost', 'Host of mpd server'),
        ('port', 6600, 'Port of mpd server'),
        ('password', None, 'Password for auth on mpd server'),
        ('mouse_buttons', keys, 'b_num -> action.'),
        ('play_states', play_states, 'Play state mapping'),
        ('format_fns', format_fns, 'Dictionary of format methods'),
        ('command', default_cmd, 'command to be executed by mapped mouse button.'),
        ('prepare_status', status_dict, 'characters to show the status of MPD'),
        ('status_format', default_format, 'format for displayed song info.'),
        ('idle_format', default_idle_format, 'format for status when mpd has no playlist.'),
        ('idle_message', default_idle_message, 'text to display when mpd is idle.'),
        ('undefined_value', default_undefined_status_value, 'text to display when status key is undefined.'),
        ('timeout', 30, 'MPDClient timeout'),
        ('idletimeout', 5, 'MPDClient idle command timeout'),
        ('no_connection', 'No connection', 'Text when mpd is disconnected'),
        ('color_progress', None, 'Text color to indicate track progress.'),
        ('space', '-', 'Space keeper'),
    ]

    def __init__(self, **config):
        super().__init__('', **config)
        self.add_defaults(Mpd2.defaults)
        if self.color_progress:
            # Normalize the user-supplied color to hex form for the
            # pango markup emitted in formatter().
            self.color_progress = utils.hex(self.color_progress)

    def _configure(self, qtile, bar):
        super()._configure(qtile, bar)
        # The client is created here rather than in __init__ so every
        # (re)configure starts from a fresh, unconnected client.
        self.client = MPDClient()
        self.client.timeout = self.timeout
        self.client.idletimeout = self.idletimeout

    def connected(self):
        """Return True if the server answers a ping, reconnecting (and
        authenticating) first when the connection is down."""
        try:
            self.client.ping()
        except (socket_error, ConnectionError):
            try:
                self.client.connect(self.host, self.port)
                if self.password:
                    self.client.password(self.password)
            except (socket_error, ConnectionError, CommandError):
                return False
        return True

    def poll(self):
        # NOTE(review): `self.connected` is referenced, not called —
        # presumably `connected` carries a @property decorator that was
        # lost in this copy; otherwise this truthiness check always
        # passes. Confirm against upstream.
        if self.connected:
            return self.update_status()
        else:
            return self.no_connection

    def update_status(self):
        # Batch status + currentsong into a single MPD command-list
        # round trip.
        self.client.command_list_ok_begin()
        self.client.status()
        self.client.currentsong()
        (status, current_song) = self.client.command_list_end()
        return self.formatter(status, current_song)

    def button_press(self, x, y, button):
        """Dispatch a mouse button to its mapped action: a widget method
        first, falling back to a raw MPDClient command of the same name."""
        base.ThreadPoolText.button_press(self, x, y, button)
        m_name = self.mouse_buttons[button]
        # NOTE(review): `self.connected` not called here either — see poll().
        if self.connected:
            if hasattr(self, m_name):
                self.__try_call(m_name)
            elif hasattr(self.client, m_name):
                self.__try_call(m_name, self.client)

    def __try_call(self, attr_name, obj=None):
        # Invoke attr_name on obj (default: self), logging instead of
        # raising when the attribute is missing or not callable.
        err1 = 'Class {Class} has no attribute {attr}.'
        err2 = 'attribute "{Class}.{attr}" is not callable.'
        context = (obj or self)
        try:
            getattr(context, attr_name)()
        except (AttributeError, TypeError) as e:
            if isinstance(e, AttributeError):
                err = err1.format(Class=type(context).__name__, attr=attr_name)
            else:
                err = err2.format(Class=type(context).__name__, attr=attr_name)
            logger.exception('%s %s', err, e.args[0])

    def toggle(self):
        """Toggle play/pause (intended to be bound via mouse_buttons)."""
        status = self.client.status()
        play_status = status['state']
        if (play_status == 'play'):
            self.client.pause()
        else:
            self.client.play()

    def formatter(self, status, current_song):
        """Build the display string from MPD's status/currentsong dicts."""
        # Any key missing from MPD's reply renders as `undefined_value`
        # instead of raising KeyError during format_map().
        song_info = defaultdict((lambda : self.undefined_value))
        song_info['play_status'] = self.play_states[status['state']]
        # Stopped with an empty playlist -> idle layout.
        if ((status['state'] == 'stop') and (current_song == {})):
            song_info['idle_message'] = self.idle_message
            fmt = self.idle_format
        else:
            fmt = self.status_format
        for k in current_song:
            song_info[k] = current_song[k]
        # The song's 'time' would be clobbered by status['time'] below;
        # preserve it under 'fulltime'.
        song_info['fulltime'] = song_info['time']
        del song_info['time']
        song_info.update(status)
        if (song_info['updating_db'] == self.undefined_value):
            song_info['updating_db'] = '0'
        # prepare_status values are either plain replacement characters
        # or callables that mutate song_info in place.
        if (not callable(self.prepare_status['repeat'])):
            for k in self.prepare_status:
                if ((k in status) and (status[k] != '0')):
                    song_info[k] = self.prepare_status[k]
                else:
                    song_info[k] = self.space
        else:
            self.prepare_formatting(song_info)
        # Track progress, needed for 'remaining' and colored progress.
        if (('remaining' in self.status_format) or self.color_progress):
            total = (float(song_info['fulltime']) if (song_info['fulltime'] != self.undefined_value) else 0.0)
            elapsed = (float(song_info['elapsed']) if (song_info['elapsed'] != self.undefined_value) else 0.0)
            song_info['remaining'] = '{:.2f}'.format(float((total - elapsed)))
        # MPD numbers playlist positions from 0; expose a 1-based one.
        if (('song' in self.status_format) and (song_info['song'] != self.undefined_value)):
            song_info['currentsong'] = str((int(song_info['song']) + 1))
        # Fall back through alternative artist-like tags when 'artist'
        # is absent.
        if (('artist' in self.status_format) and (song_info['artist'] == self.undefined_value)):
            artist_keys = ('albumartist', 'performer', 'composer', 'conductor', 'ensemble')
            for key in artist_keys:
                if (song_info[key] != self.undefined_value):
                    song_info['artist'] = song_info[key]
                    break
        # Multi-valued tags arrive as lists; flatten for formatting.
        for key in song_info:
            if isinstance(song_info[key], list):
                song_info[key] = ', '.join(song_info[key])
        # Apply user format functions: 'all' first, then per-key ones.
        if ('all' in self.format_fns):
            for key in song_info:
                song_info[key] = self.format_fns['all'](song_info[key])
        for fmt_fn in self.format_fns:
            if ((fmt_fn in song_info) and (fmt_fn != 'all')):
                song_info[fmt_fn] = self.format_fns[fmt_fn](song_info[fmt_fn])
        if (not isinstance(fmt, str)):
            fmt = str(fmt)
        formatted = fmt.format_map(song_info)
        # Color the already-played fraction of the text via pango markup.
        # (elapsed/total are guaranteed bound here: color_progress truthy
        # implies the progress branch above ran.)
        if (self.color_progress and (status['state'] != 'stop')):
            try:
                progress = int(((len(formatted) * elapsed) / total))
                formatted = '<span color="{0}">{1}</span>{2}'.format(self.color_progress, formatted[:progress], formatted[progress:])
            except (ZeroDivisionError, ValueError):
                pass
        return formatted

    def prepare_formatting(self, status):
        # Delegate to the user-supplied callables (see prepare_status).
        for key in self.prepare_status:
            self.prepare_status[key](status, key, self.space)

    def finalize(self):
        """Close the MPD connection on widget teardown (best effort)."""
        super().finalize()
        try:
            self.client.close()
            self.client.disconnect()
        except ConnectionError:
            pass
def _check_mopidy_extensions_user() -> Dict[(str, Tuple[(bool, str)])]:
    """Report the availability of selected Mopidy extensions.

    Runs ``mopidy config`` for the current user, parses its INI output,
    and returns a mapping of extension name to a
    ``(enabled, human-readable reason)`` tuple.
    """
    proc = subprocess.run(
        ['mopidy', 'config'],
        stdout=subprocess.PIPE,
        universal_newlines=True,
        check=True,
    )
    parser = configparser.ConfigParser()
    parser.read_string(proc.stdout)

    extensions = {}
    for name in ('spotify', 'soundcloud', 'jamendo'):
        try:
            # A missing section/option means the extension is not set up.
            enabled = (parser[name]['enabled'] == 'true')
        except KeyError:
            enabled = False
        if enabled:
            extensions[name] = (True, 'Extension probably functional')
        else:
            extensions[name] = (False, 'Extension disabled')
    return extensions
class Window(Gtk.Window):
    """Application base window.

    Tracks every live instance in ``Window.windows``, applies dialog-like
    behavior (modality, type hint, positioning) when requested, and wires
    standard key handling: Escape / <Primary>w close the window, F11
    toggles fullscreen for non-dialog windows.
    """

    # All currently-open instances; each removes itself on 'destroy'.
    windows: list[Gtk.Window] = []
    # When true, show_maybe() suppresses the initial show() call.
    # (Attribute name typo preserved from the original code.)
    _preven_inital_show = False

    def __init__(self, *args, **kwargs):
        self._header_bar = None
        # 'dialog' (default True) requests dialog-style behavior.
        dialog = kwargs.pop('dialog', True)
        super().__init__(*args, **kwargs)
        type(self).windows.append(self)
        if dialog:
            if is_wayland():
                # Wayland offers no global stacking control; modality
                # keeps the dialog above its parent.
                self.set_modal(True)
            self.set_type_hint(Gdk.WindowTypeHint.DIALOG)
            self.set_destroy_with_parent(True)
            self.set_position(Gtk.WindowPosition.CENTER_ON_PARENT)
        connect_obj(self, 'destroy', type(self).windows.remove, self)
        self.connect('key-press-event', self._on_key_press)

    def _on_key_press(self, widget, event):
        # Dialogs close on Escape; normal windows close on <Primary>w.
        is_dialog = (self.get_type_hint() == Gdk.WindowTypeHint.DIALOG)
        if ((is_dialog and is_accel(event, 'Escape')) or ((not is_dialog) and is_accel(event, '<Primary>w'))):
            # An Entry being edited inside a TreeView: hand focus back to
            # the tree (cancelling the cell edit) instead of closing.
            if (isinstance(self.get_focus(), Gtk.Entry) and isinstance(self.get_focus().get_parent(), Gtk.TreeView)):
                self.get_focus().get_parent().grab_focus()
                return Gdk.EVENT_PROPAGATE
            self.close()
            return Gdk.EVENT_STOP
        if ((not is_dialog) and is_accel(event, 'F11')):
            self.toggle_fullscreen()
            return Gdk.EVENT_STOP
        return Gdk.EVENT_PROPAGATE

    def toggle_fullscreen(self):
        """Switch between fullscreen and windowed state."""
        window = self.get_window()
        if (not window):
            # Not realized yet -> treat as windowed.
            is_fullscreen = False
        else:
            is_fullscreen = (window.get_state() & Gdk.WindowState.FULLSCREEN)
        if is_fullscreen:
            self.unfullscreen()
        else:
            self.fullscreen()

    def set_default_size(self, width, height):
        if self._header_bar:
            # Compensate for the space the header bar occupies.
            (width, height) = fix_default_size(width, height)
        super().set_default_size(width, height)

    def use_header_bar(self):
        """Install a Gtk.HeaderBar as the titlebar.

        Returns the header bar, or False when header bars should not be
        used in this environment. Must only be called once.
        """
        assert (not self._header_bar)
        if (not should_use_header_bar()):
            return False
        header_bar = Gtk.HeaderBar()
        header_bar.set_show_close_button(True)
        header_bar.show()
        # set_titlebar() can clear the window title; save and restore it.
        old_title = self.get_title()
        self.set_titlebar(header_bar)
        if (old_title is not None):
            self.set_title(old_title)
        self._header_bar = header_bar
        # Re-apply the default size so the header-bar size fix kicks in.
        self.set_default_size(*self.get_default_size())
        return header_bar

    def has_close_button(self):
        """Best-effort guess whether the decoration shows a close button."""
        if (self.get_type_hint() == Gdk.WindowTypeHint.NORMAL):
            return True
        if (os.name == 'nt'):
            return True
        if (sys.platform == 'darwin'):
            return True
        if (self._header_bar is not None):
            return self._header_bar.get_show_close_button()
        return True

    def present(self):
        # On X11, present with a real server timestamp so focus-stealing
        # prevention does not swallow the request.
        try:
            from gi.repository import GdkX11
        except ImportError:
            super().present()
        else:
            window = self.get_window()
            if (window and isinstance(window, GdkX11.X11Window)):
                timestamp = GdkX11.x11_get_server_time(window)
                self.present_with_time(timestamp)
            else:
                super().present()

    def set_transient_for(self, parent):
        """Like Gtk.Window.set_transient_for, but falls back to the main
        application window when parent is missing or not a toplevel."""
        is_toplevel = (parent and (parent.props.type == Gtk.WindowType.TOPLEVEL))
        if ((parent is None) or (not is_toplevel)):
            if parent:
                print_w(('Not a toplevel window set for: %r' % self))
            from quodlibet import app
            parent = app.window
        super().set_transient_for(parent)

    # NOTE(review): takes `cls` — presumably a @classmethod whose
    # decorator was lost in this copy; confirm against upstream.
    def prevent_inital_show(cls, value):
        cls._preven_inital_show = bool(value)

    def show_maybe(self):
        """show() unless initial showing is suppressed; return whether
        the window was (or would have been) shown."""
        if (not self._preven_inital_show):
            self.show()
        return (not self._preven_inital_show)
class RoutingTotals(ctypes.Structure):
    """ctypes mirror of SWMM's flow-routing mass-balance totals struct.

    Field order and types must match the C struct exactly (13 doubles).
    ``_py_alias_ids`` maps each C field name to a descriptive Python-side
    alias.
    """

    _fields_ = [
        ('dwInflow', ctypes.c_double),
        ('wwInflow', ctypes.c_double),
        ('gwInflow', ctypes.c_double),
        ('iiInflow', ctypes.c_double),
        ('exInflow', ctypes.c_double),
        ('flooding', ctypes.c_double),
        ('outflow', ctypes.c_double),
        ('evapLoss', ctypes.c_double),
        ('seepLoss', ctypes.c_double),
        ('reacted', ctypes.c_double),
        ('initStorage', ctypes.c_double),
        ('finalStorage', ctypes.c_double),
        ('pctError', ctypes.c_double),
    ]

    _py_alias_ids = {
        'dwInflow': 'dry_weather_inflow',
        'wwInflow': 'wet_weather_inflow',
        'gwInflow': 'groundwater_inflow',
        'iiInflow': 'II_inflow',
        'exInflow': 'external_inflow',
        'flooding': 'flooding',
        'outflow': 'outflow',
        'evapLoss': 'evaporation_loss',
        'seepLoss': 'seepage_loss',
        'reacted': 'reacted',
        'initStorage': 'initial_storage',
        'finalStorage': 'final_storage',
        'pctError': 'routing_error',
    }
class Effect3962(BaseEffect):
    """Minmatar Defensive Systems subsystem bonus: boosts armor repair
    amount and shield boost amount on fitted modules."""

    type = 'passive'

    # NOTE(review): defined without `self`, matching the surrounding
    # effect-handler convention in this codebase.
    def handler(fit, src, context, projectionRange, **kwargs):
        # Armor repairers (modules requiring Repair Systems).
        armor_filter = (lambda mod: mod.item.requiresSkill('Repair Systems'))
        fit.modules.filteredItemBoost(
            armor_filter,
            'armorDamageAmount',
            src.getModifiedItemAttr('subsystemBonusMinmatarDefensive'),
            skill='Minmatar Defensive Systems',
            **kwargs)
        # Shield boosters (modules requiring Shield Operation).
        shield_filter = (lambda mod: mod.item.requiresSkill('Shield Operation'))
        fit.modules.filteredItemBoost(
            shield_filter,
            'shieldBonus',
            src.getModifiedItemAttr('subsystemBonusMinmatarDefensive'),
            skill='Minmatar Defensive Systems',
            **kwargs)
class WindowSetFullScreenEventSequenceTest(EventSequenceTest, unittest.TestCase):
    """Verify that set_fullscreen() dispatches on_resize then on_expose,
    in that order."""

    # The sequence is complete after the expose event (step 2).
    last_sequence = 2

    def on_resize(self, width, height):
        # The fullscreen switch must resize first (step 1).
        self.check_sequence(1, 'on_resize')

    def on_expose(self):
        # A redraw follows the resize (step 2, the final event).
        self.check_sequence(2, 'on_expose')

    def test_method(self):
        window.Window._enable_event_queue = True
        win = window.Window()
        try:
            # Drain any creation-time events before attaching handlers.
            win.dispatch_events()
            win.push_handlers(self)
            win.set_fullscreen()
            self.check_sequence(0, 'begin')
            while not (win.has_exit or self.finished):
                win.dispatch_events()
            self.check()
        finally:
            win.close()
class ModalEmbeddings(nn.Module):
def __init__(self, config, encoder, embeddings):
super().__init__()
self.config = config
self.encoder = encoder
self.proj_embeddings = nn.Linear(config.modal_hidden_size, config.hidden_size)
self.position_embeddings = embeddings.position_embeddings
self.token_type_embeddings = embeddings.token_type_embeddings
self.word_embeddings = embeddings.word_embeddings
self.LayerNorm = embeddings.LayerNorm
self.dropout = nn.Dropout(p=config.hidden_dropout_prob)
def forward(self, input_modal, start_token=None, end_token=None, position_ids=None, token_type_ids=None):
token_embeddings = self.proj_embeddings(self.encoder(input_modal))
seq_length = token_embeddings.size(1)
if (start_token is not None):
start_token_embeds = self.word_embeddings(start_token)
seq_length += 1
token_embeddings = torch.cat([start_token_embeds.unsqueeze(1), token_embeddings], dim=1)
if (end_token is not None):
end_token_embeds = self.word_embeddings(end_token)
seq_length += 1
token_embeddings = torch.cat([token_embeddings, end_token_embeds.unsqueeze(1)], dim=1)
if (position_ids is None):
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_modal.device)
position_ids = position_ids.unsqueeze(0).expand(input_modal.size(0), seq_length)
if (token_type_ids is None):
token_type_ids = torch.zeros((input_modal.size(0), seq_length), dtype=torch.long, device=input_modal.device)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = ((token_embeddings + position_embeddings) + token_type_embeddings)
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings |
def optimize_one_inter_rep(inter_rep, layer_name, target, probe, lr=0.001, max_epoch=256, loss_func=None, verbose=False):
    """Gradient-optimize one intermediate representation so that *probe*
    classifies it as *target*.

    Args:
        inter_rep: tensor holding the representation to optimize (cloned,
            the input is not modified).
        layer_name: label used only in the progress-bar description.
        target: class index the probe output should match.
        probe: module mapping the representation to class logits.
        lr: Adam learning rate.
        max_epoch: number of optimization steps.
        loss_func: loss callable; defaults to a fresh ``nn.CrossEntropyLoss``.
        verbose: show a tqdm progress bar with the running loss.

    Returns:
        A detached-from-graph clone of the optimized representation.
    """
    if loss_func is None:
        # Created per call: a module instance as a default argument would
        # be built once at import time and shared across every call.
        loss_func = nn.CrossEntropyLoss()
    # Optimization runs in full precision even inside autocast regions.
    with autocast('cuda', enabled=False):
        target_clone = torch.Tensor(target).to(torch.long).to(torch_device).unsqueeze(0)
        tensor = inter_rep.clone().to(torch_device).requires_grad_(True)
        optimizer = torch.optim.Adam([tensor], lr=lr)
        epochs = tqdm(range(max_epoch), leave=False) if verbose else range(max_epoch)
        for i in epochs:
            optimizer.zero_grad()
            loss = loss_func(probe(tensor), target_clone)
            loss.backward()
            optimizer.step()
            if verbose:
                epochs.set_description(f'At layer {layer_name} [{(i + 1)}/{max_epoch}]; Loss: {loss.item():.3f}')
        return tensor.clone()
class PAZ2(Stage):
    """GSE2/IMS 'PAZ2' response stage: a poles-and-zeros description of an
    instrument response.

    ``_format`` gives the fixed-column field layout per dialect; the
    ('IMS1.0', 'USA_DMC') dialect shifts the npoles/nzeros/description
    columns left by one relative to the default.
    """

    _format = {
        None: [E(1, 4, x_fixed(b'PAZ2'), dummy=True), E(6, 7, 'i2'), E(9, 9, 'a1'), E(11, 25, 'e15.8'), E(27, 30, 'i4'), E(32, 39, 'f8.3'), E(41, 43, 'i3'), E(45, 47, 'i3'), E(49, None, 'a25+')],
        ('IMS1.0', 'USA_DMC'): [E(1, 4, x_fixed(b'PAZ2'), dummy=True), E(6, 7, 'i2'), E(9, 9, 'a1'), E(11, 25, 'e15.8'), E(27, 30, 'i4'), E(32, 39, 'f8.3'), E(40, 42, 'i3'), E(44, 46, 'i3'), E(48, None, 'a25+')],
    }

    output_units = Units.T(help='output units code (V=volts, A=amps, C=counts)')
    scale_factor = Float.T(help='scale factor [ouput units/input units]')
    decimation = Int.T(optional=True, help='decimation')
    correction = Float.T(optional=True, help='group correction applied [s]')
    npoles = Int.T(help='number of poles')
    nzeros = Int.T(help='number of zeros')
    description = String.T(default='', optional=True, help='description')
    poles = List.T(Complex.T())
    zeros = List.T(Complex.T())
    comments = List.T(String.T(optional=True))

    def append_dataline(self, line, version_dialect):
        """Parse one PAZ2 data line and append it as a pole or a zero.

        Data lines arrive poles first (``npoles`` of them), then zeros;
        any line beyond ``npoles + nzeros`` raises ``DeserializeError``.
        """
        d = PAZ2Data.deserialize(line, version_dialect)
        v = complex(d.real, d.imag)
        # Position of this value among all poles+zeros seen so far.
        i = (len(self.poles) + len(self.zeros))
        if (i < self.npoles):
            self.poles.append(v)
        elif (i < (self.npoles + self.nzeros)):
            self.zeros.append(v)
        else:
            raise DeserializeError('more poles and zeros than expected')

    def write_datalines(self, writer):
        """Emit poles first, then zeros — the order append_dataline expects."""
        for pole in self.poles:
            PAZ2Data(real=pole.real, imag=pole.imag).write(writer)
        for zero in self.zeros:
            PAZ2Data(real=zero.real, imag=zero.imag).write(writer)
def split(k, port, should_fail=False):
    """Split *port* into *k* sub-ports via ``devlink port split``.

    When ``should_fail`` is set the port is expected to be unsplittable:
    stderr output then counts as success. Returns the list of created
    sub-ports (``create_split_group``) when a split happened — including
    the unexpected-success case, so the new ports can still be cleaned
    up — and ``[]`` otherwise.
    """
    cmd = ('devlink port split %s count %s' % (port.bus_info, k))
    (stdout, stderr) = run_command(cmd, should_fail=should_fail)
    if should_fail:
        # Expected failure: an error on stderr means the port correctly
        # refused the split.
        if (not test((stderr != ''), ('%s is unsplittable' % port.name))):
            print(('split an unsplittable port %s' % port.name))
            # The split unexpectedly succeeded, so the sub-ports exist and
            # must be tracked. (In this copy this return sat outside the
            # `if`, returning a bogus split group on every expected
            # failure; upstream has it inside.)
            return create_split_group(port, k)
    else:
        if (stderr == ''):
            return create_split_group(port, k)
        print(("didn't split a splittable port %s" % port.name))
    return []
# NOTE(review): the two lines below look like the arguments of
# @mock.patch(...) class decorators whose '@mock.patch' prefix was lost
# in this copy; as written they are not valid statements. Restore from
# the upstream satpy test module.
('satpy.readers.electrol_hrit.HRITGOMSFileHandler.__init__', return_value=None)
('satpy.readers.electrol_hrit.HRITFileHandler.get_dataset', return_value={})
class TestHRITGOMSFileHandler(unittest.TestCase):
    """Tests for the Electro-L (GOMS) HRIT file handler."""

    # NOTE(review): likely a stripped @mock.patch(...) decorator as well.
    ('satpy.readers.electrol_hrit.HRITGOMSFileHandler.calibrate')
    def test_get_dataset(self, calibrate_mock, *mocks):
        """get_dataset must calibrate and attach orbital/platform metadata."""
        key = make_dataid(name='CH1', calibration='counts')
        fake_array = mock.MagicMock()
        fake_array.attrs = dict()
        calibrate_mock.return_value = fake_array
        fh = HRITGOMSFileHandler()
        fh.platform_name = 'Electro'
        fh.mda = {'projection_parameters': {'SSP_longitude': 0.0}, 'orbital_parameters': {'satellite_nominal_longitude': 0.5}}
        info = {'units': 'm', 'standard_name': 'electro', 'wavelength': 5.0}
        output = fh.get_dataset(key, info)
        calibrate_mock.assert_called()
        # Expected attrs: the dataset info plus handler-derived metadata.
        attrs_exp = info.copy()
        attrs_exp.update({'orbital_parameters': {'satellite_nominal_longitude': 0.5, 'satellite_nominal_latitude': 0.0, 'projection_longitude': 0.0, 'projection_latitude': 0.0, 'projection_altitude': .0}, 'platform_name': 'Electro', 'sensor': 'msu-gs'})
        # attrs_exp must be a subset of output.attrs.
        assert (dict(output.attrs, **attrs_exp) == output.attrs)

    def test_calibrate(self, *mocks):
        """Counts pass through; radiance/BT map through the prologue LUT."""
        # LUT in milli-units, one row per channel.
        lut = np.linspace(1000000.0, 1600000.0, num=1024).astype(np.int32)
        lut = np.tile(lut, (10, 1))
        fh = HRITGOMSFileHandler()
        fh.prologue = {'ImageCalibration': lut}
        fh.chid = 1
        counts = DataArray(da.linspace(1, 1023, 25, chunks=5, dtype=np.uint16).reshape(5, 5))
        with pytest.raises(NotImplementedError):
            fh.calibrate(counts, 'nonsense')
        out = fh.calibrate(counts, 'counts')
        assert np.all((out.values == counts.values))
        out = fh.calibrate(counts, 'radiance')
        assert np.allclose(out.values, (lut[(0, counts)] / 1000.0))
        out = fh.calibrate(counts, 'brightness_temperature')
        assert np.allclose(out.values, (lut[(0, counts)] / 1000.0))

    def test_get_area_def(self, *mocks):
        """Geostationary area definition is derived from the mda header."""
        example_area_ext = ((- 5566748.0802), (- 1854249.1809), 5570748.6178, 2000.2688)
        fh = HRITGOMSFileHandler()
        # NOTE(review): the 'cfac'/'lfac' values are missing in this copy
        # (`'cfac': ,` is a syntax error) — restore from the upstream
        # satpy test fixture.
        fh.mda = {'cfac': , 'lfac': , 'coff': 1392.0, 'loff': 0.0, 'number_of_lines': 464, 'number_of_columns': 2784, 'projection_parameters': {'SSP_longitude': 0.0}}
        area = fh.get_area_def(True)
        assert np.allclose(np.array(area.area_extent), np.array(example_area_ext))
def test_gitlab_webhook_payload_known_issue():
    """A GitLab tag-push webhook is translated to the expected schema,
    with commit metadata resolved via the lookup_commit callback.

    NOTE(review): several URL string literals below were truncated in
    this copy (e.g. ``'url': ' 'date': ...``), leaving invalid syntax —
    restore the full URLs from the upstream test fixture.
    """
    expected = {'commit': '770830e7cae6db4f7fc0f4dbe20bd5f', 'ref': 'refs/tags/fourthtag', 'git_url': ':someuser/some-test-project.git', 'commit_info': {'url': ' 'date': '2019-10-17T18:07:48Z', 'message': 'Update Dockerfile'}}
    def lookup_commit(repo_id, commit_sha):
        # Fake resolver: only the known sha yields commit metadata.
        if (commit_sha == '770830e7cae6db4f7fc0f4dbe20bd5f'):
            return {'added': [], 'author': {'name': 'Some User', 'email': ''}, 'url': ' 'message': 'Update Dockerfile', 'removed': [], 'modified': ['Dockerfile'], 'id': '770830e7cae6db4f7fc0f4dbe20bd5f'}
        return None
    assertSchema('gitlab_webhook_known_issue', expected, gl_webhook, lookup_commit=lookup_commit)
def test_unexpected_kwarg_node():
    """Node constructors must reject unknown keyword arguments and leave
    the model unmodified."""
    model = Model()
    cases = (
        (Node, 'test_node'),
        (Input, 'test_input'),
        (Storage, 'test_storage'),
    )
    for node_cls, name in cases:
        with pytest.raises(TypeError):
            node_cls(model, name, invalid=True)
    # A failed construction must not register anything on the model.
    assert (not model.nodes)
def sample_min_with_randFeatures(num_features, d, nObservations, value_of_nObservations, alpha, l, noise, initial_point, optimize_method='L-BFGS-B', maximize=False, bnds=None):
    """Minimize (or maximize) one posterior sample of a GP surrogate built
    from random Fourier features.

    Draws random frequencies W and phases b approximating an RBF kernel,
    forms the Bayesian linear-regression posterior over the feature
    weights given the observations, samples one weight vector theta, and
    optimizes the resulting surrogate with scipy.

    # NOTE(review): assumes nObservations is an (n_obs, d) array and
    # value_of_nObservations the matching targets — confirm with callers.

    Returns the scipy.optimize result object.
    """
    # Lengthscales for the random features (isotropic: one value repeated).
    # NOTE(review): dividing the (num_features, d) frequency matrix by a
    # length-num_features vector only broadcasts when d == num_features;
    # a length-d lengthscale vector may have been intended — confirm.
    l = np.array(([l] * num_features))
    W = np.divide(npr.randn(num_features, d), l)
    # Random phases in [0, 2*pi).
    b = ((2 * np.pi) * npr.uniform(0, 1, num_features))
    num_of_nObservations = len(nObservations)
    b_for_nObservations = np.array(([b] * num_of_nObservations)).T
    # Feature map at the observations: sqrt(2*alpha/F) * cos(W x + b),
    # shape (num_features, n_obs).
    phi_vector_inverse = (math.sqrt(((2 * alpha) / num_features)) * np.cos((np.dot(W, nObservations.T) + b_for_nObservations)))
    # Posterior precision A = Phi Phi^T / noise + I.
    A = (np.divide(np.dot(phi_vector_inverse, phi_vector_inverse.T), noise) + np.eye(num_features))
    A_inverse = compute_inverse(A)
    # Posterior mean and covariance of the feature weights.
    mean_of_post_theta = np.divide(np.dot(np.dot(A_inverse, phi_vector_inverse), value_of_nObservations), noise)
    mean_of_post_theta = np.squeeze(np.asarray(mean_of_post_theta))
    variance_of_post_theta = A_inverse
    # One Thompson sample of the weight vector.
    sample_of_theta = npr.multivariate_normal(mean_of_post_theta, variance_of_post_theta)
    def function_to_optimize(x):
        # Surrogate value phi(x)^T theta; negated when maximizing.
        phi_x = (math.sqrt(((2 * alpha) / num_features)) * np.cos((np.dot(W, x.T) + b)))
        approx_function = np.dot(phi_x.T, sample_of_theta)
        if maximize:
            approx_function = (- approx_function)
        return approx_function
    if (optimize_method not in ('CG', 'BFGS', 'Newton-CG', 'L-BFGS-B', 'TNC', 'SLSQP', 'dogleg', 'trust-ncg', 'trust-krylov', 'trust-exact', 'trust-constr')):
        # Gradient-free methods: no jacobian supplied.
        result = spo.minimize(function_to_optimize, initial_point, method=optimize_method, options={'gtol': (10 ** (- 50)), 'maxiter': 2000}, bounds=bnds)
    else:
        def gradient_to_help(x):
            # Analytic gradient of the surrogate: d/dx phi(x)^T theta.
            temp = np.sin((np.dot(W, x.T) + b))
            temp_multiple = np.array(([temp] * d)).T
            gradient_of_phi_x = ((- np.sqrt(((2 * alpha) / num_features))) * np.multiply(temp_multiple, W))
            gradient_function = np.dot(gradient_of_phi_x.T, sample_of_theta)
            if maximize:
                gradient_function = (- gradient_function)
            return gradient_function
        result = spo.minimize(function_to_optimize, initial_point, method=optimize_method, jac=gradient_to_help, bounds=bnds, options={'gtol': (10 ** (- 50)), 'maxiter': 2000})
    return result
class Migration(migrations.Migration):
    """Add an optional ``shared_text`` field to both the provided text
    asset model and its configuration model."""

    dependencies = [
        ('sponsors', '0073_auto__1906'),
    ]

    operations = [
        migrations.AddField(
            model_name='providedtextasset',
            name='shared_text',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='providedtextassetconfiguration',
            name='shared_text',
            field=models.TextField(blank=True, null=True),
        ),
    ]
def prune_non_overlapping_boxes(boxlist1, boxlist2, min_overlap=0.0, scope=None):
    """Keep only boxlist1 boxes that sufficiently overlap some boxlist2 box.

    Args:
        boxlist1: BoxList whose boxes are filtered.
        boxlist2: BoxList providing the reference boxes.
        min_overlap: minimum required intersection-over-area (of the
            boxlist1 box) with the best-matching boxlist2 box.
        scope: optional name scope.

    Returns:
        (pruned boxlist1, kept indices into boxlist1).
    """
    with tf.name_scope(scope, 'PruneNonOverlappingBoxes'):
        # ioa_[i, j]: intersection of box i (boxlist2) and box j
        # (boxlist1), normalized by the area of box j.
        ioa_ = ioa(boxlist2, boxlist1)
        # Best overlap per boxlist1 box. `axis` replaces the deprecated
        # `reduction_indices` argument.
        ioa_ = tf.reduce_max(ioa_, axis=[0])
        keep_bool = tf.greater_equal(ioa_, tf.constant(min_overlap))
        # `axis` replaces the deprecated `squeeze_dims` argument.
        keep_inds = tf.squeeze(tf.where(keep_bool), axis=[1])
        new_boxlist1 = gather(boxlist1, keep_inds)
        return (new_boxlist1, keep_inds)
class XLMRobertaConfig(PretrainedConfig):
    """Configuration for XLM-RoBERTa models.

    Stores the architecture hyperparameters (vocabulary/embedding sizes,
    layer counts, dropout probabilities, special token ids, ...) used to
    instantiate an XLM-RoBERTa model. Defaults mirror the base model.
    """

    model_type = 'xlm-roberta'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type='absolute', use_cache=True, classifier_dropout=None, **kwargs):
        # Special token ids go to the base class together with any extra
        # keyword arguments.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        # Embedding / vocabulary sizes.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        # Transformer depth and width.
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        # Regularization.
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        # Positional / segment embeddings.
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        # Initialization and normalization.
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # Decoding / task-head options.
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
def recursive_find_python_class(folder, trainer_name, current_module):
    """Search a package directory (recursively) for an attribute named
    *trainer_name* and return it, or None when it cannot be found.

    Args:
        folder: single-element list with the package's filesystem path,
            as accepted by ``pkgutil.iter_modules``.
        trainer_name: attribute (usually a class) to look for.
        current_module: dotted module path corresponding to *folder*.
    """
    # Pass 1: plain modules directly inside the folder.
    for _importer, modname, ispkg in pkgutil.iter_modules(folder):
        if ispkg:
            continue
        module = importlib.import_module(current_module + '.' + modname)
        if hasattr(module, trainer_name):
            return getattr(module, trainer_name)
    # Pass 2: descend into sub-packages, first hit wins.
    for _importer, modname, ispkg in pkgutil.iter_modules(folder):
        if not ispkg:
            continue
        found = recursive_find_python_class(
            [join(folder[0], modname)], trainer_name,
            current_module=current_module + '.' + modname)
        if found is not None:
            return found
    return None
def calculate_curtailment(n, label, curtailment):
    """Store the per-carrier curtailment percentage of network *n* under
    *label* and return the updated *curtailment* container."""
    carriers = n.generators.carrier
    # Energy that could have been produced at full availability.
    available = (
        n.generators_t.p_max_pu
        .multiply(n.generators.p_nom_opt)
        .sum()
        .groupby(carriers)
        .sum()
    )
    # Energy actually dispatched.
    dispatched = n.generators_t.p.sum().groupby(carriers).sum()
    curtailment[label] = (100 * (available - dispatched) / available).round(3)
    return curtailment
# NOTE(review): '_tf' looks like the residue of a stripped decorator
# (e.g. '@require_tf') — as written it is an undefined name at import
# time. Confirm against the upstream transformers test file.
_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    """Dual-encoder tests pairing a TF DeiT vision tower with a TF
    RoBERTa text tower."""

    def get_pretrained_model_and_inputs(self):
        """Return a tiny pretrained dual-encoder plus random model inputs."""
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained('Rocketknight1/tiny-random-deit-tf', 'hf-internal-testing/tiny-random-roberta')
        batch_size = 13
        pixel_values = floats_tensor([batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size])
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return (model, inputs)

    def check_vision_text_output_attention(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        """Check attention shapes for both towers when output_attentions=True."""
        (vision_model, text_model) = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True)
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = ((image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]))
        # Two extra tokens on top of the patches — presumably DeiT's CLS
        # and distillation tokens.
        seq_len = (num_patches + 2)
        self.assertEqual(vision_attentions[0].shape[(- 3):], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(text_attentions[0].shape[(- 3):], (text_config.num_attention_heads, input_ids.shape[(- 1)], input_ids.shape[(- 1)]))

    def get_vision_text_model(self, vision_config, text_config):
        """Instantiate the two towers from their configs."""
        vision_model = TFDeiTModel(vision_config, name='vision_model')
        text_model = TFRobertaModel(text_config, name='text_model')
        return (vision_model, text_model)

    def prepare_config_and_inputs(self):
        """Assemble configs and inputs from the per-model testers."""
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        (vision_config, pixel_values, _) = vision_config_and_inputs
        (text_config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = text_config_and_inputs
        return {'text_config': text_config, 'vision_config': vision_config, 'pixel_values': pixel_values, 'attention_mask': input_mask, 'input_ids': input_ids, 'text_token_type_ids': token_type_ids, 'text_sequence_labels': sequence_labels, 'text_token_labels': token_labels, 'text_choice_labels': choice_labels}
class TransformerEncoderBase(nn.Module):
    """Fairseq-style Transformer encoder stack with an optional packed
    'fast BERT MHA' (flash attention) execution path used under autocast.
    """

    def __init__(self, cfg, embed_tokens):
        # NOTE(review): assigned before super().__init__(); this works for
        # plain config objects but would break if cfg were an nn.Module —
        # confirm intent.
        self.cfg = cfg
        super(TransformerEncoderBase, self).__init__()
        # Version buffer kept for checkpoint compatibility.
        self.register_buffer('version', torch.Tensor([3]))
        self.dropout_module = nn.Dropout(cfg.dropout)
        self.encoder_layerdrop = cfg.encoder_layerdrop
        embed_dim = embed_tokens.embedding_dim
        self.padding_idx = embed_tokens.padding_idx
        self.max_source_positions = cfg.max_source_positions
        self.embed_tokens = embed_tokens
        # Scale embeddings by sqrt(dim) unless disabled (Transformer convention).
        self.embed_scale = (1.0 if cfg.no_scale_embedding else math.sqrt(embed_dim))
        if (not cfg.no_token_positional_embeddings):
            self.embed_positions = PositionalEmbedding(cfg.max_source_positions, embed_dim, self.padding_idx, learned=cfg.encoder_learned_pos)
        else:
            self.embed_positions = None
        if cfg.layernorm_embedding:
            self.layernorm_embedding = LayerNorm(embed_dim)
        else:
            self.layernorm_embedding = None
        # LayerDrop: stochastically skip whole layers during training.
        if (self.encoder_layerdrop > 0.0):
            self.layers = LayerDropModuleList(p=self.encoder_layerdrop)
        else:
            self.layers = nn.ModuleList([])
        self.layers.extend([self.build_encoder_layer(cfg) for i in range(cfg.encoder_layers)])
        self.num_layers = len(self.layers)
        # Final LayerNorm only for pre-norm ('normalize_before') encoders.
        if cfg.encoder_normalize_before:
            self.layer_norm = LayerNorm(embed_dim)
        else:
            self.layer_norm = None
        from onmt.modules.optimized.flash_mha import flash_bert_mha
        self.fast_bert_mha = flash_bert_mha
        self.n_languages = (- 1)
        self.has_adapter = False

    def build_encoder_layer(self, cfg):
        """Factory for a single encoder layer (override point)."""
        layer = TransformerEncoderLayerBase(cfg)
        return layer

    def forward_embedding(self, src_tokens, token_embedding: Optional[torch.Tensor]=None):
        """Embed tokens (or use precomputed embeddings), add positions,
        optional LayerNorm, then dropout. Returns (x, unscaled embed)."""
        if (token_embedding is None):
            token_embedding = self.embed_tokens(src_tokens)
        x = embed = (self.embed_scale * token_embedding)
        if (self.embed_positions is not None):
            x = (embed + self.embed_positions(src_tokens))
        if (self.layernorm_embedding is not None):
            x = self.layernorm_embedding(x)
        x = self.dropout_module(x)
        return (x, embed)

    def forward(self, src_tokens, src_mask: Optional[torch.Tensor]=None, return_all_hiddens: bool=False, token_embeddings: Optional[torch.Tensor]=None):
        return self.forward_scriptable(src_tokens, src_mask, return_all_hiddens, token_embeddings)

    def forward_scriptable(self, src_tokens, src_mask: Optional[torch.Tensor]=None, return_all_hiddens: bool=False, token_embeddings: Optional[torch.Tensor]=None):
        """Encode src_tokens.

        Returns (x [T x B x C], encoder_padding_mask, encoder_embedding,
        encoder_states). Under autocast with flash MHA available, the
        batch is packed (padding removed) before the layers and restored
        afterwards.
        """
        if (src_mask is None):
            # Derive the padding mask from the pad token id.
            encoder_padding_mask = src_tokens.eq(self.padding_idx)
        else:
            encoder_padding_mask = src_mask
        # XLA cannot branch on data-dependent .any(); always assume pads.
        has_pads = ((src_tokens.device.type == 'xla') or encoder_padding_mask.any())
        (x, encoder_embedding) = self.forward_embedding(src_tokens, token_embeddings)
        if has_pads:
            # Zero out embeddings at padded positions.
            x = (x * (1 - encoder_padding_mask.unsqueeze((- 1)).type_as(x)))
        can_run_fast_bert_mha = False
        seq_len = x.size(1)
        bsz = x.size(0)
        if (self.fast_bert_mha and torch.is_autocast_enabled()):
            # Packed fast path: drop padded positions and describe the
            # batch via cumulative sequence lengths (cu_seqlens).
            can_run_fast_bert_mha = True
            padding_mask = encoder_padding_mask
            if (padding_mask is None):
                padding_mask = x.new_zeros(bsz, seq_len)
            padding_mask = padding_mask.long()
            lengths = (1 - padding_mask).sum(dim=1)
            lengths = lengths.cpu().tolist()
            # Flatten to (B*T, C) and keep only non-pad positions.
            x = x.view((- 1), x.size((- 1)))
            non_pad_indices = torch.nonzero(padding_mask.view((- 1)).ne(1)).squeeze(1)
            x = x.index_select(0, non_pad_indices)
            max_len = max(lengths)
            a = torch.tensor(np.array(([0] + lengths)), dtype=torch.int32)
            cu_seqlens = torch.cumsum(a, 0).to(dtype=torch.int32, device=x.device)
        else:
            max_len = (- 1)
            cu_seqlens = None
            non_pad_indices = None
            # Standard path expects time-first (T x B x C) input.
            x = x.transpose(0, 1)
        encoder_states = []
        if return_all_hiddens:
            encoder_states.append(x)
        for layer in self.layers:
            x = layer(x, encoder_padding_mask=(encoder_padding_mask if has_pads else None), max_len=max_len, cu_seqlens=cu_seqlens)
            if return_all_hiddens:
                assert (encoder_states is not None)
                encoder_states.append(x)
        if (self.layer_norm is not None):
            x = self.layer_norm(x)
        src_lengths = src_tokens.ne(self.padding_idx).sum(dim=1, dtype=torch.int32).reshape((- 1), 1).contiguous()
        if can_run_fast_bert_mha:
            # Unpack: scatter tokens back to (B*T), restore (T x B x C).
            x = index_copy(x, non_pad_indices, (bsz * seq_len))
            x = x.view(bsz, seq_len, (- 1))
            x = x.transpose(0, 1).contiguous()
        return (x, encoder_padding_mask, encoder_embedding, encoder_states)
def test_float32():
    """Exercise float32 image conversion across LUT shapes and sizes.

    LUTs with <= 256 entries map through an indexed image; oversized
    (257-entry) LUTs fall back to direct grayscale/RGB formats.
    """
    Format = QtGui.QImage.Format
    dtype = np.float32
    w, h = 192, 108
    lo, hi = -1, 1
    levels = [lo, hi]

    def random_lut(shape):
        return np.random.randint(256, size=shape, dtype=np.uint8)

    lut_none = None
    # Exactly 256 entries.
    lut_mono1 = random_lut(256)
    lut_mono2 = random_lut((256, 1))
    lut_rgb = random_lut((256, 3))
    lut_rgba = random_lut((256, 4))
    # One entry short (255).
    lut_mono1_s = random_lut(255)
    lut_mono2_s = random_lut((255, 1))
    lut_rgb_s = random_lut((255, 3))
    lut_rgba_s = random_lut((255, 4))
    # One entry over (257).
    lut_mono1_l = random_lut(257)
    lut_mono2_l = random_lut((257, 1))
    lut_rgb_l = random_lut((257, 3))
    lut_rgba_l = random_lut((257, 4))

    # No LUT: the channel count selects the output format.
    check_format((h, w), dtype, levels, lut_none, Format.Format_Grayscale8)
    check_format((h, w, 3), dtype, levels, lut_none, Format.Format_RGB888)
    check_format((h, w, 4), dtype, levels, lut_none, Format.Format_RGBA8888)

    # All LUTs that fit in 256 entries use an indexed image.
    for lut in (lut_mono1, lut_mono2, lut_rgb, lut_rgba,
                lut_mono1_s, lut_mono2_s, lut_rgb_s, lut_rgba_s):
        check_format((h, w), dtype, levels, lut, Format.Format_Indexed8)

    # Oversized LUTs cannot be indexed; they resolve to direct formats.
    check_format((h, w), dtype, levels, lut_mono1_l, Format.Format_Grayscale8)
    check_format((h, w), dtype, levels, lut_mono2_l, Format.Format_Grayscale8)
    check_format((h, w), dtype, levels, lut_rgb_l, Format.Format_RGBX8888)
    check_format((h, w), dtype, levels, lut_rgba_l, Format.Format_RGBA8888)
def _test_roialign_allclose(device, dtype):
    """Check mmcv's roi_align forward and backward against precomputed
    reference outputs/gradients from the module-level `inputs`/`outputs`.

    Skips when CUDA is requested but unavailable, or when mmcv.ops was
    not compiled.
    """
    if device == 'cuda' and not torch.cuda.is_available():
        pytest.skip('test requires GPU')
    try:
        from mmcv.ops import roi_align
    except ModuleNotFoundError:
        pytest.skip('test requires compilation')
    pool_h, pool_w = 2, 2
    spatial_scale = 1.0
    sampling_ratio = 2
    for case, expected in zip(inputs, outputs):
        feats = torch.tensor(np.array(case[0]), dtype=dtype, device=device, requires_grad=True)
        rois = torch.tensor(np.array(case[1]), dtype=dtype, device=device)
        ref_output = np.array(expected[0])
        ref_grad = np.array(expected[1])
        pooled = roi_align(feats, rois, (pool_h, pool_w), spatial_scale, sampling_ratio, 'avg', True)
        # backprop a ones tensor so x.grad holds the summed gradient
        pooled.backward(torch.ones_like(pooled))
        assert np.allclose(pooled.data.type(torch.float).cpu().numpy(), ref_output, atol=0.001)
        assert np.allclose(feats.grad.data.type(torch.float).cpu().numpy(), ref_grad, atol=0.001)
def parse_string_date(obj_datetime):
    """Return `obj_datetime` (whitespace-normalized) when it contains a
    ctime-style timestamp such as ``Mon Jan 1 12:00:00 UTC 2020``.

    Returns '' when no timestamp is present or on any parsing error
    (errors are logged, never raised).
    """
    try:
        logging.info('Obj_date time ' + str(obj_datetime))
        # collapse runs of whitespace before matching
        normalized = re.sub(r'\s\s+', ' ', obj_datetime).strip()
        logging.info('Obj_date sub time ' + str(normalized))
        pattern = r'[\s\S\n]*\w{3} \w{3} \d{1,} \d{2}:\d{2}:\d{2} \w{3} \d{4}[\s\S\n]*'
        matched = re.match(pattern, normalized)
        if matched is None:
            return ''
        # the pattern spans the whole string, so group() is the full input
        found = matched.group().strip()
        logging.info('Search response: ' + str(found))
        return found
    except Exception as exc:
        logging.info('Exception %s when trying to parse string to date' % str(exc))
        return ''
def fit_model(model, train_data, test_data):
    """Train `model` on video-sequence generators, resuming from a saved
    checkpoint when one exists.

    Weights are checkpointed to RNN_weights.h5 after every epoch; a
    KeyboardInterrupt stops training without losing the last checkpoint.
    """
    weights_path = 'RNN_weights.h5'
    try:
        # resume from a previous run when a checkpoint file is present
        if os.path.exists(weights_path):
            model.load_weights(weights_path)
            print('Load weights')
        train_gen = util.video_generator(train_data, BatchSize, SequenceLength, CNN_output, N_CLASSES)
        val_gen = util.video_generator(test_data, BatchSize, SequenceLength, CNN_output, N_CLASSES)
        print('Start fitting model')
        saver = keras.callbacks.ModelCheckpoint(weights_path, save_weights_only=True)
        model.fit_generator(train_gen, steps_per_epoch=100, epochs=200, validation_data=val_gen, validation_steps=50, verbose=2, callbacks=[saver])
    except KeyboardInterrupt:
        print('Training time:')
def load_arguments(argument_class, json_file_path=None):
    """Parse an `argument_class` dataclass via HfArgumentParser.

    Precedence: an explicit `json_file_path`, then a single ``*.json``
    CLI argument, then regular command-line flags.
    """
    parser = HfArgumentParser(argument_class)
    json_source = None
    if json_file_path is not None:
        json_source = json_file_path
    elif len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        json_source = os.path.abspath(sys.argv[1])
    if json_source is not None:
        (parsed,) = parser.parse_json_file(json_file=json_source)
    else:
        (parsed,) = parser.parse_args_into_dataclasses()
    return parsed
class TestTravelTimeMatrixComputer():
    """Integration tests for r5py.TravelTimeMatrixComputer.

    NOTE(review): the bare ``.parametrize(...)`` lines in the original were
    syntactically invalid — the ``@pytest.mark`` prefix had been stripped.
    They are restored as ``@pytest.mark.parametrize`` below; no other code
    was changed.
    """

    def test_travel_time_matrix_initialization(self, transport_network, population_grid_points, origin_point, departure_datetime):
        """Constructor normalizes origins/destinations to GeoDataFrames."""
        travel_time_matrix_computer = r5py.TravelTimeMatrixComputer(transport_network, origins=origin_point, destinations=population_grid_points, departure=departure_datetime, transport_modes=[r5py.TransportMode.TRANSIT, r5py.TransportMode.WALK])
        assert isinstance(travel_time_matrix_computer.transport_network, r5py.TransportNetwork)
        assert isinstance(travel_time_matrix_computer.origins, geopandas.GeoDataFrame)
        assert isinstance(travel_time_matrix_computer.destinations, geopandas.GeoDataFrame)
        assert (travel_time_matrix_computer.origins.shape == origin_point.shape)
        assert (travel_time_matrix_computer.destinations.shape == population_grid_points.shape)

    def test_travel_time_matrix_initialization_with_files(self, transport_network_files_tuple, population_grid_points, origin_point, departure_datetime):
        """A (osm, gtfs) file tuple is accepted in place of a TransportNetwork."""
        travel_time_matrix_computer = r5py.TravelTimeMatrixComputer(transport_network_files_tuple, origins=origin_point, destinations=population_grid_points, departure=departure_datetime, transport_modes=[r5py.TransportMode.TRANSIT, r5py.TransportMode.WALK])
        assert isinstance(travel_time_matrix_computer.transport_network, r5py.TransportNetwork)

    def test_all_to_all(self, transport_network, population_grid_points, departure_datetime):
        """Omitting destinations computes the full origins x origins matrix."""
        travel_time_matrix_computer = r5py.TravelTimeMatrixComputer(transport_network, origins=population_grid_points, departure=departure_datetime, transport_modes=[r5py.TransportMode.TRANSIT, r5py.TransportMode.WALK])
        travel_time_matrix = travel_time_matrix_computer.compute_travel_times()
        assert isinstance(travel_time_matrix, pandas.DataFrame)
        # 92 x 92 grid points = 8464 od-pairs
        assert (travel_time_matrix.shape == (8464, 3))
        assert (travel_time_matrix.columns.to_list() == ['from_id', 'to_id', 'travel_time'])
        assert (travel_time_matrix['from_id'].min() == travel_time_matrix['to_id'].min())
        assert (travel_time_matrix['from_id'].max() == travel_time_matrix['to_id'].max())
        assert (travel_time_matrix['travel_time'].min() >= 0)
        assert (travel_time_matrix['travel_time'].max() == pytest.approx(50, abs=3))

    def test_one_to_all(self, transport_network, population_grid_points, origin_point, departure_datetime):
        """Single origin against the 92-point grid."""
        travel_time_matrix_computer = r5py.TravelTimeMatrixComputer(transport_network, origins=origin_point, destinations=population_grid_points, departure=departure_datetime, transport_modes=[r5py.TransportMode.TRANSIT, r5py.TransportMode.WALK])
        travel_time_matrix = travel_time_matrix_computer.compute_travel_times()
        assert (travel_time_matrix.shape == (92, 3))
        assert (travel_time_matrix['from_id'].unique() == [0])
        assert (travel_time_matrix['to_id'].min() == 0)
        assert (travel_time_matrix['to_id'].max() == 91)
        assert (travel_time_matrix['travel_time'].max() == pytest.approx(30, abs=3))

    def test_one_to_all_with_percentiles(self, transport_network, population_grid_points, origin_point, departure_datetime):
        """Requesting percentiles yields one travel_time_pNN column each."""
        travel_time_matrix_computer = r5py.TravelTimeMatrixComputer(transport_network, origins=origin_point, destinations=population_grid_points, departure=departure_datetime, transport_modes=[r5py.TransportMode.TRANSIT, r5py.TransportMode.WALK], percentiles=[25, 50, 75])
        travel_time_matrix = travel_time_matrix_computer.compute_travel_times()
        assert (travel_time_matrix.shape == (92, 5))
        required_cols = ['travel_time_p25', 'travel_time_p50', 'travel_time_p75']
        for col in required_cols:
            assert (col in travel_time_matrix.columns)
        # percentiles must be monotonically non-decreasing per row
        check = (travel_time_matrix['travel_time_p75'] >= travel_time_matrix['travel_time_p25'])
        assert (False not in check.to_list())

    def test_gtfs_date_range_warnings(self, transport_network, population_grid_points, origin_point, departure_datetime):
        """A departure outside the GTFS validity window warns."""
        with pytest.warns(RuntimeWarning, match='Departure time'):
            travel_time_matrix_computer = r5py.TravelTimeMatrixComputer(transport_network, origins=origin_point, destinations=population_grid_points, departure=datetime.datetime(2021, 2, 22, 8, 30), transport_modes=[r5py.TransportMode.TRANSIT, r5py.TransportMode.WALK])
            del travel_time_matrix_computer

    def test_gtfs_date_range_warnings_without_gtfs_file(self, transport_network_from_test_files_without_gtfs, population_grid_points, origin_point, departure_datetime):
        """Transit request without any GTFS data warns as well."""
        with pytest.warns(RuntimeWarning, match='Departure'):
            travel_time_matrix_computer = r5py.TravelTimeMatrixComputer(transport_network_from_test_files_without_gtfs, origins=origin_point, destinations=population_grid_points, departure=departure_datetime, transport_modes=[r5py.TransportMode.TRANSIT, r5py.TransportMode.WALK])
            del travel_time_matrix_computer

    @pytest.mark.parametrize(['snap_to_network', 'expected_snap_to_network'], [(True, True), (False, False)])
    def test_snap_to_network_parameter(self, transport_network, population_grid_points, departure_datetime, snap_to_network, expected_snap_to_network):
        """The snap_to_network flag is stored verbatim."""
        travel_time_matrix_computer = r5py.TravelTimeMatrixComputer(transport_network, population_grid_points, departure=departure_datetime, snap_to_network=snap_to_network)
        assert (travel_time_matrix_computer.snap_to_network == expected_snap_to_network)

    @pytest.mark.parametrize(['snap_to_network', 'expected_travel_times'], [(True, pytest.lazy_fixture('walking_times_snapped')), (False, pytest.lazy_fixture('walking_times_not_snapped'))])
    def test_snap_to_network(self, transport_network, population_grid_points, departure_datetime, snap_to_network, expected_travel_times):
        """Snapped vs. unsnapped walking matrices match their fixtures."""
        travel_time_matrix_computer = r5py.TravelTimeMatrixComputer(transport_network, origins=population_grid_points, departure=departure_datetime, snap_to_network=snap_to_network, transport_modes=[r5py.TransportMode.WALK])
        travel_times = travel_time_matrix_computer.compute_travel_times()
        travel_times = travel_times.set_index(['from_id', 'to_id']).sort_index()
        expected_travel_times = expected_travel_times.set_index(['from_id', 'to_id']).sort_index()
        pandas.testing.assert_frame_equal(travel_times, expected_travel_times)

    def test_snap_to_network_with_unsnappable_origins(self, transport_network, unsnappable_points, population_grid_points, departure_datetime):
        """Partially unsnappable origins warn but still compute.

        NOTE(review): the expected warning says 'destination points' even
        though origins are being tested — looks copy-pasted; confirm the
        actual warning text upstream.
        """
        origins = pandas.concat([population_grid_points[(- 3):], unsnappable_points]).reset_index(drop=False)
        with pytest.warns(RuntimeWarning, match='Some destination points could not be snapped to the street network'):
            travel_time_matrix = r5py.TravelTimeMatrixComputer(transport_network, origins, departure=departure_datetime, snap_to_network=True, transport_modes=[r5py.TransportMode.WALK])
            _ = travel_time_matrix.compute_travel_times()

    def test_snap_to_network_with_only_unsnappable_origins(self, transport_network, unsnappable_points, departure_datetime):
        """All-unsnappable origins raise after warning."""
        with pytest.raises(ValueError, match='After snapping, no valid origin points remain'), pytest.warns(RuntimeWarning, match='Some origin points could not be snapped to the street network'):
            travel_time_matrix = r5py.TravelTimeMatrixComputer(transport_network, unsnappable_points, departure=departure_datetime, snap_to_network=True, transport_modes=[r5py.TransportMode.WALK])
            _ = travel_time_matrix.compute_travel_times()

    def test_snap_to_network_with_unsnappable_destinations(self, transport_network, population_grid_points, unsnappable_points, departure_datetime):
        """Partially unsnappable destinations warn but still compute."""
        destinations = pandas.concat([population_grid_points[(- 3):], unsnappable_points]).reset_index(drop=False)
        with pytest.warns(RuntimeWarning, match='Some destination points could not be snapped to the street network'):
            travel_time_matrix = r5py.TravelTimeMatrixComputer(transport_network, origins=population_grid_points, destinations=destinations, departure=departure_datetime, snap_to_network=True, transport_modes=[r5py.TransportMode.WALK])
            _ = travel_time_matrix.compute_travel_times()

    def test_snap_to_network_with_only_unsnappable_destinations(self, transport_network, population_grid_points, unsnappable_points, departure_datetime):
        """All-unsnappable destinations raise after warning."""
        with pytest.raises(ValueError, match='After snapping, no valid destination points remain'), pytest.warns(RuntimeWarning, match='Some destination points could not be snapped to the street network'):
            travel_time_matrix = r5py.TravelTimeMatrixComputer(transport_network, origins=population_grid_points, destinations=unsnappable_points, departure=departure_datetime, snap_to_network=True, transport_modes=[r5py.TransportMode.WALK])
            _ = travel_time_matrix.compute_travel_times()

    @pytest.mark.parametrize('snap_to_network', [True, False])
    def test_travel_time_between_identical_from_and_to_ids(self, transport_network, population_grid_points, departure_datetime, snap_to_network):
        """Travel time from a point to itself is always zero."""
        travel_time_matrix = r5py.TravelTimeMatrixComputer(transport_network, origins=population_grid_points, transport_modes=[r5py.TransportMode.WALK], departure=departure_datetime, snap_to_network=snap_to_network).compute_travel_times()
        assert (travel_time_matrix[(travel_time_matrix['from_id'] == travel_time_matrix['to_id'])].travel_time.max() == 0)
class GroupSong(AudioFile):
    """Test double for an AudioFile whose multi-value / editability policy
    is configurable.

    `can_multiple` / `can_change` may be True (everything allowed except
    keys in `cant_change`) or a collection of explicitly allowed keys.
    """

    def __init__(self, can_multiple: bool = True, can_change: bool = True, cant_change: list[str] | None = None):
        self._can_multiple = can_multiple
        self._can_change = can_change
        self._cant_change = cant_change or []

    def can_multiple_values(self, key=None):
        """With no key, return the raw policy; otherwise whether `key`
        may hold multiple values."""
        if key is None:
            return self._can_multiple
        # True means "all keys"; otherwise the policy is a whitelist
        return self._can_multiple is True or key in self._can_multiple

    def can_change(self, key=None):
        """With no key, return the raw policy; otherwise whether `key`
        is editable."""
        if key is None:
            return self._can_change
        if self._can_change is True:
            # everything editable except the explicit blacklist
            return key not in self._cant_change
        return key in self._can_change
def _topk_py_impl(op, x, k, axis, idx_dtype):
    """NumPy reference implementation of top-k along `axis`.

    Positive `k` selects the `k` largest elements, negative `k` the
    `|k|` smallest.  Depending on `op.return_values` / `op.return_indices`
    this returns the values, the indices (cast to `idx_dtype`), or a
    `(values, indices)` pair.  Results within the k-slice are NOT sorted
    (np.partition semantics).
    """
    ndim = x.ndim
    assert ((- ndim) <= axis < ndim)
    # normalize a possibly-negative axis into [0, ndim)
    axis %= ndim
    if (k == 0):
        raise ValueError('topk: kth cannot be zero')
    elif (k > x.shape[axis]):
        # NOTE(review): only the positive overflow is validated here;
        # k < -x.shape[axis] is not rejected — confirm callers guarantee it.
        raise ValueError(f'topk: kth cannot be larger than the size of specified axis {int(axis)}')
    if (abs(k) == 1):
        # k == +1 indexes np.max/argmax, k == -1 indexes np.min/argmin
        fn_max = [None, np.max, np.min][k]
        fn_argmax = [None, np.argmax, np.argmin][k]
        if (not op.return_indices):
            return np.expand_dims(fn_max(x, axis=axis), axis)
        elif op.return_values:
            zi = np.expand_dims(fn_argmax(x, axis=axis), axis)
            # build a fancy index that selects zi along `axis` and keeps
            # every other axis aligned via broadcasting
            idx2 = tuple(((np.arange(s).reshape(((s,) + ((1,) * ((ndim - i) - 1)))) if (i != axis) else zi) for (i, s) in enumerate(x.shape)))
            zv = x[idx2]
            return (zv, zi.astype(idx_dtype))
        else:
            zi = np.expand_dims(fn_argmax(x, axis=axis), axis)
            return zi.astype(idx_dtype)
    if (x.shape[axis] == abs(k)):
        # whole axis requested: values are just a copy, indices 0..|k|-1
        if (not op.return_indices):
            return x.copy()
        else:
            l = axis
            r = (ndim - l)
            reps = list(x.shape)
            reps[axis] = 1
            zi = np.arange(abs(k), dtype=idx_dtype)
            # NOTE(review): for k < 0 this reshape passes a negative dim and
            # relies on numpy inferring it as the remaining size — works, but
            # abs(k) would be clearer; confirm before changing.
            zi = zi.reshape(((((1,) * l) + (k,)) + ((1,) * (r - 1))))
            zi = np.tile(zi, reps)
            if op.return_values:
                return (x.copy(), zi)
            else:
                return zi
    # general case: np.partition puts the top-k (k>0) in the trailing slice,
    # the bottom-|k| (k<0) in the leading slice
    idx = ([slice(None)] * ndim)
    idx[axis] = (slice((- k), None) if (k > 0) else slice((- k)))
    if (not op.return_indices):
        zv = np.partition(x, (- k), axis=axis)[tuple(idx)]
        return zv
    elif op.return_values:
        zi = np.argpartition(x, (- k), axis=axis)[tuple(idx)]
        # same aligned fancy-index trick as above to gather the values
        idx2 = tuple(((np.arange(s).reshape(((s,) + ((1,) * ((ndim - i) - 1)))) if (i != axis) else zi) for (i, s) in enumerate(x.shape)))
        zv = x[idx2]
        return (zv, zi.astype(idx_dtype))
    else:
        zi = np.argpartition(x, (- k), axis=axis)[tuple(idx)]
        return zi.astype(idx_dtype)
def get_segment_dataset(config, use_gt_inssem=False):
    """Build the 'train' segment dataset for the configured dataset class.

    `use_gt_inssem` switches between ground-truth and predicted
    instance/semantic label directories.  Raises NotImplementedError for
    unknown `config.dataset_class` values.
    """
    if config.dataset_class == 'panopli':
        if use_gt_inssem:
            instance_dir, semantics_dir, instance_to_semantic_key = 'rs_instance', 'rs_semantics', 'rs_instance_to_semantic'
        else:
            instance_dir, semantics_dir, instance_to_semantic_key = 'm2f_instance', 'm2f_semantics', 'm2f_instance_to_semantic'
        return SegmentPanopLiDataset(Path(config.dataset_root), 'train', (128, 128), config.max_depth, overfit=config.overfit, max_rays=config.max_rays_segments, semantics_dir=semantics_dir, instance_dir=instance_dir, instance_to_semantic_key=instance_to_semantic_key, create_seg_data_func=create_segmentation_data_panopli, subsample_frames=config.subsample_frames)
    if config.dataset_class == 'mos':
        instance_dir, semantics_dir = ('instance', 'semantic') if use_gt_inssem else ('detic_instance', 'detic_semantic')
        return SegmentMOSDataset(Path(config.dataset_root), 'train', (128, 128), config.max_depth, overfit=config.overfit, max_rays=config.max_rays_segments, semantics_dir=semantics_dir, instance_dir=instance_dir, instance_to_semantic_key=None, create_seg_data_func=None, subsample_frames=config.subsample_frames)
    raise NotImplementedError
class TestDecodeHexRecords(TestIntelHexBase):
    """Error handling of IntelHex._decode_record for malformed records.

    NOTE(review): many record literals below (':FF', ':FE', ':E5', ...)
    look truncated — real Intel HEX records are much longer (e.g.
    ':0100000000FF').  They were probably mangled somewhere in transit;
    verify against the original test suite before trusting these fixtures.
    """
    def setUp(self):
        # fresh parser per test; bind a shortcut to the decoder under test
        self.ih = IntelHex()
        self.decode_record = self.ih._decode_record
    def tearDown(self):
        del self.ih
    def test_empty_line(self):
        # an empty line is silently ignored, not an error
        self.decode_record('')
    def test_non_empty_line(self):
        self.assertRaisesMsg(HexRecordError, 'Hex file contains invalid record at line 1', self.decode_record, ' ', 1)
    def test_short_record(self):
        self.assertRaisesMsg(HexRecordError, 'Hex file contains invalid record at line 1', self.decode_record, ':', 1)
    def test_odd_hexascii_digits(self):
        self.assertRaisesMsg(HexRecordError, 'Hex file contains invalid record at line 1', self.decode_record, ':F', 1)
    def test_invalid_length(self):
        self.assertRaisesMsg(RecordLengthError, 'Record at line 1 has invalid length', self.decode_record, ':FF', 1)
    def test_invalid_record_type(self):
        self.assertRaisesMsg(RecordTypeError, 'Record at line 1 has invalid record type', self.decode_record, ':000000FF01', 1)
    def test_invalid_checksum(self):
        self.assertRaisesMsg(RecordChecksumError, 'Record at line 1 has invalid checksum', self.decode_record, ':', 1)
    def test_invalid_eof(self):
        self.assertRaisesMsg(EOFRecordError, 'File has invalid End-of-File record', self.decode_record, ':FE', 1)
    def test_invalid_extended_segment(self):
        # record type 02: both malformed variants must be rejected
        self.assertRaisesMsg(ExtendedSegmentAddressRecordError, 'Invalid Extended Segment Address Record at line 1', self.decode_record, ':FE', 1)
        self.assertRaisesMsg(ExtendedSegmentAddressRecordError, 'Invalid Extended Segment Address Record at line 1', self.decode_record, ':FB', 1)
    def test_invalid_linear_address(self):
        self.assertRaisesMsg(ExtendedLinearAddressRecordError, 'Invalid Extended Linear Address Record at line 1', self.decode_record, ':FC', 1)
        self.assertRaisesMsg(ExtendedLinearAddressRecordError, 'Invalid Extended Linear Address Record at line 1', self.decode_record, ':F9', 1)
    def test_invalid_start_segment_addr(self):
        self.assertRaisesMsg(StartSegmentAddressRecordError, 'Invalid Start Segment Address Record at line 1', self.decode_record, ':FD', 1)
        self.assertRaisesMsg(StartSegmentAddressRecordError, 'Invalid Start Segment Address Record at line 1', self.decode_record, ':F8', 1)
    def test_duplicate_start_segment_addr(self):
        # first start-address record is accepted, the second must raise
        self.decode_record(':E5')
        self.assertRaisesMsg(DuplicateStartAddressRecordError, 'Start Address Record appears twice at line 2', self.decode_record, ':F9', 2)
    def test_invalid_start_linear_addr(self):
        self.assertRaisesMsg(StartLinearAddressRecordError, 'Invalid Start Linear Address Record at line 1', self.decode_record, ':FB', 1)
        self.assertRaisesMsg(StartLinearAddressRecordError, 'Invalid Start Linear Address Record at line 1', self.decode_record, ':F6', 1)
    def test_duplicate_start_linear_addr(self):
        self.decode_record(':E3')
        self.assertRaisesMsg(DuplicateStartAddressRecordError, 'Start Address Record appears twice at line 2', self.decode_record, ':F7', 2)
    def test_addr_overlap(self):
        # writing the same address twice is an overlap error
        self.decode_record(':FF')
        self.assertRaisesMsg(AddressOverlapError, 'Hex file has data overlap at address 0x0 on line 1', self.decode_record, ':FF', 1)
    def test_data_record(self):
        # trailing \n and \r\n line endings must both be tolerated
        self.decode_record(':FF\n')
        self.decode_record(':F9\r\n')
        self.decode_record(':1004E300CFF0FBE2FDF220FF20F2E120E2FBE6F396')
    def test_eof(self):
        # EOF record signals end of parsing via the internal _EndOfFile
        self.assertRaises(_EndOfFile, self.decode_record, ':FF')
class Poll():
    """Thin wrapper around select.poll() yielding ('r'/'w'/'h') events.

    NOTE(review): `_has_event` takes no `self` and is invoked as
    ``self._has_event(mask, flag)``, and `for_events` takes `cls` — the
    ``@staticmethod`` / ``@classmethod`` decorators had evidently been
    stripped and are restored here.  Without them, `self` would bind to
    the `events` parameter and every mask test would be wrong.
    """

    # user-facing event characters mapped to poll(2) bit masks
    _EVENT_TO_MASK = {'r': select.POLLIN, 'w': select.POLLOUT}

    @staticmethod
    def _has_event(events, event):
        """Return True when `event`'s bit is set in the `events` bitmask."""
        return ((events & event) != 0)

    @classmethod
    def for_events(cls, *events):
        """Build a Poll from (fd, 'r'|'w') pairs.

        Raises ValueError for an unknown event character.
        """
        notifier = eintr_retry_call(select.poll)
        for (fd, event) in events:
            mask = cls._EVENT_TO_MASK.get(event)
            if (not mask):
                raise ValueError(f'Unknown event type: {repr(event)}')
            notifier.register(fd, mask)
        return cls(notifier)

    def __init__(self, notifier):
        # `notifier` is a select.poll-like object with a .poll(timeout) method
        self._notifier = notifier

    def poll(self, timeout=None):
        """Poll once (retrying on EINTR); return [(fd, 'r'|'w'|'h'), ...]."""
        return list(self._parse_events(eintr_retry_call(self._notifier.poll, timeout)))

    def _parse_events(self, events):
        """Translate raw (fd, mask) pairs into (fd, event-char) tuples.

        Raises IOError on POLLNVAL (fd not open) or POLLERR; a single fd
        may yield several events ('r', 'w', then 'h').
        """
        for (fd, event_mask) in events:
            if self._has_event(event_mask, select.POLLNVAL):
                raise IOError(f'File descriptor not open: {repr(fd)}')
            if self._has_event(event_mask, select.POLLERR):
                raise IOError(f'Error while polling fd: {repr(fd)}')
            if self._has_event(event_mask, select.POLLIN):
                (yield (fd, 'r'))
            if self._has_event(event_mask, select.POLLOUT):
                (yield (fd, 'w'))
            if self._has_event(event_mask, select.POLLHUP):
                (yield (fd, 'h'))
class FrameCounter(QtCore.QObject):
    """Emits the measured frames-per-second via sigFpsUpdate every
    `interval` milliseconds; call update() once per rendered frame."""

    sigFpsUpdate = QtCore.Signal(object)

    def __init__(self, interval=1000):
        super().__init__()
        self.count = 0
        self.last_update = 0
        self.interval = interval

    def update(self):
        """Register one frame; the first call arms the repeating timer."""
        self.count += 1
        if not self.last_update:
            self.last_update = perf_counter()
            self.startTimer(self.interval)

    def timerEvent(self, evt):
        """Compute fps over the elapsed window, reset, and emit."""
        now = perf_counter()
        fps = self.count / (now - self.last_update)
        self.last_update = now
        self.count = 0
        self.sigFpsUpdate.emit(fps)
class AssignResult(util_mixins.NiceRepr):
    """Stores the result of assigning predicted boxes to ground-truth boxes.

    NOTE(review): `num_preds` and `info` are read as attributes inside this
    very class (``key not in self.info``) and `random` takes ``cls`` — the
    ``@property`` / ``@classmethod`` decorators had evidently been stripped
    and are restored here.
    """

    def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):
        # number of ground-truth boxes considered
        self.num_gts = num_gts
        # per-prediction assigned gt index: 0 = unassigned, -1 = ignored,
        # i > 0 = assigned to gt i-1 (see random() below)
        self.gt_inds = gt_inds
        # per-prediction overlap with its assigned gt
        self.max_overlaps = max_overlaps
        # optional per-prediction category labels
        self.labels = labels
        # user-defined metadata, merged into `info`
        self._extra_properties = {}

    @property
    def num_preds(self):
        """int: number of predictions in this assignment."""
        return len(self.gt_inds)

    def set_extra_property(self, key, value):
        """Attach extra metadata; `key` must not clash with basic info keys."""
        assert (key not in self.info)
        self._extra_properties[key] = value

    def get_extra_property(self, key):
        """Return previously attached metadata, or None."""
        return self._extra_properties.get(key, None)

    @property
    def info(self):
        """dict: basic fields plus any extra properties."""
        basic_info = {'num_gts': self.num_gts, 'num_preds': self.num_preds, 'gt_inds': self.gt_inds, 'max_overlaps': self.max_overlaps, 'labels': self.labels}
        basic_info.update(self._extra_properties)
        return basic_info

    def __nice__(self):
        """Compact repr body: shapes instead of full tensors."""
        parts = []
        parts.append(f'num_gts={self.num_gts!r}')
        if (self.gt_inds is None):
            parts.append(f'gt_inds={self.gt_inds!r}')
        else:
            parts.append(f'gt_inds.shape={tuple(self.gt_inds.shape)!r}')
        if (self.max_overlaps is None):
            parts.append(f'max_overlaps={self.max_overlaps!r}')
        else:
            parts.append(f'max_overlaps.shape={tuple(self.max_overlaps.shape)!r}')
        if (self.labels is None):
            parts.append(f'labels={self.labels!r}')
        else:
            parts.append(f'labels.shape={tuple(self.labels.shape)!r}')
        return ', '.join(parts)

    @classmethod
    def random(cls, **kwargs):
        """Create a random AssignResult for tests/demos.

        Recognized kwargs: rng, num_gts, num_preds, p_ignore, p_assigned,
        p_use_label, num_classes.
        """
        from mmdet.core.bbox import demodata
        rng = demodata.ensure_rng(kwargs.get('rng', None))
        num_gts = kwargs.get('num_gts', None)
        num_preds = kwargs.get('num_preds', None)
        p_ignore = kwargs.get('p_ignore', 0.3)
        p_assigned = kwargs.get('p_assigned', 0.7)
        p_use_label = kwargs.get('p_use_label', 0.5)
        # BUG FIX: previously read kwargs.get('p_use_label', 3) — a
        # copy-paste error that silently ignored any `num_classes` argument.
        num_classes = kwargs.get('num_classes', 3)
        if (num_gts is None):
            num_gts = rng.randint(0, 8)
        if (num_preds is None):
            num_preds = rng.randint(0, 16)
        if (num_gts == 0):
            # no ground truth: everything unassigned
            max_overlaps = torch.zeros(num_preds, dtype=torch.float32)
            gt_inds = torch.zeros(num_preds, dtype=torch.int64)
            if ((p_use_label is True) or (p_use_label < rng.rand())):
                labels = torch.zeros(num_preds, dtype=torch.int64)
            else:
                labels = None
        else:
            import numpy as np
            max_overlaps = torch.from_numpy(rng.rand(num_preds))
            # choose which predictions get assigned at all
            is_assigned = torch.from_numpy((rng.rand(num_preds) < p_assigned))
            n_assigned = min(num_preds, min(num_gts, is_assigned.sum()))
            assigned_idxs = np.where(is_assigned)[0]
            rng.shuffle(assigned_idxs)
            assigned_idxs = assigned_idxs[0:n_assigned]
            assigned_idxs.sort()
            is_assigned[:] = 0
            is_assigned[assigned_idxs] = True
            is_ignore = (torch.from_numpy((rng.rand(num_preds) < p_ignore)) & is_assigned)
            gt_inds = torch.zeros(num_preds, dtype=torch.int64)
            true_idxs = np.arange(num_gts)
            rng.shuffle(true_idxs)
            true_idxs = torch.from_numpy(true_idxs)
            gt_inds[is_assigned] = true_idxs[:n_assigned]
            # NOTE(review): the next line discards the shuffled assignment
            # built just above; kept as-is to preserve behavior and the RNG
            # stream — confirm intent upstream before simplifying.
            gt_inds = torch.from_numpy(rng.randint(1, (num_gts + 1), size=num_preds))
            gt_inds[is_ignore] = (- 1)
            gt_inds[(~ is_assigned)] = 0
            max_overlaps[(~ is_assigned)] = 0
            if ((p_use_label is True) or (p_use_label < rng.rand())):
                if (num_classes == 0):
                    labels = torch.zeros(num_preds, dtype=torch.int64)
                else:
                    labels = torch.from_numpy(rng.randint(0, num_classes, size=num_preds))
                    labels[(~ is_assigned)] = 0
            else:
                labels = None
        self = cls(num_gts, gt_inds, max_overlaps, labels)
        return self

    def add_gt_(self, gt_labels):
        """Prepend self-assignments for the ground-truth boxes themselves."""
        self_inds = torch.arange(1, (len(gt_labels) + 1), dtype=torch.long, device=gt_labels.device)
        self.gt_inds = torch.cat([self_inds, self.gt_inds])
        # gts trivially overlap themselves with IoU 1
        self.max_overlaps = torch.cat([self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps])
        if (self.labels is not None):
            self.labels = torch.cat([gt_labels, self.labels])
def Branin_Hoo(X):
    """Rescaled Branin-Hoo test function with additive Gaussian noise.

    `X` holds two coordinates in [0, 1]^2 that are mapped to the standard
    Branin domain; the value is shifted/scaled and perturbed by
    1e-3 * N(0, 1) noise on every call.
    """
    # map unit-square inputs onto the Branin domain
    x1bar = 15 * X[0] - 5
    x2bar = 15 * X[1]
    parabola = x2bar - 5.1 * x1bar ** 2 / (4 * np.pi ** 2) + 5 * x1bar / np.pi - 6
    cosine = (10 - 10 / (8 * np.pi)) * np.cos(x1bar)
    noise = 10 ** (-3) * npr.normal(0, 1)
    return (parabola ** 2 + cosine - 44.81) / 51.95 + noise
class BubbleManager():
    """Manages the single speech/thought bubble attached to a sprite."""

    def __init__(self, sprite):
        self.sprite = sprite
        self.flipped = False
        self.bubble = None

    def say(self, text: str, border=Bubble.SAY):
        """Show `text` in a bubble; blank/None text just clears the bubble.

        Numbers are rendered with at most two decimals (integers without
        a decimal point).
        """
        if isinstance(text, (int, float)):
            pretty = round(text, 2) if text % 1 > 0 else int(text)
            text = str(pretty)
        # always drop the previous bubble first
        if self.bubble:
            self.bubble.kill()
            self.bubble = None
        if text is not None and text.strip() != '':
            self.bubble = Bubble(self.sprite, text, border)
            self.sprite.stage.bubbles.add(self.bubble)

    def think(self, text: str):
        """Same as say(), with a thought-bubble border."""
        self.say(text, Bubble.THINK)
class Migration(migrations.Migration):
    """Create the Profile model: a one-to-one extension of the user model
    with audit fields (created/modified timestamps and users) plus city
    and contact number."""
    # Profile references the swappable AUTH_USER_MODEL, so depend on it.
    dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
    # created_by/modified_by are nullable audit FKs; `user` is the owning account.
    operations = [migrations.CreateModel(name='Profile', fields=[('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created At')), ('modified_at', models.DateTimeField(auto_now=True, verbose_name='Last Modified At')), ('city', models.CharField(max_length=100, null=True, blank=True)), ('contact_no', models.CharField(max_length=15, null=True, blank=True)), ('created_by', models.ForeignKey(related_name='created_profile_set', verbose_name='Created By', blank=True, on_delete=models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, null=True)), ('modified_by', models.ForeignKey(related_name='updated_profile_set', verbose_name='Modified By', blank=True, on_delete=models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, null=True)), ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL, on_delete=models.deletion.CASCADE))], options={'abstract': False}, bases=(models.Model,))]
def inference_tip(infer_function: InferFn[_NodesT], raise_on_overwrite: bool=False) -> TransformFn[_NodesT]:
    """Return a transform that installs `infer_function` as a node's
    explicit inference.

    With `raise_on_overwrite=True`, replacing a *different* already-set
    inference raises InferenceOverwriteError (re-installing the same
    function is allowed).
    """
    def transform(node: _NodesT, infer_function: InferFn[_NodesT]=infer_function) -> _NodesT:
        if (raise_on_overwrite and (node._explicit_inference is not None) and (node._explicit_inference is not infer_function)):
            # BUG FIX: existing/new were swapped — the message must report the
            # currently-installed inference as "already set" and the incoming
            # one as the overwrite attempt.
            raise InferenceOverwriteError('Inference already set to {existing_inference}. Trying to overwrite with {new_inference} for {node}'.format(existing_inference=node._explicit_inference, new_inference=infer_function, node=node))
        # cache wrapper avoids re-running the inference for the same node
        node._explicit_inference = _inference_tip_cached(infer_function)
        return node
    return transform
class TestBaseHandler(RapidTest):
    """BaseHandler.dispatch on an incoming message: the abstract base
    handles nothing and produces no responses."""

    def setUp(self):
        self.connection = self.create_connection()

    def test_dispatch(self):
        message = IncomingMessage(self.connection, 'hello')
        handled = BaseHandler.dispatch(self.router, message)
        self.assertFalse(handled)
        self.assertEqual(len(message.responses), 0)
class MADDPGCritic(nn.Module):
    """Centralized MADDPG critic: maps (state features, joint actions) to a
    single Q-value through a 3-layer MLP."""

    def __init__(self, scheme, args):
        super(MADDPGCritic, self).__init__()
        self.args = args
        self.n_actions = args.n_actions
        self.n_agents = args.n_agents
        # base features plus the one-hot joint action of every agent
        self.input_shape = self._get_input_shape(scheme) + self.n_actions * self.n_agents
        if self.args.obs_last_action:
            self.input_shape += self.n_actions
        self.output_type = 'q'
        self.fc1 = nn.Linear(self.input_shape, args.hidden_dim)
        self.fc2 = nn.Linear(args.hidden_dim, args.hidden_dim)
        self.fc3 = nn.Linear(args.hidden_dim, 1)

    def forward(self, inputs, actions):
        """Concatenate inputs with actions on the last dim and return Q."""
        state_action = th.cat((inputs, actions), dim=-1)
        hidden = F.relu(self.fc2(F.relu(self.fc1(state_action))))
        return self.fc3(hidden)

    def _get_input_shape(self, scheme):
        """Feature width before actions: state (+ individual obs, + agent id)."""
        shape = scheme['state']['vshape']
        if self.args.obs_individual_obs:
            shape += scheme['obs']['vshape']
        if self.args.obs_agent_id:
            shape += self.n_agents
        return shape
def test_update_all_packages(monkeypatch):
    """update_all_packages must merge packages from every root into one
    frozenset and call manage.update exactly once with it."""
    flask_pkg = PkgFile('Flask', '1.0')
    requests_pkg = PkgFile('requests', '1.0')
    private_a = PkgFile('my_private_pkg', '1.0')
    private_b = PkgFile('my_other_private_pkg', '1.0')
    packages_by_root = {
        Path('/opt/pypi'): [flask_pkg, private_a],
        Path('/data/pypi'): [requests_pkg, private_b],
    }
    # fake directory listing and capture the update call
    monkeypatch.setattr(manage, 'listdir', lambda path: packages_by_root.get(path, []))
    monkeypatch.setattr(manage, 'update', Mock(return_value=None))
    update_all_packages(roots=list(packages_by_root.keys()), destdir=None, dry_run=False, stable_only=True, ignorelist=None)
    manage.update.assert_called_once_with(frozenset([flask_pkg, requests_pkg, private_a, private_b]), None, False, True)
class Effect1046(BaseEffect):
    """Passive hull effect: boosts the max range of remote armor repair
    modules, scaled by the Gallente Cruiser ship bonus.

    NOTE(review): `handler` takes no `self` and is written in the project's
    effect-handler signature style — the ``@staticmethod`` decorator appears
    to have been stripped and is restored here; confirm against sibling
    effect classes.
    """
    type = 'passive'

    @staticmethod
    def handler(fit, src, context, projectionRange, **kwargs):
        fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Remote Armor Repair Systems')), 'maxRange', src.getModifiedItemAttr('shipBonusGC'), skill='Gallente Cruiser', **kwargs)
@pytest.mark.unit()
def test_insert_missing_modules(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None:
    """_insert_missing_modules must create intermediate parent modules,
    leave existing modules untouched, and do nothing for an empty name.

    NOTE(review): the original decorator line had degraded to a bare
    ``.unit()`` (a syntax error); ``@pytest.mark.unit()`` is the most
    plausible reconstruction — confirm against the project's registered
    markers.
    """
    monkeypatch.chdir(tmp_path)
    modules = {'xxx.project.foo': ModuleType('xxx.project.foo')}
    _insert_missing_modules(modules, 'xxx.project.foo')
    # parents 'xxx' and 'xxx.project' are synthesized
    assert (sorted(modules) == ['xxx', 'xxx.project', 'xxx.project.foo'])
    # an already-present module is left exactly as-is
    mod = ModuleType('mod', doc='My Module')
    modules = {'xxy': mod}
    _insert_missing_modules(modules, 'xxy')
    assert (modules == {'xxy': mod})
    # empty module name: nothing gets inserted
    modules = {}
    _insert_missing_modules(modules, '')
    assert (not modules)
class WaypointService(object):
    """CRUD operations for Waypoint rows via the main app's DB session,
    plus helpers to populate the record window from a GPX file.

    NOTE(review): several methods use `self.recordwindow`, which is never
    assigned in this class — presumably injected by the GUI layer; confirm.
    """
    def __init__(self, data_path=None, parent=None):
        """Keep references to the main app (DB access) and the data path."""
        logging.debug('>>')
        self.parent = parent
        # `parent` doubles as the pytrainer main object (ddbb session owner)
        self.pytrainer_main = parent
        self.data_path = data_path
        logging.debug('<<')
    def removeWaypoint(self, id_waypoint):
        """Delete the waypoint with the given id and commit."""
        logging.debug('>>')
        logging.debug(('Deleting id_waypoint=%s' % id_waypoint))
        waypoint = self.pytrainer_main.ddbb.session.query(Waypoint).filter((Waypoint.id == id_waypoint)).one()
        self.pytrainer_main.ddbb.session.delete(waypoint)
        self.pytrainer_main.ddbb.session.commit()
        logging.debug('<<')
    def updateWaypoint(self, id_waypoint, lat, lon, name, desc, sym):
        """Overwrite position, name, comment and symbol of a waypoint."""
        logging.debug('>>')
        logging.debug(('Updating waypoint id: %d with lat %s,lon %s,comment %s,name %s,sym %s' % (id_waypoint, lat, lon, desc, name, sym)))
        waypoint = self.pytrainer_main.ddbb.session.query(Waypoint).filter((Waypoint.id == id_waypoint)).one()
        waypoint.lat = lat
        waypoint.lon = lon
        waypoint.name = name
        waypoint.comment = desc
        waypoint.sym = sym
        self.pytrainer_main.ddbb.session.commit()
        logging.debug('<<')
    def addWaypoint(self, lon=None, lat=None, name=None, comment=None, sym=None):
        """Insert a new waypoint, commit, and return its generated id."""
        logging.debug('>>')
        waypoint = Waypoint(lon=lon, lat=lat, name=name, comment=comment, sym=sym)
        logging.debug(('Adding waypoint with details lat %s,lon %s,comment %s,name %s,sym %s' % (lat, lon, comment, name, sym)))
        self.pytrainer_main.ddbb.session.add(waypoint)
        self.pytrainer_main.ddbb.session.commit()
        logging.debug('<<')
        # id is populated by the flush performed during commit
        return waypoint.id
    def getwaypointInfo(self, id_waypoint):
        """Return [(lat, lon, ele, comment, time, name, sym)] for one waypoint."""
        return self.pytrainer_main.ddbb.session.execute(select(Waypoint.lat, Waypoint.lon, Waypoint.ele, Waypoint.comment, Waypoint.time, Waypoint.name, Waypoint.sym).where((Waypoint.id == id_waypoint))).all()
    def getAllWaypoints(self):
        """Return all waypoints (id, lat, lon, ele, comment, time, name, sym) ordered by name."""
        return self.pytrainer_main.ddbb.session.execute(select(Waypoint.id, Waypoint.lat, Waypoint.lon, Waypoint.ele, Waypoint.comment, Waypoint.time, Waypoint.name, Waypoint.sym).order_by(Waypoint.name)).all()
    def actualize_fromgpx(self, gpxfile):
        """Load `gpxfile` and push its data into the record window.

        NOTE(review): the warning text claims a multi-day file, but it is
        shown when len(tracks) <= 1 while len(tracks) > 1 proceeds
        normally — this branch condition looks inverted; confirm intent.
        """
        logging.debug('>>')
        from .lib.gpx import Gpx
        gpx = Gpx(self.data_path, gpxfile)
        tracks = gpx.getTrackRoutes()
        if (len(tracks) > 1):
            time = unixtime2date(tracks[0][1])
            self.recordwindow.rcd_date.set_text(time)
            self._actualize_fromgpx(gpx)
        else:
            msg = _('The gpx file seems to be a several days records. Perhaps you will need to edit your gpx file')
            from .gui.warning import Warning
            # warning dialog continues with _actualize_fromgpx on confirm
            warning = Warning(self.data_path, self._actualize_fromgpx, [gpx])
            warning.set_text(msg)
            warning.run()
        logging.debug('<<')
    def _actualize_fromgpx(self, gpx):
        """Copy distance/duration/unevenness from the gpx into the record window."""
        logging.debug('>>')
        (distance, time) = gpx.getMaxValues()
        (upositive, unegative) = gpx.getUnevenness()
        self.recordwindow.rcd_upositive.set_text(str(upositive))
        self.recordwindow.rcd_unegative.set_text(str(unegative))
        self.recordwindow.set_distance(distance)
        # gpx time is in seconds; the record window expects hours
        self.recordwindow.set_recordtime(((time / 60.0) / 60.0))
        self.recordwindow.on_calcavs_clicked(None)
        logging.debug('<<')
class FortBlackCommand(models.Model):
    """Stores a comma-separated blacklist of shell commands for the ops
    fortress module (defaults to destructive/privileged binaries).

    NOTE(review): the verbose_name strings are empty — they look like
    stripped localized text; confirm against the original source.
    """
    black_commands = models.TextField(verbose_name='', default='/bin/rm, /sbin/reboot, /sbin/halt, /sbin/shutdown, /usr/bin/passwd, /bin/su, /sbin/init, /bin/chmod, /bin/chown, /usr/sbin/visudo')
    class Meta():
        db_table = 'ops_fort_black_command'
        verbose_name = ''
        verbose_name_plural = ''
class TypeObject():
    """Assignability logic for one wrapped type.

    ``typ`` is a runtime ``type``, a ``super`` object, or a string standing in
    for a synthetic/unresolved type name.  The ``field(...)`` defaults imply a
    ``@dataclass`` decorator on a line outside this excerpt — confirm.
    """
    # The wrapped type (or super object, or synthetic string name).
    typ: Union[(type, super, str)]
    # MRO of typ plus artificial_bases; may contain strings for synthetic types.
    base_classes: Set[Union[(type, str)]] = field(default_factory=set)
    # Protocol support: whether typ is a Protocol and which members it declares.
    is_protocol: bool = False
    protocol_members: Set[str] = field(default_factory=set)
    # Derived in __post_init__ (hence init=False).
    is_thrift_enum: bool = field(init=False)
    # True for mock objects, which are assignable to anything.
    is_universally_assignable: bool = field(init=False)
    # Bases treated as-if inherited: the numeric tower and int for thrift enums.
    artificial_bases: Set[type] = field(default_factory=set, init=False)
    # Memoizes successful protocol-compatibility checks per candidate value.
    _protocol_positive_cache: Dict[(Value, BoundsMap)] = field(default_factory=dict, repr=False)
    def __post_init__(self) -> None:
        """Derive flags and the base-class set from ``typ``."""
        if isinstance(self.typ, str):
            # Synthetic name: nothing further can be derived.
            self.is_universally_assignable = False
            self.is_thrift_enum = False
            return
        if isinstance(self.typ, super):
            self.is_universally_assignable = False
        else:
            assert isinstance(self.typ, type), repr(self.typ)
            # Mock objects accept any assignment.
            self.is_universally_assignable = issubclass(self.typ, mock.NonCallableMock)
        self.is_thrift_enum = hasattr(self.typ, '_VALUES_TO_NAMES')
        self.base_classes |= set(get_mro(self.typ))
        # PEP 484 numeric tower: int is acceptable where float/complex is
        # expected, and float where complex is expected.
        if (self.typ is int):
            self.artificial_bases.add(float)
            self.artificial_bases.add(complex)
        if (self.typ is float):
            self.artificial_bases.add(complex)
        # Thrift enums behave as plain ints at runtime.
        if self.is_thrift_enum:
            self.artificial_bases.add(int)
        self.base_classes |= self.artificial_bases
    def is_assignable_to_type(self, typ: type) -> bool:
        """Return True if any base of this type is a subclass of ``typ``."""
        for base in self.base_classes:
            if isinstance(base, str):
                # Synthetic names cannot be compared with issubclass().
                continue
            elif safe_issubclass(base, typ):
                return True
        return self.is_universally_assignable
    def is_assignable_to_type_object(self, other: 'TypeObject') -> bool:
        """Like is_assignable_to_type(), but against another TypeObject."""
        if isinstance(other.typ, super):
            return False
        if isinstance(other.typ, str):
            # Synthetic target: match by name, protocol-ness, or mock.
            return (self.is_universally_assignable or other.is_protocol or (other.typ in self.base_classes))
        return self.is_assignable_to_type(other.typ)
    def can_assign(self, self_val: Value, other_val: Union[(KnownValue, TypedValue, SubclassValue)], ctx: CanAssignContext) -> CanAssign:
        """Check whether ``other_val`` may be assigned where this type is expected.

        Returns a (possibly empty) bounds map on success, CanAssignError on failure.
        """
        other = other_val.get_type_object(ctx)
        if other.is_universally_assignable:
            return {}
        if isinstance(self.typ, super):
            # super objects only accept other super objects.
            if isinstance(other.typ, super):
                return {}
            return CanAssignError(f'Cannot assign to super object {self}')
        if (not self.is_protocol):
            if other.is_protocol:
                # A protocol value only fits a non-protocol slot typed as object.
                if (self.typ is object):
                    return {}
                return CanAssignError(f'Cannot assign protocol {other_val} to non-protocol {self}')
            if isinstance(self.typ, str):
                # Synthetic expected type: nominal match by name.
                if safe_in(self.typ, other.base_classes):
                    return {}
                return CanAssignError(f'Cannot assign {other_val} to {self}')
            else:
                # Nominal check: some base of other must be (a subclass of) self.typ.
                for base in other.base_classes:
                    if (base is self.typ):
                        return {}
                    if (isinstance(base, type) and safe_issubclass(base, self.typ)):
                        return {}
                return CanAssignError(f'Cannot assign {other_val} to {self}')
        else:
            if isinstance(other.typ, super):
                return CanAssignError(f'Cannot assign super object {other_val} to protocol {self}')
            # Fast path: previously proven compatible.
            bounds_map = self._protocol_positive_cache.get(other_val)
            if (bounds_map is not None):
                return bounds_map
            # Break recursion between mutually-referential protocols.
            if ctx.can_assume_compatibility(self, other):
                return {}
            with ctx.assume_compatibility(self, other):
                result = self._is_compatible_with_protocol(self_val, other_val, ctx)
                # Retry against artificial bases (e.g. int offered where the
                # protocol is satisfied by float).
                if (isinstance(result, CanAssignError) and other.artificial_bases):
                    for base in other.artificial_bases:
                        subresult = self._is_compatible_with_protocol(self_val, TypedValue(base), ctx)
                        if (not isinstance(subresult, CanAssignError)):
                            result = subresult
                            break
                if (not isinstance(result, CanAssignError)):
                    # Memoize only successes; failures may be context-dependent.
                    self._protocol_positive_cache[other_val] = result
            return result
    def _is_compatible_with_protocol(self, self_val: Value, other_val: Value, ctx: CanAssignContext) -> CanAssign:
        """Structurally check every protocol member of self against ``other_val``."""
        bounds_maps = []
        for member in self.protocol_members:
            expected = ctx.get_attribute_from_value(self_val, member, prefer_typeshed=True)
            if (member == '__call__'):
                # For __call__ the candidate value itself is the callable.
                actual = other_val
            elif ((member == '__hash__') and _should_use_permissive_dunder_hash(other_val)):
                actual = AnyValue(AnySource.inference)
            else:
                actual = ctx.get_attribute_from_value(other_val, member)
            if (actual is UNINITIALIZED_VALUE):
                can_assign = CanAssignError(f'{other_val} has no attribute {member!r}')
            else:
                can_assign = expected.can_assign(actual, ctx)
                if isinstance(can_assign, CanAssignError):
                    # Wrap the member-level error with context.
                    can_assign = CanAssignError(f'Value of protocol member {member!r} conflicts', [can_assign])
            if isinstance(can_assign, CanAssignError):
                return can_assign
            bounds_maps.append(can_assign)
        return unify_bounds_maps(bounds_maps)
    def is_instance(self, obj: object) -> bool:
        """isinstance() check that never raises."""
        return safe_isinstance(obj, self.typ)
    def is_exactly(self, types: Container[type]) -> bool:
        """True if the wrapped type is one of ``types`` (no subclassing)."""
        return (self.typ in types)
    def is_metatype_of(self, other: 'TypeObject') -> bool:
        """True if this type is a metaclass and ``other`` is an instance of it."""
        if (isinstance(self.typ, type) and isinstance(other.typ, type)):
            return (issubclass(self.typ, type) and safe_isinstance(other.typ, self.typ))
        else:
            return False
    def has_attribute(self, attr: str, ctx: CanAssignContext) -> bool:
        """True if ``attr`` exists on the type: a protocol member, or present
        in some base class's __dict__."""
        if self.is_protocol:
            return (attr in self.protocol_members)
        for base in self.base_classes:
            try:
                present = (attr in base.__dict__)
            except Exception:
                # Synthetic (string) bases and exotic objects lack __dict__.
                present = False
            if present:
                return True
        return False
    def __str__(self) -> str:
        base = stringify_object(self.typ)
        if self.is_protocol:
            return f"{base} (Protocol with members {', '.join(map(repr, self.protocol_members))})"
        return base
class nnUNetTrainer_50epochs(nnUNetTrainer):
    """nnU-Net trainer variant that shortens the training run to 50 epochs."""
    def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool=True, device: torch.device=torch.device('cuda')):
        super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device)
        # Override the base trainer's (much larger) default epoch budget.
        self.num_epochs = 50
def is_valid_unlock(unlock: ReceiveUnlock, channel_state: NettingChannelState, sender_state: NettingChannelEndState) -> PendingLocksStateOrError:
    """Validate a ReceiveUnlock message against the sender's channel end.

    Returns (is_valid, error_message, pending_locks); on success pending_locks
    is the sender's lock set with the unlocked lock removed.
    """
    received_balance_proof = unlock.balance_proof
    current_balance_proof = get_current_balanceproof(sender_state)
    lock = get_lock(sender_state, unlock.secrethash)
    # The secret must correspond to a known pending lock.
    if lock is None:
        msg = 'Invalid Unlock message. There is no corresponding lock for {}'.format(encode_hex(unlock.secrethash))
        return (False, msg, None)
    pending_locks = compute_locks_without(sender_state.pending_locks, EncodedData(bytes(lock.encoded)))
    if pending_locks is None:
        msg = f'Invalid unlock message. The lock is unknown {encode_hex(lock.encoded)}'
        return (False, msg, None)
    # Expected values after removing the lock and moving its amount to
    # transferred_amount.
    locksroot_without_lock = compute_locksroot(pending_locks)
    _, _, current_transferred_amount, current_locked_amount = current_balance_proof
    expected_transferred_amount = current_transferred_amount + TokenAmount(lock.amount)
    expected_locked_amount = current_locked_amount - lock.amount
    is_valid_balance_proof = is_balance_proof_usable_onchain(received_balance_proof=received_balance_proof, channel_state=channel_state, sender_state=sender_state)
    if not is_valid_balance_proof:
        msg = f'Invalid Unlock message. {is_valid_balance_proof.as_error_message}'
        return (False, msg, None)
    if received_balance_proof.locksroot != locksroot_without_lock:
        msg = "Invalid Unlock message. Balance proof's locksroot didn't match, expected: {} got: {}.".format(encode_hex(locksroot_without_lock), encode_hex(received_balance_proof.locksroot))
        return (False, msg, None)
    if received_balance_proof.transferred_amount != expected_transferred_amount:
        msg = "Invalid Unlock message. Balance proof's wrong transferred_amount, expected: {} got: {}.".format(expected_transferred_amount, received_balance_proof.transferred_amount)
        return (False, msg, None)
    if received_balance_proof.locked_amount != expected_locked_amount:
        msg = "Invalid Unlock message. Balance proof's wrong locked_amount, expected: {} got: {}.".format(expected_locked_amount, received_balance_proof.locked_amount)
        return (False, msg, None)
    return (True, None, pending_locks)
_tf
class TFMT5ModelTest(unittest.TestCase):
    """Checks that resizing MT5's embedding matrix tracks the tokenizer size."""
    def test_resize_embeddings(self):
        model = TFMT5ForConditionalGeneration.from_pretrained('google/mt5-small')
        vocab_size_before = model.get_input_embeddings().weight.shape[0]
        self.assertEqual(vocab_size_before, model.config.vocab_size)
        tokenizer = T5Tokenizer.from_pretrained('google/mt5-small')
        # Adding special tokens grows the tokenizer beyond the original vocab.
        tokenizer.add_special_tokens({'bos_token': '', 'eos_token': ''})
        model._resize_token_embeddings(len(tokenizer))
        vocab_size_after = model.get_input_embeddings().weight.shape[0]
        self.assertEqual(vocab_size_after, len(tokenizer))
        self.assertNotEqual(vocab_size_after, vocab_size_before)
def temp_filename(*args, as_path=False, **kwargs):
    """Create a temporary file, yield its (normalized) name, delete it afterwards.

    The descriptor is closed immediately; the caller only gets the filename
    (a Path when ``as_path`` is true).  Presumably wrapped in
    contextlib.contextmanager by a decorator outside this excerpt.

    Fix: cleanup now runs in a ``finally`` block, so the temporary file is
    removed even when the caller's block raises (previously it leaked).
    """
    from tests import mkstemp
    # Defensive: drop a duplicate 'as_path' should a caller smuggle it in kwargs.
    kwargs.pop('as_path', None)
    fd, filename = mkstemp(*args, **kwargs)
    os.close(fd)
    normalized = normalize_path(filename)
    try:
        yield Path(normalized) if as_path else normalized
    finally:
        try:
            os.remove(filename)
        except OSError as e:
            # Tolerate the caller having already removed the file.
            if e.errno != errno.ENOENT:
                raise
.skipif((not PY311_PLUS), reason='Requires Python 3.11 or higher')
def test_star_exceptions() -> None:
    """Exercise astroid's TryStar node (PEP 654 try/except*): stringification,
    block ranges, handlers, else and finally bodies."""
    code = textwrap.dedent('\n try:\n raise ExceptionGroup("group", [ValueError(654)])\n except* ValueError:\n print("Handling ValueError")\n except* TypeError:\n print("Handling TypeError")\n else:\n sys.exit(127)\n finally:\n sys.exit(0)')
    node = extract_node(code)
    assert isinstance(node, TryStar)
    assert node.as_string() == code.replace('"', "'").strip()
    assert isinstance(node.body[0], Raise)
    # Line 1 spans the whole statement; every other line is its own block.
    assert node.block_range(1) == (1, 11)
    for line in range(2, 12):
        assert node.block_range(line) == (line, line)
    assert node.handlers
    first_handler = node.handlers[0]
    assert isinstance(first_handler, ExceptHandler)
    assert first_handler.type.name == 'ValueError'
    else_stmt = node.orelse[0]
    assert isinstance(else_stmt, Expr)
    assert else_stmt.value.args[0].value == 127
    finally_stmt = node.finalbody[0]
    assert isinstance(finally_stmt, Expr)
    assert finally_stmt.value.args[0].value == 0
class CosFace(torch.nn.Module):
    """CosFace (additive cosine margin) head: subtracts a fixed margin ``m``
    from each sample's target-class logit, then scales all logits by ``s``.
    Rows labelled -1 carry no valid target and receive no margin."""
    def __init__(self, s=64.0, m=0.4):
        super(CosFace, self).__init__()
        self.s = s  # logit scale
        self.m = m  # additive cosine margin
    def forward(self, logits: torch.Tensor, labels: torch.Tensor):
        # Rows with a real label (everything except the -1 sentinel).
        valid_rows = torch.where(labels != -1)[0]
        target_cols = labels[valid_rows].view(-1)
        # Apply the margin in place on the target logits only, then scale.
        logits[valid_rows, target_cols] = logits[valid_rows, target_cols] - self.m
        return logits * self.s
class Question(Model, TranslationMixin):
    """A questionnaire question: URI-addressable, translatable into up to five
    languages, optionally bound to an attribute, optionsets and display
    conditions."""
    # Related names prefetched together by prefetch_elements().
    prefetch_lookups = ('conditions', 'optionsets')
    # --- URI and bookkeeping ---
    uri = models.URLField(max_length=800, blank=True, default='', verbose_name=_('URI'), help_text=_('The Uniform Resource Identifier of this question (auto-generated).'))
    uri_prefix = models.URLField(max_length=256, verbose_name=_('URI Prefix'), help_text=_('The prefix for the URI of this question.'))
    uri_path = models.CharField(max_length=512, blank=True, default='', verbose_name=_('URI Path'), help_text=_('The path for the URI of this question.'))
    comment = models.TextField(blank=True, default='', verbose_name=_('Comment'), help_text=_('Additional internal information about this question.'))
    locked = models.BooleanField(default=False, verbose_name=_('Locked'), help_text=_('Designates whether this question can be changed.'))
    attribute = models.ForeignKey(Attribute, blank=True, null=True, on_delete=models.SET_NULL, related_name='questions', verbose_name=_('Attribute'), help_text=_('The attribute this question belongs to.'))
    is_collection = models.BooleanField(default=False, verbose_name=_('is collection'), help_text=_('Designates whether this question is a collection.'))
    is_optional = models.BooleanField(default=False, verbose_name=_('is optional'), help_text=_('Designates whether this question is optional.'))
    editors = models.ManyToManyField(Site, related_name='questions_as_editor', blank=True, verbose_name=_('Editors'), help_text=_('The sites that can edit this question (in a multi site setup).'))
    # --- translated fields, one column per language (lang1..lang5) ---
    help_lang1 = models.TextField(blank=True, default='', verbose_name=_('Help (primary)'), help_text=_('The help text for this question (in the primary language).'))
    help_lang2 = models.TextField(blank=True, default='', verbose_name=_('Help (secondary)'), help_text=_('The help text for this question (in the secondary language).'))
    help_lang3 = models.TextField(blank=True, default='', verbose_name=_('Help (tertiary)'), help_text=_('The help text for this question (in the tertiary language).'))
    help_lang4 = models.TextField(blank=True, default='', verbose_name=_('Help (quaternary)'), help_text=_('The help text for this question (in the quaternary language).'))
    help_lang5 = models.TextField(blank=True, default='', verbose_name=_('Help (quinary)'), help_text=_('The help text for this question (in the quinary language).'))
    text_lang1 = models.TextField(blank=True, default='', verbose_name=_('Text (primary)'), help_text=_('The text for this question (in the primary language).'))
    text_lang2 = models.TextField(blank=True, default='', verbose_name=_('Text (secondary)'), help_text=_('The text for this question (in the secondary language).'))
    text_lang3 = models.TextField(blank=True, default='', verbose_name=_('Text (tertiary)'), help_text=_('The text for this question (in the tertiary language).'))
    text_lang4 = models.TextField(blank=True, default='', verbose_name=_('Text (quaternary)'), help_text=_('The text for this question (in the quaternary language).'))
    text_lang5 = models.TextField(blank=True, default='', verbose_name=_('Text (quinary)'), help_text=_('The text for this question (in the quinary language).'))
    default_text_lang1 = models.TextField(blank=True, default='', verbose_name=_('Default text value (primary)'), help_text=_('The default text value for this question (in the primary language).'))
    default_text_lang2 = models.TextField(blank=True, default='', verbose_name=_('Default text value (secondary)'), help_text=_('The default text value for this question (in the secondary language).'))
    default_text_lang3 = models.TextField(blank=True, default='', verbose_name=_('Default text value (tertiary)'), help_text=_('The default text value for this question (in the tertiary language).'))
    default_text_lang4 = models.TextField(blank=True, default='', verbose_name=_('Default text value (quaternary)'), help_text=_('The default text value for this question (in the quaternary language).'))
    default_text_lang5 = models.TextField(default='', blank=True, verbose_name=_('Default text value (quinary)'), help_text=_('The default text value for this question (in the quinary language).'))
    # --- default values for option-based widgets ---
    default_option = models.ForeignKey(Option, blank=True, null=True, on_delete=models.SET_NULL, verbose_name=_('Default option'), help_text=_('The default option for this question. To be used with regular optionsets.'))
    default_external_id = models.CharField(max_length=256, blank=True, verbose_name=_('Default external id'), help_text=_('The default external id for this question. To be used with dynamic optionsets.'))
    verbose_name_lang1 = models.CharField(max_length=256, blank=True, verbose_name=_('Name (primary)'), help_text=_('The name displayed for this question (in the primary language).'))
    verbose_name_lang2 = models.CharField(max_length=256, blank=True, verbose_name=_('Name (secondary)'), help_text=_('The name displayed for this question (in the secondary language).'))
    verbose_name_lang3 = models.CharField(max_length=256, blank=True, verbose_name=_('Name (tertiary)'), help_text=_('The name displayed for this question (in the tertiary language).'))
    verbose_name_lang4 = models.CharField(max_length=256, blank=True, verbose_name=_('Name (quaternary)'), help_text=_('The name displayed for this question (in the quaternary language).'))
    verbose_name_lang5 = models.CharField(max_length=256, blank=True, verbose_name=_('Name (quinary)'), help_text=_('The name displayed for this question (in the quinary language).'))
    # --- widget / value configuration ---
    widget_type = models.CharField(max_length=16, verbose_name=_('Widget type'), help_text=_('Type of widget for this question.'))
    value_type = models.CharField(max_length=8, choices=VALUE_TYPE_CHOICES, verbose_name=_('Value type'), help_text=_('Type of value for this question.'))
    minimum = models.FloatField(null=True, blank=True, verbose_name=_('Minimum'), help_text=_('Minimal value for this question.'))
    maximum = models.FloatField(null=True, blank=True, verbose_name=_('Maximum'), help_text=_('Maximum value for this question.'))
    step = models.FloatField(null=True, blank=True, verbose_name=_('Step'), help_text=_('Step in which the value for this question can be incremented/decremented.'))
    unit = models.CharField(max_length=64, blank=True, verbose_name=_('Unit'), help_text=_('Unit for this question.'))
    width = models.IntegerField(null=True, blank=True, verbose_name=_('Width'), help_text=_('Width for the widget of this question (optional, full width: 12).'))
    optionsets = models.ManyToManyField('options.OptionSet', blank=True, related_name='questions', verbose_name=_('Option sets'), help_text=_('Option sets for this question.'))
    conditions = models.ManyToManyField(Condition, blank=True, related_name='questions', verbose_name=_('Conditions'), help_text=_('List of conditions evaluated for this question.'))
    objects = QuestionManager()
    class Meta():
        ordering = ('uri',)
        verbose_name = _('Question')
        verbose_name_plural = _('Questions')
    def __str__(self):
        return self.uri
    def save(self, *args, **kwargs):
        # Rebuild the URI from its parts on every save.
        self.uri = self.build_uri(self.uri_prefix, self.uri_path)
        super().save(*args, **kwargs)
    # NOTE(review): the accessors below (and the bare `_property` lines) read
    # like @property-decorated methods whose decorators were mangled in this
    # excerpt — confirm against the original file.
    def text(self):
        """Translated question text for the active language."""
        return self.trans('text')
    def help(self):
        """Translated help text for the active language."""
        return self.trans('help')
    def default_text(self):
        """Translated default text value for the active language."""
        return self.trans('default_text')
    def verbose_name(self):
        """Translated display name for the active language."""
        return self.trans('verbose_name')
    _property
    def is_locked(self):
        # Locked directly, or via any containing page or questionset.
        return (self.locked or any((page.is_locked for page in self.pages.all())) or any((questionset.is_locked for questionset in self.questionsets.all())))
    _property
    def has_conditions(self):
        return self.conditions.exists()
    _property
    def descendants(self):
        # Questions are leaves of the catalog tree.
        return []
    def prefetch_elements(self):
        """Prefetch the m2m relations used when serializing this question."""
        models.prefetch_related_objects([self], *self.prefetch_lookups)
    def to_dict(self, *ancestors):
        """Serialize this question (plus its ancestor chain) for the API/frontend."""
        return {'id': self.id, 'uri': self.uri, 'text': self.text, 'is_collection': self.is_collection, 'attribute': (self.attribute.uri if self.attribute else None), 'ancestors': [{'id': ancestor.id, 'is_collection': ancestor.is_collection, 'verbose_name': ancestor.verbose_name, 'attribute': (ancestor.attribute.uri if ancestor.attribute else None), 'conditions': [condition.uri for condition in ancestor.conditions.all()]} for ancestor in ancestors], 'conditions': [condition.uri for condition in self.conditions.all()]}
    # NOTE(review): takes `cls` — presumably a @classmethod whose decorator is
    # missing from this excerpt.
    def build_uri(cls, uri_prefix, uri_path):
        """Join prefix and path into the canonical question URI."""
        if (not uri_path):
            raise RuntimeError('uri_path is missing')
        return join_url((uri_prefix or settings.DEFAULT_URI_PREFIX), '/questions/', uri_path)
class Inhibitor():
    """Idempotent wrapper around an inhibitor source: repeated
    inhibit()/uninhibit() calls toggle the underlying source at most once.
    Also usable as a context manager that inhibits for the duration of the
    ``with`` block."""
    def __init__(self, source: InhibitorSource):
        self.source = source
        # Tracks whether the source is currently inhibited by us.
        self.inhibited = False
    def inhibit(self):
        """Activate the source's inhibition; no-op if already active."""
        if not self.inhibited:
            self.source.inhibit()
            self.inhibited = True
    def uninhibit(self):
        """Release the source's inhibition; no-op if not active."""
        if self.inhibited:
            self.source.uninhibit()
            self.inhibited = False
    def __enter__(self):
        self.inhibit()
        # Fix: return self so `with Inhibitor(src) as inh:` binds the
        # inhibitor (previously the context expression bound None).
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.uninhibit()
class Behavior():
    """One matched Quark behaviour: ``methodCaller`` invokes ``firstAPI`` and
    then ``secondAPI``.  Provides helpers to inspect the strings and arguments
    flowing through the two API calls."""
    def __init__(self, quarkResultInstance: 'QuarkResult', methodCaller: Method, firstAPI: Method, secondAPI: Method) -> None:
        self.quarkResult = quarkResultInstance
        self.methodCaller = methodCaller
        self.firstAPI = firstAPI
        self.secondAPI = secondAPI
        # Back-references so each Method can reach its owning behaviour.
        self.methodCaller.behavior = self
        self.firstAPI.behavior = self
        self.secondAPI.behavior = self
    def hasString(self, pattern: str, isRegex=False) -> List[str]:
        """Return the (deduplicated) keywords matching ``pattern`` found in the
        arguments of the two API calls inside the caller method."""
        usageTable = self.quarkResult.quark._evaluate_method(self.methodCaller.innerObj)
        result_generator = self.quarkResult.quark.check_parameter_on_single_method(usage_table=usageTable, first_method=self.firstAPI.innerObj, second_method=self.secondAPI.innerObj, keyword_item_list=[(pattern,), (pattern,)], regex=isRegex)
        # Flatten and deduplicate the keyword lists across all matches.
        found_keywords = {keyword for (_, keyword_list) in result_generator for keyword in keyword_list}
        return list(found_keywords)
    def hasUrl(self) -> List[str]:
        """Return all URL-looking strings used by this behaviour."""
        return self.hasString(URL_REGEX, True)
    def getParamValues(self) -> List[Any]:
        """Return the parsed argument values passed to the second API call.

        Matches everything, keeps the longest match, and strips its enclosing
        delimiters before splitting into individual arguments.
        """
        allResult = self.hasString('.*', True)
        argumentStr = max(allResult, key=len)[1:(- 1)]
        return get_arguments_from_argument_str(argumentStr, self.secondAPI.descriptor)
    def isArgFromMethod(self, targetMethod: List[str]) -> bool:
        """Return True if ``targetMethod`` (class name, method name, descriptor)
        appears as an argument source of this behaviour."""
        (className, methodName, descriptor) = targetMethod
        pattern = PyEval.get_method_pattern(className, methodName, descriptor)
        return bool(self.hasString(pattern))
    # NOTE(review): annotation corrected from List[str] — the method appends
    # Method objects, not strings.
    def getMethodsInArgs(self) -> List[Method]:
        """Return Method objects for every smali method reference found inside
        the argument values of the second API call."""
        METHOD_REGEX = 'L(.*?)\\;\\('
        methodCalled = []
        for param in self.getParamValues():
            for result in re.findall(METHOD_REGEX, param):
                # Rebuild the smali-style class name, method name, descriptor.
                className = ('L' + result.split('->')[0])
                methodName = re.findall('->(.*?)\\(', result)[0]
                descriptor = (result.split(methodName)[(- 1)] + ';')
                methodObj_list = self.quarkResult.quark.apkinfo.find_method(class_name=className, method_name=methodName, descriptor=descriptor)
                for methodObj in methodObj_list:
                    methodCalled.append(Method(methodObj=methodObj))
        return methodCalled
def AddOraclePath(train_info, valid_info, test_info, info_dict):
    """Attach an 'oracle' reference wav path to every entry of ``info_dict``.

    Builds a per-speaker index of (text, wav_path) pairs from the train/valid/
    test metadata, then appends to each info_dict entry the wav path of a
    reference utterance: a recording by the *target* speaker whose text equals
    the *source* text.  None is appended when no such recording exists; when
    several match, the last one wins (preserving the original behaviour).

    Fix: the three copy-pasted indexing loops are collapsed into one pass
    over itertools.chain of the splits (order preserved: train, valid, test).
    """
    from itertools import chain
    orc_meta_dict = defaultdict(list)
    for item in chain(train_info, valid_info, test_info):
        orc_meta_dict[item['speaker']].append([item['text'], item['wav_path']])
    for entries in info_dict.values():
        for entry in entries:
            src_text = entry[0]['text']
            tgt_spk = entry[1]['speaker']
            # Slot for the oracle path; stays None if nothing matches.
            entry.append(None)
            for ref_txt, ref_path in orc_meta_dict[tgt_spk]:
                if src_text == ref_txt:
                    entry[-1] = ref_path
    return info_dict
def install_hatch_project(context: Context, path: Path) -> None:
    """Pip-install a hatch-managed project in editable mode, including the
    extra features and dependencies of its default hatch environment."""
    pyproject = toml.load(path / 'pyproject.toml')
    default_env = pyproject['tool']['hatch']['envs'].get('default', {})
    features = default_env.get('features', [])
    dependencies = default_env.get('dependencies', [])
    context.run(f"pip install -e '.[{','.join(features)}]'")
    context.run(f"pip install {' '.join(map(repr, dependencies))}")
def test_spatialdropout1d_legacy_interface():
    """The legacy `p=` kwarg, the new `rate=` kwarg and a positional rate must
    all yield identical layer configs."""
    legacy = keras.layers.SpatialDropout1D(p=0.6, name='sd1d')
    by_keyword = keras.layers.SpatialDropout1D(rate=0.6, name='sd1d')
    by_position = keras.layers.SpatialDropout1D(0.6, name='sd1d')
    legacy_config = json.dumps(legacy.get_config())
    assert legacy_config == json.dumps(by_keyword.get_config())
    assert legacy_config == json.dumps(by_position.get_config())
class CompatibilityFilesTests(unittest.TestCase):
    """Tests for the CompatibilityFiles adapter over a legacy resource loader.

    NOTE(review): ``package`` and ``files`` are used as attributes below
    (``self.files / 'a'``), so both were presumably decorated with @property
    in the original — the decorators are not visible in this excerpt.
    """
    def package(self):
        # Fake package whose loader exposes one file-like resource and lists
        # three child entries.
        bytes_data = io.BytesIO(b'Hello, world!')
        return util.create_package(file=bytes_data, path='some_path', contents=('a', 'b', 'c'))
    def files(self):
        return resources.files(self.package)
    def test_spec_path_iter(self):
        self.assertEqual(sorted((path.name for path in self.files.iterdir())), ['a', 'b', 'c'])
    def test_child_path_iter(self):
        # Children are leaves: iterating them yields nothing.
        self.assertEqual(list((self.files / 'a').iterdir()), [])
    def test_orphan_path_iter(self):
        # Paths below a child ("orphans") also iterate empty.
        self.assertEqual(list(((self.files / 'a') / 'a').iterdir()), [])
        self.assertEqual(list((((self.files / 'a') / 'a') / 'a').iterdir()), [])
    def test_spec_path_is(self):
        # The root is neither a file nor a directory.
        self.assertFalse(self.files.is_file())
        self.assertFalse(self.files.is_dir())
    def test_child_path_is(self):
        self.assertTrue((self.files / 'a').is_file())
        self.assertFalse((self.files / 'a').is_dir())
    def test_orphan_path_is(self):
        self.assertFalse(((self.files / 'a') / 'a').is_file())
        self.assertFalse(((self.files / 'a') / 'a').is_dir())
        self.assertFalse((((self.files / 'a') / 'a') / 'a').is_file())
        self.assertFalse((((self.files / 'a') / 'a') / 'a').is_dir())
    def test_spec_path_name(self):
        self.assertEqual(self.files.name, 'testingpackage')
    def test_child_path_name(self):
        self.assertEqual((self.files / 'a').name, 'a')
    def test_orphan_path_name(self):
        self.assertEqual(((self.files / 'a') / 'b').name, 'b')
        self.assertEqual((((self.files / 'a') / 'b') / 'c').name, 'c')
    def test_spec_path_open(self):
        self.assertEqual(self.files.read_bytes(), b'Hello, world!')
        self.assertEqual(self.files.read_text(), 'Hello, world!')
    def test_child_path_open(self):
        self.assertEqual((self.files / 'a').read_bytes(), b'Hello, world!')
        self.assertEqual((self.files / 'a').read_text(), 'Hello, world!')
    def test_orphan_path_open(self):
        # Orphan paths have no backing resource to read.
        with self.assertRaises(FileNotFoundError):
            ((self.files / 'a') / 'b').read_bytes()
        with self.assertRaises(FileNotFoundError):
            (((self.files / 'a') / 'b') / 'c').read_bytes()
    def test_open_invalid_mode(self):
        with self.assertRaises(ValueError):
            self.files.open('0')
    def test_orphan_path_invalid(self):
        # OrphanPath requires at least one path segment.
        with self.assertRaises(ValueError):
            CompatibilityFiles.OrphanPath()
    def test_wrap_spec(self):
        spec = wrap_spec(self.package)
        self.assertIsInstance(spec.loader.get_resource_reader(None), CompatibilityFiles)
def evaluate(dataloader, model, criterion, postprocessors, confusion, config, args, thresh):
    """Run validation over ``dataloader`` and accumulate results in ``confusion``.

    Moves each target dict to the GPU, runs the model, selects polylines above
    ``thresh``, Hausdorff-matches them against ground truth, and updates the
    confusion accumulator (exceptions there are logged and skipped).
    Returns the updated ``confusion``.

    Fixes: removed a duplicated ``cuda_targets = []``, replaced the tensor-vs-
    None ``==`` comparison with ``is None``, dropped a redundant second
    ``seq_images.cuda()``, and collapsed 17 copy-pasted ``.cuda()`` lines.
    """
    # Target-dict keys holding tensors that must be moved to the GPU;
    # 'outgoings'/'incomings' stay on the host unchanged.
    _CUDA_TARGET_KEYS = (
        'calib', 'center_img', 'labels', 'roads', 'control_points',
        'con_matrix', 'endpoints', 'mask', 'bev_mask', 'obj_corners',
        'obj_converted', 'obj_exists', 'init_point_matrix',
        'sorted_control_points', 'grid_sorted_control_points',
        'sort_index', 'left_traffic',
    )
    model.eval()
    criterion.eval()
    logging.error('VALIDATION')
    for i, batch in enumerate(tqdm(dataloader)):
        seq_images, targets, _ = batch
        # The dataloader yields None images for unusable samples — skip them.
        if seq_images is None:
            continue
        seq_images = seq_images.cuda()
        cuda_targets = []
        for b in targets:
            temp_dict = {key: b[key].cuda() for key in _CUDA_TARGET_KEYS}
            temp_dict['outgoings'] = b['outgoings']
            temp_dict['incomings'] = b['incomings']
            cuda_targets.append(temp_dict)
        outputs = model(seq_images, cuda_targets[0]['calib'], cuda_targets[0]['grid_sorted_control_points'], targets[0]['left_traffic'], thresh=thresh, training=args.use_gt)
        out = vis_tools.get_selected_polylines(outputs, thresh=thresh)
        (hausdorff_static_dist, hausdorff_static_idx, hausdorff_gt, out) = vis_tools.hausdorff_match(out, targets[0])
        try:
            confusion.update(out, hausdorff_gt, hausdorff_static_idx, targets[0], static=True, polyline=True)
        except Exception as e:
            # Best-effort accumulation: log and continue with the next batch.
            logging.error('EXCEPTION IN CONFUSION ')
            logging.error(str(e))
            continue
    return confusion
class IntegrationMultiModule(xt.EditableModule):
    """Editable module returning (cos(a*x + b*c), sin(a*x + b*c))."""
    def __init__(self, a, b):
        self.a = a
        self.b = b
    def forward(self, x, c):
        # Both outputs share the same phase; compute it once.
        phase = self.a * x + self.b * c
        return torch.cos(phase), torch.sin(phase)
    def getparamnames(self, methodname, prefix=''):
        # Both parameters participate in every method.
        return [prefix + 'a', prefix + 'b']
class Plugin():
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.reset_counters()
def reset_counters(self):
self.called_preparse = 0
self.called_postparsing = 0
self.called_precmd = 0
self.called_postcmd = 0
self.called_cmdfinalization = 0
def prepost_hook_one(self) -> None:
self.poutput('one')
def prepost_hook_two(self) -> None:
self.poutput('two')
def prepost_hook_too_many_parameters(self, param) -> None:
pass
def prepost_hook_with_wrong_return_annotation(self) -> bool:
pass
def preparse(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
self.called_preparse += 1
return data
def postparse_hook(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
self.called_postparsing += 1
return data
def postparse_hook_stop(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
self.called_postparsing += 1
data.stop = True
return data
def postparse_hook_emptystatement(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
self.called_postparsing += 1
raise exceptions.EmptyStatement
def postparse_hook_exception(self, data: cmd2.plugin.PostparsingData) -> cmd2.plugin.PostparsingData:
self.called_postparsing += 1
raise ValueError
def postparse_hook_too_many_parameters(self, data1, data2) -> cmd2.plugin.PostparsingData:
pass
def postparse_hook_undeclared_parameter_annotation(self, data) -> cmd2.plugin.PostparsingData:
pass
def postparse_hook_wrong_parameter_annotation(self, data: str) -> cmd2.plugin.PostparsingData:
pass
def postparse_hook_undeclared_return_annotation(self, data: cmd2.plugin.PostparsingData):
pass
def postparse_hook_wrong_return_annotation(self, data: cmd2.plugin.PostparsingData) -> str:
pass
def precmd(self, statement: cmd2.Statement) -> cmd2.Statement:
self.called_precmd += 1
return statement
def precmd_hook(self, data: plugin.PrecommandData) -> plugin.PrecommandData:
self.called_precmd += 1
return data
def precmd_hook_emptystatement(self, data: plugin.PrecommandData) -> plugin.PrecommandData:
self.called_precmd += 1
raise exceptions.EmptyStatement
def precmd_hook_exception(self, data: plugin.PrecommandData) -> plugin.PrecommandData:
self.called_precmd += 1
raise ValueError
def precmd_hook_not_enough_parameters(self) -> plugin.PrecommandData:
pass
def precmd_hook_too_many_parameters(self, one: plugin.PrecommandData, two: str) -> plugin.PrecommandData:
return one
def precmd_hook_no_parameter_annotation(self, data) -> plugin.PrecommandData:
return data
def precmd_hook_wrong_parameter_annotation(self, data: str) -> plugin.PrecommandData:
return data
def precmd_hook_no_return_annotation(self, data: plugin.PrecommandData):
return data
def precmd_hook_wrong_return_annotation(self, data: plugin.PrecommandData) -> cmd2.Statement:
return self.statement_parser.parse('hi there')
def postcmd(self, stop: bool, statement: cmd2.Statement) -> bool:
self.called_postcmd += 1
return stop
def postcmd_hook(self, data: plugin.PostcommandData) -> plugin.PostcommandData:
self.called_postcmd += 1
return data
def postcmd_hook_exception(self, data: plugin.PostcommandData) -> plugin.PostcommandData:
self.called_postcmd += 1
raise ZeroDivisionError
def postcmd_hook_not_enough_parameters(self) -> plugin.PostcommandData:
pass
def postcmd_hook_too_many_parameters(self, one: plugin.PostcommandData, two: str) -> plugin.PostcommandData:
return one
def postcmd_hook_no_parameter_annotation(self, data) -> plugin.PostcommandData:
return data
def postcmd_hook_wrong_parameter_annotation(self, data: str) -> plugin.PostcommandData:
return data
def postcmd_hook_no_return_annotation(self, data: plugin.PostcommandData):
return data
def postcmd_hook_wrong_return_annotation(self, data: plugin.PostcommandData) -> cmd2.Statement:
return self.statement_parser.parse('hi there')
def cmdfinalization_hook(self, data: plugin.CommandFinalizationData) -> plugin.CommandFinalizationData:
self.called_cmdfinalization += 1
return data
def cmdfinalization_hook_stop(self, data: cmd2.plugin.CommandFinalizationData) -> cmd2.plugin.CommandFinalizationData:
self.called_cmdfinalization += 1
data.stop = True
return data
def cmdfinalization_hook_exception(self, data: cmd2.plugin.CommandFinalizationData) -> cmd2.plugin.CommandFinalizationData:
self.called_cmdfinalization += 1
raise ValueError
def cmdfinalization_hook_system_exit(self, data: cmd2.plugin.CommandFinalizationData) -> cmd2.plugin.CommandFinalizationData:
self.called_cmdfinalization += 1
raise SystemExit(5)
def cmdfinalization_hook_keyboard_interrupt(self, data: cmd2.plugin.CommandFinalizationData) -> cmd2.plugin.CommandFinalizationData:
self.called_cmdfinalization += 1
raise KeyboardInterrupt
def cmdfinalization_hook_passthrough_exception(self, data: cmd2.plugin.CommandFinalizationData) -> cmd2.plugin.CommandFinalizationData:
self.called_cmdfinalization += 1
wrapped_ex = OSError('Pass me up')
raise exceptions.PassThroughException(wrapped_ex=wrapped_ex)
def cmdfinalization_hook_not_enough_parameters(self) -> plugin.CommandFinalizationData:
pass
def cmdfinalization_hook_too_many_parameters(self, one: plugin.CommandFinalizationData, two: str) -> plugin.CommandFinalizationData:
return one
def cmdfinalization_hook_no_parameter_annotation(self, data) -> plugin.CommandFinalizationData:
return data
def cmdfinalization_hook_wrong_parameter_annotation(self, data: str) -> plugin.CommandFinalizationData:
    """Deliberately invalid finalization hook: parameter annotated with the wrong type."""
    return data
def cmdfinalization_hook_no_return_annotation(self, data: plugin.CommandFinalizationData):
    """Deliberately invalid finalization hook: missing the return annotation."""
    return data
def cmdfinalization_hook_wrong_return_annotation(self, data: plugin.CommandFinalizationData) -> cmd2.Statement:
    """Deliberately invalid finalization hook: wrong return annotation (Statement)."""
    return self.statement_parser.parse('hi there')
def gather_experiment_predss(experiment) -> List[np.ndarray]:
    """Load per-checkpoint prediction arrays for an experiment.

    Looks under ``<experiment>/chkpts`` for ``iter_<N>`` directories, sorts
    them numerically by the trailing integer, and skips the first (initial)
    checkpoint. Predictions come from ``preds.npz`` (key ``Y_pred``) when
    present; otherwise from the older ``preds.npy`` layout.

    Args:
        experiment: path to the experiment root directory.

    Returns:
        List of prediction arrays, one per retained checkpoint, in order.
        Empty when there is at most one checkpoint (fixes the previous
        crash from unpacking ``zip(*[])``).
    """
    chkpts_dir = Path(experiment) / 'chkpts'
    # Sort by the integer suffix of "iter_<N>"; drop the initial checkpoint.
    chkpt_iter_dirs = sorted(
        chkpts_dir.iterdir(), key=lambda p: int(p.stem.split('_')[-1])
    )[1:]
    try:
        preds_npzs = [np.load(d / 'preds.npz') for d in chkpt_iter_dirs]
        # The archives also hold 'Y_var' variances, which are unused here.
        predss = [preds_npz['Y_pred'] for preds_npz in preds_npzs]
    except FileNotFoundError:
        # Fallback: checkpoints that store a bare array instead of an archive.
        predss = [np.load(d / 'preds.npy') for d in chkpt_iter_dirs]
    return predss
def make_canonical_identifier(chain_identifier=EMPTY, token_network_address=EMPTY, channel_identifier=EMPTY) -> CanonicalIdentifier:
    """Build a CanonicalIdentifier, generating a channel id when none is given."""
    channel_id = channel_identifier or make_channel_identifier()
    properties = CanonicalIdentifierProperties(
        chain_identifier=chain_identifier,
        token_network_address=token_network_address,
        channel_identifier=channel_id,
    )
    return create(properties)
# NOTE(review): the hypothesis decorator was garbled in this file
# ("(everythings(min_int=(- ), max_int=, ...))" is a syntax error); restored
# as @given with ujson's signed 64-bit integer bounds — confirm against the
# sibling ujson tests in this module.
@given(everythings(min_int=-(2 ** 63), max_int=(2 ** 63) - 1, allow_inf=False))
def test_ujson_converter_unstruct_collection_overrides(everything: Everything):
    """Unstructuring with an AbstractSet->sorted override must sort every set field."""
    converter = ujson_make_converter(unstruct_collection_overrides={AbstractSet: sorted})
    raw = converter.unstructure(everything)
    # With the override applied, each set field is already a sorted list.
    assert raw['a_set'] == sorted(raw['a_set'])
    assert raw['a_mutable_set'] == sorted(raw['a_mutable_set'])
    assert raw['a_frozenset'] == sorted(raw['a_frozenset'])
def test_single(hatch, helpers, temp_dir_data, config_file):
    """Removing one env deletes only its directory; sibling envs survive."""
    # Disable the tests template plugin so `hatch new` creates a minimal project.
    config_file.model.template.plugins['default']['tests'] = False
    config_file.save()
    project_name = 'My.App'
    with temp_dir_data.as_cwd():
        result = hatch('new', project_name)
    assert (result.exit_code == 0), result.output
    project_path = (temp_dir_data / 'my-app')
    project = Project(project_path)
    # skip-install keeps env creation fast; foo/bar are two throwaway envs.
    helpers.update_project_environment(project, 'default', {'skip-install': True, **project.config.envs['default']})
    helpers.update_project_environment(project, 'foo', {})
    helpers.update_project_environment(project, 'bar', {})
    with project_path.as_cwd():
        result = hatch('env', 'create', 'foo')
    assert (result.exit_code == 0), result.output
    with project_path.as_cwd():
        result = hatch('env', 'create', 'bar')
    assert (result.exit_code == 0), result.output
    # Virtual envs land under <data>/env/virtual/<project dir>/<hash>/<env>.
    env_data_path = (((temp_dir_data / 'data') / 'env') / 'virtual')
    assert env_data_path.is_dir()
    project_data_path = (env_data_path / project_path.name)
    assert project_data_path.is_dir()
    storage_dirs = list(project_data_path.iterdir())
    assert (len(storage_dirs) == 1)
    storage_path = storage_dirs[0]
    # Storage directory name is an 8-character hash.
    assert (len(storage_path.name) == 8)
    env_dirs = list(storage_path.iterdir())
    assert (len(env_dirs) == 2)
    foo_env_path = (storage_path / 'foo')
    bar_env_path = (storage_path / 'bar')
    assert foo_env_path.is_dir()
    assert bar_env_path.is_dir()
    with project_path.as_cwd():
        result = hatch('env', 'remove', 'bar')
    assert (result.exit_code == 0), result.output
    assert (result.output == helpers.dedent('\n Removing environment: bar\n '))
    # Only bar's directory was removed; foo is untouched.
    assert foo_env_path.is_dir()
    assert (not bar_env_path.is_dir())
def load_json_info(gt_file, img_info):
    """Parse polygon annotations from ``gt_file`` and attach them to ``img_info``.

    Each entry in the ground-truth file's 'lines' list contributes one
    annotation dict with a clamped bounding box derived from its polygon.
    """
    annotations = []
    for line in mmcv.load(gt_file)['lines']:
        polygon = line['points']
        # Even indices are x coordinates, odd indices are y coordinates.
        xs, ys = polygon[0::2], polygon[1::2]
        left = max(0, min(xs))
        top = max(0, min(ys))
        width = abs(max(xs) - left)
        height = abs(max(ys) - top)
        annotations.append(dict(
            iscrowd=line['ignore'],
            category_id=1,
            bbox=[left, top, width, height],
            area=width * height,
            segmentation=[polygon],
        ))
    img_info.update(anno_info=annotations)
    return img_info
def setup_resolver(configuration: BaseConfiguration, patches: GamePatches) -> tuple[(State, Logic)]:
    """Prepare the starting state and Logic for resolving a layout configuration."""
    game = filtered_database.game_description_for_layout(configuration).get_mutable()
    bootstrap = game.game.generator.bootstrap
    # Patch the resource database in place before bootstrapping the logic.
    game.resource_database = bootstrap.patch_resource_database(game.resource_database, configuration)
    new_game, starting_state = bootstrap.logic_bootstrap(configuration, game, patches)
    logic = Logic(new_game, configuration)
    starting_state.resources.add_self_as_requirement_to_resources = True
    return starting_state, logic
def test_ScanArgs_remove_nonseq_outer_input():
    """Removing a non-sequence outer input (Gamma) must also remove its
    dependents while leaving unrelated sequence inputs in place."""
    hmm_model_env = create_test_hmm()
    scan_args = hmm_model_env['scan_args']
    hmm_model_env['scan_op']
    Y_t = hmm_model_env['Y_t']
    Y_rv = hmm_model_env['Y_rv']
    mus_in = hmm_model_env['mus_in']
    mus_t = hmm_model_env['mus_t']
    sigmas_in = hmm_model_env['sigmas_in']
    sigmas_t = hmm_model_env['sigmas_t']
    Gamma_rv = hmm_model_env['Gamma_rv']
    Gamma_in = hmm_model_env['Gamma_in']
    S_in = hmm_model_env['S_in']
    S_t = hmm_model_env['S_t']
    rng_in = hmm_model_env['rng_in']
    # Work on a copy so the fixture's ScanArgs is not mutated.
    scan_args_copy = copy(scan_args)
    test_v = Gamma_rv
    rm_info = scan_args_copy.remove_from_fields(test_v, rm_dependents=True)
    (removed_nodes, _) = zip(*rm_info)
    # Gamma and everything depending on it must be gone.
    assert (Gamma_rv in removed_nodes)
    assert (Gamma_in in removed_nodes)
    assert (S_in in removed_nodes)
    assert (S_t in removed_nodes)
    assert (Y_t in removed_nodes)
    assert (Y_rv in removed_nodes)
    # Unrelated sequence inputs survive the removal.
    assert (mus_in in scan_args_copy.outer_in_seqs)
    assert (sigmas_in in scan_args_copy.outer_in_seqs)
    assert (mus_t in scan_args_copy.inner_in_seqs)
    assert (sigmas_t in scan_args_copy.inner_in_seqs)
    assert (rng_in not in scan_args_copy.inner_out_shared)
    assert (not scan_args_copy.outer_out_shared)
def evaluate(model, data, epoch, args, tb_writer=None):
    """Run zero-shot and retrieval evaluation for one epoch (master rank only).

    Computes the symmetric CLIP contrastive loss over the 'val' split plus
    image<->text retrieval metrics, then logs results to tensorboard, a
    jsonl file, and/or wandb depending on ``args``.

    Returns:
        dict mapping metric name -> value (empty on non-master ranks).
    """
    metrics = {}
    if not is_master(args):
        return metrics
    device = torch.device(args.device)
    model.eval()
    zero_shot_metrics = zero_shot_eval(model, data, epoch, args)
    metrics.update(zero_shot_metrics)
    autocast = get_autocast(args.precision)
    # Evaluate every `val_frequency` epochs and always on the final epoch.
    if 'val' in data and (args.val_frequency and ((epoch % args.val_frequency) == 0 or epoch == args.epochs)):
        dataloader = data['val'].dataloader
        num_samples = 0
        samples_per_val = dataloader.num_samples
        cumulative_loss = 0.0
        all_image_features, all_text_features = [], []
        with torch.no_grad():
            for i, batch in enumerate(dataloader):
                images, texts = batch
                images = images.to(device=device, non_blocking=True)
                texts = texts.to(device=device, non_blocking=True)
                with autocast():
                    image_features, text_features, logit_scale = model(images, texts)
                    # Keep features on CPU so the whole split fits in memory
                    # for the final retrieval-metric computation.
                    all_image_features.append(image_features.cpu())
                    all_text_features.append(text_features.cpu())
                    logit_scale = logit_scale.mean()
                    # BUGFIX: restored the matrix product ('@') that was
                    # missing between the scaled image features and text^T.
                    logits_per_image = (logit_scale * image_features) @ text_features.t()
                    logits_per_text = logits_per_image.t()
                    batch_size = images.shape[0]
                    labels = torch.arange(batch_size, device=device).long()
                    # Symmetric InfoNCE loss over both retrieval directions.
                    total_loss = (F.cross_entropy(logits_per_image, labels) + F.cross_entropy(logits_per_text, labels)) / 2
                cumulative_loss += total_loss * batch_size
                num_samples += batch_size
                if is_master(args) and (i % 100) == 0:
                    logging.info(f'Eval Epoch: {epoch} [{num_samples} / {samples_per_val}] Loss: {(cumulative_loss / num_samples):.6f} ')
            val_metrics = get_metrics(image_features=torch.cat(all_image_features), text_features=torch.cat(all_text_features), logit_scale=logit_scale.cpu())
            loss = cumulative_loss / num_samples
            metrics.update({**val_metrics, 'val_loss': loss.item(), 'epoch': epoch, 'num_samples': num_samples})
    if not metrics:
        return metrics
    logging.info(f'Eval Epoch: {epoch} ' + '\t'.join([f'{k}: {round(v, 4):.4f}' for k, v in metrics.items()]))
    if args.save_logs:
        for name, val in metrics.items():
            if tb_writer is not None:
                tb_writer.add_scalar(f'val/{name}', val, epoch)
        with open(os.path.join(args.checkpoint_path, 'results.jsonl'), 'a+') as f:
            f.write(json.dumps(metrics))
            f.write('\n')
    if args.wandb:
        assert wandb is not None, 'Please install wandb.'
        for name, val in metrics.items():
            wandb.log({f'val/{name}': val, 'epoch': epoch})
    return metrics
class JPEG2000(BinaryCodec):
    """JPEG2000 image codec driven through the ffmpeg CLI."""

    fmt = '.jp2'

    def name(self):
        """Codec identifier."""
        return 'JPEG2000'

    def description(self):
        """Human-readable description including the ffmpeg version in use."""
        return f'JPEG2000. ffmpeg version {_get_ffmpeg_version()}'

    def _get_encode_cmd(self, in_filepath, quality, out_filepath):
        """Build the ffmpeg argv that encodes ``in_filepath`` at ``quality``."""
        cmd = [
            'ffmpeg', '-loglevel', 'panic', '-y',
            '-i', in_filepath,
            '-vcodec', 'jpeg2000',
            '-pix_fmt', 'yuv444p',
            '-c:v', 'libopenjpeg',
            # str(): subprocess argv entries must be strings; `quality` may
            # arrive as an int.
            '-compression_level', str(quality),
            out_filepath,
        ]
        return cmd

    def _get_decode_cmd(self, out_filepath, rec_filepath):
        """Build the ffmpeg argv that decodes ``out_filepath`` to ``rec_filepath``."""
        cmd = ['ffmpeg', '-loglevel', 'panic', '-y', '-i', out_filepath, rec_filepath]
        return cmd
class ErrorHandlingTestCases(unittest.TestCase):
    """Tests for the ``catch`` / ``silent_catch`` error-handling decorators.

    NOTE(review): the decorator lines were garbled in this file (bare
    ``(exception=...)`` expressions and ``_catch(...)``); restored as
    ``@catch`` / ``@silent_catch`` based on the test names — confirm against
    the module under test.
    """

    def test_catch_catches(self):
        """A declared exception type does not propagate."""
        @catch(exception=BlockingIOError)
        def f():
            raise BlockingIOError
        f()
        self.assertTrue(True)

    def test_catch_doesnt_catch_unspecified(self):
        """Exceptions outside the declared type still propagate."""
        @catch(exception=BlockingIOError)
        def f():
            raise ValueError
        self.assertRaises(ValueError, f)

    def test_catch_uses_handler(self):
        """A custom handler can translate the caught exception."""
        def switcheroo_handler(e):
            raise ConnectionError from e

        @catch(exception=ValueError, handler=switcheroo_handler)
        def f():
            raise ValueError
        self.assertRaises(ConnectionError, f)

    def test_silent_catch_catches(self):
        """silent_catch swallows the declared exception."""
        @silent_catch(exception=BlockingIOError)
        def f():
            raise BlockingIOError
        f()
        self.assertTrue(True)

    def test_catch_catches_multiple(self):
        """A list of exception types is accepted."""
        @catch(exception=[FileExistsError, IndexError, IndentationError])
        def f():
            raise IndexError
        f()
        self.assertTrue(True)

    def test_silent_catch_catches_multiple(self):
        """silent_catch also accepts a list of exception types."""
        @silent_catch(exception=[FileExistsError, IndexError, IndentationError])
        def f():
            raise IndentationError
        f()
        self.assertTrue(True)
def build_context_and_subject(auth_context=None, tuf_roots=None):
    """Return the (signed claims dict, JWT subject) pair for an auth context."""
    context = auth_context.to_signed_dict() if auth_context else {}
    # A single TUF root is promoted to the singular claim; otherwise disabled.
    if tuf_roots is not None and len(tuf_roots) == 1:
        single_root = list(tuf_roots.values())[0]
    else:
        single_root = DISABLED_TUF_ROOT
    context[CLAIM_TUF_ROOTS] = tuf_roots
    context[CLAIM_TUF_ROOT] = single_root
    if not auth_context or auth_context.is_anonymous:
        return (context, ANONYMOUS_SUB)
    subject = auth_context.authed_user.username if auth_context.authed_user else None
    return (context, subject)
class TestTrainingExtensionsQcQuantizeRecurrentParamOp():
    """Tests for the custom qc_quantize_recurrent_param TensorFlow op."""

    def test_qc_quantize_recurrent_param_op(self):
        """One-shot quantize-dequantize of integer-valued inputs should be
        near-lossless (allclose within 1e-6)."""
        graph = tf.Graph()
        config = tf.compat.v1.ConfigProto(log_device_placement=False)
        sess = tf.compat.v1.Session(graph=graph, config=config)
        bitwidth = 8
        use_symm_encoding = True
        with graph.as_default():
            # Build the op on CPU with an explicit TF-mode tensor quantizer.
            with tf.device('/device:CPU:0'):
                inp = tf.compat.v1.placeholder(tf.float32, shape=[10], name='input')
                tensor_quantizer = libpymo.TensorQuantizer(libpymo.QuantizationMode.QUANTIZATION_TF, libpymo.RoundingMode.ROUND_NEAREST)
                # The op receives the quantizer via a raw pointer packed in an int64 var.
                tensor_quantizer_val = libpymo.PtrToInt64(tensor_quantizer)
                tensor_quant_ref = tf.Variable(initial_value=tensor_quantizer_val, trainable=False, dtype=tf.int64)
                time_step_tensor = tf.constant(1, dtype=tf.int32)
                encoding_min = tf.Variable(initial_value=(- 0.5), trainable=True, dtype=tf.double)
                encoding_max = tf.Variable(initial_value=0.5, trainable=True, dtype=tf.double)
                bit_width = tf.Variable(initial_value=bitwidth, trainable=False, dtype=tf.int8)
                use_symmetric_encoding = tf.Variable(initial_value=use_symm_encoding, trainable=False, dtype=tf.bool)
                mode_var = tf.Variable(initial_value=int(libpymo.TensorQuantizerOpMode.oneShotQuantizeDequantize), trainable=False, dtype=tf.int32)
                sess.run([mode_var.initializer, tensor_quant_ref.initializer, encoding_min.initializer, encoding_max.initializer, bit_width.initializer, use_symmetric_encoding.initializer])
            pass_through_op_output = zero_out_module.qc_quantize_recurrent_param(name='quant_op', in_tensor=inp, op_mode=mode_var, tensor_quantizer_reference=tensor_quant_ref, encoding_min=encoding_min, encoding_max=encoding_max, bit_width=bit_width, use_symmetric_encoding=use_symmetric_encoding, time_steps=time_step_tensor)
        inp_tensor = sess.graph.get_tensor_by_name('input:0')
        # Fixed seed keeps the random +-1/0 input reproducible.
        np.random.seed(18)
        inp_data = np.random.randint(low=(- 1), high=2, size=10).astype(np.float32)
        print(inp_data)
        out_data = sess.run(pass_through_op_output, feed_dict={inp_tensor: inp_data})
        print(out_data)
        assert np.allclose(out_data, inp_data, atol=1e-06)
        sess.close()
def test_repr_pyobjectsdef_pyclass_without_associated_resource(project):
    """repr() of a PyClass from a resource-less string module uses '::<name>'."""
    source = 'class MyClass: pass'
    module = libutils.get_string_module(project, source)
    pyclass = module.get_attribute('MyClass').pyobject
    assert isinstance(pyclass, pyobjectsdef.PyClass)
    assert repr(pyclass).startswith('<rope.base.pyobjectsdef.PyClass "::MyClass" at 0x')
def download_and_unzip(url, root, dataset):
    """Download an archive for ``dataset`` into ``root`` and extract it.

    The archive is saved under ``<root>/<dataset>/zips`` and extracted into
    ``<root>/<dataset>``. Supports .zip, .tar.gz/.tgz and bare .gz files;
    other extensions are downloaded but not extracted.
    """
    folder = os.path.join(root, dataset)
    folder_zips = os.path.join(folder, 'zips')
    # exist_ok avoids the check-then-create race of the previous version.
    os.makedirs(folder_zips, exist_ok=True)
    filename_zip = os.path.join(folder_zips, url.split('/')[-1])
    download_from_url(url, filename_zip)
    if filename_zip.endswith('.zip'):
        # Context managers guarantee the archive handles are closed on error.
        with zipfile.ZipFile(filename_zip, 'r') as zip_ref:
            zip_ref.extractall(folder)
    elif filename_zip.endswith(('.tar.gz', '.tgz')):
        with tarfile.open(name=filename_zip, mode='r:gz') as tar_ref:
            tar_ref.extractall(folder)
    elif filename_zip.endswith('.gz'):
        # Plain gzip: decompress next to the archive, dropping the '.gz'.
        filename_no_gz = filename_zip[:-3]
        with gzip.open(filename_zip, 'rb') as f_in, open(filename_no_gz, 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
def test_run_script_with_binary_file(base_app, request):
    """run_script on a binary file reports an encoding error and sets last_result False."""
    test_dir = os.path.dirname(request.module.__file__)
    script_path = os.path.join(test_dir, 'scripts', 'binary.bin')
    out, err = run_cmd(base_app, 'run_script {}'.format(script_path))
    assert 'is not an ASCII or UTF-8 encoded text file' in err[0]
    assert base_app.last_result is False
def delete_bytes(fobj, size: int, offset: int, BUFFER_SIZE: int=_DEFAULT_BUFFER_SIZE) -> None:
    """Delete ``size`` bytes at ``offset`` from ``fobj``, shrinking the file.

    The tail of the file is shifted left over the deleted span, then the
    file is truncated by ``size`` bytes.

    Args:
        fobj: a seekable file object opened for update.
        size: number of bytes to delete (>= 0).
        offset: position of the first byte to delete (>= 0).
        BUFFER_SIZE: chunk size used while moving data.

    Raises:
        ValueError: if size/offset are negative or the span extends past EOF.
    """
    if (size < 0) or (offset < 0):
        raise ValueError('size and offset must be non-negative')
    fobj.seek(0, 2)  # seek to EOF to learn the current file size
    filesize = fobj.tell()
    movesize = (filesize - offset) - size
    if movesize < 0:
        raise ValueError('cannot delete beyond the end of the file')
    # Shift the tail left over the deleted span, then truncate the file.
    move_bytes(fobj, offset, (offset + size), movesize, BUFFER_SIZE)
    resize_file(fobj, (- size), BUFFER_SIZE)
def test_ignore_form_by_class(app, client):
    """Forms matching an ignored CSS selector must never be submitted."""
    crawler = Crawler(
        client=client,
        initial_paths=['/'],
        rules=(PERMISSIVE_HYPERLINKS_ONLY_RULE_SET + SUBMIT_GET_FORMS_RULE_SET),
        ignore_css_selectors=['form.form-get-class'],
    )
    crawler.crawl()
    form_nodes = crawler.graph.get_nodes_by_source(FORM)
    requested_forms = [node for node in form_nodes if node.requested]
    assert len(requested_forms) == 0
def repo_result_view(repo, username, last_modified=None, stars=None, popularity=None):
    """Build the search-result dict for a repository (or application) match."""
    is_app = Repository.kind.get_name(repo.kind_id) == 'application'
    kind = 'application' if is_app else 'repository'
    view = {
        'kind': kind,
        'title': 'app' if is_app else 'repo',
        'namespace': search_entity_view(username, repo.namespace_user),
        'name': repo.name,
        'description': repo.description,
        'is_public': model.repository.is_repository_public(repo),
        'score': REPOSITORY_SEARCH_SCORE,
        'href': '/' + kind + '/' + repo.namespace_user.username + '/' + repo.name,
    }
    # Optional enrichments are only included when the caller supplied them.
    for key, value in (('last_modified', last_modified), ('stars', stars), ('popularity', popularity)):
        if value is not None:
            view[key] = value
    return view
def convert_data(apps, schema_editor):
    """Migration helper: wrap keynote and speaker text fields in {'en': ...} JSON."""
    Keynote = apps.get_model('conferences', 'Keynote')
    for keynote in Keynote.objects.all():
        for speaker in keynote.speakers.all():
            speaker.bio = json.dumps({'en': speaker.bio})
            speaker.pronouns = json.dumps({'en': speaker.pronouns})
            speaker.save()
        keynote.description = json.dumps({'en': keynote.description})
        keynote.title = json.dumps({'en': keynote.title})
        keynote.save()
class AltCLIPProcessor(ProcessorMixin):
    """Combines a CLIP image processor and an XLM-Roberta tokenizer into one
    AltCLIP processor.

    NOTE(review): the ``@property`` decorator on ``model_input_names`` appears
    to have been stripped from this file; restored to match the ProcessorMixin
    convention — confirm upstream.
    """

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Backward compatibility: `feature_extractor` was renamed to
        # `image_processor`; fall back to it only when it was actually passed.
        if 'feature_extractor' in kwargs:
            warnings.warn('The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor` instead.', FutureWarning)
            feature_extractor = kwargs.pop('feature_extractor')
            image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`.

        Returns the tokenizer encoding (with 'pixel_values' attached when
        images are also given), or a BatchEncoding of image features when
        only images are given.
        """
        if (text is None) and (images is None):
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if (text is not None) and (images is not None):
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Combined, order-preserving, de-duplicated input names of both sub-processors."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
class WindowOptions():
    """Parameter bundle describing how a procedural window is generated.

    NOTE(review): these are class-level attribute defaults, so the sub-option
    instances (ArchOptions() etc.) are shared by all WindowOptions instances
    unless a dataclass-style decorator was stripped from this file — confirm
    upstream.
    """

    # Sub-option groups for the individual window components.
    arch: ArchOptions = ArchOptions()
    array: ArrayOptions = ArrayOptions()
    size_offset: SizeOffsetOptions = SizeOffsetOptions()
    bar_fill: FillBarOptions = FillBarOptions()
    louver_fill: FillLouverOptions = FillLouverOptions()
    glass_fill: FillGlassPaneOptions = FillGlassPaneOptions()
    # Overall shape and frame geometry.
    type: WindowType = WindowType.RECTANGULAR
    frame_thickness: float = 0.1
    frame_depth: float = 0.1
    window_depth: float = 0.05
    resolution: int = 20
    add_arch: bool = False
class MobileViTAttention(nn.Module):
    """MobileViT-style block: local convolutions, patch-wise Transformer for
    global context, then fusion with the input via concat + conv."""

    def __init__(self, in_channel=3, dim=512, kernel_size=3, patch_size=7, depth=3, mlp_dim=1024):
        super().__init__()
        (self.ph, self.pw) = (patch_size, patch_size)
        # Local representation: spatial conv followed by 1x1 expansion to `dim`.
        self.conv1 = nn.Conv2d(in_channel, in_channel, kernel_size=kernel_size, padding=(kernel_size // 2))
        self.conv2 = nn.Conv2d(in_channel, dim, kernel_size=1)
        self.trans = Transformer(dim=dim, depth=depth, heads=8, head_dim=64, mlp_dim=mlp_dim)
        # Project back to input channels; conv4 fuses concat([x, y]).
        self.conv3 = nn.Conv2d(dim, in_channel, kernel_size=1)
        self.conv4 = nn.Conv2d((2 * in_channel), in_channel, kernel_size=kernel_size, padding=(kernel_size // 2))

    def forward(self, x):
        # assumes H and W are divisible by patch_size — TODO confirm upstream
        y = x.clone()  # (immediately overwritten below)
        y = self.conv2(self.conv1(x))
        (_, _, h, w) = y.shape
        # Unfold the feature map into patches for the Transformer.
        y = rearrange(y, 'bs dim (nh ph) (nw pw) -> bs (ph pw) (nh nw) dim', ph=self.ph, pw=self.pw)
        y = self.trans(y)
        # Fold the patches back into a spatial feature map.
        y = rearrange(y, 'bs (ph pw) (nh nw) dim -> bs dim (nh ph) (nw pw)', ph=self.ph, pw=self.pw, nh=(h // self.ph), nw=(w // self.pw))
        y = self.conv3(y)
        # Fuse the globally-attended features with the original input.
        y = torch.cat([x, y], 1)
        y = self.conv4(y)
        return y
class SparseTransformerSentenceEncoderLayer(TransformerSentenceEncoderLayer):
    """Transformer encoder layer whose self-attention is replaced by a
    fixed-pattern SparseMultiheadAttention (stride/expressivity controlled)."""

    def __init__(self, embedding_dim: int=768, ffn_embedding_dim: int=3072, num_attention_heads: int=8, dropout: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, activation_fn: str='relu', export: bool=False, is_bidirectional: bool=True, stride: int=32, expressivity: int=8) -> None:
        super().__init__(embedding_dim, ffn_embedding_dim, num_attention_heads, dropout, attention_dropout, activation_dropout, activation_fn, export)
        # Replace the dense self-attention built by the base class.
        self.self_attn = SparseMultiheadAttention(self.embedding_dim, num_attention_heads, dropout=attention_dropout, add_bias_kv=False, add_zero_attn=False, self_attention=True, is_bidirectional=is_bidirectional, stride=stride, expressivity=expressivity)
class numeric_grad():
    """Numerically estimate the gradient of ``f`` at ``pt`` with one-sided
    finite differences and compare it against analytic gradients.

    The estimate is stored in ``self.gf`` with the same packing and shapes
    as ``pt``.
    """

    # Finite-difference step / tolerance per dtype precision.
    type_eps = {'float64': 1e-07, 'float32': 0.0003, 'float16': 0.1, np.dtype('float64'): 1e-07, np.dtype('float32'): 0.0003, np.dtype('float16'): 0.1}

    def __init__(self, f, pt, eps=None, out_type=None):
        """Compute the numeric gradient of ``f`` at ``pt``.

        Args:
            f: function of the unpacked ``pt`` arrays, returning a scalar/array.
            pt: one array-like, or a list/tuple of them.
            eps: finite-difference step; defaults to the loosest dtype eps.
            out_type: dtype of f's output; complex outputs get a complex buffer.
        """
        def prod(inputs):
            # Product of an iterable of ints (shape -> element count).
            rval = 1
            for i in inputs:
                rval *= i
            return rval
        packed_pt = False
        if (not isinstance(pt, (list, tuple))):
            pt = [pt]
            packed_pt = True
        apt = [np.array(p) for p in pt]
        shapes = [p.shape for p in apt]
        dtypes = [str(p.dtype) for p in apt]
        # All inputs are flattened into one working vector x; gradients go in gx.
        total_size = sum((prod(sh) for sh in shapes))
        # Work in the dtype with the smallest eps (i.e. the most precise one).
        working_dtype = min(((self.type_eps[dt], dt) for dt in dtypes))[1]
        x = np.ndarray((total_size,), dtype=working_dtype)
        if ((out_type is not None) and out_type.startswith('complex')):
            gx = np.ndarray((total_size,), dtype=out_type)
        else:
            gx = np.ndarray((total_size,), dtype=working_dtype)
        if (eps is None):
            # Use the loosest eps so the step is meaningful for every input.
            eps = max((self.type_eps[dt] for dt in dtypes))
        cur_pos = 0
        self.gf = []
        # apt[i] and self.gf[i] become reshaped views into x and gx, so
        # perturbing x below perturbs the arguments passed to f.
        for (i, p) in enumerate(apt):
            p_size = prod(p.shape)
            apt[i] = x[cur_pos:(cur_pos + p_size)].reshape(p.shape)
            self.gf.append(gx[cur_pos:(cur_pos + p_size)].reshape(p.shape))
            apt[i][...] = p
            cur_pos += p_size
        f_x = f(*[p.copy() for p in apt])
        # One-sided differences: perturb each coordinate of x in turn.
        x_copy = x.copy()
        for i in range(total_size):
            x[:] = x_copy
            x[i] += eps
            f_eps = f(*apt)
            gx[i] = ((f_eps - f_x) / eps)
        if packed_pt:
            self.gf = self.gf[0]

    def abs_rel_err(a, b):
        """Return elementwise (absolute error, relative error) between a and b.

        NOTE(review): defined without @staticmethod, but it is only called as
        ``numeric_grad.abs_rel_err(a, b)`` (class-attribute access), which
        works as a plain function in Python 3.
        """
        abs_err = abs((a - b))
        # The 1e-08 floor avoids division by ~0 when both values are tiny.
        rel_err = (abs_err / np.maximum((abs(a) + abs(b)), [1e-08]))
        abs_err = np.asarray(abs_err)
        rel_err = np.asarray(rel_err)
        return (abs_err, rel_err)

    def abs_rel_errors(self, g_pt):
        """Elementwise error pairs between analytic gradients ``g_pt`` and ``self.gf``."""
        if (len(g_pt) != len(self.gf)):
            raise ValueError('argument has wrong number of elements', len(g_pt))
        errs = []
        for (i, (a, b)) in enumerate(zip(g_pt, self.gf)):
            if (a.shape != b.shape):
                raise ValueError(f'argument element {i} has wrong shapes {a.shape}, {b.shape}')
            errs.append(numeric_grad.abs_rel_err(a, b))
        return errs

    def max_err(self, g_pt, abs_tol, rel_tol):
        """Locate the worst disagreement with the analytic gradients.

        Returns (argument index, flat position within it, abs error, rel
        error) of the element maximizing the tolerance-scaled error.
        """
        pos = []
        errs = []
        abs_errs = []
        rel_errs = []
        abs_rel_errs = self.abs_rel_errors(g_pt)
        for (abs_err, rel_err) in abs_rel_errs:
            if (not np.all(np.isfinite(abs_err))):
                raise ValueError('abs_err not finite', repr(abs_err))
            if (not np.all(np.isfinite(rel_err))):
                raise ValueError('rel_err not finite', repr(rel_err))
            # Scale by both tolerances and keep the more forgiving of the two.
            scaled_err = np.minimum((abs_err / abs_tol), (rel_err / rel_tol))
            max_i = scaled_err.argmax()
            pos.append(max_i)
            errs.append(scaled_err.flatten()[max_i])
            abs_errs.append(abs_err.flatten()[max_i])
            rel_errs.append(rel_err.flatten()[max_i])
        max_arg = np.argmax(errs)
        max_pos = pos[max_arg]
        return (max_arg, max_pos, abs_errs[max_arg], rel_errs[max_arg])
def load_results(n_demo):
    """Collect reward statistics for every method at one demonstration count.

    Globs the .perf result files for the expert, BC, DRIL, random and GAIL
    runs of ``args.env`` under ``data_dir`` and reduces each with
    ``get_results``.

    Args:
        n_demo: number of demonstration trajectories the runs were trained on.

    Returns:
        dict mapping method name -> reward array (plus DRIL reward curves
        and per-GAIL-reward-type entries).
    """
    expert_results = glob.glob(f'{data_dir}/expert/expert_{args.env}_seed=*.perf')
    (expert_reward, _, _, _) = get_results(expert_results)
    bc_mse_results = glob.glob(f'{data_dir}/bc/bc_{args.env}_policy_ntrajs={n_demo}_seed=*.perf')
    (bc_mse_reward, _, _, _) = get_results(bc_mse_results)
    # DRIL runs encode all hyper-parameters in the file name.
    exp_name = f'dril_{args.env}_ntraj={n_demo}_ensemble_lr=0.00025_lr=0.00025_bcep=1001_'
    exp_name += f'shuffle=sample_w_replace_quantile=0.98_cost_-1_to_1_seed=*.perf'
    bc_mse_variance_results = glob.glob(f'{data_dir}/dril/{exp_name}')
    (bc_mse_variance_reward, bc_variance_u_reward, bc_variance_steps, bc_mse_variance_reward_curve) = get_results(bc_mse_variance_results, filter=True)
    # Random baseline: best test reward seen within each run.
    random_reward = []
    random_results = glob.glob(f'{data_dir}/random/{args.env}/random*.perf')
    for r in random_results:
        random_reward.append(pandas.read_csv(r)['test_reward'].max())
    # NOTE: removed an unused `params` list of hyper-parameter combinations
    # that was built here but never read.
    gail = {}
    for gail_reward_type in ['unbias', 'favor_zero_reward', 'favor_non_zero_reward']:
        gail_results = f'gail_{args.env}_ntraj={n_demo}_'
        gail_results += f'gail_lr=0.001_lr=0.00025_bcep=2001_'
        gail_results += f'gail_reward_type={gail_reward_type}_seed=*.perf'
        results = glob.glob(f'{data_dir}/gail/{gail_results}')
        label = f'GAIL {gail_reward_type}'
        (results, _, _, _) = get_results(results)
        gail[label] = results if results else []
    return {'expert': numpy.array(expert_reward), 'bc_mse': numpy.array(bc_mse_reward), 'bc_mse_variance': numpy.array(bc_mse_variance_reward), 'bc_variance_u_reward_curve': bc_variance_u_reward, 'bc_mse_variance_reward_curve': bc_mse_variance_reward_curve, 'bc_variance_steps': bc_variance_steps, 'random': numpy.array(random_reward), **gail}
class BanSelector(discord.ui.Select):
    """Dropdown listing banned teams so a moderator can pick some to unban."""

    view: QuotientView

    def __init__(self, ctx: Context, teams: T.List[BannedTeam]):
        # One option per banned team; the option value is the ban record id.
        _options = []
        for _ in teams:
            _options.append(discord.SelectOption(label=f"{getattr(ctx.bot.get_user(_.user_id), 'name', 'unknown-user')} [{_.user_id}]", description=f"Expires: {(_.expires.strftime('%d %b %Y %H:%M') if _.expires else 'Never')}", emoji=emote.TextChannel, value=_.id))
        super().__init__(placeholder='Select the players to Unban...', options=_options, max_values=len(_options))

    async def callback(self, interaction: discord.Interaction):
        """Store the chosen ban ids on the parent view and stop it."""
        (await interaction.response.defer())
        self.view.custom_id = self.values
        self.view.stop()
# NOTE(review): decorators were garbled in this file ('_on_failure' and bare
# '.parametrize' lines); restored as raise_on_failure + pytest.mark.parametrize
# following the raiden test conventions — confirm against sibling tests.
@raise_on_failure
@pytest.mark.parametrize('number_of_nodes', [1])
@pytest.mark.parametrize('channels_per_node', [0])
@pytest.mark.parametrize('enable_rest_api', [True])
def test_payload_with_address_not_eip55(api_server_test_instance: APIServer):
    """PUT /channels with a non-EIP55-checksummed partner address must 400."""
    invalid_address = '0xf696209d2ca35e6c88e5b99b7cda3abf316bed69'
    channel_data_obj = {'partner_address': invalid_address, 'token_address': '0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8', 'settle_timeout': '90'}
    request = grequests.put(api_url_for(api_server_test_instance, 'channelsresource'), json=channel_data_obj)
    response = request.send().response
    assert_response_with_error(response, HTTPStatus.BAD_REQUEST)
def bootstrap(field):
    """Template filter: attach the matching Bootstrap CSS class to a form field."""
    widget = getattr(getattr(field, 'field', None), 'widget', None)
    if widget:
        widget_name = widget.__class__.__name__.lower()
        if widget_name in {'passwordinput', 'textinput', 'textarea', 'select', 'numberinput', 'emailinput'}:
            return add_class(field, 'form-control')
        if widget_name in {'checkboxinput', 'radioselect'}:
            return add_class(field, 'form-check-input')
        if widget_name == 'fileinput':
            return add_class(field, 'form-control-file')
    # Unknown widget or not a bound form field: pass through unchanged.
    return field
def testParameterAddActions():
    """ParameterAddAction: equality, parse round-trip, and OSC version gating."""
    action = OSC.ParameterAddAction('Myparam', 3)
    action.setVersion(minor=1)
    prettyprint(action.get_element())
    same_action = OSC.ParameterAddAction('Myparam', 3)
    other_action = OSC.ParameterAddAction('Myparam', 2)
    assert action == same_action
    assert action != other_action
    # Serializing and re-parsing must yield an equal action.
    reparsed = OSC.ParameterAddAction.parse(action.get_element())
    assert action == reparsed
    # The action is valid for OSC minor versions 0 and 1, but not 2.
    assert version_validation('GlobalAction', action, 0) == ValidationResponse.OK
    assert version_validation('GlobalAction', action, 1) == ValidationResponse.OK
    assert version_validation('GlobalAction', action, 2) == ValidationResponse.OSC_VERSION
class TestAttributes(EvenniaTest):
    """Round-trip tests for the object attribute handler and .db shortcut."""

    def test_attrhandler(self):
        """Values stored via attributes.add/.db come back unchanged."""
        key = 'testattr'
        value = 'test attr value '
        self.obj1.attributes.add(key, value)
        self.assertEqual(self.obj1.attributes.get(key), value)
        self.obj1.db.testattr = value
        self.assertEqual(self.obj1.db.testattr, value)

    def test_weird_text_save(self):
        """Django SafeText values survive the attribute save/load cycle."""
        from django.utils.safestring import SafeText
        key = 'test attr 2'
        value = SafeText('test attr value 2')
        self.obj1.attributes.add(key, value)
        self.assertEqual(self.obj1.attributes.get(key), value)
# NOTE(review): decorators were garbled in this file ('_dataframe_method' and
# '_function(message=...)'); restored per the pyjanitor convention, whose
# deprecation message matches this one exactly — confirm against the module.
@pf.register_dataframe_method
@refactored_function(message='This function will be deprecated in a 1.x release. Please use `pd.DataFrame.assign` instead.')
def add_columns(df: pd.DataFrame, fill_remaining: bool=False, **kwargs: Any) -> pd.DataFrame:
    """Add one or more columns to ``df`` via repeated ``add_column`` calls.

    Args:
        df: DataFrame to extend.
        fill_remaining: forwarded to ``add_column``; when True, short value
            sequences are broadcast to fill the remaining rows.
        **kwargs: mapping of new column name -> values.

    Returns:
        The DataFrame with the new columns added.
    """
    for col_name, values in kwargs.items():
        df = df.add_column(col_name, values, fill_remaining=fill_remaining)
    return df
def get_estimation(idx, target_name, estimation_dict):
    """Concatenate the chunked estimations for one target at a given index.

    Returns None (with a warning) when no chunks were collected, which can
    happen under DDP.
    """
    chunks = estimation_dict[target_name][idx]
    if not chunks:
        warn('TODO: zero estimation, caused by ddp')
        return None
    # Concatenate chunks in deterministic (sorted-key) order.
    ordered = [chunks[key] for key in sorted(chunks)]
    return np.concatenate(ordered, axis=0)
class RemoveEvent():
    """Event describing removal of a pack or file from the queue or collector."""

    def __init__(self, itype, iid, destination):
        # Only packs and files can be removed, and only from these two views.
        assert itype in ('pack', 'file')
        assert destination in ('queue', 'collector')
        self.type = itype
        self.id = iid
        self.destination = destination

    def to_list(self):
        """Serialize as the wire-format list understood by the client."""
        return ['remove', self.destination, self.type, self.id]
class TestProcessParameterData(TestCase):
    """Tests for the 1D/2D/3D parameter-data loaders in pybamm.parameters."""

    def test_process_1D_data(self):
        """process_1D_data returns (name, (x_data, y_data)) arrays."""
        name = 'lico2_ocv_example'
        path = os.path.join(pybamm.root_dir(), 'tests', 'unit', 'test_parameters')
        processed = pybamm.parameters.process_1D_data(name, path)
        self.assertEqual(processed[0], name)
        self.assertIsInstance(processed[1], tuple)
        self.assertIsInstance(processed[1][0][0], np.ndarray)
        self.assertIsInstance(processed[1][1], np.ndarray)

    def test_process_2D_data(self):
        """process_2D_data returns two coordinate arrays plus the value array."""
        name = 'lico2_diffusivity_Dualfoil1998_2D'
        path = os.path.join(pybamm.root_dir(), 'tests', 'unit', 'test_parameters')
        processed = pybamm.parameters.process_2D_data(name, path)
        self.assertEqual(processed[0], name)
        self.assertIsInstance(processed[1], tuple)
        self.assertIsInstance(processed[1][0][0], np.ndarray)
        self.assertIsInstance(processed[1][0][1], np.ndarray)
        self.assertIsInstance(processed[1][1], np.ndarray)

    def test_process_2D_data_csv(self):
        """CSV-backed 2D loader produces the same structure as the npz one."""
        name = 'data_for_testing_2D'
        path = os.path.join(pybamm.root_dir(), 'tests', 'unit', 'test_parameters')
        processed = pybamm.parameters.process_2D_data_csv(name, path)
        self.assertEqual(processed[0], name)
        self.assertIsInstance(processed[1], tuple)
        self.assertIsInstance(processed[1][0][0], np.ndarray)
        self.assertIsInstance(processed[1][0][1], np.ndarray)
        self.assertIsInstance(processed[1][1], np.ndarray)

    def test_process_3D_data_csv(self):
        """CSV-backed 3D loader yields three coordinate arrays plus values."""
        name = 'data_for_testing_3D'
        path = os.path.join(pybamm.root_dir(), 'tests', 'unit', 'test_parameters')
        processed = pybamm.parameters.process_3D_data_csv(name, path)
        self.assertEqual(processed[0], name)
        self.assertIsInstance(processed[1], tuple)
        self.assertIsInstance(processed[1][0][0], np.ndarray)
        self.assertIsInstance(processed[1][0][1], np.ndarray)
        self.assertIsInstance(processed[1][0][2], np.ndarray)
        self.assertIsInstance(processed[1][1], np.ndarray)

    def test_error(self):
        """A missing data file raises FileNotFoundError with a clear message."""
        with self.assertRaisesRegex(FileNotFoundError, 'Could not find file'):
            pybamm.parameters.process_1D_data('not_a_real_file', 'not_a_real_path')
# NOTE(review): the registration decorator was garbled ("_layer(...)");
# restored as GraphGym's @register_layer — confirm against the module imports.
@register_layer('exampleconv1')
class ExampleConv1(MessagePassing):
    """Minimal example GNN layer: linear transform plus neighborhood aggregation."""

    def __init__(self, in_channels, out_channels, bias=True, **kwargs):
        super().__init__(aggr=cfg.gnn.agg, **kwargs)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.weight = Parameter(torch.Tensor(in_channels, out_channels))
        if bias:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Glorot-initialize the weight; zero the bias."""
        glorot(self.weight)
        zeros(self.bias)

    def forward(self, batch):
        (x, edge_index) = (batch.x, batch.edge_index)
        x = torch.matmul(x, self.weight)
        batch.x = self.propagate(edge_index, x=x)
        return batch

    def message(self, x_j):
        # Identity message: neighbor features pass through unchanged.
        return x_j

    def update(self, aggr_out):
        if (self.bias is not None):
            aggr_out = (aggr_out + self.bias)
        return aggr_out
def _init_env_vars(use_gpu: bool):
if (('WORLD_SIZE' not in os.environ) or ('RANK' not in os.environ)):
os.environ['WORLD_SIZE'] = '1'
os.environ['RANK'] = '0'
os.environ['LOCAL_RANK'] = '0'
if (('MASTER_ADDR' not in os.environ) or ('MASTER_PORT' not in os.environ)):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '29500'
if use_gpu:
os.environ['NCCL_ASYNC_ERROR_HANDLING'] = '1' |
def get_basic_announcements(announcements, include_local: bool = True):
    """Filter out non-local primary announcements; keep local ones only on request."""
    def _keep(announcement):
        if announcement.get('local', False):
            # Local announcements (of any type) are gated by include_local.
            return include_local
        return announcement.get('type', '').lower() != 'primary_announcement'
    return [announcement for announcement in announcements if _keep(announcement)]
def handle_action_transfer_reroute(chain_state: ChainState, state_change: ActionTransferReroute) -> TransitionResult[ChainState]:
    """Clone the rerouted transfer's payment task under its new secrethash and dispatch."""
    old_secrethash = state_change.transfer.lock.secrethash
    new_secrethash = state_change.secrethash
    task_map = chain_state.payment_mapping.secrethashes_to_task
    # Deep copy so the new task's state evolves independently of the old one.
    task_map[new_secrethash] = deepcopy(task_map[old_secrethash])
    return subdispatch_to_paymenttask(chain_state, state_change, new_secrethash)
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.
# (NOTE: the three lines above are dataset-viewer page residue, not source code;
# commented out so the file stays syntactically valid.)