code stringlengths 281 23.7M |
|---|
class XHKGExchangeCalendar(TradingCalendar):
    """Exchange calendar for the Hong Kong Stock Exchange (XHKG).

    Sessions include a lunch break; the open, break-start and early-close
    times changed with the 2011-03-07 session reform. Lunisolar holidays
    (Lunar New Year, Qingming, Buddha's Birthday, Dragon Boat, day after
    Mid-Autumn, Double Ninth) come from precomputed date tables, so the
    calendar can only be instantiated within the precomputed range.
    """
    name = 'XHKG'
    tz = timezone('Asia/Hong_Kong')
    # Open was 10:01 until the 2011-03-07 change, 09:31 afterwards.
    open_times = ((None, time(10, 1)), (pd.Timestamp('2011-03-07'), time(9, 31)))
    break_start_times = ((None, time(12, 1)),)
    break_end_times = ((None, time(13, 0)),)
    close_times = ((None, time(16)),)
    # Half-day sessions closed at 12:30 originally, 12:00 from 2011-03-07.
    regular_early_close_times = ((None, time(12, 30)), (pd.Timestamp('2011-03-07'), time(12, 0)))

    def __init__(self, *args, **kwargs):
        super(XHKGExchangeCalendar, self).__init__(*args, **kwargs)
        # Validate that the requested session range is covered by every
        # precomputed lunisolar holiday table.
        lunisolar_holidays = (chinese_buddhas_birthday_dates, chinese_lunar_new_year_dates, day_after_mid_autumn_festival_dates, double_ninth_festival_dates, dragon_boat_festival_dates, qingming_festival_dates)
        # Usable range is bounded by the *shortest* table on each side:
        # the latest of the tables' minima and the earliest of their maxima.
        earliest_precomputed_year = max(map(np.min, lunisolar_holidays)).year
        if (earliest_precomputed_year > self.first_trading_session.year):
            raise ValueError('the lunisolar holidays have only been computed back to {}, cannot instantiate the XHKG calendar back to {}'.format(earliest_precomputed_year, self.first_trading_session.year))
        latest_precomputed_year = min(map(np.max, lunisolar_holidays)).year
        if (latest_precomputed_year < self.last_trading_session.year):
            raise ValueError('the lunisolar holidays have only been computed through {}, cannot instantiate the XHKG calendar in {}'.format(latest_precomputed_year, self.last_trading_session.year))

    # NOTE(review): upstream exchange-calendar implementations expose these
    # accessors as properties; any decorators appear to have been stripped
    # from this copy — confirm against the original file.
    def regular_holidays(self):
        # Gregorian/Easter-based holiday rules (lunisolar ones are ad-hoc).
        return HolidayCalendar([new_years_day(observance=sunday_to_monday), GoodFriday, EasterMonday, LabourDay, HKRegionEstablishmentDay, CommemoratingAlliedVictory, IDontKnow, NationalDay, QueenBirthday, QueenBirthday2, christmas(), weekend_christmas(), boxing_day(observance=boxing_day_obs)])

    def adhoc_holidays(self):
        # Lunar New Year's Eve was a holiday when LNY fell on a Saturday
        # (pre-2013 rule).
        lunar_new_years_eve = (chinese_lunar_new_year_dates - pd.Timedelta(days=1))[((chinese_lunar_new_year_dates.weekday == SATURDAY) & (chinese_lunar_new_year_dates.year < 2013))]
        lunar_new_year_2 = (chinese_lunar_new_year_dates + pd.Timedelta(days=1))
        lunar_new_year_3 = (chinese_lunar_new_year_dates + pd.Timedelta(days=2))
        # From 2013 on, a fourth LNY day is observed when any of the first
        # three days falls on a Sunday.
        lunar_new_year_4 = (chinese_lunar_new_year_dates + pd.Timedelta(days=3))[((((chinese_lunar_new_year_dates.weekday == SUNDAY) | (lunar_new_year_2.weekday == SUNDAY)) | (lunar_new_year_3.weekday == SUNDAY)) & (chinese_lunar_new_year_dates.year >= 2013))]
        # Qingming shifted off Sundays; if the shifted date collides with
        # Easter Monday, push it one more day.
        qingming_festival = vectorized_sunday_to_monday(qingming_festival_dates).values
        years = qingming_festival.astype('M8[Y]')
        easter_monday = EasterMonday.dates(years[0], (years[(- 1)] + 1))
        qingming_festival[(qingming_festival == easter_monday)] += np.timedelta64(1, 'D')
        # Day-after-Mid-Autumn shifts forward a day when it collides with
        # National Day (Oct 1).
        mid_autumn_festival = day_after_mid_autumn_festival_dates.values
        mid_autumn_festival[((day_after_mid_autumn_festival_dates.month == 10) & (day_after_mid_autumn_festival_dates.day == 1))] += np.timedelta64(1, 'D')
        return list(chain(lunar_new_years_eve, chinese_lunar_new_year_dates, lunar_new_year_2, lunar_new_year_3, lunar_new_year_4, qingming_festival, vectorized_sunday_to_monday(chinese_buddhas_birthday_dates), vectorized_sunday_to_monday(dragon_boat_festival_dates), mid_autumn_festival, vectorized_sunday_to_monday(double_ninth_festival_dates), HKAdhocClosures))

    def special_closes(self):
        # Weekday Christmas/New Year's eves: 12:30 close before the
        # 2011-03-07 change, 12:00 after.
        return [(time(12, 30), HolidayCalendar([new_years_eve(end_date=pd.Timestamp('1999-12-01'), days_of_week=weekdays), new_years_eve(start_date=pd.Timestamp('2000-12-01'), end_date=pd.Timestamp('2011-03-07'), days_of_week=weekdays), christmas_eve(end_date=pd.Timestamp('2011-03-07'), days_of_week=weekdays)])), (time(12, 0), HolidayCalendar([new_years_eve(start_date=pd.Timestamp('2011-03-07'), days_of_week=weekdays), christmas_eve(start_date=pd.Timestamp('2011-03-07'), days_of_week=weekdays)]))]

    def special_closes_adhoc(self):
        # From 2013, Lunar New Year's Eve is a half day when LNY falls
        # Tue-Sat (so the eve itself is a weekday).
        lunar_new_years_eve = (chinese_lunar_new_year_dates - pd.Timedelta(days=1))[(np.in1d(chinese_lunar_new_year_dates.weekday, [TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY]) & (chinese_lunar_new_year_dates.year >= 2013))].values

        def selection(arr, start, end):
            # Keep dates in the half-open interval [start, end); None means
            # unbounded on that side.
            predicates = []
            if (start is not None):
                predicates.append((start.asm8 <= arr))
            if (end is not None):
                predicates.append((arr < end.asm8))
            if (not predicates):
                return arr
            return arr[np.all(predicates, axis=0)]

        # Pair each early-close time with the eves falling in the window in
        # which that time was in force (sliding over consecutive regimes).
        return [(time, selection(lunar_new_years_eve, start, end)) for ((start, time), (end, _)) in toolz.sliding_window(2, toolz.concatv(self.regular_early_close_times, [(None, None)]))]
def adjust_lr(optimizer, epoch, eta_max=None, eta_min=0.0):
    """Set the learning rate on every param group for the given epoch.

    Supports two schedules selected by ``args.lr_type``:
    - 'SGDR': cosine annealing with warm restarts whose period doubles
      (t, 2t, 4t, ...), annealing from eta_max down to eta_min.
    - 'multistep': decay ``args.init_lr`` by 0.1 at each milestone passed.

    Returns the learning rate that was applied.
    """
    # FIX: the original default `eta_max=args.init_lr` was evaluated once at
    # import time, freezing whatever `args` held then (and crashing on import
    # if `args` was not yet parsed). Resolve the default at call time instead;
    # callers passing an explicit eta_max are unaffected.
    if eta_max is None:
        eta_max = args.init_lr
    cur_lr = 0.0
    if (args.lr_type == 'SGDR'):
        # i = index of the current restart cycle; cycle lengths are
        # sgdr_t * 2**i, so cycle i starts at epoch sgdr_t * (2**i - 1).
        i = int(math.log2(((epoch / args.sgdr_t) + 1)))
        T_cur = (epoch - (args.sgdr_t * ((2 ** i) - 1)))
        T_i = (args.sgdr_t * (2 ** i))
        cur_lr = (eta_min + ((0.5 * (eta_max - eta_min)) * (1 + np.cos(((np.pi * T_cur) / T_i)))))
    elif (args.lr_type == 'multistep'):
        cur_lr = (args.init_lr * (0.1 ** bisect_right(args.milestones, epoch)))
    for param_group in optimizer.param_groups:
        param_group['lr'] = cur_lr
    return cur_lr
# NOTE(review): this bare `_api()` call looks like the residue of a stripped
# decorator on the class below (streamz registers nodes with
# `@Stream.register_api()`); as written its return value is discarded —
# confirm against the upstream source.
_api()
class filter(Stream):
    """Stream node that only passes along elements accepted by `predicate`.

    `predicate` is called as ``predicate(x, *args, **kwargs)``; a None
    predicate is replaced by a truthiness check.
    """
    def __init__(self, upstream, predicate, *args, **kwargs):
        if (predicate is None):
            # Default: pass through truthy elements only.
            predicate = _truthy
        self.predicate = predicate
        # `stream_name` is extracted so the remaining kwargs can be forwarded
        # to the predicate on every update.
        stream_name = kwargs.pop('stream_name', None)
        self.kwargs = kwargs
        self.args = args
        Stream.__init__(self, upstream, stream_name=stream_name)

    def update(self, x, who=None, metadata=None):
        # Emit downstream only when the predicate accepts x.
        if self.predicate(x, *self.args, **self.kwargs):
            return self._emit(x, metadata=metadata)
# FIX: the decorator had been mangled to a bare `.skipif(...)` line, which is
# a syntax error; restore the pytest marker (requires the file's `pytest`
# import).
@pytest.mark.skipif(os.name == 'nt', reason='Fails on Windows')
def test_workspace_loads_pycodestyle_config(pylsp, tmpdir):
    """A workspace-local pycodestyle.cfg must win over server-pushed settings
    and follow the workspace through folder-change events."""
    # Point the server at an initial workspace.
    workspace1_dir = tmpdir.mkdir('Test123')
    pylsp.root_uri = str(workspace1_dir)
    pylsp.workspace._root_uri = str(workspace1_dir)
    # Second workspace carrying its own pycodestyle config.
    workspace2_dir = tmpdir.mkdir('NewTest456')
    cfg = workspace2_dir.join('pycodestyle.cfg')
    cfg.write('[pycodestyle]\nmax-line-length = 1000')
    workspace1 = {'uri': str(workspace1_dir)}
    workspace2 = {'uri': str(workspace2_dir)}
    event = {'added': [workspace2], 'removed': [workspace1]}
    pylsp.m_workspace__did_change_workspace_folders(event)
    # FIX: renamed the misspelled local `seetings` -> `settings`.
    settings = pylsp.workspaces[str(workspace2_dir)]._config.settings()
    assert (settings['plugins']['pycodestyle']['maxLineLength'] == 1000)
    # Server-pushed configuration must not override the project-local file.
    server_settings = {'pylsp': {'plugins': {'pycodestyle': {'maxLineLength': 10}}}}
    pylsp.m_workspace__did_change_configuration(server_settings)
    assert (settings['plugins']['pycodestyle']['maxLineLength'] == 1000)
    # A third workspace with a different local config gets its own value.
    workspace3_dir = tmpdir.mkdir('NewTest789')
    cfg1 = workspace3_dir.join('pycodestyle.cfg')
    cfg1.write('[pycodestyle]\nmax-line-length = 20')
    workspace3 = {'uri': str(workspace3_dir)}
    event = {'added': [workspace3], 'removed': [workspace2]}
    pylsp.m_workspace__did_change_workspace_folders(event)
    settings = pylsp.workspaces[str(workspace3_dir)]._config.settings()
    assert (settings['plugins']['pycodestyle']['maxLineLength'] == 20)
class TestViiL2NCFileHandler(unittest.TestCase):
    """Tests for the VII L2 netCDF file handler's orthorectification."""

    def setUp(self):
        """Create a temporary netCDF file with a constant delta_lat field
        and open a handler on it."""
        # Unique file name so parallel runs don't collide.
        self.test_file_name = ((TEST_FILE + str(uuid.uuid1())) + '.nc')
        with Dataset(self.test_file_name, 'w') as nc:
            g1 = nc.createGroup('data')
            g1.createDimension('num_pixels', 100)
            g1.createDimension('num_lines', 10)
            g1_2 = g1.createGroup('measurement_data')
            delta_lat = g1_2.createVariable('delta_lat', np.float32, dimensions=('num_lines', 'num_pixels'))
            # Constant correction field used by the orthorectification test.
            delta_lat[:] = 0.1
        self.reader = ViiL2NCFileHandler(filename=self.test_file_name, filename_info={'creation_time': datetime.datetime(year=2017, month=9, day=22, hour=22, minute=40, second=10), 'sensing_start_time': datetime.datetime(year=2017, month=9, day=20, hour=12, minute=30, second=30), 'sensing_end_time': datetime.datetime(year=2017, month=9, day=20, hour=18, minute=30, second=50)}, filetype_info={})

    def tearDown(self):
        """Best-effort removal of the temporary file."""
        try:
            os.remove(self.test_file_name)
        except OSError:
            pass

    def test_functions(self):
        """Orthorectification adds delta_lat (0.1) to a field of ones and
        preserves the variable's attributes."""
        variable = xr.DataArray(dims=('num_lines', 'num_pixels'), name='test_name', attrs={'key_1': 'value_1', 'key_2': 'value_2'}, data=da.from_array(np.ones((10, 100))))
        orthorect_variable = self.reader._perform_orthorectification(variable, 'data/measurement_data/delta_lat')
        expected_values = (1.1 * np.ones((10, 100)))
        assert np.allclose(orthorect_variable.values, expected_values)
        assert (orthorect_variable.attrs['key_1'] == 'value_1')
class EditableModulePureFunction(PureFunction):
    """Pure-function adapter for a bound method of an EditableModule.

    The module's parameters for the wrapped method are exposed through the
    PureFunction get/set hooks below.
    """

    def __init__(self, obj: EditableModule, method: Callable):
        self.obj = obj
        self.method = method
        super().__init__(method)

    def _get_all_obj_params_init(self) -> List:
        # Snapshot the parameters the wrapped method depends on.
        method_name = self.method.__name__
        return list(self.obj.getparams(method_name))

    def _set_all_obj_params(self, allobjparams: List):
        # Push the given parameter values back into the owning module.
        method_name = self.method.__name__
        self.obj.setparams(method_name, *allobjparams)
def migrate_old_content(apps, schema_editor):
    """Copy each release page's raw body (minus its first three lines) onto
    the release itself, prefixed with MARKER, and detach the page."""
    Release = apps.get_model('downloads', 'Release')
    db_alias = schema_editor.connection.alias
    with_pages = Release.objects.using(db_alias).filter(release_page__isnull=False)
    for release in with_pages:
        raw_lines = release.release_page.content.raw.splitlines()
        body = '\n'.join(raw_lines[3:])
        release.content = (MARKER + body)
        release.release_page = None
        release.save()
def get_name_params_difference(named_parameters1, named_parameters2):
    """Return {name: weight-diff} for every parameter name present in both
    input mappings (names unique to either side are ignored)."""
    shared_names = set(named_parameters1) & set(named_parameters2)
    return {
        name: get_diff_weights(named_parameters1[name], named_parameters2[name])
        for name in shared_names
    }
def test_curried_operator():
    """Every callable in toolz.curried.operator must be curried exactly when
    `should_curry` says it should be."""
    import operator
    for (k, v) in vars(cop).items():
        if (not callable(v)):
            continue
        if (not isinstance(v, toolz.curry)):
            # Uncurried callables are only acceptable if they are unary:
            # probe with an int, then a str, and `continue` if either works.
            try:
                v(1)
            except TypeError:
                try:
                    v('x')
                except TypeError:
                    pass
                else:
                    continue
            else:
                continue
            raise AssertionError(('toolz.curried.operator.%s is not curried!' % k))
        # Curried-ness must agree with the should_curry policy.
        assert (should_curry(getattr(operator, k)) == isinstance(v, toolz.curry)), k
    # Sanity check that the module isn't empty / mis-imported.
    assert (len((set(vars(cop)) & {'add', 'sub', 'mul'})) == 3)
def nature_cnn(unscaled_images, **conv_kwargs):
    """Three-conv CNN (as in the DQN Nature paper) over images scaled to
    [0, 1], followed by a 512-unit relu'd fully connected layer."""
    relu = tf.nn.relu
    x = (tf.cast(unscaled_images, tf.float32) / 255.0)
    # (scope, filters, receptive field, stride) for each conv layer.
    conv_specs = (('c1', 32, 8, 4), ('c2', 64, 4, 2), ('c3', 64, 3, 1))
    for scope, filters, field, step in conv_specs:
        x = relu(conv(x, scope, nf=filters, rf=field, stride=step, init_scale=np.sqrt(2), **conv_kwargs))
    x = conv_to_fc(x)
    return relu(fc(x, 'fc1', nh=512, init_scale=np.sqrt(2)))
class AMPServerFactory(protocol.ServerFactory):
    """Twisted server factory for AMP connections.

    Tracks the server/launcher connections and callback registries on the
    factory, and exposes the newest protocol instance on the portal.
    """
    noisy = False

    def __init__(self, portal):
        self.portal = portal
        self.protocol = AMPServerProtocol
        # Connection bookkeeping and pending notifications.
        self.broadcasts = []
        self.server_connection = None
        self.launcher_connection = None
        self.disconnect_callbacks = {}
        self.server_connect_callbacks = []

    def logPrefix(self):
        # Label used by Twisted's logging machinery.
        return 'AMP'

    def buildProtocol(self, addr):
        proto = AMPServerProtocol()
        # The portal always holds the most recently built protocol.
        self.portal.amp_protocol = proto
        proto.factory = self
        return proto
def download_gif_file(gif_id: str, download_url: str, gif_dir: str):
    """Download the GIF at `download_url` into `gif_dir` (path derived from
    `gif_id`).

    Returns the local file path on success (or if already present), and
    None on a non-200 response or any download error.
    """
    gif_filepath = gif_id_to_filepath(gif_id, gif_dir=gif_dir)
    Path(os.path.dirname(gif_filepath)).mkdir(parents=True, exist_ok=True)
    if os.path.exists(gif_filepath):
        # Already downloaded — nothing to do.
        return gif_filepath
    try:
        response = requests.get(download_url)
        if (response.status_code == 200):
            with open(gif_filepath, 'wb') as f:
                f.write(response.content)
        else:
            gif_filepath = None
    except Exception as e:
        # FIX: the original logged an undefined name `twitter_url`, which
        # raised NameError inside this handler; log the actual URL.
        logging.error(f'Exception occurred for {download_url}')
        logging.exception(e)
        # FIX: the original set gif_filepath to None *before* os.remove, so
        # os.remove(None) raised TypeError (swallowed by a bare except) and
        # partially written files were never cleaned up. Remove first.
        try:
            os.remove(gif_filepath)
        except OSError:
            pass
        gif_filepath = None
    return gif_filepath
class RemoveColumnsCollator():
    """Wraps a data collator so that, before collation, every dict feature is
    stripped of keys absent from `signature_columns`.

    When a logger and model name are provided, the first batch that drops
    columns triggers a one-time informational message.
    """

    def __init__(self, data_collator, signature_columns, logger=None, model_name: Optional[str]=None, description: Optional[str]=None):
        self.data_collator = data_collator
        self.signature_columns = signature_columns
        self.logger = logger
        self.description = description
        self.model_name = model_name
        self.message_logged = False

    def _log_ignored_once(self, feature: dict):
        # Emit the info message at most once per collator instance.
        if self.message_logged or not self.logger or not self.model_name:
            return
        ignored_columns = list((set(feature.keys()) - set(self.signature_columns)))
        if len(ignored_columns) > 0:
            dset_description = ('' if (self.description is None) else f'in the {self.description} set')
            self.logger.info(f"The following columns {dset_description} don't have a corresponding argument in `{self.model_name}.forward` and have been ignored: {', '.join(ignored_columns)}. If {', '.join(ignored_columns)} are not expected by `{self.model_name}.forward`, you can safely ignore this message.")
            self.message_logged = True

    def _remove_columns(self, feature: dict) -> dict:
        # Non-dict features pass through untouched.
        if not isinstance(feature, dict):
            return feature
        self._log_ignored_once(feature)
        keep = self.signature_columns
        return {key: value for key, value in feature.items() if key in keep}

    def __call__(self, features: List[dict]):
        stripped = [self._remove_columns(feature) for feature in features]
        return self.data_collator(stripped)
def _sort_albums(songs):
no_album_count = 0
albums = {}
for song in songs:
if ('album' in song):
albums[song.list('album')[0]] = song
else:
no_album_count += 1
albums = [(song.get('date', ''), song, album) for (album, song) in albums.items()]
albums.sort()
return (albums, no_album_count) |
def extract_classes(chunks: Iterable[CacheData]) -> Iterable[JsonDict]:
    """Yield every dict found anywhere (recursively) inside the `.data`
    payloads of the given cache chunks.

    Each dict is yielded before its nested contents are explored; lists are
    descended into transparently.
    """
    def walk(nodes: Iterable[JsonDict]) -> Iterable[JsonDict]:
        for node in nodes:
            if isinstance(node, dict):
                yield node
                yield from walk(node.values())
            elif isinstance(node, list):
                yield from walk(node)

    yield from walk([chunk.data for chunk in chunks])
def _filter_gabriel(edges, coordinates):
    """Return the set of directed edges violating the Gabriel condition.

    `edges` is a sequence of (i, j) vertex-index pairs sorted by their first
    element (runs of edges share a source vertex); `coordinates` maps a
    vertex index to its coordinate vector (numpy-style, supporting
    elementwise arithmetic and .sum()). Edges to drop are returned in both
    directions.
    """
    edge_pointer = 0
    n_edges = len(edges)
    to_drop = []
    while (edge_pointer < n_edges):
        edge = edges[edge_pointer]
        # Measure the run of consecutive edges sharing this source vertex
        # (relies on `edges` being sorted by source).
        cardinality = 0
        for joff in range(edge_pointer, n_edges):
            next_edge = edges[joff]
            if (next_edge[0] != edge[0]):
                break
            cardinality += 1
        # Test each edge (i, j) of the run against every other neighbour k
        # of the same source vertex i.
        for ix in range(edge_pointer, (edge_pointer + cardinality)):
            (i, j) = edges[ix]
            dij2 = ((coordinates[i] - coordinates[j]) ** 2).sum()  # squared length of (i, j)
            for ix2 in range(edge_pointer, (edge_pointer + cardinality)):
                (_, k) = edges[ix2]
                if (j == k):
                    continue
                dik2 = ((coordinates[i] - coordinates[k]) ** 2).sum()
                djk2 = ((coordinates[j] - coordinates[k]) ** 2).sum()
                # Gabriel criterion: keep (i, j) only if no neighbour k lies
                # inside the circle with diameter ij, i.e. only if
                # d(i,j)^2 <= d(i,k)^2 + d(j,k)^2. Violators are dropped in
                # both directions.
                if (dij2 > (dik2 + djk2)):
                    to_drop.append((i, j))
                    to_drop.append((j, i))
        # Jump past the whole run for this source vertex.
        edge_pointer += cardinality
    return set(to_drop)
class Animation(pg.ItemGroup):
    """Graphics group holding one ClockItem per clock of a simulation."""

    def __init__(self, sim):
        pg.ItemGroup.__init__(self)
        self.sim = sim
        self.clocks = sim.clocks
        self.items = {}
        for name, clock in self.clocks.items():
            clock_item = ClockItem(clock)
            self.items[name] = clock_item
            self.addItem(clock_item)

    def restart(self):
        # Return every clock item to its initial state.
        for clock_item in self.items.values():
            clock_item.reset()

    def stepTo(self, t):
        # Advance every clock item to simulation time t.
        for clock_item in self.items.values():
            clock_item.stepTo(t)
def resolve_func_args(test_func, posargs, kwargs):
    """Bind posargs/kwargs to test_func's signature (whose first parameter
    must be 'self') and split the result.

    Returns (values of the required non-self parameters in declaration
    order, full bound-arguments mapping without 'self'). Note: `posargs` is
    mutated in place (the self marker is prepended).
    """
    signature = inspect.signature(test_func)
    param_names = list(signature.parameters)
    assert param_names[0] == 'self'
    posargs.insert(0, SelfMarker)
    bound = signature.bind(*posargs, **kwargs)
    bound.apply_defaults()
    args = bound.arguments
    # Required = no default and not *args/**kwargs.
    variadic_kinds = (Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD)
    required_names = [
        name
        for name, param in signature.parameters.items()
        if param.default is Parameter.empty and param.kind not in variadic_kinds
    ]
    assert args['self'] == SelfMarker
    assert required_names[0] == 'self'
    del required_names[0]
    del args['self']
    required_args = [args[name] for name in required_names]
    return (required_args, args)
def test_create_beam_configuration_description_vanilla():
    """The vanilla beam configuration yields an empty description list."""
    vanilla_config = BeamConfiguration(
        power=BeamAmmoConfiguration(0, (- 1), (- 1), 0, 0, 5, 0),
        dark=BeamAmmoConfiguration(1, 45, (- 1), 1, 5, 5, 30),
        light=BeamAmmoConfiguration(2, 46, (- 1), 1, 5, 5, 30),
        annihilator=BeamAmmoConfiguration(3, 46, 45, 1, 5, 5, 30),
    )
    description = preset_describer.create_beam_configuration_description(vanilla_config)
    assert description == []
class Bear(Creature):
    """Bear enemy: 20 HP, 1-10 damage, and a 50% miss chance each turn."""

    def __init__(self, rand):
        super().__init__(rand)
        self.name = 'Bear'
        self.images = ['bear_normal']
        self.attack = [1, 10]  # inclusive damage range for randint
        self.hp_max = 20
        self.hp = self.hp_max
        self.love = 3

    def turn(self):
        # Roll damage first so the RNG is consumed even on a miss
        # (preserves the original's random sequence).
        damage = self.rand.randint(*self.attack)
        if self.rand.randint(0, 1) != 1:
            return (f'The {self.name} attacks you clumsily and misses.', 0)
        return (f'The {self.name} attacks you for {damage} DMG.', damage)
class TrickUsagePopup(QtWidgets.QDialog, Ui_TrickUsagePopup):
    """Dialog listing, per area, which tricks a preset's trick level enables,
    with links that open the data visualizer at that area."""

    def __init__(self, parent: QWidget, window_manager: WindowManager, preset: Preset):
        super().__init__(parent)
        self.setupUi(self)
        set_default_window_icon(self)
        self._window_manager = window_manager
        self._game_description = filtered_database.game_description_for_layout(preset.configuration)
        database = self._game_description.resource_database
        trick_level = preset.configuration.trick_level
        # Header text: either "Minimal Logic" or the sorted list of enabled
        # tricks with their configured levels.
        if trick_level.minimal_logic:
            trick_usage_description = 'Minimal Logic'
        else:
            trick_usage_description = ', '.join(sorted((f'{trick.long_name} ({trick_level.level_for_trick(trick).long_name})' for trick in database.trick if trick_level.has_specific_level_for_trick(trick))))
        self.area_list_label.linkActivated.connect(self._on_click_link_to_data_editor)
        self.setWindowTitle(f'{self.windowTitle()} for preset {preset.name}')
        self.title_label.setText(self.title_label.text().format(trick_levels=trick_usage_description))
        # Both dialog buttons just dismiss the popup.
        self.button_box.accepted.connect(self.button_box_close)
        self.button_box.rejected.connect(self.button_box_close)
        # Under minimal logic there is no per-area trick breakdown to show.
        if trick_level.minimal_logic:
            return
        bootstrap = self._game_description.game.generator.bootstrap
        trick_resources = ResourceCollection.from_resource_gain(database, bootstrap.trick_resources_for_configuration(trick_level, database))
        # Build one HTML paragraph per area that uses any enabled trick, with
        # a data-editor:// link consumed by _on_click_link_to_data_editor.
        lines = []
        for region in sorted(self._game_description.region_list.regions, key=(lambda it: it.name)):
            for area in sorted(region.areas, key=(lambda it: it.name)):
                used_tricks = _check_used_tricks(area, trick_resources, database)
                if used_tricks:
                    lines.append(f"""<p><a href="data-editor://{region.correct_name(area.in_dark_aether)}/{area.name}">{region.correct_name(area.in_dark_aether)} - {area.name}</a><br />{'<br />'.join(used_tricks)}</p>""")
        self.area_list_label.setText(''.join(lines))

    def button_box_close(self):
        # Treat any button as "dismiss".
        self.reject()

    def _on_click_link_to_data_editor(self, link: str):
        """Parse a data-editor://<region>/<area> link and open the data
        visualizer there; silently ignores links that don't match."""
        info = re.match('^data-editor://([^)]+)/([^)]+)$', link)
        if info:
            (region_name, area_name) = info.group(1, 2)
            self._window_manager.open_data_visualizer_at(region_name, area_name, game=self._game_description.game)
def group_channels(nuts):
    """Group channel nuts by their (agency, network, station, location) code
    prefix.

    Returns {ansl_codes: {(channel-base-name, deltat, tmin, tmax): set of
    full channel codes}}, where the channel base name is the channel code
    minus its final character (the component letter).
    """
    by_ansl = {}
    for nut in nuts:
        # Only channel-kind nuts participate.
        if (nut.kind_id != CHANNEL):
            continue
        ansl = nut.codes[:4]
        group = by_ansl.setdefault(ansl, {})
        k = (nut.codes[4][:(- 1)], nut.deltat, nut.tmin, nut.tmax)
        # FIX: the original called `group.add(nut.codes[4])` on the inner
        # dict — dicts have no `add`, so the first channel raised
        # AttributeError. The code must be added to the set stored at
        # group[k].
        group.setdefault(k, set()).add(nut.codes[4])
    return by_ansl
def test_filerewriter_is_str_dir_windows(windows):
    """On Windows, is_str_dir treats a trailing separator ('\\' or '/') as
    marking a directory — but only on plain strings; a Path input is not a
    string and is rejected."""
    assert (filesystem.FileRewriter.is_str_dir(Path('blah\\')) is False)
    assert (filesystem.FileRewriter.is_str_dir('/blah') is False)
    assert (filesystem.FileRewriter.is_str_dir('/blah/') is True)
    assert (filesystem.FileRewriter.is_str_dir('c:\\blah\\') is True)
    assert (filesystem.FileRewriter.is_str_dir('c:/blah/') is True)
def recover_params(param_groups, param_names, rank=None, neighbor_hat_params=None, get_hat_params=True):
    """Extract parameters and a flattened TensorBuffer view of them.

    With get_hat_params=True (requires `rank` and `neighbor_hat_params`),
    additionally builds a second flattened buffer overwritten with the
    neighbor's hat parameters and returns
    (params, flatten_params, flatten_hat_params); otherwise returns
    (params, flatten_params).
    """
    (params, _) = get_data(param_groups, param_names, is_get_grad=False)
    flatten_params = TensorBuffer(params)
    if not get_hat_params:
        return (params, flatten_params)
    assert ((neighbor_hat_params is not None) and (rank is not None))
    flatten_hat_params = TensorBuffer(params)
    # Replace the freshly flattened values with the neighbor's copy.
    flatten_hat_params.buffer.data[:] = neighbor_hat_params[rank].buffer
    return (params, flatten_params, flatten_hat_params)
def construct_infobox_prompt(current_sentence, current_name, other_names, num_examples=5, random_order=False):
    """Build a few-shot attribute-extraction prompt for `current_sentence`.

    The most similar library examples (by encoding dot product) that do not
    leak the current entity names are appended as demonstrations — least
    similar first (or shuffled when random_order=True) — followed by the
    current context. Returns the full prompt string.
    """
    instruction = 'Extract attributes from the given context using the format Attribute: Value.\n----'
    example_library = get_example_library()
    current_encoding = sentence_encode([current_sentence])
    scores = (current_encoding * example_library['encodings']).sum(axis=1)
    scores_indices = list(range(len(scores)))
    best_example_indices = sorted(scores_indices, key=(lambda i: scores[i]), reverse=True)
    # Drop examples whose sentence mentions any token of the current or
    # other entity names...
    best_example_indices = [i for i in best_example_indices if all([(tok not in example_library['sentences'][i]) for tok in (current_name.split() + sum([other_name.split() for other_name in other_names], []))])]
    # ...and examples whose own name tokens appear in the current context.
    best_example_indices = [i for i in best_example_indices if all([((tok not in current_name) and (tok not in current_sentence)) for tok in example_library['names'][i].split()])]
    best_example_indices = best_example_indices[:num_examples]
    # FIX: `reversed()` returns a one-shot iterator, and the original passed
    # it straight to random.shuffle, which raised TypeError whenever
    # random_order=True (shuffle needs a mutable sequence). Materialize it.
    best_example_indices = list(reversed(best_example_indices))
    if random_order:
        random.shuffle(best_example_indices)
    for i in best_example_indices:
        name = example_library['names'][i]
        instruction = ((((instruction + '\nContext (') + name) + '): ') + example_library['sentences'][i])
        keys = [key.strip() for key in example_library['keys'][i].split(',') if (len(key.strip()) > 0)]
        values = [value.strip() for value in example_library['values'][i].split(',') if (len(value.strip()) > 0)]
        assert (len(keys) == len(values))
        for (key, value) in zip(keys, values):
            # Keys like "father's" read better as "<name> is <key> <value>".
            if key.endswith("'s"):
                instruction = ((((((instruction + '\n') + name) + ' is ') + key) + ' ') + value)
            else:
                instruction = ((((((instruction + '\n') + name) + "'s ") + key) + ' is ') + value)
        instruction = (instruction + '\n----')
    return ((((((instruction + '\nContext (') + current_name) + '): ') + current_sentence) + '\n') + current_name)
class Tee():
    """Duplicates writes to stdout and a log file, like Unix `tee`.

    Every write is flushed immediately so the log stays usable even if the
    process dies mid-run.
    """

    def __init__(self, fname, mode='a'):
        self.stdout = sys.stdout
        self.file = open(fname, mode)

    def write(self, message):
        self.stdout.write(message)
        self.file.write(message)
        # Eager flush keeps the on-disk log in sync with the console.
        self.flush()

    def flush(self):
        self.stdout.flush()
        self.file.flush()

    def close(self):
        """Close the underlying log file (fixes the original's handle leak:
        the file could never be closed). stdout is left untouched."""
        self.file.close()
def wide_resnet101_2d(deconv, delinear, channel_deconv, pretrained=False, progress=True, **kwargs):
    """Wide ResNet-101-2 built through `_resnet` with deconv-based layers.

    Doubles the bottleneck width (width_per_group = 64 * 2) relative to the
    standard ResNet-101 stage layout [3, 4, 23, 3]; all other construction
    details — including the meaning of deconv/delinear/channel_deconv — are
    delegated to `_resnet`.
    """
    kwargs['width_per_group'] = (64 * 2)
    return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], pretrained, progress, deconv=deconv, delinear=delinear, channel_deconv=channel_deconv, **kwargs)
class sysctl_oid_t(ctypes.Structure):
    """ctypes mirror of the kernel's `struct sysctl_oid`, bound to a guest
    memory address so it can be read from / written back to emulated memory."""

    class slist_entry(ctypes.Structure):
        # Singly-linked-list link: pointer to the next oid.
        _fields_ = (('sle_next', POINTER64),)

    _fields_ = (('oid_parent', POINTER64), ('oid_link', slist_entry), ('oid_number', ctypes.c_int32), ('oid_kind', ctypes.c_int32), ('oid_arg1', POINTER64), ('oid_arg2', ctypes.c_int32), ('oid_name', POINTER64), ('oid_handler', POINTER64), ('oid_fmt', POINTER64), ('oid_descr', POINTER64), ('oid_version', ctypes.c_int32), ('oid_refcnt', ctypes.c_int32))

    def __init__(self, ql, base):
        # ql: emulator handle providing .mem and .log; base: guest address
        # this structure shadows.
        self.ql = ql
        self.base = base

    def updateToMem(self):
        # Serialize the current field values back into guest memory.
        self.ql.mem.write(self.base, bytes(self))

    def loadFromMem(self):
        """Read the structure from guest memory and return a new instance
        (the ql/base bookkeeping attributes are carried over)."""
        data = self.ql.mem.read(self.base, ctypes.sizeof(self))
        # NOTE(review): from_buffer requires a writable buffer — presumably
        # mem.read returns a bytearray; confirm.
        newObj = type(self).from_buffer(data)
        newObj.ql = self.ql
        newObj.base = self.base
        return newObj

    def dump(self):
        # Log every field: pointers and list links as hex addresses,
        # plain integers as hex values.
        for field in self._fields_:
            if isinstance(getattr(self, field[0]), POINTER64):
                self.ql.log.info(('%s: 0x%x' % (field[0], getattr(self, field[0]).value)))
            elif isinstance(getattr(self, field[0]), sysctl_oid_t.slist_entry):
                self.ql.log.info(('%s: Struct( 0x%x )' % (field[0], getattr(self, field[0]).sle_next.value)))
            else:
                self.ql.log.info(('%s: 0x%x' % (field[0], getattr(self, field[0]))))
def test_licenses():
    """Structural sanity checks on the LICENSES registry."""
    assert isinstance(LICENSES, dict)
    # Keys must be stored in sorted order.
    assert list(LICENSES) == sorted(LICENSES)
    for name, data in LICENSES.items():
        assert isinstance(data, dict)
        # Each entry carries a string id and an explicit deprecation flag.
        for field, expected_type in (('id', str), ('deprecated', bool)):
            assert field in data
            assert isinstance(data[field], expected_type)
        # The id must match its key, modulo case.
        assert data['id'].lower() == name
def test_ae_higherresolution_head():
    """Exercises AEHigherResolutionHead construction and forward shapes:
    final-conv padding, deconv output padding, tag_per_joint channel counts,
    and invalid-argument errors."""
    # final_conv_kernel=0 is rejected at construction time.
    with pytest.raises(AssertionError):
        _ = AEHigherResolutionHead(in_channels=512, num_joints=17, with_ae_loss=[True, False], extra={'final_conv_kernel': 0}, loss_keypoint=dict(type='MultiLossFactory', num_joints=17, num_stages=2, ae_loss_type='exp', with_ae_loss=[True, False], push_loss_factor=[0.001, 0.001], pull_loss_factor=[0.001, 0.001], with_heatmaps_loss=[True, True], heatmaps_loss_factor=[1.0, 1.0]))
    # final_conv_kernel=3 -> padding (1, 1) on the final conv.
    head = AEHigherResolutionHead(in_channels=512, num_joints=17, with_ae_loss=[True, False], extra={'final_conv_kernel': 3}, cat_output=[True], loss_keypoint=dict(type='MultiLossFactory', num_joints=17, num_stages=2, ae_loss_type='exp', with_ae_loss=[True, False], push_loss_factor=[0.001, 0.001], pull_loss_factor=[0.001, 0.001], with_heatmaps_loss=[True, True], heatmaps_loss_factor=[1.0, 1.0]))
    head.init_weights()
    assert (head.final_layers[0].padding == (1, 1))
    # final_conv_kernel=1 -> padding (0, 0).
    head = AEHigherResolutionHead(in_channels=512, num_joints=17, with_ae_loss=[True, False], extra={'final_conv_kernel': 1}, cat_output=[True], loss_keypoint=dict(type='MultiLossFactory', num_joints=17, num_stages=2, ae_loss_type='exp', with_ae_loss=[True, False], push_loss_factor=[0.001, 0.001], pull_loss_factor=[0.001, 0.001], with_heatmaps_loss=[True, True], heatmaps_loss_factor=[1.0, 1.0]))
    head.init_weights()
    assert (head.final_layers[0].padding == (0, 0))
    # No `extra` -> defaults also give padding (0, 0).
    head = AEHigherResolutionHead(in_channels=512, num_joints=17, with_ae_loss=[True, False], cat_output=[True], loss_keypoint=dict(type='MultiLossFactory', num_joints=17, num_stages=2, ae_loss_type='exp', with_ae_loss=[True, False], push_loss_factor=[0.001, 0.001], pull_loss_factor=[0.001, 0.001], with_heatmaps_loss=[True, True], heatmaps_loss_factor=[1.0, 1.0]))
    head.init_weights()
    assert (head.final_layers[0].padding == (0, 0))
    # Deconv kernel 1 is invalid.
    with pytest.raises(ValueError):
        _ = AEHigherResolutionHead(in_channels=512, num_joints=17, with_ae_loss=[True, False], num_deconv_kernels=[1], cat_output=[True], loss_keypoint=dict(type='MultiLossFactory', num_joints=17, num_stages=2, ae_loss_type='exp', with_ae_loss=[True, False], push_loss_factor=[0.001, 0.001], pull_loss_factor=[0.001, 0.001], with_heatmaps_loss=[True, True], heatmaps_loss_factor=[1.0, 1.0]))
    # Deconv kernel 4 -> output_padding (0, 0).
    head = AEHigherResolutionHead(in_channels=512, num_joints=17, with_ae_loss=[True, False], num_deconv_kernels=[4], cat_output=[True], loss_keypoint=dict(type='MultiLossFactory', num_joints=17, num_stages=2, ae_loss_type='exp', with_ae_loss=[True, False], push_loss_factor=[0.001, 0.001], pull_loss_factor=[0.001, 0.001], with_heatmaps_loss=[True, True], heatmaps_loss_factor=[1.0, 1.0]))
    head.init_weights()
    assert (head.deconv_layers[0][0][0].output_padding == (0, 0))
    # Deconv kernel 3 -> output_padding (1, 1).
    head = AEHigherResolutionHead(in_channels=512, num_joints=17, with_ae_loss=[True, False], num_deconv_kernels=[3], cat_output=[True], loss_keypoint=dict(type='MultiLossFactory', num_joints=17, num_stages=2, ae_loss_type='exp', with_ae_loss=[True, False], push_loss_factor=[0.001, 0.001], pull_loss_factor=[0.001, 0.001], with_heatmaps_loss=[True, True], heatmaps_loss_factor=[1.0, 1.0]))
    head.init_weights()
    assert (head.deconv_layers[0][0][0].output_padding == (1, 1))
    # Deconv kernel 2 -> output_padding (0, 0).
    head = AEHigherResolutionHead(in_channels=512, num_joints=17, with_ae_loss=[True, False], num_deconv_kernels=[2], cat_output=[True], loss_keypoint=dict(type='MultiLossFactory', num_joints=17, num_stages=2, ae_loss_type='exp', with_ae_loss=[True, False], push_loss_factor=[0.001, 0.001], pull_loss_factor=[0.001, 0.001], with_heatmaps_loss=[True, True], heatmaps_loss_factor=[1.0, 1.0]))
    head.init_weights()
    assert (head.deconv_layers[0][0][0].output_padding == (0, 0))
    # tag_per_joint=False, no AE loss -> 17 output channels at both scales.
    head = AEHigherResolutionHead(in_channels=512, num_joints=17, tag_per_joint=False, with_ae_loss=[False, False], extra={'final_conv_kernel': 3}, cat_output=[True], loss_keypoint=dict(type='MultiLossFactory', num_joints=17, num_stages=2, ae_loss_type='exp', with_ae_loss=[False, False], push_loss_factor=[0.001, 0.001], pull_loss_factor=[0.001, 0.001], with_heatmaps_loss=[True, True], heatmaps_loss_factor=[1.0, 1.0]))
    head.init_weights()
    input_shape = (1, 512, 32, 32)
    inputs = _demo_inputs(input_shape)
    out = head(inputs)
    assert (out[0].shape == torch.Size([1, 17, 32, 32]))
    assert (out[1].shape == torch.Size([1, 17, 64, 64]))
    # tag_per_joint=False with AE loss on stage 0 -> one extra tag channel
    # (18) at the first scale only.
    head = AEHigherResolutionHead(in_channels=512, num_joints=17, tag_per_joint=False, with_ae_loss=[True, False], extra={'final_conv_kernel': 3}, cat_output=[True], loss_keypoint=dict(type='MultiLossFactory', num_joints=17, num_stages=2, ae_loss_type='exp', with_ae_loss=[True, False], push_loss_factor=[0.001, 0.001], pull_loss_factor=[0.001, 0.001], with_heatmaps_loss=[True, True], heatmaps_loss_factor=[1.0, 1.0]))
    head.init_weights()
    input_shape = (1, 512, 32, 32)
    inputs = _demo_inputs(input_shape)
    out = head(inputs)
    assert (out[0].shape == torch.Size([1, 18, 32, 32]))
    assert (out[1].shape == torch.Size([1, 17, 64, 64]))
    # tag_per_joint=True with AE loss on both stages -> 34 channels (17
    # heatmaps + 17 tags) at both scales.
    head = AEHigherResolutionHead(in_channels=512, num_joints=17, tag_per_joint=True, with_ae_loss=[True, True], extra={'final_conv_kernel': 3}, cat_output=[True], loss_keypoint=dict(type='MultiLossFactory', num_joints=17, num_stages=2, ae_loss_type='exp', with_ae_loss=[True, True], push_loss_factor=[0.001, 0.001], pull_loss_factor=[0.001, 0.001], with_heatmaps_loss=[True, True], heatmaps_loss_factor=[1.0, 1.0]))
    head.init_weights()
    input_shape = (1, 512, 32, 32)
    inputs = _demo_inputs(input_shape)
    out = head(inputs)
    assert (out[0].shape == torch.Size([1, 34, 32, 32]))
    assert (out[1].shape == torch.Size([1, 34, 64, 64]))
    # Same but with cat_output=[False]: shapes are unchanged.
    head = AEHigherResolutionHead(in_channels=512, num_joints=17, tag_per_joint=True, with_ae_loss=[True, True], extra={'final_conv_kernel': 3}, cat_output=[False], loss_keypoint=dict(type='MultiLossFactory', num_joints=17, num_stages=2, ae_loss_type='exp', with_ae_loss=[True, True], push_loss_factor=[0.001, 0.001], pull_loss_factor=[0.001, 0.001], with_heatmaps_loss=[True, True], heatmaps_loss_factor=[1.0, 1.0]))
    head.init_weights()
    input_shape = (1, 512, 32, 32)
    inputs = _demo_inputs(input_shape)
    out = head(inputs)
    assert (out[0].shape == torch.Size([1, 34, 32, 32]))
    assert (out[1].shape == torch.Size([1, 34, 64, 64]))
    # The head also accepts the input wrapped in a list.
    head = AEHigherResolutionHead(in_channels=512, num_joints=17, tag_per_joint=True, with_ae_loss=[True, True], extra={'final_conv_kernel': 3}, cat_output=[False], loss_keypoint=dict(type='MultiLossFactory', num_joints=17, num_stages=2, ae_loss_type='exp', with_ae_loss=[True, True], push_loss_factor=[0.001, 0.001], pull_loss_factor=[0.001, 0.001], with_heatmaps_loss=[True, True], heatmaps_loss_factor=[1.0, 1.0]))
    head.init_weights()
    input_shape = (1, 512, 32, 32)
    inputs = _demo_inputs(input_shape)
    out = head([inputs])
    assert (out[0].shape == torch.Size([1, 34, 32, 32]))
    assert (out[1].shape == torch.Size([1, 34, 64, 64]))
class TestValidator(SetUpTest, TestCase):
    """Tests for the .qlr file validator."""

    def test_validator_should_succeed(self):
        # A known-good .qlr fixture (provided by SetUpTest) passes validation.
        with open(self.qlr_file) as f:
            self.assertTrue(validator(f))

    def test_validator_should_failed(self):
        # A file whose root tag is not a qlr layer definition is rejected.
        tf = NamedTemporaryFile(mode='w+t', suffix='.qlr')
        tf.write('<!DOCTYPE qgis-layer-definition><not_qlr></not_qlr>')
        # NOTE(review): the handle is passed with its position at EOF and no
        # flush — presumably `validator` rewinds or reopens by name; confirm.
        msg = 'Invalid root tag of qlr file. Please ensure your file is correct.'
        with self.assertRaisesMessage(ValidationError, msg):
            validator(tf)
def convert_conv_fc(blobs, state_dict, caffe_name, torch_name, converted_names):
    """Copy a Caffe conv/fc layer's weight (and bias, if present) from
    `blobs` into `state_dict` under `torch_name`, recording the consumed
    blob keys in `converted_names`."""
    weight_key = caffe_name + '_w'
    bias_key = caffe_name + '_b'
    state_dict[torch_name + '.weight'] = torch.from_numpy(blobs[weight_key])
    converted_names.add(weight_key)
    # Bias is optional in Caffe layers.
    if bias_key in blobs:
        state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[bias_key])
        converted_names.add(bias_key)
def translate_pattern(pattern, anchor=1, prefix=None, is_regex=0):
    """Translate a glob `pattern` into a compiled regex.

    With is_regex true, `pattern` is treated as a regex (compiled if it is a
    string, returned as-is otherwise). Otherwise the glob is converted via
    glob_to_re; with `prefix` the match is anchored under that directory
    (allowing arbitrary intermediate path components), with `anchor` it is
    anchored at the start of the string, and with neither it is left
    unanchored.
    """
    if is_regex:
        if isinstance(pattern, str):
            return re.compile(pattern)
        else:
            return pattern
    # Discover the fixed start/end decorations glob_to_re wraps around its
    # output by translating a trivial one-character glob and splitting on it.
    (start, _, end) = glob_to_re('_').partition('_')
    if pattern:
        pattern_re = glob_to_re(pattern)
        assert (pattern_re.startswith(start) and pattern_re.endswith(end))
    else:
        pattern_re = ''
    if (prefix is not None):
        prefix_re = glob_to_re(prefix)
        assert (prefix_re.startswith(start) and prefix_re.endswith(end))
        # Strip the decorations from the prefix so it can be embedded.
        prefix_re = prefix_re[len(start):(len(prefix_re) - len(end))]
        sep = os.sep
        if (os.sep == '\\'):
            # Escape the backslash separator for use inside a regex.
            sep = '\\\\'
        # Re-assemble: decorations around "\Aprefix<sep>.*pattern".
        pattern_re = pattern_re[len(start):(len(pattern_re) - len(end))]
        pattern_re = '{}\\A{}{}.*{}{}'.format(start, prefix_re, sep, pattern_re, end)
    elif anchor:
        # Anchor the bare pattern at the start of the string.
        pattern_re = '{}\\A{}'.format(start, pattern_re[len(start):])
    return re.compile(pattern_re)
class Migration(migrations.Migration):
    # Follows 0010: redeclare Grant.user as a nullable/blankable FK to the
    # user model that PROTECTs the referenced user from deletion, with no
    # reverse accessor (related_name='+').
    dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('grants', '0010_remove_grant_user_id_grant_user')]
    operations = [migrations.AlterField(model_name='grant', name='user', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='+', to=settings.AUTH_USER_MODEL, verbose_name='user'))]
def main(memo, env, road_net, gui, volume, suffix, mod, cnt, gen, r_all, workers, onemodel):
    # Entry point: for every traffic file implied by the arguments, build the
    # four config dicts (experiment / agent / traffic-env / paths), merge them
    # onto the project defaults and run pipeline_wrapper — optionally as
    # parallel worker processes.  Returns the experiment memo string.
    # Grid size is encoded in road_net as '<cols>_<rows>'.
    NUM_COL = int(road_net.split('_')[0])
    NUM_ROW = int(road_net.split('_')[1])
    num_intersections = (NUM_ROW * NUM_COL)
    print('num_intersections:', num_intersections)
    # env selects the simulator backend: 0 -> 'sumo', 1 -> 'anon'.
    ENVIRONMENT = ['sumo', 'anon'][env]
    if r_all:
        # Sweep volumes 100, 200, 300 for this road net.
        traffic_file_list = [(((ENVIRONMENT + '_') + road_net) + ('_%d_%s' % (v, suffix))) for v in range(100, 400, 100)]
    else:
        traffic_file_list = ['{0}_{1}_{2}_{3}'.format(ENVIRONMENT, road_net, volume, suffix)]
    if env:
        # 'anon' traffic descriptions are .json; 'sumo' ones are .xml.
        traffic_file_list = [(i + '.json') for i in traffic_file_list]
    else:
        traffic_file_list = [(i + '.xml') for i in traffic_file_list]
    process_list = []
    n_workers = workers
    multi_process = True
    # Module-level experiment knobs defined elsewhere in this script.
    global PRETRAIN
    global NUM_ROUNDS
    global EARLY_STOP
    for traffic_file in traffic_file_list:
        # Experiment-level settings (round counts, model pool, pretrain).
        dic_exp_conf_extra = {'RUN_COUNTS': cnt, 'MODEL_NAME': mod, 'TRAFFIC_FILE': [traffic_file], 'ROADNET_FILE': 'roadnet_{0}.json'.format(road_net), 'NUM_ROUNDS': NUM_ROUNDS, 'NUM_GENERATORS': gen, 'MODEL_POOL': False, 'NUM_BEST_MODEL': 3, 'PRETRAIN': PRETRAIN, 'PRETRAIN_MODEL_NAME': mod, 'PRETRAIN_NUM_ROUNDS': 0, 'PRETRAIN_NUM_GENERATORS': 15, 'AGGREGATE': False, 'DEBUG': False, 'EARLY_STOP': EARLY_STOP}
        # Agent/learner hyper-parameters.
        dic_agent_conf_extra = {'EPOCHS': 100, 'SAMPLE_SIZE': 1000, 'MAX_MEMORY_LEN': 10000, 'UPDATE_Q_BAR_EVERY_C_ROUND': False, 'UPDATE_Q_BAR_FREQ': 5, 'N_LAYER': 2, 'TRAFFIC_FILE': traffic_file}
        global TOP_K_ADJACENCY
        global TOP_K_ADJACENCY_LANE
        global NEIGHBOR
        global SAVEREPLAY
        global ADJACENCY_BY_CONNECTION_OR_GEO
        global ANON_PHASE_REPRE
        # Traffic-environment settings: state features, feature dims, reward
        # weights and phase encodings for both simulators.
        dic_traffic_env_conf_extra = {'USE_LANE_ADJACENCY': True, 'ONE_MODEL': onemodel, 'NUM_AGENTS': num_intersections, 'NUM_INTERSECTIONS': num_intersections, 'ACTION_PATTERN': 'set', 'MEASURE_TIME': 10, 'IF_GUI': gui, 'DEBUG': False, 'TOP_K_ADJACENCY': TOP_K_ADJACENCY, 'ADJACENCY_BY_CONNECTION_OR_GEO': ADJACENCY_BY_CONNECTION_OR_GEO, 'TOP_K_ADJACENCY_LANE': TOP_K_ADJACENCY_LANE, 'SIMULATOR_TYPE': ENVIRONMENT, 'BINARY_PHASE_EXPANSION': True, 'FAST_COMPUTE': True, 'NEIGHBOR': NEIGHBOR, 'MODEL_NAME': mod, 'SAVEREPLAY': SAVEREPLAY, 'NUM_ROW': NUM_ROW, 'NUM_COL': NUM_COL, 'TRAFFIC_FILE': traffic_file, 'VOLUME': volume, 'ROADNET_FILE': 'roadnet_{0}.json'.format(road_net), 'phase_expansion': {1: [0, 1, 0, 1, 0, 0, 0, 0], 2: [0, 0, 0, 0, 0, 1, 0, 1], 3: [1, 0, 1, 0, 0, 0, 0, 0], 4: [0, 0, 0, 0, 1, 0, 1, 0], 5: [1, 1, 0, 0, 0, 0, 0, 0], 6: [0, 0, 1, 1, 0, 0, 0, 0], 7: [0, 0, 0, 0, 0, 0, 1, 1], 8: [0, 0, 0, 0, 1, 1, 0, 0]}, 'phase_expansion_4_lane': {1: [1, 1, 0, 0], 2: [0, 0, 1, 1]}, 'LIST_STATE_FEATURE': ['cur_phase', 'lane_num_vehicle'], 'DIC_FEATURE_DIM': dict(D_LANE_QUEUE_LENGTH=(4,), D_LANE_NUM_VEHICLE=(4,), D_COMING_VEHICLE=(12,), D_LEAVING_VEHICLE=(12,), D_LANE_NUM_VEHICLE_BEEN_STOPPED_THRES1=(4,), D_CUR_PHASE=(1,), D_NEXT_PHASE=(1,), D_TIME_THIS_PHASE=(1,), D_TERMINAL=(1,), D_LANE_SUM_WAITING_TIME=(4,), D_VEHICLE_POSITION_IMG=(4, 60), D_VEHICLE_SPEED_IMG=(4, 60), D_VEHICLE_WAITING_TIME_IMG=(4, 60), D_PRESSURE=(1,), D_ADJACENCY_MATRIX=(2,), D_ADJACENCY_MATRIX_LANE=(6,)), 'DIC_REWARD_INFO': {'flickering': 0, 'sum_lane_queue_length': 0, 'sum_lane_wait_time': 0, 'sum_lane_num_vehicle_left': 0, 'sum_duration_vehicle_left': 0, 'sum_num_vehicle_been_stopped_thres01': 0, 'sum_num_vehicle_been_stopped_thres1': (- 0.25), 'pressure': 0}, 'LANE_NUM': {'LEFT': 1, 'RIGHT': 1, 'STRAIGHT': 1}, 'PHASE': {'sumo': {0: [0, 1, 0, 1, 0, 0, 0, 0], 1: [0, 0, 0, 0, 0, 1, 0, 1], 2: [1, 0, 1, 0, 0, 0, 0, 0], 3: [0, 0, 0, 0, 1, 0, 1, 0]}, 'anon': ANON_PHASE_REPRE}}
        # Pick the data template directory from the scenario/volume name,
        # falling back to the lane configuration.
        global hangzhou_archive
        if hangzhou_archive:
            template = 'Archive+2'
        elif (volume == 'jinan'):
            template = 'Jinan'
        elif (volume == 'hangzhou'):
            template = 'Hangzhou'
        elif (volume == 'newyork'):
            template = 'NewYork'
        elif (volume == 'chacha'):
            template = 'Chacha'
        elif (volume == 'dynamic_attention'):
            template = 'dynamic_attention'
        elif (dic_traffic_env_conf_extra['LANE_NUM'] == config._LS):
            template = 'template_ls'
        elif (dic_traffic_env_conf_extra['LANE_NUM'] == config._S):
            template = 'template_s'
        elif (dic_traffic_env_conf_extra['LANE_NUM'] == config._LSR):
            template = 'template_lsr'
        else:
            raise ValueError
        if dic_traffic_env_conf_extra['NEIGHBOR']:
            # Replicate each state feature once per neighbor (4 neighbors).
            list_feature = dic_traffic_env_conf_extra['LIST_STATE_FEATURE'].copy()
            for feature in list_feature:
                for i in range(4):
                    dic_traffic_env_conf_extra['LIST_STATE_FEATURE'].append(((feature + '_') + str(i)))
        if (mod in ['CoLight', 'GCN', 'SimpleDQNOne']):
            # Graph-based models: one shared agent over all intersections.
            dic_traffic_env_conf_extra['NUM_AGENTS'] = 1
            dic_traffic_env_conf_extra['ONE_MODEL'] = False
            if (('adjacency_matrix' not in dic_traffic_env_conf_extra['LIST_STATE_FEATURE']) and ('adjacency_matrix_lane' not in dic_traffic_env_conf_extra['LIST_STATE_FEATURE']) and (mod not in ['SimpleDQNOne'])):
                dic_traffic_env_conf_extra['LIST_STATE_FEATURE'].append('adjacency_matrix')
                dic_traffic_env_conf_extra['LIST_STATE_FEATURE'].append('adjacency_matrix_lane')
            if dic_traffic_env_conf_extra['ADJACENCY_BY_CONNECTION_OR_GEO']:
                # NOTE(review): this updates the global AFTER the dict captured
                # its previous value into 'TOP_K_ADJACENCY' above — confirm the
                # intended ordering upstream.
                TOP_K_ADJACENCY = 5
                dic_traffic_env_conf_extra['LIST_STATE_FEATURE'].append('connectivity')
                dic_traffic_env_conf_extra['DIC_FEATURE_DIM']['D_CONNECTIVITY'] = (5,)
                dic_traffic_env_conf_extra['DIC_FEATURE_DIM']['D_ADJACENCY_MATRIX'] = (5,)
            else:
                dic_traffic_env_conf_extra['DIC_FEATURE_DIM']['D_ADJACENCY_MATRIX'] = (dic_traffic_env_conf_extra['TOP_K_ADJACENCY'],)
            if dic_traffic_env_conf_extra['USE_LANE_ADJACENCY']:
                dic_traffic_env_conf_extra['DIC_FEATURE_DIM']['D_ADJACENCY_MATRIX_LANE'] = (dic_traffic_env_conf_extra['TOP_K_ADJACENCY_LANE'],)
        else:
            # Per-intersection agents.
            dic_traffic_env_conf_extra['NUM_AGENTS'] = dic_traffic_env_conf_extra['NUM_INTERSECTIONS']
        if dic_traffic_env_conf_extra['BINARY_PHASE_EXPANSION']:
            # 8-bit one-hot phase representation (1-dim otherwise).
            dic_traffic_env_conf_extra['DIC_FEATURE_DIM']['D_CUR_PHASE'] = (8,)
            if dic_traffic_env_conf_extra['NEIGHBOR']:
                dic_traffic_env_conf_extra['DIC_FEATURE_DIM']['D_CUR_PHASE_0'] = (8,)
                dic_traffic_env_conf_extra['DIC_FEATURE_DIM']['D_LANE_NUM_VEHICLE_0'] = (4,)
                dic_traffic_env_conf_extra['DIC_FEATURE_DIM']['D_CUR_PHASE_1'] = (8,)
                dic_traffic_env_conf_extra['DIC_FEATURE_DIM']['D_LANE_NUM_VEHICLE_1'] = (4,)
                dic_traffic_env_conf_extra['DIC_FEATURE_DIM']['D_CUR_PHASE_2'] = (8,)
                dic_traffic_env_conf_extra['DIC_FEATURE_DIM']['D_LANE_NUM_VEHICLE_2'] = (4,)
                dic_traffic_env_conf_extra['DIC_FEATURE_DIM']['D_CUR_PHASE_3'] = (8,)
                dic_traffic_env_conf_extra['DIC_FEATURE_DIM']['D_LANE_NUM_VEHICLE_3'] = (4,)
            else:
                dic_traffic_env_conf_extra['DIC_FEATURE_DIM']['D_CUR_PHASE_0'] = (1,)
                dic_traffic_env_conf_extra['DIC_FEATURE_DIM']['D_LANE_NUM_VEHICLE_0'] = (4,)
                dic_traffic_env_conf_extra['DIC_FEATURE_DIM']['D_CUR_PHASE_1'] = (1,)
                dic_traffic_env_conf_extra['DIC_FEATURE_DIM']['D_LANE_NUM_VEHICLE_1'] = (4,)
                dic_traffic_env_conf_extra['DIC_FEATURE_DIM']['D_CUR_PHASE_2'] = (1,)
                dic_traffic_env_conf_extra['DIC_FEATURE_DIM']['D_LANE_NUM_VEHICLE_2'] = (4,)
                dic_traffic_env_conf_extra['DIC_FEATURE_DIM']['D_CUR_PHASE_3'] = (1,)
                dic_traffic_env_conf_extra['DIC_FEATURE_DIM']['D_LANE_NUM_VEHICLE_3'] = (4,)
        print(traffic_file)
        prefix_intersections = str(road_net)
        # Output/data locations; model and record dirs are timestamped.
        dic_path_extra = {'PATH_TO_MODEL': os.path.join('model', memo, ((traffic_file + '_') + time.strftime('%m_%d_%H_%M_%S', time.localtime(time.time())))), 'PATH_TO_WORK_DIRECTORY': os.path.join('records', memo, ((traffic_file + '_') + time.strftime('%m_%d_%H_%M_%S', time.localtime(time.time())))), 'PATH_TO_DATA': os.path.join('data', template, prefix_intersections), 'PATH_TO_PRETRAIN_MODEL': os.path.join('model', 'initial', traffic_file), 'PATH_TO_PRETRAIN_WORK_DIRECTORY': os.path.join('records', 'initial', traffic_file), 'PATH_TO_ERROR': os.path.join('errors', memo)}
        # Merge the overrides onto the project defaults from `config`.
        deploy_dic_exp_conf = merge(config.DIC_EXP_CONF, dic_exp_conf_extra)
        deploy_dic_agent_conf = merge(getattr(config, 'DIC_{0}_AGENT_CONF'.format(mod.upper())), dic_agent_conf_extra)
        deploy_dic_traffic_env_conf = merge(config.dic_traffic_env_conf, dic_traffic_env_conf_extra)
        deploy_dic_path = merge(config.DIC_PATH, dic_path_extra)
        if multi_process:
            # Defer execution: processes are started in batches below.
            ppl = Process(target=pipeline_wrapper, args=(deploy_dic_exp_conf, deploy_dic_agent_conf, deploy_dic_traffic_env_conf, deploy_dic_path))
            process_list.append(ppl)
        else:
            pipeline_wrapper(dic_exp_conf=deploy_dic_exp_conf, dic_agent_conf=deploy_dic_agent_conf, dic_traffic_env_conf=deploy_dic_traffic_env_conf, dic_path=deploy_dic_path)
    if multi_process:
        # Run at most n_workers pipelines concurrently: start a batch, then
        # join it before starting the next.
        for i in range(0, len(process_list), n_workers):
            i_max = min(len(process_list), (i + n_workers))
            for j in range(i, i_max):
                print(j)
                print('start_traffic')
                process_list[j].start()
                print('after_traffic')
            for k in range(i, i_max):
                print('traffic to join', k)
                process_list[k].join()
                print('traffic finish join', k)
    return memo
class TestPassportElementErrorSelfieWithoutRequest(TestPassportElementErrorSelfieBase):
    """Offline tests for PassportElementErrorSelfie (no bot requests)."""

    def test_slot_behaviour(self, passport_element_error_selfie):
        error = passport_element_error_selfie
        # Every declared slot must actually be set on the instance.
        for attr in error.__slots__:
            assert getattr(error, attr, 'err') != 'err', f"got extra slot '{attr}'"
        slots = mro_slots(error)
        assert len(slots) == len(set(slots)), 'duplicate slot'

    def test_expected_values(self, passport_element_error_selfie):
        error = passport_element_error_selfie
        assert error.source == self.source
        assert error.type == self.type_
        assert error.file_hash == self.file_hash
        assert error.message == self.message

    def test_to_dict(self, passport_element_error_selfie):
        error_dict = passport_element_error_selfie.to_dict()
        assert isinstance(error_dict, dict)
        # Each serialized key mirrors the attribute of the same name.
        for key in ('source', 'type', 'file_hash', 'message'):
            assert error_dict[key] == getattr(passport_element_error_selfie, key)

    def test_equality(self):
        a = PassportElementErrorSelfie(self.type_, self.file_hash, self.message)
        b = PassportElementErrorSelfie(self.type_, self.file_hash, self.message)
        c = PassportElementErrorSelfie(self.type_, '', '')
        d = PassportElementErrorSelfie('', self.file_hash, '')
        e = PassportElementErrorSelfie('', '', self.message)
        f = PassportElementErrorDataField(self.type_, '', '', self.message)
        # Equal field values -> equal objects with equal hashes, yet distinct identity.
        assert a == b
        assert hash(a) == hash(b)
        assert a is not b
        # Any differing field (or differing error class) breaks equality.
        for other in (c, d, e, f):
            assert a != other
            assert hash(a) != hash(other)
class StorageAssets(models.Model):
    """Storage-device detail record attached one-to-one to an ``Assets`` row."""

    # (value, label) choices for the kind of storage device.
    # NOTE(review): the labels are empty strings in the source — presumably
    # lost (non-ASCII?) text; confirm against the original project.
    storage_types = ((0, ''), (1, ''), (2, ''), (3, ''), (4, ''))
    assets = models.OneToOneField('Assets', on_delete=models.CASCADE)
    storage_type = models.SmallIntegerField(choices=storage_types, default=0, verbose_name='')

    class Meta:  # idiom fix: no empty parentheses on a class statement
        db_table = 'ops_storage_assets'
        verbose_name = ''
        verbose_name_plural = ''
def test_available_commands(bot):
    # NOTE(review): the three bare parenthesized expressions below look like
    # decorator lines whose leading '@<registrar>' was lost (likely
    # '@bot.command(...)' or similar) — as written the first is even a
    # SyntaxError (keyword argument inside a tuple display) and none of the
    # nested functions would ever be registered.  Restore the decorators
    # from the upstream project before relying on this test.
    ('test1', order=10)
    def test1():
        pass
    ('test2')
    def test2():
        pass
    ('test3', hidden=True)
    def test3():
        pass
    # Hidden commands ('start', 'test3') only appear with all=True; 'order'
    # pushes 'test1' after the default-ordered commands.
    assert ([cmd.name for cmd in bot.available_commands()] == ['help', 'test2', 'test1'])
    assert ([cmd.name for cmd in bot.available_commands(all=True)] == ['help', 'start', 'test2', 'test3', 'test1'])
class TestArrayColumns(BaseTestColumns):
    """Tests for array-typed columns produced by ``infer_column``."""

    def test_ArrayColumnInt64(self) -> None:
        """Null handling and slicing of a column of int lists."""
        data = [None, [], [1], [1, None, 2], None]
        col = infer_column(data)
        # elements() flattens the list items (inner nulls preserved).
        self.assert_Column(col.elements(), [1, 1, None, 2])
        # Check the full column and two slices element-by-element against
        # the corresponding python-list slice.
        for (sliced_col, sliced_data) in ((col, data), (col.slice(2, 2), data[2:4]), (col.slice(1, 4), data[1:5])):
            self.assertEqual(len(sliced_col), len(sliced_data))
            for (i, item) in enumerate(sliced_data):
                if (item is None):
                    self.assertTrue(sliced_col.is_null_at(i))
                else:
                    self.assertFalse(sliced_col.is_null_at(i))
                    self.assertEqual(len(sliced_col[i]), len(item))
                    # Inner values, including nested nulls, must round-trip.
                    for (j, value) in enumerate(item):
                        if (value is None):
                            self.assertTrue(sliced_col[i].is_null_at(j))
                        else:
                            self.assertFalse(sliced_col[i].is_null_at(j))
                            self.assertEqual(sliced_col[i][j], sliced_data[i][j])

    def test_NestedArrayColumnInt64(self) -> None:
        """Two-level nested int lists, including a null inner list."""
        data = [[[1, 2], None, [3, 4]], [[4], [5]]]
        col = infer_column(data)
        self.assertEqual(col[0][0][0], 1)
        self.assertEqual(col[0][0][1], 2)
        self.assertTrue(col[0].is_null_at(1))
        self.assertEqual(col[0][2][0], 3)
        self.assertEqual(col[0][2][1], 4)
        self.assertEqual(col[1][0][0], 4)
        self.assertEqual(col[1][1][0], 5)

    def test_NestedArrayColumnString(self) -> None:
        """Nested string lists with empty outer and inner lists."""
        data = [[], [[]], [['a']], [['b', 'c'], ['d', 'e', 'f']]]
        col = infer_column(data)
        self.assertEqual(len(col[0]), 0)
        self.assertEqual(len(col[1]), 1)
        self.assertEqual(len(col[1][0]), 0)
        self.assertEqual(col[2][0][0], 'a')
        self.assertEqual(col[3][0][0], 'b')
        self.assertEqual(col[3][0][1], 'c')
        self.assertEqual(col[3][1][0], 'd')
        self.assertEqual(col[3][1][1], 'e')
        self.assertEqual(col[3][1][2], 'f')
def test_arrayToLineSegments():
    """A single point with connect='all' must produce no drawable segments."""
    pts = np.array([0.0])
    result = arrayToLineSegments(pts, pts, connect='all', finiteCheck=True)
    segs = result.drawargs()
    # drawargs() returns either (segments,) or (segments, count).
    assert isinstance(segs, tuple)
    assert len(segs) in (1, 2)
    if len(segs) == 1:
        assert len(segs[0]) == 0
    else:
        assert segs[1] == 0
def metadata_and_status(status):
    """Build a MockMessage carrying fixed MPRIS metadata plus *status*."""
    metadata = obj({
        'mpris:trackid': obj(1),
        'xesam:url': obj('/path/to/rickroll.mp3'),
        'xesam:title': obj('Never Gonna Give You Up'),
        'xesam:artist': obj(['Rick Astley']),
        'xesam:album': obj('Whenever You Need Somebody'),
        'mpris:length': obj(),
    })
    body = ('', {'Metadata': metadata, 'PlaybackStatus': obj(status)}, [])
    return MockMessage(body=body)
class Migration(migrations.Migration):
    """Add the ``Task.available`` boolean flag (default True)."""

    dependencies = [
        ('tasks', '0029_sites_blank'),
    ]

    operations = [
        migrations.AddField(
            model_name='task',
            name='available',
            field=models.BooleanField(
                default=True,
                help_text='Designates whether this task is generally available for projects.',
                verbose_name='Available',
            ),
        ),
    ]
class SetExtentToLocation(QtWidgets.QWidget):
    """Line-edit widget that geocodes a typed location (via the map object's
    Nominatim-backed helper) and zooms the map ``m`` to its bounding box."""

    def __init__(self, *args, m=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.m = m  # the map object exposing set_extent_to_location()/redraw()
        label = QtWidgets.QLabel('<b>Query Location:</b>')
        self.inp = QtWidgets.QLineEdit()
        self.inp.returnPressed.connect(self.set_extent)
        layout = QtWidgets.QHBoxLayout()
        layout.addWidget(label)
        layout.addWidget(self.inp)
        self.setLayout(layout)
        # Timestamp of the previous query, used for crude rate limiting.
        self._lastquery = None

    def enterEvent(self, e):
        # Show a help tooltip when the window's "show help" mode is active.
        if (self.window().showhelp is True):
            QtWidgets.QToolTip.showText(e.globalPos(), "<h3>Location Query</h3>Use the <b>OpenStreetMap Nominatim API</b> to query a location and set the Map-extent to the bounding box of the found location.<p>'location' can be a country-name, a city-name an address etc.")
        ()  # NOTE(review): no-op expression — looks like residue of a lost statement; confirm upstream.

    def set_extent(self):
        # Geocode the entered text and zoom to it, at most once every 2 s.
        try:
            now = datetime.now()
            if (self._lastquery is None):
                self._lastquery = now
            elif ((now - self._lastquery).seconds <= 2):
                self.window().statusBar().showMessage('... no fast queries allowed!')
                return
            txt = self.inp.text()
            self.m.set_extent_to_location(txt)
            self.m.redraw()
        except Exception as ex:
            # Log only; a failed lookup must not crash the GUI.
            _log.error('There was an error while trying to set the extent.', exc_info=(_log.getEffectiveLevel() <= logging.DEBUG))
class PayToEdit(CompletionTextEdit, ScanQRTextEdit, Logger):
    """Multi-purpose 'Pay to' text field.

    Accepts a single address or script, a BIP21-style 'qtum:' URI, a BOLT11
    lightning invoice, or multiple '<address>, <amount>' lines (paytomany).
    Re-parses on every text change, publishing results to the owning window:
    ``outputs``, ``payto_scriptpubkey``, ``lightning_invoice`` and ``errors``.
    """

    def __init__(self, win: 'ElectrumWindow'):
        CompletionTextEdit.__init__(self)
        ScanQRTextEdit.__init__(self, config=win.config)
        Logger.__init__(self)
        self.win = win
        self.amount_edit = win.amount_e
        self.setFont(QFont(MONOSPACE_FONT))
        document = self.document()
        document.contentsChanged.connect(self.update_size)
        # Pre-compute vertical metrics used by update_size() to grow/shrink
        # the widget between one and ten text lines.
        fontMetrics = QFontMetrics(document.defaultFont())
        self.fontSpacing = fontMetrics.lineSpacing()
        margins = self.contentsMargins()
        documentMargin = document.documentMargin()
        self.verticalMargins = (margins.top() + margins.bottom())
        self.verticalMargins += (self.frameWidth() * 2)
        self.verticalMargins += (documentMargin * 2)
        self.heightMin = (self.fontSpacing + self.verticalMargins)
        self.heightMax = ((self.fontSpacing * 10) + self.verticalMargins)
        self.c = None
        self.textChanged.connect(self.check_text)
        self.outputs = []
        self.errors = []
        self.is_pr = False  # True while showing a payment request (field frozen)
        self.is_alias = False
        self.update_size()
        self.payto_scriptpubkey = None
        self.lightning_invoice = None
        self.previous_payto = ''

    def setFrozen(self, b):
        # Freeze/unfreeze editing (used while a payment request is shown).
        self.setReadOnly(b)
        self.setStyleSheet((frozen_style if b else normal_style))
        self.overlay_widget.setHidden(b)

    def setGreen(self):
        # Visual state: validated alias / payment request.
        self.setStyleSheet(util.ColorScheme.GREEN.as_stylesheet(True))

    def setExpired(self):
        # Visual state: expired or unvalidated.
        self.setStyleSheet(util.ColorScheme.RED.as_stylesheet(True))

    def parse_address_and_amount(self, line) -> PartialTxOutput:
        """Parse one 'address, amount' line into a PartialTxOutput."""
        try:
            (x, y) = line.split(',')
        except ValueError:
            raise Exception('expected two comma-separated values: (address, amount)') from None
        scriptpubkey = self.parse_output(x)
        amount = self.parse_amount(y)
        return PartialTxOutput(scriptpubkey=scriptpubkey, value=amount)

    def parse_output(self, x) -> bytes:
        """Return the scriptPubKey for an address or a raw script string."""
        try:
            address = self.parse_address(x)
            return bfh(bitcoin.address_to_script(address))
        except Exception:
            pass
        try:
            script = self.parse_script(x)
            return bfh(script)
        except Exception:
            pass
        raise Exception('Invalid address or script.')

    def parse_script(self, x):
        """Assemble a hex script from whitespace-separated opcodes/push data."""
        script = ''
        for word in x.split():
            if (word[0:3] == 'OP_'):
                opcode_int = opcodes[word]
                script += construct_script([opcode_int])
            else:
                bfh(word)  # validates the word is hex; raises otherwise
                script += construct_script([word])
        return script

    def parse_amount(self, x):
        """Parse an amount string into satoshi-like int units, or a '!'-style
        max-spend marker; raises on empty/invalid input."""
        x = x.strip()
        if (not x):
            raise Exception('Amount is empty')
        if parse_max_spend(x):
            return x
        # Convert from display units using the amount widget's precision.
        p = pow(10, self.amount_edit.decimal_point())
        try:
            return int((p * Decimal(x)))
        except decimal.InvalidOperation:
            raise Exception('Invalid amount')

    def parse_address(self, line):
        """Accept either a bare address or an 'alias <address>' form."""
        r = line.strip()
        m = re.match((('^' + RE_ALIAS) + '$'), r)
        address = str((m.group(2) if m else r))
        assert bitcoin.is_address(address)
        return address

    def check_text(self):
        """Re-parse the field content and publish outputs/invoice/errors."""
        self.errors = []
        if self.is_pr:
            return
        lines = [i for i in self.lines() if i]
        self.payto_scriptpubkey = None
        self.lightning_invoice = None
        self.outputs = []
        if (len(lines) == 1):
            data = lines[0]
            # Single line: URI, lightning invoice, or plain address/script.
            if data.startswith('qtum:'):
                self.win.pay_to_URI(data)
                return
            bolt11_invoice = maybe_extract_bolt11_invoice(data)
            if (bolt11_invoice is not None):
                try:
                    self.win.parse_lightning_invoice(bolt11_invoice)
                except LnDecodeException as e:
                    self.errors.append(PayToLineError(line_content=data, exc=e))
                else:
                    self.lightning_invoice = bolt11_invoice
                return
            try:
                self.payto_scriptpubkey = self.parse_output(data)
            except Exception as e:
                self.errors.append(PayToLineError(line_content=data, exc=e))
            else:
                self.win.set_onchain(True)
                self.win.lock_amount(False)
                return
        else:
            self._parse_as_multiline(lines, raise_errors=False)

    def _parse_as_multiline(self, lines, *, raise_errors: bool):
        """Parse paytomany lines; collect per-line errors unless raising."""
        outputs = []
        total = 0
        is_max = False
        for (i, line) in enumerate(lines):
            try:
                output = self.parse_address_and_amount(line)
            except Exception as e:
                if raise_errors:
                    raise
                else:
                    self.errors.append(PayToLineError(idx=i, line_content=line.strip(), exc=e, is_multiline=True))
                    continue
            outputs.append(output)
            if parse_max_spend(output.value):
                is_max = True
            else:
                total += output.value
        if outputs:
            self.win.set_onchain(True)
        self.win.max_button.setChecked(is_max)
        self.outputs = outputs
        self.payto_scriptpubkey = None
        # The amount field shows the sum unless a max-spend line is present.
        if self.win.max_button.isChecked():
            self.win.spend_max()
        else:
            self.amount_edit.setAmount((total if outputs else None))
        self.win.lock_amount((self.win.max_button.isChecked() or bool(outputs)))

    def get_errors(self) -> Sequence[PayToLineError]:
        return self.errors

    def get_destination_scriptpubkey(self) -> Optional[bytes]:
        return self.payto_scriptpubkey

    def get_outputs(self, is_max: bool) -> List[PartialTxOutput]:
        """Return outputs to spend; for a single destination, take the amount
        from the amount widget ('!' when is_max)."""
        if self.payto_scriptpubkey:
            if is_max:
                amount = '!'
            else:
                amount = self.amount_edit.get_amount()
                if (amount is None):
                    return []
            self.outputs = [PartialTxOutput(scriptpubkey=self.payto_scriptpubkey, value=amount)]
        return self.outputs[:]

    def lines(self):
        return self.toPlainText().split('\n')

    def is_multiline(self):
        return (len(self.lines()) > 1)

    def paytomany(self):
        # Seed a few blank lines so the field switches to multiline mode.
        self.setText('\n\n\n')
        self.update_size()

    def update_size(self):
        """Resize the widget to its content, within [heightMin, heightMax]."""
        docLineCount = self.document().lineCount()
        if ((self.cursorRect().right() + 1) >= self.overlay_widget.pos().x()):
            # Cursor would collide with the overlay buttons; add a line.
            docLineCount += 1
        docHeight = (docLineCount * self.fontSpacing)
        h = (docHeight + self.verticalMargins)
        h = min(max(h, self.heightMin), self.heightMax)
        self.setMinimumHeight(int(h))
        self.setMaximumHeight(int(h))
        self.verticalScrollBar().setHidden(((docHeight + self.verticalMargins) < self.heightMax))
        self._updateOverlayPos()

    def resolve(self):
        """Try to resolve the field content as an alias (e.g. OpenAlias) and
        replace it with 'alias <address>' on success."""
        self.is_alias = False
        if self.hasFocus():
            return
        if self.is_multiline():
            return
        if self.is_pr:
            return
        key = str(self.toPlainText())
        key = key.strip()
        if (key == self.previous_payto):
            return
        self.previous_payto = key
        # Heuristic: aliases contain a dot and no '<' or spaces.
        if (not (('.' in key) and (not ('<' in key)) and (not (' ' in key)))):
            return
        parts = key.split(sep=',')
        if (parts and (len(parts) > 0) and bitcoin.is_address(parts[0])):
            return
        try:
            data = self.win.contacts.resolve(key)
        except Exception as e:
            self.logger.info(f'error resolving address/alias: {repr(e)}')
            return
        if (not data):
            return
        self.is_alias = True
        address = data.get('address')
        name = data.get('name')
        new_url = (((key + ' <') + address) + '>')
        self.setText(new_url)
        self.previous_payto = new_url
        self.win.contacts[key] = ('openalias', name)
        self.win.contact_list.update()
        self.setFrozen(True)
        if (data.get('type') == 'openalias'):
            self.validated = data.get('validated')
            if self.validated:
                self.setGreen()
            else:
                self.setExpired()
        else:
            self.validated = None
def inv_z_basis_gate(pauli: str) -> cirq.Gate:
    """Return the single-qubit gate rotating the given Pauli's eigenbasis
    onto the Z (computational) basis; raises ValueError for other inputs."""
    if pauli in ('I', 'Z'):
        return cirq.I
    if pauli == 'X':
        return cirq.H
    if pauli == 'Y':
        return cirq.PhasedXZGate(
            axis_phase_exponent=-0.5,
            x_exponent=0.5,
            z_exponent=-0.5,
        )
    raise ValueError('Invalid Pauli.')
@_hook('tensorboard_plot')
class TensorboardPlotHook(ClassyHook):
    """Hook that plots training/testing progress to Tensorboard.

    On every step it records wall times, learning rates and sample-fetch
    times; at phase end it writes scalars (losses, meters, speed, peak GPU
    memory) and parameter histograms.  Only the primary replica writes.
    NOTE(review): the registration line above was a bare call in the source
    (no '@'), which registers nothing — restored as a decorator; confirm
    the registrar name upstream.
    """

    on_end = ClassyHook._noop

    def __init__(self, tb_writer, log_period: int = 10) -> None:
        """
        Args:
            tb_writer: Tensorboard SummaryWriter used for all plotting.
            log_period: emit per-step scalars every ``log_period`` steps.
        """
        super().__init__()
        if not tb_available:
            raise ModuleNotFoundError(
                'tensorboard not installed, cannot use TensorboardPlotHook')
        if not isinstance(log_period, int):
            raise TypeError('log_period must be an int')
        self.tb_writer = tb_writer
        # Per-phase buffers; (re)initialized in on_phase_start.
        self.learning_rates: Optional[List[float]] = None
        self.wall_times: Optional[List[float]] = None
        self.sample_fetch_times: Optional[List[float]] = None
        self.log_period = log_period
        # Persistent counters keyed by phase type ('train'/'test').
        self.state.step_count = {'train': 0, 'test': 0}
        self.state.cum_sample_fetch_time = {'train': 0, 'test': 0}

    @classmethod  # fix: `cls` signature requires the classmethod decorator
    def from_config(cls, config: Dict[str, Any]) -> 'TensorboardPlotHook':
        """Build the hook from a config dict with 'summary_writer' kwargs
        and an optional 'log_period'."""
        tb_writer = SummaryWriter(**config['summary_writer'])
        log_period = config.get('log_period', 10)
        return cls(tb_writer=tb_writer, log_period=log_period)

    def on_start(self, task) -> None:
        if is_primary():
            self.tb_writer.add_text('Task', f'{task}')

    def on_phase_start(self, task) -> None:
        # Reset the per-phase buffers on every replica (they feed the
        # cross-replica reduction in _get_cum_sample_fetch_times).
        self.learning_rates = []
        self.wall_times = []
        self.sample_fetch_times = []
        if not is_primary():
            return
        if torch.cuda.is_available():
            torch.cuda.reset_max_memory_allocated()
        if task.train and (task.train_phase_idx == 0):
            # Histogram the initial parameters, tagged as step -1.
            for (name, parameter) in task.base_model.named_parameters():
                self.tb_writer.add_histogram(
                    f'Parameters/{name}', parameter, global_step=-1)

    def on_step(self, task) -> None:
        self.state.step_count[task.phase_type] += 1
        self.wall_times.append(time.time())
        if 'sample_fetch_time' in task.last_batch.step_data:
            self.sample_fetch_times.append(
                task.last_batch.step_data['sample_fetch_time'])
        if task.train:
            self.learning_rates.append(task.optimizer.options_view.lr)

    def _get_cum_sample_fetch_times(self, phase_type) -> Tuple[List[float], ...]:
        """Max-reduce per-step sample-fetch times across replicas and return
        their running (cumulative) sums; None when nothing was recorded."""
        if not self.sample_fetch_times:
            return None
        sample_fetch_times = torch.Tensor(self.sample_fetch_times)
        max_sample_fetch_times = all_reduce_max(sample_fetch_times).tolist()
        cum_sample_fetch_times = list(accumulate(
            [self.state.cum_sample_fetch_time[phase_type]] + max_sample_fetch_times))[1:]
        self.state.cum_sample_fetch_time[phase_type] = cum_sample_fetch_times[-1]
        return cum_sample_fetch_times

    def on_phase_end(self, task) -> None:
        if self.learning_rates is None:
            logging.warning('learning_rates is not initialized')
            return
        phase_type = task.phase_type
        cum_sample_fetch_times = self._get_cum_sample_fetch_times(phase_type)
        batches = len(task.losses)
        if (batches == 0) or (not is_primary()):
            return
        phase_type_idx = task.train_phase_idx if task.train else task.eval_phase_idx
        logging.info(f'Plotting to Tensorboard for {phase_type} phase {phase_type_idx}')
        for i in range(0, len(self.wall_times), self.log_period):
            # Reconstruct the global step of the i-th recorded step: the
            # counter already includes all of this phase's steps.
            global_step = (i + self.state.step_count[phase_type]) - len(self.wall_times) + 1
            if cum_sample_fetch_times:
                self.tb_writer.add_scalar(
                    f'Speed/{phase_type}/cumulative_sample_fetch_time',
                    cum_sample_fetch_times[i],
                    global_step=global_step,
                    walltime=self.wall_times[i])
            if task.train:
                self.tb_writer.add_scalar(
                    'Learning Rate/train',
                    self.learning_rates[i],
                    global_step=global_step,
                    walltime=self.wall_times[i])
        if task.train:
            for (name, parameter) in task.base_model.named_parameters():
                self.tb_writer.add_histogram(
                    f'Parameters/{name}', parameter, global_step=phase_type_idx)
        if torch.cuda.is_available() and task.train:
            self.tb_writer.add_scalar(
                'Memory/peak_allocated',
                torch.cuda.max_memory_allocated(),
                global_step=phase_type_idx)
        loss_avg = sum(task.losses) / (batches * task.get_batchsize_per_replica())
        loss_key = 'Losses/{phase_type}'.format(phase_type=task.phase_type)
        self.tb_writer.add_scalar(loss_key, loss_avg, global_step=phase_type_idx)
        for meter in task.meters:
            if not isinstance(meter.value, dict):
                # fix: the source used the undefined name `log`; the module
                # logs through `logging` everywhere else.
                logging.warning(f'Skipping meter {meter.name} with value: {meter.value}')
                continue
            for (name, value) in meter.value.items():
                if isinstance(value, float):
                    meter_key = f'Meters/{phase_type}/{meter.name}/{name}'
                    self.tb_writer.add_scalar(meter_key, value, global_step=phase_type_idx)
                else:
                    logging.warning(
                        f'Skipping meter name {meter.name}/{name} with value: {value}')
                    continue
        if hasattr(task, 'perf_log'):
            for perf in task.perf_log:
                phase_idx = perf['phase_idx']
                tag = perf['tag']
                for (metric_name, metric_value) in perf.items():
                    if metric_name in ['phase_idx', 'tag']:
                        continue
                    self.tb_writer.add_scalar(
                        f'Speed/{tag}/{metric_name}', metric_value, global_step=phase_idx)
        self.tb_writer.flush()
        logging.info('Done plotting to Tensorboard')
class Config(object):
    """Static experiment configuration for the RL dialog simulator.

    All values are class attributes read directly (the class is a namespace,
    not instantiated).  Note the class body has side effects: it creates
    ``save_dir`` and asserts a consistent bit-flag combination at import time.
    """
    # Pretrained NLU model used by the rule-based system policy.
    rule_base_sys_nlu = '/home/wyshi/simulator/simulator/nlu_model/model/model-test-30-new.pkl'
    # Simulator flavor: supervised-learning simulator vs. generative one.
    use_sl_simulator = True
    use_sl_generative = False
    INTERACTIVE = True
    device = 'cpu'
    use_gpu = False
    # NLG: template-based generation (sampling disabled).
    nlg_sample = False
    nlg_template = True
    n_episodes = 30000
    save_dir = '/home/wyshi/simulator/model/save/template/oneHot_newReward_bitMore/'
    # Side effect at class-definition time: ensure the save directory exists.
    # NOTE(review): os.mkdir fails if the parent directory is missing.
    if (not os.path.exists(save_dir)):
        os.mkdir(save_dir)
    use_sequicity_for_rl_model = False
    # Bit-feature flags: when with_bit is on, exactly one granularity below
    # must be selected (enforced by the asserts).
    with_bit = True
    with_bit_rep_only = False
    with_bit_more = True
    with_bit_all = False
    if with_bit:
        assert (sum([with_bit_rep_only, with_bit_more, with_bit_all]) == 1)
    else:
        assert (sum([with_bit_rep_only, with_bit_more, with_bit_all]) == 0)
    use_new_reward = False
    bit_not_used_in_update = True
    # State representation options.
    use_sent = False
    use_multinomial = False
    use_sent_one_hot = True
    # RL hyper-parameters.
    lr = 0.0001
    discrete_act = True
    discounted_factor = 0.9
    # Exploration schedule: 0.5 -> 0 for discrete actions, disabled otherwise.
    init_exp = (0.5 if discrete_act else 0)
    final_exp = (0 if discrete_act else 0)
    loose_agents = True
    small_value = 0
    warm_start_episodes = 0
    replay = True
    batch_size = 64
    seed = 0
    update_every = 64
    # Model architecture / training settings.
    hidden_size = 200
    n_layers = 2
    dropout = 0.3
    max_utt_len = 25
    num_epochs = 30
    vocab_size = 800
    pretrained_dir = '/data/qkun/sequicity_multiwoz_0.4/models/multiwoz_sys911.pkl'
class Mask(rq.List):
    """An rq.List of 32-bit words encoding an (arbitrarily wide) bitmask."""

    def __init__(self, name):
        rq.List.__init__(self, name, rq.Card32, pad=0)

    def pack_value(self, val):
        """Pack *val* (an int bitmask, or an iterable of words) into the
        wire representation; returns (data, length, format)."""
        mask_seq = array.array(rq.struct_to_array_codes['L'])
        if isinstance(val, integer_types):
            # Split the integer into 32-bit words.  The word order in the
            # array depends on the host byte order because the array is
            # serialized with native endianness.
            if sys.byteorder == 'little':
                def fun(val):
                    mask_seq.insert(0, val)
            elif sys.byteorder == 'big':
                fun = mask_seq.append
            else:
                raise AssertionError(sys.byteorder)
            while val:
                # Low 32 bits of the remaining value; the 0xFFFFFFFF
                # constant was lost in the source (pairs with the >> 32).
                fun(val & 0xFFFFFFFF)
                val = val >> 32
        else:
            mask_seq.extend(val)
        return (rq.encode_array(mask_seq), len(mask_seq), None)
class Transform(torch.nn.Module):
    """Image preprocessing pipeline: resize (bicubic), center-crop, convert
    to float, and normalize with the CLIP image mean/std."""

    def __init__(self, image_size):
        super().__init__()
        # Fix: several normalization constants were zeroed in the source —
        # a zero std would divide by zero.  Restored the standard CLIP
        # image mean/std (the surviving value 0.4578275 matches CLIP's mean).
        self.transforms = torch.nn.Sequential(
            Resize([image_size], interpolation=InterpolationMode.BICUBIC),
            CenterCrop(image_size),
            ConvertImageDtype(torch.float),
            Normalize(
                (0.48145466, 0.4578275, 0.40821073),
                (0.26862954, 0.26130258, 0.27577711),
            ),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the pipeline without tracking gradients."""
        with torch.no_grad():
            x = self.transforms(x)
        return x
class CompactnessWeightedAxis():
    """Compute a compactness-weighted longest-axis measure for each geometry
    in a GeoDataFrame; the result is exposed as ``self.series``.

    areas / perimeters / longest_axis may each be None (derived from the
    geometry), a column name (str), or an array-like of precomputed values.
    """

    def __init__(self, gdf, areas=None, perimeters=None, longest_axis=None):
        self.gdf = gdf
        # Work on a copy so temporary 'mm_*' columns don't leak to the caller.
        gdf = gdf.copy()
        if (perimeters is None):
            gdf['mm_p'] = gdf.geometry.length
            perimeters = 'mm_p'
        elif (not isinstance(perimeters, str)):
            # Array-like of precomputed perimeters.
            gdf['mm_p'] = perimeters
            perimeters = 'mm_p'
        self.perimeters = gdf[perimeters]
        if (longest_axis is None):
            from .dimension import LongestAxisLength
            gdf['mm_la'] = LongestAxisLength(gdf).series
            longest_axis = 'mm_la'
        elif (not isinstance(longest_axis, str)):
            gdf['mm_la'] = longest_axis
            longest_axis = 'mm_la'
        self.longest_axis = gdf[longest_axis]
        if (areas is None):
            areas = gdf.geometry.area
        if (not isinstance(areas, str)):
            gdf['mm_a'] = areas
            areas = 'mm_a'
        self.areas = gdf[areas]
        # Weighted axis: longest_axis * (4/pi - 16*area/perimeter^2).
        self.series = pd.Series((gdf[longest_axis] * ((4 / np.pi) - ((16 * gdf[areas]) / (gdf[perimeters] ** 2)))), index=gdf.index)
# NOTE(review): restored the '@' — in the source this was a bare
# `.register(...)` call whose returned decorator was discarded, so the
# function below was never registered for BoundedContinuous.
@_default_transform.register(BoundedContinuous)
def bounded_cont_transform(op, rv, bound_args_indices=None):
    """Return an Interval transform for a bounded continuous RV.

    Args:
        op: the RV op (used only in the error message).
        rv: the random variable being transformed.
        bound_args_indices: (lower_idx, upper_idx) positions of the bound
            parameters among the RV's inputs; an entry of None disables
            that side of the interval.

    Raises:
        ValueError: when bound_args_indices is not given.
    """
    if bound_args_indices is None:
        raise ValueError(f'Must specify bound_args_indices for {op} bounded distribution')

    def transform_params(*args):
        # Pull the lower/upper bound args, treating constant +-inf as
        # "unbounded" on that side.
        lower, upper = None, None
        if bound_args_indices[0] is not None:
            lower = args[bound_args_indices[0]]
        if bound_args_indices[1] is not None:
            upper = args[bound_args_indices[1]]
        if lower is not None:
            if isinstance(lower, TensorConstant) and np.all(lower.value == (- np.inf)):
                lower = None
            else:
                lower = pt.as_tensor_variable(lower)
        if upper is not None:
            if isinstance(upper, TensorConstant) and np.all(upper.value == np.inf):
                upper = None
            else:
                upper = pt.as_tensor_variable(upper)
        return (lower, upper)

    return transforms.Interval(bounds_fn=transform_params)
class PrefetchDataset(torch.utils.data.Dataset):
    """Wrap a detection dataset so that image loading and per-scale
    pre-processing run inside DataLoader workers (prefetching)."""

    def __init__(self, opt, dataset, pre_process_func):
        self.images = dataset.images
        self.load_image_func = dataset.coco.loadImgs
        self.img_dir = dataset.img_dir
        self.pre_process_func = pre_process_func
        self.get_default_calib = dataset.get_default_calib
        self.opt = opt

    def __getitem__(self, index):
        """Return (img_id, dict) with raw image plus pre-processed inputs
        and metadata for every test scale."""
        img_id = self.images[index]
        img_info = self.load_image_func(ids=[img_id])[0]
        img_path = os.path.join(self.img_dir, img_info['file_name'])
        image = cv2.imread(img_path)
        (images, meta) = ({}, {})
        # Fix: use the options stored on the instance (`self.opt`); the bare
        # name `opt` is not defined in this scope.
        for scale in self.opt.test_scales:
            input_meta = {}
            # Per-image calibration when provided, else a size-based default.
            calib = (img_info['calib'] if ('calib' in img_info) else self.get_default_calib(image.shape[1], image.shape[0]))
            input_meta['calib'] = calib
            (images[scale], meta[scale]) = self.pre_process_func(image, scale, input_meta)
        ret = {'images': images, 'image': image, 'meta': meta}
        if (('frame_id' in img_info) and (img_info['frame_id'] == 1)):
            # First frame of a video sequence — flag it so the consumer can
            # reset per-video state.  NOTE(review): presumed semantics.
            ret['is_first_frame'] = 1
            ret['video_id'] = img_info['video_id']
        return (img_id, ret)

    def __len__(self):
        return len(self.images)
class _torchxconfig(Action):
    """argparse Action that seeds argument defaults from torchx config files.

    Config sections are loaded once per subcommand and cached on the class;
    when a configured default exists for an argument, the argument is no
    longer required on the command line.
    """

    # Cache of loaded configs, keyed by subcommand name.
    _subcmd_configs: Dict[str, Dict[str, str]] = {}

    def __init__(self, subcmd: str, dest: str, option_strings: Sequence[Text], required: bool = False, default: Any = None, **kwargs: Any) -> None:
        subcmd_cfg = self._subcmd_configs.setdefault(
            subcmd, config.get_configs(prefix='cli', name=subcmd)
        )
        default = subcmd_cfg.get(dest, default)
        if default:
            # A configured default satisfies the CLI requirement.
            required = False
        super().__init__(
            dest=dest,
            default=default,
            option_strings=option_strings,
            required=required,
            **kwargs,
        )

    def __call__(self, parser: ArgumentParser, namespace: Namespace, values: Any, option_string: Optional[str] = None) -> None:
        # Plain store behavior: remember the value on the namespace.
        setattr(namespace, self.dest, values)
def ToTimeStr(val):
    """Format a duration given in nanoseconds as a human-readable string.

    Picks the largest unit (s, ms, us, ns) the value reaches and keeps full
    nanosecond precision in the fractional part.
    """
    val = Decimal(val)
    # The seconds branch lost its constants in the source; reconstructed to
    # follow the ms/us pattern below (powers of 1000, ns precision kept).
    if (val >= 1000000000):
        return '{} s'.format((val / 1000000000).quantize(Decimal('0.000000001')))
    if (val >= 1000000):
        return '{} ms'.format((val / 1000000).quantize(Decimal('0.000001')))
    if (val >= 1000):
        return '{} us'.format((val / 1000).quantize(Decimal('0.001')))
    return '{} ns'.format(val.quantize(Decimal('1')))
def imagesc(img, title=None, experiment=None, step=None, scale='minmax'):
    """Display *img* with matplotlib after rescaling it for visualization.

    scale='minmax' stretches values to [0, 1]; a numeric scale maps
    [-scale, scale] to [0, 1]; a (lo, hi) pair maps that range to [0, 1].
    Optionally logs the current figure to *experiment*.
    """
    if scale == 'minmax':
        # Shift to zero then divide by the (new) max to land in [0, 1].
        img = img - img.ravel().min()
        img = img / img.ravel().max()
    elif type(scale) in (float, int):
        img = (img * 0.5) / scale + 0.5
    elif type(scale) in (list, tuple):
        assert len(scale) == 2, 'scale arg must be length 2'
        lo, hi = scale
        img = (img - lo) / (hi - lo)
    plt.clf()
    plt.imshow(img)
    if title:
        plt.title(title)
    if experiment:
        experiment.log_figure(figure_name=title, step=step)
class Shard(Enum):
    """Enumeration of API shard identifiers.

    Each value is the literal shard string sent to the API. Members fall
    into two visible groups: platform+region codes (``pc-*``, ``xbox-*``)
    and platform-only codes with no region suffix.
    """
    # PC shards, split by region.
    PC_AS = 'pc-as'
    PC_EU = 'pc-eu'
    PC_KAKAO = 'pc-kakao'
    PC_KRJP = 'pc-krjp'
    PC_NA = 'pc-na'
    PC_OC = 'pc-oc'
    PC_SA = 'pc-sa'
    PC_SEA = 'pc-sea'
    PC_JP = 'pc-jp'
    PC_RU = 'pc-ru'
    PC_TOURNAMENT = 'pc-tournament'
    # Xbox shards, split by region.
    XBOX_AS = 'xbox-as'
    XBOX_EU = 'xbox-eu'
    XBOX_NA = 'xbox-na'
    XBOX_OC = 'xbox-oc'
    XBOX_SA = 'xbox-sa'
    # Platform-wide shards (no region suffix).
    KAKAO = 'kakao'
    PSN = 'psn'
    STEAM = 'steam'
    TOURNAMENT = 'tournament'
    XBOX = 'xbox'
    CONSOLE = 'console'
def test_tmp_path_too_long_on_parametrization(pytester: Pytester) -> None:
    """A ~1000-char parametrized test id must still yield a usable tmp_path.

    The tmp_path factory truncates over-long test names when building the
    per-test directory, so touching a file inside it must succeed.
    """
    # BUG FIX: the embedded source had lost the '@pytest.mark.' prefix on
    # the decorator and the function-body indentation (both would make the
    # generated file invalid); restored so the inner test actually runs.
    pytester.makepyfile(
        """
        import pytest

        @pytest.mark.parametrize("arg", ["1"*1000])
        def test_some(arg, tmp_path):
            tmp_path.joinpath("hello").touch()
        """
    )
    reprec = pytester.inline_run()
    reprec.assertoutcome(passed=1)
class Reshape(Layer):
    """Layer that reshapes its input to ``target_shape`` (batch dim excluded).

    A single -1 entry in ``target_shape`` is inferred from the input size.
    """

    def __init__(self, target_shape, **kwargs):
        super(Reshape, self).__init__(**kwargs)
        self.target_shape = tuple(target_shape)

    def _fix_unknown_dimension(self, input_shape, output_shape):
        """Resolve a single -1 in *output_shape* from *input_shape*'s size.

        Raises ValueError when more than one dimension is unknown or the
        total element counts cannot match.
        """
        output_shape = list(output_shape)
        msg = 'total size of new array must be unchanged'
        unknown = None
        known = 1
        for index, dim in enumerate(output_shape):
            if dim >= 0:
                known *= dim
                continue
            if unknown is not None:
                raise ValueError('Can only specify one unknown dimension.')
            unknown = index
        original = np.prod(input_shape, dtype=int)
        if unknown is None:
            if original != known:
                raise ValueError(msg)
        else:
            if known == 0 or original % known != 0:
                raise ValueError(msg)
            output_shape[unknown] = original // known
        return tuple(output_shape)

    def compute_output_shape(self, input_shape):
        # The batch dimension passes through untouched.
        return (input_shape[0],) + self._fix_unknown_dimension(
            input_shape[1:], self.target_shape)

    def call(self, inputs):
        target_shape = self.target_shape
        if -1 in target_shape:
            # Try to resolve the -1 statically so the backend receives a
            # fully specified shape; fall back to the raw -1 otherwise.
            input_shape = None
            try:
                input_shape = K.int_shape(inputs)
            except TypeError:
                pass
            if input_shape is not None:
                target_shape = self.compute_output_shape(input_shape)[1:]
        return K.reshape(inputs, (-1,) + target_shape)

    def get_config(self):
        config = {'target_shape': self.target_shape}
        base_config = super(Reshape, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
def run_experiment(variant):
    """Load a skill snapshot, pick its best skill, and finetune it with SAC.

    num_skills is computed as the gap between the policy's and the env's
    observation dims — presumably the skill one-hot appended to the
    observation (TODO confirm against the snapshot format).
    """
    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Session() as sess:
        # NOTE(review): sess is never referenced by name; the components
        # below appear to rely on tf's default session — confirm.
        data = joblib.load(variant['snapshot_filename'])
        policy = data['policy']
        env = data['env']
        num_skills = (data['policy'].observation_space.flat_dim - data['env'].spec.observation_space.flat_dim)
        best_z = get_best_skill(policy, env, num_skills, variant['max_path_length'])
        # Pin the environment to the selected skill for finetuning.
        fixed_z_env = FixedOptionEnv(env, num_skills, best_z)
        tf.logging.info('Finetuning best skill...')
        pool = SimpleReplayBuffer(env_spec=fixed_z_env.spec, max_replay_buffer_size=variant['max_pool_size'])
        base_kwargs = dict(min_pool_size=variant['max_path_length'], epoch_length=variant['epoch_length'], n_epochs=variant['n_epochs'], max_path_length=variant['max_path_length'], batch_size=variant['batch_size'], n_train_repeat=variant['n_train_repeat'], eval_render=False, eval_n_episodes=1, eval_deterministic=True)
        # Hidden layer width for freshly created value networks.
        M = variant['layer_size']
        if variant['use_pretrained_values']:
            # Reuse the snapshot's Q and V networks.
            qf = data['qf']
            vf = data['vf']
        else:
            # Drop the saved networks and build fresh ones under new scopes.
            del data['qf']
            del data['vf']
            qf = NNQFunction(env_spec=fixed_z_env.spec, hidden_layer_sizes=[M, M], var_scope='qf-finetune')
            vf = NNVFunction(env_spec=fixed_z_env.spec, hidden_layer_sizes=[M, M], var_scope='vf-finetune')
        algorithm = SAC(base_kwargs=base_kwargs, env=fixed_z_env, policy=policy, pool=pool, qf=qf, vf=vf, lr=variant['lr'], scale_reward=variant['scale_reward'], discount=variant['discount'], tau=variant['tau'], save_full_state=False)
        algorithm.train()
def get_current_samples_dir():
    """Return the media-player samples directory from the environment.

    Reads the ``pyglet_mp_samples_dir`` environment variable. Raises
    ExceptionUndefinedSamplesDir when it is unset, or
    ExceptionSamplesDirDoesNotExist when it names a missing directory.
    """
    env_key = 'pyglet_mp_samples_dir'
    if env_key not in os.environ:
        raise mpexceptions.ExceptionUndefinedSamplesDir()
    samples_dir = os.environ[env_key]
    if not os.path.isdir(samples_dir):
        raise mpexceptions.ExceptionSamplesDirDoesNotExist(samples_dir)
    return samples_dir
def distributed_init(args):
    """Initialize the distributed backend (GPU/torch.distributed or TPU/XLA).

    Returns this process's distributed rank. Side effects: quiets logging on
    non-master ranks and, when args.model_parallel_size > 1, initializes
    megatron model parallelism and tags the checkpoint suffix.
    """
    if (not getattr(args, 'tpu', False)):
        if torch.distributed.is_initialized():
            warnings.warn('Distributed is already initialized, cannot initialize twice!')
        else:
            logger.info('distributed init (rank {}): {}'.format(args.distributed_rank, args.distributed_init_method))
            dist.init_process_group(backend=args.distributed_backend, init_method=args.distributed_init_method, world_size=args.distributed_world_size, rank=args.distributed_rank)
            logger.info('initialized host {} as rank {}'.format(socket.gethostname(), args.distributed_rank))
            if torch.cuda.is_available():
                # All-reduce a dummy tensor to force backend warm-up now.
                dist.all_reduce(torch.zeros(1).cuda())
        args.distributed_rank = torch.distributed.get_rank()
    else:
        # TPU path: ranks come from XLA ordinals, not torch.distributed.
        import torch_xla.core.xla_model as xm
        assert (xm.xrt_world_size() == args.distributed_world_size)
        args.device_id = xm.get_local_ordinal()
        args.distributed_rank = xm.get_ordinal()
        # Synchronization point across all XLA workers.
        xm.rendezvous('distributed_init')
        xm.mark_step()
    if (not is_master(args)):
        # Suppress INFO/DEBUG output on non-master ranks.
        logging.getLogger().setLevel(logging.WARNING)
    if (args.model_parallel_size > 1):
        try:
            from fairseq.model_parallel.megatron.mpu import get_model_parallel_rank, initialize_model_parallel, model_parallel_cuda_manual_seed
        except ImportError:
            raise ImportError('\n\nPlease install the megatron submodule:\n\n git submodule update --init fairseq/model_parallel/megatron')
        initialize_model_parallel(args.model_parallel_size)
        model_parallel_cuda_manual_seed(args.seed)
        model_part_number = get_model_parallel_rank()
        # Distinguish checkpoints per model-parallel partition.
        args.checkpoint_suffix += '-model_part-{0}'.format(model_part_number)
    return args.distributed_rank
class CachingImageList(wx.ImageList):
    """wx.ImageList that memoizes bitmaps by their loader arguments."""

    def __init__(self, width, height):
        wx.ImageList.__init__(self, width, height)
        # Maps loader-argument tuples to the index already stored in the list.
        self.map = {}

    def GetImageIndex(self, *loaderArgs):
        """Return the list index for *loaderArgs*, loading the bitmap at most once.

        Returns -1 when the bitmap cannot be loaded.
        """
        cached = self.map.get(loaderArgs)
        if cached is not None:
            return cached
        bitmap = BitmapLoader.getBitmap(*loaderArgs)
        if bitmap is None:
            return -1
        index = wx.ImageList.Add(self, bitmap)
        self.map[loaderArgs] = index
        return index
def load_callbacks(output_dir):
    """Create training callbacks, ensuring *output_dir*/ckpts exists.

    Returns a single-element list holding a ModelCheckpoint that tracks
    ``val_mae_max_metric``. NOTE(review): mode='min' despite the "max" in
    the metric name — presumably lower is better; confirm against the
    metric's definition.
    """
    output_dir = Path(output_dir)
    ckpt_dir = output_dir / 'ckpts'
    for directory in (output_dir, ckpt_dir):
        directory.mkdir(exist_ok=True, parents=True)
    checkpoint = ModelCheckpoint(
        monitor='val_mae_max_metric',
        dirpath=str(ckpt_dir),
        filename='{epoch:02d}-{val_mae_max_metric:.4f}',
        verbose=True,
        save_last=True,
        save_top_k=1,
        mode='min',
        save_weights_only=True,
    )
    return [checkpoint]
class KickstartParser(object):
    """Line-oriented state-machine parser for kickstart files.

    Each input line is routed either to the command dispatcher (while in
    STATE_COMMANDS) or to the currently-open %section object; %include,
    %ksappend and %end receive special handling.
    """
    def __init__(self, handler, followIncludes=True, errorsAreFatal=True, missingIncludeIsFatal=True, unknownSectionIsFatal=True):
        # Policy flags: how strictly errors, missing %include targets and
        # unknown %sections are treated.
        self.errorsAreFatal = errorsAreFatal
        self.errorsCount = 0
        self.followIncludes = followIncludes
        self.handler = handler
        # include depth -> directory of the file read at that depth; used by
        # readKickstart to resolve relative %include paths.
        self.currentdir = {}
        self.missingIncludeIsFatal = missingIncludeIsFatal
        self.unknownSectionIsFatal = unknownSectionIsFatal
        self._state = STATE_COMMANDS
        self._includeDepth = 0
        self._line = ''
        self.version = self.handler.version
        # Share the syntax version with the Script/Packages data classes.
        Script._ver = self.version
        Packages._ver = self.version
        self._sections = {}
        self.setupSections()
    def _reset(self):
        # Return the state machine to its initial state for a fresh parse.
        self._state = STATE_COMMANDS
        self._includeDepth = 0
    def getSection(self, s):
        """Return the section object registered for tag *s* (e.g. '%post')."""
        return self._sections[s]
    def handleCommand(self, lineno, args):
        """Dispatch one command line to the handler and return its result."""
        if self.handler:
            self.handler.currentLine = self._line
            retval = self.handler.dispatcher(args, lineno)
            return retval
    def registerSection(self, obj):
        """Register *obj* so lines of its section are routed to it."""
        if (not obj.sectionOpen):
            raise TypeError(('no sectionOpen given for section %s' % obj))
        if (not obj.sectionOpen.startswith('%')):
            raise TypeError(('section %s tag does not start with a %%' % obj.sectionOpen))
        self._sections[obj.sectionOpen] = obj
    def _finalize(self, obj):
        # Close out a section and fall back to command parsing.
        obj.finalize()
        self._state = STATE_COMMANDS
    def _handleSpecialComments(self, line):
        # Only '#platform=' is meaningful, and only outside sections.
        if (self._state != STATE_COMMANDS):
            return
        if (line[:10] == '#platform='):
            # NOTE(review): reads self._line rather than the 'line' argument —
            # identical when called from _stateMachine, but confirm intent.
            self.handler.platform = self._line[10:].strip()
    def _readSection(self, lineIter, lineno):
        """Consume lines belonging to the currently-open section.

        Returns the updated line number. On F8+ syntax, a section that is
        not terminated by %end raises KickstartParseError.
        """
        obj = self._sections[self._state]
        while True:
            try:
                line = next(lineIter)
                if ((line == '') and (self._includeDepth == 0)):
                    # EOF sentinel at top level: missing %end is fatal on F8+.
                    if (self.version >= version.F8):
                        raise KickstartParseError((_('Section %s does not end with %%end.') % obj.sectionOpen), lineno=lineno)
                    self._finalize(obj)
            except StopIteration:
                break
            lineno += 1
            if (self._isBlankOrComment(line) and (not obj.allLines)):
                continue
            if line.lstrip().startswith('%'):
                possibleSectionStart = line.split()[0]
                # A %-word that is neither a known section nor %end/%include
                # is treated as section content (e.g. script text), not markup.
                if ((not self._validState(possibleSectionStart)) and (possibleSectionStart not in ('%end', '%include'))):
                    obj.handleLine(line)
                    continue
                args = shlex.split(line)
                if (args and (args[0] == '%end')):
                    self._finalize(obj)
                    break
                elif (args and (args[0] == '%include')):
                    if ((len(args) == 1) or (not args[1])):
                        raise KickstartParseError(lineno=lineno)
                    self._handleInclude(args[1])
                    continue
                elif (args and (args[0] == '%ksappend')):
                    # %ksappend is handled by preprocessing; ignore here.
                    continue
                elif (args and self._validState(args[0])):
                    if (self.version >= version.F8):
                        raise KickstartParseError((_('Section %s does not end with %%end.') % obj.sectionOpen), lineno=lineno)
                    # Pre-F8: a new section implicitly closes the current one;
                    # push the line back so the state machine sees it again.
                    lineIter.put(line)
                    lineno -= 1
                    self._finalize(obj)
                    break
            else:
                obj.handleLine(line)
        return lineno
    def _validState(self, st):
        # True when *st* names a registered section tag.
        return (st in list(self._sections.keys()))
    def _tryFunc(self, fn):
        # Run *fn*, counting failures; re-raise only when errors are fatal.
        try:
            fn()
        except Exception as msg:
            self.errorsCount += 1
            if self.errorsAreFatal:
                raise
            else:
                print(msg, file=sys.stderr)
    def _isBlankOrComment(self, line):
        # Empty, whitespace-only, or '#'-leading lines carry no commands.
        return (line.isspace() or (line == '') or (line.lstrip()[0] == '#'))
    def _handleInclude(self, f):
        """Recursively parse an included file, tracking include depth."""
        if (not self.followIncludes):
            return
        self._includeDepth += 1
        try:
            self.readKickstart(f, reset=False)
        except KickstartError:
            # Missing include files may be tolerated by configuration.
            if self.missingIncludeIsFatal:
                raise
        self._includeDepth -= 1
    def _stateMachine(self, lineIter):
        """Main parse loop: route lines to commands or to section readers."""
        lineno = 0
        while True:
            try:
                self._line = next(lineIter)
                if (self._line == ''):
                    # Empty-string sentinel marks end of input.
                    break
            except StopIteration:
                break
            lineno += 1
            if self._isBlankOrComment(self._line):
                self._handleSpecialComments(self._line)
                continue
            args = shlex.split(self._line, comments=True)
            if (args[0] == '%include'):
                if ((len(args) == 1) or (not args[1])):
                    raise KickstartParseError(lineno=lineno)
                self._handleInclude(args[1])
                continue
            if (self._state == STATE_COMMANDS):
                if (args[0] == '%ksappend'):
                    continue
                elif (args[0][0] == '%'):
                    newSection = args[0]
                    if (not self._validState(newSection)):
                        if self.unknownSectionIsFatal:
                            raise KickstartParseError((_('Unknown kickstart section: %s') % newSection), lineno=lineno)
                        else:
                            warnings.warn((_('Potentially unknown section seen at line %(lineno)s: %(sectionName)s') % {'lineno': lineno, 'sectionName': newSection}), KickstartParseWarning)
                            # Swallow the unknown section's content silently.
                            self.registerSection(NullSection(self.handler, sectionOpen=newSection))
                    self._state = newSection
                    obj = self._sections[self._state]
                    self._tryFunc((lambda : obj.handleHeader(lineno, args)))
                    lineno = self._readSection(lineIter, lineno)
                else:
                    self._tryFunc((lambda : self.handleCommand(lineno, args)))
            elif (self._state == STATE_END):
                break
            elif (self._includeDepth > 0):
                # Re-entered mid-section from an %include: hand the line
                # back to the still-open section.
                lineIter.put(self._line)
                lineno -= 1
                lineno = self._readSection(lineIter, lineno)
    def readKickstartFromString(self, s, reset=True):
        """Parse kickstart content held in string *s*."""
        if reset:
            self._reset()
        # The trailing '' is an explicit EOF sentinel for the state machine.
        i = PutBackIterator((s.splitlines(True) + ['']))
        self._stateMachine(i)
    def readKickstart(self, f, reset=True):
        """Parse the kickstart file at path *f*."""
        if reset:
            self._reset()
        if (not os.path.exists(f)):
            # Try resolving the path relative to the including file's dir.
            if ((self._includeDepth - 1) in self.currentdir):
                if os.path.exists(os.path.join(self.currentdir[(self._includeDepth - 1)], f)):
                    f = os.path.join(self.currentdir[(self._includeDepth - 1)], f)
        cd = os.path.dirname(f)
        if (not cd.startswith('/')):
            cd = os.path.abspath(cd)
        # Remember this file's directory for nested %include resolution.
        self.currentdir[self._includeDepth] = cd
        try:
            s = load_to_str(f)
        except KickstartError as e:
            raise KickstartError((_('Unable to open input kickstart file: %s') % str(e)), lineno=0)
        self.readKickstartFromString(s, reset=False)
    def setupSections(self):
        """Register the default kickstart sections."""
        self._sections = {}
        self.registerSection(PreScriptSection(self.handler, dataObj=Script))
        self.registerSection(PreInstallScriptSection(self.handler, dataObj=Script))
        self.registerSection(PostScriptSection(self.handler, dataObj=Script))
        self.registerSection(OnErrorScriptSection(self.handler, dataObj=Script))
        self.registerSection(TracebackScriptSection(self.handler, dataObj=Script))
        self.registerSection(PackageSection(self.handler))
        self.registerSection(NullSection(self.handler, sectionOpen='%addon'))
        self.registerSection(NullSection(self.handler, sectionOpen='%anaconda'))
class BatchTrainer(Trainer):
    """Trainer that learns from batches and evaluates on episodes.

    The TRAIN split uses batch data unless evaluation itself targets TRAIN,
    in which case episodic data is used everywhere.
    """

    def build_data(self, split):
        """Return batch data for batch-trained TRAIN, episode data otherwise."""
        if split not in (TRAIN_SPLIT, VALID_SPLIT, TEST_SPLIT):
            raise UnexpectedSplitError(split)
        if split == TRAIN_SPLIT and self.eval_split != TRAIN_SPLIT:
            return self.build_batch(split)
        return self.build_episode(split)

    def _get_num_total_classes(self):
        """Count classes across every dataset spec and every split."""
        return sum(
            len(dataset_spec.get_classes(split))
            for dataset_spec in self.benchmark_spec.dataset_spec_list
            for split in learning_spec.Split)

    def _create_train_specification(self):
        """Episode spec when evaluating on TRAIN, batch spec otherwise."""
        if self.eval_split == TRAIN_SPLIT:
            return learning_spec.EpisodeSpecification(
                learning_spec.Split.TRAIN, self.num_train_classes,
                self.num_support_train, self.num_query_train)
        return learning_spec.BatchSpecification(
            learning_spec.Split.TRAIN, self.learn_config.batch_size)

    def set_way_shots_classes_logits_targets(self):
        # Batch training has no way/shot structure, so TRAIN is skipped
        # unless evaluation itself runs on the TRAIN split.
        self.maybe_set_way_shots_classes_logits_targets(
            skip_train=(self.eval_split != TRAIN_SPLIT))

    def add_eval_summaries(self):
        """Collect eval summaries for every split that is actually evaluated."""
        summaries = []
        for split in self.required_splits:
            if split == TRAIN_SPLIT and self.eval_split != TRAIN_SPLIT:
                continue
            summaries.extend(self.add_eval_summaries_split(split))
        return summaries

    def create_train_learner(self, train_learner_class, episode_or_batch):
        num_total_classes = self._get_num_total_classes()
        # Training mode is off when the TRAIN split doubles as the eval split.
        is_training = self.eval_split != TRAIN_SPLIT
        return train_learner_class(
            is_training, self.learn_config.transductive_batch_norm,
            self.backprop_through_moments, self.ema_object, self.embedding_fn,
            episode_or_batch, num_total_classes, self.num_test_classes)

    def create_eval_learner(self, eval_learner_class, episode):
        """Instantiate an eval learner; batch learners get the class count."""
        if eval_learner_class in BATCH_LEARNERS:
            num_total_classes = self._get_num_total_classes()
            return eval_learner_class(
                False, self.learn_config.transductive_batch_norm,
                self.backprop_through_moments, self.ema_object,
                self.embedding_fn, episode, num_total_classes,
                self.num_test_classes)
        if eval_learner_class in EPISODIC_LEARNERS:
            return eval_learner_class(
                False, self.learn_config.transductive_batch_norm,
                self.backprop_through_moments, self.ema_object,
                self.embedding_fn, episode)
        raise ValueError('The specified eval_learner_class should belong to BATCH_LEARNERS or EPISODIC_LEARNERS.')
class Effect5424(BaseEffect):
    """Passive hull effect: boosts the 'speed' attribute of fitted Large
    Hybrid Turret modules by the hull's shipBonusGB amount, per level of
    the Gallente Battleship skill."""
    type = 'passive'
    # NOTE(review): no 'self' — handlers in this framework appear to be
    # invoked unbound with the fit as the first argument; confirm.
    def handler(fit, ship, context, projectionRange, **kwargs):
        fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Large Hybrid Turret')), 'speed', ship.getModifiedItemAttr('shipBonusGB'), skill='Gallente Battleship', **kwargs)
class SendSubmissionErrors(BaseErrorType):
    """Per-field validation errors for the send-submission mutation.

    Each attribute of the inner type collects the error messages reported
    for the matching submission form field; non_field_errors collects
    everything not tied to a single field.
    """
    # NOTE(review): the inner class is likely missing a '@strawberry.type'
    # decorator lost during extraction — confirm against the original source.
    class _SendSubmissionErrors():
        instance: list[str] = strawberry.field(default_factory=list)
        title: list[str] = strawberry.field(default_factory=list)
        abstract: list[str] = strawberry.field(default_factory=list)
        topic: list[str] = strawberry.field(default_factory=list)
        languages: list[str] = strawberry.field(default_factory=list)
        conference: list[str] = strawberry.field(default_factory=list)
        type: list[str] = strawberry.field(default_factory=list)
        duration: list[str] = strawberry.field(default_factory=list)
        elevator_pitch: list[str] = strawberry.field(default_factory=list)
        notes: list[str] = strawberry.field(default_factory=list)
        audience_level: list[str] = strawberry.field(default_factory=list)
        tags: list[str] = strawberry.field(default_factory=list)
        short_social_summary: list[str] = strawberry.field(default_factory=list)
        speaker_bio: list[str] = strawberry.field(default_factory=list)
        speaker_photo: list[str] = strawberry.field(default_factory=list)
        speaker_website: list[str] = strawberry.field(default_factory=list)
        speaker_level: list[str] = strawberry.field(default_factory=list)
        previous_talk_video: list[str] = strawberry.field(default_factory=list)
        speaker_twitter_handle: list[str] = strawberry.field(default_factory=list)
        speaker_instagram_handle: list[str] = strawberry.field(default_factory=list)
        speaker_linkedin_url: list[str] = strawberry.field(default_factory=list)
        speaker_facebook_url: list[str] = strawberry.field(default_factory=list)
        speaker_mastodon_handle: list[str] = strawberry.field(default_factory=list)
        non_field_errors: list[str] = strawberry.field(default_factory=list)
    # Container for the per-field error lists above.
    errors: _SendSubmissionErrors = None
def _msvc14_find_vc2015():
    """Locate a VC++ 14+ install via the VisualStudio SxS\\VC7 registry key.

    Returns (best_version, best_dir); (None, None) when the registry key
    cannot be opened, and (0, None) when it holds no suitable entry.
    """
    try:
        key = winreg.OpenKey(
            winreg.HKEY_LOCAL_MACHINE,
            'Software\\Microsoft\\VisualStudio\\SxS\\VC7',
            0,
            winreg.KEY_READ | winreg.KEY_WOW64_32KEY,
        )
    except OSError:
        return (None, None)
    best_version = 0
    best_dir = None
    with key:
        for index in itertools.count():
            try:
                name, vc_dir, value_type = winreg.EnumValue(key, index)
            except OSError:
                # Ran past the last registry value.
                break
            # Only string values naming an existing directory are candidates.
            if not (name and value_type == winreg.REG_SZ and isdir(vc_dir)):
                continue
            try:
                version = int(float(name))
            except (ValueError, TypeError):
                continue
            if version >= 14 and version > best_version:
                best_version, best_dir = version, vc_dir
    return (best_version, best_dir)
class TestDeprecated():
    """Tests for the deprecated() module-attribute helper."""
    # NOTE(review): the bare '_type_check' below looks like a decorator that
    # lost its '@' during extraction — confirm against the original source.
    _type_check
    def test_deprecated(self, monkeypatch):
        """Accessing deprecated attrs warns; plain attrs do not."""
        mod = types.ModuleType('TestDeprecated/test_deprecated')
        monkeypatch.setitem(sys.modules, mod.__name__, mod)
        deprecated(name='X', value=1, module_name=mod.__name__, message='deprecated message text', warning_class=DeprecationWarning)
        mod.Y = deprecated(value=2, module_name=mod.__name__, message='more deprecated text', warning_class=PendingDeprecationWarning)
        # deprecated() may replace the module in sys.modules; re-fetch it.
        mod = sys.modules[mod.__name__]
        mod.Z = 3
        with warnings.catch_warnings(record=True) as log:
            warnings.simplefilter('always', PendingDeprecationWarning)
            warnings.simplefilter('always', DeprecationWarning)
            # X and Y warn on access; Z is a plain attribute and must not.
            assert (mod.X == 1)
            assert (mod.Y == 2)
            assert (mod.Z == 3)
        [msg1, msg2] = log
        assert (msg1.category is DeprecationWarning)
        assert (msg1.message.args == ('deprecated message text',))
        assert (msg2.category is PendingDeprecationWarning)
        assert (msg2.message.args == ('more deprecated text',))
        assert ('Y' in dir(mod))
    # NOTE(review): bare '_type_check' again — likely a stripped decorator.
    _type_check
    def test_deleting_deprecated_members(self, monkeypatch):
        """Deleting deprecated attrs warns once and removes them for good."""
        mod = types.ModuleType('TestDeprecated/test_deprecated')
        monkeypatch.setitem(sys.modules, mod.__name__, mod)
        deprecated(name='X', value=1, module_name=mod.__name__, message='deprecated message text', warning_class=DeprecationWarning)
        mod.Y = deprecated(value=2, module_name=mod.__name__, message='more deprecated text', warning_class=PendingDeprecationWarning)
        mod = sys.modules[mod.__name__]
        mod.Z = 3
        with warnings.catch_warnings(record=True) as log:
            warnings.simplefilter('always', PendingDeprecationWarning)
            warnings.simplefilter('always', DeprecationWarning)
            del mod.X
            del mod.Y
            del mod.Z
        [msg1, msg2] = log
        assert (msg1.category is DeprecationWarning)
        assert (msg1.message.args == ('deprecated message text',))
        assert (msg2.category is PendingDeprecationWarning)
        assert (msg2.message.args == ('more deprecated text',))
        assert ('X' not in dir(mod))
        assert ('Y' not in dir(mod))
        assert ('Z' not in dir(mod))
        # A second delete of an already-removed member must raise.
        with pytest.raises(AttributeError):
            del mod.X
_ordering
# NOTE(review): the bare '_ordering' above appears to be a class decorator
# that lost its '@' during extraction — confirm against the original source
# (it would supply the comparisons derived from __eq__/__lt__ below).
class APEBinaryValue(_APEValue):
    """An APEv2 tag value holding raw binary data, stored verbatim."""
    kind = BINARY
    def _parse(self, data):
        # Binary payloads are kept as-is; no decoding.
        self.value = data
    def _write(self):
        return self.value
    def _validate(self, value):
        if (not isinstance(value, bytes)):
            raise TypeError('value not bytes')
        return bytes(value)
    def __len__(self):
        return len(self.value)
    def __bytes__(self):
        return self._write()
    def __eq__(self, other):
        # Compares the raw payload directly against *other*.
        return (self.value == other)
    def __lt__(self, other):
        return (self.value < other)
    def pprint(self):
        # Payload is opaque; show only its size.
        return (u'[%d bytes]' % len(self))
class MyBertForTokenClassification(BertPreTrainedModel):
    """BERT encoder with a linear per-token classification head.

    forward() returns the label-mask-restricted cross-entropy loss when
    labels are provided, otherwise the raw per-token logits.
    """

    def __init__(self, config, num_labels):
        super(MyBertForTokenClassification, self).__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, num_labels)
        self.apply(self.init_bert_weights)

    def forward(self, input_ids, token_type_ids, attention_mask, labels=None, label_mask=None):
        sequence_output, _ = self.bert(
            input_ids, token_type_ids, attention_mask,
            output_all_encoded_layers=False)
        logits = self.classifier(self.dropout(sequence_output))
        if labels is None:
            return logits
        # Restrict the loss to positions flagged by label_mask (e.g. the
        # first sub-token of each word).
        keep = label_mask.view(-1) == 1
        flat_logits = logits.view(-1, self.num_labels)[keep]
        flat_labels = labels.view(-1)[keep]
        return CrossEntropyLoss()(flat_logits, flat_labels)
def parse_args():
    """Build and parse the command-line arguments for recognizer training.

    Also mirrors --local_rank into the LOCAL_RANK environment variable for
    compatibility with torch.distributed launchers.
    """
    p = argparse.ArgumentParser(description='Train a recognizer')
    p.add_argument('config', help='train config file path')
    p.add_argument('--work-dir', help='the dir to save logs and models')
    p.add_argument('--resume-from',
                   help='the checkpoint file to resume from')
    p.add_argument('--validate', action='store_true',
                   help='whether to evaluate the checkpoint during training')
    p.add_argument('--test-last', action='store_true',
                   help='whether to test the checkpoint after training')
    p.add_argument('--test-best', action='store_true',
                   help='whether to test the best checkpoint (if applicable) after training')
    # --gpus and --gpu-ids are mutually exclusive ways to pick devices.
    gpu_group = p.add_mutually_exclusive_group()
    gpu_group.add_argument('--gpus', type=int,
                           help='number of gpus to use (only applicable to non-distributed training)')
    gpu_group.add_argument('--gpu-ids', type=int, nargs='+',
                           help='ids of gpus to use (only applicable to non-distributed training)')
    p.add_argument('--seed', type=int, default=None, help='random seed')
    p.add_argument('--deterministic', action='store_true',
                   help='whether to set deterministic options for CUDNN backend.')
    p.add_argument('--cfg-options', nargs='+', action=DictAction, default={},
                   help="override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. For example, '--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'")
    p.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'],
                   default='none', help='job launcher')
    p.add_argument('--local_rank', type=int, default=0)
    args = p.parse_args()
    # Expose the launcher-provided rank via the environment if unset.
    os.environ.setdefault('LOCAL_RANK', str(args.local_rank))
    return args
class StubgencSuite(unittest.TestCase):
def test_infer_hash_sig(self) -> None:
assert_equal(infer_c_method_args('__hash__'), [self_arg])
assert_equal(infer_method_ret_type('__hash__'), 'int')
def test_infer_getitem_sig(self) -> None:
assert_equal(infer_c_method_args('__getitem__'), [self_arg, ArgSig(name='index')])
def test_infer_setitem_sig(self) -> None:
assert_equal(infer_c_method_args('__setitem__'), [self_arg, ArgSig(name='index'), ArgSig(name='object')])
assert_equal(infer_method_ret_type('__setitem__'), 'None')
def test_infer_eq_op_sig(self) -> None:
for op in ('eq', 'ne', 'lt', 'le', 'gt', 'ge'):
assert_equal(infer_c_method_args(f'__{op}__'), [self_arg, ArgSig(name='other', type='object')])
def test_infer_binary_op_sig(self) -> None:
for op in ('add', 'radd', 'sub', 'rsub', 'mul', 'rmul'):
assert_equal(infer_c_method_args(f'__{op}__'), [self_arg, ArgSig(name='other')])
def test_infer_equality_op_sig(self) -> None:
for op in ('eq', 'ne', 'lt', 'le', 'gt', 'ge', 'contains'):
assert_equal(infer_method_ret_type(f'__{op}__'), 'bool')
def test_infer_unary_op_sig(self) -> None:
for op in ('neg', 'pos'):
assert_equal(infer_c_method_args(f'__{op}__'), [self_arg])
def test_infer_cast_sig(self) -> None:
for op in ('float', 'bool', 'bytes', 'int'):
assert_equal(infer_method_ret_type(f'__{op}__'), op)
def test_generate_class_stub_no_crash_for_object(self) -> None:
output: list[str] = []
mod = ModuleType('module', '')
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_class_stub('alias', object, output)
assert_equal(gen.get_imports().splitlines(), [])
assert_equal(output[0], 'class alias:')
def test_generate_class_stub_variable_type_annotation(self) -> None:
class TestClassVariableCls():
x = 1
output: list[str] = []
mod = ModuleType('module', '')
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_class_stub('C', TestClassVariableCls, output)
assert_equal(gen.get_imports().splitlines(), ['from typing import ClassVar'])
assert_equal(output, ['class C:', ' x: ClassVar[int] = ...'])
def test_generate_c_type_inheritance(self) -> None:
class TestClass(KeyError):
pass
output: list[str] = []
mod = ModuleType('module, ')
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_class_stub('C', TestClass, output)
assert_equal(output, ['class C(KeyError): ...'])
assert_equal(gen.get_imports().splitlines(), [])
def test_generate_c_type_inheritance_same_module(self) -> None:
output: list[str] = []
mod = ModuleType(TestBaseClass.__module__, '')
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_class_stub('C', TestClass, output)
assert_equal(output, ['class C(TestBaseClass): ...'])
assert_equal(gen.get_imports().splitlines(), [])
def test_generate_c_type_inheritance_other_module(self) -> None:
import argparse
class TestClass(argparse.Action):
pass
output: list[str] = []
mod = ModuleType('module', '')
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_class_stub('C', TestClass, output)
assert_equal(output, ['class C(argparse.Action): ...'])
assert_equal(gen.get_imports().splitlines(), ['import argparse'])
def test_generate_c_type_inheritance_builtin_type(self) -> None:
class TestClass(type):
pass
output: list[str] = []
mod = ModuleType('module', '')
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_class_stub('C', TestClass, output)
assert_equal(output, ['class C(type): ...'])
assert_equal(gen.get_imports().splitlines(), [])
def test_generate_c_type_with_docstring(self) -> None:
class TestClass():
def test(self, arg0: str) -> None:
output: list[str] = []
mod = ModuleType(TestClass.__module__, '')
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub('test', TestClass.test, output=output, class_info=ClassInfo(self_var='self', cls=TestClass, name='TestClass'))
assert_equal(output, ['def test(self, arg0: int) -> Any: ...'])
assert_equal(gen.get_imports().splitlines(), [])
def test_generate_c_type_with_docstring_no_self_arg(self) -> None:
class TestClass():
def test(self, arg0: str) -> None:
output: list[str] = []
mod = ModuleType(TestClass.__module__, '')
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub('test', TestClass.test, output=output, class_info=ClassInfo(self_var='self', cls=TestClass, name='TestClass'))
assert_equal(output, ['def test(self, arg0: int) -> Any: ...'])
assert_equal(gen.get_imports().splitlines(), [])
def test_generate_c_type_classmethod(self) -> None:
class TestClass():
def test(cls, arg0: str) -> None:
pass
output: list[str] = []
mod = ModuleType(TestClass.__module__, '')
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub('test', TestClass.test, output=output, class_info=ClassInfo(self_var='cls', cls=TestClass, name='TestClass'))
assert_equal(output, ['', 'def test(cls, *args, **kwargs): ...'])
assert_equal(gen.get_imports().splitlines(), [])
def test_generate_c_type_classmethod_with_overloads(self) -> None:
class TestClass():
def test(self, arg0: str) -> None:
pass
output: list[str] = []
mod = ModuleType(TestClass.__module__, '')
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub('test', TestClass.test, output=output, class_info=ClassInfo(self_var='cls', cls=TestClass, name='TestClass'))
assert_equal(output, ['', '', 'def test(cls, arg0: str) -> Any: ...', '', '', 'def test(cls, arg0: int) -> Any: ...'])
assert_equal(gen.get_imports().splitlines(), ['from typing import overload'])
def test_generate_c_type_with_docstring_empty_default(self) -> None:
class TestClass():
def test(self, arg0: str='') -> None:
output: list[str] = []
mod = ModuleType(TestClass.__module__, '')
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub('test', TestClass.test, output=output, class_info=ClassInfo(self_var='self', cls=TestClass, name='TestClass'))
assert_equal(output, ['def test(self, arg0: str = ...) -> Any: ...'])
assert_equal(gen.get_imports().splitlines(), [])
def test_generate_c_function_other_module_arg(self) -> None:
def test(arg0: str) -> None:
output: list[str] = []
mod = ModuleType(self.__module__, '')
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub('test', test, output=output)
assert_equal(output, ['def test(arg0: argparse.Action) -> Any: ...'])
assert_equal(gen.get_imports().splitlines(), ['import argparse'])
def test_generate_c_function_same_module(self) -> None:
def test(arg0: str) -> None:
output: list[str] = []
mod = ModuleType('argparse', '')
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub('test', test, output=output)
assert_equal(output, ['def test(arg0: Action) -> Action: ...'])
assert_equal(gen.get_imports().splitlines(), [])
def test_generate_c_function_other_module(self) -> None:
def test(arg0: str) -> None:
output: list[str] = []
mod = ModuleType(self.__module__, '')
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub('test', test, output=output)
assert_equal(output, ['def test(arg0: argparse.Action) -> argparse.Action: ...'])
assert_equal(gen.get_imports().splitlines(), ['import argparse'])
def test_generate_c_function_same_module_nested(self) -> None:
def test(arg0: str) -> None:
output: list[str] = []
mod = ModuleType('argparse', '')
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub('test', test, output=output)
assert_equal(output, ['def test(arg0: list[Action]) -> list[Action]: ...'])
assert_equal(gen.get_imports().splitlines(), [])
def test_generate_c_function_same_module_compound(self) -> None:
def test(arg0: str) -> None:
output: list[str] = []
mod = ModuleType('argparse', '')
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub('test', test, output=output)
assert_equal(output, ['def test(arg0: Union[Action, None]) -> Tuple[Action, None]: ...'])
assert_equal(gen.get_imports().splitlines(), [])
def test_generate_c_function_other_module_nested(self) -> None:
def test(arg0: str) -> None:
output: list[str] = []
mod = ModuleType(self.__module__, '')
gen = InspectionStubGenerator(mod.__name__, known_modules=['foo', 'foo.spangle', 'bar'], module=mod)
gen.generate_function_stub('test', test, output=output)
assert_equal(output, ['def test(arg0: foo.bar.Action) -> other.Thing: ...'])
assert_equal(gen.get_imports().splitlines(), ['import foo', 'import other'])
def test_generate_c_function_no_crash_for_non_str_docstring(self) -> None:
def test(arg0: str) -> None:
...
test.__doc__ = property((lambda self: 'test(arg0: str) -> None'))
output: list[str] = []
mod = ModuleType(self.__module__, '')
gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
gen.generate_function_stub('test', test, output=output)
assert_equal(output, ['def test(*args, **kwargs): ...'])
assert_equal(gen.get_imports().splitlines(), [])
def test_generate_c_property_with_pybind11(self) -> None:
    """A getter-only property is emitted as a read-only property stub."""
    # NOTE(review): the getter's docstring (which carries the pybind11-style
    # signature the generator parses) appears stripped during extraction.
    class TestClass():
        def get_attribute(self) -> None:
        attribute = property(get_attribute, doc='')
    readwrite_properties: list[str] = []
    readonly_properties: list[str] = []
    mod = ModuleType('module', '')
    gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
    gen.generate_property_stub('attribute', TestClass.__dict__['attribute'], TestClass.attribute, [], readwrite_properties, readonly_properties)
    # No setter was supplied, so the property is read-only.
    assert_equal(readwrite_properties, [])
    assert_equal(readonly_properties, ['', 'def attribute(self) -> str: ...'])
def test_generate_c_property_with_rw_property(self) -> None:
    """A read-write property is emitted as a plain attribute with an Incomplete type."""
    # NOTE(review): the @property / @attribute.setter decorators appear to have
    # been stripped during extraction.
    class TestClass():
        def __init__(self) -> None:
            self._attribute = 0

        def attribute(self) -> int:
            return self._attribute

        def attribute(self, value: int) -> None:
            self._attribute = value
    readwrite_properties: list[str] = []
    readonly_properties: list[str] = []
    mod = ModuleType('module', '')
    gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
    gen.generate_property_stub('attribute', TestClass.__dict__['attribute'], TestClass.attribute, [], readwrite_properties, readonly_properties)
    # Having a setter makes it read-write, rendered as a typed attribute.
    assert_equal(readwrite_properties, ['attribute: Incomplete'])
    assert_equal(readonly_properties, [])
def test_generate_c_type_with_single_arg_generic(self) -> None:
    """A single-parameter generic (List[int]) in a method docstring survives into the stub."""
    # NOTE(review): the method's docstring (signature fixture) appears stripped.
    class TestClass():
        def test(self, arg0: str) -> None:
    output: list[str] = []
    mod = ModuleType(TestClass.__module__, '')
    gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
    gen.generate_function_stub('test', TestClass.test, output=output, class_info=ClassInfo(self_var='self', cls=TestClass, name='TestClass'))
    assert_equal(output, ['def test(self, arg0: List[int]) -> Any: ...'])
    assert_equal(gen.get_imports().splitlines(), [])
def test_generate_c_type_with_double_arg_generic(self) -> None:
    """A two-parameter generic (Dict[str, int]) in a method docstring survives into the stub."""
    # NOTE(review): the method's docstring (signature fixture) appears stripped.
    class TestClass():
        def test(self, arg0: str) -> None:
    output: list[str] = []
    mod = ModuleType(TestClass.__module__, '')
    gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
    gen.generate_function_stub('test', TestClass.test, output=output, class_info=ClassInfo(self_var='self', cls=TestClass, name='TestClass'))
    assert_equal(output, ['def test(self, arg0: Dict[str, int]) -> Any: ...'])
    assert_equal(gen.get_imports().splitlines(), [])
def test_generate_c_type_with_nested_generic(self) -> None:
    """Nested generics (Dict[str, List[int]]) are reproduced verbatim in the stub."""
    # NOTE(review): the method's docstring (signature fixture) appears stripped.
    class TestClass():
        def test(self, arg0: str) -> None:
    output: list[str] = []
    mod = ModuleType(TestClass.__module__, '')
    gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
    gen.generate_function_stub('test', TestClass.test, output=output, class_info=ClassInfo(self_var='self', cls=TestClass, name='TestClass'))
    assert_equal(output, ['def test(self, arg0: Dict[str, List[int]]) -> Any: ...'])
    assert_equal(gen.get_imports().splitlines(), [])
def test_generate_c_type_with_generic_using_other_module_first(self) -> None:
    """A foreign-module type as the FIRST generic argument stays qualified and is imported."""
    # NOTE(review): the method's docstring (signature fixture) appears stripped.
    class TestClass():
        def test(self, arg0: str) -> None:
    output: list[str] = []
    mod = ModuleType(TestClass.__module__, '')
    gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
    gen.generate_function_stub('test', TestClass.test, output=output, class_info=ClassInfo(self_var='self', cls=TestClass, name='TestClass'))
    assert_equal(output, ['def test(self, arg0: Dict[argparse.Action, int]) -> Any: ...'])
    assert_equal(gen.get_imports().splitlines(), ['import argparse'])
def test_generate_c_type_with_generic_using_other_module_last(self) -> None:
    """A foreign-module type as the LAST generic argument stays qualified and is imported."""
    # NOTE(review): the method's docstring (signature fixture) appears stripped.
    class TestClass():
        def test(self, arg0: str) -> None:
    output: list[str] = []
    mod = ModuleType(TestClass.__module__, '')
    gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
    gen.generate_function_stub('test', TestClass.test, output=output, class_info=ClassInfo(self_var='self', cls=TestClass, name='TestClass'))
    assert_equal(output, ['def test(self, arg0: Dict[str, argparse.Action]) -> Any: ...'])
    assert_equal(gen.get_imports().splitlines(), ['import argparse'])
def test_generate_c_type_with_overload_pybind11(self) -> None:
    """A pybind11 'Overloaded function.' docstring expands into @overload stubs."""
    # NOTE(review): the __init__ docstring listing the overloaded signatures
    # appears to have been stripped during extraction.
    class TestClass():
        def __init__(self, arg0: str) -> None:
    output: list[str] = []
    mod = ModuleType(TestClass.__module__, '')
    gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
    gen.generate_function_stub('__init__', TestClass.__init__, output=output, class_info=ClassInfo(self_var='self', cls=TestClass, name='TestClass'))
    # One stub per overload, plus the catch-all (*args, **kwargs) variant.
    assert_equal(output, ['', 'def __init__(self, arg0: str) -> None: ...', '', 'def __init__(self, arg0: str, arg1: str) -> None: ...', '', 'def __init__(self, *args, **kwargs) -> Any: ...'])
    assert_equal(gen.get_imports().splitlines(), ['from typing import overload'])
def test_generate_c_type_with_overload_shiboken(self) -> None:
    """A shiboken-style class docstring with multiple signatures expands into @overload stubs."""
    # NOTE(review): the class docstring listing the overloaded signatures
    # appears to have been stripped during extraction.
    class TestClass():
        def __init__(self, arg0: str) -> None:
            pass
    output: list[str] = []
    mod = ModuleType(TestClass.__module__, '')
    gen = InspectionStubGenerator(mod.__name__, known_modules=[mod.__name__], module=mod)
    # Unlike pybind11, the signatures come from the class docstring, so it is
    # passed explicitly via ClassInfo.docstring.
    gen.generate_function_stub('__init__', TestClass.__init__, output=output, class_info=ClassInfo(self_var='self', cls=TestClass, name='TestClass', docstring=getattr(TestClass, '__doc__', None)))
    assert_equal(output, ['', 'def __init__(self, arg0: str) -> None: ...', '', 'def __init__(self, arg0: str, arg1: str) -> None: ...'])
    assert_equal(gen.get_imports().splitlines(), ['from typing import overload'])
def load_pairs(raw_data, split, direction):
    """Load parallel sentence pairs for one split of a translation direction.

    Args:
        raw_data: directory containing the ``{split}.{direction}.{lang}`` files.
        split: dataset split name, e.g. ``'train'`` or ``'valid'``.
        direction: language pair of the form ``'src-tgt'`` (e.g. ``'en_XX-fr'``).

    Returns:
        A list of ``(source_line, target_line)`` tuples where the ``en_XX``
        side is always the target; ``[]`` if either file is missing.
    """
    src, tgt = direction.split('-')
    src_f = f'{raw_data}/{split}.{direction}.{src}'
    tgt_f = f'{raw_data}/{split}.{direction}.{tgt}'
    if tgt != 'en_XX':
        # Normalize so the English (en_XX) file is always the target side.
        src_f, tgt_f = tgt_f, src_f
    if not (os.path.exists(src_f) and os.path.exists(tgt_f)):
        return []
    # Fix: the original leaked both file handles (open(...).read()); use
    # context managers and pin the encoding for portable text handling.
    with open(src_f, encoding='utf-8') as sf, open(tgt_f, encoding='utf-8') as tf:
        return list(zip(sf.read().splitlines(), tf.read().splitlines()))
def repo_with_git_flow_and_release_channels_angular_commits(git_repo_factory, file_in_repo):
    """Pytest fixture: build a git-flow repo with angular-style commits and three release channels.

    Layout created:
      * default branch: releases 0.1.0, 0.1.1-rc.1, 1.0.0-rc.1 and final 1.0.0
      * 'dev' branch (from 1.0.0): prereleases 1.1.0-rc.1 and 1.1.0-rc.2
      * 'feature' branch (from dev): prereleases 1.1.0-alpha.1 .. 1.1.0-alpha.3

    Yields the repo with 'feature' checked out, then closes it.
    """
    git_repo = git_repo_factory()
    add_text_to_file(git_repo, file_in_repo)
    git_repo.git.commit(m='Initial commit')
    # --- main line: patch + breaking-change releases up to v1.0.0 ---
    add_text_to_file(git_repo, file_in_repo)
    git_repo.git.commit(m=COMMIT_MESSAGE.format(version='0.1.0'))
    git_repo.git.tag('v0.1.0', m='v0.1.0')
    add_text_to_file(git_repo, file_in_repo)
    git_repo.git.commit(m='fix: add some more text')
    add_text_to_file(git_repo, file_in_repo)
    git_repo.git.commit(m=COMMIT_MESSAGE.format(version='0.1.1-rc.1'))
    git_repo.git.tag('v0.1.1-rc.1', m='v0.1.1-rc.1')
    add_text_to_file(git_repo, file_in_repo)
    # 'feat!' marks a breaking change, bumping the next release to 1.0.0.
    git_repo.git.commit(m='feat!: add some more text')
    add_text_to_file(git_repo, file_in_repo)
    git_repo.git.commit(m=COMMIT_MESSAGE.format(version='1.0.0-rc.1'))
    git_repo.git.tag('v1.0.0-rc.1', m='v1.0.0-rc.1')
    add_text_to_file(git_repo, file_in_repo)
    git_repo.git.commit(m='feat: add some more text')
    add_text_to_file(git_repo, file_in_repo)
    git_repo.git.commit(m=COMMIT_MESSAGE.format(version='1.0.0'))
    git_repo.git.tag('v1.0.0', m='v1.0.0')
    # Sanity check: HEAD must sit exactly on the v1.0.0 tag before branching.
    assert (git_repo.commit('v1.0.0').hexsha == git_repo.head.commit.hexsha)
    # --- 'dev' branch: rc prereleases for 1.1.0 ---
    git_repo.create_head('dev')
    git_repo.heads.dev.checkout()
    add_text_to_file(git_repo, file_in_repo)
    git_repo.git.commit(m='feat: (dev) add some more text')
    add_text_to_file(git_repo, file_in_repo)
    git_repo.git.commit(m=COMMIT_MESSAGE.format(version='1.1.0-rc.1'))
    git_repo.git.tag('v1.1.0-rc.1', m='v1.1.0-rc.1')
    add_text_to_file(git_repo, file_in_repo)
    git_repo.git.commit(m='fix: (dev) add some more text')
    add_text_to_file(git_repo, file_in_repo)
    git_repo.git.commit(m=COMMIT_MESSAGE.format(version='1.1.0-rc.2'))
    git_repo.git.tag('v1.1.0-rc.2', m='v1.1.0-rc.2')
    assert (git_repo.commit('v1.1.0-rc.2').hexsha == git_repo.head.commit.hexsha)
    # --- 'feature' branch: alpha prereleases for 1.1.0 ---
    git_repo.create_head('feature')
    git_repo.heads.feature.checkout()
    add_text_to_file(git_repo, file_in_repo)
    git_repo.git.commit(m='feat: (feature) add some more text')
    add_text_to_file(git_repo, file_in_repo)
    git_repo.git.commit(m=COMMIT_MESSAGE.format(version='1.1.0-alpha.1'))
    git_repo.git.tag('v1.1.0-alpha.1', m='v1.1.0-alpha.1')
    add_text_to_file(git_repo, file_in_repo)
    git_repo.git.commit(m='feat: (feature) add some more text')
    add_text_to_file(git_repo, file_in_repo)
    git_repo.git.commit(m=COMMIT_MESSAGE.format(version='1.1.0-alpha.2'))
    git_repo.git.tag('v1.1.0-alpha.2', m='v1.1.0-alpha.2')
    add_text_to_file(git_repo, file_in_repo)
    git_repo.git.commit(m='fix: (feature) add some more text')
    add_text_to_file(git_repo, file_in_repo)
    git_repo.git.commit(m=COMMIT_MESSAGE.format(version='1.1.0-alpha.3'))
    git_repo.git.tag('v1.1.0-alpha.3', m='v1.1.0-alpha.3')
    assert (git_repo.commit('v1.1.0-alpha.3').hexsha == git_repo.head.commit.hexsha)
    assert (git_repo.active_branch.name == 'feature')
    # Yield to the test, then release the repo handle.
    (yield git_repo)
    git_repo.close()
def initial_data(watch_html: str) -> str:
    """Extract the ytInitialData JSON object from a watch-page HTML document.

    Each known assignment pattern is tried in turn; the first successful parse
    wins. Raises RegexMatchError when no pattern matches.
    """
    patterns = (
        r'''window\[['\"]ytInitialData['\"]]\s*=\s*''',
        r'ytInitialData\s*=\s*',
    )
    for candidate in patterns:
        try:
            return parse_for_object(watch_html, candidate)
        except HTMLParseError:
            # This pattern did not match; fall through to the next one.
            continue
    raise RegexMatchError(caller='initial_data', pattern='initial_data_pattern')
def conv_block(x, growth_rate, name, params=PARAM_NONE):
    """DenseNet-style conv block: BN -> ReLU -> 1x1 conv -> BN -> ReLU -> 3x3 conv,
    with the branch output concatenated back onto the input along the channel axis.

    `growth_rate` is the number of channels the block adds to its input.
    """
    channel_axis = 3 if backend.image_data_format() == 'channels_last' else 1
    branch = layers.BatchNormalization(axis=channel_axis, epsilon=BN_EPS, name=name + '_0_bn')(x, params=params)
    branch = layers.Activation('relu', name=name + '_0_relu')(branch)
    # 1x1 "bottleneck" convolution widens to 4x the growth rate.
    branch = layers.Conv2D(4 * growth_rate, 1, use_bias=False, name=name + '_1_conv')(branch, params=params)
    branch = layers.BatchNormalization(axis=channel_axis, epsilon=BN_EPS, name=name + '_1_bn')(branch, params=params)
    branch = layers.Activation('relu', name=name + '_1_relu')(branch)
    branch = layers.Conv2D(growth_rate, 3, padding='same', use_bias=False, name=name + '_2_conv')(branch, params=params)
    return layers.Concatenate(axis=channel_axis, name=name + '_concat')([x, branch])
class TestFastConsumerFactory():
    """Tests for FastConsumerFactory: consumer construction, wiring and health checks."""
    # NOTE(review): the bare string lines below look like stripped
    # @mock.patch('confluent_kafka.Consumer') decorators — confirm against the
    # original file.
    ('confluent_kafka.Consumer')
    def test_make_kafka_consumer(self, kafka_consumer, name, baseplate, bootstrap_servers, group_id, topics):
        """make_kafka_consumer builds a Consumer with the expected config and subscribes."""
        mock_consumer = mock.Mock()
        # Topics must exist in the cluster metadata for subscription to proceed.
        mock_consumer.list_topics.return_value = mock.Mock(topics={'topic_1': mock.Mock(), 'topic_2': mock.Mock(), 'topic_3': mock.Mock()})
        kafka_consumer.return_value = mock_consumer
        _consumer = FastConsumerFactory.make_kafka_consumer(bootstrap_servers, group_id, topics)
        assert (_consumer == mock_consumer)
        kafka_consumer.assert_called_once_with({'bootstrap.servers': bootstrap_servers, 'group.id': group_id, 'auto.offset.reset': 'latest', 'heartbeat.interval.ms': 3000, 'session.timeout.ms': 10000, 'max.poll.interval.ms': 300000, 'enable.auto.commit': 'true', 'auto.commit.interval.ms': 5000, 'enable.auto.offset.store': 'true', 'on_commit': FastConsumerFactory._commit_callback})
        mock_consumer.subscribe.assert_called_once()
        assert (mock_consumer.subscribe.call_args_list[0][0][0] == topics)
    ('confluent_kafka.Consumer')
    def test_make_kafka_consumer_unknown_topic(self, kafka_consumer, name, baseplate, bootstrap_servers, group_id, topics):
        """Requesting a topic absent from cluster metadata raises and skips subscribe."""
        mock_consumer = mock.Mock()
        mock_consumer.list_topics.return_value = mock.Mock(topics={'topic_1': mock.Mock(), 'topic_2': mock.Mock(), 'topic_3': mock.Mock()})
        kafka_consumer.return_value = mock_consumer
        with pytest.raises(AssertionError):
            FastConsumerFactory.make_kafka_consumer(bootstrap_servers, group_id, ['topic_4'])
        kafka_consumer.assert_called_once_with({'bootstrap.servers': bootstrap_servers, 'group.id': group_id, 'auto.offset.reset': 'latest', 'heartbeat.interval.ms': 3000, 'session.timeout.ms': 10000, 'max.poll.interval.ms': 300000, 'enable.auto.commit': 'true', 'auto.commit.interval.ms': 5000, 'enable.auto.offset.store': 'true', 'on_commit': FastConsumerFactory._commit_callback})
        mock_consumer.subscribe.assert_not_called()
    ('confluent_kafka.Consumer')
    def test_init(self, kafka_consumer, name, baseplate, bootstrap_servers, group_id, topics):
        """FastConsumerFactory.new stores all collaborators on the factory."""
        mock_consumer = mock.Mock()
        mock_consumer.list_topics.return_value = mock.Mock(topics={'topic_1': mock.Mock(), 'topic_2': mock.Mock(), 'topic_3': mock.Mock()})
        kafka_consumer.return_value = mock_consumer
        handler_fn = mock.Mock()
        message_unpack_fn = mock.Mock()
        health_check_fn = mock.Mock()
        factory = FastConsumerFactory.new(name=name, baseplate=baseplate, bootstrap_servers=bootstrap_servers, group_id=group_id, topics=topics, handler_fn=handler_fn, message_unpack_fn=message_unpack_fn, health_check_fn=health_check_fn)
        assert (factory.name == name)
        assert (factory.baseplate == baseplate)
        assert (factory.handler_fn == handler_fn)
        assert (factory.message_unpack_fn == message_unpack_fn)
        assert (factory.health_check_fn == health_check_fn)
        assert (factory.consumer == mock_consumer)
    def make_queue_consumer_factory(self, name, baseplate, bootstrap_servers, group_id, topics):
        """Fixture-style helper: returns a builder that creates a patched factory."""
        ('confluent_kafka.Consumer')
        def _make_queue_consumer_factory(kafka_consumer, health_check_fn=None):
            mock_consumer = mock.Mock()
            mock_consumer.list_topics.return_value = mock.Mock(topics={'topic_1': mock.Mock(), 'topic_2': mock.Mock(), 'topic_3': mock.Mock()})
            kafka_consumer.return_value = mock_consumer
            return FastConsumerFactory.new(name=name, baseplate=baseplate, bootstrap_servers=bootstrap_servers, group_id=group_id, topics=topics, handler_fn=(lambda ctx, data, msg: True), message_unpack_fn=(lambda b: {}), health_check_fn=health_check_fn)
        return _make_queue_consumer_factory
    def test_build_pump_worker(self, make_queue_consumer_factory):
        """build_pump_worker returns a KafkaConsumerWorker bound to the factory's consumer."""
        factory = make_queue_consumer_factory()
        work_queue = Queue(maxsize=10)
        pump = factory.build_pump_worker(work_queue)
        assert isinstance(pump, KafkaConsumerWorker)
        assert (pump.consumer == factory.consumer)
        assert (pump.work_queue == work_queue)
    def test_build_message_handler(self, make_queue_consumer_factory):
        """build_message_handler wires the factory's collaborators into the handler."""
        factory = make_queue_consumer_factory()
        handler = factory.build_message_handler()
        assert isinstance(handler, KafkaMessageHandler)
        assert (handler.baseplate == factory.baseplate)
        assert (handler.name == factory.name)
        assert (handler.handler_fn == factory.handler_fn)
        assert (handler.message_unpack_fn == factory.message_unpack_fn)
        assert (handler.on_success_fn is None)
    # NOTE(review): the bare `.parametrize(...)` below looks like a stripped
    # @pytest.mark.parametrize decorator.
    .parametrize('health_check_fn', [None, (lambda req: True)])
    def test_build_health_checker(self, health_check_fn, make_queue_consumer_factory):
        """build_health_checker returns a StreamServer with or without a custom check."""
        factory = make_queue_consumer_factory(health_check_fn=health_check_fn)
        listener = mock.Mock(spec=socket.socket)
        health_checker = factory.build_health_checker(listener)
        assert isinstance(health_checker, StreamServer)
def assert_soquets_belong_to_registers(cbloq: CompositeBloq):
    """Validate every soquet in `cbloq` against its register.

    Raises BloqError when a soquet's index has the wrong rank, exceeds its
    register's shape, or references a register absent from its bloq signature.
    """
    for soquet in cbloq.all_soquets:
        register = soquet.reg
        # Index rank must match the register's declared shape rank.
        if len(soquet.idx) != len(register.shape):
            raise BloqError(f'{soquet} has an idx of the wrong shape for {register}')
        # Each index component must lie within the register's bounds.
        for index_value, bound in zip(soquet.idx, register.shape):
            if index_value >= bound:
                raise BloqError(f"{soquet}'s index exceeds the bounds provided by {register}'s shape.")
        if isinstance(soquet.binst, DanglingT):
            # Dangling soquets have no bloq instance to check against.
            continue
        if soquet.reg not in soquet.binst.bloq.signature:
            raise BloqError(f"{soquet}'s register doesn't exist on its bloq {soquet.binst.bloq}")
class WinFontLoader(_FontLoader):
    """Font loader that resolves TrueType font files via the Windows registry."""
    # Cache for getLocalFontRegPath(): None means "not computed yet",
    # '' means "computed, but no matching user profile was found".
    localFontRegPath = None

    def getLocalFontRegPath(cls):
        """Return (and cache) the HKEY_USERS subpath of this user's Fonts key.

        Scans the ProfileList key for the SID whose ProfileImagePath equals the
        current user's home directory; returns '' when nothing matches.
        """
        if (cls.localFontRegPath is not None):
            return cls.localFontRegPath
        import winreg
        home = os.path.expanduser('~')
        localFontPath = '\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Fonts'
        profileListPath = 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ProfileList'
        try:
            with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, profileListPath) as key:
                length = winreg.QueryInfoKey(key)[0]
                for i in range(length):
                    name = winreg.EnumKey(key, i)
                    with winreg.OpenKey(key, name) as subkey:
                        path = winreg.QueryValueEx(subkey, 'ProfileImagePath')[0]
                    if (path == home):
                        cls.localFontRegPath = (name + localFontPath)
                        return cls.localFontRegPath
        except WindowsError:
            pass
        cls.localFontRegPath = ''
        return cls.localFontRegPath

    def LoadFile(cls, name):
        """Return the font filename registered for `name` (TrueType).

        Checks the machine-wide Fonts key first, then the current user's key.
        Raises PyUnityException when the font is not registered in either.
        """
        import winreg
        # Fix: the original opened both registry keys without ever closing
        # them (handle leak); PyHKEY supports the context-manager protocol.
        with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Fonts\\') as key:
            try:
                return winreg.QueryValueEx(key, (name + ' (TrueType)'))[0]
            except WindowsError:
                pass
        localFontRegPath = cls.getLocalFontRegPath()
        with winreg.OpenKey(winreg.HKEY_USERS, localFontRegPath) as key:
            try:
                return winreg.QueryValueEx(key, (name + ' (TrueType)'))[0]
            except WindowsError:
                pass
        raise PyUnityException(f'Cannot find font named {name!r}')
def configure(config_object, testing=False):
    """Initialize the database proxies and helpers from application config.

    Sets up the write DB, dialect-specific SQL helper functions, the field
    encrypter, optional read replicas, and the transaction helpers.

    Args:
        config_object: mapping with DB_URI, DB_CONNECTION_ARGS and friends.
        testing: when True, skips the "must be under a transaction" guard.
    """
    logger.debug('Configuring database')
    db_kwargs = dict(config_object['DB_CONNECTION_ARGS'])
    write_db_uri = config_object['DB_URI']
    db.initialize(_db_from_url(write_db_uri, db_kwargs))
    # Pick dialect-specific implementations based on the driver name.
    parsed_write_uri = make_url(write_db_uri)
    db_random_func.initialize(SCHEME_RANDOM_FUNCTION[parsed_write_uri.drivername])
    db_match_func.initialize(SCHEME_MATCH_FUNCTION[parsed_write_uri.drivername])
    db_for_update.initialize(SCHEME_SPECIALIZED_FOR_UPDATE.get(parsed_write_uri.drivername, real_for_update))
    db_concat_func.initialize(SCHEME_SPECIALIZED_CONCAT.get(parsed_write_uri.drivername, function_concat))
    db_encrypter.initialize(FieldEncrypter(config_object.get('DATABASE_SECRET_KEY')))
    db_count_estimator.initialize(SCHEME_ESTIMATOR_FUNCTION[parsed_write_uri.drivername])
    # Read replicas fall back to the write connection args when not overridden.
    read_replicas = config_object.get('DB_READ_REPLICAS', None)
    is_read_only = (config_object.get('REGISTRY_STATE', 'normal') == 'readonly')
    read_replica_dbs = []
    if read_replicas:
        read_replica_dbs = [_db_from_url(ro_config['DB_URI'], ro_config.get('DB_CONNECTION_ARGS', db_kwargs), is_read_replica=True) for ro_config in read_replicas]
    read_only_config.initialize(ReadOnlyConfig(is_read_only, read_replica_dbs))
    def _db_transaction():
        # Delegate transaction creation to the configured factory.
        return config_object['DB_TRANSACTION_FACTORY'](db)
    def _db_disallow_replica_use():
        return disallow_replica_use(db)
    def _ensure_under_transaction():
        # NOTE(review): this is a generator used as a context manager — the
        # @contextmanager decorator appears stripped during extraction.
        if ((not testing) and (not config_object['TESTING'])):
            if (db.transaction_depth() == 0):
                raise Exception('Expected to be under a transaction')
        (yield)
    db_transaction.initialize(_db_transaction)
    db_disallow_replica_use.initialize(_db_disallow_replica_use)
    ensure_under_transaction.initialize(_ensure_under_transaction)
class ConditionRendererMixin():
    """XML-rendering mixin for condition elements (deduplicated by URI)."""

    def render_condition(self, xml, condition):
        """Render one condition dict to XML, once per unique URI.

        Also renders the condition's source attribute and target option when
        the corresponding context flags ('attributes' / 'options') are set.
        """
        # self.uris acts as a seen-set so each condition is emitted only once.
        if (condition['uri'] not in self.uris):
            self.uris.add(condition['uri'])
            xml.startElement('condition', {'dc:uri': condition['uri']})
            self.render_text_element(xml, 'uri_prefix', {}, condition['uri_prefix'])
            self.render_text_element(xml, 'uri_path', {}, condition['uri_path'])
            self.render_text_element(xml, 'dc:comment', {}, condition['comment'])
            # Source and target_option are emitted as URI references, not text.
            self.render_text_element(xml, 'source', {'dc:uri': condition['source']['uri']}, None)
            self.render_text_element(xml, 'relation', {}, condition['relation'])
            self.render_text_element(xml, 'target_text', {}, condition['target_text'])
            self.render_text_element(xml, 'target_option', {'dc:uri': (condition['target_option']['uri'] if (condition['target_option'] is not None) else None)}, None)
            xml.endElement('condition')
            # Optionally expand the referenced attribute/option inline.
            if self.context.get('attributes'):
                self.render_attribute(xml, condition['source'])
            if self.context.get('options'):
                if (condition['target_option'] is not None):
                    self.render_option(xml, condition['target_option'])
def test_set_client_cert_unsuccessful_multiple_values(tester: CommandTester, mocker: MockerFixture) -> None:
    """Passing two values for a client-cert setting must be rejected with a ValueError."""
    mocker.spy(ConfigSource, '__init__')
    with pytest.raises(ValueError) as exc_info:
        tester.execute('certificates.foo.client-cert path/to/cert.pem path/to/cert.pem')
    assert str(exc_info.value) == 'You must pass exactly 1 value'
class CmdPy(COMMAND_DEFAULT_CLASS):
    """Execute a Python snippet, open an editor, or start an interactive console.

    Switches:
        time      - measure execution time of the snippet.
        edit      - open an EvEditor buffer for multi-line code.
        clientraw - send output without client-side line processing.
    """
    key = 'py'
    aliases = ['!']
    switch_options = ('time', 'edit', 'clientraw')
    locks = 'cmd:perm(py) or perm(Developer)'
    help_category = 'System'

    def func(self):
        """Dispatch on switches/args: editor, interactive console, or one-shot snippet."""
        caller = self.caller
        pycode = self.args
        if ('edit' in self.switches):
            # Stash switch state on the caller so the editor's save/quit
            # callbacks can honor them later.
            caller.db._py_measure_time = ('time' in self.switches)
            caller.db._py_clientraw = ('clientraw' in self.switches)
            EvEditor(self.caller, loadfunc=_py_load, savefunc=_py_code, quitfunc=_py_quit, key='Python exec: :w or :!', persistent=True, codefunc=_py_code)
            return
        if (not pycode):
            # No code given: drop into a REPL driven by the command's
            # yield-based input protocol.
            console = EvenniaPythonConsole(self.caller)
            banner = f'''|gPython {sys.version} on {sys.platform}
Evennia interactive console mode - type 'exit()' to leave.|n'''
            self.msg(banner)
            line = ''
            prompt = '>>>'
            while (line.lower() not in ('exit', 'exit()')):
                # `yield prompt` suspends until the player enters a line.
                line = (yield prompt)
                # console.push() returns True while a statement is incomplete.
                prompt = ('...' if console.push(line) else '>>>')
            self.msg('|gClosing the Python console.|n')
            return
        # One-shot execution of the supplied code snippet.
        _run_code_snippet(caller, self.args, measure_time=('time' in self.switches), client_raw=('clientraw' in self.switches))
def test_installer_file_contains_valid_version(default_installation: Path) -> None:
    """The INSTALLER metadata file must record 'Poetry <version>' with a parseable version."""
    installer_file = default_installation / 'demo-0.1.0.dist-info' / 'INSTALLER'
    installer_content = installer_file.read_text()
    match = re.match('Poetry (?P<version>.*)', installer_content)
    assert match
    # parse_constraint raises if the recorded version string is invalid.
    parse_constraint(match.group('version'))
.parametrize('fixer, in_file', collect_all_test_fixtures(), ids=_get_id)
def test_check_fixture(in_file, fixer, tmpdir):
if fixer:
main('unittest2pytest.fixes', args=['--no-diffs', '--fix', fixer, '-w', in_file, '--nobackups', '--output-dir', str(tmpdir)])
else:
main('unittest2pytest.fixes', args=['--no-diffs', '--fix', 'all', '-w', in_file, '--nobackups', '--output-dir', str(tmpdir)])
result_file_name = tmpdir.join(os.path.basename(in_file))
assert result_file_name.exists(), ('%s is missing' % result_file_name)
result_file_contents = result_file_name.readlines()
expected_file = in_file.replace('_in.py', '_out.py')
with open(expected_file) as fh:
expected_contents = fh.readlines()
try:
compile(''.join(expected_contents), expected_file, 'exec')
except Exception as e:
pytest.fail(f'FATAL: {expected_file} does not compile: {e}', False)
if (result_file_contents != expected_contents):
text = "Refactured code doesn't match expected outcome\n"
text += ''.join(unified_diff(expected_contents, result_file_contents, 'expected', 'refactured result'))
pytest.fail(text, False) |
class TestDownsampledRowwiseOperation(WithAssetFinder, ZiplineTestCase):
    """Downsampled pipeline terms must agree regardless of the simulation start date."""
    T = partial(pd.Timestamp, tz='utc')
    START_DATE = T('2014-01-01')
    END_DATE = T('2014-02-01')
    # Asset 'B' delists here, so rank values change across this boundary.
    HALF_WAY_POINT = T('2014-01-15')
    dates = pd.date_range(START_DATE, END_DATE)
    ASSET_FINDER_COUNTRY_CODE = '??'

    class SidFactor(CustomFactor):
        """Trivial factor returning each asset's sid — deterministic rank input."""
        inputs = ()
        window_length = 1

        def compute(self, today, assets, out):
            out[:] = assets
    factor = SidFactor()

    # NOTE(review): this and make_equity_info are used as classmethods; the
    # @classmethod decorators appear stripped during extraction.
    def init_class_fixtures(cls):
        super(TestDownsampledRowwiseOperation, cls).init_class_fixtures()
        # get_loader should never be called (SidFactor has no inputs), hence
        # ExplodingObject.
        cls.pipeline_engine = SimplePipelineEngine(get_loader=(lambda c: ExplodingObject()), asset_finder=cls.asset_finder, default_domain=EquitySessionDomain(cls.dates, country_code=cls.ASSET_FINDER_COUNTRY_CODE))

    def make_equity_info(cls):
        start = (cls.START_DATE - pd.Timedelta(days=1))
        end = cls.END_DATE
        early_end = cls.HALF_WAY_POINT
        # 'B' ends early to exercise rank recomputation after a delisting.
        return pd.DataFrame([['A', 'Ayy Inc.', start, end, 'E'], ['B', 'early end', start, early_end, 'E'], ['C', 'C Inc.', start, end, 'E']], index=[ord('A'), ord('B'), ord('C')], columns=('symbol', 'asset_name', 'start_date', 'end_date', 'exchange'))

    def test_downsampled_rank(self):
        """Month-start-downsampled rank is identical whether the run starts at
        month start or mid-month."""
        downsampled_rank = self.factor.rank().downsample('month_start')
        pipeline = Pipeline({'rank': downsampled_rank})
        results_month_start = self.pipeline_engine.run_pipeline(pipeline, self.START_DATE, self.END_DATE)
        half_way_start = (self.HALF_WAY_POINT + pd.Timedelta(days=1))
        results_halfway_start = self.pipeline_engine.run_pipeline(pipeline, half_way_start, self.END_DATE)
        # Compare only the overlapping date range of the two runs.
        results_month_start_aligned = results_month_start.loc[half_way_start:]
        assert_frame_equal(results_month_start_aligned, results_halfway_start)
def test_metadata_with_wildcard_dependency_constraint() -> None:
    """A wildcard-excluding constraint must render as a PEP 440 exclusion in Requires-Dist."""
    fixture_dir = Path(__file__).parent / 'fixtures' / 'with_wildcard_dependency_constraint'
    builder = Builder(Factory().create_poetry(fixture_dir))
    metadata = Parser().parsestr(builder.get_metadata_content())
    requires = metadata.get_all('Requires-Dist')
    assert requires == ['google-api-python-client (>=1.8,!=2.0.*)']
class VelocityDiscriminator(Discriminator):
    """Siamese conv/MLP discriminator over pairs of images (TF1 graph style).

    Both images pass through a shared conv + dense feature extractor; the two
    feature vectors are concatenated and classified by an MLP into 2 classes.
    """

    def __init__(self, input_dim):
        super(VelocityDiscriminator, self).__init__(input_dim=input_dim)
        self.make_network(dim_input=input_dim, dim_output=2)
        self.init_tf()

    def make_network(self, dim_input, dim_output):
        """Build the TF graph: shared conv stem, shared dense layer, MLP head, loss.

        dim_input is (width, height, channels); dim_output is the class count.
        """
        n_mlp_layers = 4
        layer_size = 128
        # Hidden widths for the MLP head, final layer sized to the output.
        dim_hidden = ((n_mlp_layers - 1) * [layer_size])
        dim_hidden.append(dim_output)
        pool_size = 2
        filter_size = 3
        im_width = dim_input[0]
        im_height = dim_input[1]
        num_channels = dim_input[2]
        num_filters = [5, 5]
        (nn_input_one, nn_input_two, target) = self.get_input_layer(im_width, im_height, num_channels, dim_output)
        # Flattened size of the conv output after one max-pool.
        conv_out_size = int((((im_width * im_height) * num_filters[1]) / (2.0 * pool_size)))
        first_dense_size = conv_out_size
        # NOTE(review): only 'wc1' is created although biases define 'bc2' —
        # the second conv layer appears unused; confirm against the original.
        weights = {'wc1': self.get_xavier_weights([filter_size, filter_size, num_channels, num_filters[0]], (pool_size, pool_size))}
        biases = {'bc1': self.init_bias([num_filters[0]]), 'bc2': self.init_bias([num_filters[1]])}
        # Shared conv stem: the same weights process both inputs (siamese).
        conv_layer_0_input_one = self.conv2d(img=nn_input_one, w=weights['wc1'], b=biases['bc1'])
        conv_layer_0_input_two = self.conv2d(img=nn_input_two, w=weights['wc1'], b=biases['bc1'])
        conv_layer_0_input_one = self.max_pool(conv_layer_0_input_one, k=pool_size)
        conv_layer_0_input_two = self.max_pool(conv_layer_0_input_two, k=pool_size)
        conv_out_flat_input_one = tf.reshape(conv_layer_0_input_one, [(- 1), conv_out_size])
        conv_out_flat_input_two = tf.reshape(conv_layer_0_input_two, [(- 1), conv_out_size])
        # Shared dense layer: the same weight/bias transform both branches.
        cur_top = conv_out_flat_input_one
        in_shape = conv_out_size
        cur_weight = self.init_weights([in_shape, layer_size], name='w_feats_one')
        cur_bias = self.init_bias([layer_size], name='b_feats_one')
        conv_one_features = tf.nn.relu((tf.matmul(cur_top, cur_weight) + cur_bias))
        cur_top = conv_out_flat_input_two
        conv_two_features = tf.nn.relu((tf.matmul(cur_top, cur_weight) + cur_bias))
        # Concatenate branch features and classify with the MLP head.
        fc_input = tf.concat(axis=1, values=[conv_one_features, conv_two_features])
        fc_output = self.get_mlp_layers(fc_input, n_mlp_layers, dim_hidden)
        (loss, optimizer) = self.get_loss_layer(pred=fc_output, target_output=target)
        self.class_target = target
        self.nn_input = [nn_input_one, nn_input_two]
        self.discrimination_logits = fc_output
        self.optimizer = optimizer
        self.loss = loss

    # NOTE(review): no `self` parameter — likely a stripped @staticmethod.
    def get_input_layer(im_width, im_height, num_channels, dim_output=2):
        """Create placeholders for the two image inputs and the class targets."""
        net_input_one = tf.placeholder('float', [None, im_width, im_height, num_channels], name='nn_input_one')
        net_input_two = tf.placeholder('float', [None, im_width, im_height, num_channels], name='nn_input_two')
        targets = tf.placeholder('float', [None, dim_output], name='targets')
        return (net_input_one, net_input_two, targets)

    def train(self, data_batch, targets_batch):
        """Run one optimization step on a (pair_one, pair_two) batch; return the loss."""
        if (len(data_batch) != 2):
            raise ValueError('data batch should have length two')
        cost = self.sess.run([self.optimizer, self.loss], feed_dict={self.nn_input[0]: data_batch[0], self.nn_input[1]: data_batch[1], self.class_target: targets_batch})[1]
        return cost

    def __call__(self, data, softmax=True):
        """Return class probabilities (softmax=True) or raw logits for an image pair."""
        if (len(data) != 2):
            raise ValueError('data size is wrong')
        if (softmax is True):
            log_prob = self.sess.run([tf.nn.softmax(self.discrimination_logits)], feed_dict={self.nn_input[0]: data[0], self.nn_input[1]: data[1]})[0]
        else:
            log_prob = self.sess.run([self.discrimination_logits], feed_dict={self.nn_input[0]: data[0], self.nn_input[1]: data[1]})[0]
        return log_prob
def test_resnet_bottleneck():
    """Exercise ResNet Bottleneck: arg validation, styles, checkpointing, DCN and plugins."""
    # Invalid style string must be rejected.
    with pytest.raises(AssertionError):
        Bottleneck(64, 64, style='tensorflow')
    # Plugin with an unknown position must be rejected.
    with pytest.raises(AssertionError):
        plugins = [dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16)), position='after_conv4')]
        Bottleneck(64, 16, plugins=plugins)
    # Duplicate plugin names (same type, same position, no postfix) must be rejected.
    with pytest.raises(AssertionError):
        plugins = [dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16)), position='after_conv3'), dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16)), position='after_conv3')]
        Bottleneck(64, 16, plugins=plugins)
    # Unregistered plugin type must raise KeyError from the registry.
    with pytest.raises(KeyError):
        plugins = [dict(cfg=dict(type='WrongPlugin'), position='after_conv3')]
        Bottleneck(64, 16, plugins=plugins)
    # Gradient checkpointing (with_cp) keeps the output shape unchanged.
    block = Bottleneck(64, 16, with_cp=True)
    assert block.with_cp
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert (x_out.shape == torch.Size([1, 64, 56, 56]))
    # 'pytorch' style puts the stride on conv2; 'caffe' style on conv1.
    block = Bottleneck(64, 64, stride=2, style='pytorch')
    assert (block.conv1.stride == (1, 1))
    assert (block.conv2.stride == (2, 2))
    block = Bottleneck(64, 64, stride=2, style='caffe')
    assert (block.conv1.stride == (2, 2))
    assert (block.conv2.stride == (1, 1))
    # DCN requires the default conv_cfg; combining both must be rejected.
    dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
    with pytest.raises(AssertionError):
        Bottleneck(64, 64, dcn=dcn, conv_cfg=dict(type='Conv'))
    block = Bottleneck(64, 64, dcn=dcn)
    assert isinstance(block.conv2, DeformConv2dPack)
    # Plain forward pass preserves spatial shape.
    block = Bottleneck(64, 16)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert (x_out.shape == torch.Size([1, 64, 56, 56]))
    # Single plugin after conv3 sees the expanded (64) channel count.
    plugins = [dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16)), position='after_conv3')]
    block = Bottleneck(64, 16, plugins=plugins)
    assert (block.context_block.in_channels == 64)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert (x_out.shape == torch.Size([1, 64, 56, 56]))
    # Plugin after conv2 sees the bottleneck (16) channel count.
    plugins = [dict(cfg=dict(type='GeneralizedAttention', spatial_range=(- 1), num_heads=8, attention_type='0010', kv_stride=2), position='after_conv2')]
    block = Bottleneck(64, 16, plugins=plugins)
    assert (block.gen_attention_block.in_channels == 16)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert (x_out.shape == torch.Size([1, 64, 56, 56]))
    # Mixed plugins at different positions get the position-appropriate widths.
    plugins = [dict(cfg=dict(type='GeneralizedAttention', spatial_range=(- 1), num_heads=8, attention_type='0010', kv_stride=2), position='after_conv2'), dict(cfg=dict(type='NonLocal2d'), position='after_conv2'), dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16)), position='after_conv3')]
    block = Bottleneck(64, 16, plugins=plugins)
    assert (block.gen_attention_block.in_channels == 16)
    assert (block.nonlocal_block.in_channels == 16)
    assert (block.context_block.in_channels == 64)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert (x_out.shape == torch.Size([1, 64, 56, 56]))
    # Same plugin type repeated with postfixes creates numbered attributes.
    plugins = [dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16), postfix=1), position='after_conv2'), dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16), postfix=2), position='after_conv3'), dict(cfg=dict(type='ContextBlock', ratio=(1.0 / 16), postfix=3), position='after_conv3')]
    block = Bottleneck(64, 16, plugins=plugins)
    assert (block.context_block1.in_channels == 16)
    assert (block.context_block2.in_channels == 64)
    assert (block.context_block3.in_channels == 64)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert (x_out.shape == torch.Size([1, 64, 56, 56]))
class Predictor(BasePredictor):
    """Cog predictor for exemplar-based image translation (content + style -> output)."""

    def setup(self):
        """Instantiate the encoder, generator and style sampler (weights loaded per-request)."""
        self.device = 'cuda'
        self.netEC = ContentEncoder()
        self.netEC.eval()
        self.netG = Generator()
        self.netG.eval()
        # ICP sampler draws style codes when no style image is supplied.
        self.sampler = ICPTrainer(np.empty([0, 256]), 128)

    def predict(self, task: str=Input(choices=TASKS, default='cat2dog', description='Choose style type.'), content: Path=Input(description='Input content image, it will be resized to 256'), style: Path=Input(default=None, description='Input style image, it will be resized to 256')) -> Path:
        """Translate `content` using `style` (or 4 sampled styles) with the `task` checkpoint.

        Returns the path of the written PNG.
        """
        # map_location keeps CUDA-saved tensors on CPU until we .to(device).
        self.netEC.load_state_dict(torch.load('checkpoint/content_encoder.pt', map_location=(lambda storage, loc: storage)))
        ckpt = torch.load(f'checkpoint/{task}.pt', map_location=(lambda storage, loc: storage))
        self.netG.load_state_dict(ckpt['g_ema'])
        self.sampler.icp.netT.load_state_dict(ckpt['sampler'])
        self.netEC = self.netEC.to(self.device)
        self.netG = self.netG.to(self.device)
        self.sampler.icp.netT = self.sampler.icp.netT.to(self.device)
        print('Model successfully loaded!')
        Ix = F.interpolate(load_image(str(content)), size=256, mode='bilinear', align_corners=True)
        if (style is not None):
            Iy = F.interpolate(load_image(str(style)), size=256, mode='bilinear', align_corners=True)
        with torch.no_grad():
            content_feature = self.netEC(Ix.to(self.device), get_feature=True)
        with torch.no_grad():
            if (style is not None):
                # Style image provided: translate once with it.
                (I_yhat, _) = self.netG(content_feature, Iy.to(self.device))
            else:
                # No style: sample 4 random style codes and tile the outputs 2x2.
                style_features = self.sampler.icp.netT(torch.randn(4, 128).to(self.device))
                (I_yhat, _) = self.netG(content_feature.repeat(4, 1, 1, 1), style_features, useZ=True)
                I_yhat = torchvision.utils.make_grid(I_yhat, 2, 0)
        out_path = (Path(tempfile.mkdtemp()) / 'output.png')
        save_image(I_yhat[0].cpu(), str(out_path))
        return out_path
def update_figure(iframe, *args):
    """Matplotlib animation callback: advance the Okada source to frame
    `iframe`, recompute the sandbox model, and refresh each component image.

    Relies on surrounding-scope state referenced here: `okada`, `depths`,
    `strikes`, `sandbox`, `images`, `components`, `imargs`.
    """
    print(('Updating figure! (frame %03d)' % iframe))
    # Drive the source parameters from the per-frame schedules.
    okada.depth = depths[iframe]
    okada.strike = strikes[iframe]
    sandbox.processSources()
    for (im, comp) in zip(images, components):
        # NOTE(review): this result is never used and shadows the *args
        # parameter — possibly `im.set_data(...)` was meant to consume it;
        # confirm the intended call before cleaning up.
        args = imargs(comp)
        im.set_data(comp)
    # Returning the artists lets FuncAnimation use blitting.
    return images
def register_all_lvis(root):
    """Register every predefined LVIS split under `root` with the dataset
    catalog.  JSON paths that already contain a URI scheme ('://') are
    used verbatim instead of being joined onto `root`."""
    for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_LVIS.items():
        for key, (image_root, json_file) in splits_per_dataset.items():
            # Remote annotation files keep their full URI; local ones are
            # resolved relative to the dataset root.
            if '://' in json_file:
                json_path = json_file
            else:
                json_path = os.path.join(root, json_file)
            register_lvis_instances(
                key,
                get_lvis_instances_meta(dataset_name),
                json_path,
                os.path.join(root, image_root),
            )
class _FdHolder():
fd: int
def __init__(self, fd: int) -> None:
self.fd = (- 1)
if (not isinstance(fd, int)):
raise TypeError('file descriptor must be an int')
self.fd = fd
self._original_is_blocking = os.get_blocking(fd)
os.set_blocking(fd, False)
def closed(self) -> bool:
return (self.fd == (- 1))
def _raw_close(self) -> None:
if self.closed:
return
fd = self.fd
self.fd = (- 1)
os.set_blocking(fd, self._original_is_blocking)
os.close(fd)
def __del__(self) -> None:
self._raw_close()
def close(self) -> None:
if (not self.closed):
trio.lowlevel.notify_closing(self.fd)
self._raw_close() |
class cached_property(property):
    """A ``property`` whose getter runs at most once per instance.

    The computed value is stashed on the instance under a private
    attribute name; subsequent reads return the stash without invoking
    the getter again.  Subclassing ``property`` keeps introspection
    (``isinstance(x, property)``) working.
    """

    def __get__(self, obj, objtype=None):
        # Class-level access returns the descriptor itself, like property.
        if (obj is None):
            return self
        if (self.fget is None):
            raise AttributeError('unreadable attribute')
        attr = ('__cached_' + self.fget.__name__)
        # BUG FIX: the old code used None as the "not cached yet" marker,
        # so a getter that legitimately returned None was re-run on every
        # access.  A unique sentinel caches None results correctly too.
        missing = object()
        cached = getattr(obj, attr, missing)
        if cached is missing:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def get_named_bins_formatter(bins, names, show_values=False):
    """Build a matplotlib tick-formatter that labels values with the name
    of the bin they fall into.

    `bins` holds the bin edges and `names` must contain one more entry
    than `bins`.  With `show_values=True` the label shows the value on
    the first line and the bin name on the second.
    """

    def formatter(x, pos):
        # Validate lazily, at draw time, so a mismatch surfaces where the
        # formatter is actually used.
        if len(names) != len(bins) + 1:
            raise AssertionError(f'EOmaps: the provided number of names ({len(names)}) does not match! Expected {(len(bins) + 1)} names.')
        idx = np.digitize(x, bins, right=True)
        if not show_values:
            return names[idx]
        return f'{x}\n{names[idx]}'

    return formatter
def prune_by_percentile(percent, resample=False, reinit=False, **kwargs):
    """Zero out the smallest `percent` percentile of surviving weights in
    each weight tensor of the global `model`, updating the global `mask`.

    The cutoff is computed per-layer from the magnitudes of the currently
    non-zero ("alive") weights only.  `resample` and `reinit` are accepted
    for API compatibility but unused here.
    """
    global step
    global mask
    global model
    step = 0
    for name, param in model.named_parameters():
        # Biases (and anything else that is not a weight) stay untouched,
        # and they do not advance the per-layer mask index.
        if 'weight' not in name:
            continue
        weights = param.data.cpu().numpy()
        alive = weights[np.nonzero(weights)]
        cutoff = np.percentile(abs(alive), percent)
        device = param.device
        # Kill weights below the cutoff; elsewhere keep the existing mask.
        updated_mask = np.where(abs(weights) < cutoff, 0, mask[step])
        param.data = torch.from_numpy(weights * updated_mask).to(device)
        mask[step] = updated_mask
        step += 1
    step = 0
def test_keithley2000(monkeypatch):
    """Smoke-test driving a Keithley 2000 DMM via the legacy
    ``visa.GpibInstrument`` API.

    The statement order mirrors the instrument's trigger/SRQ protocol,
    so these calls must not be reordered.
    """
    # Patch class attributes so the test runs without a live VISA session:
    # pretend the interface is GPIB and the status byte already signals SRQ.
    monkeypatch.setattr(visa.GpibInstrument, 'interface_type', VI_INTF_GPIB)
    monkeypatch.setattr(visa.GpibInstrument, 'stb', 64)
    print('Test start')
    # GPIB primary address 12.
    keithley = visa.GpibInstrument(12)
    milliseconds = 500
    number_of_values = 10
    # Device-specific (pre-SCPI) command string configuring interval and
    # reading count.  NOTE(review): exact mnemonic meanings come from the
    # instrument manual — confirm against the hardware docs.
    keithley.write(('F0B2M2G0T2Q%dI%dX' % (milliseconds, number_of_values)).encode('ascii'))
    keithley.trigger()
    # Block until the instrument raises SRQ, i.e. readings are ready.
    keithley.wait_for_srq()
    voltages = keithley.read_floats()
    if voltages:
        print('Average: ', (sum(voltages) / len(voltages)))
    print('Test end')
class TestPluginManager():
    """Covers how MockBuilder acquires its PluginManager."""

    def test_default(self, isolation):
        # With no explicit manager, the builder constructs its own.
        builder = MockBuilder(str(isolation))
        assert isinstance(builder.plugin_manager, PluginManager)

    def test_reuse(self, isolation):
        # A manager passed in is stored as-is, not copied or wrapped.
        shared_manager = PluginManager()
        builder = MockBuilder(str(isolation), plugin_manager=shared_manager)
        assert builder.plugin_manager is shared_manager
# NOTE(review): bare call — this looks like a truncated decorator
# (e.g. `@register_network('example')`) for the class below; as written the
# return value is discarded.  Confirm against the upstream source.
_network('example')
class ExampleGNN(torch.nn.Module):
    """Minimal example GNN for GraphGym: a stack of identical graph-conv
    layers followed by the task-specific prediction head taken from the
    registry."""

    def __init__(self, dim_in, dim_out, num_layers=2, model_type='GCN'):
        super().__init__()
        conv_cls = self.build_conv_model(model_type)
        # num_layers identical dim_in -> dim_in convolution layers.
        self.convs = nn.ModuleList(
            conv_cls(dim_in, dim_in) for _ in range(num_layers)
        )
        # The head is selected by the configured dataset task.
        GNNHead = register.head_dict[cfg.dataset.task]
        self.post_mp = GNNHead(dim_in=dim_in, dim_out=dim_out)

    def build_conv_model(self, model_type):
        """Map a model-type name to its PyG convolution class."""
        if (model_type == 'GCN'):
            return pyg_nn.GCNConv
        if (model_type == 'GAT'):
            return pyg_nn.GATConv
        if (model_type == 'GraphSage'):
            return pyg_nn.SAGEConv
        raise ValueError(f'Model {model_type} unavailable')

    def forward(self, batch):
        x, edge_index = batch.x, batch.edge_index
        # conv -> ReLU -> dropout for every layer in the stack.
        for conv in self.convs:
            x = conv(x, edge_index)
            x = F.relu(x)
            x = F.dropout(x, p=0.1, training=self.training)
        batch.x = x
        batch = self.post_mp(batch)
        return batch
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.