@_plugin.register_validator(AscIntSequence)
def validate_ascending_seq(data: list, level):
    # One magic input deliberately raises a non-ValidationError, so callers can
    # exercise how the plugin machinery handles unexpected exception types.
    if data == [2021, 8, 24]:
        raise KeyError
    prev = float('-inf')
    for number in data:
        if number <= prev:
            raise ValidationError('%s is not greater than %s' % (number, prev))
        prev = number

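# Usage sketch (illustrative; ValidationError and the plugin registry come from
# the host validation library, which is not shown in this excerpt):
#   validate_ascending_seq([1, 2, 3], level=0)   # returns None, data is ascending
#   validate_ascending_seq([3, 2], level=0)      # raises ValidationError
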
@keras_test
def test_batchnorm_correctness():
model = Sequential()
norm = normalization.BatchNormalization(input_shape=(10,), momentum=0.8)
model.add(norm)
model.compile(loss='mse', optimizer='sgd')
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
model.fit(x, x, epochs=4, verbose=0)
out = model.predict(x)
out -= K.eval(norm.beta)
out /= K.eval(norm.gamma)
assert_allclose(out.mean(), 0.0, atol=0.1)
    assert_allclose(out.std(), 1.0, atol=0.1)

def usage():
printerr('Usage is: export-to-sqlite.py <database name> [<columns>] [<calls>] [<callchains>] [<pyside-version-1>]')
printerr("where: columns 'all' or 'branches'")
printerr(" calls 'calls' => create calls and call_paths table")
printerr(" callchains 'callchains' => create call_paths table")
printerr(" pyside-version-1 'pyside-version-1' => use pyside version 1")
    raise Exception('Too few or bad arguments')

def add_metaopt_configvars():
    config.add('metaopt__verbose', '0 for silent, 1 for only warnings, 2 for full output with timings and selected implementation', IntParam(0), in_c_key=False)
config.add('metaopt__optimizer_excluding', "exclude optimizers with these tags. Separate tags with ':'.", StrParam(''), in_c_key=False)
    config.add('metaopt__optimizer_including', "include optimizers with these tags. Separate tags with ':'.", StrParam(''), in_c_key=False)

def transform_records(log_records: Iterable[Optional[Record]], replacements: Dict[str, Any]) -> Generator[Record, None, None]:
def replace(value: Any) -> Any:
if (isinstance(value, tuple) and hasattr(value, '_fields')):
return type(value)(*[replace(inner) for inner in value])
if isinstance(value, (list, tuple)):
return type(value)((replace(inner) for inner in value))
elif isinstance(value, dict):
return {replace(k): replace(v) for (k, v) in value.items()}
str_value = str(value).casefold()
if isinstance(value, str):
keys_in_value = [key for key in replacement_keys if (key in str_value)]
for key in keys_in_value:
try:
repl_start = str_value.index(key)
except ValueError:
continue
value = f'{value[:repl_start]}{replacements[key]}{value[(repl_start + len(key)):]}'
str_value = value.casefold()
return replacements.get(str_value, value)
replacements = {str(k).casefold(): v for (k, v) in replacements.items()}
for (k, v) in copy(replacements).items():
if (isinstance(k, str) and k.startswith('0x') and is_address(k)):
bytes_address = to_canonical_address(k)
replacements[pex(bytes_address)] = v
replacements[repr(bytes_address).casefold()] = v
replacement_keys = replacements.keys()
for record in log_records:
        yield replace(record)

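# Usage sketch (illustrative; Record is assumed to be a namedtuple-like log
# record, and the replacement key below is hypothetical):
#   from collections import namedtuple
#   Rec = namedtuple('Rec', 'msg')
#   list(transform_records([Rec('token SECRET here')], {'secret': '***'}))
#   -> [Rec(msg='token *** here')]
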
def test_measurable_elemwise():
with pytest.raises(TypeError, match=re.escape('scalar_op exp is not valid')):
MeasurableElemwise(exp)
class TestMeasurableElemwise(MeasurableElemwise):
valid_scalar_types = (Exp,)
measurable_exp_op = TestMeasurableElemwise(scalar_op=exp)
measurable_exp = measurable_exp_op(0.0)
    assert isinstance(measurable_exp.owner.op, MeasurableVariable)

class AnnouncementMonth(AnnouncementMixin, MonthArchiveView):
template_name = 'dictionary/announcements/month.html'
date_list_period = 'month'
context_object_name = 'latest'
def get_date_list(self, queryset, **kwargs):
        return super().get_date_list(queryset=self.model.objects.all(), ordering='DESC', **kwargs)

class DuplicateDialog(Gtk.Window):
def __quit(self, widget=None, response=None):
if ((response == Gtk.ResponseType.OK) or (response == Gtk.ResponseType.CLOSE)):
print_d('Exiting plugin on user request...')
self.finished = True
self.destroy()
return
def __songs_popup_menu(self, songlist):
(path, col) = songlist.get_cursor()
menu = songlist.menu(app.library)
if (menu is not None):
return songlist.popup_menu(menu, 0, Gtk.get_current_event_time())
def __init__(self, model):
songs_text = numeric_phrase('%d duplicate group', '%d duplicate groups', len(model))
super().__init__()
self.set_destroy_with_parent(True)
self.set_title(f'Quod Libet - {Duplicates.PLUGIN_NAME} ({songs_text})')
self.finished = False
self.set_default_size(960, 480)
self.set_border_width(6)
swin = Gtk.ScrolledWindow()
swin.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
swin.set_shadow_type(Gtk.ShadowType.IN)
view = DuplicateSongsView(model)
def cell_text(column, cell, model, iter_, index):
text = model[iter_][index]
cell.markup = text
cell.set_property('markup', text)
for (i, (tag, _f)) in enumerate(DuplicatesTreeModel.TAG_MAP):
e = (Pango.EllipsizeMode.START if (tag == '~filename') else Pango.EllipsizeMode.END)
render = Gtk.CellRendererText()
render.set_property('ellipsize', e)
col = Gtk.TreeViewColumn(util.tag(tag), render)
if tag.startswith('~#'):
col.set_fixed_width(80)
col.set_sizing(Gtk.TreeViewColumnSizing.FIXED)
else:
col.set_expand(True)
col.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
col.set_resizable(True)
col.set_cell_data_func(render, cell_text, (i + 1))
view.append_column(col)
view.connect('popup-menu', self.__songs_popup_menu)
swin.add(view)
hbox = Gtk.HBox(spacing=6)
def expand_all(*args):
model = view.get_model()
for row in model:
if view.row_expanded(row.path):
view.collapse_row(row.path)
else:
for row in model:
view.expand_row(row.path, False)
expand = Gtk.Button(_('Collapse / Expand all'))
connect_obj(expand, 'clicked', expand_all, view)
hbox.pack_start(expand, False, True, 0)
label = Gtk.Label(label=(_("Duplicate key expression is '%s'") % Duplicates.get_key_expression()))
hbox.pack_start(label, True, True, 0)
close = Button(_('_Close'), Icons.WINDOW_CLOSE)
close.connect('clicked', self.__quit)
hbox.pack_start(close, False, True, 0)
vbox = Gtk.VBox(spacing=6)
vbox.pack_start(swin, True, True, 0)
vbox.pack_start(hbox, False, True, 0)
self.add(vbox)
        self.show_all()

class MaxPressureAgent(Agent):
def __init__(self, dic_agent_conf, dic_traffic_env_conf, dic_path, cnt_round, intersection_id):
super(MaxPressureAgent, self).__init__(dic_agent_conf, dic_traffic_env_conf, dic_path, intersection_id)
self.current_phase_time = 0
self.phase_length = len(self.dic_traffic_env_conf['PHASE'][self.dic_traffic_env_conf['SIMULATOR_TYPE']])
if (self.dic_traffic_env_conf['SIMULATOR_TYPE'] == 'anon'):
self.DIC_PHASE_MAP = {1: 0, 2: 1, 3: 2, 4: 3, 0: 0}
else:
self.DIC_PHASE_MAP = {0: 0, 1: 1, 2: 2, 3: 3, (- 1): (- 1)}
def choose_action(self, count, state):
if (state['cur_phase'][0] == (- 1)):
return self.action
cur_phase = self.DIC_PHASE_MAP[state['cur_phase'][0]]
phase_1 = max((np.sum(state['coming_vehicle'][:6]) - np.sum(state['leaving_vehicle'][:6])), 0)
phase_2 = max((np.sum(state['coming_vehicle'][6:]) - np.sum(state['leaving_vehicle'][6:])), 0)
if (len(state['coming_vehicle']) != 12):
phase_1 = max((((np.sum(state['coming_vehicle'][3:6]) + np.sum(state['coming_vehicle'][12:15])) - np.sum(state['leaving_vehicle'][3:6])) - np.sum(state['leaving_vehicle'][12:15])), 0)
phase_2 = max((((np.sum(state['coming_vehicle'][21:24]) + np.sum(state['coming_vehicle'][30:33])) - np.sum(state['leaving_vehicle'][21:24])) - np.sum(state['leaving_vehicle'][30:33])), 0)
phase_3 = max((((np.sum(state['coming_vehicle'][0:3]) + np.sum(state['coming_vehicle'][9:12])) - np.sum(state['leaving_vehicle'][0:3])) - np.sum(state['leaving_vehicle'][9:12])), 0)
phase_4 = max((((np.sum(state['coming_vehicle'][18:21]) + np.sum(state['coming_vehicle'][27:30])) - np.sum(state['leaving_vehicle'][18:21])) - np.sum(state['leaving_vehicle'][27:30])), 0)
self.action = np.argmax([phase_1, phase_2, phase_3, phase_4])
if (state['cur_phase'][0] == self.action):
self.current_phase_time += 1
else:
self.current_phase_time = 0
return self.action
if (self.dic_traffic_env_conf['ACTION_PATTERN'] == 'set'):
self.action = np.argmax([phase_1, phase_2])
if (state['cur_phase'][0] == self.action):
self.current_phase_time += 1
else:
self.current_phase_time = 0
return self.action
elif ((state['time_this_phase'][0] >= self.dic_agent_conf['FIXED_TIME'][cur_phase]) and (cur_phase != (- 1))):
self.current_phase_time = 0
self.action = 1
return 1
else:
self.current_phase_time += 1
self.action = 0
return 0
def round_up(self, x, b=5):
round_x = (b * np.ceil((x.astype(float) / b))).astype(int)
round_x[np.where((round_x < self.dic_agent_conf['MIN_PHASE_TIME']))] = self.dic_agent_conf['MIN_PHASE_TIME']
return round_x
def get_phase_split(self, traffic_demand, ratio):
h = 2.45
tL_set = 5
tL = 14
PHF = 1
vc = 1
N = 4
vehicles_count_for_critical_lane_phase = (traffic_demand * (1 + ratio))
max_allowed_vol = (((3600 / h) * PHF) * vc)
total_vol = np.sum(vehicles_count_for_critical_lane_phase)
if ((total_vol / max_allowed_vol) > 0.95):
cycle_length = ((N * tL) / (1 - 0.95))
else:
cycle_length = ((N * tL) / (1 - (total_vol / max_allowed_vol)))
if (cycle_length < 0):
sys.exit('cycle length calculation error')
effect_cycle_length = (cycle_length - (tL_set * N))
if (np.sum(vehicles_count_for_critical_lane_phase) != 0):
phase_split = ((np.copy(vehicles_count_for_critical_lane_phase) / np.sum(vehicles_count_for_critical_lane_phase)) * effect_cycle_length)
else:
phase_split = (np.full(shape=(len(vehicles_count_for_critical_lane_phase),), fill_value=(1 / len(vehicles_count_for_critical_lane_phase))) * effect_cycle_length)
phase_split = (int(phase_split) + 1)
green = (int((phase_split / (1 + ratio))) + 1)
red = (int(((phase_split / (1 + ratio)) * ratio)) + 1)
phase_split = np.array([green, red])
phase_split = self.round_up(phase_split, b=self.dic_agent_conf['MIN_PHASE_TIME'])
        if self.IF_MULTI:
green1 = (green / 7)
green2 = ((green / 7) * 6)
red1 = (red / 7)
red2 = ((red / 7) * 6)
phase_split = np.array([green2, green1, red2, red1])
phase_split = self.round_up(phase_split, b=self.dic_agent_conf['MIN_PHASE_TIME'])
return phase_split
        return phase_split

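# Worked example of the max-pressure rule above: if the summed coming vehicles
# for the two phase groups are [4, 1] and the leaving vehicles are [1, 0], the
# pressures are max(4 - 1, 0) = 3 and max(1 - 0, 0) = 1, so np.argmax picks
# phase 0.
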
class RegistrationForm(form.Form):
login = fields.StringField(render_kw={'placeholder': 'Username'})
password = fields.PasswordField(render_kw={'placeholder': 'Password'})
def validate_login(self, field):
if User.objects(login=self.login.data):
            raise validators.ValidationError('Duplicate username')

def get_resnetd(blocks, conv1_stride=True, width_scale=1.0, model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs):
if (blocks == 10):
layers = [1, 1, 1, 1]
elif (blocks == 12):
layers = [2, 1, 1, 1]
elif (blocks == 14):
layers = [2, 2, 1, 1]
elif (blocks == 16):
layers = [2, 2, 2, 1]
elif (blocks == 18):
layers = [2, 2, 2, 2]
elif (blocks == 34):
layers = [3, 4, 6, 3]
elif (blocks == 50):
layers = [3, 4, 6, 3]
elif (blocks == 101):
layers = [3, 4, 23, 3]
elif (blocks == 152):
layers = [3, 8, 36, 3]
elif (blocks == 200):
layers = [3, 24, 36, 3]
else:
raise ValueError('Unsupported ResNet(D) with number of blocks: {}'.format(blocks))
init_block_channels = 64
if (blocks < 50):
channels_per_layers = [64, 128, 256, 512]
bottleneck = False
else:
channels_per_layers = [256, 512, 1024, 2048]
bottleneck = True
channels = [([ci] * li) for (ci, li) in zip(channels_per_layers, layers)]
if (width_scale != 1.0):
channels = [[(int((cij * width_scale)) if ((i != (len(channels) - 1)) or (j != (len(ci) - 1))) else cij) for (j, cij) in enumerate(ci)] for (i, ci) in enumerate(channels)]
init_block_channels = int((init_block_channels * width_scale))
net = ResNetD(channels=channels, init_block_channels=init_block_channels, bottleneck=bottleneck, conv1_stride=conv1_stride, **kwargs)
if pretrained:
if ((model_name is None) or (not model_name)):
raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.')
from .model_store import download_model
download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
    return net

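# Usage sketch (hedged): the depth table above maps block counts to layer
# configurations, e.g. a half-width ResNet(D)-50 without pretrained weights:
#   net = get_resnetd(blocks=50, conv1_stride=False, width_scale=0.5)
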
def url_name(request):
try:
match = resolve(request.path)
except Resolver404:
return {}
else:
(namespace, url_name_) = (match.namespace, match.url_name)
if namespace:
url_name_ = f'{namespace}:{url_name_}'
    return {'URL_NAMESPACE': namespace, 'URL_NAME': url_name_}

def user_details(strategy, details, backend, user=None, *args, **kwargs):
if (not user):
return
changed = False
if (strategy.setting('NO_DEFAULT_PROTECTED_USER_FIELDS') is True):
protected = ()
else:
protected = ('username', 'id', 'pk', 'email', 'password', 'is_active', 'is_staff', 'is_superuser')
protected = (protected + tuple(strategy.setting('PROTECTED_USER_FIELDS', [])))
field_mapping = strategy.setting('USER_FIELD_MAPPING', {}, backend)
for (name, value) in details.items():
name = field_mapping.get(name, name)
if ((value is None) or (not hasattr(user, name)) or (name in protected)):
continue
current_value = getattr(user, name, None)
if (current_value == value):
continue
immutable_fields = tuple(strategy.setting('IMMUTABLE_USER_FIELDS', []))
if ((name in immutable_fields) and current_value):
continue
changed = True
setattr(user, name, value)
if changed:
        strategy.storage.user.changed(user)

def expect_element(__funcname=_qualified_name, **named):
def _expect_element(collection):
if isinstance(collection, (set, frozenset)):
collection_for_error_message = tuple(sorted(collection))
else:
collection_for_error_message = collection
template = "%(funcname)s() expected a value in {collection} for argument '%(argname)s', but got %(actual)s instead.".format(collection=collection_for_error_message)
return make_check(ValueError, template, complement(op.contains(collection)), repr, funcname=__funcname)
    return preprocess(**valmap(_expect_element, named))

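# Usage sketch (illustrative; make_check, preprocess, valmap and complement
# come from the surrounding validation module and are assumed here):
#   @expect_element(side={'buy', 'sell'})
#   def place(side): ...
#   place('hold')  # ValueError: place() expected a value in ('buy', 'sell')
#                  # for argument 'side', but got 'hold' instead.
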
def cov_maxGrad_off_maxHess(value_at_max, sigma, l):
d = len(value_at_max)
num_hessian_combo = int(((d * (d - 1)) / 2))
cov_matrix = np.zeros((d, num_hessian_combo))
for i in range(d):
index = 0
for j in range(d):
for k in range((j + 1), d):
cov_matrix[(i, index)] = cov_devdevY_devX(value_at_max, value_at_max, sigma, l, j, k, i)
index = (index + 1)
    return cov_matrix

class Effect7211(BaseEffect):
type = 'passive'
    @staticmethod
    def handler(fit, ship, context, projectionRange, **kwargs):
        fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Medium Precursor Weapon')), 'damageMultiplierBonusMax', ship.getModifiedItemAttr('eliteBonusHeavyGunship1'), skill='Heavy Assault Cruisers', **kwargs)

class All2Cross(nn.Module):
def __init__(self, config, img_size=224, in_chans=3, embed_dim=(96, 384), norm_layer=nn.LayerNorm):
super().__init__()
self.cross_pos_embed = config.cross_pos_embed
self.pyramid = PyramidFeatures(config=config, img_size=img_size, in_channels=in_chans)
n_p1 = ((config.image_size // config.patch_size) ** 2)
n_p2 = (((config.image_size // config.patch_size) // 4) ** 2)
num_patches = (n_p1, n_p2)
self.num_branches = 2
self.pos_embed = nn.ParameterList([nn.Parameter(torch.zeros(1, (1 + num_patches[i]), embed_dim[i])) for i in range(self.num_branches)])
total_depth = sum([sum(x[(- 2):]) for x in config.depth])
dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, total_depth)]
dpr_ptr = 0
self.blocks = nn.ModuleList()
for (idx, block_config) in enumerate(config.depth):
curr_depth = (max(block_config[:(- 1)]) + block_config[(- 1)])
dpr_ = dpr[dpr_ptr:(dpr_ptr + curr_depth)]
blk = MultiScaleBlock(embed_dim, num_patches, block_config, num_heads=config.num_heads, mlp_ratio=config.mlp_ratio, qkv_bias=config.qkv_bias, qk_scale=config.qk_scale, drop=config.drop_rate, attn_drop=config.attn_drop_rate, drop_path=dpr_, norm_layer=norm_layer)
dpr_ptr += curr_depth
self.blocks.append(blk)
self.norm = nn.ModuleList([norm_layer(embed_dim[i]) for i in range(self.num_branches)])
for i in range(self.num_branches):
if self.pos_embed[i].requires_grad:
trunc_normal_(self.pos_embed[i], std=0.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if (isinstance(m, nn.Linear) and (m.bias is not None)):
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
    @torch.jit.ignore
def no_weight_decay(self):
out = {'cls_token'}
if self.pos_embed[0].requires_grad:
out.add('pos_embed')
return out
def forward(self, x):
xs = self.pyramid(x)
if self.cross_pos_embed:
for i in range(self.num_branches):
xs[i] += self.pos_embed[i]
for blk in self.blocks:
xs = blk(xs)
xs = [self.norm[i](x) for (i, x) in enumerate(xs)]
        return xs

def test_dual_map(page: Page):
page.get_by_role('link', name='misc examples').click()
page.get_by_role('link', name='misc examples').click()
expect(page).to_have_title('streamlit-folium documentation: Misc Examples')
page.locator('label').filter(has_text='Dual map').click()
page.locator('label').filter(has_text='Dual map').click()
try:
page.frame_locator('iframe[title="streamlit_folium\\.st_folium"]').locator('#map_div').get_by_role('img').click()
page.frame_locator('iframe[title="streamlit_folium\\.st_folium"]').locator('#map_div2').get_by_role('img').click()
except Exception as e:
page.screenshot(path='screenshot-dual-map.png', full_page=True)
        raise e

def add_eval_sample_opts(parser):
parser.add_argument('--sample_method', type=str, default='greedy', help='greedy; sample; gumbel; top<int>, top<0-1>')
parser.add_argument('--beam_size', type=int, default=1, help='used when sample_method = greedy, indicates number of beams in beam search. Usually 2 or 3 works well. More is not better. Set this to 1 for faster runtime but a bit worse performance.')
parser.add_argument('--max_length', type=int, default=36, help='Maximum length during sampling')
parser.add_argument('--length_penalty', type=str, default='', help='wu_X or avg_X, X is the alpha')
parser.add_argument('--group_size', type=int, default=1, help="used for diverse beam search. if group_size is 1, then it's normal beam search")
parser.add_argument('--diversity_lambda', type=float, default=0.5, help='used for diverse beam search. Usually from 0.2 to 0.8. Higher value of lambda produces a more diverse list')
parser.add_argument('--temperature', type=float, default=1.0, help='temperature when sampling from distributions (i.e. when sample_method = sample). Lower = "safer" predictions.')
parser.add_argument('--decoding_constraint', type=int, default=0, help='If 1, not allowing same word in a row')
parser.add_argument('--block_trigrams', type=int, default=0, help='block repeated trigram.')
parser.add_argument('--remove_bad_endings', type=int, default=0, help='Remove bad endings')
    parser.add_argument('--suppress_UNK', type=int, default=1, help='Not predicting UNK')

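# Usage sketch: attach the sampling options to a parser (the argparse import is
# assumed from the original module):
#   parser = argparse.ArgumentParser()
#   add_eval_sample_opts(parser)
#   opts = parser.parse_args(['--sample_method', 'sample', '--temperature', '0.7'])
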
def determine_num_input_channels(plans_manager: PlansManager, configuration_or_config_manager: Union[str, ConfigurationManager], dataset_json: dict) -> int:
if isinstance(configuration_or_config_manager, str):
config_manager = plans_manager.get_configuration(configuration_or_config_manager)
else:
config_manager = configuration_or_config_manager
label_manager = plans_manager.get_label_manager(dataset_json)
num_modalities = (len(dataset_json['modality']) if ('modality' in dataset_json.keys()) else len(dataset_json['channel_names']))
if (config_manager.previous_stage_name is not None):
num_label_inputs = len(label_manager.foreground_labels)
num_input_channels = (num_modalities + num_label_inputs)
else:
num_input_channels = num_modalities
    return num_input_channels

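# Worked example: with 2 imaging modalities and a cascade previous stage whose
# label manager reports 3 foreground labels, the network receives 2 + 3 = 5
# input channels; without a previous stage it receives just the 2 modalities.
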
class ISNetGTEncoder(nn.Module):
def __init__(self, in_ch=1, out_ch=1):
super(ISNetGTEncoder, self).__init__()
self.conv_in = myrebnconv(in_ch, 16, 3, stride=2, padding=1)
self.stage1 = RSU7(16, 16, 64)
self.pool12 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.stage2 = RSU6(64, 16, 64)
self.pool23 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.stage3 = RSU5(64, 32, 128)
self.pool34 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.stage4 = RSU4(128, 32, 256)
self.pool45 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.stage5 = RSU4F(256, 64, 512)
self.pool56 = nn.MaxPool2d(2, stride=2, ceil_mode=True)
self.stage6 = RSU4F(512, 64, 512)
self.side1 = nn.Conv2d(64, out_ch, 3, padding=1)
self.side2 = nn.Conv2d(64, out_ch, 3, padding=1)
self.side3 = nn.Conv2d(128, out_ch, 3, padding=1)
self.side4 = nn.Conv2d(256, out_ch, 3, padding=1)
self.side5 = nn.Conv2d(512, out_ch, 3, padding=1)
self.side6 = nn.Conv2d(512, out_ch, 3, padding=1)
def compute_loss(self, preds, targets):
return muti_loss_fusion(preds, targets)
def forward(self, x):
hx = x
hxin = self.conv_in(hx)
hx1 = self.stage1(hxin)
hx = self.pool12(hx1)
hx2 = self.stage2(hx)
hx = self.pool23(hx2)
hx3 = self.stage3(hx)
hx = self.pool34(hx3)
hx4 = self.stage4(hx)
hx = self.pool45(hx4)
hx5 = self.stage5(hx)
hx = self.pool56(hx5)
hx6 = self.stage6(hx)
d1 = self.side1(hx1)
d1 = _upsample_like(d1, x)
d2 = self.side2(hx2)
d2 = _upsample_like(d2, x)
d3 = self.side3(hx3)
d3 = _upsample_like(d3, x)
d4 = self.side4(hx4)
d4 = _upsample_like(d4, x)
d5 = self.side5(hx5)
d5 = _upsample_like(d5, x)
d6 = self.side6(hx6)
d6 = _upsample_like(d6, x)
        return ([F.sigmoid(d1), F.sigmoid(d2), F.sigmoid(d3), F.sigmoid(d4), F.sigmoid(d5), F.sigmoid(d6)], [hx1, hx2, hx3, hx4, hx5, hx6])

class PlayClickWindow(Packet):
id = 9
to = 0
def __init__(self, window_id: int, slot_number: int, button: int, action_number: int, mode: int, slot: dict) -> None:
super().__init__()
self.window_id = window_id
self.slot_number = slot_number
self.button = button
self.action_number = action_number
self.mode = mode
self.slot = slot
    @classmethod
    def decode(cls, buf: Buffer) -> PlayClickWindow:
        return cls(buf.unpack('B'), buf.unpack('h'), buf.unpack('b'), buf.unpack('h'), buf.unpack_varint(), buf.unpack_slot())

class FC3_TestCase(CommandTest):
command = 'lilocheck'
def runTest(self):
self.assert_parse('lilocheck', 'lilocheck\n')
self.assert_parse_error('lilocheck foo')
self.assert_parse_error('lilocheck --whatever')
cmd = self.handler().commands[self.command]
cmd.check = False
        self.assertEqual(cmd.__str__(), '')

def test_system_exit_in_setuppy(monkeypatch, tmp_path):
monkeypatch.chdir(tmp_path)
setuppy = "import sys; sys.exit('some error')"
(tmp_path / 'setup.py').write_text(setuppy, encoding='utf-8')
with pytest.raises(SystemExit, match='some error'):
backend = BuildBackend(backend_name='setuptools.build_meta')
        backend.get_requires_for_build_wheel()

def main():
log_file = 'train.log'
logger = create_logger(log_file)
assert os.path.exists(args.config)
    with open(args.config, 'r') as f:
        cfg = yaml.load(f, Loader=yaml.Loader)
set_random_seed(cfg['random_seed'])
dataset_helper = Kitti_Config()
if args.evaluation:
cfg['dataset']['train']['enable'] = False
cfg['dataset']['val'] = cfg['dataset']['test']
cfg['dataset']['val']['enable'] = True
(train_loader, test_loader) = build_dataloader(cfg['dataset'], dataset_helper, logger)
model = build_model(cfg['model'], dataset_helper, logger)
if args.evaluation:
tester = Tester(cfg['tester'], model, test_loader, logger)
tester.test()
return
optimizer = build_optimizer(cfg['optimizer'], model, logger)
last_epoch = (- 1)
cfg['scheduler']['lr_scheduler'].update({'lr': cfg['optimizer']['lr'], 'last_epoch': last_epoch})
cfg['scheduler']['bnm_scheduler'].update({'last_epoch': last_epoch})
(lr_scheduler, bnm_scheduler) = build_scheduler(cfg['scheduler'], optimizer, model, logger)
trainer = Trainer(cfg['trainer'], model, optimizer, train_loader, test_loader, lr_scheduler, bnm_scheduler, logger)
    trainer.train()

def comments_and_tags_of_parameters_of(*, function_name, args, no_extraction=False):
if (len(args) == 0):
return ([], [], [], [])
comments1 = ([''] * len(args))
comments2 = [[''] for _ in args]
tags1 = ([''] * len(args))
tags2 = [[''] for _ in args]
if (no_extraction or options.sober):
return (comments1, comments2, tags1, tags2)
code = list(reversed(_extract_code(function_name)))
keep = ([True] * len(code))
for (i, line) in enumerate(code):
if (keep[i] and is_comment_line(line)):
j = (i + 1)
while ((j < len(code)) and is_comment_line(code[j])):
j += 1
if ((j == len(code)) or ((j < len(code)) and is_discardable_line(code[j]))):
for k in range(i, j):
keep[k] = False
code = [code[i].strip() for i in range(len(code)) if keep[i]]
for i in range(len(code)):
pos = code[i].find('#')
if (pos not in ((- 1), 0)):
code[i] = code[i][:pos]
are_empty_lines = [is_empty_line(line) for line in code]
code = _delete_bracket_part(code, len(args))
level = 0
i1 = 0
i2 = 0
found = False
for (i, line) in enumerate(code):
if (not is_comment_line(line)):
new_line = ''
for (j, c) in enumerate(line):
if (c == '#'):
break
if (function_name in line):
if (j < line.index(function_name)):
continue
else:
found = True
if (c in {'(', '['}):
level += 1
elif (c in {')', ']'}):
level -= 1
elif (c == ','):
if (level == 1):
i1 += 1
i2 = 0
new_line += c
elif (level == 2):
i2 += 1
comments2[i1].append('')
tags2[i1].append('')
new_line += '$'
else:
assert (level != 0), ((line + ' ') + str(level))
new_line += '_'
else:
new_line += c
code[i] = ('' if (level != 1) else new_line)
if (found and (level == 2) and is_comment_line(line)):
if ((i > 0) and are_empty_lines[(i - 1)]):
comments2[i1][i2] = ''
s = _prepare(line)
comments2[i1][i2] += (('' if (len(comments2[i1][i2]) == 0) else ' - ') + ('' if (s is None) else s))
tags = _find_tags(line)
if tags:
tags2[i1][i2] += (('' if (len(tags2[i1][i2]) == 0) else ' ') + tags)
code[i] = ''
i1 = 0
found = False
stopped_comments = False
for (i, line) in enumerate(code):
if (not found):
if (function_name in line):
found = True
continue
if ((not stopped_comments) and is_comment_line(line) and _prepare(line)):
if ((i > 0) and are_empty_lines[(i - 1)]):
comments1[i1] = ''
j = i
while ((j < len(code)) and is_comment_line(code[j])):
j += 1
if ((j < len(code)) and (not are_empty_lines[j])):
comments1[i1] += (('' if (len(comments1[i1]) == 0) else (' ' if (comments1[i1][(- 1)] in {'.', ','}) else ' - ')) + _prepare(line))
tags = _find_tags(line)
if (tags and (i1 < len(tags1))):
tags1[i1] += (('' if (len(tags1[i1]) == 0) else ' ') + tags)
if (not (is_comment_line(line) or are_empty_lines[i])):
if (',' in line):
i1 += line.count(',')
elif ((i1 + 1) == len(comments1)):
stopped_comments = True
if (i1 >= len(comments1)):
stopped_comments = True
    return (comments1, comments2, tags1, tags2)

class TestLayerSelector(unittest.TestCase):
def test_select_all_conv_layers(self):
mock_output_shape = (1, 1, 1, 1)
layer1 = Layer(Conv2d(10, 20, 5), '', mock_output_shape)
layer2 = Layer(Conv2d(10, 20, 5), '', mock_output_shape)
layer3 = Layer(Conv2d(10, 10, 5, groups=10), '', mock_output_shape)
layer_db = MagicMock()
layer_db.__iter__.return_value = [layer1, layer2, layer3]
layer_selector = ConvNoDepthwiseLayerSelector()
layer_selector.select(layer_db, [])
layer_db.mark_picked_layers.assert_called_once_with([layer1, layer2])
layer1 = Layer(Conv2d(10, 20, 5), '', mock_output_shape)
layer2 = Layer(Linear(10, 20), '', mock_output_shape)
layer_db = MagicMock()
layer_db.__iter__.return_value = [layer1, layer2]
layer_selector.select(layer_db, [])
layer_db.mark_picked_layers.assert_called_once_with([layer1])
layer1 = Layer(Conv2d(10, 20, 5), '', mock_output_shape)
layer2 = Layer(Conv2d(10, 20, 5), '', mock_output_shape)
layer_db = MagicMock()
layer_db.__iter__.return_value = [layer1, layer2]
layer_selector.select(layer_db, [layer2.module])
layer_db.mark_picked_layers.assert_called_once_with([layer1])
def test_select_all_conv_and_fc_layers(self):
mock_output_shape = (1, 1, 1, 1)
layer1 = Layer(Conv2d(10, 10, 5, groups=10), '', mock_output_shape)
layer2 = Layer(Linear(10, 20), '', mock_output_shape)
layer3 = Layer(Conv2d(20, 40, 5), '', mock_output_shape)
layer_db = MagicMock()
layer_db.__iter__.return_value = [layer1, layer2, layer3]
layer_selector = ConvFcLayerSelector()
layer_selector.select(layer_db, [])
layer_db.mark_picked_layers.assert_called_once_with([layer2, layer3])
layer1 = Layer(Conv2d(10, 20, 5), '', mock_output_shape)
layer2 = Layer(Linear(10, 20), '', mock_output_shape)
layer3 = Layer(Conv2d(20, 40, 5), '', mock_output_shape)
layer_db = MagicMock()
layer_db.__iter__.return_value = [layer1, layer2, layer3]
layer_selector.select(layer_db, [layer2.module])
        layer_db.mark_picked_layers.assert_called_once_with([layer1, layer3])

class RequiredTextAssetConfiguration(AssetConfigurationMixin, BaseRequiredTextAsset, BenefitFeatureConfiguration):
class Meta(BaseRequiredTextAsset.Meta, BenefitFeatureConfiguration.Meta):
verbose_name = 'Require Text Configuration'
verbose_name_plural = 'Require Text Configurations'
constraints = [UniqueConstraint(fields=['internal_name'], name='uniq_text_asset_cfg')]
def __str__(self):
        return 'Require text configuration'
def benefit_feature_class(self):
        return RequiredTextAsset

class MixerBlock(nn.Module):
def __init__(self, tokens_mlp_dim=16, channels_mlp_dim=1024, tokens_hidden_dim=32, channels_hidden_dim=1024):
super().__init__()
self.ln = nn.LayerNorm(channels_mlp_dim)
self.tokens_mlp_block = MlpBlock(tokens_mlp_dim, mlp_dim=tokens_hidden_dim)
self.channels_mlp_block = MlpBlock(channels_mlp_dim, mlp_dim=channels_hidden_dim)
def forward(self, x):
y = self.ln(x)
y = y.transpose(1, 2)
y = self.tokens_mlp_block(y)
y = y.transpose(1, 2)
out = (x + y)
y = self.ln(out)
y = (out + self.channels_mlp_block(y))
        return y

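# Hedged sketch of the MlpBlock referenced above (its real definition is not
# part of this excerpt): the standard two-layer GELU MLP from MLP-Mixer,
# applied over the last dimension of the input.
class MlpBlock(nn.Module):
    def __init__(self, dim, mlp_dim=512):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(dim, mlp_dim), nn.GELU(), nn.Linear(mlp_dim, dim))

    def forward(self, x):
        return self.net(x)
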
@pytest.mark.parametrize('regex,doc', [(signature.SPHINX, ' :param test: parameter docstring'), (signature.EPYDOC, ' test: parameter docstring'), (signature.GOOGLE, ' test (str): parameter docstring')])
def test_docstring_params(regex, doc):
m = regex.match(doc)
assert (m.group('param') == 'test')
    assert (m.group('doc') == 'parameter docstring')

class WhoamiCommand(BaseUserCommand):
def run(self):
token = HfFolder.get_token()
if (token is None):
print('Not logged in')
exit()
try:
user = self._api.whoami(token)
print(user)
except HTTPError as e:
            print(e)

class MachPortManager():
def __init__(self, ql, my_port):
self.ql = ql
self.host_port = MachPort(771)
self.clock_port = MachPort(2051)
self.semaphore_port = MachPort(2307)
self.special_port = MachPort(1799)
self.my_port = my_port
def deal_with_msg(self, msg, addr):
if (msg.header.msgh_id == 200):
out_msg = self.ql.os.macho_host_server.host_info(msg.header, msg.content)
out_msg.write_msg_to_mem(addr)
elif (msg.header.msgh_id == 206):
out_msg = self.ql.os.macho_host_server.host_get_clock_service(msg.header, msg.content)
out_msg.write_msg_to_mem(addr)
elif (msg.header.msgh_id == 3418):
out_msg = self.ql.os.macho_task_server.semaphore_create(msg.header, msg.content)
out_msg.write_msg_to_mem(addr)
elif (msg.header.msgh_id == 3409):
out_msg = self.ql.os.macho_task_server.get_special_port(msg.header, msg.content)
out_msg.write_msg_to_mem(addr)
else:
            self.ql.log.info('Error: Mach msgh_id {} cannot be handled'.format(msg.header.msgh_id))
raise Exception('Mach Msgid Not Found')
self.ql.log.debug('Reply-> Header: {}, Content: {}'.format(out_msg.header, out_msg.content))
def get_thread_port(self, MachoThread):
        return MachoThread.port.name

class BaseTrainer(nn.Module):
def __init__(self, experiment_name=None, warm_start=False, verbose=False, num_averaged_checkpoints=1, keep_checkpoints=None, **extra_attrs):
super().__init__()
self.keep_checkpoints = (keep_checkpoints or num_averaged_checkpoints)
self.num_averaged_checkpoints = num_averaged_checkpoints
self.verbose = verbose
self.total_steps = 0
self.best_metrics = {}
self.optimizers = {}
for (key, attr) in extra_attrs.items():
if isinstance(attr, torch.optim.Optimizer):
self.optimizers[key] = attr
setattr(self, key, attr)
if (experiment_name is None):
experiment_name = 'untitled_{}.{:0>2d}.{:0>2d}_{:0>2d}:{:0>2d}:{:0>2d}'.format(*time.gmtime()[:6])
if self.verbose:
print(('using automatic experiment name: ' + experiment_name))
self.experiment_path = os.path.join('logs/', experiment_name)
if ((not warm_start) and (experiment_name != 'debug')):
assert (not os.path.exists(self.experiment_path)), 'experiment {} already exists'.format(experiment_name)
self.writer = SummaryWriter(self.experiment_path, comment=experiment_name)
if warm_start:
try:
self.load_checkpoint()
except BaseException as e:
warn('Failed to load checkpoint ({}). Ignoring warm_start option.'.format(e))
def train_on_batch(self, *args, **kwargs):
raise NotImplementedError()
def evaluate_metrics(self, *args, **kwargs):
raise NotImplementedError()
def record(self, *, prefix='', **metrics):
if (not ((prefix == '') or prefix.endswith('/'))):
warn('It is recommended that prefix ends with slash(/) for readability')
for (key, value) in metrics.items():
assert (np.shape(value) == ()), 'metric {} must be scalar, but got {}'.format(key, np.shape(value))
self.writer.add_scalar((prefix + str(key)), value, self.total_steps)
return metrics
def save_checkpoint(self, tag=None, path=None, mkdir=True, clear_old=False, extras=None, number_ckpts_to_keep=None, **kwargs):
assert ((tag is None) or (path is None)), 'please provide either tag or path or nothing, not both'
if ((tag is None) and (path is None)):
tag = 'temp_{}'.format(self.total_steps)
if (path is None):
path = os.path.join(self.experiment_path, 'checkpoint_{}.pth'.format(tag))
if mkdir:
os.makedirs(os.path.dirname(path), exist_ok=True)
torch.save(OrderedDict([('state_dict', self.state_dict(**kwargs)), ('optimizers', {key: opt.state_dict() for (key, opt) in self.optimizers.items()}), ('step', self.total_steps), ('best_metrics', self.best_metrics), ('extras', extras)]), path)
if self.verbose:
print(('Saved ' + path))
if clear_old:
self.remove_old_temp_checkpoints(number_ckpts_to_keep)
return path
def load_checkpoint(self, tag=None, path=None, **kwargs):
assert ((tag is None) or (path is None)), 'please provide either tag or path or nothing, not both'
if ((tag is None) and (path is None)):
path = get_latest_file(os.path.join(self.experiment_path, 'checkpoint_temp_[0-9]*.pth'))
elif ((tag is not None) and (path is None)):
path = os.path.join(self.experiment_path, 'checkpoint_{}.pth'.format(tag))
checkpoint = torch.load(path)
for (key, state_dict) in checkpoint['optimizers'].items():
self.optimizers[key].load_state_dict(state_dict)
self.total_steps = int(checkpoint['step'])
self.best_metrics = checkpoint['best_metrics']
self.load_state_dict(checkpoint['state_dict'], **kwargs)
if self.verbose:
print(('Loaded ' + path))
return (self, checkpoint['extras'])
    @contextmanager  # from contextlib; this generator is used via `with self.using_checkpoint(...)` in fit()
    def using_checkpoint(self, **kwargs):
current_checkpoint_tag = 'current'
while True:
current_checkpoint_tag += '_backup'
path = os.path.join(self.experiment_path, 'checkpoint_{}.pth'.format(current_checkpoint_tag))
if (not os.path.exists(path)):
break
self.save_checkpoint(current_checkpoint_tag)
self.load_checkpoint(**kwargs)
(yield)
self.load_checkpoint(current_checkpoint_tag)
os.remove(path)
def average_checkpoints(self, tags=None, paths=None, out_tag='avg', out_path=None):
assert ((tags is None) or (paths is None)), 'please provide either tags or paths or nothing, not both'
assert ((out_tag is not None) or (out_path is not None)), 'please provide either out_tag or out_path or both'
if ((tags is None) and (paths is None)):
paths = self.get_latest_checkpoints(os.path.join(self.experiment_path, 'checkpoint_temp_[0-9]*.pth'), self.num_averaged_checkpoints)
elif ((tags is not None) and (paths is None)):
paths = [os.path.join(self.experiment_path, 'checkpoint_{}.pth'.format(tag)) for tag in tags]
checkpoints = [torch.load(path) for path in paths]
averaged_ckpt = deepcopy(checkpoints[0])
for key in averaged_ckpt['model']:
values = [ckpt['model'][key] for ckpt in checkpoints]
averaged_ckpt['model'][key] = (sum(values) / len(values))
if (out_path is None):
out_path = os.path.join(self.experiment_path, 'checkpoint_{}.pth'.format(out_tag))
torch.save(averaged_ckpt, out_path)
def get_latest_checkpoints(self, pattern, n_last=None):
list_of_files = glob.glob(pattern)
assert (len(list_of_files) > 0), ('No files found: ' + pattern)
return sorted(list_of_files, key=os.path.getctime, reverse=True)[:n_last]
def remove_old_temp_checkpoints(self, number_ckpts_to_keep=None):
if (number_ckpts_to_keep is None):
number_ckpts_to_keep = self.keep_checkpoints
paths = self.get_latest_checkpoints(os.path.join(self.experiment_path, 'checkpoint_temp_[0-9]*.pth'))
paths_to_delete = paths[number_ckpts_to_keep:]
for ckpt in paths_to_delete:
if self.verbose:
print('Removing', ckpt)
os.remove(ckpt)
def step(self, *args, **kwargs):
was_steps = self.total_steps
metrics = self.train_on_batch(*args, **kwargs)
assert (self.total_steps == was_steps), 'total_steps changed within train_on_batch'
self.total_steps += 1
return metrics
def forward(self, *inputs, **kwargs):
return self.step(*inputs, **kwargs)
def fit(self, training_data, batch_size=None, shuffle=True, epochs=1, start_epoch=1, batches_per_epoch=None, batcher_kwargs=None, progressbar=None, clear_outputs=False, device='auto', validate=None, val_data=None, eval_kwargs=None, early_stopping_minimize=(), early_stopping_maximize=(), early_stopping_epochs=None, **kwargs):
device = (getattr(self, 'device', infer_model_device(self)) if (device == 'auto') else device)
progressbar = (tqdm if (progressbar is True) else (progressbar or nop))
(epochs, early_stopping_epochs) = ((epochs or float('inf')), (early_stopping_epochs or float('inf')))
(eval_kwargs, batcher_kwargs) = ((eval_kwargs or dict()), (batcher_kwargs or dict()))
validate = ((val_data is not None) if (validate is None) else validate)
val_data = ([] if (validate and (val_data is None)) else val_data)
if isinstance(early_stopping_minimize, str):
early_stopping_minimize = [early_stopping_minimize]
if isinstance(early_stopping_maximize, str):
early_stopping_maximize = [early_stopping_maximize]
number_of_epochs_without_improvement = 0
if isinstance(training_data, DataLoader):
make_training_epoch = (lambda : iter(progressbar(training_data)))
        elif isinstance(training_data, Dataset):
            loader = torch.utils.data.DataLoader(training_data, batch_size=batch_size, shuffle=shuffle, **batcher_kwargs)
            make_training_epoch = (lambda : iter(progressbar(loader)))
elif isinstance(training_data, (list, tuple)):
make_training_epoch = (lambda : iterate_minibatches(*training_data, batch_size=batch_size, epochs=1, shuffle=shuffle, callback=progressbar, **batcher_kwargs))
else:
training_data = iter(training_data)
assert ((batches_per_epoch is not None) or (epochs == 1)), 'if data is an iterator, please provide batches_per_epoch or use a single epoch'
def make_training_epoch():
for _ in progressbar((range(batches_per_epoch) if batches_per_epoch else count())):
(yield next(training_data))
for epoch_i in count(start=start_epoch):
if (epoch_i >= (epochs + start_epoch)):
if self.verbose:
print('Stopping because of reaching target number of epochs')
break
if self.verbose:
print('Epoch #{}/{}'.format(epoch_i, epochs))
for batch in make_training_epoch():
self.step(*batch, **kwargs)
if clear_outputs:
clear_output()
self.save_checkpoint(clear_old=True)
if (self.num_averaged_checkpoints > 1):
self.average_checkpoints(out_tag='avg')
if validate:
if self.verbose:
print('Evaluating...')
with (self.using_checkpoint(tag='avg') if (self.num_averaged_checkpoints > 1) else nop_ctx()):
val_metrics = self.evaluate_metrics(*val_data, **eval_kwargs)
if self.verbose:
for (key, value) in val_metrics.items():
print(key, value)
print()
number_of_epochs_without_improvement += 1
for (key, value) in val_metrics.items():
found_new_best = False
if (key in early_stopping_maximize):
if (value > self.best_metrics.get(key, (- float('inf')))):
found_new_best = True
if (key in early_stopping_minimize):
if (value < self.best_metrics.get(key, float('inf'))):
found_new_best = True
if found_new_best:
self.best_metrics[key] = value
number_of_epochs_without_improvement = 0
self.save_checkpoint(tag=('best_' + key))
for key in chain(early_stopping_maximize, early_stopping_minimize):
if (key not in val_metrics):
                        warn('Metric name {} not found but requested for maximizing/minimizing'.format(key))
if (number_of_epochs_without_improvement >= early_stopping_epochs):
if self.verbose:
print('Early stopping because of no improvement in {} epochs'.format(number_of_epochs_without_improvement))
break
else:
assert (eval_kwargs is None), 'Eval kwargs is unused if val_data is None'
assert (early_stopping_epochs == float('inf')), 'Early stopping requires val_data'
assert (len(early_stopping_minimize) == len(early_stopping_maximize) == 0), 'Please provide val_data'
        return self

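# Minimal subclass sketch (hypothetical names, not part of the original code):
# BaseTrainer expects train_on_batch to perform one update and return a metrics
# dict, and evaluate_metrics to score val_data; optimizers passed via
# **extra_attrs are discovered by isinstance and checkpointed automatically.
#   class MyTrainer(BaseTrainer):
#       def train_on_batch(self, x, y):
#           ...
#           return self.record(prefix='train/', loss=loss.item())
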
@require_tokenizers
class RetriBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = RetriBertTokenizer
test_slow_tokenizer = True
rust_tokenizer_class = RetriBertTokenizerFast
test_rust_tokenizer = True
space_between_special_tokens = True
from_pretrained_filter = filter_non_english
def setUp(self):
super().setUp()
vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([(x + '\n') for x in vocab_tokens]))
def get_input_output_texts(self, tokenizer):
        input_text = 'UNwant\u00E9d,running'
output_text = 'unwanted, running'
return (input_text, output_text)
def test_full_tokenizer(self):
tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
def test_rust_and_python_full_tokenizers(self):
if (not self.test_rust_tokenizer):
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'UNwant\u00E9d,running'
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
tokenizer = self.get_tokenizer(do_lower_case=True)
rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
        sequence = 'UNwant\u00E9d,running'
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
def test_chinese(self):
tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz'), ['ah', '\u535A', '\u63A8', 'zz'])
def test_basic_tokenizer_lower(self):
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(tokenizer.tokenize(' \tHeLLo!how \n Are yoU? '), ['hello', '!', 'how', 'are', 'you', '?'])
self.assertListEqual(tokenizer.tokenize('Hello'), ['hello'])
def test_basic_tokenizer_lower_strip_accents_false(self):
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
self.assertListEqual(tokenizer.tokenize(' \tHaLLo!how \n Are yoU? '), ['hallo', '!', 'how', 'are', 'you', '?'])
self.assertListEqual(tokenizer.tokenize('Hello'), ['hello'])
def test_basic_tokenizer_lower_strip_accents_true(self):
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
self.assertListEqual(tokenizer.tokenize(' \tHaLLo!how \n Are yoU? '), ['hallo', '!', 'how', 'are', 'you', '?'])
self.assertListEqual(tokenizer.tokenize('Hello'), ['hello'])
def test_basic_tokenizer_lower_strip_accents_default(self):
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(tokenizer.tokenize(' \tHaLLo!how \n Are yoU? '), ['hallo', '!', 'how', 'are', 'you', '?'])
self.assertListEqual(tokenizer.tokenize('Hello'), ['hello'])
def test_basic_tokenizer_no_lower(self):
tokenizer = BasicTokenizer(do_lower_case=False)
self.assertListEqual(tokenizer.tokenize(' \tHeLLo!how \n Are yoU? '), ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])
def test_basic_tokenizer_no_lower_strip_accents_false(self):
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
self.assertListEqual(tokenizer.tokenize(' \tHaLLo!how \n Are yoU? '), ['HaLLo', '!', 'how', 'Are', 'yoU', '?'])
def test_basic_tokenizer_no_lower_strip_accents_true(self):
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
self.assertListEqual(tokenizer.tokenize(' \tHaLLo!how \n Are yoU? '), ['HaLLo', '!', 'how', 'Are', 'yoU', '?'])
def test_basic_tokenizer_respects_never_split_tokens(self):
tokenizer = BasicTokenizer(do_lower_case=False, never_split=['[UNK]'])
self.assertListEqual(tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]'), ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'])
def test_wordpiece_tokenizer(self):
vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
vocab = {}
for (i, token) in enumerate(vocab_tokens):
vocab[token] = i
tokenizer = WordpieceTokenizer(vocab=vocab, unk_token='[UNK]')
self.assertListEqual(tokenizer.tokenize(''), [])
self.assertListEqual(tokenizer.tokenize('unwanted running'), ['un', '##want', '##ed', 'runn', '##ing'])
self.assertListEqual(tokenizer.tokenize('unwantedX running'), ['[UNK]', 'runn', '##ing'])
def test_is_whitespace(self):
self.assertTrue(_is_whitespace(' '))
self.assertTrue(_is_whitespace('\t'))
self.assertTrue(_is_whitespace('\r'))
self.assertTrue(_is_whitespace('\n'))
self.assertTrue(_is_whitespace('\xa0'))
self.assertFalse(_is_whitespace('A'))
self.assertFalse(_is_whitespace('-'))
def test_is_control(self):
self.assertTrue(_is_control('\x05'))
self.assertFalse(_is_control('A'))
self.assertFalse(_is_control(' '))
self.assertFalse(_is_control('\t'))
self.assertFalse(_is_control('\r'))
def test_is_punctuation(self):
self.assertTrue(_is_punctuation('-'))
self.assertTrue(_is_punctuation('$'))
self.assertTrue(_is_punctuation('`'))
self.assertTrue(_is_punctuation('.'))
self.assertFalse(_is_punctuation('A'))
self.assertFalse(_is_punctuation(' '))
def test_clean_text(self):
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
self.assertListEqual([tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
self.assertListEqual([rust_tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
def test_sequence_builders(self):
tokenizer = self.tokenizer_class.from_pretrained('yjernite/retribert-base-uncased')
text = tokenizer.encode('sequence builders', add_special_tokens=False)
text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert (encoded_sentence == (([101] + text) + [102]))
assert (encoded_pair == (((([101] + text) + [102]) + text_2) + [102]))
def test_offsets_with_special_characters(self):
for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
tokens = tokenizer_r.encode_plus(sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True)
do_lower_case = (tokenizer_r.do_lower_case if hasattr(tokenizer_r, 'do_lower_case') else False)
                expected_results = ([((0, 0), tokenizer_r.cls_token), ((0, 1), 'A'), ((1, 2), ','), ((3, 5), 'na'), ((5, 6), '##ï'), ((6, 8), '##ve'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'Allen'), ((21, 23), '##NL'), ((23, 24), '##P'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token)] if (not do_lower_case) else [((0, 0), tokenizer_r.cls_token), ((0, 1), 'a'), ((1, 2), ','), ((3, 8), 'naive'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'allen'), ((21, 23), '##nl'), ((23, 24), '##p'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token)])
self.assertEqual([e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens['input_ids']))
self.assertEqual([e[0] for e in expected_results], tokens['offset_mapping'])
def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ['\u7684', '\u4eba', '\u6709']  # 的, 人, 有
text_with_chinese_char = ''.join(list_of_commun_chinese_char)
for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
kwargs['tokenize_chinese_chars'] = True
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
kwargs['tokenize_chinese_chars'] = False
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
expected_tokens = [(f'##{token}' if (idx != 0) else token) for (idx, token) in enumerate(list_of_commun_chinese_char)]
self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
    @require_torch
def test_torch_encode_plus_sent_to_model(self):
import torch
from transformers import MODEL_MAPPING, TOKENIZER_MAPPING
MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING)
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
if (tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING):
return
(config_class, model_class) = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
config = config_class()
if (config.is_encoder_decoder or (config.pad_token_id is None)):
return
model = model_class(config)
self.assertGreaterEqual(model.bert_query.get_input_embeddings().weight.shape[0], len(tokenizer))
first_ten_tokens = list(tokenizer.get_vocab().keys())[:10]
sequence = ' '.join(first_ten_tokens)
encoded_sequence = tokenizer.encode_plus(sequence, return_tensors='pt')
encoded_sequence.to(model.device)
batch_encoded_sequence = tokenizer.batch_encode_plus([sequence, sequence], return_tensors='pt')
with torch.no_grad():
model.embed_questions(**encoded_sequence)
                    model.embed_questions(**batch_encoded_sequence)

def convert_database_command_logic(args):
from randovania.game_description import data_reader, data_writer
data = decode_data_file(args)
if args.decode_to_game_description:
data = data_writer.write_game_description(data_reader.decode_data(data))
output_binary: (Path | None) = args.output_binary
output_json: (Path | None) = args.output_json
if (output_binary is not None):
export_as_binary(data, output_binary)
elif (output_json is not None):
json_lib.write_path(output_json, data)
else:
        raise ValueError('Neither binary nor JSON set. Argparse is broken?')

def check_all_python_exist(*, platform_configs: Iterable[PythonConfiguration], container: OCIContainer) -> None:
exist = True
has_manylinux_interpreters = True
messages = []
try:
container.call(['manylinux-interpreters', '--help'], capture_output=True)
except subprocess.CalledProcessError:
has_manylinux_interpreters = False
for config in platform_configs:
python_path = ((config.path / 'bin') / 'python')
try:
if has_manylinux_interpreters:
container.call(['manylinux-interpreters', 'ensure', config.path.name])
container.call(['test', '-x', python_path])
except subprocess.CalledProcessError:
messages.append(f" '{python_path}' executable doesn't exist in image '{container.image}' to build '{config.identifier}'.")
exist = False
if (not exist):
message = '\n'.join(messages)
        print(f'cibuildwheel:\n{message}', file=sys.stderr)
        sys.exit(1)

class SubCommand(Protocol):
editable_mode: bool = False
build_lib: str
    def initialize_options(self): ...
    def finalize_options(self): ...
    def run(self): ...
    def get_source_files(self) -> List[str]: ...
    def get_outputs(self) -> List[str]: ...
    def get_output_mapping(self) -> Dict[str, str]: ...

def date_key(datestr):
default = [0, 1, 1]
parts = datestr.split('-')
parts += default[len(parts):]
value = 0
for (d, p, m) in zip(default, parts, (10000, 100, 1), strict=False):
try:
value += (int(p) * m)
except ValueError:
value += (d * m)
    return value

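# Example: date_key maps 'YYYY[-MM[-DD]]' strings to sortable integers, with
# missing or non-numeric parts falling back to the defaults (month=1, day=1):
#   date_key('2020')        -> 20200101
#   date_key('2021-8')      -> 20210801
#   date_key('2021-08-24')  -> 20210824
#   sorted(['2021-8', '2021-08-24', '2020'], key=date_key)
#   -> ['2020', '2021-8', '2021-08-24']
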
class TestSetSelectionOwner(EndianTest):
def setUp(self):
self.req_args_0 = {'selection': , 'time': , 'window': }
self.req_bin_0 = b'\x16\x00\x00\\x144\xafa\x88\xfa7\x16\xdf\x10\x9a'
def testPackRequest0(self):
bin = request.SetSelectionOwner._request.to_binary(*(), **self.req_args_0)
self.assertBinaryEqual(bin, self.req_bin_0)
def testUnpackRequest0(self):
(args, remain) = request.SetSelectionOwner._request.parse_binary(self.req_bin_0, dummy_display, 1)
self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_0)

class CmdApproach(Command):
key = 'approach'
help_category = 'combat'
def func(self):
if (not is_in_combat(self.caller)):
self.caller.msg('You can only do that in combat. (see: help fight)')
return
if (not is_turn(self.caller)):
self.caller.msg('You can only do that on your turn.')
return
if (not self.caller.db.hp):
self.caller.msg("You can't move, you've been defeated.")
return
mover = self.caller
target = self.caller.search(self.args)
if (not target):
return
if (not target.db.combat_range):
self.caller.msg("You can't move toward that!")
return
if (mover == target):
self.caller.msg("You can't move toward yourself!")
return
if (get_range(mover, target) <= 0):
self.caller.msg("You're already next to that target!")
return
approach(mover, target)
mover.location.msg_contents(('%s moves toward %s.' % (mover, target)))
        spend_action(self.caller, 1, action_name='move')

class BloombergFutureTicker(FutureTicker, BloombergTicker):
def __init__(self, name: str, family_id: str, N: int, days_before_exp_date: int, point_value: int=1, designated_contracts: str='FGHJKMNQUVXZ', security_type: SecurityType=SecurityType.FUTURE):
if (not (len(designated_contracts) > 0)):
raise ValueError('At least one month code should be provided.')
super().__init__(name, family_id, N, days_before_exp_date, point_value, designated_contracts, security_type)
def get_active_ticker(self) -> BloombergTicker:
specific_ticker_string = self.family_id.format('A')
return BloombergTicker.from_string(specific_ticker_string, self.security_type, self.point_value)
def _get_futures_chain_tickers(self):
futures_chain_tickers_df = self._data_provider.get_futures_chain_tickers(self, ExpirationDateField.all_dates())[self]
futures_chain_tickers_series = futures_chain_tickers_df.min(axis=1)
month_codes = '|'.join(self.designated_contracts)
        contracts_pattern = self.family_id.format(f'({month_codes})\\d{{1,2}}')
designated_contracts = futures_chain_tickers_series.index[futures_chain_tickers_series.index.map((lambda t: bool(re.search(f'^{contracts_pattern}$', t.as_string()))))]
futures_chain_tickers_series = futures_chain_tickers_series.loc[designated_contracts]
futures_chain_tickers = QFSeries(futures_chain_tickers_series.index, futures_chain_tickers_series.values)
futures_chain_tickers.index = to_datetime(futures_chain_tickers.index)
return futures_chain_tickers
def belongs_to_family(self, ticker: BloombergTicker) -> bool:
pattern = self.family_id.format('[A-Z]\\d{1,2}')
return (bool(re.match(f'^{pattern}$', ticker.ticker)) and ((ticker.point_value, ticker.security_type) == (self.point_value, self.security_type)))
def supported_ticker_type(self) -> Type[Ticker]:
        return BloombergTicker

def ChaCha20_round(H):
for (a, b, c, d) in ORDERS_CHACHA20:
H[a] += H[b]
H[d] = ROL((H[d] ^ H[a]), 16)
H[c] += H[d]
H[b] = ROL((H[b] ^ H[c]), 12)
H[a] += H[b]
H[d] = ROL((H[d] ^ H[a]), 8)
H[c] += H[d]
H[b] = ROL((H[b] ^ H[c]), 7)
    return H

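# Supporting definitions assumed by ChaCha20_round above (a sketch, not the
# original module's code): a 32-bit rotate-left and the column/diagonal
# quarter-round index order from RFC 8439. Callers should mask the state words
# back to 32 bits after the round, since the += steps above are unmasked.
def ROL(x, n):
    x &= 0xFFFFFFFF  # the additions above may have overflowed 32 bits
    return ((x << n) | (x >> (32 - n))) & 0xFFFFFFFF

ORDERS_CHACHA20 = [
    (0, 4, 8, 12), (1, 5, 9, 13), (2, 6, 10, 14), (3, 7, 11, 15),   # column rounds
    (0, 5, 10, 15), (1, 6, 11, 12), (2, 7, 8, 13), (3, 4, 9, 14),   # diagonal rounds
]
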
class PerlinNoiseFactory():
    def __init__(self, dimension: int, octaves: int = 1, tile: tuple[int, ...] = (), unbias: bool = False):
self.dimension = dimension
self.octaves = octaves
self.tile = (tile + ((0,) * dimension))
self.unbias = unbias
self.scale_factor = (2 * (dimension ** (- 0.5)))
self.gradient = {}
    def _generate_gradient(self) -> tuple[float, ...]:
if (self.dimension == 1):
return (random.uniform((- 1), 1),)
random_point = [random.gauss(0, 1) for _ in range(self.dimension)]
scale = (sum(((n * n) for n in random_point)) ** (- 0.5))
return tuple(((coord * scale) for coord in random_point))
def get_plain_noise(self, *point) -> float:
if (len(point) != self.dimension):
raise ValueError(f'Expected {self.dimension} values, got {len(point)}')
grid_coords = []
for coord in point:
min_coord = math.floor(coord)
max_coord = (min_coord + 1)
grid_coords.append((min_coord, max_coord))
dots = []
for grid_point in product(*grid_coords):
if (grid_point not in self.gradient):
self.gradient[grid_point] = self._generate_gradient()
gradient = self.gradient[grid_point]
dot = 0
for i in range(self.dimension):
dot += (gradient[i] * (point[i] - grid_point[i]))
dots.append(dot)
dim = self.dimension
while (len(dots) > 1):
dim -= 1
s = smoothstep((point[dim] - grid_coords[dim][0]))
next_dots = []
while dots:
next_dots.append(lerp(s, dots.pop(0), dots.pop(0)))
dots = next_dots
return (dots[0] * self.scale_factor)
def __call__(self, *point) -> float:
ret = 0
for o in range(self.octaves):
o2 = (1 << o)
new_point = []
for (i, coord) in enumerate(point):
coord *= o2
if self.tile[i]:
coord %= (self.tile[i] * o2)
new_point.append(coord)
ret += (self.get_plain_noise(*new_point) / o2)
ret /= (2 - (2 ** (1 - self.octaves)))
if self.unbias:
r = ((ret + 1) / 2)
for _ in range(int(((self.octaves / 2) + 0.5))):
r = smoothstep(r)
ret = ((r * 2) - 1)
return ret |
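# Interpolation helpers PerlinNoiseFactory relies on (assumed definitions,
# matching the usual reference implementation); the class also expects
# `import math, random` and `from itertools import product`.
def lerp(t, a, b): return a + t * (b - a)
def smoothstep(t): return t * t * (3.0 - 2.0 * t)
# Usage sketch: 3-octave 2D noise tiled every 4 units along the first axis.
# noise = PerlinNoiseFactory(2, octaves=3, tile=(4, 0))
# noise(0.25, 1.5)  # a float in roughly [-1, 1] |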
def get_train_batch(b):
begin = (b * _batch_size)
end = min(len(_data_generator.pos_list), (begin + _batch_size))
(u_batch, po_batch, plen_batch, no_batch, nlen_batch) = ([], [], [], [], [])
for p in range(begin, end):
(u, pos_o) = _data_generator.pos_list[p]
neg_o = pos_o
while ((neg_o in _data_generator.train_u_outfits_dict[u]) or (neg_o in _data_generator.test_u_outfits_dict[u])):
neg_o = rd.randrange(_n_outfits)
u_batch.append(u)
po_batch.append(pos_o)
no_batch.append(neg_o)
plen_batch.append(_data_generator.outfit_len[pos_o])
nlen_batch.append(_data_generator.outfit_len[neg_o])
u_batch = np.array(u_batch)
po_batch = np.array(po_batch)
plen_batch = np.array(plen_batch)
no_batch = np.array(no_batch)
nlen_batch = np.array(nlen_batch)
return (u_batch, po_batch, plen_batch, no_batch, nlen_batch) |
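# Negative-sampling note: for each (user, positive outfit) pair the while
# loop above redraws neg_o until it is an outfit absent from both the user's
# train and test sets, yielding aligned positive/negative batches for
# pairwise (BPR-style) ranking. |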
def test_const_connect_nested_struct_signal_to_struct():
@bitstruct
class SomeMsg1():
a: Bits8
b: Bits32
@bitstruct
class SomeMsg2():
a: SomeMsg1
b: Bits32
class Top(ComponentLevel3):
def construct(s):
s.out = OutPort(SomeMsg2)
connect(s.out, SomeMsg2(SomeMsg1(1, 2), 3))
x = Top()
x.elaborate()
simple_sim_pass(x)
x.tick()
assert (x.out == SomeMsg2(SomeMsg1(1, 2), 3)) |
class TwoLayerBidirectionalLSTMModel(nn.Module):
def __init__(self):
super(TwoLayerBidirectionalLSTMModel, self).__init__()
self.recurrent = torch.nn.LSTM(input_size=3, hidden_size=5, num_layers=2, bidirectional=True)
def forward(self, x, hx=None):
return self.recurrent(x, hx) |
def test_replace():
class SomethingElse():
def foo(self, n, y=None):
assert None, 'This should never be reached in this test'
s = SomethingElse()
def replacement(n, y=None):
return y
original_method = s.foo.__func__
with replaced(s.foo, replacement):
assert (s.foo(1, y='a') == 'a')
assert (s.foo(2) is None)
assert (s.foo.__func__ is original_method)
s = SomethingElse()
def replacement(self, n, y=None):
return y
original_method = SomethingElse.foo
with replaced(SomethingElse.foo, replacement, on=SomethingElse):
assert (s.foo(1, y='a') == 'a')
assert (s.foo(2) is None)
restored_method = SomethingElse.foo
assert (restored_method is original_method)
s = SomethingElse()
def replacement(self, n, y=None):
return y
with pytest.raises(ValueError, match='You have to supply a on= when stubbing an unbound method'):
with replaced(SomethingElse.foo, replacement):
pass |
def caesarCipher(s, k):
encr_string = ''
for letter in s:
if letter.isalpha():
uni = ord(letter)
base = (97 if letter.islower() else 65)
balance = (((uni + k) - base) % 26)
encr_string += chr((balance + base))
else:
encr_string += letter
return encr_string |
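# Worked example: letters shift by k within their own case and wrap past
# 'z' (here z + 2 -> b), while non-letters pass through unchanged.
assert caesarCipher('middle-Outz', 2) == 'okffng-Qwvb' |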
@cuda.jit('(float32, float32, float32[:])', device=True, inline=True)
def point_in_quadrilateral(pt_x, pt_y, corners):
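# Projects AP onto the edge vectors AB and AD of the quadrilateral, whose
# corners are packed flat as [Ax, Ay, Bx, By, Cx, Cy, Dx, Dy]; the point
# lies inside (for an approximately rectangular quadrilateral) iff
# 0 <= AP.AB <= AB.AB and 0 <= AP.AD <= AD.AD.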
ab0 = (corners[2] - corners[0])
ab1 = (corners[3] - corners[1])
ad0 = (corners[6] - corners[0])
ad1 = (corners[7] - corners[1])
ap0 = (pt_x - corners[0])
ap1 = (pt_y - corners[1])
abab = ((ab0 * ab0) + (ab1 * ab1))
abap = ((ab0 * ap0) + (ab1 * ap1))
adad = ((ad0 * ad0) + (ad1 * ad1))
adap = ((ad0 * ap0) + (ad1 * ap1))
return ((abab >= abap) and (abap >= 0) and (adad >= adap) and (adap >= 0)) |
class QuantAnalyzer():
def __init__(self, model: tf.keras.Model, forward_pass_callback: CallbackFunc, eval_callback: CallbackFunc):
if (not isinstance(forward_pass_callback, CallbackFunc)):
raise ValueError('forward_pass_callback and its argument(s) are not encapsulated by CallbackFunc class.')
if (not isinstance(eval_callback, CallbackFunc)):
raise ValueError('eval_callback and its argument(s) are not encapsulated by CallbackFunc class.')
self._model = model
self._forward_pass_callback = forward_pass_callback
self._eval_callback = eval_callback
self._unlabeled_dataset = None
self._num_batches = None
def analyze(self, quant_scheme: QuantScheme=QuantScheme.post_training_tf_enhanced, rounding_mode: str='nearest', default_param_bw: int=8, default_output_bw: int=8, config_file: str=None, results_dir: str='./tmp/'):
results_dir = os.path.abspath(results_dir)
os.makedirs(results_dir, exist_ok=True)
sim = self._create_quantsim_and_encodings(quant_scheme, rounding_mode, default_param_bw, default_output_bw, config_file)
self.check_model_sensitivity_to_quantization(sim, default_param_bw, default_output_bw)
self.perform_per_layer_analysis_by_enabling_quant_wrappers(sim, results_dir)
self.perform_per_layer_analysis_by_disabling_quant_wrappers(sim, results_dir)
self.export_per_layer_encoding_min_max_range(sim, results_dir)
if (quant_scheme == QuantScheme.post_training_tf_enhanced):
self.export_per_layer_stats_histogram(sim, results_dir)
if (self._unlabeled_dataset and self._num_batches):
self.export_per_layer_mse_loss(sim, results_dir)
def _create_quantsim_and_encodings(self, quant_scheme: QuantScheme, rounding_mode: str, default_param_bw: int, default_output_bw: int, config_file: str) -> QuantizationSimModel:
(_, self._model) = fold_all_batch_norms(self._model)
sim = QuantizationSimModel(self._model, quant_scheme=quant_scheme, rounding_mode=rounding_mode, default_output_bw=default_output_bw, default_param_bw=default_param_bw, config_file=config_file)
sim.compute_encodings(forward_pass_callback=self._forward_pass_callback.func, forward_pass_callback_args=self._forward_pass_callback.args)
return sim
def check_model_sensitivity_to_quantization(self, sim: QuantizationSimModel, default_param_bw: int, default_output_bw: int):
fp32_eval_score = self._eval_model(self._model)
_logger.info('FP32 eval score (W32A32): %f', fp32_eval_score)
weight_quantized_eval_score = self._eval_weight_quantized_model(sim)
_logger.info('Weight-quantized eval score (W%dA32): %f', default_param_bw, weight_quantized_eval_score)
act_quantized_eval_score = self._eval_activation_quantized_model(sim)
_logger.info('Activation-quantized eval score (W32A%d): %f', default_output_bw, act_quantized_eval_score)
def _eval_model(self, model: tf.keras.Model) -> float:
return self._eval_callback.func(model, self._eval_callback.args)
def _eval_weight_quantized_model(self, sim: QuantizationSimModel) -> float:
enabled_activation_quantizers = get_enabled_activation_quantizers(sim)
enable_disable_quantizers(enabled_activation_quantizers, enabled=False)
eval_score = self._eval_model(sim.model)
enable_disable_quantizers(enabled_activation_quantizers, enabled=True)
return eval_score
def _eval_activation_quantized_model(self, sim: QuantizationSimModel) -> float:
enabled_param_quantizers = get_enabled_param_quantizers(sim)
enable_disable_quantizers(enabled_param_quantizers, enabled=False)
eval_score = self._eval_model(sim.model)
enable_disable_quantizers(enabled_param_quantizers, enabled=True)
return eval_score
def perform_per_layer_analysis_by_enabling_quant_wrappers(self, sim: QuantizationSimModel, results_dir: str) -> Dict[(str, float)]:
results_dir = os.path.abspath(results_dir)
os.makedirs(results_dir, exist_ok=True)
_logger.info('\nOPTION-1:\nAll the quant wrappers are disabled.\nStarting per-layer analysis by enabling quant wrappers as per config file.')
layer_wise_eval_score_dict = self._perform_per_layer_analysis(sim, disable_all_quantizers=True, enabled_before=True, enabled_after=False)
export_per_layer_sensitivity_analysis_plot(layer_wise_eval_score_dict, results_dir, title='per_layer_quant_enabled')
save_json(layer_wise_eval_score_dict, results_dir, title='per_layer_quant_enabled.json')
_logger.info('Exported per-layer quant analysis (enabled) plot.')
return layer_wise_eval_score_dict
def perform_per_layer_analysis_by_disabling_quant_wrappers(self, sim: QuantizationSimModel, results_dir: str) -> Dict[(str, float)]:
results_dir = os.path.abspath(results_dir)
os.makedirs(results_dir, exist_ok=True)
_logger.info('\nOPTION-2:\nAll the quant wrappers are enabled as per config file.\nStarting per-layer analysis by disabling quant wrappers.')
layer_wise_eval_score_dict = self._perform_per_layer_analysis(sim, disable_all_quantizers=False, enabled_before=False, enabled_after=True)
export_per_layer_sensitivity_analysis_plot(layer_wise_eval_score_dict, results_dir, title='per_layer_quant_disabled')
save_json(layer_wise_eval_score_dict, results_dir, title='per_layer_quant_disabled.json')
_logger.info('Exported per-layer quant analysis (disabled) plot.')
return layer_wise_eval_score_dict
def _perform_per_layer_analysis(self, sim: QuantizationSimModel, disable_all_quantizers: bool, enabled_before: bool, enabled_after: bool) -> Dict[(str, float)]:
sorted_quant_wrappers = _sort_quant_wrappers_based_on_occurrence(sim)
enabled_quant_wrappers = _get_enabled_quantizers(sorted_quant_wrappers)
if disable_all_quantizers:
for enabled_quantizers in enabled_quant_wrappers.values():
enable_disable_quantizers(enabled_quantizers, enabled=False)
eval_score_dict = {}
for (name, quant_wrapper) in sorted_quant_wrappers.items():
if (quant_wrapper in enabled_quant_wrappers):
enabled_quantizers = enabled_quant_wrappers[quant_wrapper]
enable_disable_quantizers(enabled_quantizers, enabled=enabled_before)
eval_score_dict[name] = self._eval_model(sim.model)
_logger.debug('For layer: %s, the eval score is: %f', name, eval_score_dict[name])
enable_disable_quantizers(enabled_quantizers, enabled=enabled_after)
if disable_all_quantizers:
for enabled_quantizers in enabled_quant_wrappers.values():
enable_disable_quantizers(enabled_quantizers, enabled=True)
return eval_score_dict
def export_per_layer_encoding_min_max_range(self, sim: QuantizationSimModel, results_dir: str) -> Tuple[(Dict, Dict)]:
min_max_ranges_dir = os.path.join(results_dir, 'min_max_ranges')
min_max_range_for_activations_dict = {}
min_max_range_for_weights_dict = {}
for quant_wrapper in sim.quant_wrappers():
wrapped_layer_name = quant_wrapper.original_layer.name
for (index, quantizer) in enumerate(quant_wrapper.input_quantizers):
if quantizer.is_enabled():
name = f'{wrapped_layer_name}_input_{index}'
min_max_range_for_activations_dict[name] = (quantizer.encoding.min, quantizer.encoding.max)
for (index, quantizer) in enumerate(quant_wrapper.output_quantizers):
if quantizer.is_enabled():
name = f'{wrapped_layer_name}_output_{index}'
min_max_range_for_activations_dict[name] = (quantizer.encoding.min, quantizer.encoding.max)
for quantizer in quant_wrapper.param_quantizers:
if quantizer.is_enabled():
quantizer_name = quantizer.name.replace('/', '-')
name = f'{wrapped_layer_name}_{quantizer_name}'
if isinstance(quantizer.encoding, List):
per_channel_encodings = {}
for (index, encoding) in enumerate(quantizer.encoding):
per_channel_encodings[f'{name}_{index}'] = (encoding.min, encoding.max)
min_max_range_for_weights_dict[name] = per_channel_encodings
else:
min_max_range_for_weights_dict[name] = (quantizer.encoding.min, quantizer.encoding.max)
create_and_export_min_max_ranges_plot(min_max_range_for_weights_dict, min_max_ranges_dir, title='weights')
create_and_export_min_max_ranges_plot(min_max_range_for_activations_dict, min_max_ranges_dir, title='activations')
save_json(min_max_range_for_weights_dict, min_max_ranges_dir, title='weights.json')
save_json(min_max_range_for_activations_dict, min_max_ranges_dir, title='activations.json')
_logger.info('Exported per layer encodings min-max ranges plot(s).')
return (min_max_range_for_weights_dict, min_max_range_for_activations_dict)
def export_per_layer_stats_histogram(self, sim: QuantizationSimModel, results_dir: str) -> None:
weights_pdf_dir = os.path.join(results_dir, 'weights_pdf')
activations_pdf_dir = os.path.join(results_dir, 'activations_pdf')
for quant_wrapper in sim.quant_wrappers():
wrapped_layer_name = quant_wrapper.original_layer.name
for (index, quantizer) in enumerate(quant_wrapper.input_quantizers):
if quantizer.encoding:
self._create_and_export_stats_histogram_plot(quantizer, activations_pdf_dir, title=f'{wrapped_layer_name}_input_q{index}')
for (index, quantizer) in enumerate(quant_wrapper.output_quantizers):
if quantizer.encoding:
self._create_and_export_stats_histogram_plot(quantizer, activations_pdf_dir, title=f'{wrapped_layer_name}_output_q{index}')
for quantizer in quant_wrapper.param_quantizers:
if quantizer.encoding:
param_name = quantizer.name.replace('/', '-')
self._create_and_export_stats_histogram_plot(quantizer, os.path.join(weights_pdf_dir, wrapped_layer_name), title=f'{wrapped_layer_name}_{param_name}')
_logger.info('Exported per layer stats histogram plot(s).')
@staticmethod
def _create_and_export_stats_histogram_plot(quantizer: TensorQuantizer, results_dir: str, title: str) -> None:
os.makedirs(results_dir, exist_ok=True)
histograms = quantizer.get_stats_histogram()
encodings = quantizer.encoding
if (not isinstance(encodings, List)):
encodings = [encodings]
for (index, (histogram, encoding)) in enumerate(zip(histograms, encodings)):
export_stats_histogram_plot(histogram, encoding, results_dir, title=f'{title}_{index}')
def export_per_layer_mse_loss(self, sim: QuantizationSimModel, results_dir: str) -> Dict[(str, float)]:
results_dir = os.path.abspath(results_dir)
os.makedirs(results_dir, exist_ok=True)
mse_loss_dict = {}
with Spinner('Calculating per-layer MSE loss'):
for (index, layer) in enumerate(self._model.layers):
if (isinstance(layer, tf.keras.layers.InputLayer) or GraphSearchUtils.is_folded_batch_normalization(layer)):
continue
loss = self._compute_mse_loss(sim, index)
mse_loss_dict[layer.name] = loss
export_per_layer_mse_plot(mse_loss_dict, results_dir, title='per_layer_mse_loss')
save_json(mse_loss_dict, results_dir, title='per_layer_mse_loss.json')
_logger.info('Exported per layer MSE loss plot.')
return mse_loss_dict
def _compute_mse_loss(self, sim: QuantizationSimModel, index: int) -> float:
loss = 0.0
total = 0
mse = tf.keras.losses.MeanSquaredError()
for tensor in self._unlabeled_dataset.take(self._num_batches):
quantized_output = _get_output_of_intermediate_layer(sim.model, tensor, index)
fp32_output = _get_output_of_intermediate_layer(self._model, tensor, index)
loss += mse(quantized_output, fp32_output).numpy()
total += tensor.shape[0]
return (loss / total)
def enable_per_layer_mse_loss(self, unlabeled_dataset: tf.data.Dataset, num_batches: int) -> None:
self._unlabeled_dataset = unlabeled_dataset
self._num_batches = num_batches |
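# Usage sketch (calibrate_fn, eval_fn, and the datasets below are
# hypothetical placeholders): wrap the callables in CallbackFunc, then run
# analyze() to export per-layer sensitivity, min/max ranges, histograms and,
# once enabled, MSE-loss plots.
# analyzer = QuantAnalyzer(model, CallbackFunc(calibrate_fn, calib_data), CallbackFunc(eval_fn, eval_data))
# analyzer.enable_per_layer_mse_loss(unlabeled_dataset, num_batches=4)
# analyzer.analyze(results_dir='./analysis') |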
@pytest.mark.cli
@pytest.mark.network
@PYPROJ_CLI_ENDPONTS
def test_sync__area_of_use__list(input_command, tmpdir):
with tmp_chdir(str(tmpdir)):
output = subprocess.check_output((input_command + ['sync', '--area-of-use', 'France', '--list-files', '--include-already-downloaded']), stderr=subprocess.STDOUT).decode('utf-8')
lines = output.strip().split('\n')
assert (len(lines) > 2)
_check_list_files_header(lines)
for line in lines[2:]:
assert ('France' in line.split('|')[(- 1)]) |
class Effect5500(BaseEffect):
type = 'passive'
@staticmethod
def handler(fit, ship, context, projectionRange, **kwargs):
fit.modules.filteredChargeBoost((lambda mod: mod.charge.requiresSkill('Heavy Missiles')), 'aoeCloudSize', ship.getModifiedItemAttr('eliteBonusCommandShips2'), skill='Command Ships', **kwargs) |
@pytest.mark.parametrize(('tag', 'node_date', 'expected'), [pytest.param('20.03.03', date(2020, 3, 4), '20.03.04.0', id='next day'), pytest.param('20.03.03', date(2020, 3, 3), '20.03.03.1', id='same day'), pytest.param('20.03.03.2', date(2020, 3, 3), '20.03.03.3', id='same day with patch'), pytest.param('v20.03.03', date(2020, 3, 4), 'v20.03.04.0', id='next day with v prefix')])
def test_calver_guess_next_data(tag: str, node_date: date, expected: str) -> None:
version = meta(tag, config=c_non_normalize, node_date=node_date)
next = guess_next_date_ver(version, node_date=node_date, version_cls=c_non_normalize.version_cls)
assert (next == expected) |
@pytest.mark.parametrize('strategy, level', ([PropagateCastOpsStrategy.NONE, PropagateCastLevel.NOT_USED], [PropagateCastOpsStrategy.INSERT_AND_REDUCE, PropagateCastLevel.FASTER_KEEP_PRECISION], [PropagateCastOpsStrategy.FLOOD_FILL, PropagateCastLevel.FASTER_KEEP_PRECISION]))
def test_set_propagate_cast(strategy, level):
(D_in, H, D_out) = (784, 500, 10)
model = NeuralNetSinglePositionalArgument(D_in, H, D_out)
model = ORTModule(model)
set_propagate_cast_ops_optimization(model=model, level=level, strategy=strategy)
for mode in [True, False]:
assert (model._torch_module._execution_manager(is_training=mode)._propagate_cast_ops_strategy == strategy)
assert (model._torch_module._execution_manager(is_training=mode)._propagate_cast_ops_level == level) |
def test_version_increments_are_correct():
(versions, _) = zip(*get_releases())
for (prev, current) in zip(versions[1:], versions):
assert (prev < current)
assert (current in (prev._replace(patch=(prev.patch + 1)), prev._replace(minor=(prev.minor + 1), patch=0), prev._replace(major=(prev.major + 1), minor=0, patch=0))), f'{current} does not follow {prev}' |
class TestLayerOutputUtil():
def test_generate_layer_outputs(self):
(quantsim, starting_ops, output_ops, output_names, dummy_input) = get_quantsim_artifacts()
(dummy_dataset, data_count, first_input) = get_dataset_artifacts()
temp_dir_path = os.path.dirname(os.path.abspath(__file__))
temp_dir_path = os.path.join(temp_dir_path, 'temp_dir')
layer_output_util = LayerOutputUtil(session=quantsim.session, starting_op_names=starting_ops, output_op_names=output_ops, dir_path=temp_dir_path)
iterator = iterate_tf_dataset(dummy_dataset)
for input_batch in iterator:
layer_output_util.generate_layer_outputs(input_batch)
assert (data_count == len(os.listdir(os.path.join(temp_dir_path, 'inputs'))))
assert (data_count == len(os.listdir(os.path.join(temp_dir_path, 'outputs'))))
saved_layer_outputs = os.listdir(os.path.join(temp_dir_path, 'outputs', 'layer_outputs_0'))
saved_layer_outputs = [i[:(- len('.raw'))] for i in saved_layer_outputs]
for name in output_names:
assert (name in saved_layer_outputs)
saved_last_layer_output = np.fromfile(os.path.join(temp_dir_path, 'outputs', 'layer_outputs_0', 'keras_model_Softmax_0.raw'), dtype=np.float32).reshape((1, 2))
last_layer_output = quantsim_forward_pass_callback(quantsim.session, np.expand_dims(first_input, axis=0))
assert np.array_equal(saved_last_layer_output, last_layer_output)
shutil.rmtree(temp_dir_path, ignore_errors=False, onerror=None)
quantsim.session.close()
del quantsim |
def pytest_generate_tests(metafunc):
if ('host' in metafunc.fixturenames):
for marker in getattr(metafunc.function, 'pytestmark', []):
if (marker.name == 'testinfra_hosts'):
hosts = marker.args
break
else:
hosts = ['docker://debian_bookworm']
metafunc.parametrize('host', hosts, indirect=True, scope='function') |
@patch('pypyr.retries.random.uniform', return_value=999)
def test_retries_jitter_list_jrc_up_max(mock_random):
j = pypyr.retries.jitter(sleep=[100, 200, 300], jrc=2, max_sleep=200)
assert (j(0) == 999)
assert (j(1) == 999)
assert (j(2) == 999)
assert (j(1) == 999)
assert (mock_random.mock_calls == [call(200, 100), call(400, 200), call(400, 200), call(400, 200)]) |
class Distribution(_Distribution):
def __init__(self, attrs=None):
_Distribution.__init__(self, attrs)
if (not self.ext_modules):
return
for idx in range((len(self.ext_modules) - 1), (- 1), (- 1)):
ext = self.ext_modules[idx]
if (not isinstance(ext, Extension)):
continue
setattr(self, ext.attr_name, None)
self.global_options = ([(ext.option_name, None, ('include %s (default if %s is available)' % (ext.feature_description, ext.feature_name))), (ext.neg_option_name, None, ('exclude %s' % ext.feature_description))] + self.global_options)
self.negative_opt = self.negative_opt.copy()
self.negative_opt[ext.neg_option_name] = ext.option_name
def has_ext_modules(self):
if (not self.ext_modules):
return False
for ext in self.ext_modules:
with_ext = self.ext_status(ext)
if ((with_ext is None) or with_ext):
return True
return False
def ext_status(self, ext):
implementation = platform.python_implementation()
if (implementation != 'CPython'):
return False
if isinstance(ext, Extension):
with_ext = getattr(self, ext.attr_name)
return with_ext
else:
return True |
def draw_words(transcribed_data, midi_notes):
if (transcribed_data is not None):
for (i, data) in enumerate(transcribed_data):
note_frequency = librosa.note_to_hz(midi_notes[i])
frequency_range = get_frequency_range(midi_notes[i])
half_frequency_range = (frequency_range / 2)
height = (numpy.log10([(note_frequency + half_frequency_range)])[0] - numpy.log10([(note_frequency - half_frequency_range)])[0])
xy_start_pos = (data.start, 1)
width = (data.end - data.start)
rect = Rectangle(xy_start_pos, width, height, edgecolor='none', facecolor='red', alpha=0.5)
plt.gca().add_patch(rect) |
def _iter_sections(lines):
lines = (line.split('#')[0].strip() for line in lines)
name = None
section = None
for line in lines:
if (not line):
continue
if (line.startswith('[') and line.endswith(']')):
if name:
(yield (name, section))
name = line[1:(- 1)].strip()
section = []
else:
if (not name):
raise ValueError(f'expected new section, got {line!r}')
section.append(line)
if name:
(yield (name, section))
else:
raise ValueError('invalid manifest file, no sections found') |
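# Usage sketch: '#' comments are stripped and blank lines skipped before
# sectioning, so headers and entries may carry trailing comments.
assert list(_iter_sections(['[build]  # header', 'flag = 1', '[test]', 'flag = 2'])) == [('build', ['flag = 1']), ('test', ['flag = 2'])] |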
class OrderCriterion(Criterion):
def __init__(self, term, direction):
super(OrderCriterion, self).__init__()
self.term = term
self.direction = direction
def get_query(self, **kwargs):
term = self.term.get_query(**kwargs)
if ((self.direction == Order.asc) or (isinstance(self.direction, six.string_types) and (self.direction.lower() == 'asc'))):
return 'ORDERBY{term}'.format(term=term)
elif ((self.direction == Order.desc) or (isinstance(self.direction, six.string_types) and (self.direction.lower() == 'desc'))):
return 'ORDERBYDESC{term}'.format(term=term)
else:
raise QueryTypeError(("Expected 'asc', 'desc', or an instance of Order, not %s" % type(self.direction))) |
class DefaultArgumentHandler(ArgumentHandler):
def __call__(self, *args, **kwargs):
if ('X' in kwargs):
return kwargs['X']
elif ('data' in kwargs):
return kwargs['data']
elif (len(args) == 1):
if isinstance(args[0], list):
return args[0]
else:
return [args[0]]
elif (len(args) > 1):
return list(args)
raise ValueError('Unable to infer the format of the provided data (X=, data=, ...)') |
class CNOT(Bloq):
@cached_property
def signature(self) -> 'Signature':
return Signature.build(ctrl=1, target=1)
def decompose_bloq(self) -> 'CompositeBloq':
raise DecomposeTypeError(f'{self} is atomic')
def add_my_tensors(self, tn: qtn.TensorNetwork, tag: Any, *, incoming: Dict[(str, SoquetT)], outgoing: Dict[(str, SoquetT)]):
internal = qtn.rand_uuid()
tn.add(qtn.Tensor(data=COPY, inds=(incoming['ctrl'], outgoing['ctrl'], internal), tags=['COPY', tag]))
tn.add(qtn.Tensor(data=XOR, inds=(incoming['target'], outgoing['target'], internal), tags=['XOR']))
def on_classical_vals(self, ctrl: int, target: int) -> Dict[(str, 'ClassicalValT')]:
return {'ctrl': ctrl, 'target': ((ctrl + target) % 2)}
def as_cirq_op(self, qubit_manager: 'cirq.QubitManager', ctrl: 'CirqQuregT', target: 'CirqQuregT') -> Tuple[('cirq.Operation', Dict[(str, 'CirqQuregT')])]:
import cirq
(ctrl,) = ctrl
(target,) = target
return (cirq.CNOT(ctrl, target), {'ctrl': np.array([ctrl]), 'target': np.array([target])})
def wire_symbol(self, soq: 'Soquet') -> 'WireSymbol':
if (soq.reg.name == 'ctrl'):
return Circle(filled=True)
elif (soq.reg.name == 'target'):
return ModPlus()
raise ValueError(f'Bad wire symbol soquet: {soq}') |
def alt(*parsers: Parser) -> Parser:
if (not parsers):
return fail('<empty alt>')
def alt_parser(stream, index):
result = None
for parser in parsers:
result = parser(stream, index).aggregate(result)
if result.status:
return result
return result
return alt_parser |
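# Usage sketch (illustrative, parsy-style combinators): alt tries each
# parser at the same input position and returns the first success; the
# .aggregate(result) chaining merges failures so the final error reports
# every alternative that was expected.
# number_or_word = alt(regex(r'[0-9]+'), regex(r'[a-z]+')) |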
@inline_args
class PrepareLiterals(Transformer_InPlace):
def literal(self, literal):
return ST('pattern', [_literal_to_pattern(literal)])
def range(self, start, end):
assert (start.type == end.type == 'STRING')
start = start.value[1:(- 1)]
end = end.value[1:(- 1)]
assert (len(eval_escaping(start)) == len(eval_escaping(end)) == 1)
regexp = ('[%s-%s]' % (start, end))
return ST('pattern', [PatternRE(regexp)]) |
class ROIAlign(nn.Module):
def __init__(self, output_size, spatial_scale, sampling_ratio, aligned=True):
super().__init__()
self.output_size = output_size
self.spatial_scale = spatial_scale
self.sampling_ratio = sampling_ratio
self.aligned = aligned
from torchvision import __version__
version = tuple((int(x) for x in __version__.split('.')[:2]))
assert (version >= (0, 7)), 'Require torchvision >= 0.7'
def forward(self, input, rois):
assert ((rois.dim() == 2) and (rois.size(1) == 5))
if input.is_quantized:
input = input.dequantize()
return roi_align(input, rois.to(dtype=input.dtype), self.output_size, self.spatial_scale, self.sampling_ratio, self.aligned)
def __repr__(self):
tmpstr = (self.__class__.__name__ + '(')
tmpstr += ('output_size=' + str(self.output_size))
tmpstr += (', spatial_scale=' + str(self.spatial_scale))
tmpstr += (', sampling_ratio=' + str(self.sampling_ratio))
tmpstr += (', aligned=' + str(self.aligned))
tmpstr += ')'
return tmpstr |
@bp.route('/json/update_accounts', methods=['POST'], endpoint='update_accounts')
@login_required('ACCOUNTS')
def update_accounts():
deleted = []
updated = {}
api = flask.current_app.config['PYLOAD_API']
for (name, value) in flask.request.form.items():
value = value.strip()
if (not value):
continue
(tmp, user) = name.split(';')
(plugin, action) = tmp.split('|')
if (action == 'delete'):
deleted.append((plugin, user))
api.remove_account(plugin, user)
elif (action == 'password'):
(password, options) = updated.get((plugin, user), (None, {}))
password = value
updated[(plugin, user)] = (password, options)
elif ((action == 'time') and ('-' in value)):
(password, options) = updated.get((plugin, user), (None, {}))
options['time'] = [value]
updated[(plugin, user)] = (password, options)
elif ((action == 'limitdl') and value.isdigit()):
(password, options) = updated.get((plugin, user), (None, {}))
options['limit_dl'] = [value]
updated[(plugin, user)] = (password, options)
for (tmp, options) in updated.items():
(plugin, user) = tmp
if ((plugin, user) in deleted):
continue
(password, options) = options
api.update_account(plugin, user, password, options=options)
return jsonify(True) |
def convert_pandas_data_frame_to_bokeh_data_table(data):
data['index'] = data.index
data = data[(['index'] + data.columns[:(- 1)].tolist())]
data.columns = data.columns.map(str)
source = ColumnDataSource(data=data)
columns = [TableColumn(field=column_str, title=column_str) for column_str in data.columns]
data_table = DataTable(source=source, columns=columns)
layout = add_title(data_table, 'Table Summarizing Weight Ranges')
return layout |
def test_L1_const_index():
a = CaseConnectConstToOutComp.DUT()
a.elaborate()
a.apply(StructuralRTLIRGenL1Pass(gen_connections(a)))
connections = a.get_metadata(StructuralRTLIRGenL1Pass.connections)
comp = sexp.CurComp(a, 's')
assert (connections == [(sexp.ConstInstance(Bits32(a.const_[2]), 42), sexp.CurCompAttr(comp, 'out'))]) |
class LinearFeatureBaseline(Baseline):
def __init__(self, env_spec, reg_coeff=1e-05):
self._coeffs = None
self._reg_coeff = reg_coeff
def get_param_values(self, **tags):
return self._coeffs
def set_param_values(self, val, **tags):
self._coeffs = val
def _features(self, path):
o = np.clip(path['observations'], (- 10), 10)
l = len(path['rewards'])
al = (np.arange(l).reshape((- 1), 1) / 100.0)
return np.concatenate([o, (o ** 2), al, (al ** 2), (al ** 3), np.ones((l, 1))], axis=1)
def fit(self, paths):
featmat = np.concatenate([self._features(path) for path in paths])
returns = np.concatenate([path['returns'] for path in paths])
reg_coeff = self._reg_coeff
for _ in range(5):
self._coeffs = np.linalg.lstsq((featmat.T.dot(featmat) + (reg_coeff * np.identity(featmat.shape[1]))), featmat.T.dot(returns))[0]
if (not np.any(np.isnan(self._coeffs))):
break
reg_coeff *= 10
def predict(self, path):
if (self._coeffs is None):
return np.zeros(len(path['rewards']))
return self._features(path).dot(self._coeffs) |
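# Note on fit(): it solves the ridge-regularized normal equations
# (F^T F + reg * I) w = F^T r via lstsq, retrying with a 10x larger
# reg_coeff whenever the solution comes back with NaNs. |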
def _lambert_conformal_conic(cf_params):
(first_parallel, second_parallel) = _get_standard_parallels(cf_params['standard_parallel'])
if (second_parallel is not None):
return LambertConformalConic2SPConversion(latitude_first_parallel=first_parallel, latitude_second_parallel=second_parallel, latitude_false_origin=cf_params.get('latitude_of_projection_origin', 0.0), longitude_false_origin=cf_params.get('longitude_of_central_meridian', 0.0), easting_false_origin=cf_params.get('false_easting', 0.0), northing_false_origin=cf_params.get('false_northing', 0.0))
return LambertConformalConic1SPConversion(latitude_natural_origin=first_parallel, longitude_natural_origin=cf_params.get('longitude_of_central_meridian', 0.0), false_easting=cf_params.get('false_easting', 0.0), false_northing=cf_params.get('false_northing', 0.0)) |
def resize_min_side(im, size, method):
(h, w) = im.shape[(- 2):]
min_side = min(h, w)
ratio = (size / min_side)
if (method == 'bilinear'):
return F.interpolate(im, scale_factor=ratio, mode=method, align_corners=False)
else:
return F.interpolate(im, scale_factor=ratio, mode=method) |
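# Usage sketch (assumes torch with F = torch.nn.functional, as the helper
# does): a 1x3x240x320 batch rescaled so its shorter side becomes 480.
import torch
out = resize_min_side(torch.rand(1, 3, 240, 320), 480, 'bilinear')
assert tuple(out.shape) == (1, 3, 480, 640) |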
def simu_subtomo(op, packing_op, output, save_tomo=0, save_target=1, save_tomo_slice=0):
import datetime
starttime = datetime.datetime.now()
v = op['v']
import packing_single_sphere.simulate as SI
target_name = packing_op['target']
packing_result = SI.packing_with_target(packing_op)
protein_name = packing_result['optimal_result']['pdb_id']
x = (packing_result['optimal_result']['x'] / 10)
y = (packing_result['optimal_result']['y'] / 10)
z = (packing_result['optimal_result']['z'] / 10)
x0 = (np.array(packing_result['optimal_result']['initialization'][0]) / 10)
y0 = (np.array(packing_result['optimal_result']['initialization'][1]) / 10)
z0 = (np.array(packing_result['optimal_result']['initialization'][2]) / 10)
box_size = (packing_result['general_info']['box_size'] / 10)
print('get packing info done')
import map_tomo.merge_map as MM
(initmap, init_angle_list) = MM.merge_map(v, protein_name, x0, y0, z0, box_size)
(packmap, pack_angle_list) = MM.merge_map(v, protein_name, x, y, z, box_size)
packing_result['optimal_result']['initmap_rotate_angle'] = init_angle_list
packing_result['optimal_result']['packmap_rotate_angle'] = pack_angle_list
print('merge huge map done ')
with open(output['json']['pack'], 'w') as f:
json.dump(packing_result, f, cls=MM.NumpyEncoder)
print('save packing info done')
if (save_tomo != 0):
IM.map2mrc(initmap, output['initmap']['mrc'])
IM.map2mrc(packmap, output['packmap']['mrc'])
IM.map2png(initmap, output['initmap']['png'])
IM.map2png(packmap, output['packmap']['png'])
IM.map2mrc(tomo, output['tomo']['mrc'])
IM.map2png(tomo, output['tomo']['png'])
i = protein_name.index(target_name)
(target_packmap, loc_r) = MM.trim_target(packmap, np.array([x[i], y[i], z[i]]), op['target_size'])
print('trim target done')
if (save_target != 0):
IM.map2mrc(target_packmap, output['packmap']['target']['mrc'])
target_info = {}
target_info['loc'] = loc_r
target_info['rotate'] = pack_angle_list[i]
target_info['name'] = packing_op['target']
target_info['SNR'] = op['tomo']['model']['SNR']
with open(output['json']['target'], 'w') as f:
json.dump(target_info, f, cls=MM.NumpyEncoder)
print('get target info done')
if (save_tomo_slice != 0):
import map_tomo.mrc2singlepic as MS
MS.mrc2singlepic(output['packmap']['mrc'], (output['packmap']['png'] + 'packmap{}/'.format(num)), 'packmap{}'.format(num))
MS.mrc2singlepic(output['tomo']['mrc'], (output['tomo']['png'] + 'tomo{}/'.format(num)), 'tomo{}'.format(num))
target_simu_tomo = {}
target_simu_tomo['density_map'] = target_packmap
target_simu_tomo['info'] = target_info
print('all done')
endtime = datetime.datetime.now()
print('simulation time:', (endtime - starttime).seconds, 'seconds')
return target_simu_tomo |
def _do_test_3D_models(recognizer, target_layer_name, input_shape, num_classes=400):
(blended_imgs_target_shape, preds_target_shape) = _get_target_shapes(input_shape, num_classes=num_classes, model_type='3D')
demo_inputs = generate_gradcam_inputs(input_shape, '3D')
if (torch.__version__ == 'parrots'):
if torch.cuda.is_available():
recognizer = recognizer.cuda()
demo_inputs['imgs'] = demo_inputs['imgs'].cuda()
demo_inputs['label'] = demo_inputs['label'].cuda()
gradcam = GradCAM(recognizer, target_layer_name)
(blended_imgs, preds) = gradcam(demo_inputs)
assert (blended_imgs.size() == blended_imgs_target_shape)
assert (preds.size() == preds_target_shape)
(blended_imgs, preds) = gradcam(demo_inputs, True)
assert (blended_imgs.size() == blended_imgs_target_shape)
assert (preds.size() == preds_target_shape)
else:
gradcam = GradCAM(recognizer, target_layer_name)
(blended_imgs, preds) = gradcam(demo_inputs)
assert (blended_imgs.size() == blended_imgs_target_shape)
assert (preds.size() == preds_target_shape)
(blended_imgs, preds) = gradcam(demo_inputs, True)
assert (blended_imgs.size() == blended_imgs_target_shape)
assert (preds.size() == preds_target_shape) |
class BSplineFamily(BasisFamily):
def __init__(self, breakpoints, degree, smoothness=None, vars=None):
breakpoints = np.array(breakpoints, dtype=float)
if (breakpoints.ndim == 2):
raise NotImplementedError('breakpoints for each spline variable not yet supported')
elif (breakpoints.ndim != 1):
raise ValueError('breakpoints must be convertable to a 1D array')
elif (breakpoints.size < 2):
raise ValueError('break point vector must have at least 2 values')
elif np.any((np.diff(breakpoints) <= 0)):
raise ValueError('break points must be strictly increasing values')
if (vars is None):
nvars = 1
self.nvars = None
elif (not isinstance(vars, int)):
raise TypeError('vars must be an integer')
else:
nvars = vars
self.nvars = nvars
def process_spline_parameters(values, length, allowed_types, minimum=0, default=None, name='unknown'):
if ((values is None) and (default is None)):
return None
elif (values is None):
values = default
elif isinstance(values, np.ndarray):
values = values.tolist()
if isinstance(values, allowed_types):
values = [values for i in range(length)]
elif all([isinstance(v, allowed_types) for v in values]):
if (len(values) != length):
raise ValueError(f"length of '{name}' does not match number of variables")
else:
raise ValueError(f"could not parse '{name}' keyword")
if ((values is not None) and any([(val < minimum) for val in values])):
raise ValueError(f"invalid value for '{name}'; must be at least {minimum}")
return values
degree = process_spline_parameters(degree, nvars, int, name='degree', minimum=1)
smoothness = process_spline_parameters(smoothness, nvars, int, name='smoothness', minimum=0, default=[(d - 1) for d in degree])
if any([((degree[i] - smoothness[i]) < 1) for i in range(nvars)]):
raise ValueError('degree must be greater than smoothness')
self.breakpoints = breakpoints
self.degree = degree
self.smoothness = smoothness
(self.coef_offset, self.coef_length, offset) = ([], [], 0)
for i in range(nvars):
ncoefs = (((self.degree[i] + 1) * (len(self.breakpoints) - 1)) - ((self.smoothness[i] + 1) * (len(self.breakpoints) - 2)))
self.coef_offset.append(offset)
self.coef_length.append(ncoefs)
offset += ncoefs
self.N = offset
self.knotpoints = []
for i in range(nvars):
self.knotpoints.append(np.empty((((self.degree[i] + 1) + ((len(self.breakpoints) - 2) * (self.degree[i] - self.smoothness[i]))) + (self.degree[i] + 1))))
self.knotpoints[i][0:(self.degree[i] + 1)] = self.breakpoints[0]
offset = (self.degree[i] + 1)
nknots = (self.degree[i] - self.smoothness[i])
assert (nknots > 0)
for j in range(1, (self.breakpoints.size - 1)):
self.knotpoints[i][offset:(offset + nknots)] = self.breakpoints[j]
offset += nknots
self.knotpoints[i][offset:((offset + self.degree[i]) + 1)] = self.breakpoints[(- 1)]
def __repr__(self):
return (f'<{self.__class__.__name__}: nvars={self.nvars}, ' + f'degree={self.degree}, smoothness={self.smoothness}>')
def eval_deriv(self, i, k, t, var=None):
if ((self.nvars is None) or ((self.nvars == 1) and (var is None))):
var = 0
elif ((self.nvars > 1) and (var is None)):
raise SystemError('scalar variable call to multi-variable splines')
coefs = np.zeros(self.coef_length[var])
coefs[i] = 1
return BSpline(self.knotpoints[var], coefs, self.degree[var]).derivative(k)(t) |
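# Usage sketch (comment only): cubic B-splines on breakpoints [0, 1, 2, 3]
# with the default smoothness of degree - 1; eval_deriv(i, k, t) evaluates
# the k-th derivative of the i-th basis function at t.
# basis = BSplineFamily([0, 1, 2, 3], degree=3)
# basis.eval_deriv(0, 1, 0.5) |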
class ToTensor(object):
def __init__(self, is_test=False):
self.is_test = is_test
def __call__(self, sample):
(image, depth) = (sample['image'], sample['depth'])
image = self.to_tensor(image)
if self.is_test:
depth = (self.to_tensor(depth).float() / 1000)
else:
depth = (self.to_tensor(depth).float() * 10)
return {'image': image, 'depth': depth}
def to_tensor(self, pic):
if (not (_is_pil_image(pic) or _is_numpy_image(pic))):
raise TypeError('pic should be PIL Image or ndarray. Got {}'.format(type(pic)))
if isinstance(pic, np.ndarray):
img = torch.from_numpy(pic.transpose((2, 0, 1)))
return img.float().div(255)
if ((accimage is not None) and isinstance(pic, accimage.Image)):
nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
pic.copyto(nppic)
return torch.from_numpy(nppic)
if (pic.mode == 'I'):
img = torch.from_numpy(np.array(pic, np.int32, copy=False))
elif (pic.mode == 'I;16'):
img = torch.from_numpy(np.array(pic, np.int16, copy=False))
else:
img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
if (pic.mode == 'YCbCr'):
nchannel = 3
elif (pic.mode == 'I;16'):
nchannel = 1
else:
nchannel = len(pic.mode)
img = img.view(pic.size[1], pic.size[0], nchannel)
img = img.transpose(0, 1).transpose(0, 2).contiguous()
if isinstance(img, torch.ByteTensor):
return img.float().div(255)
else:
return img |
class F8_RepoData(FC6_RepoData):
removedKeywords = FC6_RepoData.removedKeywords
removedAttrs = FC6_RepoData.removedAttrs
def __init__(self, *args, **kwargs):
FC6_RepoData.__init__(self, *args, **kwargs)
self.cost = kwargs.get('cost', None)
self.includepkgs = kwargs.get('includepkgs', [])
self.excludepkgs = kwargs.get('excludepkgs', [])
def _getArgsAsStr(self):
retval = FC6_RepoData._getArgsAsStr(self)
if self.cost:
retval += (' --cost=%s' % self.cost)
if self.includepkgs:
retval += (' --includepkgs="%s"' % ','.join(self.includepkgs))
if self.excludepkgs:
retval += (' --excludepkgs="%s"' % ','.join(self.excludepkgs))
return retval |
def test_django_assert_num_queries_output(django_pytester: DjangoPytester) -> None:
django_pytester.create_test_module('\n from django.contrib.contenttypes.models import ContentType\n import pytest\n\n @pytest.mark.django_db\n def test_queries(django_assert_num_queries):\n with django_assert_num_queries(1):\n list(ContentType.objects.all())\n ContentType.objects.count()\n ')
result = django_pytester.runpytest_subprocess('--tb=short')
result.stdout.fnmatch_lines(['*Expected to perform 1 queries but 2 were done*'])
assert (result.ret == 1) |
class UT_HAR_BiLSTM(nn.Module):
def __init__(self, hidden_dim=64):
super(UT_HAR_BiLSTM, self).__init__()
self.lstm = nn.LSTM(90, hidden_dim, num_layers=1, bidirectional=True)
self.fc = nn.Linear(hidden_dim, 7)
def forward(self, x):
x = x.view((- 1), 250, 90)
x = x.permute(1, 0, 2)
(_, (ht, ct)) = self.lstm(x)
outputs = self.fc(ht[(- 1)])
return outputs |
def test_const_connect_nested_struct_signal_to_struct():
@bitstruct
class SomeMsg1():
a: Bits8
b: Bits32
@bitstruct
class SomeMsg2():
a: SomeMsg1
b: Bits32
class Top(Component):
def construct(s):
s.out = OutPort(SomeMsg2)
connect(s.out, SomeMsg2(SomeMsg1(1, 2), 3))
x = Top()
x.elaborate()
x.apply(GenDAGPass())
x.apply(DynamicSchedulePass())
x.apply(PrepareSimPass(print_line_trace=False))
x.sim_reset()
x.sim_tick()
assert (x.out == SomeMsg2(SomeMsg1(1, 2), 3)) |
def main(_):
assert FLAGS.checkpoint_dir, '--checkpoint_dir is required'
if (not os.path.isfile(FLAGS.index_file)):
print('index pickle file {} does not exist'.format(FLAGS.index_file))
return
if (not os.path.isfile(FLAGS.bshape_base_file)):
print('bshape base file {} does not exist'.format(FLAGS.bshape_base_file))
return
if (not os.path.isdir(FLAGS.image_dir)):
print('image dir {} does not exist'.format(FLAGS.image_dir))
return
if (not os.path.isdir(FLAGS.checkpoint_dir)):
print('checkpoint dir {} does not exist'.format(FLAGS.checkpoint_dir))
return
if (not tf.gfile.IsDirectory(FLAGS.output_dir)):
tf.logging.info('Creating output dir : %s', FLAGS.output_dir)
tf.gfile.MakeDirs(FLAGS.output_dir)
image_files = glob.glob((FLAGS.image_dir + '/*.bmp'))
print('# images : {}'.format(len(image_files)))
try:
index_file = open(FLAGS.index_file, 'rb')  # pickle payloads must be read in binary mode
except IOError:
return
index = pickle.load(index_file)
index = index.astype(int)
(base_v, base_f) = load_obj(FLAGS.bshape_base_file)
config = ModelConfig()
model = Module(config, mode='inference')
model.build()
saver = tf.train.Saver()
coef = open(FLAGS.save, 'w')
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
sess.run(tf.global_variables_initializer())
ckpt = tf.train.get_checkpoint_state(checkpoint_dir=FLAGS.checkpoint_dir)
print('ckpt model checkpoint path {}'.format(ckpt.model_checkpoint_path))
if (ckpt and ckpt.model_checkpoint_path):
saver.restore(sess, ckpt.model_checkpoint_path)
print('Restored')
else:
return
for ii in image_files:
if (not os.path.isfile(ii)):
print('image file {} does not exist'.format(ii))
continue
try:
img = Image.open(ii)
except IOError:
continue
img = img.convert(mode='L')
prediction = np.squeeze(sess.run(model.prediction, feed_dict={model.image_feed: img.tobytes()}))
pca_coef = np.squeeze(sess.run(model.pca_coef, feed_dict={model.image_feed: img.tobytes()}))
for jj in pca_coef:
coef.write('{} '.format(jj))
coef.write('\n')
prediction = prediction.reshape((int((len(prediction) / 3)), 3))
deform_v = base_v
deform_v[index] = prediction
output_fname = (((FLAGS.output_dir + '/') + ii[len(FLAGS.image_dir):(- 4)]) + '_inference.obj')
print('output {}'.format(output_fname))
plot_obj(deform_v, base_f, output_fname)
plot_ply(deform_v, base_f, (output_fname[:(- 4)] + '.ply')) |
@with_fixtures(WebFixture, FileUploadInputFixture)
def test_prevent_duplicate_upload_js(web_fixture, file_upload_input_fixture):
fixture = file_upload_input_fixture
web_fixture.reahl_server.set_app(fixture.new_wsgi_app(enable_js=True))
browser = web_fixture.driver_browser
error_locator = XPath.span().including_text('uploaded files should all have different names')
def error_is_visible():
return browser.is_visible(error_locator)
browser.open('/')
browser.type(XPath.input_labelled('Choose file(s)'), fixture.file_to_upload1.name)
browser.wait_for_not(error_is_visible)
browser.type(XPath.input_labelled('Choose file(s)'), fixture.file_to_upload2.name)
browser.wait_for_not(error_is_visible)
with web_fixture.reahl_server.paused():
browser.type(XPath.input_labelled('Choose file(s)'), fixture.file_to_upload1.name)
assert (not fixture.upload_file_is_queued(fixture.file_to_upload1.name))
browser.wait_for(error_is_visible)
browser.click(XPath.button_labelled('Remove', filename=fixture.file_to_upload2_name))
browser.wait_for_not(error_is_visible) |
def test_multiple_timers(minimal_conf_noscreen, manager_nospawn):
config = minimal_conf_noscreen
config.screens = [libqtile.config.Screen(top=libqtile.bar.Bar([TimerWidget(10)], 10))]
manager_nospawn.start(config)
assert (manager_nospawn.c.widget['timerwidget'].get_active_timers() == 0)
manager_nospawn.c.widget['timerwidget'].set_timer1()
manager_nospawn.c.widget['timerwidget'].set_timer2()
assert (manager_nospawn.c.widget['timerwidget'].get_active_timers() == 2)
manager_nospawn.c.widget['timerwidget'].cancel_timer1()
assert (manager_nospawn.c.widget['timerwidget'].get_active_timers() == 1)
manager_nospawn.c.widget['timerwidget'].cancel_timer2()
assert (manager_nospawn.c.widget['timerwidget'].get_active_timers() == 0)
manager_nospawn.c.widget['timerwidget'].set_timer1()
manager_nospawn.c.widget['timerwidget'].set_timer2()
assert (manager_nospawn.c.widget['timerwidget'].get_active_timers() == 2)
manager_nospawn.c.widget['timerwidget'].eval('self.finalize()')
assert (manager_nospawn.c.widget['timerwidget'].get_active_timers() == 0) |
def get_preprocess_fn(data_args: argparse.Namespace, processor: Union[(PangoCairoTextRenderer, PreTrainedTokenizerFast)], modality: Modality, split: Split, column_names: List[str]):
question_column_name = ('question' if ('question' in column_names) else column_names[0])
context_column_name = ('context' if ('context' in column_names) else column_names[1])
answer_column_name = ('answers' if ('answers' in column_names) else column_names[2])
if (modality == Modality.IMAGE):
transforms = get_transforms(do_resize=True, size=(processor.pixels_per_patch, (processor.pixels_per_patch * processor.max_seq_length)))
def prepare_train_image_features(examples):
encodings = [processor(text=(q.strip(), c.strip()), stride=data_args.doc_stride, return_overflowing_patches=True, return_offset_mapping=True, text_a_max_length=data_args.question_max_length, rtl=data_args.is_rtl_language) for (q, c) in zip(examples[question_column_name], examples[context_column_name])]
sample_mapping = []
full_encodings = []
for (sample_id, encoding) in enumerate(encodings):
full_encodings.append(encoding)
sample_mapping.append(sample_id)
if (encoding.overflowing_patches is not None):
for overflow_encoding in encoding.overflowing_patches:
full_encodings.append(overflow_encoding)
sample_mapping.append(sample_id)
processed_examples = {}
processed_examples['pixel_values'] = [transforms(Image.fromarray(encoding.pixel_values)) for encoding in full_encodings]
processed_examples['attention_mask'] = [get_attention_mask(encoding.num_text_patches, seq_length=data_args.max_seq_length) for encoding in full_encodings]
offset_mapping = [encoding.offset_mapping for encoding in full_encodings]
processed_examples['start_positions'] = []
processed_examples['end_positions'] = []
for (i, offsets) in enumerate(offset_mapping):
cls_index = processor.max_seq_length
sample_index = sample_mapping[i]
answers = examples[answer_column_name][sample_index]
if (len(answers['answer_start']) == 0):
processed_examples['start_positions'].append(cls_index)
processed_examples['end_positions'].append(cls_index)
else:
start_char = answers['answer_start'][0]
char_idx = 0
while (examples[context_column_name][sample_index][char_idx] in string.whitespace):
start_char -= 1
char_idx += 1
text_key = ('text' if ('text' in answers) else 'answer_text')
end_char = (start_char + len(answers[text_key][0]))
token_start_index = 0
while (offsets[token_start_index] != (0, 0)):
token_start_index += 1
token_start_index += 1
token_end_index = (len(processed_examples['attention_mask'][i]) - 1)
while ((offsets[token_end_index] == (0, 0)) or (offsets[token_end_index] == ((- 1), (- 1)))):
token_end_index -= 1
if (not (data_args.is_rtl_language or processor.is_rtl(examples[context_column_name][sample_index]))):
if (not ((offsets[token_start_index][0] <= start_char) and (offsets[token_end_index][1] >= end_char))):
processed_examples['start_positions'].append(cls_index)
processed_examples['end_positions'].append(cls_index)
else:
while ((token_start_index < len(offsets)) and (offsets[token_start_index][0] <= start_char)):
token_start_index += 1
processed_examples['start_positions'].append((token_start_index - 1))
while (offsets[token_end_index][1] >= end_char):
token_end_index -= 1
processed_examples['end_positions'].append((token_end_index + 1))
elif (not ((offsets[token_end_index][1] <= start_char) and (offsets[token_start_index][0] >= end_char))):
processed_examples['start_positions'].append(cls_index)
processed_examples['end_positions'].append(cls_index)
else:
while ((token_start_index < len(offsets)) and (offsets[token_start_index][0] >= end_char)):
token_start_index += 1
processed_examples['start_positions'].append((token_start_index - 1))
while ((offsets[token_end_index][1] <= start_char) and (offsets[token_end_index] != (0, 0))):
token_end_index -= 1
processed_examples['end_positions'].append((token_end_index + 1))
return processed_examples
def prepare_validation_image_features(examples):
encodings = [processor(text=(q.strip(), c.strip()), stride=data_args.doc_stride, return_overflowing_patches=True, return_offset_mapping=True, text_a_max_length=data_args.question_max_length, rtl=data_args.is_rtl_language) for (q, c) in zip(examples[question_column_name], examples[context_column_name])]
sample_mapping = []
full_encodings = []
for (sample_id, encoding) in enumerate(encodings):
full_encodings.append(encoding)
sample_mapping.append(sample_id)
if (encoding.overflowing_patches is not None):
for overflow_encoding in encoding.overflowing_patches:
full_encodings.append(overflow_encoding)
sample_mapping.append(sample_id)
processed_examples = {}
processed_examples['pixel_values'] = np.array([transforms(Image.fromarray(encoding.pixel_values)).numpy() for encoding in full_encodings])
processed_examples['attention_mask'] = np.array([get_attention_mask(encoding.num_text_patches, seq_length=data_args.max_seq_length).numpy() for encoding in full_encodings])
processed_examples['offset_mapping'] = [encoding.offset_mapping for encoding in full_encodings]
processed_examples['example_id'] = []
for (i, offsets) in enumerate(processed_examples['offset_mapping']):
sample_index = sample_mapping[i]
processed_examples['example_id'].append(examples['id'][sample_index])
patch_start_idx = 0
while (offsets[patch_start_idx] != (0, 0)):
offsets[patch_start_idx] = None
patch_start_idx += 1
offsets[patch_start_idx] = None
patch_end_index = (len(processed_examples['attention_mask'][i]) - 1)
while (offsets[patch_end_index] == (0, 0)):
offsets[patch_end_index] = None
patch_end_index -= 1
return processed_examples
preprocess_fn = (prepare_train_image_features if (split == Split.TRAIN) else prepare_validation_image_features)
elif (modality == Modality.TEXT):
pad_on_right = (processor.padding_side == 'right')
def prepare_train_text_features(examples):
tokenized_examples = processor(examples[(question_column_name if pad_on_right else context_column_name)], examples[(context_column_name if pad_on_right else question_column_name)], truncation=('only_second' if pad_on_right else 'only_first'), max_length=data_args.max_seq_length, stride=data_args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding=('max_length' if data_args.pad_to_max_length else False))
sample_mapping = tokenized_examples.pop('overflow_to_sample_mapping')
offset_mapping = tokenized_examples.pop('offset_mapping')
tokenized_examples['start_positions'] = []
tokenized_examples['end_positions'] = []
for (i, offsets) in enumerate(offset_mapping):
input_ids = tokenized_examples['input_ids'][i]
cls_index = input_ids.index(processor.cls_token_id)
sequence_ids = tokenized_examples.sequence_ids(i)
sample_index = sample_mapping[i]
answers = examples[answer_column_name][sample_index]
if (len(answers['answer_start']) == 0):
tokenized_examples['start_positions'].append(cls_index)
tokenized_examples['end_positions'].append(cls_index)
else:
start_char = answers['answer_start'][0]
end_char = (start_char + len(answers['text'][0]))
token_start_index = 0
while (sequence_ids[token_start_index] != (1 if pad_on_right else 0)):
token_start_index += 1
token_end_index = (len(input_ids) - 1)
while (sequence_ids[token_end_index] != (1 if pad_on_right else 0)):
token_end_index -= 1
if (not ((offsets[token_start_index][0] <= start_char) and (offsets[token_end_index][1] >= end_char))):
tokenized_examples['start_positions'].append(cls_index)
tokenized_examples['end_positions'].append(cls_index)
else:
while ((token_start_index < len(offsets)) and (offsets[token_start_index][0] <= start_char)):
token_start_index += 1
tokenized_examples['start_positions'].append((token_start_index - 1))
while (offsets[token_end_index][1] >= end_char):
token_end_index -= 1
tokenized_examples['end_positions'].append((token_end_index + 1))
return tokenized_examples
def prepare_validation_text_features(examples):
tokenized_examples = processor(examples[(question_column_name if pad_on_right else context_column_name)], examples[(context_column_name if pad_on_right else question_column_name)], truncation=('only_second' if pad_on_right else 'only_first'), max_length=data_args.max_seq_length, stride=data_args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding=('max_length' if data_args.pad_to_max_length else False))
sample_mapping = tokenized_examples.pop('overflow_to_sample_mapping')
tokenized_examples['example_id'] = []
for i in range(len(tokenized_examples['input_ids'])):
sequence_ids = tokenized_examples.sequence_ids(i)
context_index = (1 if pad_on_right else 0)
sample_index = sample_mapping[i]
tokenized_examples['example_id'].append(examples['id'][sample_index])
tokenized_examples['offset_mapping'][i] = [(o if (sequence_ids[k] == context_index) else None) for (k, o) in enumerate(tokenized_examples['offset_mapping'][i])]
return tokenized_examples
preprocess_fn = (prepare_train_text_features if (split == Split.TRAIN) else prepare_validation_text_features)
else:
raise ValueError(f'Modality {modality} not supported.')
return preprocess_fn |
def test_keys_args_parses_to_dict():
out = pypyr.parser.keys.get_parsed_context(['value 1', 'value 2', 'value3'])
assert out['value 1'], 'value 1 should be True'
assert out['value 2'], 'value 2 should be True'
assert out['value3'], 'value3 should be True'
assert (len(out) == 3), '3 items expected' |
def test_connect_wr_x_conn_As_rd_y_conn_A_mark_writer():
class Top(ComponentLevel3):
def construct(s):
s.x = Wire(Bits24)
s.A = Wire(Bits32)
s.y = Wire(Bits32)
connect(s.A[8:32], s.x)
connect(s.A, s.y)
def up_wr_x():
s.x = Bits24(1193046)
def up_rd_y():
assert (s.y == 0x12345600)  # s.x == 0x123456 drives A[8:32]; A[0:8] stays 0
_test_model(Top) |
class TestShadowRoot(PyScriptTest):
@pytest.mark.skip('NEXT: Element interface is gone. Replace with PyDom')
def test_reachable_shadow_root(self):
self.pyscript_run('\n <script>\n // reason to wait for py-script is that it\'s the entry point for\n // all patches and the MutationObserver, otherwise being this a synchronous\n // script the constructor gets instantly invoked at the node before\n // py-script gets a chance to initialize itself.\n customElements.whenDefined(\'py-script\').then(() => {\n customElements.define(\'s-r\', class extends HTMLElement {\n constructor() {\n super().attachShadow({mode: \'closed\'}).innerHTML =\n \'<div id="shadowed">OK</div>\';\n }\n });\n });\n </script>\n <s-r></s-r>\n <script type="py">\n import js\n js.console.log(Element("shadowed").innerHtml)\n </script>\n ')
assert (self.console.log.lines[(- 1)] == 'OK') |
def create_parquet_in_tempdir(filename: str, num_rows: int, num_features: int, num_classes: int=2, num_partitions: int=1) -> Tuple[(str, str)]:
temp_dir = tempfile.mkdtemp()
path = os.path.join(temp_dir, filename)
create_parquet(path, num_rows=num_rows, num_features=num_features, num_classes=num_classes, num_partitions=num_partitions)
return (temp_dir, path) |
class WafToApiGatewayConstruct(Construct):
def __init__(self, scope: Construct, id: str, api: apigateway.RestApi, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
web_acl = waf.CfnWebACL(self, 'ProductApiGatewayWebAcl', scope='REGIONAL', default_action=waf.CfnWebACL.DefaultActionProperty(allow={}), name=f'{id}-Waf', visibility_config=waf.CfnWebACL.VisibilityConfigProperty(sampled_requests_enabled=True, cloud_watch_metrics_enabled=True, metric_name='ProductApiGatewayWebAcl'), rules=[waf.CfnWebACL.RuleProperty(name='Product-AWSManagedRulesCommonRuleSet', priority=0, override_action={'none': {}}, statement=waf.CfnWebACL.StatementProperty(managed_rule_group_statement=waf.CfnWebACL.ManagedRuleGroupStatementProperty(name='AWSManagedRulesCommonRuleSet', vendor_name='AWS')), visibility_config=waf.CfnWebACL.VisibilityConfigProperty(sampled_requests_enabled=True, cloud_watch_metrics_enabled=True, metric_name='Product-AWSManagedRulesCommonRuleSet')), waf.CfnWebACL.RuleProperty(name='Product-AWSManagedRulesAmazonIpReputationList', priority=1, override_action={'none': {}}, statement=waf.CfnWebACL.StatementProperty(managed_rule_group_statement=waf.CfnWebACL.ManagedRuleGroupStatementProperty(name='AWSManagedRulesAmazonIpReputationList', vendor_name='AWS')), visibility_config=waf.CfnWebACL.VisibilityConfigProperty(sampled_requests_enabled=True, cloud_watch_metrics_enabled=True, metric_name='Product-AWSManagedRulesAmazonIpReputationList')), waf.CfnWebACL.RuleProperty(name='Product-AWSManagedRulesAnonymousIpList', priority=2, override_action={'none': {}}, statement=waf.CfnWebACL.StatementProperty(managed_rule_group_statement=waf.CfnWebACL.ManagedRuleGroupStatementProperty(name='AWSManagedRulesAnonymousIpList', vendor_name='AWS')), visibility_config=waf.CfnWebACL.VisibilityConfigProperty(sampled_requests_enabled=True, cloud_watch_metrics_enabled=True, metric_name='Product-AWSManagedRulesAnonymousIpList')), waf.CfnWebACL.RuleProperty(name='Product-AWSManagedRulesKnownBadInputsRuleSet', priority=3, override_action={'none': {}}, statement=waf.CfnWebACL.StatementProperty(managed_rule_group_statement=waf.CfnWebACL.ManagedRuleGroupStatementProperty(name='AWSManagedRulesKnownBadInputsRuleSet', vendor_name='AWS')), visibility_config=waf.CfnWebACL.VisibilityConfigProperty(sampled_requests_enabled=True, cloud_watch_metrics_enabled=True, metric_name='Product-AWSManagedRulesKnownBadInputsRuleSet'))])
waf.CfnWebACLAssociation(self, 'ApiGatewayWafAssociation', resource_arn=api.deployment_stage.stage_arn, web_acl_arn=web_acl.attr_arn) |
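# A hedged sketch of wiring WafToApiGatewayConstruct above into a CDK v2 app; the
# stack name and the RestApi setup are illustrative assumptions, not from the source.
from aws_cdk import App, Stack
from aws_cdk import aws_apigateway as apigateway
from constructs import Construct

class ProductApiStack(Stack):
    def __init__(self, scope: Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        api = apigateway.RestApi(self, 'ProductApi')
        api.root.add_method('GET')  # a deployed RestApi needs at least one method
        WafToApiGatewayConstruct(self, 'ProductApiWaf', api=api)

app = App()
ProductApiStack(app, 'ProductApiStack')
app.synth() |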
class MakeKeysStrTests(unittest.TestCase):
def test_bytes(self):
expected_string = 'key_1,key_2'
keys = [b'key_1', b'key_2']
self.assertEqual(expected_string, make_keys_str(keys))
def test_str(self):
expected_string = 'key_1,key_2'
keys = ['key_1', 'key_2']
self.assertEqual(expected_string, make_keys_str(keys)) |
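# A sketch of a make_keys_str that satisfies both tests above: it joins keys with
# commas, decoding bytes first. This is inferred from the assertions, not the
# library's actual implementation.
from typing import Iterable, Union

def make_keys_str(keys: Iterable[Union[bytes, str]]) -> str:
    return ','.join(k.decode() if isinstance(k, bytes) else k for k in keys) |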
class TypeguardTest(TestCase):
def test_trivial_fail(self):
with self.assertRaises(Exception):
fun(42)
def test_success(self):
fun(np.random.randn(2, 2))
def test_fail_shape(self):
with self.assertRaises(Exception):
fun(np.random.randn(3, 2))
def test_fail_dtype(self):
with self.assertRaises(Exception):
fun(np.random.randn(2, 2).astype(int)) |
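# The tests above never show fun; this is one hedged guess at a checker consistent
# with them, accepting only 2x2 float arrays. The real fun presumably relies on an
# annotation-based runtime checker rather than manual asserts.
import numpy as np

def fun(x):
    if not isinstance(x, np.ndarray):
        raise TypeError(f'expected ndarray, got {type(x)!r}')  # rejects fun(42)
    if x.shape != (2, 2):
        raise ValueError(f'expected shape (2, 2), got {x.shape}')  # rejects (3, 2)
    if not np.issubdtype(x.dtype, np.floating):
        raise TypeError(f'expected float dtype, got {x.dtype}')  # rejects int arrays
    return x |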
@pytest.mark.parametrize('prefer_grpc', [False, True])
def test_points_crud(prefer_grpc):
client = QdrantClient(prefer_grpc=prefer_grpc, timeout=TIMEOUT)
    client.recreate_collection(
        collection_name=COLLECTION_NAME,
        vectors_config=VectorParams(size=DIM, distance=Distance.DOT),
        timeout=TIMEOUT,
    )
    client.upsert(
        collection_name=COLLECTION_NAME,
        points=[PointStruct(id=123, payload={'test': 'value'}, vector=np.random.rand(DIM).tolist())],
        wait=True,
    )
    client.upsert(
        collection_name=COLLECTION_NAME,
        points=Batch(
            ids=[3, 4],
            vectors=[np.random.rand(DIM).tolist(), np.random.rand(DIM).tolist()],
            payloads=[{'test': 'value', 'test2': 'value2'}, {'test': 'value', 'test2': {'haha': '???'}}],
        ),
    )
points = client.retrieve(COLLECTION_NAME, ids=[123])
print('read a single point', points)
client.set_payload(collection_name=COLLECTION_NAME, payload={'test2': ['value2', 'value3']}, points=[123])
client.delete(collection_name=COLLECTION_NAME, points_selector=PointIdsList(points=[123])) |
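# A hedged continuation of the CRUD session above, reusing its client, COLLECTION_NAME
# and DIM. Both calls are documented qdrant-client APIs: vector search, then scrolling
# through the points remaining after the delete.
hits = client.search(collection_name=COLLECTION_NAME, query_vector=np.random.rand(DIM).tolist(), limit=2)
print('nearest points', hits)
records, next_page = client.scroll(collection_name=COLLECTION_NAME, limit=10, with_payload=True)
print('remaining points', records) |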
def test_wild_extra_targets(debug_ctx, debug_trail, acc_schema):
    dumper_getter = make_dumper_getter(
        shape=shape(TestField('a', acc_schema.accessor_maker('a', is_required=True))),
        name_layout=OutputNameLayout(
            crown=OutDictCrown({'a': OutFieldCrown('a')}, sieves={}),
            extra_move=ExtraTargets(('b',)),
        ),
        debug_trail=debug_trail,
        debug_ctx=debug_ctx,
    )
pytest.raises(ValueError, dumper_getter).match(full_match_regex_str("ExtraTargets ['b'] are attached to non-existing fields")) |
@register_model
def randformer_s36(pretrained=False, **kwargs):
    model = MetaFormer(
        depths=[6, 6, 18, 6],
        dims=[64, 128, 320, 512],
        token_mixers=[nn.Identity, nn.Identity, RandomMixing, partial(RandomMixing, num_tokens=49)],
        norm_layers=partial(LayerNormGeneral, normalized_dim=(1, 2, 3), eps=1e-06, bias=False),
        **kwargs,
    )
model.default_cfg = default_cfgs['randformer_s36']
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(url=model.default_cfg['url'], map_location='cpu', check_hash=True)
model.load_state_dict(state_dict)
return model |
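# Hedged inference sketch for randformer_s36 above; the 224x224 input size and
# 1000-class head are assumptions based on typical MetaFormer configs, not
# confirmed by the snippet.
import torch

model = randformer_s36(pretrained=False)
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))  # (batch, channels, H, W)
print(logits.shape)  # expected (1, num_classes) |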
class TestIgnoreWorkbookCorruption(TestCase):
def test_not_corrupted(self):
with self.assertRaises(Exception) as context:
xlrd.open_workbook(from_sample('corrupted_error.xls'))
self.assertTrue(('Workbook corruption' in str(context.exception)))
xlrd.open_workbook(from_sample('corrupted_error.xls'), ignore_workbook_corruption=True) |
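# A short sketch of the behavior the test above exercises: the same corrupted file
# either raises or opens depending on ignore_workbook_corruption. from_sample is the
# test suite's fixture helper, assumed importable alongside it.
import xlrd

try:
    xlrd.open_workbook(from_sample('corrupted_error.xls'))
except Exception as exc:
    print('strict open failed:', exc)  # 'Workbook corruption' message expected
book = xlrd.open_workbook(from_sample('corrupted_error.xls'), ignore_workbook_corruption=True)
print(book.nsheets) |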
class RejectSponsorshipApplicationUseCaseTests(TestCase):
def setUp(self):
self.notifications = [Mock(), Mock()]
self.use_case = use_cases.RejectSponsorshipApplicationUseCase(self.notifications)
self.user = baker.make(settings.AUTH_USER_MODEL)
self.sponsorship = baker.make(Sponsorship)
def test_update_sponsorship_as_rejected(self):
self.use_case.execute(self.sponsorship)
self.sponsorship.refresh_from_db()
today = timezone.now().date()
self.assertEqual(self.sponsorship.rejected_on, today)
self.assertEqual(self.sponsorship.status, Sponsorship.REJECTED)
def test_send_notifications_using_sponsorship(self):
self.use_case.execute(self.sponsorship)
for n in self.notifications:
n.notify.assert_called_once_with(request=None, sponsorship=self.sponsorship)
def test_build_use_case_with_correct_notifications(self):
uc = use_cases.RejectSponsorshipApplicationUseCase.build()
self.assertEqual(len(uc.notifications), 2)
self.assertIsInstance(uc.notifications[0], RejectedSponsorshipNotificationToPSF)
self.assertIsInstance(uc.notifications[1], RejectedSponsorshipNotificationToSponsors) |
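# An inferred sketch of the use case under test, reconstructed only from the
# assertions above; the real implementation may differ (e.g. extra notification
# context or email handling).
from django.utils import timezone

class RejectSponsorshipApplicationUseCase:
    def __init__(self, notifications):
        self.notifications = notifications

    def execute(self, sponsorship, request=None):
        sponsorship.status = Sponsorship.REJECTED      # asserted by the status test
        sponsorship.rejected_on = timezone.now().date()
        sponsorship.save()                             # tests refresh_from_db afterwards
        for notification in self.notifications:
            notification.notify(request=request, sponsorship=sponsorship)

    @classmethod
    def build(cls):
        return cls([RejectedSponsorshipNotificationToPSF(), RejectedSponsorshipNotificationToSponsors()]) |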
class GAN_decoder_AE(nn.Module):
def __init__(self, params):
super(GAN_decoder_AE, self).__init__()
input_dim_b = params['input_dim_b']
ch = params['ch']
n_gen_res_blk = params['n_gen_res_blk']
n_gen_front_blk = params['n_gen_front_blk']
        res_dropout_ratio = params.get('res_dropout_ratio', 0)
        neww = params.get('neww', 64)
        newh = params.get('newh', 64)
tch = ch
decB = []
decA = []
decB += [LinUnsRes_cluster(128, neww, newh)]
decA += [LinUnsRes_cluster(128, neww, newh)]
for i in range(0, n_gen_res_blk):
decB += [INSResBlock(tch, tch, dropout=res_dropout_ratio)]
decA += [INSResBlock(tch, tch, dropout=res_dropout_ratio)]
for i in range(0, (n_gen_front_blk - 1)):
decB += [LeakyReLUConvTranspose2d_2(tch, (tch // 2), kernel_size=3, stride=1, padding=1, output_padding=0)]
decA += [LeakyReLUConvTranspose2d_2(tch, (tch // 2), kernel_size=3, stride=1, padding=1, output_padding=0)]
tch = (tch // 2)
decB += [nn.ConvTranspose2d(tch, input_dim_b, kernel_size=1, stride=1, padding=0)]
decA += [nn.ConvTranspose2d(tch, input_dim_b, kernel_size=1, stride=1, padding=0)]
decB += [nn.Tanh()]
decA += [nn.Tanh()]
self.decode_B = nn.Sequential(*decB)
self.decode_B.apply(gaussian_weights_init)
self.decode_A = nn.Sequential(*decA)
self.decode_A.apply(gaussian_weights_init)
def forward(self, x_aa, x_bb):
out1 = self.decode_A(x_aa)
out2 = self.decode_B(x_bb)
return (out1, out2) |
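# Hedged construction sketch for GAN_decoder_AE above; only the params keys read by
# __init__ are known, and ch=128 is chosen to match the hardcoded LinUnsRes_cluster(128, ...).
# The latent tensor layout expected by LinUnsRes_cluster is not shown, so the forward
# call below is illustrative only.
import torch

params = {'input_dim_b': 3, 'ch': 128, 'n_gen_res_blk': 3, 'n_gen_front_blk': 3, 'res_dropout_ratio': 0.1, 'neww': 64, 'newh': 64}
decoder = GAN_decoder_AE(params)
z_a = torch.randn(4, 128)  # assumed latent shape; adjust to LinUnsRes_cluster's contract
z_b = torch.randn(4, 128)
out_a, out_b = decoder(z_a, z_b)  # Tanh-bounded reconstructions for domains A and B |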
class SpectralAudioParser():
def __init__(self, input_audio, offset, frames_per_second, filters):
if (len(filters) < 1):
raise RuntimeError('When using input_audio, at least 1 filter must be specified')
        pipe = subprocess.Popen(
            ['ffmpeg', '-i', input_audio, '-f', 's16le', '-acodec', 'pcm_s16le',
             '-ar', str(SAMPLERATE), '-ac', '1', '-'],
            stdout=subprocess.PIPE,
            bufsize=(10 ** 8),
        )
self.audio_samples = np.array([], dtype=np.int16)
while True:
buf = pipe.stdout.read(SAMPLERATE)
self.audio_samples = np.append(self.audio_samples, np.frombuffer(buf, dtype=np.int16))
if (len(buf) < SAMPLERATE):
break
        if (len(self.audio_samples) == 0):
raise RuntimeError('Audio samples are empty, assuming load failed')
self.duration = (len(self.audio_samples) / SAMPLERATE)
logger.debug(f'initialized audio file {input_audio}, samples read: {len(self.audio_samples)}, total duration: {self.duration}s')
self.offset = offset
if (offset > self.duration):
            raise RuntimeError(f'Audio offset set at {offset}s but input audio is only {self.duration}s long')
self.window_size = int(((1 / frames_per_second) * SAMPLERATE))
self.filters = filters
steps = int(((self.duration - self.offset) * frames_per_second))
interval = (1 / frames_per_second)
maxima = {}
time_steps = (np.linspace(0, steps, num=steps) * interval)
for t in time_steps:
sample_offset = int((t * SAMPLERATE))
cur_maxima = bp_filtered(self.audio_samples[sample_offset:(sample_offset + self.window_size)], filters)
for key in cur_maxima:
if (key in maxima):
maxima[key] = max(maxima[key], cur_maxima[key])
else:
maxima[key] = cur_maxima[key]
self.band_maxima = maxima
logger.debug(f'initialized band maxima for {len(filters)} filters: {self.band_maxima}')
def get_params(self, t) -> typing.Dict[(str, float)]:
sample_offset = int(((t * SAMPLERATE) + (self.offset * SAMPLERATE)))
logger.debug(f'Analyzing audio at {(self.offset + t)}s')
if (sample_offset < len(self.audio_samples)):
window_samples = self.audio_samples[sample_offset:(sample_offset + self.window_size)]
if (len(window_samples) < self.window_size):
logger.debug(f'Warning: sample offset is out of range at time offset {(t + self.offset)}s. Returning null result')
return {}
return bp_filtered_norm(window_samples, self.filters, self.band_maxima)
else:
            logger.debug('Warning: Audio input has ended. Returning null result')
return {}
def get_duration(self):
return self.duration |
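# Hedged usage sketch for SpectralAudioParser above; the filter spec format is an
# assumption (bp_filtered/bp_filtered_norm are not shown), song.mp3 is a placeholder,
# and ffmpeg must be on PATH for the constructor's decode pipe to work.
fps = 24
parser = SpectralAudioParser('song.mp3', offset=0.0, frames_per_second=fps, filters=['low', 'mid', 'high'])
for frame in range(int(parser.get_duration() * fps)):
    band_levels = parser.get_params(frame / fps)  # {} once the audio runs out
    if band_levels:
        print(frame, band_levels) |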
def setup_checkpoint_config(args):
save_checkpoints_config = {}
    save_checkpoints_config['model_state_dict'] = bool(args.checkpoint_save_model)
    save_checkpoints_config['optimizer_state_dict'] = bool(args.checkpoint_save_optim)
    save_checkpoints_config['train_metric_info'] = bool(args.checkpoint_save_train_metric)
    save_checkpoints_config['test_metric_info'] = bool(args.checkpoint_save_test_metric)
save_checkpoints_config['checkpoint_root_path'] = args.checkpoint_root_path
save_checkpoints_config['checkpoint_epoch_list'] = args.checkpoint_epoch_list
save_checkpoints_config['checkpoint_file_name_save_list'] = args.checkpoint_file_name_save_list
save_checkpoints_config['checkpoint_file_name_prefix'] = setup_checkpoint_file_name_prefix(args)
return save_checkpoints_config |
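# A small sketch showing the Namespace fields setup_checkpoint_config reads; the
# field values are illustrative, and setup_checkpoint_file_name_prefix is assumed
# to be importable alongside it (it may read further args fields not listed here).
from argparse import Namespace

args = Namespace(checkpoint_save_model=True, checkpoint_save_optim=False,
                 checkpoint_save_train_metric=True, checkpoint_save_test_metric=True,
                 checkpoint_root_path='./checkpoints', checkpoint_epoch_list=[10, 20],
                 checkpoint_file_name_save_list=['model'])
config = setup_checkpoint_config(args)
print(config['checkpoint_root_path'], config['model_state_dict']) |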