code stringlengths 281 23.7M |
|---|
# NOTE(review): the source had a bare `.testinfra_hosts(...)` line here — the
# `@pytest.mark` prefix appears to have been stripped during extraction and is
# restored below (pytest-testinfra marker form).  The host spec
# 'ansible://_bookworm' also looks truncated; confirm against the original.
@pytest.mark.testinfra_hosts('ansible://debian_bookworm', 'ansible://_bookworm')
def test_ansible_module_become(host):
    """`host.ansible(..., become=...)` must control privilege escalation
    independently of the local `host.sudo()` context."""
    user_name = host.user().name
    # Without become, the module runs as the connecting user.
    assert (host.ansible('shell', 'echo $USER', check=False)['stdout'] == user_name)
    # become=True escalates to root.
    assert (host.ansible('shell', 'echo $USER', check=False, become=True)['stdout'] == 'root')
    with host.sudo():
        assert (host.user().name == 'root')
        # Ansible module calls are not affected by the local sudo() context.
        assert (host.ansible('shell', 'echo $USER', check=False)['stdout'] == user_name)
        assert (host.ansible('shell', 'echo $USER', check=False, become=True)['stdout'] == 'root')
class IgnoreControl(ScalarControl):
    """A ScalarControl stand-in whose value can never be assigned.

    It matches no control kind, and assignment to 'value', 'name' or
    'type' after construction raises AttributeError.
    """

    def __init__(self, type, name, attrs, index=None):
        ScalarControl.__init__(self, type, name, attrs, index)
        # Goes through __setattr__, which allows any name but the guarded three.
        self._value = None

    def is_of_kind(self, kind):
        # An ignored control matches no kind at all.
        return False

    def __setattr__(self, name, value):
        if name == 'value':
            raise AttributeError("control '%s' is ignored, hence read-only" % self.name)
        if name in ('name', 'type'):
            raise AttributeError('%s attribute is readonly' % name)
        self.__dict__[name] = value
class TestSyntheticLocate():
    """LSCP regression tests on a synthetic 10x10 lattice network.

    setup_method simulates 100 clients and 5 candidate facilities snapped to
    a spaghetti network and precomputes the client->facility cost matrix; the
    pickled arrays under ./data/ are the expected solver outputs.
    """

    def setup_method(self) -> None:
        # Directory holding the pickled expected results.
        self.dirpath = os.path.join(os.path.dirname(__file__), './data/')
        lattice = spaghetti.regular_lattice((0, 0, 10, 10), 9, exterior=True)
        ntw = spaghetti.Network(in_data=lattice)
        gdf = spaghetti.element_as_gdf(ntw, arcs=True)
        # Buffered union of all arcs: the polygon the points are simulated in.
        street = geopandas.GeoDataFrame(geopandas.GeoSeries(gdf['geometry'].buffer(0.2).unary_union), crs=gdf.crs, columns=['geometry'])
        client_count = 100
        facility_count = 5
        # Fixed seeds keep the simulated points (and thus the pickles) stable.
        self.client_points = simulated_geo_points(street, needed=client_count, seed=5)
        self.facility_points = simulated_geo_points(street, needed=facility_count, seed=6)
        # Fresh network for snapping the simulated observation points.
        ntw = spaghetti.Network(in_data=lattice)
        ntw.snapobservations(self.client_points, 'clients', attribute=True)
        ntw.snapobservations(self.facility_points, 'facilities', attribute=True)
        self.clients_snapped = spaghetti.element_as_gdf(ntw, pp_name='clients', snapped=True)
        self.facilities_snapped = spaghetti.element_as_gdf(ntw, pp_name='facilities', snapped=True)
        # Network distances from every client to every candidate facility.
        self.cost_matrix = ntw.allneighbordistances(sourcepattern=ntw.pointpatterns['clients'], destpattern=ntw.pointpatterns['facilities'])

    def test_lscp_from_cost_matrix(self):
        """Solving from a cost matrix returns the LSCP instance itself."""
        lscp = LSCP.from_cost_matrix(self.cost_matrix, 10)
        result = lscp.solve(pulp.PULP_CBC_CMD(msg=False))
        assert isinstance(result, LSCP)

    def test_lscp_from_cost_matrix_no_results(self):
        """With results=False the solution arrays must not be populated."""
        lscp = LSCP.from_cost_matrix(self.cost_matrix, 10)
        result = lscp.solve(pulp.PULP_CBC_CMD(msg=False), results=False)
        assert isinstance(result, LSCP)
        with pytest.raises(AttributeError):
            result.cli2fac
        with pytest.raises(AttributeError):
            result.fac2cli

    def test_lscp_facility_client_array_from_cost_matrix(self):
        """fac2cli from a cost-matrix model matches the pickled expectation."""
        with open((self.dirpath + 'lscp_fac2cli.pkl'), 'rb') as f:
            lscp_objective = pickle.load(f)
        lscp = LSCP.from_cost_matrix(self.cost_matrix, 8)
        lscp = lscp.solve(pulp.PULP_CBC_CMD(msg=False))
        numpy.testing.assert_array_equal(numpy.array(lscp.fac2cli, dtype=object), numpy.array(lscp_objective, dtype=object))

    def test_lscp_client_facility_array_from_cost_matrix(self):
        """cli2fac from a cost-matrix model matches the pickled expectation."""
        with open((self.dirpath + 'lscp_cli2fac.pkl'), 'rb') as f:
            lscp_objective = pickle.load(f)
        lscp = LSCP.from_cost_matrix(self.cost_matrix, 8)
        lscp = lscp.solve(pulp.PULP_CBC_CMD(msg=False))
        numpy.testing.assert_array_equal(numpy.array(lscp.cli2fac, dtype=object), numpy.array(lscp_objective, dtype=object))

    def test_lscp_from_geodataframe(self):
        """Solving from snapped GeoDataFrames returns the LSCP instance."""
        lscp = LSCP.from_geodataframe(self.clients_snapped, self.facilities_snapped, 'geometry', 'geometry', 10)
        result = lscp.solve(pulp.PULP_CBC_CMD(msg=False))
        assert isinstance(result, LSCP)

    def test_lscp_facility_client_array_from_geodataframe(self):
        """fac2cli from a GeoDataFrame model matches the pickled expectation."""
        with open((self.dirpath + 'lscp_geodataframe_fac2cli.pkl'), 'rb') as f:
            lscp_objective = pickle.load(f)
        lscp = LSCP.from_geodataframe(self.clients_snapped, self.facilities_snapped, 'geometry', 'geometry', 8)
        lscp = lscp.solve(pulp.PULP_CBC_CMD(msg=False))
        numpy.testing.assert_array_equal(numpy.array(lscp.fac2cli, dtype=object), numpy.array(lscp_objective, dtype=object))

    def test_lscp_client_facility_array_from_geodataframe(self):
        """cli2fac from a GeoDataFrame model matches the pickled expectation."""
        with open((self.dirpath + 'lscp_geodataframe_cli2fac.pkl'), 'rb') as f:
            lscp_objective = pickle.load(f)
        lscp = LSCP.from_geodataframe(self.clients_snapped, self.facilities_snapped, 'geometry', 'geometry', 8)
        lscp = lscp.solve(pulp.PULP_CBC_CMD(msg=False))
        numpy.testing.assert_array_equal(numpy.array(lscp.cli2fac, dtype=object), numpy.array(lscp_objective, dtype=object))

    def test_lscp_preselected_facility_client_array_from_geodataframe(self):
        """Predefined facilities (warm start) still reproduce the pickled fac2cli."""
        with open((self.dirpath + 'lscp_preselected_loc_geodataframe_fac2cli.pkl'), 'rb') as f:
            lscp_objective = pickle.load(f)
        fac_snapped = self.facilities_snapped.copy()
        # Force the last candidate facility to be preselected.
        fac_snapped['predefined_loc'] = numpy.array([0, 0, 0, 0, 1])
        lscp = LSCP.from_geodataframe(self.clients_snapped, fac_snapped, 'geometry', 'geometry', predefined_facility_col='predefined_loc', service_radius=8)
        lscp = lscp.solve(pulp.PULP_CBC_CMD(msg=False, warmStart=True))
        numpy.testing.assert_array_equal(numpy.array(lscp.fac2cli, dtype=object), numpy.array(lscp_objective, dtype=object))
def download_pretrained_models(method, file_ids):
    """Fetch pretrained model files for `method` from Google Drive.

    `file_ids` maps file names to Google Drive file ids.  Files that
    already exist locally trigger an interactive Y/N overwrite prompt.
    """
    dest_dir = f'./experiments/pretrained_models/{method}'
    os.makedirs(dest_dir, exist_ok=True)
    for file_name, file_id in file_ids.items():
        target = osp.abspath(osp.join(dest_dir, file_name))
        if not osp.exists(target):
            print(f'Downloading {file_name} to {target}')
            download_file_from_google_drive(file_id, target)
            continue
        # File already on disk: ask the user before clobbering it.
        answer = input(f'{file_name} already exist. Do you want to cover it? Y/N\n').lower()
        if answer == 'y':
            print(f'Covering {file_name} to {target}')
            download_file_from_google_drive(file_id, target)
        elif answer == 'n':
            print(f'Skipping {file_name}')
        else:
            raise ValueError('Wrong input. Only accepts Y/N.')
class global_sttr(nn.Module):
    """Tokenize an input map and run the tokens through a Transformer encoder."""

    def __init__(self, config):
        super(global_sttr, self).__init__()
        self.embed_channel = config.embed_channel
        self.tokenizer = tokenizer(config)
        # Pre-norm encoder layer; width/heads/dropout come from the config.
        layer = TransformerEncoderLayer(
            self.embed_channel,
            config.Nhead,
            dropout=config.dropout_porb,
            normalize_before=True,
        )
        self.encoder = TransformerEncoder(
            layer,
            num_layers=config.num_encoder_layers,
            norm=nn.LayerNorm(self.embed_channel),
        )

    def forward(self, x):
        tokens = self.tokenizer(x)
        # Reorder dims (2, 0, 1) into the sequence-first layout the encoder
        # expects — assumes the token axis is last in the tokenizer output.
        tokens = tokens.permute(2, 0, 1)
        return self.encoder(tokens)
from dataclasses import dataclass, field  # `field` was already used below; `dataclass` powers the restored decorator


# NOTE(review): the original used dataclasses.field() defaults without a
# @dataclass decorator, which leaves Field objects as class attributes and
# breaks _register at runtime; the decorator (evidently stripped during
# extraction) is restored here.
@dataclass
class CommandDispatcher:
    """Routes chat command text to registered Command handlers by trigger word."""

    _commands: list[Command] = field(default_factory=list)
    _commands_by_trigger: dict[str, Command] = field(default_factory=dict)

    def _register(self, command: Command) -> None:
        """Record `command` and index it under each of its trigger words."""
        self._commands.append(command)
        self._commands_by_trigger.update({trigger: command for trigger in command.triggers})

    def handle(self, playerid: int, command_text: str) -> bool:
        """Dispatch `command_text`; return True when a command consumed it."""
        trigger, _, args_text = command_text.partition(' ')
        command = self._commands_by_trigger.get(trigger)
        if command is None:
            return False
        command.handle(playerid, args_text)
        return True
def pack_rows(rows, bitdepth):
    """Yield each row of sample values packed `bitdepth` bits per sample.

    Each row is an iterable of integers that fit in `bitdepth` bits
    (bitdepth must be 1, 2 or 4).  Rows are zero-padded on the right to a
    whole byte, and one packed bytearray is yielded per input row.
    """
    assert bitdepth < 8
    assert 8 % bitdepth == 0
    # Samples per byte (integer division replaces the original float math).
    spb = 8 // bitdepth

    def make_byte(block):
        # Pack one block of `spb` samples into a single byte, MSB first.
        res = 0
        for v in block:
            res = (res << bitdepth) + v
        return res

    for row in rows:
        a = bytearray(row)
        # Pad with zero samples up to a multiple of spb; -n % spb is the
        # integer-arithmetic equivalent of the original ceil computation.
        a.extend([0] * (-len(a) % spb))
        # Chunk into spb-sized blocks (inlines the former `group` helper).
        yield bytearray(make_byte(a[i:i + spb]) for i in range(0, len(a), spb))
def build_voc_dirs(outdir):
    """Create the standard Pascal-VOC directory layout under `outdir`.

    Idempotent: existing directories are left in place.  Returns the tuple
    (annotations_dir, jpeg_images_dir, imagesets_main_dir).
    """
    subdirs = [
        ('Annotations',),
        ('ImageSets',),
        ('ImageSets', 'Layout'),
        ('ImageSets', 'Main'),
        ('ImageSets', 'Segmentation'),
        ('JPEGImages',),
        ('SegmentationClass',),
        ('SegmentationObject',),
    ]
    # exist_ok=True avoids the check-then-create race of the original
    # `if not exists: makedirs` lambda.
    os.makedirs(outdir, exist_ok=True)
    for parts in subdirs:
        os.makedirs(os.path.join(outdir, *parts), exist_ok=True)
    return (os.path.join(outdir, 'Annotations'),
            os.path.join(outdir, 'JPEGImages'),
            os.path.join(outdir, 'ImageSets', 'Main'))
def separate_attributes_and_event_handlers(attributes: Mapping[str, Any]) -> tuple[dict[str, Any], EventHandlerDict]:
    """Split `attributes` into plain attributes and event handlers.

    Callable values are wrapped into EventHandler objects; values that
    already look like handlers (have a `function` attribute and satisfy
    EventHandlerType) pass through unchanged; everything else is kept as
    a plain attribute.  Returns (plain_attributes, event_handlers).
    """
    separated_attributes: dict[str, Any] = {}
    separated_event_handlers: dict[str, EventHandlerType] = {}
    for k, v in attributes.items():
        handler: EventHandlerType
        if callable(v):
            handler = EventHandler(to_event_handler_function(v))
        elif hasattr(v, 'function') and isinstance(v, EventHandlerType):
            handler = v
        else:
            separated_attributes[k] = v
            continue
        separated_event_handlers[k] = handler
    # Return the handler dict directly — the original rebuilt it via
    # dict(...items()), a redundant copy with identical contents.
    return (separated_attributes, separated_event_handlers)
def merge_annotations(annotations, comment_annotations):
    """Zip two annotation streams, preferring real annotations.

    For each position, yield the regular annotation when it is truthy and
    not an Ellipsis; otherwise the comment annotation under the same rule;
    otherwise None.  The longer stream pads the shorter with None.
    """
    for ann, comment_ann in itertools.zip_longest(annotations, comment_annotations):
        chosen = None
        if ann and not _is_ellipsis(ann):
            chosen = ann
        elif comment_ann and not _is_ellipsis(comment_ann):
            chosen = comment_ann
        yield chosen
def test_partial():
    """Partial applications of Bobby accumulate positional args until
    instantiation, and inspect_node reports the accumulated state."""

    def _check(ci, *, instance):
        # Invariants shared by every inspection below (the original repeated
        # this four-assert cluster verbatim at each step).
        assert ci.type == Bobby
        assert bool(ci.instance) == instance
        assert len(ci.options) == 4
        assert len(ci.processors) == 1

    C = Bobby
    with inspect_node(C) as ci:
        _check(ci, instance=False)
        assert not ci.partial

    f1 = MagicMock()
    C = C(f1)
    with inspect_node(C) as ci:
        _check(ci, instance=False)
        assert ci.partial
        assert ci.partial[0] == (f1,)
        assert not len(ci.partial[1])

    f2 = MagicMock()
    C = C(f2)
    with inspect_node(C) as ci:
        _check(ci, instance=False)
        assert ci.partial
        # Positional args accumulate across successive partial applications.
        assert ci.partial[0] == (f1, f2)
        assert not len(ci.partial[1])

    c = C('foo')
    with inspect_node(c) as ci:
        _check(ci, instance=True)
        # Instantiation consumes the pending partial state.
        assert not ci.partial
class SemisuperDecayEnv(SemisuperEnv):
    """Semi-supervised env whose reward visibility decays geometrically.

    Each call to _distort_reward multiplies the probability of observing
    the true reward by DECAY_RATE; when the draw fails, 0 is returned.
    """

    DECAY_RATE = 0.999

    def __init__(self):
        super(SemisuperDecayEnv, self).__init__()
        self.prob_get_reward = 1.0

    def _distort_reward(self, true_reward):
        # Decay first, then sample: even the very first reward is passed
        # through with probability DECAY_RATE rather than 1.0.
        self.prob_get_reward *= SemisuperDecayEnv.DECAY_RATE
        return true_reward if self.np_random.uniform() < self.prob_get_reward else 0
def get_supported_kernel_in_dict_format(act_constraint: Dict, weight_constraint: Dict) -> Dict:
    """Reshape activation/weight quantization constraints into the
    {'activation': {...}, 'param': {...}} kernel-description format,
    keeping only the 'bitwidth' and 'dtype' entries of each."""
    def _pick(constraint: Dict) -> Dict:
        return {'bitwidth': constraint['bitwidth'], 'dtype': constraint['dtype']}

    return {'activation': _pick(act_constraint), 'param': _pick(weight_constraint)}
class CalendarParameter(Parameter):
    """Parameter holding a QDate; string values are parsed with the
    configured Qt date format (default 'TextDate')."""

    itemClass = CalendarParameterItem

    def __init__(self, **opts):
        opts.setdefault('format', 'TextDate')
        super().__init__(**opts)

    def _interpretFormat(self, fmt=None):
        """Resolve `fmt` (falling back to the stored 'format' option) to a
        QtCore.Qt.DateFormat member when it names one; otherwise return it
        unchanged."""
        fmt = fmt or self.opts.get('format')
        return getattr(QtCore.Qt.DateFormat, fmt) if hasattr(QtCore.Qt.DateFormat, fmt) else fmt

    def _interpretValue(self, v):
        """Parse string values into QDate using the configured format."""
        if not isinstance(v, str):
            return v
        fmt = self._interpretFormat()
        if fmt is None:
            raise ValueError('Cannot parse date string without a set format')
        return QtCore.QDate.fromString(v, fmt)

    def saveState(self, filter=None):
        """Serialize state with the date rendered as a formatted string."""
        state = super().saveState(filter)
        fmt = self._interpretFormat()
        if state['value'] is not None:
            state['value'] = state['value'].toString(fmt)
        return state
class deprecated():
    """Decorator class marking a callable as deprecated.

    Every call to the wrapped function emits a DeprecationWarning naming
    the removal release (`removed_after`) and, when given, the
    replacement (`new_name`).
    """

    def __init__(self, new_name=None, removed_after='next major patch'):
        self.new_name = new_name
        self.removed_after = removed_after

    def __call__(self, func):
        # Local import keeps the file's top-level import block untouched.
        from functools import wraps

        # wraps copies __name__/__doc__/__dict__ like the original manual
        # assignments did, and additionally preserves __module__,
        # __qualname__ and sets __wrapped__.
        @wraps(func)
        def newFunc(*args, **kwargs):
            warning = ('%s is deprecated and will be removed in %s. ' % (func.__name__, self.removed_after))
            if self.new_name is not None:
                warning += ('This functionality has been replaced with %s.' % self.new_name)
            warn(warning, category=DeprecationWarning)
            return func(*args, **kwargs)

        return newFunc
def render_pcl_front_view(vis, cam_params=None, fn=None, img_save_fn=None, pt_size=3):
    """Render the point cloud in `fn` from a fixed camera and save a screenshot.

    vis -- an open3d Visualizer with an active window
    cam_params -- pinhole camera parameters defining the viewpoint
    fn -- path of the point-cloud file to load and render
    img_save_fn -- path the captured screen image is written to
    pt_size -- rendered point size, in pixels
    """
    mesh = o3d.io.read_point_cloud(fn)
    vis.add_geometry(mesh)
    opt = vis.get_render_option()
    opt.point_size = pt_size
    ctr = vis.get_view_control()
    ctr.convert_from_pinhole_camera_parameters(cam_params)
    # Run one event/render cycle so the new geometry and camera take effect.
    vis.poll_events()
    vis.update_renderer()
    # Second positional arg is open3d's do_render flag — forces a render
    # before capturing.
    vis.capture_screen_image(img_save_fn, True)
    # Leave the visualizer empty for the next call.
    vis.clear_geometries()
class BasicBlock(nn.Module):
    """Two-conv ResNet basic block with optional stochastic depth (DropPath)."""

    expansion = 1

    def __init__(self, in_channels, channels, stride=1, groups=1, width_per_group=64, sd=0.0, **block_kwargs):
        super(BasicBlock, self).__init__()
        if groups != 1 or width_per_group != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        width = int(channels * (width_per_group / 64.0)) * groups
        out_channels = channels * self.expansion

        # Projection shortcut only when shape changes; identity otherwise.
        shortcut_layers = []
        if stride != 1 or in_channels != out_channels:
            shortcut_layers.append(layers.conv1x1(in_channels, out_channels, stride=stride))
            shortcut_layers.append(layers.bn(out_channels))
        self.shortcut = nn.Sequential(*shortcut_layers)

        self.conv1 = nn.Sequential(layers.conv3x3(in_channels, width, stride=stride), layers.bn(width), layers.relu())
        self.conv2 = nn.Sequential(layers.conv3x3(width, out_channels), layers.bn(out_channels))
        self.relu = layers.relu()
        # Stochastic depth on the residual branch; identity when disabled.
        self.sd = layers.DropPath(sd) if sd > 0.0 else nn.Identity()

    def forward(self, x):
        identity = self.shortcut(x)
        out = self.conv2(self.conv1(x))
        return self.relu(self.sd(out) + identity)
def check_file(filename: str) -> List[LintMessage]:
    """Run usort on `filename` and return lintrunner messages.

    Returns [] when imports are already sorted, one WARNING carrying the
    sorted replacement otherwise, or one ERROR/ADVICE message when usort
    itself fails.
    """
    try:
        # Give the __strict__/__static__ pseudo-imports their own category,
        # sorted ahead of everything except __future__ imports.
        top_of_file_cat = usort_config.Category('top_of_file')
        known = usort_config.known_factory()
        known['__strict__'] = top_of_file_cat
        known['__static__'] = top_of_file_cat
        config = usort_config.Config(categories=(usort_config.CAT_FUTURE, top_of_file_cat, usort_config.CAT_STANDARD_LIBRARY, usort_config.CAT_THIRD_PARTY, usort_config.CAT_FIRST_PARTY), known=known)
        with open(filename, mode='rb') as f:
            original = f.read()
        result = usort(original, config)
        if result.error:
            raise result.error
    # NOTE(review): usort runs in-process here, so the subprocess-based
    # handlers below look inherited from a subprocess-linter template and
    # appear unreachable — confirm before relying on them.
    except subprocess.TimeoutExpired:
        return [LintMessage(path=filename, line=None, char=None, code='USORT', severity=LintSeverity.ERROR, name='timeout', original=None, replacement=None, description='usort timed out while trying to process a file. Please report an issue in pytorch/torchrec.')]
    except (OSError, subprocess.CalledProcessError) as err:
        return [LintMessage(path=filename, line=None, char=None, code='USORT', severity=LintSeverity.ADVICE, name='command-failed', original=None, replacement=None, description=(f'''Failed due to {err.__class__.__name__}:
{err}''' if (not isinstance(err, subprocess.CalledProcessError)) else 'COMMAND (exit code {returncode})\n{command}\n\nSTDERR\n{stderr}\n\nSTDOUT\n{stdout}'.format(returncode=err.returncode, command=' '.join((as_posix(x) for x in err.cmd)), stderr=(err.stderr.decode('utf-8').strip() or '(empty)'), stdout=(err.stdout.decode('utf-8').strip() or '(empty)'))))]
    replacement = result.output
    # Only report when sorting would actually change the file.
    if (original == replacement):
        return []
    return [LintMessage(path=filename, line=None, char=None, code='USORT', severity=LintSeverity.WARNING, name='format', original=original.decode('utf-8'), replacement=replacement.decode('utf-8'), description='Run `lintrunner -a` to apply this patch.')]
def test_logins_fails_with_wrong_password(graphql_client):
    """Logging in with a valid email but a wrong password must resolve the
    login mutation to the WrongEmailOrPassword type."""
    # NOTE(review): email='' looks like a fixture value truncated during
    # extraction — the test depends on UserFactory producing a usable
    # user.email; confirm against the original source.
    user = UserFactory(email='', password='test')
    response = graphql_client.query('mutation($input: LoginInput!) {\n login(input: $input) {\n __typename\n }\n }', variables={'input': {'email': user.email, 'password': 'incorrect'}})
    assert (response['data']['login']['__typename'] == 'WrongEmailOrPassword')
def get_icns(svg_path):
    """Build a complete .icns container (header + TOC + entries) from an SVG.

    Each table entry is (OSType tag, pixel size, renderer kind); payloads
    are deduplicated per (size, kind) so shared data is rendered once.
    """
    ICONS = [(b'is32', 16, 'icon'), (b's8mk', 16, 'mask'), (b'il32', 32, 'icon'), (b'l8mk', 32, 'mask'), (b'ic08', 256, 'png'), (b'ic09', 512, 'png'), (b'icp6', 64, 'png'), (b'ic07', 128, 'png'), (b'ic11', 32, 'png'), (b'ic12', 64, 'png'), (b'ic13', 256, 'png'), (b'ic14', 512, 'png')]
    renderers = {'png': get_png, 'icon': get_icon, 'mask': get_mask}

    # Render each distinct (size, kind) payload exactly once.
    payloads = {}
    for _, size, kind in ICONS:
        if (size, kind) not in payloads:
            payloads[(size, kind)] = renderers[kind](svg_path, size)

    # TOC chunk: its own tag + length word, then one (tag, length) pair per
    # icon entry; entry lengths include the 8-byte entry header.
    toc = bytearray(b'TOC ') + struct.pack('>I', 8 + len(ICONS) * 8)
    body = bytearray()
    for tag, size, kind in ICONS:
        payload = payloads[(size, kind)]
        entry_len = struct.pack('>I', 8 + len(payload))
        toc += tag + entry_len
        body += tag + entry_len + payload

    # File layout: 'icns' magic + total length, then the TOC, then entries.
    out = bytearray(b'icns')
    out += struct.pack('>I', 8 + len(toc) + len(body))
    out += toc
    out += body
    return out
class TestNcNWCSAFFileKeyPrefix():
    """Tests for file_key handling in the NWCSAF/PPS CMIC file handler."""

    def test_get_dataset_uses_file_key_prefix(self, nwcsaf_pps_cmic_filehandler):
        """An explicit file_key and the prefixed dataset name must load the same data."""
        dsid_cpp = {'name': 'cpp_cot'}
        dsid_cmic = {'name': 'cmic_cot'}
        file_key = 'cot'
        info_cpp = dict(name='cpp_cot', file_key=file_key, file_type='nc_nwcsaf_cpp')
        res_cpp = nwcsaf_pps_cmic_filehandler.get_dataset(dsid_cpp, info_cpp)
        # No file_key here — the handler should derive it from the name prefix.
        info_cmic = dict(name='cmic_cot', file_type='nc_nwcsaf_cpp')
        res_cmic = nwcsaf_pps_cmic_filehandler.get_dataset(dsid_cmic, info_cmic)
        np.testing.assert_allclose(res_cpp, res_cmic)

    def test_get_dataset_scales_and_offsets_palette_meanings_using_other_dataset(self, nwcsaf_pps_cmic_filehandler):
        """Palette meanings must be rescaled with the scale/offset of the dataset
        named by scale_offset_dataset."""
        dsid = {'name': 'cpp_cot_pal'}
        info = dict(name='cpp_cot_pal', file_key='cot_pal', file_type='nc_nwcsaf_cpp', scale_offset_dataset='cot')
        res = nwcsaf_pps_cmic_filehandler.get_dataset(dsid, info)
        palette_meanings = np.array(COT_PALETTE_MEANINGS.split()).astype(int)
        np.testing.assert_allclose(res.attrs['palette_meanings'], ((palette_meanings * COT_SCALE) + COT_OFFSET))
def functionParams(args, vars):
    """Resolve template-style parameters from `args`.

    Each name in `vars` is looked up directly in `args`; missing names
    fall back to the next unused positional key ('1', '2', ...), and
    finally to ''.  Returns a name -> value dict.
    """
    resolved = {}
    next_positional = 1
    for name in vars:
        value = args.get(name)
        if value is None:
            positional = args.get(str(next_positional))
            if positional is None:
                value = ''
            else:
                # Consume this positional slot only when it was used.
                value = positional
                next_positional += 1
        resolved[name] = value
    return resolved
def collapse_lora(model, alpha=1.0):
    """Fold injected LoRA weights back into their wrapped linear/conv layers.

    For every injected module found in the UNet/text-encoder target sets,
    the base weight is replaced in place by W + alpha * (up @ down).

    NOTE(review): the extracted source was missing the operator between
    lora_up and lora_down; `@` (matmul) is restored here, matching the
    standard LoRA collapse formula W' = W + alpha * B A.
    """
    for _module, name, _child_module in _find_modules(model, (UNET_EXTENDED_TARGET_REPLACE | TEXT_ENCODER_EXTENDED_TARGET_REPLACE), search_class=[LoraInjectedLinear, LoraInjectedConv2d]):
        if isinstance(_child_module, LoraInjectedLinear):
            print('Collapsing Lin Lora in', name)
            delta = _child_module.lora_up.weight.data @ _child_module.lora_down.weight.data
            _child_module.linear.weight = nn.Parameter(
                _child_module.linear.weight.data
                + (alpha * delta.type(_child_module.linear.weight.dtype).to(_child_module.linear.weight.device))
            )
        else:
            print('Collapsing Conv Lora in', name)
            # Conv kernels are flattened to 2-D for the matmul, then reshaped
            # back to the full kernel shape.
            delta = (_child_module.lora_up.weight.data.flatten(start_dim=1)
                     @ _child_module.lora_down.weight.data.flatten(start_dim=1)).reshape(_child_module.conv.weight.data.shape)
            _child_module.conv.weight = nn.Parameter(
                _child_module.conv.weight.data
                + (alpha * delta.type(_child_module.conv.weight.dtype).to(_child_module.conv.weight.device))
            )
def get_datasets():
    """Return the mapping of nnU-Net task names to their trained configurations.

    Tuple order is preserved from the original table (it differs per task).
    """
    full = ('3d_cascade_fullres', '3d_fullres', '3d_lowres', '2d')
    simple = ('3d_fullres', '2d')
    return {
        'Task01_BrainTumour': simple,
        'Task02_Heart': simple,
        'Task03_Liver': full,
        'Task04_Hippocampus': simple,
        'Task05_Prostate': simple,
        'Task06_Lung': full,
        'Task07_Pancreas': full,
        'Task08_HepaticVessel': full,
        'Task09_Spleen': full,
        'Task10_Colon': full,
        'Task48_KiTS_clean': ('3d_cascade_fullres', '3d_lowres', '3d_fullres', '2d'),
        'Task27_ACDC': simple,
        'Task24_Promise': simple,
        'Task35_ISBILesionSegmentation': simple,
        'Task38_CHAOS_Task_3_5_Variant2': simple,
        'Task29_LITS': ('3d_cascade_fullres', '3d_lowres', '2d', '3d_fullres'),
        'Task17_AbdominalOrganSegmentation': ('3d_cascade_fullres', '3d_lowres', '2d', '3d_fullres'),
        'Task55_SegTHOR': ('3d_cascade_fullres', '3d_lowres', '3d_fullres', '2d'),
        'Task56_VerSe': ('3d_cascade_fullres', '3d_lowres', '3d_fullres', '2d'),
    }
class Cauchy(Continuous):
    """Cauchy distribution with location `alpha` and scale `beta`.

    NOTE(review): `dist` takes `cls` but the extracted source lacked the
    @classmethod decorator (this dump strips decorators elsewhere too);
    it is restored here so `Cauchy.dist(...)` works as a constructor.
    """

    rv_op = cauchy

    @classmethod
    def dist(cls, alpha, beta, *args, **kwargs):
        """Build the distribution from location `alpha` and scale `beta`."""
        alpha = pt.as_tensor_variable(floatX(alpha))
        beta = pt.as_tensor_variable(floatX(beta))
        return super().dist([alpha, beta], **kwargs)

    def moment(rv, size, alpha, beta):
        # The Cauchy mean is undefined; the location alpha is used instead,
        # broadcast against beta and expanded to `size` when given.
        (alpha, _) = pt.broadcast_arrays(alpha, beta)
        if not rv_size_is_none(size):
            alpha = pt.full(size, alpha)
        return alpha

    def logp(value, alpha, beta):
        """Elementwise log-density: -log(pi) - log(beta) - log1p(((x - alpha)/beta)^2)."""
        res = (((- pt.log(np.pi)) - pt.log(beta)) - pt.log1p(pt.pow(((value - alpha) / beta), 2)))
        return check_parameters(res, (beta > 0), msg='beta > 0')

    def logcdf(value, alpha, beta):
        """Log CDF: log(1/2 + arctan((x - alpha)/beta) / pi)."""
        res = pt.log((0.5 + (pt.arctan(((value - alpha) / beta)) / np.pi)))
        return check_parameters(res, (beta > 0), msg='beta > 0')

    def icdf(value, alpha, beta):
        """Quantile function: alpha + beta * tan(pi * (q - 1/2))."""
        res = (alpha + (beta * pt.tan((np.pi * (value - 0.5)))))
        res = check_icdf_value(res, value)
        return check_parameters(res, (beta > 0), msg='beta > 0')
# NOTE(review): the bare `.parametrize(...)` line had its `@pytest.mark`
# prefix stripped during extraction; restored below.
@pytest.mark.parametrize('function_', ['DCV', 'ACV', 'DCI', 'R2W'])
def test_resolution(resetted_hp34401a, function_):
    """Setting a 1e-4 resolution on a 1-unit range must be accepted for each
    parametrized measurement function, with no instrument errors queued."""
    resetted_hp34401a.function_ = function_
    resetted_hp34401a.range_ = 1
    resetted_hp34401a.resolution = 0.0001
    assert (len(resetted_hp34401a.check_errors()) == 0)
    # The instrument reports back the value that was set.
    assert (resetted_hp34401a.resolution == 0.0001)
from dataclasses import dataclass  # needed for the restored decorator below


# NOTE(review): this record class consists solely of typed fields; the
# @dataclass decorator was evidently stripped during extraction (other
# decorators in this dump were too) and is restored here.
@dataclass
class ClientInput:
    """One fuzzer-produced input observed by the broker, plus bookkeeping."""

    content: bytes  # raw input bytes
    log_time: str
    recv_time: str
    elapsed: str
    hash: str  # digest/identifier of `content` — confirm algorithm at the producer
    path: str
    seed_status: SeedType
    fuzzer_id: bytes
    fuzzer_name: str
    broker_status: str
    replay_status: str
    replay_time: float
    new_coverage: list[tuple[int, int]]  # coverage pairs — verify meaning against producer
def create_unique_list(apps: Apps, _):
    """Django migration helper: create the 'unique' FilterList and its filters.

    Uses historical models obtained via `apps` so the migration remains valid
    as the live models evolve.  The second positional arg (schema editor) is
    unused.

    NOTE(review): several DM strings below contain empty backtick pairs and a
    truncated URL ('at: < free to re-post') — the original literals appear to
    have been mangled during extraction; restore them from project history
    before reusing this code.
    """
    filter_list: pydis_site.apps.api.models.FilterList = apps.get_model('api', 'FilterList')
    filter_: pydis_site.apps.api.models.Filter = apps.get_model('api', 'Filter')
    # The list itself, with per-list default settings the filters can override.
    list_ = filter_list.objects.create(name='unique', list_type=0, guild_pings=[], filter_dm=True, dm_pings=[], remove_context=False, bypass_roles=[], enabled=True, dm_content='', dm_embed='', infraction_type='NONE', infraction_reason='', infraction_duration=timedelta(seconds=0), infraction_channel=0, disabled_channels=[], disabled_categories=[], enabled_channels=[], enabled_categories=[], send_alert=True)
    everyone = filter_.objects.create(content='everyone', filter_list=list_, description='', remove_context=True, bypass_roles=['Helpers'], dm_content="Please don't try to ping `` or ``. Your message has been removed. If you believe this was a mistake, please let staff know!", disabled_categories=['CODE JAM'])
    everyone.save()
    webhook = filter_.objects.create(content='webhook', filter_list=list_, description='', remove_context=True, dm_content='Looks like you posted a Discord webhook URL. Therefore, your message has been removed, and your webhook has been deleted. You can re-create it if you wish to. If you believe this was a mistake, please let us know.')
    webhook.save()
    rich_embed = filter_.objects.create(content='rich_embed', filter_list=list_, description='', guild_pings=['Moderators'], dm_pings=['Moderators'])
    rich_embed.save()
    discord_token = filter_.objects.create(content='discord_token', filter_list=list_, filter_dm=False, remove_context=True, dm_content='I noticed you posted a seemingly valid Discord API token in your message and have removed your message. This means that your token has been **compromised**. Please change your token **immediately** at: < free to re-post it with the token removed. If you believe this was a mistake, please let us know!')
    discord_token.save()
def dir(root, type='f', addroot=True):
    """List the files ('f') or directories ('d') directly inside `root`.

    When `addroot` is true, entries are returned as root-relative paths
    joined with '/'.  Note: intentionally preserves the original quirks —
    the name shadows the `dir` builtin, and an unknown `type` prints an
    error and returns 0.
    """
    root = root + '/'
    dirList = []
    fileList = []
    for entry in os.listdir(root):
        full = root + entry
        kept = full if addroot == True else entry
        if os.path.isdir(full):
            dirList.append(kept)
        if os.path.isfile(full):
            fileList.append(kept)
    if type == 'f':
        return fileList
    if type == 'd':
        return dirList
    print('ERROR: TMC.dir(root,type) type must be [f] for file or [d] for dir')
    return 0
def _get_lines_for_constructing_an_object(func: Callable):
(def_line, *pre, ret_line) = inspect.getsource(func).splitlines()
assert def_line.startswith('def '), def_line
trimmed_lines = []
for line in pre:
assert ((line == '') or line.startswith((' ' * 4))), line
trimmed_lines.append(line[4:])
assert ret_line.startswith(' return '), ret_line
obj_expression = ret_line[len(' return '):]
return (trimmed_lines, obj_expression) |
# NOTE(review): `.parametrize(...)` lost its `@pytest.mark` prefix during
# extraction; restored below.
@pytest.mark.parametrize('helper', model_helpers)
def test_management_navigation(logged_in_admin_user: Page, helper: ModelHelper) -> None:
    """Each model listed on the Management page must link to its list view."""
    page = logged_in_admin_user
    expect(page.get_by_role('heading', name='Management')).to_be_visible()
    name = helper.verbose_name_plural
    page.get_by_role('link', name=name, exact=True).click()
    # The URL path segment is the verbose plural name, lowercased with
    # spaces removed.
    url_name = name.lower().replace(' ', '')
    expect(page).to_have_url(re.compile(f'.*/{url_name}/'))
    if helper.model == Catalog:
        # The catalog list should render at least one item; keep a
        # screenshot as evidence.
        item_in_ui = page.locator('.list-group > .list-group-item').first
        expect(item_in_ui).to_be_visible()
        page.screenshot(path='screenshots/management-navigation-catalog.png', full_page=True)
class InstructionMRCForFewCLUEProcessor(CLSProcessor):
def __init__(self, data_args, training_args, model_args, tokenizer=None, post_tokenizer=False, keep_raw_data=True):
super().__init__(data_args, training_args, model_args, tokenizer, post_tokenizer=post_tokenizer, keep_raw_data=keep_raw_data)
param = {p.split('=')[0]: p.split('=')[1] for p in data_args.user_defined.split(' ')}
assert ('data_name' in param), "You must add one defined param 'data_name=xxx' in the user_defined parameter."
self.data_name = param['data_name']
self.is_pseudo = False
self.pseudo_threshold = 1.0
if ('is_pseudo' in param.keys()):
self.is_pseudo = bool(param['is_pseudo'])
self.pseudo_threshold = float(param['pseudo_threshold'])
self.data_dir = data_args.data_dir
assert (self.data_name in clue_processors.keys()), 'Unknown task name {}'.format(self.data_name)
self.processor = clue_processors[self.data_name]
self.output_modes = clue_output_modes[self.data_name]
self.train_file = os.path.join(data_args.data_dir, 'train_few_all.json')
self.dev_file = os.path.join(data_args.data_dir, 'dev_few_all.json')
self.test_file = os.path.join(data_args.data_dir, 'test.json')
self.test_file = os.path.join(data_args.data_dir, 'test_public.json')
self.max_len = data_args.max_seq_length
self.doc_stride = data_args.doc_stride
self.sentence1_key = None
self.labels = self.processor.get_labels()
self.verbalizers = self.processor.get_verbalizers()
def get_data_collator(self):
pad_to_multiple_of_8 = (self.training_args.fp16 and (not self.data_args.pad_to_max_length))
return DataCollatorForGlobalPointer(self.tokenizer, pad_to_multiple_of=(8 if pad_to_multiple_of_8 else None), pad_to_max_length=self.data_args.pad_to_max_length)
def convert_to_instruction_template(self, examples: List[InputExample]):
label_mappings = self.verbalizers
instruction_type = clue_task_to_instruction_type[self.data_name]
format_info = dataset2instruction[instruction_type]
instruction_processor = format_info['instruction'](self.data_name, examples, label_mappings, format_info['prompt'], format_info['keys_order'], format_info['data_type'])
instruction_data = instruction_processor.transform2instruction()
return instruction_data
def get_examples(self, set_type):
if (set_type == 'train'):
examples = self.processor.get_train_examples(self.data_dir)
instruction_data = self.convert_to_instruction_template(examples)
examples = self._create_examples(instruction_data, 'train')
examples = examples[:self.data_args.max_train_samples]
self.train_examples = examples
elif (set_type == 'dev'):
examples = self.processor.get_dev_examples(self.data_dir)
instruction_data = self.convert_to_instruction_template(examples)
examples = self._create_examples(instruction_data, 'dev')
examples = examples[:self.data_args.max_eval_samples]
self.dev_examples = examples
elif (set_type == 'test'):
examples = self.processor.get_test_examples(self.data_dir)
instruction_data = self.convert_to_instruction_template(examples)
examples = self._create_examples(instruction_data, 'test')
examples = examples[:self.data_args.max_predict_samples]
self.test_examples = examples
return examples
def _create_examples(self, lines, set_type):
examples = []
is_train = (0 if (set_type == 'test') else 1)
for line in lines:
id_ = line['guid']
text = line['instruction']
target = line['target']
start = line['start']
data_type = line['data_type']
if (data_type == 'ner'):
(new_start, new_end) = ([], [])
for (t, entity_starts) in zip(target, start):
for s in entity_starts:
new_start.append(s)
new_end.append((s + len(t)))
(start, end) = (new_start, new_end)
target = '|'.join(target)
else:
(start, end) = ([start], [(start + len(target))])
examples.append({'id': id_, 'content': text, 'start': start, 'end': end, 'target': target, 'data_type': data_type, 'is_train': is_train})
return examples
def set_config(self, config):
config.ent_type_size = 1
config.inner_dim = 64
config.RoPE = True
def build_preprocess_function(self):
tokenizer = self.tokenizer
max_seq_length = self.data_args.max_seq_length
def func(examples):
tokenized_examples = tokenizer(examples['content'], truncation=True, max_length=max_seq_length, padding=('max_length' if self.data_args.pad_to_max_length else False), return_offsets_mapping=True)
return tokenized_examples
return func
def fush_multi_answer(self, has_answer, new_answer):
for (ans, value) in new_answer.items():
if (ans not in has_answer.keys()):
has_answer[ans] = value
else:
has_answer[ans]['prob'] += value['prob']
has_answer[ans]['pos'].extend(value['pos'])
return has_answer
def get_predict_result(self, logits, examples):
(probs, indices) = logits
probs = probs.squeeze(1)
indices = indices.squeeze(1)
predictions = {}
topk_predictions = {}
for (prob, index, example) in zip(probs, indices, examples):
data_type = example['data_type']
id_ = example['id']
index_ids = torch.Tensor([i for i in range(len(index))]).long()
topk_answer = list()
if (data_type == 'ner'):
answer = []
topk_answer_dict = dict()
entity_index = index[(prob > 0.0)]
index_ids = index_ids[(prob > 0.0)]
for (ei, entity) in enumerate(entity_index):
start_end = np.unravel_index(entity, (self.data_args.max_seq_length, self.data_args.max_seq_length))
s = example['offset_mapping'][start_end[0]][0]
e = example['offset_mapping'][start_end[1]][1]
ans = example['content'][s:e]
if (ans not in answer):
answer.append(ans)
topk_answer_dict[ans] = {'prob': float(prob[index_ids[ei]]), 'pos': [(s, e)]}
predictions[id_] = answer
if (id_ not in topk_predictions.keys()):
topk_predictions[id_] = topk_answer_dict
else:
topk_predictions[id_] = self.fush_multi_answer(topk_predictions[id_], topk_answer_dict)
else:
best_start_end = np.unravel_index(index[0], (self.data_args.max_seq_length, self.data_args.max_seq_length))
s = example['offset_mapping'][best_start_end[0]][0]
e = example['offset_mapping'][best_start_end[1]][1]
answer = example['content'][s:e]
predictions[id_] = answer
topk_answer_dict = dict()
topk_index = index[(prob > 0.0)]
index_ids = index_ids[(prob > 0.0)]
for (ei, index) in enumerate(topk_index):
if (ei > 6):
break
start_end = np.unravel_index(index, (self.data_args.max_seq_length, self.data_args.max_seq_length))
s = example['offset_mapping'][start_end[0]][0]
e = example['offset_mapping'][start_end[1]][1]
ans = example['content'][s:e]
topk_answer_dict[ans] = {'prob': float(prob[index_ids[ei]]), 'pos': [(s, e)]}
predictions[id_] = answer
if (id_ not in topk_predictions.keys()):
topk_predictions[id_] = topk_answer_dict
else:
topk_predictions[id_] = self.fush_multi_answer(topk_predictions[id_], topk_answer_dict)
for (id_, values) in topk_predictions.items():
answer_list = list()
for (ans, value) in values.items():
answer_list.append({'answer': ans, 'prob': value['prob'], 'pos': value['pos']})
topk_predictions[id_] = answer_list
return (predictions, topk_predictions)
def compute_metrics(self, eval_predictions):
    """Score validation predictions per sub-dataset, then aggregate macro/micro F1.

    Returns a dict with 'macro_f1', 'micro_f1', 'eval_num' and one rounded
    accuracy entry per sub-dataset name.
    """
    eval_examples = self.raw_datasets['validation']
    preds, _ = self.get_predict_result(eval_predictions[0], eval_examples)
    gold_answers = {}
    ids_by_dataset = defaultdict(list)
    type_by_dataset = {}
    for ex in eval_examples:
        ex_id = ex['id']
        dataset_name = '_'.join(ex_id.split('_')[:-1])
        type_by_dataset.setdefault(dataset_name, ex['data_type'])
        ids_by_dataset[dataset_name].append(ex_id)
        # NER targets are stored '|'-joined; every other task has a scalar target.
        if ex['data_type'] == 'ner':
            gold_answers[ex_id] = ex['target'].split('|')
        else:
            gold_answers[ex_id] = ex['target']
    all_metrics = {'macro_f1': 0.0, 'micro_f1': 0.0, 'eval_num': 0}
    for dataset_name, id_list in ids_by_dataset.items():
        metric = datatype2metrics[type_by_dataset[dataset_name]]()
        gold_subset = {k: v for k, v in gold_answers.items() if k in id_list}
        pred_subset = {k: v for k, v in preds.items() if k in id_list}
        score = metric.calc_metric(golden=gold_subset, predictions=pred_subset)
        acc, f1 = score['acc'], score['f1']
        all_metrics['macro_f1'] += f1
        all_metrics['micro_f1'] += f1 * len(id_list)
        all_metrics['eval_num'] += len(id_list)
        all_metrics[dataset_name] = round(acc, 4)
    # Macro averages over datasets; micro weights each dataset by its size.
    all_metrics['macro_f1'] = round(all_metrics['macro_f1'] / len(ids_by_dataset), 4)
    all_metrics['micro_f1'] = round(all_metrics['micro_f1'] / all_metrics['eval_num'], 4)
    return all_metrics
def save_result(self, logits, label_ids):
    """Write test-set predictions to answer.json and top-k scores to topk_prob.json."""
    test_examples = self.raw_datasets['test']
    preds, topk_preds = self.get_predict_result(logits, test_examples)
    # Invert verbalizers so a predicted answer string maps back to its label id.
    label_by_verbalized = {v: k for k, v in self.verbalizers.items()}
    answers = {
        int(key.split('-')[1]): self.labels[int(label_by_verbalized[value])]
        for key, value in preds.items()
    }
    answer_path = os.path.join(self.training_args.output_dir, 'answer.json')
    with open(answer_path, 'w', encoding='utf8') as fp:
        json.dump(answers, fp, ensure_ascii=False, indent=2)
    topk_path = os.path.join(self.training_args.output_dir, 'topk_prob.json')
    with open(topk_path, 'w', encoding='utf8') as fp:
        json.dump(topk_preds, fp, ensure_ascii=False, indent=2)
def create_test_label_data(self, examples, out, pos, tag: dict=None, threshole=0.9):
    """Build pseudo-labelled examples from high-confidence ensemble votes.

    Args:
        examples: iterable of example dicts with at least 'id' and 'content'.
        out: mapping id -> {answer_text: accumulated probability}.
        pos: mapping id -> {answer_text: (start, end) or [[start, end], ...]}.
        tag: unused; kept for interface compatibility.
        threshole: per-model confidence threshold (scaled by ensemble size).

    Returns:
        List of example dicts augmented with 'start', 'end' and 'target' for
        answers whose accumulated probability clears the threshold and whose
        span text actually matches the answer string.
    """
    model_num = 6
    template_per_model_num = 1
    # An answer must accumulate at least `threshole` probability from every
    # ensemble member/template to be trusted as a pseudo label.
    min_prob = threshole * model_num * template_per_model_num
    correct_answer = dict()
    for k, v in out.items():
        # NER tasks are excluded from pseudo labelling.
        if 'ner' in k.lower():
            continue
        ranked = sorted(v.items(), key=(lambda x: x[1]), reverse=True)
        best_result, best_prob = ranked[0][0], ranked[0][1]
        best_pos = pos[k][best_result]
        if best_prob >= min_prob:
            correct_answer[k] = (best_pos, best_result)
    new_example = list()
    for example in examples:
        id = example['id']
        if id not in correct_answer:
            continue
        content = example['content']
        target = correct_answer[id][1]
        span = correct_answer[id][0]  # renamed: used to shadow the `pos` argument
        if isinstance(span[0], int):
            # Single candidate span: keep only if the span text matches.
            if content[span[0]:span[1]] != target:
                continue
            example['start'] = [span[0]]
            example['end'] = [span[1]]
            example['target'] = target
            new_example.append(example)
        else:
            # Multiple candidate spans: use the first whose text matches.
            assert isinstance(span, list) and isinstance(span[0], list) and isinstance(span[0][0], int)
            for pos_i in span:
                if content[pos_i[0]:pos_i[1]] == target:
                    example['start'] = [pos_i[0]]
                    example['end'] = [pos_i[1]]
                    example['target'] = target
                    new_example.append(example)
                    break
    # Bug fix: the previous code unconditionally printed new_example[0] and
    # crashed with IndexError when no answer cleared the threshold.
    if new_example:
        print('example ==')
        print(new_example[0])
    print('correct answer num: {}'.format(len(new_example)))
    return new_example
def random_sources_in_disk(size, power, wavelength_mean, bandwidth, r):
    """Create `size` point sources scattered uniformly over a disk of radius `r`.

    Each source gets a random phase in [0, 2*pi) and a wavelength drawn
    uniformly from [wavelength_mean - bandwidth/2, wavelength_mean + bandwidth/2).

    NOTE(review): `power` is accepted but not forwarded to Source (power=1 is
    hard-coded, matching the original call) — confirm whether it should be used.
    """
    source_list = ([None] * size)
    (x0, y0) = random_in_unit_disk(size)
    (x0, y0) = ((r * x0), (r * y0))
    phase = ((np.random.rand(size) * 2) * np.pi)
    wavelength = (wavelength_mean + (bandwidth * (np.random.rand(size) - 0.5)))
    for i in range(size):
        # Bug fix: the keyword name was missing ("=wavelength[i]"), which is a
        # syntax error; restored the `wavelength=` keyword.
        source_list[i] = Source(power=1, wavelength=wavelength[i], phase=phase[i], pos=vec3(x0[i], y0[i], 0))
    return source_list
def LoadAbsorption(layer, T, wavelengths, use_Adachi=False):
    """Return [wavelengths, absorption] for `layer` at temperature `T`.

    When `use_Adachi` is True the Adachi parametrisation is tried first, falling
    back to experimental (Solcore) data. Otherwise experimental data is used,
    with the Adachi model (or zero) as a fallback for the InGaAs truncation step.

    Args:
        layer: layer description dict with a 'properties' entry.
        T: temperature in K.
        wavelengths: numpy array of wavelengths (m).
        use_Adachi: prefer the Adachi analytic model when True.

    Returns:
        [wavelengths.tolist(), absorption.tolist()]
    """
    if use_Adachi:
        try:
            # Index [3] selects the absorption coefficient from the Adachi output.
            absorption = adachi_alpha.create_adachi_alpha(InLineComposition(layer), T=T, wl=wavelengths)[3]
        except Exception:  # narrowed from a bare except; still best-effort fallback
            print('Warning: Using experimental data to estimate the absorption coefficient of material: ', InLineComposition(layer))
            absorption = ToSolcoreMaterial(layer['properties']['composition'], T, execute=True).alpha(wavelengths)
            if (layer['properties']['composition']['material'] == 'InGaAs'):
                print('Warning: Extrapolation of experimental absorption data for InGaAs is not reliable at longer wavelengths.')
                print(' >>>: We truncate the absorption at the bandgap wavelength.')
                # Zero the absorption beyond the bandgap wavelength (E = hc/lambda).
                edge = (1.24e-06 / (layer['properties']['band_gap'] / q))
                edgeidx = np.abs((wavelengths - edge)).argmin()
                absorption[edgeidx:] = 0
    else:
        print(layer['properties']['composition'])
        absorption = ToSolcoreMaterial(layer['properties']['composition'], T, execute=True).alpha(wavelengths)
        # NOTE(review): this try only guards the InGaAs truncation (e.g. a
        # missing 'band_gap' key); the alpha() call above is NOT protected.
        try:
            if (layer['properties']['composition']['material'] == 'InGaAs'):
                print('Warning: Extrapolation of experimental absorption data for InGaAs is not reliable at longer wavelengths.')
                print(' >>>: We truncate the absorption at the bulk bandgap wavelength.')
                edge = (1.24e-06 / (layer['properties']['band_gap'] / q))
                edgeidx = np.abs((wavelengths - edge)).argmin()
                absorption[edgeidx:] = 0
        except Exception as err:
            print('Warning: Using Adachi calculation to estimate the absorption coefficient of material: ', InLineComposition(layer))
            try:
                absorption = adachi_alpha.create_adachi_alpha(InLineComposition(layer), T=T, wl=wavelengths)[3]
            except Exception:  # narrowed from a bare except
                print('Warning: No absorption information found for material {}. Setting it equal to zero.'.format(InLineComposition(layer)))
                absorption = (0 * wavelengths)
    return [wavelengths.tolist(), absorption.tolist()]
def test_set_parent_for_preset(tmp_path):
    """A preset has no parent until one is set; then it reads back as stored."""
    options = Options(tmp_path)
    preset_uid = uuid.UUID('b41fde84-1f57-4b79-8cd6-3e5a78077fa6')
    parent_uid = uuid.UUID('b51fdeaa-1fff-4b79-8cd6-3e5a78077fa6')
    assert options.get_parent_for_preset(preset_uid) is None
    options.set_parent_for_preset(preset_uid, parent_uid)
    assert options.get_parent_for_preset(preset_uid) == parent_uid
class ModelUtils():
    """Lookup table from user-facing model name strings to Model enum members."""

    # Several aliases (palm2/google/bard/...) intentionally map to Model.palm.
    convert: dict = {
        'gpt-3.5-turbo': Model.gpt_35_turbo,
        'gpt-3.5-turbo-0613': Model.gpt_35_turbo_0613,
        'gpt-3.5-turbo-0301': Model.gpt_35_turbo_0301,
        'gpt-4': Model.gpt_4,
        'gpt-4-0613': Model.gpt_4_0613,
        'gpt-4-for-dev': Model.gpt_4_dev,
        'gpt-3.5-turbo-16k': Model.gpt_35_turbo_16k,
        'gpt-3.5-turbo-16k-0613': Model.gpt_35_turbo_16k_0613,
        'claude-instant-v1-100k': Model.claude_instant_v1_100k,
        'claude-v1-100k': Model.claude_v1_100k,
        'claude-instant-v1': Model.claude_instant_v1,
        'claude-v1': Model.claude_v1,
        'alpaca-7b': Model.alpaca_7b,
        'stablelm-tuned-alpha-7b': Model.stablelm_tuned_alpha_7b,
        'bloom': Model.bloom,
        'bloomz': Model.bloomz,
        'flan-t5-xxl': Model.flan_t5_xxl,
        'flan-ul2': Model.flan_ul2,
        'gpt-neox-20b': Model.gpt_neox_20b,
        'oasst-sft-4-pythia-12b-epoch-3.5': Model.oasst_sft_4_pythia_12b_epoch_35,
        'santacoder': Model.santacoder,
        'command-medium-nightly': Model.command_medium_nightly,
        'command-xlarge-nightly': Model.command_xlarge_nightly,
        'code-cushman-001': Model.code_cushman_001,
        'code-davinci-002': Model.code_davinci_002,
        'text-ada-001': Model.text_ada_001,
        'text-babbage-001': Model.text_babbage_001,
        'text-curie-001': Model.text_curie_001,
        'text-davinci-002': Model.text_davinci_002,
        'text-davinci-003': Model.text_davinci_003,
        'palm2': Model.palm,
        'palm': Model.palm,
        'google': Model.palm,
        'google-bard': Model.palm,
        'google-palm': Model.palm,
        'bard': Model.palm,
        'falcon-40b': Model.falcon_40b,
        'falcon-7b': Model.falcon_7b,
        'llama-13b': Model.llama_13b,
    }
class Effect11999(BaseEffect):
    """Passive hull effect: boosts small hybrid turret tracking speed using the
    second Gunship elite bonus, gated on the Assault Frigates skill."""

    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        def uses_small_hybrids(mod):
            return mod.item.requiresSkill('Small Hybrid Turret')
        fit.modules.filteredItemBoost(uses_small_hybrids, 'trackingSpeed', src.getModifiedItemAttr('eliteBonusGunship2'), skill='Assault Frigates', **kwargs)
class BattleCmdSet(default_cmds.CharacterCmdSet):
    """Default character cmdset extended with the combat commands."""

    key = 'DefaultCharacter'

    def at_cmdset_creation(self):
        # Register each combat command; order matches the original additions.
        combat_commands = (
            CmdFight, CmdAttack, CmdRest, CmdPass, CmdDisengage,
            CmdCombatHelp, CmdLearnSpell, CmdCast, CmdStatus,
        )
        for command_cls in combat_commands:
            self.add(command_cls())
class TestNoOperation(EndianTest):
    """Round-trip pack/parse checks for the X11 NoOperation request."""

    def setUp(self):
        self.req_args_0 = {}
        self.req_bin_0 = b'\x7f\x00\x00\x01'

    def testPackRequest0(self):
        # `packed` avoids shadowing the `bin` builtin used by the original.
        packed = request.NoOperation._request.to_binary(*(), **self.req_args_0)
        self.assertBinaryEqual(packed, self.req_bin_0)

    def testUnpackRequest0(self):
        args, remain = request.NoOperation._request.parse_binary(self.req_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_0)
def add_index(shard_dir, index_path):
    """Concatenate every dataset shard under `shard_dir`, build an HNSW FAISS
    index over the 'embeddings' column and save it to `index_path`."""
    shards = [load_from_disk(path) for path in glob(str(shard_dir) + '/*/')]
    merged = concatenate_datasets(shards)
    faiss.omp_set_num_threads(96)
    # 768-dim vectors, 128 HNSW neighbours, inner-product similarity.
    hnsw_index = faiss.IndexHNSWFlat(768, 128, faiss.METRIC_INNER_PRODUCT)
    merged.add_faiss_index('embeddings', custom_index=hnsw_index)
    merged.get_index('embeddings').save(index_path)
def test_gen_poll_url_xml_has_xmltodict(monkeypatch):
    """GenPollUrl decodes an XML body via xmltodict when json=False, xml=True."""
    monkeypatch.setitem(sys.modules, 'xmltodict', Mockxml('xmltodict'))
    reload(generic_poll_text)
    poller = generic_poll_text.GenPollUrl(json=False, xml=True, parse=(lambda x: x), url='testing')
    # Stub out the network layer so poll() sees a canned b'OK' response.
    monkeypatch.setattr(generic_poll_text, 'Request', MockRequest)
    monkeypatch.setattr(generic_poll_text, 'urlopen', Mockurlopen)
    generic_poll_text.Request.return_value = b'OK'
    assert poller.poll()['test'] == 'OK'
class _ConfigSettingsTranslator():
    """Translate PEP 517 ``config_settings`` dicts into setuptools CLI flags."""

    def _get_config(self, key: str, config_settings: _ConfigSettings) -> List[str]:
        # A string value is split shell-style; a list passes through; a missing
        # or falsy value becomes an empty list.
        value = (config_settings or {}).get(key) or []
        if isinstance(value, str):
            return shlex.split(value)
        return value

    def _global_args(self, config_settings: _ConfigSettings) -> Iterator[str]:
        cfg = config_settings or {}
        falsey = {'false', 'no', '0', 'off'}
        # 'verbose'/'quiet' accept explicit levels; a falsy level flips the flag.
        if 'verbose' in cfg or '--verbose' in cfg:
            level = str(cfg.get('verbose') or cfg.get('--verbose') or '1')
            yield ('-q' if level.lower() in falsey else '-v')
        if 'quiet' in cfg or '--quiet' in cfg:
            level = str(cfg.get('quiet') or cfg.get('--quiet') or '1')
            yield ('-v' if level.lower() in falsey else '-q')
        yield from self._get_config('--global-option', config_settings)

    def __dist_info_args(self, config_settings: _ConfigSettings) -> Iterator[str]:
        cfg = config_settings or {}
        if 'tag-date' in cfg:
            tagged = strtobool(str(cfg['tag-date'] or 'false'))
            yield ('--tag-date' if tagged else '--no-date')
        if 'tag-build' in cfg:
            yield '--tag-build'
            yield str(cfg['tag-build'])

    def _editable_args(self, config_settings: _ConfigSettings) -> Iterator[str]:
        cfg = config_settings or {}
        mode = cfg.get('editable-mode') or cfg.get('editable_mode')
        if mode:
            yield '--mode'
            yield str(mode)

    def _arbitrary_args(self, config_settings: _ConfigSettings) -> Iterator[str]:
        yield from self._get_config('--build-option', config_settings)
def collect_rst_dps(rst_file_location):
    """Collect entry names listed under class_template/function directives in
    the given .rst file and return them as a set of stripped lines."""
    found = set()
    with open(rst_file_location) as handle:
        line = handle.readline()
        while line != '':
            if 'class_template.rst' in line or 'function.rst' in line:
                # Skip the directive's option line, then gather entries until a
                # blank/one-character line or EOF ends the list.
                handle.readline()
                line = handle.readline()
                while line != '' and len(line.strip()) > 1:
                    found.add(line.strip())
                    line = handle.readline()
            else:
                line = handle.readline()
    return found
def slice_aspect_ratio(pixel_dim, axis):
    """Return the in-plane aspect ratio of a 2D slice through a 3D image.

    `pixel_dim` holds the voxel spacing along the three axes; the ratio is
    taken between the two spacings perpendicular to the slicing `axis`.
    Raises ValueError for a non-3-element spacing array or an invalid axis.
    """
    if pixel_dim.size != 3:
        raise ValueError('pixel_dim must be an array of 3 elements for 3D images')
    # Map slicing axis -> (numerator index, denominator index) of the spacings.
    ratio_indices = {0: (2, 1), 1: (2, 0), 2: (1, 0)}
    if axis not in ratio_indices:
        raise ValueError('Axis can only be 0, 1 or 2 for 3D images')
    numerator_idx, denominator_idx = ratio_indices[axis]
    return pixel_dim[numerator_idx] / pixel_dim[denominator_idx]
class Container():
    """Mutable shell around a single value kept in ``storage[0]``.

    The one-element ``storage`` list can be shared, so several containers (or
    other code holding the same list) alias one value. Reads and writes go
    through the ``data``/``value`` properties; writes are validated/converted
    by ``self.type.filter`` / ``filter_inplace``.
    """

    def __init__(self, r: Union[(Variable, Type)], storage: list[Any], *, readonly: bool=False, strict: bool=False, allow_downcast: Optional[bool]=None, name: Optional[str]=None) -> None:
        # `storage` must be a non-empty list; element 0 holds the value.
        if ((not isinstance(storage, list)) or (not (len(storage) >= 1))):
            raise TypeError('storage must be a list of length at least one')
        # `r` may be a Variable (use its type) or a Type given directly.
        if isinstance(r, Variable):
            self.type = r.type
        else:
            self.type = r
        if (name is None):
            # Fall back to the variable's name when one is available.
            self.name = getattr(r, 'name', None)
        else:
            self.name = name
        self.storage = storage
        self.readonly = readonly
        self.strict = strict
        self.allow_downcast = allow_downcast

    def __get__(self) -> Any:
        """Return the contained value (``storage[0]``)."""
        return self.storage[0]

    def __set__(self, value: Any) -> None:
        """Validate `value` via the container's type and store it in storage[0]."""
        if self.readonly:
            raise Exception(f'Cannot set readonly storage: {self.name}')
        try:
            # None bypasses filtering entirely.
            if (value is None):
                self.storage[0] = None
                return
            kwargs = {}
            if self.strict:
                kwargs['strict'] = True
            if (self.allow_downcast is not None):
                kwargs['allow_downcast'] = self.allow_downcast
            try:
                # Prefer in-place filtering (may reuse the existing buffer);
                # fall back to plain filter() when the type doesn't support it.
                self.storage[0] = self.type.filter_inplace(value, self.storage[0], **kwargs)
            except NotImplementedError:
                self.storage[0] = self.type.filter(value, **kwargs)
        except Exception as e:
            # Tag any failure with this container's name for debuggability.
            e.args = (e.args + (f'Container name "{self.name}"',))
            raise

    # Two aliases for the same accessor pair. NOTE: despite the __get__/__set__
    # names these are used as property funcs here, not the descriptor protocol.
    data = property(__get__, __set__)
    value = property(__get__, __set__)

    def __str__(self):
        return (('<' + str(self.storage[0])) + '>')

    def __repr__(self):
        return (('<' + repr(self.storage[0])) + '>')

    def __deepcopy__(self, memo: dict[(int, Any)]) -> 'Container':
        """Deep-copy the container, re-filtering the value if the copy is no
        longer valid for the type, and recording it in `memo` for aliasing."""
        data_was_in_memo = (id(self.storage[0]) in memo)
        r = type(self)(deepcopy(self.type, memo=memo), deepcopy(self.storage, memo=memo), readonly=deepcopy(self.readonly, memo=memo), strict=deepcopy(self.strict, memo=memo), allow_downcast=deepcopy(self.allow_downcast, memo=memo), name=deepcopy(self.name, memo=memo))
        if ((r.storage[0] is not None) and (not self.type.is_valid_value(r.storage[0]))):
            # The copied value failed validation even though the original is
            # valid: re-filter it (loosely) and memoise the repaired object.
            assert (not data_was_in_memo)
            assert self.type.is_valid_value(self.storage[0])
            r.storage[0] = self.type.filter(r.storage[0], strict=False, allow_downcast=False)
            memo[id(self.storage[0])] = r.storage[0]
        return r
def CalcuPSNR_int(img1, img2, max_val=255.0):
    """Compute per-image PSNR after quantising both tensors to 8-bit levels.

    Inputs are torch tensors in [0, 1] (clamped first); the result is a numpy
    array holding one PSNR value per batch element.
    """
    def _quantise(img):
        # clamp -> numpy -> scale to [0, 255] -> round -> float64 for the MSE.
        arr = torch.clamp(img, 0, 1).detach().cpu().numpy()
        return np.round(arr * 255).astype('float64')

    a = _quantise(img1)
    b = _quantise(img2)
    mse = np.mean(np.square(a - b), axis=(1, 2, 3))
    return 20 * np.log10(max_val) - 10 * np.log10(mse)
def max_pool2d(data, l=(2, 2)):
    """Apply max pooling (default 2x2) to a tensor or a tuple of tensors.

    Accepts:
      * a 3-tuple of tensors -> pools each element;
      * a 2-tuple where at most one element is None -> pools the non-None
        element(s), preserving the None placeholder;
      * a single Tensor -> pools it directly.

    Returns the pooled result in the same structure, or None for any other
    input type (unchanged implicit-return behaviour).
    """
    if (type(data) is tuple) and (len(data) == 3):
        return (F.max_pool2d(data[0], l), F.max_pool2d(data[1], l), F.max_pool2d(data[2], l))
    elif (type(data) is tuple) and (len(data) == 2):
        # Bug fix: the previous code indexed data[2] on a 2-tuple in the None
        # branches, which always raised IndexError. Preserve the None
        # placeholder and pool only the present element.
        # TODO(review): confirm the intended output structure for None inputs.
        if data[0] is None:
            return (None, F.max_pool2d(data[1], l))
        elif data[1] is None:
            return (F.max_pool2d(data[0], l), None)
        else:
            return (F.max_pool2d(data[0], l), F.max_pool2d(data[1], l))
    elif type(data) is Tensor:
        return F.max_pool2d(data, l)
def main() -> None:
    """Download the versioned config.proto, compile it, and verify the output.

    Exits with status 1 when the download fails or the generated Python module
    cannot be found afterwards.
    """
    version = get_version()
    # NOTE(review): the URL f-string literal below is truncated in this copy of
    # the file (unterminated string) — the full download URL, presumably built
    # from `version`, must be restored before this can run.
    url = f'
    with urlopen(url) as response:
        if (response.status >= 400):
            print(f'Failed to retrieve config.proto: status {response.status}', file=sys.stderr)
            sys.exit(1)
        with TemporaryDirectory() as dir_name:
            # Write the downloaded proto into a temp dir, then compile it.
            file_path = (Path(dir_name) / f'{FILE_NAME}.proto')
            with open(file_path, 'wb') as file:
                file.write(response.read())
            compile_proto(file_path)
    # Confirm the compiler actually produced the expected *_pb*.py module.
    if (generated_py := next(SRC_DIR.glob(f'{FILE_NAME}_pb*.py'), None)):
        print(f'Build output: {generated_py.absolute()}.')
    else:
        print(f'Could not find the generated Python file in {SRC_DIR}.', file=sys.stderr)
        sys.exit(1)
def init_actor(actor, pool, dict_ph, env, num_q, value_fn_params, noise_params):
    """Attach exploration/old/target policies and `num_q` Q-function pairs to `actor`.

    All networks share the hidden-layer sizes from `value_fn_params` and the
    exploration noise scale from `noise_params`; TF placeholders come from
    `dict_ph`. Everything is created under the actor's variable scope.
    """
    hidden_sizes = (value_fn_params['layer_size1'], value_fn_params['layer_size2'])
    noise_scale = noise_params['exploration_policy_noise_scale']
    with tf.variable_scope(actor.name):
        # Current, old (for updates) and target (for bootstrapping) policies.
        policy = DeterministicPolicy(env_spec=env.spec, hidden_layer_sizes=hidden_sizes, reg=0.001, observation_ph=dict_ph['observations_ph'], noise_scale=noise_scale)
        oldpolicy = DeterministicPolicy(env_spec=env.spec, hidden_layer_sizes=hidden_sizes, reg=0.001, name='old_deterministic_policy', observation_ph=dict_ph['observations_ph'], noise_scale=noise_scale)
        targetpolicy = DeterministicPolicy(env_spec=env.spec, hidden_layer_sizes=hidden_sizes, reg=0.001, name='target_deterministic_policy', observation_ph=dict_ph['next_observations_ph'], noise_scale=noise_scale)
        actor.policy = policy
        actor.oldpolicy = oldpolicy
        actor.targetpolicy = targetpolicy
        # One online/target Q-network pair per ensemble member.
        actor.arr_qf = []
        actor.arr_target_qf = []
        for j in range(num_q):
            actor.arr_qf.append(NNQFunction(env_spec=env.spec, hidden_layer_sizes=hidden_sizes, name='qf{i}'.format(i=j), observation_ph=dict_ph['observations_ph'], action_ph=dict_ph['actions_ph']))
            actor.arr_target_qf.append(NNQFunction(env_spec=env.spec, hidden_layer_sizes=hidden_sizes, name='target_qf{i}'.format(i=j), observation_ph=dict_ph['next_observations_ph'], action_ph=dict_ph['next_actions_ph']))
        actor.pool = pool
def test_safe_getattr() -> None:
    """safe_getattr swallows Exceptions and fail outcomes, but lets BaseException through."""
    target = ErrorsHelper()
    for attr_name in ('raise_exception', 'raise_fail_outcome'):
        assert safe_getattr(target, attr_name, 'default') == 'default'
    with pytest.raises(BaseException):
        assert safe_getattr(target, 'raise_baseexception', 'default')
class Test_git_describe(unittest.TestCase, Testing_renderer_case_mixin):
    """Renderer fixtures for the 'git-describe' version style."""

    style = 'git-describe'
    expected = {
        'tagged_0_commits_clean': 'v1.2.3',
        'tagged_0_commits_dirty': 'v1.2.3-dirty',
        'tagged_1_commits_clean': 'v1.2.3-1-gabc',
        'tagged_1_commits_dirty': 'v1.2.3-1-gabc-dirty',
        'untagged_0_commits_clean': '',
        'untagged_0_commits_dirty': '-dirty',
        'untagged_1_commits_clean': 'abc',
        'untagged_1_commits_dirty': 'abc-dirty',
        'error_getting_parts': 'unknown',
    }
def usage(msg=None):
    """Print an optional error message followed by the command-line help text."""
    if msg:
        print(msg)
    print(sys.argv[0], '[-p port] [-l logfile] [-dh] [allowed_client_name ...]]')
    # Blank separators and the option descriptions, one per line.
    for help_line in (
        '',
        ' -p - Port to bind to',
        ' -l - Path to logfile. If not specified, STDOUT is used',
        ' -d - Run in the background',
        '',
    ):
        print(help_line)
def accumulate_results(game_modifications: dict, items: dict[(str, dict[(str, int)])], locations: dict[(str, dict[(str, int)])], major_progression_items_only: bool):
    """Tally item placements from one game layout into the shared counters.

    Increments items[item][location] and locations[location][item] for every
    placement, optionally skipping items that are not major progression.
    """
    for world_name, world_data in game_modifications['locations'].items():
        for raw_area, raw_item in world_data.items():
            location_key = f'{world_name}/{raw_area}'
            item_key = _filter_item_name(raw_item)
            if major_progression_items_only and is_non_major_progression(item_key):
                continue
            items[item_key][location_key] += 1
            locations[location_key][item_key] += 1
class CfcCell(tf.keras.layers.Layer):
    """CfC-style recurrent cell for use inside a Keras RNN layer.

    Behaviour is driven by `hparams`: the backbone depth/width/activation plus
    two variant flags — 'minimal' (closed-form decay head) and 'no_gate'
    (additive blend instead of a sigmoid gate). Inputs may optionally be a
    (features, elapsed_time) pair so irregularly sampled sequences work.
    """

    def __init__(self, units, hparams, **kwargs):
        super(CfcCell, self).__init__(**kwargs)
        self.units = units
        # Keras RNN contract: the recurrent state is a single `units` vector.
        self.state_size = units
        self.hparams = hparams
        self._no_gate = False

    def build(self, input_shape):
        # When inputs come as (features, elapsed_time), input_shape is a tuple
        # of shapes; the feature dimension is the last axis of the first one.
        if isinstance(input_shape[0], tuple):
            input_dim = input_shape[0][(- 1)]
        else:
            input_dim = input_shape[(- 1)]
        # Resolve the backbone activation by name; fail fast on unknown names.
        if (self.hparams.get('backbone_activation') == 'silu'):
            backbone_activation = tf.nn.silu
        elif (self.hparams.get('backbone_activation') == 'relu'):
            backbone_activation = tf.nn.relu
        elif (self.hparams.get('backbone_activation') == 'tanh'):
            backbone_activation = tf.nn.tanh
        elif (self.hparams.get('backbone_activation') == 'gelu'):
            backbone_activation = tf.nn.gelu
        elif (self.hparams.get('backbone_activation') == 'lecun'):
            backbone_activation = lecun_tanh
        elif (self.hparams.get('backbone_activation') == 'softplus'):
            backbone_activation = tf.nn.softplus
        else:
            raise ValueError('Unknown backbone activation')
        self._no_gate = False
        if ('no_gate' in self.hparams):
            self._no_gate = self.hparams['no_gate']
        self._minimal = False
        if ('minimal' in self.hparams):
            self._minimal = self.hparams['minimal']
        # Shared MLP backbone applied to the concatenated [input, hidden] vector,
        # with dropout after every Dense layer.
        self.backbone = []
        for i in range(self.hparams['backbone_layers']):
            self.backbone.append(tf.keras.layers.Dense(self.hparams['backbone_units'], backbone_activation, kernel_regularizer=tf.keras.regularizers.L2(self.hparams['weight_decay'])))
            self.backbone.append(tf.keras.layers.Dropout(self.hparams['backbone_dr']))
        self.backbone = tf.keras.models.Sequential(self.backbone)
        if self._minimal:
            # Minimal variant: one head plus learned decay rate (w_tau) and
            # asymptote (A) weights.
            self.ff1 = tf.keras.layers.Dense(self.units, kernel_regularizer=tf.keras.regularizers.L2(self.hparams['weight_decay']))
            self.w_tau = self.add_weight(shape=(1, self.units), initializer=tf.keras.initializers.Zeros())
            self.A = self.add_weight(shape=(1, self.units), initializer=tf.keras.initializers.Ones())
        else:
            # Full variant: two heads (ff1, ff2) blended by a time-dependent
            # gate built from the time_a/time_b projections.
            self.ff1 = tf.keras.layers.Dense(self.units, lecun_tanh, kernel_regularizer=tf.keras.regularizers.L2(self.hparams['weight_decay']))
            self.ff2 = tf.keras.layers.Dense(self.units, lecun_tanh, kernel_regularizer=tf.keras.regularizers.L2(self.hparams['weight_decay']))
            self.time_a = tf.keras.layers.Dense(self.units, kernel_regularizer=tf.keras.regularizers.L2(self.hparams['weight_decay']))
            self.time_b = tf.keras.layers.Dense(self.units, kernel_regularizer=tf.keras.regularizers.L2(self.hparams['weight_decay']))
        self.built = True

    def call(self, inputs, states, **kwargs):
        hidden_state = states[0]
        # Elapsed time defaults to 1; a (features, elapsed) pair overrides it.
        t = 1.0
        if ((isinstance(inputs, tuple) or isinstance(inputs, list)) and (len(inputs) > 1)):
            elapsed = inputs[1]
            t = tf.reshape(elapsed, [(- 1), 1])
            inputs = inputs[0]
        x = tf.keras.layers.Concatenate()([inputs, hidden_state])
        x = self.backbone(x)
        ff1 = self.ff1(x)
        if self._minimal:
            # Closed-form exponential decay of ff1 towards the asymptote A.
            new_hidden = ((((- self.A) * tf.math.exp(((- t) * (tf.math.abs(self.w_tau) + tf.math.abs(ff1))))) * ff1) + self.A)
        else:
            ff2 = self.ff2(x)
            t_a = self.time_a(x)
            t_b = self.time_b(x)
            # Gate in (0, 1) interpolating the two heads as a function of t.
            t_interp = tf.nn.sigmoid((((- t_a) * t) + t_b))
            if self._no_gate:
                new_hidden = (ff1 + (t_interp * ff2))
            else:
                new_hidden = ((ff1 * (1.0 - t_interp)) + (t_interp * ff2))
        # Keras RNN contract: (output, [new_state]).
        return (new_hidden, [new_hidden])
def eval_expression(node: str, rng: np.random.RandomState, scope: dict):
    """Evaluate a configuration-string expression, resolving '$'-references
    from `scope` and random helpers from `rng`.

    Plain strings are returned unchanged; 'eval(...)' wrappers, $-references
    and Uniform/RandomAngleAxis calls are evaluated.
    NOTE: this uses eval() on the config content — only feed trusted input.
    """
    # Lowercase aliases so JSON-style booleans survive eval().
    true = True
    false = False

    def Uniform(low, high):
        return rng.uniform(low, high)

    def RandomAngleAxis(angle_low, angle_high, axis):
        return random_angle_axis(angle_low, angle_high, axis, rng)

    if ('$' in node) and (not (node.startswith('eval(') and node.endswith(')'))):
        exp = node
    elif (node.startswith('eval(') and node.endswith(')')):
        exp = node[5:(- 1)]
    elif (node.startswith('Uniform') or node.startswith('RandomAngleAxis')):
        exp = node
    else:
        # Not an expression at all: return verbatim.
        return node
    exps = parse_exp(exp)
    if (len(exps) == 1):
        if (exps[0][0] == '$'):
            return scope[exp]
        return eval(exp)
    new_exps = []
    for term in exps:
        if (term[0] == '$'):
            # Bug fix: look up the full '$name' term (matching the single-term
            # path above), not scope[term[0]], which indexed by the literal '$'.
            term = json.dumps(scope[term])
        new_exps.append(term)
    exp = ''.join(new_exps)
    return eval(exp)
def when(event_type=None, selector=None):
    """Decorator that registers the decorated function as a DOM event handler.

    `selector` may be a CSS selector string, a pydom.Element, or a
    pydom.ElementCollection; `event_type` is the DOM event name. The original
    function is returned unchanged so it stays directly callable.
    """
    def decorator(func):
        # Resolve the selector into a list of underlying JS elements.
        if isinstance(selector, str):
            targets = document.querySelectorAll(selector)
        else:
            from pyweb import pydom
            if isinstance(selector, pydom.Element):
                targets = [selector._js]
            elif isinstance(selector, pydom.ElementCollection):
                targets = [el._js for el in selector]
            else:
                raise ValueError(f'Invalid selector: {selector}. Selector must be a string, a pydom.Element or a pydom.ElementCollection.')
        # Zero-argument handlers get wrapped so the event object is swallowed.
        if inspect.signature(func).parameters:
            handler = func
        else:
            def handler(*args, **kwargs):
                func()
        for el in targets:
            add_event_listener(el, event_type, handler)
        return func
    return decorator
class NTU_Fi_ResNet(nn.Module):
    """ResNet backbone for NTU-Fi CSI data: a reshaping conv front-end followed
    by a four-stage residual network and a linear classifier."""

    def __init__(self, ResBlock, layer_list, num_classes):
        super(NTU_Fi_ResNet, self).__init__()
        # Front-end squeezing the raw CSI input into an image-like tensor.
        self.reshape = nn.Sequential(
            nn.Conv2d(3, 3, (15, 23), stride=(3, 9)),
            nn.ReLU(),
            nn.Conv2d(3, 3, kernel_size=(3, 23), stride=1),
            nn.ReLU(),
        )
        self.in_channels = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.batch_norm1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU()
        self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(ResBlock, layer_list[0], planes=64)
        self.layer2 = self._make_layer(ResBlock, layer_list[1], planes=128, stride=2)
        self.layer3 = self._make_layer(ResBlock, layer_list[2], planes=256, stride=2)
        self.layer4 = self._make_layer(ResBlock, layer_list[3], planes=512, stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * ResBlock.expansion, num_classes)

    def forward(self, x):
        x = self.reshape(x)
        x = self.relu(self.batch_norm1(self.conv1(x)))
        x = self.max_pool(x)
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
        x = self.avgpool(x)
        x = x.reshape(x.shape[0], -1)
        return self.fc(x)

    def _make_layer(self, ResBlock, blocks, planes, stride=1):
        # Project the identity path whenever the spatial size or channel count
        # changes between stages.
        downsample = None
        if stride != 1 or self.in_channels != planes * ResBlock.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.in_channels, planes * ResBlock.expansion, kernel_size=1, stride=stride),
                nn.BatchNorm2d(planes * ResBlock.expansion),
            )
        stage = [ResBlock(self.in_channels, planes, i_downsample=downsample, stride=stride)]
        self.in_channels = planes * ResBlock.expansion
        stage.extend(ResBlock(self.in_channels, planes) for _ in range(blocks - 1))
        return nn.Sequential(*stage)
@pytest.mark.parametrize('enabled', [True, False])
def test_trigger_auto_delay_enabled(enabled):
    """Getter parses the instrument's 0/1 reply; setter writes the matching flag.

    NOTE(review): the decorator was garbled to a bare '.parametrize(...)' line
    (a syntax error) in this copy; restored to the standard pytest marker,
    assuming `pytest` is imported at the top of the file.
    """
    with expected_protocol(HP34401A, [('TRIG:DEL:AUTO?', ('1' if enabled else '0')), (f'TRIG:DEL:AUTO {(1 if enabled else 0)}', None)]) as inst:
        assert (enabled == inst.trigger_auto_delay_enabled)
        inst.trigger_auto_delay_enabled = enabled
def fix_missing_data(contour_data_list):
    """Repair a flat [x0, y0, z0, x1, y1, z1, ...] contour list that contains a
    single missing ('') value.

    x/y gaps are filled by averaging the neighbouring points (wrapping around
    the closed contour); a z gap takes the minimum slice z value. Returns the
    data as a numpy array (string dtype when any element was a string).
    """
    contour_data = np.array(contour_data_list)
    # Bug fix: the original test was `contour_data.any() == ''`, which compares
    # a boolean scalar with a string and is never True, so missing values were
    # never repaired. Element-wise comparison must come before any().
    if (contour_data == '').any():
        logger.warning(' Missing values detected.')
        missing_values = np.where(contour_data == '')[0]
        if missing_values.shape[0] > 1:
            logger.warning(" More than one value missing, fixing this isn't implemented yet...")
        else:
            logger.warning(' Only one value missing.')
            missing_index = missing_values[0]
            missing_axis = missing_index % 3
            if missing_axis == 0:
                logger.warning(' Missing value in x axis: interpolating.')
                if missing_index > (len(contour_data) - 3):
                    # Last point: wrap around to the first point's x.
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[0]
                elif missing_index == 0:
                    # First point: wrap around to the last point's x.
                    lower_val = contour_data[-3]
                    upper_val = contour_data[3]
                else:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[missing_index + 3]
                # The array holds strings here; convert before averaging.
                # NOTE: the result is stored back as a string and may be
                # truncated to the array's string width.
                contour_data[missing_index] = 0.5 * (float(lower_val) + float(upper_val))
            elif missing_axis == 1:
                logger.warning(' Missing value in y axis: interpolating.')
                if missing_index > (len(contour_data) - 2):
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[1]
                elif missing_index == 0:
                    lower_val = contour_data[-2]
                    upper_val = contour_data[4]
                else:
                    lower_val = contour_data[missing_index - 3]
                    upper_val = contour_data[missing_index + 3]
                contour_data[missing_index] = 0.5 * (float(lower_val) + float(upper_val))
            else:
                logger.warning(' Missing value in z axis: taking slice value')
                # Use the minimum of the remaining z values on this contour.
                temp = contour_data[2::3].tolist()
                temp.remove('')
                contour_data[missing_index] = np.min(np.array(temp, dtype=np.double))
    return contour_data
class MSRA10k(BaseImageDataset):
    """MSRA10k salient-object dataset: JPEG images with same-stem PNG masks in
    an `Imgs/` directory. Images whose mask area is below `min_area` (when
    given) are excluded."""

    def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, min_area=None):
        root = env_settings().msra10k_dir if root is None else root
        super().__init__('MSRA10k', root, image_loader)
        self.image_list = self._load_dataset(min_area=min_area)
        if data_fraction is not None:
            raise NotImplementedError

    def _load_dataset(self, min_area=None):
        # Each sample is a .jpg image plus a .png mask sharing the same stem.
        candidates = [name[:-4] for name in os.listdir(os.path.join(self.root, 'Imgs')) if name[-3:] == 'jpg']
        kept = []
        for stem in candidates:
            mask = imread_indexed(os.path.join(self.root, 'Imgs', '{}.png'.format(stem)))
            if min_area is None or (mask > 0).sum() > min_area:
                kept.append(stem)
        return kept

    def get_name(self):
        return 'msra10k'

    def has_segmentation_info(self):
        return True

    def get_image_info(self, im_id):
        raw_mask = imread_indexed(os.path.join(self.root, 'Imgs', '{}.png'.format(self.image_list[im_id])))
        mask = torch.Tensor(raw_mask == 255)
        bbox = masks_to_bboxes(mask, fmt='t').view(4)
        # A detection is valid only when its box has positive width and height.
        valid = (bbox[2] > 0) & (bbox[3] > 0)
        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': valid.clone().byte()}

    def get_meta_info(self, im_id):
        # No per-image semantics are available for this dataset.
        return OrderedDict({'object_class_name': None, 'motion_class': None, 'major_class': None, 'root_class': None, 'motion_adverb': None})

    def get_image(self, image_id, anno=None):
        frame = self.image_loader(os.path.join(self.root, 'Imgs', '{}.jpg'.format(self.image_list[image_id])))
        if anno is None:
            anno = self.get_image_info(image_id)
        return (frame, anno, self.get_meta_info(image_id))
class CharbonnierLoss(nn.Module):
    """Charbonnier (differentiable L1) loss: mean(sqrt(diff^2 + eps^2)).

    Args:
        loss_weight: multiplier applied to the loss. Previously this argument
            was accepted but silently ignored; it is now honoured. The default
            of 1.0 preserves the old numerical behaviour.
        reduction: kept for interface compatibility; only 'mean' is implemented
            (the value is currently not used).
        eps: smoothing constant controlling behaviour near zero.
    """

    def __init__(self, loss_weight=1.0, reduction='mean', eps=0.001):
        super(CharbonnierLoss, self).__init__()
        self.loss_weight = loss_weight
        self.eps = eps

    def forward(self, x, y):
        diff = x - y
        loss = torch.mean(torch.sqrt(diff * diff + self.eps * self.eps))
        # Fixed: loss_weight used to be dropped in __init__ and never applied.
        return self.loss_weight * loss
class GetTypeHintTests(BaseTestCase):
    """get_type_hints() behaviour on modules, classes, ClassVar/Final and
    @no_type_check.

    NOTE(review): the @classmethod decorators on setUpClass/tearDownClass and
    the @no_type_check decorators (garbled to bare '_type_check' lines) were
    missing in this copy; restored based on the cls-style signatures and the
    __no_type_check__ assertions below.
    """

    @classmethod
    def setUpClass(cls):
        # Materialise the annotation fixture modules on disk and import them.
        with tempfile.TemporaryDirectory() as tempdir:
            sys.path.append(tempdir)
            Path(tempdir, 'ann_module.py').write_text(ANN_MODULE_SOURCE)
            Path(tempdir, 'ann_module2.py').write_text(ANN_MODULE_2_SOURCE)
            Path(tempdir, 'ann_module3.py').write_text(ANN_MODULE_3_SOURCE)
            cls.ann_module = importlib.import_module('ann_module')
            cls.ann_module2 = importlib.import_module('ann_module2')
            cls.ann_module3 = importlib.import_module('ann_module3')
        sys.path.pop()

    @classmethod
    def tearDownClass(cls):
        # Drop both the class attributes and the cached module objects.
        for modname in ('ann_module', 'ann_module2', 'ann_module3'):
            delattr(cls, modname)
            del sys.modules[modname]

    def test_get_type_hints_modules(self):
        ann_module_type_hints = {1: 2, 'f': Tuple[(int, int)], 'x': int, 'y': str}
        self.assertEqual(gth(self.ann_module), ann_module_type_hints)
        self.assertEqual(gth(self.ann_module2), {})
        self.assertEqual(gth(self.ann_module3), {})

    def test_get_type_hints_classes(self):
        self.assertEqual(gth(self.ann_module.C, self.ann_module.__dict__), {'y': Optional[self.ann_module.C]})
        self.assertIsInstance(gth(self.ann_module.j_class), dict)
        self.assertEqual(gth(self.ann_module.M), {'123': 123, 'o': type})
        self.assertEqual(gth(self.ann_module.D), {'j': str, 'k': str, 'y': Optional[self.ann_module.C]})
        self.assertEqual(gth(self.ann_module.Y), {'z': int})
        self.assertEqual(gth(self.ann_module.h_class), {'y': Optional[self.ann_module.C]})
        self.assertEqual(gth(self.ann_module.S), {'x': str, 'y': str})
        self.assertEqual(gth(self.ann_module.foo), {'x': int})
        self.assertEqual(gth(NoneAndForward, globals()), {'parent': NoneAndForward, 'meaning': type(None)})

    def test_respect_no_type_check(self):
        @no_type_check
        class NoTpCheck():
            class Inn():
                def __init__(self, x: 'not a type'):
                    ...
        self.assertTrue(NoTpCheck.__no_type_check__)
        self.assertTrue(NoTpCheck.Inn.__init__.__no_type_check__)
        self.assertEqual(gth(self.ann_module2.NTC.meth), {})
        class ABase(Generic[T]):
            def meth(x: int):
                ...
        @no_type_check
        class Der(ABase):
            ...
        self.assertEqual(gth(ABase.meth), {'x': int})

    def test_get_type_hints_ClassVar(self):
        self.assertEqual(gth(self.ann_module2.CV, self.ann_module2.__dict__), {'var': ClassVar[self.ann_module2.CV]})
        self.assertEqual(gth(B, globals()), {'y': int, 'x': ClassVar[Optional[B]], 'b': int})
        self.assertEqual(gth(CSub, globals()), {'z': ClassVar[CSub], 'y': int, 'b': int, 'x': ClassVar[Optional[B]]})
        self.assertEqual(gth(G), {'lst': ClassVar[List[T]]})

    def test_final_forward_ref(self):
        self.assertEqual(gth(Loop, globals())['attr'], Final[Loop])
        self.assertNotEqual(gth(Loop, globals())['attr'], Final[int])
        self.assertNotEqual(gth(Loop, globals())['attr'], Final)
@pytest.mark.parametrize('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])
def test_he_uniform(tensor_shape):
    """He-uniform init should draw from +/- sqrt(6 / fan_in) with zero mean.

    NOTE(review): the decorator was garbled to a bare '.parametrize(...)' line
    (a syntax error) in this copy; restored to the standard pytest marker,
    assuming `pytest` is imported at the top of the file.
    """
    (fan_in, _) = initializers._compute_fans(tensor_shape)
    scale = np.sqrt((6.0 / fan_in))
    _runner(initializers.he_uniform(), tensor_shape, target_mean=0.0, target_max=scale, target_min=(- scale))
def mutable_seq_typed_attrs(draw, defaults=None, allow_mutable_defaults=True, legacy_types_only=False, kw_only=None):
    """Return an attrs field typed MutableSequence[float] together with the
    strategy that generates valid values for it.

    NOTE(review): the leading `draw` parameter suggests this is meant to carry
    a hypothesis @composite decorator — confirm upstream.
    """
    value_strategy = lists(floats(allow_infinity=False, allow_nan=False))
    default = NOTHING
    # Draw a default either when explicitly requested or on a coin flip.
    if (defaults is True) or ((defaults is None) and draw(booleans())):
        drawn_value = draw(value_strategy)
        if (not allow_mutable_defaults) or draw(booleans()):
            # Wrap mutable defaults in a Factory so instances don't share state.
            default = Factory(lambda: drawn_value)
        else:
            default = drawn_value
    seq_type = AbcMutableSequence[float] if not legacy_types_only else MutableSequence[float]
    use_kw_only = draw(booleans()) if kw_only is None else kw_only
    return (field(type=seq_type, default=default, kw_only=use_kw_only), value_strategy)
def _parse_specifier(package_spec: str) -> ParsedPackage:
    """Classify *package_spec* as a PEP 508 requirement, a URL, or a local path.

    Exactly the non-None fields of the returned ParsedPackage describe how the
    spec was interpreted; an existing local archive path wins over a PEP 508
    match.

    Raises PipxError when the spec matches none of the three forms, or when a
    PEP 508 name that looks like an archive does not exist on disk.
    """
    pep508_req = None
    url_spec = None
    local_path = None
    # 1) Try to read the spec as a PEP 508 requirement string.
    try:
        requirement = Requirement(package_spec)
    except InvalidRequirement:
        pass
    else:
        pep508_req = requirement
        if requirement.name.endswith(ARCHIVE_EXTENSIONS):
            # The "name" is really an archive file; it must exist.
            package_path, package_path_exists = _check_package_path(requirement.name)
            if not package_path_exists:
                raise PipxError(f'{package_path} does not exist')
            local_path = str(package_path.resolve())
    if pep508_req is None:
        # 2) Not PEP 508 -- maybe a URL (needs both a scheme and a host).
        url_parts = urllib.parse.urlsplit(package_spec)
        if url_parts.scheme and url_parts.netloc:
            url_spec = package_spec
        if url_spec is None:
            # 3) Last resort: a local filesystem path, possibly with extras.
            path_part, extras_part = _split_path_extras(package_spec)
            candidate_path, candidate_exists = _check_package_path(path_part)
            if candidate_exists:
                local_path = str(candidate_path.resolve()) + extras_part
    if pep508_req is None and url_spec is None and local_path is None:
        raise PipxError(f'Unable to parse package spec: {package_spec}')
    # A PEP 508 name that resolved to an existing archive counts as a path.
    if local_path is not None:
        pep508_req = None
    return ParsedPackage(valid_pep508=pep508_req, valid_url=url_spec, valid_local_path=local_path)
def _create_recorded_proxy_method(proxy: HFProxy, method_name: str, cache_name: str, return_proxy: bool):
    """Bind a replay wrapper for a torch.Tensor method onto *proxy*.

    Instead of executing the real tensor method, the wrapper pops the next
    pre-recorded result from the tracer root's *cache_name* list; when
    *return_proxy* is set it returns a new proxy carrying that result in
    its ``cache`` attribute.
    """
    original_method = getattr(torch.Tensor, method_name)
    # NOTE(review): this bare expression looks like a truncated
    # `@functools.wraps(original_method)` decorator -- confirm upstream.
    (original_method)
    def method(*args, **kwargs):
        # args[0] is the proxy the method was invoked on; its tracer root
        # holds the queue of recorded results for this method.
        cache = getattr(args[0].tracer.root, cache_name)
        res = cache.pop(0)
        if return_proxy:
            # Route through __torch_function__ so the call is still traced;
            # note this local `proxy` shadows the outer parameter.
            proxy = args[0].__torch_function__(original_method, None, args=args, kwargs=kwargs)
            proxy.cache = res
            return proxy
        return res
    method.__name__ = method_name
    # Bind the wrapper to this specific proxy instance.
    bound_method = method.__get__(proxy, proxy.__class__)
    setattr(proxy, method_name, bound_method)
def parse_by_phrase(path_to_txt_or_xml_file, meter='default_english', minword=5):
    """Split a text file into phrases and parse them.

    The text is split on phrase-final punctuation and newlines; when
    *minword* is truthy, consecutive fragments are merged until they contain
    at least *minword* words, so very short fragments are not parsed alone.

    :param path_to_txt_or_xml_file: path handed straight to ``read_file``.
    :param meter: unused here -- kept for interface compatibility.
    :param minword: minimum words per merged phrase; 0/None disables merging.
    :return: whatever ``parse_string`` returns for the newline-joined phrases.
    """
    txt = read_file(path_to_txt_or_xml_file)
    import re
    phrases = re.split('[?.,;:\n]', txt)
    if minword:
        phrases2 = []
        phrase = []
        for px in phrases:
            phrase += px.split()
            if (len(phrase) >= minword):
                phrases2 += [' '.join(phrase)]
                phrase = []
        # Bug fix: a trailing fragment shorter than *minword* words used to
        # be silently dropped, losing the end of the text. Keep it.
        if phrase:
            phrases2 += [' '.join(phrase)]
        phrases = phrases2
    txt = '\n'.join(phrases)
    return parse_string(txt)
def handle_receive_delivered(chain_state: ChainState, state_change: ReceiveDelivered) -> TransitionResult[ChainState]:
    """Remove the acknowledged message from the sender's unordered queue."""
    queue_identifier = QueueIdentifier(state_change.sender, CANONICAL_IDENTIFIER_UNORDERED_QUEUE)
    inplace_delete_message_queue(chain_state, state_change, queue_identifier)
    return TransitionResult(chain_state, list())
class SelfShowCommand(SelfCommand, ShowCommand):
    # CLI identifier of this command.
    name = 'self show'
    # Reuse only the ShowCommand options that make sense here, plus --addons.
    options = [option('addons', None, 'List only add-on packages installed.'), *[o for o in ShowCommand.options if (o.name in {'tree', 'latest', 'outdated'})]]
    description = "Show packages from Poetry's runtime environment."
    help = f'''The <c1>self show</c1> command behaves similar to the <c1>show</c1> command, but
working within Poetry's runtime environment. This lists all packages installed within
the Poetry install environment.
To show only additional packages that have been added via <c1>self add</c1> and their
dependencies use <c1>self show --addons</c1>.
This is managed in the <comment>{SelfCommand.get_default_system_pyproject_file()}</> file.
'''
    def activated_groups(self) -> set[str]:
        # NOTE(review): the parent's activated_groups is read as an attribute
        # below, so this override may have lost an `@property` decorator
        # during extraction -- confirm upstream.
        if self.option('addons', False):
            # Restrict listing to the group `self add` installs into.
            return {SelfCommand.ADDITIONAL_PACKAGE_GROUP}
        # Deliberately skip ShowCommand in the MRO and use its parent's groups.
        groups: set[str] = super(ShowCommand, self).activated_groups
        return groups
class Job(pydantic.BaseModel):
    # Discriminates which crawler/processing task this job describes.
    job_type: JobType
    # Free-form task arguments; required keys depend on job_type (checked below).
    payload: Optional[Dict]
    # NOTE(review): the bare ('payload') expression below looks like a
    # truncated `@validator('payload')` decorator -- confirm upstream.
    ('payload')
    def crawler_payload(cls, v, values, **kwargs):
        """Validate that the payload carries every key its job_type requires."""
        if (not v):
            raise Exception('payload is required')
        if (values['job_type'] == JobType.CRAWL_PTT_LATEST_POSTS):
            for key in ['board']:
                if (key not in v):
                    raise Exception(f'{key} is required in payload')
        if (values['job_type'] == JobType.CRAWL_PTT_BOARD_LIST):
            for key in ['top_n']:
                if (key not in v):
                    raise Exception(f'{key} is required in payload')
        if (values['job_type'] in [JobType.PTT_MONTHLY_SUMMARY, JobType.PTT_SPACY_PIPELINE]):
            for key in ['year', 'month', 'overwrite']:
                if (key not in v):
                    raise Exception(f'{key} is required in payload')
        if (values['job_type'] == JobType.CRAWL_PTT_TOP_BOARD_POSTS):
            for key in ['n_days']:
                if (key not in v):
                    raise Exception(f'{key} is required in payload')
        # Payload is returned unchanged once validated.
        return v
class Inception_C(nn.Module):
    """Inception-C block: four parallel branches over a 1536-channel input,
    concatenated back to 1536 channels (256 + 512 + 512 + 256).

    Attribute names are part of the state-dict layout and must not change.
    """

    def __init__(self):
        super().__init__()
        # Branch 0: plain 1x1 bottleneck.
        self.branch0 = BasicConv2d(1536, 256, kernel_size=1, stride=1)
        # Branch 1: 1x1 stem, then parallel 1x3 / 3x1 convolutions.
        self.branch1_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1)
        self.branch1_1a = BasicConv2d(384, 256, kernel_size=(1, 3), stride=1, padding=(0, 1))
        self.branch1_1b = BasicConv2d(384, 256, kernel_size=(3, 1), stride=1, padding=(1, 0))
        # Branch 2: 1x1 -> 3x1 -> 1x3 stem, then parallel 1x3 / 3x1.
        self.branch2_0 = BasicConv2d(1536, 384, kernel_size=1, stride=1)
        self.branch2_1 = BasicConv2d(384, 448, kernel_size=(3, 1), stride=1, padding=(1, 0))
        self.branch2_2 = BasicConv2d(448, 512, kernel_size=(1, 3), stride=1, padding=(0, 1))
        self.branch2_3a = BasicConv2d(512, 256, kernel_size=(1, 3), stride=1, padding=(0, 1))
        self.branch2_3b = BasicConv2d(512, 256, kernel_size=(3, 1), stride=1, padding=(1, 0))
        # Branch 3: 3x3 average pooling followed by a 1x1 bottleneck.
        self.branch3 = nn.Sequential(nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False), BasicConv2d(1536, 256, kernel_size=1, stride=1))

    def forward(self, x):
        """Run all four branches on *x* and concatenate along channels."""
        plain = self.branch0(x)
        stem1 = self.branch1_0(x)
        split1 = torch.cat((self.branch1_1a(stem1), self.branch1_1b(stem1)), 1)
        stem2 = self.branch2_2(self.branch2_1(self.branch2_0(x)))
        split2 = torch.cat((self.branch2_3a(stem2), self.branch2_3b(stem2)), 1)
        pooled = self.branch3(x)
        return torch.cat((plain, split1, split2, pooled), 1)
def relations(distinctions, relation_computation=None, **kwargs):
    """Compute relations among *distinctions* with the selected backend.

    Falls back to ``config.RELATION_COMPUTATION`` when no explicit
    *relation_computation* is given; warns if congruence is unresolved.
    """
    if not distinctions.resolved_congruence:
        warnings.warn(_CONGRUENCE_WARNING_MSG, PyPhiWarning, stacklevel=2)
    backend = fallback(relation_computation, config.RELATION_COMPUTATION)
    return relation_computations[backend](distinctions, **kwargs)
class Thermotron3800(Instrument):
    """Represents the Thermotron 3800 environmental chamber (oven)."""

    def __init__(self, adapter, name='Thermotron 3800', **kwargs):
        super().__init__(adapter, name, includeSCPI=False, **kwargs)

    def write(self, command):
        """Send *command*, then pause 1 s -- the controller needs time
        between consecutive writes."""
        super().write(command)
        sleep(1)

    id = Instrument.measurement('IDEN?', ' Reads the instrument identification\n\n :return: String\n ')
    temperature = Instrument.measurement('PVAR1?', ' Reads the current temperature of the oven\n via built in thermocouple. Default unit is Celsius, unless\n changed by the user.\n\n :return: float\n ')
    mode = Instrument.measurement('MODE?', ' Gets the operating mode of the oven.\n\n :return: Tuple(String, int)\n ', get_process=(lambda mode: Thermotron3800.__translate_mode(mode)))
    setpoint = Instrument.control('SETP1?', 'SETP1,%g', ' A floating point property that controls the setpoint\n of the oven in Celsius. This property can be set.\n "setpoint" will not update until the "run()" command is called.\n After setpoint is set to a new value, the "run()" command\n must be called to tell the oven to run to the new temperature.\n\n :return: None\n ', validator=strict_range, values=[(- 55), 150])

    def run(self):
        """Start the oven running toward its current setpoint."""
        self.write('RUNM')

    def stop(self):
        """Stop the oven."""
        self.write('STOP')

    def initialize_oven(self, wait=True):
        """Initialize the oven.

        :param wait: when True (default), block 2 s after sending INIT to
            give the controller time to settle.
        """
        self.write('INIT')
        if wait:
            sleep(2)

    # Backward-compatible alias for the historically misspelled public name.
    initalize_oven = initialize_oven

    class Thermotron3800Mode(IntFlag):
        # Bit flags reported by the MODE? query.
        PROGRAM_MODE = 1
        EDIT_MODE_STOP = 2
        VIEW_PROGRAM_MODE = 4
        EDIT_MODE_HOLD = 8
        MANUAL_MODE = 16
        DELAYED_START_MODE = 32
        UNUSED = 64
        CALIBRATION_MODE = 128

    def __translate_mode(mode_coded_integer):
        # Plain function (no self): only invoked through the class in the
        # `mode` get_process lambda, so it acts as a static helper.
        mode = Thermotron3800.Thermotron3800Mode(int(mode_coded_integer))
        return mode
class BackgroundTaskMixin():
    """Mixin running a callable on a worker thread, reporting progress and
    cooperative aborts through Qt signals."""
    # Emits (message, percentage 0-100) for each progress update.
    progress_update_signal = Signal(str, int)
    # Emits False when work starts (lock the UI) and True when it ends.
    background_tasks_button_lock_signal = Signal(bool)
    # Cooperative abort flag; checked at every progress_update call.
    abort_background_task_requested: bool = False
    # Handle to the currently running worker thread, if any.
    _background_thread: (threading.Thread | None) = None
    def _start_thread_for(self, target):
        # NOTE(review): pins the patcher subprocess IO loop to the current
        # event loop before spawning -- presumably required by
        # csharp_subprocess; confirm.
        randovania.games.prime2.patcher.csharp_subprocess.IO_LOOP = asyncio.get_event_loop()
        self._background_thread = threading.Thread(target=target, name=f'BackgroundThread for {self}')
        self._background_thread.start()
    def run_in_background_thread(self, target, starting_message: str):
        """Run target(progress_update=...) on a new thread.

        Raises BackgroundTaskInProgressError when a worker is already active.
        """
        last_progress = 0.0
        def progress_update(message: str, progress: (float | None)):
            # progress=None means "keep the previous percentage".
            nonlocal last_progress
            if (progress is None):
                progress = last_progress
            else:
                last_progress = progress
            if self.abort_background_task_requested:
                # Aborts are delivered by raising inside the worker's own
                # progress_update calls.
                self.progress_update_signal.emit(f'{message} - Aborted', int((progress * 100)))
                raise AbortBackgroundTask
            else:
                self.progress_update_signal.emit(message, int((progress * 100)))
        def thread(**_kwargs):
            try:
                target(progress_update=progress_update, **_kwargs)
            except AbortBackgroundTask:
                pass
            finally:
                # Always clear the handle and unlock the UI, even on error.
                self._background_thread = None
                self.background_tasks_button_lock_signal.emit(True)
        if self._background_thread:
            raise BackgroundTaskInProgressError('Trying to start a new background thread while one exists already.')
        self.abort_background_task_requested = False
        progress_update(starting_message, 0)
        self._start_thread_for(thread)
        # Lock the buttons while the worker runs.
        self.background_tasks_button_lock_signal.emit(False)
    async def run_in_background_async(self, target, starting_message: str):
        """Run *target* on the background thread and await its result."""
        fut = concurrent.futures.Future()
        def work(**_kwargs):
            try:
                fut.set_result(target(**_kwargs))
            except AbortBackgroundTask:
                fut.cancel()
            except Exception as e:
                fut.set_exception(e)
        self.run_in_background_thread(work, starting_message)
        return (await asyncio.futures.wrap_future(fut))
    def stop_background_process(self):
        # Cooperative: the worker only stops at its next progress_update call.
        self.abort_background_task_requested = True
    def has_background_process(self) -> bool:
        return (self._background_thread is not None)
def could_use_op(input):
    """Return True when the custom conv2d gradfix op can be used for *input*."""
    # Both the module-level switch and cuDNN must be on.
    if not (enabled and torch.backends.cudnn.enabled):
        return False
    # The custom op only exists for CUDA tensors.
    if input.device.type != 'cuda':
        return False
    # Only validated against PyTorch 1.7.x / 1.8.x.
    if torch.__version__.startswith(('1.7.', '1.8.')):
        return True
    warnings.warn(f'conv2d_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.conv2d().')
    return False
class Textual_Encoder(nn.Module):
    """Encode the precomputed test prompts for the configured dataset with a
    frozen CLIP text encoder, tiling the features across projection views."""

    def __init__(self, cfg, classnames, clip_model):
        super().__init__()
        self.cfg = cfg
        self.classnames = classnames
        self.clip_model = clip_model
        self.dtype = clip_model.dtype

    def forward(self):
        # Look up the dataset/backbone-specific prompt list.
        prompt_key = '{}_{}_test_prompts'.format(self.cfg.DATASET.NAME.lower(), self.cfg.MODEL.BACKBONE.NAME2)
        raw_prompts = best_prompt_weight[prompt_key]
        tokenized = torch.cat([clip.tokenize(p) for p in raw_prompts]).cuda()
        encoded = self.clip_model.encode_text(tokenized)
        # Repeat the feature dim once per projection view.
        return encoded.repeat(1, self.cfg.MODEL.PROJECT.NUM_VIEWS)
class SyncMiddleware(LogMiddleware):
    """Spider middleware tracking a tree of child requests so that a
    SyncSignalItem is emitted once every descendant of a tagged request
    has finished.

    `request_parent` maps a request fingerprint either to an int (the
    outstanding-descendant counter of a tagged "root" request) or to the
    root's fingerprint (bytes) for deeper descendants. `sync_items` holds
    the payload to emit per root, keyed by the root's fingerprint.
    """
    def __init__(self):
        self.request_parent = dict()
        self.sync_items = dict()
        self._lock = asyncio.Lock()
    async def process_spider_output(self, response, result, spider):
        # The spider opts in by defining `sync_item_key`; a request whose
        # cb_kwargs contain that key becomes a tracked "root".
        key = getattr(spider, 'sync_item_key')
        if (key is None):
            # Syncing disabled: pass everything through untouched.
            async for item in result:
                (yield item)
            return
        parent_fingerprint = fingerprint(response.request)
        async for item in result:
            if (not isinstance(item, scrapy.Request)):
                (yield item)
                continue
            # Wrap the errback so failed requests are still counted down.
            (yield item.replace(errback=self.make_errback(item.errback)))
            value = item.cb_kwargs.get(key)
            if (value is not None):
                # A new root: start its counter at 1 and stash its payload.
                assert isinstance(value, dict)
                req_fingerprint = fingerprint(item)
                (await self._lock.acquire())
                self.request_parent[req_fingerprint] = 1
                self.sync_items[req_fingerprint] = value
                self._lock.release()
                continue
            (await self._lock.acquire())
            if (not self.request_parent.get(parent_fingerprint)):
                # Parent is untracked: nothing to account for.
                self._lock.release()
                continue
            req_fingerprint = fingerprint(item)
            grandpa_fingerprint = self.request_parent[parent_fingerprint]
            if isinstance(grandpa_fingerprint, bytes):
                # Parent is itself a descendant: link the child to the root
                # and bump the root's counter.
                self.request_parent[req_fingerprint] = grandpa_fingerprint
                self.request_parent[grandpa_fingerprint] += 1
            else:
                # Parent is a root (int counter): link the child to it.
                self.request_parent[req_fingerprint] = parent_fingerprint
                self.request_parent[parent_fingerprint] += 1
            self._lock.release()
        # This response is done producing children: count it down.
        (await self._lock.acquire())
        (yield (await self._release_sync_item(response.request)))
        self._lock.release()
    def make_errback(self, old_errback) -> Callable:
        """Wrap *old_errback* so a failed request still releases its sync slot."""
        async def new_errback(failure):
            old_results = (old_errback(failure) if old_errback else None)
            if isinstance(old_results, Generator):
                for rlt in old_results:
                    (yield rlt)
            if isinstance(old_results, AsyncGenerator):
                async for rlt in old_results:
                    (yield rlt)
            request = failure.request
            self.log(message='Get error when fetching {} with {}, callback args {}'.format(request.url, request.body, str(request.cb_kwargs)), level=logging.WARNING)
            (yield (await self._release_sync_item(request)))
        return new_errback
    async def _release_sync_item(self, finished_request: scrapy.Request) -> Union[(SyncSignalItem, None)]:
        """Decrement the counter for *finished_request*'s root; when it hits
        zero, pop the payload and return it wrapped in a SyncSignalItem.

        NOTE(review): caller is expected to hold self._lock -- both call
        sites acquire it first; confirm.
        """
        parent_fingerprint = fingerprint(finished_request)
        grandpa_fingerprint = self.request_parent.get(parent_fingerprint)
        if (grandpa_fingerprint is None):
            return
        value = None
        if (not isinstance(grandpa_fingerprint, bytes)):
            # finished_request is itself a root.
            self.request_parent[parent_fingerprint] -= 1
            if (self.request_parent[parent_fingerprint] == 0):
                del self.request_parent[parent_fingerprint]
                value = self.sync_items.pop(parent_fingerprint)
        else:
            # finished_request is a descendant: count down at its root.
            self.request_parent[grandpa_fingerprint] -= 1
            del self.request_parent[parent_fingerprint]
            if (self.request_parent[grandpa_fingerprint] == 0):
                del self.request_parent[grandpa_fingerprint]
                value = self.sync_items.pop(grandpa_fingerprint)
        if (value is None):
            return
        self.log(message='Synchronized: {}'.format(value), level=logging.INFO)
        return SyncSignalItem(signal=value)
def apply_to_ndarray(f, sample):
    """Recursively apply *f* to every np.ndarray nested inside *sample*.

    Dicts, lists and tuples are traversed and rebuilt with the same container
    type; any other leaf value is returned unchanged.

    :param f: callable applied to each np.ndarray leaf.
    :param sample: sized container (dict/list/tuple) of arbitrarily nested data.
    :return: structure mirroring *sample*. An empty *sample* yields ``{}``
        regardless of its type (kept for backward compatibility).
    """
    if len(sample) == 0:
        return {}

    def _apply(x):
        if isinstance(x, np.ndarray):
            return f(x)
        elif isinstance(x, dict):
            return {key: _apply(value) for (key, value) in x.items()}
        elif isinstance(x, list):
            return [_apply(item) for item in x]
        elif isinstance(x, tuple):
            # Bug fix: tuples used to be rebuilt as lists; preserve the type.
            return tuple(_apply(item) for item in x)
        else:
            return x
    return _apply(sample)
class UserDeleteView(DeleteView):
    """Delete a User; only POST is allowed (GET raises 404).

    The former pass-through overrides (__init__, dispatch, post, delete,
    get_object, get_queryset, get_slug_field, get_context_data,
    get_context_object_name, render_to_response, get_template_names) merely
    called ``super()`` and have been removed -- the inherited behavior is
    identical.
    """
    model = User
    template_name = 'django_scantron/user_delete.html'
    slug_field = 'slug'
    slug_url_kwarg = 'slug'
    pk_url_kwarg = 'pk'
    context_object_name = 'user'

    def get(self, request, *args, **kwargs):
        # Deletion must be an explicit POST; hide the confirmation page.
        raise Http404

    def get_success_url(self):
        # Return to the user listing after a successful delete.
        return reverse('user_list')
class GetRootUUIDTests(unittest.TestCase):
    """Check get_root_uuid against a fixed set of generated artifacts."""

    # Bug fix: setUpClass/tearDownClass must be classmethods -- unittest
    # invokes them on the class, so undecorated versions raise TypeError.
    @classmethod
    def setUpClass(cls):
        cls.das = DummyArtifacts()
        cls.tempdir = cls.das.tempdir

    @classmethod
    def tearDownClass(cls):
        cls.das.free()

    def test_get_root_uuid(self):
        # Expected root UUID for each artifact version, in generation order.
        exp_root_uuids = {'0': '89af91c0-033d-4e30-8ac4-f29a3b407dc1', '1': '5b929500-e4d6-4d3f-8f5f-93fd95d1117d', '2': 'e01f0484-40d4-420e-adcf-ca9be58ed1ee', '3': 'aa960110-4069-4b7c-97a3-8a768875e515', '4': '856502cb-66f2-45aa-a86c-e484cc9bfd57', '5': '48af8384-2b0a-4b26-b85c-11b79c0d6ea6', '6': '6facaf61-1676-45eb-ada0-d530be678b27'}
        for (artifact, exp_uuid) in zip(self.das.all_artifact_versions, exp_root_uuids.values()):
            with zipfile.ZipFile(artifact.filepath) as zfh:
                self.assertEqual(exp_uuid, get_root_uuid(zfh))
class Swinv2Config(PretrainedConfig):
    """Configuration for a Swin Transformer V2 model.

    Holds the architecture hyper-parameters; the defaults correspond to a
    base Swinv2 variant. Extra keyword arguments are forwarded to
    PretrainedConfig.
    """
    model_type = 'swinv2'
    # Map generic HF attribute names onto the Swin-specific ones.
    attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}

    # NOTE(review): the mutable list defaults (depths, num_heads) are shared
    # across calls; they are only assigned here, but mutating a config created
    # with the defaults would corrupt the shared lists.
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act='gelu', use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-05, encoder_stride=32, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        # Bug fix: was stored only under the misspelled name `path_norm`.
        self.patch_norm = patch_norm
        self.path_norm = patch_norm  # deprecated misspelled alias, kept for back-compat
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # Channel dim doubles at each of the len(depths)-1 patch-merge stages.
        self.hidden_size = int((embed_dim * (2 ** (len(depths) - 1))))
        self.pretrained_window_sizes = (0, 0, 0, 0)
def main(config, args):
    """Dispatch to the sub-module named by ``args.module``.

    Imports ``<package>.<module>`` and calls its ``main(config, args)`` when
    ``args.name == 'all'``; prints usage hints / errors otherwise.
    """
    # Guard: a module name must be given and must not be 'main' itself.
    if not (args.module and args.module != 'main'):
        logger.error('\nerror: invalid module')
        help()
        return
    if args.module == 'help':
        help()
        return
    module_path = (__package__ + '.') + args.module
    try:
        library = importlib.import_module(module_path)
        if args.name == 'all':
            library.main(config, args)
            # Drop the module so a later dispatch re-imports it fresh.
            del sys.modules[module_path]
        else:
            # Covers both a missing -n value and an unsupported one.
            print("-n missing argument. Currently supported value for name for this module by default is 'all'.", file=sys.stderr)
    except ModuleNotFoundError:
        logger.error('\nerror: invalid module')
        help()
class FrNet(nn.Module):
    """Three-stage coarse-to-fine refinement network over a depth map guided
    by a color image.

    Stage 1 downsamples via two max-pools; stages 2 and 3 upsample back,
    reusing skip features (hyper2_stage1 / hyper1_stage1) from stage 1.

    NOTE(review): `nf` is accepted but never used; several stage-2/3 modules
    (conv3/conv4 and their relus, up_depth_stage3) and the pooled depths
    (depth_128, depth_256) are created/computed but never used in forward --
    confirm whether intentional.
    """
    def __init__(self, input_nc, output_nc, nf):
        super(FrNet, self).__init__()
        # ---- Stage 1: encoder at input resolution, two 2x max-pools ----
        self.conv1_d_stage1 = nn.Conv2d(input_nc, 8, 3, 1, 1)
        self.relu1_d_stage1 = nn.LeakyReLU()
        self.conv1_c_stage1 = nn.Conv2d((input_nc * 3), 8, 3, 1, 1)
        self.relu1_c_stage1 = nn.LeakyReLU()
        self.conv2_stage1 = nn.Conv2d(16, 16, 3, 1, 1)
        self.pool1_stage1 = nn.MaxPool2d(2, 2)
        self.relu2_stage1 = nn.LeakyReLU()
        self.conv3_stage1 = nn.Conv2d(32, 32, 3, 1, 1)
        self.relu3_stage1 = nn.LeakyReLU()
        self.conv4_stage1 = nn.Conv2d(32, 32, 3, 1, 1)
        self.relu4_stage1 = nn.LeakyReLU()
        self.pool2_stage1 = nn.MaxPool2d(2, 2)
        self.conv5_stage1 = nn.Conv2d(64, 64, 3, 1, 1)
        self.relu5_stage1 = nn.LeakyReLU()
        self.conv6_stage1 = nn.Conv2d(64, 64, 3, 1, 1)
        self.relu6_stage1 = nn.LeakyReLU()
        self.conv7_stage1 = nn.Conv2d(64, 64, 3, 1, 1)
        self.relu7_stage1 = nn.LeakyReLU()
        self.conv8_stage1 = nn.Conv2d(64, 16, 1)
        self.relu8_stage1 = nn.LeakyReLU()
        self.conv9_stage1 = nn.Conv2d(16, 4, 1)
        self.relu9_stage1 = nn.LeakyReLU()
        self.out_stage1 = nn.Conv2d(4, 1, 1)
        self.out_relu_stage1 = nn.Tanh()
        self.avgpool_img_4 = nn.AvgPool2d(4)
        # ---- Stage 2: refine stage-1 output with 4x-downsampled color ----
        self.conv1_d_stage2 = nn.Conv2d(input_nc, 8, 3, 1, 1)
        self.relu1_d_stage2 = nn.LeakyReLU()
        self.conv1_c_stage2 = nn.Conv2d((input_nc * 3), 8, 3, 1, 1)
        self.relu1_c_stage2 = nn.LeakyReLU()
        self.conv2_stage2 = nn.Conv2d(16, 16, 3, 1, 1)
        self.relu2_stage2 = nn.LeakyReLU()
        self.up_stage2 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
        self.conv3_stage2 = nn.Conv2d(32, 32, 3, 1, 1)
        self.relu3_stage2 = nn.LeakyReLU()
        self.conv4_stage2 = nn.Conv2d(32, 32, 3, 1, 1)
        self.relu4_stage2 = nn.LeakyReLU()
        # 96 in = 32 upsampled stage-2 features + 64 stage-1 skip (hyper2).
        self.conv5_stage2 = nn.Conv2d(96, 64, 3, 1, 1)
        self.relu5_stage2 = nn.LeakyReLU()
        self.conv6_stage2 = nn.Conv2d(64, 64, 3, 1, 1)
        self.relu6_stage2 = nn.LeakyReLU()
        self.conv7_stage2 = nn.Conv2d(64, 64, 3, 1, 1)
        self.relu7_stage2 = nn.LeakyReLU()
        self.conv8_stage2 = nn.Conv2d(64, 16, 1)
        self.relu8_stage2 = nn.LeakyReLU()
        self.conv9_stage2 = nn.Conv2d(16, 4, 1)
        self.relu9_stage2 = nn.LeakyReLU()
        self.out_stage2 = nn.Conv2d(4, 1, 1)
        self.out_relu_stage2 = nn.Tanh()
        self.avgpool_img_2 = nn.AvgPool2d(2)
        # ---- Stage 3: refine stage-2 output with 2x-downsampled color ----
        self.up_depth_stage3 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
        self.conv1_d_stage3 = nn.Conv2d(input_nc, 8, 3, 1, 1)
        self.relu1_d_stage3 = nn.LeakyReLU()
        self.conv1_c_stage3 = nn.Conv2d((input_nc * 3), 8, 3, 1, 1)
        self.relu1_c_stage3 = nn.LeakyReLU()
        self.conv2_stage3 = nn.Conv2d(16, 16, 3, 1, 1)
        self.relu2_stage3 = nn.LeakyReLU()
        self.up_stage3 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
        self.conv3_stage3 = nn.Conv2d(32, 32, 3, 1, 1)
        self.relu3_stage3 = nn.LeakyReLU()
        self.conv4_stage3 = nn.Conv2d(32, 32, 3, 1, 1)
        self.relu4_stage3 = nn.LeakyReLU()
        # 64 in = 32 upsampled stage-3 features + 32 stage-1 skip (hyper1).
        self.conv5_stage3 = nn.Conv2d(64, 64, 3, 1, 1)
        self.relu5_stage3 = nn.LeakyReLU()
        self.conv6_stage3 = nn.Conv2d(64, 64, 3, 1, 1)
        self.relu6_stage3 = nn.LeakyReLU()
        self.conv7_stage3 = nn.Conv2d(64, 64, 3, 1, 1)
        self.relu7_stage3 = nn.LeakyReLU()
        self.conv8_stage3 = nn.Conv2d(64, 16, 1)
        self.relu8_stage3 = nn.LeakyReLU()
        self.conv9_stage3 = nn.Conv2d(16, 4, 1)
        self.relu9_stage3 = nn.LeakyReLU()
        self.out_stage3 = nn.Conv2d(4, 1, 1)
        self.out_relu_stage3 = nn.Tanh()
    def forward(self, depth, color):
        # ---- Stage 1: process full-res depth + color, pool twice ----
        out1_d_stage1 = self.conv1_d_stage1(depth)
        out1_d_stage1 = self.relu1_d_stage1(out1_d_stage1)
        out1_c_stage1 = self.conv1_c_stage1(color)
        out1_c_stage1 = self.relu1_c_stage1(out1_c_stage1)
        conv1_stage1 = torch.cat([out1_d_stage1, out1_c_stage1], dim=1)
        out2_stage1 = self.conv2_stage1(conv1_stage1)
        out2_stage1 = self.relu2_stage1(out2_stage1)
        # hyper1/hyper2 are dense ("hypercolumn"-style) concats reused as
        # skip connections by stages 3 and 2 respectively.
        hyper1_stage1 = torch.cat([conv1_stage1, out2_stage1], dim=1)
        pool1_out_stage1 = self.pool1_stage1(hyper1_stage1)
        conv3_out_stage1 = self.conv3_stage1(pool1_out_stage1)
        conv3_out_stage1 = self.relu3_stage1(conv3_out_stage1)
        conv4_out_stage1 = self.conv4_stage1(conv3_out_stage1)
        conv4_out_stage1 = self.relu4_stage1(conv4_out_stage1)
        hyper2_stage1 = torch.cat([conv3_out_stage1, conv4_out_stage1], dim=1)
        pool2_out_stage1 = self.pool2_stage1(hyper2_stage1)
        conv5_out_stage1 = self.conv5_stage1(pool2_out_stage1)
        conv5_out_stage1 = self.relu5_stage1(conv5_out_stage1)
        conv6_out_stage1 = self.conv6_stage1(conv5_out_stage1)
        conv6_out_stage1 = self.relu6_stage1(conv6_out_stage1)
        conv7_out_stage1 = self.conv7_stage1(conv6_out_stage1)
        conv7_out_stage1 = self.relu7_stage1(conv7_out_stage1)
        conv8_out_stage1 = self.conv8_stage1(conv7_out_stage1)
        conv8_out_stage1 = self.relu8_stage1(conv8_out_stage1)
        conv9_out_stage1 = self.conv9_stage1(conv8_out_stage1)
        conv9_out_stage1 = self.relu9_stage1(conv9_out_stage1)
        out_final_stage1 = self.out_stage1(conv9_out_stage1)
        out_final_stage1 = self.out_relu_stage1(out_final_stage1)
        # ---- Stage 2: stage-1 prediction + 4x-downsampled color ----
        color_128 = self.avgpool_img_4(color)
        depth_128 = self.avgpool_img_4(depth)  # NOTE(review): unused
        out1_d_stage2 = self.conv1_d_stage2(out_final_stage1)
        out1_d_stage2 = self.relu1_d_stage2(out1_d_stage2)
        out1_c_stage2 = self.conv1_c_stage2(color_128)
        out1_c_stage2 = self.relu1_c_stage2(out1_c_stage2)
        conv1_stage2 = torch.cat([out1_d_stage2, out1_c_stage2], dim=1)
        out2_stage2 = self.conv2_stage2(conv1_stage2)
        out2_stage2 = self.relu2_stage2(out2_stage2)
        hyper1_stage2 = torch.cat([conv1_stage2, out2_stage2], dim=1)
        up1_out_stage2 = self.up_stage2(hyper1_stage2)
        # Skip connection from stage 1 (post-pool1 features).
        up1_out_stage2 = torch.cat([up1_out_stage2, hyper2_stage1], dim=1)
        conv5_out_stage2 = self.conv5_stage2(up1_out_stage2)
        conv5_out_stage2 = self.relu5_stage2(conv5_out_stage2)
        conv6_out_stage2 = self.conv6_stage2(conv5_out_stage2)
        conv6_out_stage2 = self.relu6_stage2(conv6_out_stage2)
        conv7_out_stage2 = self.conv7_stage2(conv6_out_stage2)
        conv7_out_stage2 = self.relu7_stage2(conv7_out_stage2)
        conv8_out_stage2 = self.conv8_stage2(conv7_out_stage2)
        conv8_out_stage2 = self.relu8_stage2(conv8_out_stage2)
        conv9_out_stage2 = self.conv9_stage2(conv8_out_stage2)
        conv9_out_stage2 = self.relu9_stage2(conv9_out_stage2)
        out_final_stage2 = self.out_stage2(conv9_out_stage2)
        out_final_stage2 = self.out_relu_stage2(out_final_stage2)
        # ---- Stage 3: stage-2 prediction + 2x-downsampled color ----
        color_256 = self.avgpool_img_2(color)
        depth_256 = self.avgpool_img_2(depth)  # NOTE(review): unused
        out1_d_stage3 = self.conv1_d_stage3(out_final_stage2)
        out1_d_stage3 = self.relu1_d_stage3(out1_d_stage3)
        out1_c_stage3 = self.conv1_c_stage3(color_256)
        out1_c_stage3 = self.relu1_c_stage3(out1_c_stage3)
        conv1_stage3 = torch.cat([out1_d_stage3, out1_c_stage3], dim=1)
        out2_stage3 = self.conv2_stage3(conv1_stage3)
        out2_stage3 = self.relu2_stage3(out2_stage3)
        hyper1_stage3 = torch.cat([conv1_stage3, out2_stage3], dim=1)
        up1_out_stage3 = self.up_stage3(hyper1_stage3)
        # Skip connection from stage 1 (full-resolution features).
        up1_out_stage3 = torch.cat([up1_out_stage3, hyper1_stage1], dim=1)
        conv5_out_stage3 = self.conv5_stage3(up1_out_stage3)
        conv5_out_stage3 = self.relu5_stage3(conv5_out_stage3)
        conv6_out_stage3 = self.conv6_stage3(conv5_out_stage3)
        conv6_out_stage3 = self.relu6_stage3(conv6_out_stage3)
        conv7_out_stage3 = self.conv7_stage3(conv6_out_stage3)
        conv7_out_stage3 = self.relu7_stage3(conv7_out_stage3)
        conv8_out_stage3 = self.conv8_stage3(conv7_out_stage3)
        conv8_out_stage3 = self.relu8_stage3(conv8_out_stage3)
        conv9_out_stage3 = self.conv9_stage3(conv8_out_stage3)
        conv9_out_stage3 = self.relu9_stage3(conv9_out_stage3)
        out_final_stage3 = self.out_stage3(conv9_out_stage3)
        out_final_stage3 = self.out_relu_stage3(out_final_stage3)
        # Only the final (highest-resolution) refinement is returned.
        return out_final_stage3
def train(opt):
print('Training Information')
print('start from {}'.format(opt.start_from))
print('box from {}'.format(opt.input_box_dir))
print('attributes from {}'.format(opt.input_att_dir))
print('features from {}'.format(opt.input_fc_dir))
print('batch size ={}'.format(opt.batch_size))
print('#GPU={}'.format(torch.cuda.device_count()))
print('Caption model {}'.format(opt.caption_model))
print('refine aoa {}'.format(opt.refine_aoa))
print('Number of aoa module {}'.format(opt.aoa_num))
print('Self Critic After {}'.format(opt.self_critical_after))
print('learning_rate_decay_every {}'.format(opt.learning_rate_decay_every))
if (opt.use_val or opt.use_test):
print('It is a refining training')
print('Val is {} used for training '.format(('' if opt.use_val else 'not')))
print('Test is {} used for training '.format(('' if opt.use_test else 'not')))
print('')
checkpoint_path_suffix = '_bs{}'.format(opt.batch_size)
if opt.use_warmup:
checkpoint_path_suffix += '_warmup'
if (torch.cuda.device_count() > 1):
checkpoint_path_suffix += '_gpu{}'.format(torch.cuda.device_count())
if opt.checkpoint_path.endswith('_rl'):
opt.checkpoint_path = ((opt.checkpoint_path[:(- 3)] + checkpoint_path_suffix) + '_rl')
else:
opt.checkpoint_path += checkpoint_path_suffix
print('Save model to {}'.format(opt.checkpoint_path))
(opt.use_fc, opt.use_att) = utils.if_use_feat(opt.caption_model)
if opt.use_box:
opt.att_feat_size = (opt.att_feat_size + 5)
acc_steps = getattr(opt, 'acc_steps', 1)
name_append = opt.name_append
if ((len(name_append) > 0) and (name_append[0] != '-')):
name_append = ('_' + name_append)
loader = DataLoader(opt)
opt.vocab_size = loader.vocab_size
opt.seq_length = loader.seq_length
opt.losses_log_every = (len(loader.split_ix['train']) // opt.batch_size)
print('Evaluate on each {} iterations'.format(opt.losses_log_every))
if opt.write_summary:
print('write summary to {}'.format(opt.checkpoint_path))
tb_summary_writer = (tb and tb.SummaryWriter(opt.checkpoint_path))
infos = {}
histories = {}
if (opt.start_from is not None):
infos_path = os.path.join(opt.start_from, (('infos' + name_append) + '.pkl'))
print('Load model information {}'.format(infos_path))
with open(infos_path, 'rb') as f:
infos = utils.pickle_load(f)
saved_model_opt = infos['opt']
need_be_same = ['caption_model', 'rnn_type', 'rnn_size', 'num_layers']
for checkme in need_be_same:
assert (vars(saved_model_opt)[checkme] == vars(opt)[checkme]), ("Command line argument and saved model disagree on '%s' " % checkme)
histories_path = os.path.join(opt.start_from, (('histories' + name_append) + '.pkl'))
if os.path.isfile(histories_path):
with open(histories_path, 'rb') as f:
histories = utils.pickle_load(f)
else:
print('')
print('Initialize training process from all begining')
print('')
infos['iter'] = 0
infos['epoch'] = 0
infos['iterators'] = loader.iterators
infos['split_ix'] = loader.split_ix
infos['vocab'] = loader.get_vocab()
infos['opt'] = opt
iteration = infos.get('iter', 0)
epoch = infos.get('epoch', 0)
print('start from {} iterations -- {} epoch'.format(iteration, epoch))
val_result_history = histories.get('val_result_history', {})
loss_history = histories.get('loss_history', {})
lr_history = histories.get('lr_history', {})
ss_prob_history = histories.get('ss_prob_history', {})
loader.iterators = infos.get('iterators', loader.iterators)
start_Img_idx = loader.iterators['train']
loader.split_ix = infos.get('split_ix', loader.split_ix)
if (opt.load_best_score == 1):
best_val_score = infos.get('best_val_score', None)
best_epoch = infos.get('best_epoch', None)
best_cider = infos.get('best_val_score', 0)
print('best history val cider score: {} in epoch {}======='.format(best_val_score, best_epoch))
if (opt.name_append.isdigit() and (int(opt.name_append) < 100)):
assert ((int(opt.name_append) - epoch) == 1), 'dismatch in the model index and the real epoch number'
epoch += 1
opt.vocab = loader.get_vocab()
model = models.setup(opt).cuda()
del opt.vocab
if (torch.cuda.device_count() > 1):
dp_model = torch.nn.DataParallel(model)
else:
dp_model = model
lw_model = LossWrapper1(model, opt)
dp_lw_model = torch.nn.DataParallel(lw_model)
epoch_done = True
dp_lw_model.train()
if opt.noamopt:
assert (opt.caption_model in ['transformer', 'aoa']), 'noamopt can only work with transformer'
optimizer = utils.get_std_opt(model, factor=opt.noamopt_factor, warmup=opt.noamopt_warmup)
optimizer._step = iteration
elif opt.reduce_on_plateau:
optimizer = utils.build_optimizer(model.parameters(), opt)
optimizer = utils.ReduceLROnPlateau(optimizer, factor=0.5, patience=3)
else:
optimizer = utils.build_optimizer(model.parameters(), opt)
if (vars(opt).get('start_from', None) is not None):
optimizer_path = os.path.join(opt.start_from, (('optimizer' + name_append) + '.pth'))
if os.path.isfile(optimizer_path):
print('Loading optimizer')
optimizer.load_state_dict(torch.load(optimizer_path))
def save_checkpoint(model, infos, optimizer, histories=None, append=''):
if (len(append) > 0):
append = ('_' + append)
if (not os.path.isdir(opt.checkpoint_path)):
os.makedirs(opt.checkpoint_path)
checkpoint_path = os.path.join(opt.checkpoint_path, ('model%s.pth' % append))
torch.save(model.state_dict(), checkpoint_path)
print('Save model state to {}'.format(checkpoint_path))
optimizer_path = os.path.join(opt.checkpoint_path, ('optimizer%s.pth' % append))
torch.save(optimizer.state_dict(), optimizer_path)
print('Save model optimizer to {}'.format(optimizer_path))
with open(os.path.join(opt.checkpoint_path, ('infos' + ('%s.pkl' % append))), 'wb') as f:
utils.pickle_dump(infos, f)
print('Save training information to {}'.format(os.path.join(opt.checkpoint_path, ('infos' + ('%s.pkl' % append)))))
if histories:
with open(os.path.join(opt.checkpoint_path, ('histories' + ('%s.pkl' % append))), 'wb') as f:
utils.pickle_dump(histories, f)
print('Save training historyes to {}'.format(os.path.join(opt.checkpoint_path, ('histories' + ('%s.pkl' % append)))))
try:
while True:
if epoch_done:
if ((not opt.noamopt) and (not opt.reduce_on_plateau)):
if ((epoch > opt.learning_rate_decay_start) and (opt.learning_rate_decay_start >= 0)):
frac = ((epoch - opt.learning_rate_decay_start) // opt.learning_rate_decay_every)
decay_factor = (opt.learning_rate_decay_rate ** frac)
opt.current_lr = ((opt.learning_rate * decay_factor) * opt.refine_lr_decay)
else:
opt.current_lr = opt.learning_rate
infos['current_lr'] = opt.current_lr
print('Current Learning Rate is: {}'.format(opt.current_lr))
utils.set_lr(optimizer, opt.current_lr)
if ((epoch > opt.scheduled_sampling_start) and (opt.scheduled_sampling_start >= 0)):
frac = ((epoch - opt.scheduled_sampling_start) // opt.scheduled_sampling_increase_every)
opt.ss_prob = min((opt.scheduled_sampling_increase_prob * frac), opt.scheduled_sampling_max_prob)
model.ss_prob = opt.ss_prob
if ((opt.self_critical_after != (- 1)) and (epoch >= opt.self_critical_after)):
sc_flag = True
init_scorer(opt.cached_tokens)
else:
sc_flag = False
epoch_done = False
print('{}th Epoch Training starts now!'.format(epoch))
with tqdm(total=len(loader.split_ix['train']), initial=start_Img_idx) as pbar:
for i in range(start_Img_idx, len(loader.split_ix['train']), opt.batch_size):
start = time.time()
if ((opt.use_warmup == 1) and (iteration < opt.noamopt_warmup)):
opt.current_lr = ((opt.learning_rate * (iteration + 1)) / opt.noamopt_warmup)
utils.set_lr(optimizer, opt.current_lr)
data = loader.get_batch('train')
if ((iteration % acc_steps) == 0):
optimizer.zero_grad()
torch.cuda.synchronize()
start = time.time()
tmp = [data['fc_feats'], data['att_feats'], data['flag_feats'], data['labels'], data['masks'], data['att_masks']]
tmp = [(_ if (_ is None) else _.cuda()) for _ in tmp]
(fc_feats, att_feats, flag_feats, labels, masks, att_masks) = tmp
model_out = dp_lw_model(fc_feats, att_feats, flag_feats, labels, masks, att_masks, data['gts'], torch.arange(0, len(data['gts'])), sc_flag)
loss = model_out['loss'].mean()
loss_sp = (loss / acc_steps)
loss_sp.backward()
if (((iteration + 1) % acc_steps) == 0):
utils.clip_gradient(optimizer, opt.grad_clip)
optimizer.step()
torch.cuda.synchronize()
train_loss = loss.item()
end = time.time()
if (not sc_flag):
pbar.set_description('iter {:8} (epoch {:2}), train_loss = {:.3f}, time/batch = {:.3f}'.format(iteration, epoch, train_loss, (end - start)))
else:
pbar.set_description('iter {:8} (epoch {:2}), avg_reward = {:.3f}, time/batch = {:.3f}'.format(iteration, epoch, model_out['reward'].mean(), (end - start)))
iteration += 1
pbar.update(opt.batch_size)
if data['bounds']['wrapped']:
epoch += 1
epoch_done = True
save_checkpoint(model, infos, optimizer)
if (epoch > 15):
save_checkpoint(model, infos, optimizer, append=str(epoch))
print('')
print('======Best Cider = {} in epoch {}: iter {}!======'.format(best_val_score, best_epoch, infos.get('best_itr', None)))
print('')
if (((iteration % opt.losses_log_every) == 0) and opt.write_summary):
add_summary_value(tb_summary_writer, 'loss/train_loss', train_loss, iteration)
if opt.noamopt:
opt.current_lr = optimizer.rate()
elif opt.reduce_on_plateau:
opt.current_lr = optimizer.current_lr
add_summary_value(tb_summary_writer, 'hyperparam/learning_rate', opt.current_lr, iteration)
add_summary_value(tb_summary_writer, 'hyperparam/scheduled_sampling_prob', model.ss_prob, iteration)
if sc_flag:
add_summary_value(tb_summary_writer, 'avg_reward', model_out['reward'].mean(), iteration)
loss_history[iteration] = (train_loss if (not sc_flag) else model_out['reward'].mean())
lr_history[iteration] = opt.current_lr
ss_prob_history[iteration] = model.ss_prob
infos['iter'] = iteration
infos['epoch'] = epoch
infos['iterators'] = loader.iterators
infos['split_ix'] = loader.split_ix
if (((iteration % opt.save_checkpoint_every) == 0) and eval_ and (epoch > 3)):
model_path = os.path.join(opt.checkpoint_path, ('model_itr%s.pth' % iteration))
if (opt.use_val and (not opt.use_test)):
val_split = 'test'
if (not opt.use_val):
val_split = 'val'
eval_kwargs = {'split': val_split, 'dataset': opt.input_json, 'model': model_path}
eval_kwargs.update(vars(opt))
(val_loss, predictions, lang_stats) = eval_utils.eval_split(dp_model, lw_model.crit, loader, eval_kwargs)
if opt.reduce_on_plateau:
if ('CIDEr' in lang_stats):
optimizer.scheduler_step((- lang_stats['CIDEr']))
else:
optimizer.scheduler_step(val_loss)
if opt.write_summary:
add_summary_value(tb_summary_writer, 'loss/validation loss', val_loss, iteration)
if (lang_stats is not None):
bleu_dict = {}
for (k, v) in lang_stats.items():
if ('Bleu' in k):
bleu_dict[k] = v
if (len(bleu_dict) > 0):
tb_summary_writer.add_scalars('val/Bleu', bleu_dict, epoch)
for (k, v) in lang_stats.items():
if ('Bleu' not in k):
add_summary_value(tb_summary_writer, ('val/' + k), v, iteration)
val_result_history[iteration] = {'loss': val_loss, 'lang_stats': lang_stats, 'predictions': predictions}
if (opt.language_eval == 1):
current_score = lang_stats['CIDEr']
else:
current_score = (- val_loss)
best_flag = False
if ((best_val_score is None) or (current_score > best_val_score)):
best_val_score = current_score
infos['best_epoch'] = epoch
infos['best_itr'] = iteration
best_flag = True
infos['best_val_score'] = best_val_score
histories['val_result_history'] = val_result_history
histories['loss_history'] = loss_history
histories['lr_history'] = lr_history
histories['ss_prob_history'] = ss_prob_history
save_checkpoint(model, infos, optimizer, histories)
if opt.save_history_ckpt:
save_checkpoint(model, infos, optimizer, append=str(iteration))
if best_flag:
best_epoch = epoch
save_checkpoint(model, infos, optimizer, append='best')
print('update best model at {} iteration--{} epoch'.format(iteration, epoch))
start_Img_idx = 0
if ((epoch >= opt.max_epochs) and (opt.max_epochs != (- 1))):
print('epoch {} break all'.format(epoch))
save_checkpoint(model, infos, optimizer)
tb_summary_writer.close()
print('{} Training Done !'.format(('Refine' if (opt.use_test or opt.use_val) else '')))
break
except (RuntimeError, KeyboardInterrupt):
print('Save ckpt on exception ...')
save_checkpoint(model, infos, optimizer, append='interrupt')
print('Save ckpt done.')
stack_trace = traceback.format_exc()
print(stack_trace) |
class BosonicTransformation(Transformation):
    """Transforms a vibronic (bosonic) structure problem into qubit operators.

    The driver supplies a ``WatsonHamiltonian``; this class expands it in a
    modal basis (currently only the harmonic basis is implemented), wraps the
    result in a ``BosonicOperator`` and maps that onto a
    ``WeightedPauliOperator`` using the configured qubit mapping.

    NOTE(review): several accessors below (``num_modes``, ``basis``,
    ``commutation_rule``, ``untapered_qubit_op``) and the two module-mapping
    helpers read like stripped ``@property``/``@staticmethod`` definitions —
    confirm against the upstream source before relying on call style.
    """

    def __init__(self, qubit_mapping: BosonicQubitMappingType=BosonicQubitMappingType.DIRECT, transformation_type: BosonicTransformationType=BosonicTransformationType.HARMONIC, basis_size: Union[(int, List[int])]=2, truncation: int=3):
        """
        Args:
            qubit_mapping: scheme used to map the bosonic operator to qubits.
            transformation_type: basis expansion type; only 'harmonic' is handled.
            basis_size: number of modals per mode; an int is broadcast to every mode.
            truncation: truncation order for the Watson Hamiltonian expansion.
        """
        self._qubit_mapping = qubit_mapping.value
        self._transformation_type = transformation_type.value
        self._basis_size = basis_size
        self._truncation_order = truncation
        # The three fields below are populated by transform()/_do_transform().
        self._num_modes = None
        self._h_mat = None
        self._untapered_qubit_op = None

    def num_modes(self) -> int:
        # Number of vibrational modes; None until transform() has run.
        return self._num_modes

    def basis(self) -> Union[(int, List[int])]:
        # Requested basis size (scalar, or per-mode list after _do_transform()).
        return self._basis_size

    def commutation_rule(self) -> bool:
        # Bosonic operators obey commutation (not anti-commutation) rules.
        return True

    def untapered_qubit_op(self):
        # Qubit operator before any tapering; None until transform() has run.
        return self._untapered_qubit_op

    def transform(self, driver: BaseDriver, aux_operators: Optional[List[Any]]=None) -> Tuple[(WeightedPauliOperator, List[WeightedPauliOperator])]:
        """Run the driver and transform its WatsonHamiltonian into qubit operators.

        Returns:
            Tuple of the main qubit operator and the list of auxiliary operators.
        """
        watson = driver.run()
        (ops, aux_ops) = self._do_transform(watson, aux_operators)
        return (ops, aux_ops)

    def _do_transform(self, watson: WatsonHamiltonian, aux_operators: Optional[List[Union[(BosonicOperator, WeightedPauliOperator)]]]=None) -> Tuple[(WeightedPauliOperator, List[WeightedPauliOperator])]:
        """Build the qubit Hamiltonian plus per-mode occupation auxiliary operators.

        Raises:
            QiskitChemistryError: if the transformation type is not 'harmonic'.
        """
        self._num_modes = watson.num_modes
        if (self._transformation_type == 'harmonic'):
            # Broadcast a scalar basis size to one entry per mode.
            if isinstance(self._basis_size, int):
                self._basis_size = ([self._basis_size] * self._num_modes)
            self._h_mat = HarmonicBasis(watson, self._basis_size, self._truncation_order).convert()
        else:
            raise QiskitChemistryError('Unknown Transformation type')
        bos_op = BosonicOperator(self._h_mat, self._basis_size)
        qubit_op = bos_op.mapping(qubit_mapping=self._qubit_mapping)
        self._untapered_qubit_op = qubit_op
        qubit_op.name = 'Bosonic Operator'
        aux_ops = []

        def _add_aux_op(aux_op: BosonicOperator, name: str) -> None:
            # Map bosonic aux operators to qubits; pass pre-mapped ones through as-is.
            if (not isinstance(aux_op, WeightedPauliOperator)):
                aux_qop = BosonicTransformation._map_bosonic_operator_to_qubit(aux_op, self._qubit_mapping)
                aux_qop.name = name
            else:
                aux_qop = aux_op
            aux_ops.append(aux_qop)
            logger.debug(' num paulis: %s', aux_qop.paulis)
        logger.debug('Creating aux op for number of occupied modals per mode')
        for mode in range(self._num_modes):
            _add_aux_op(bos_op.number_occupied_modals_per_mode(mode), 'Number of occupied modals in mode {}'.format(mode))
        # Caller-supplied auxiliary operators are appended after the built-in ones.
        if (aux_operators is not None):
            for aux_op in aux_operators:
                _add_aux_op(aux_op, aux_op.name)
        return (qubit_op, aux_ops)

    def interpret(self, raw_result: Union[(EigenstateResult, EigensolverResult, MinimumEigensolverResult)]) -> VibronicStructureResult:
        """Normalize a solver result into a VibronicStructureResult.

        Assumes ``raw_result`` is one of the three supported result types;
        anything else leaves ``eigenstate_result`` as None and fails below.
        """
        eigenstate_result = None
        if isinstance(raw_result, EigenstateResult):
            eigenstate_result = raw_result
        elif isinstance(raw_result, EigensolverResult):
            # Wrap a multi-eigenvalue solver result.
            eigenstate_result = EigenstateResult()
            eigenstate_result.raw_result = raw_result
            eigenstate_result.eigenenergies = raw_result.eigenvalues
            eigenstate_result.eigenstates = raw_result.eigenstates
            eigenstate_result.aux_operator_eigenvalues = raw_result.aux_operator_eigenvalues
        elif isinstance(raw_result, MinimumEigensolverResult):
            # Wrap a single-eigenvalue solver result as length-1 arrays/lists.
            eigenstate_result = EigenstateResult()
            eigenstate_result.raw_result = raw_result
            eigenstate_result.eigenenergies = np.asarray([raw_result.eigenvalue])
            eigenstate_result.eigenstates = [raw_result.eigenstate]
            eigenstate_result.aux_operator_eigenvalues = raw_result.aux_operator_eigenvalues
        result = VibronicStructureResult(eigenstate_result.data)
        result.computed_vibronic_energies = eigenstate_result.eigenenergies
        if (result.aux_operator_eigenvalues is not None):
            if (not isinstance(result.aux_operator_eigenvalues, list)):
                aux_operator_eigenvalues = [result.aux_operator_eigenvalues]
            else:
                aux_operator_eigenvalues = result.aux_operator_eigenvalues
            # First num_modes aux values are the per-mode modal occupations
            # created in _do_transform(); extract their real parts.
            result.num_occupied_modals_per_mode = []
            for aux_op_eigenvalues in aux_operator_eigenvalues:
                occ_modals = []
                for mode in range(self._num_modes):
                    if (aux_op_eigenvalues[mode] is not None):
                        occ_modals.append(aux_op_eigenvalues[mode][0].real)
                    else:
                        occ_modals.append(None)
                result.num_occupied_modals_per_mode.append(occ_modals)
        return result

    def _map_bosonic_operator_to_qubit(bos_op: BosonicOperator, qubit_mapping: str) -> WeightedPauliOperator:
        # NOTE(review): no ``self`` parameter and only ever called through the
        # class — upstream declares this @staticmethod; decorator appears
        # stripped here, confirm.
        qubit_op = bos_op.mapping(qubit_mapping=qubit_mapping, threshold=1e-05)
        return qubit_op

    def _build_single_hopping_operator(index: List[List[int]], basis: List[int], qubit_mapping: str) -> WeightedPauliOperator:
        # Build the qubit operator for one excitation index list.
        # NOTE(review): build_hopping_operators passes
        # ``self._build_single_hopping_operator`` to parallel_map; without a
        # @staticmethod decorator the bound ``self`` would occupy ``index`` —
        # confirm against upstream.
        degree = len(index)
        hml = []
        for _ in range(degree):
            hml.append([])
        tmp = []
        # Reverse the index order when assembling the coefficient list.
        for i in range(len(index))[::(- 1)]:
            tmp.append(index[i])
        hml[(- 1)].append([tmp, 1])
        dummpy_op = BosonicOperator(np.asarray(hml, dtype=object), basis)
        qubit_op = dummpy_op.mapping(qubit_mapping)
        # An empty Pauli list means the excitation maps to nothing.
        if (len(qubit_op.paulis) == 0):
            qubit_op = None
        return qubit_op

    def build_hopping_operators(self, excitations: Union[(str, List[List[int]])]='sd') -> Tuple[(Dict[(str, WeightedPauliOperator)], Dict, Dict[(str, List[List[int]])])]:
        """Build excitation (E_k) and de-excitation (Edag_k) hopping operators.

        Args:
            excitations: either a string of excitation letters ('s'/'d') or an
                explicit list of excitation index lists.

        Returns:
            Tuple of (hopping operators by name, empty commutativity dict,
            excitation indices by name).
        """
        exctn_types = {'s': 0, 'd': 1}
        if isinstance(excitations, str):
            degrees = [exctn_types[letter] for letter in excitations]
            excitations_list = UVCC.compute_excitation_lists(self._basis_size, degrees)
        else:
            excitations_list = excitations
        size = len(excitations_list)

        def _dag_list(extn_lst):
            # Swap the two modal indices of each entry to form the adjoint.
            dag_lst = []
            for lst in extn_lst:
                dag_lst.append([lst[0], lst[2], lst[1]])
            return dag_lst
        hopping_operators: Dict[(str, WeightedPauliOperator)] = {}
        excitation_indices = {}
        to_be_executed_list = []
        # Interleave E_k / Edag_k so the parallel_map results line up with the
        # insertion order of hopping_operators' keys below.
        for idx in range(size):
            to_be_executed_list += [excitations_list[idx], _dag_list(excitations_list[idx])]
            hopping_operators['E_{}'.format(idx)] = None
            hopping_operators['Edag_{}'.format(idx)] = None
            excitation_indices['E_{}'.format(idx)] = excitations_list[idx]
            excitation_indices['Edag_{}'.format(idx)] = _dag_list(excitations_list[idx])
        result = parallel_map(self._build_single_hopping_operator, to_be_executed_list, task_args=(self._basis_size, self._qubit_mapping), num_processes=aqua_globals.num_processes)
        for (key, res) in zip(hopping_operators.keys(), result):
            hopping_operators[key] = res
        # No commutativity information is computed for bosonic operators.
        type_of_commutativities: Dict[(str, List[bool])] = {}
        return (hopping_operators, type_of_commutativities, excitation_indices)

    def get_default_filter_criterion(self) -> Optional[Callable[([Union[(List, np.ndarray)], float, Optional[List[float]]], bool)]]:
        """Return a filter accepting only states with one occupied modal per mode."""

        def filter_criterion(self, eigenstate, eigenvalue, aux_values):
            # Each mode's occupation expectation must be (close to) exactly 1.
            for mode in range(self._num_modes):
                if (not np.isclose(aux_values[mode][0], 1)):
                    return False
            return True
        # Bind self explicitly so the criterion can read _num_modes.
        return partial(filter_criterion, self)
def dicom_input_method(config, key_namespace='', patient_id='', site=None, **_):
    """Streamlit input flow for loading a DICOM RT plan.

    Supports two paths: a direct file upload, or a search of a configured
    Monaco DICOM export directory by patient ID.

    Args:
        config: application configuration mapping.
        key_namespace: prefix for streamlit widget keys so several instances
            of this flow can coexist on one page.
        patient_id: initial value for the patient ID text input.
        site: initial Monaco site selection.

    Returns:
        A results dict with keys ``site``, ``patient_id``, ``patient_name``,
        ``data_paths``, ``identifier`` and ``deliveries``; a partial or empty
        dict signals that the input is incomplete or invalid.
    """
    monaco_site = site
    FILE_UPLOAD = 'File upload'
    MONACO_SEARCH = 'Search Monaco file export location'
    import_method = st.radio('DICOM import method', [FILE_UPLOAD, MONACO_SEARCH], key=f'{key_namespace}_dicom_file_import_method')
    if (import_method == FILE_UPLOAD):
        dicom_plan_bytes = st.file_uploader('Upload DICOM RT Plan File', key=f'{key_namespace}_dicom_plan_uploader')
        if (dicom_plan_bytes is None):
            return {}
        try:
            dicom_plan_bytes.seek(0)
            dicom_plan = pydicom.read_file(dicom_plan_bytes, force=True)
        except Exception:
            # Fixed: was a bare ``except:`` which also swallowed SystemExit and
            # KeyboardInterrupt; only genuine parse failures should be reported.
            st.write(_exceptions.WrongFileType('Does not appear to be a DICOM file'))
            return {}
        if (dicom_plan.SOPClassUID != DICOM_PLAN_UID):
            st.write(_exceptions.WrongFileType('The DICOM type needs to be an RT DICOM Plan file'))
            return {}
    # Uploaded plans have no on-disk path; the Monaco branch fills this below.
    data_paths = []
    if (import_method == MONACO_SEARCH):
        try:
            dicom_export_locations = _config.get_dicom_export_locations(config)
        except KeyError:
            st.write(_exceptions.ConfigMissing(f"No Monaco directory is configured. Please use '{FILE_UPLOAD}' instead."))
            return {}
        monaco_site = st_misc.site_picker(config, 'Monaco Export Location', default=monaco_site, key=f'{key_namespace}_monaco_site')
        monaco_export_directory = dicom_export_locations[monaco_site]
        st.write(monaco_export_directory.resolve())
        patient_id = st.text_input('Patient ID', patient_id, key=f'{key_namespace}_patient_id')
        # Monaco exports are named "<patient_id>_*.dcm"; keep only RT plans.
        found_dicom_files = list(monaco_export_directory.glob(f'{patient_id}_*.dcm'))
        dicom_plans = {}
        for path in found_dicom_files:
            dcm = load_dicom_file_if_plan(path)
            if (dcm is not None):
                dicom_plans[path.name] = dcm
        dicom_plan_options = list(dicom_plans.keys())
        if ((len(dicom_plan_options) == 0) and (patient_id != '')):
            st.write(_exceptions.NoRecordsFound(f'No exported DICOM RT plans found for Patient ID {patient_id} within the directory {monaco_export_directory}'))
            return {'patient_id': patient_id}
        if (len(dicom_plan_options) == 1):
            selected_plan = dicom_plan_options[0]
        else:
            selected_plan = st.radio('Select DICOM Plan', dicom_plan_options, key=f'{key_namespace}_select_monaco_export_plan')
        st.write(f'DICOM file being used: `{selected_plan}`')
        dicom_plan = dicom_plans[selected_plan]
        data_paths = [monaco_export_directory.joinpath(selected_plan)]
    patient_id = str(dicom_plan.PatientID)
    st.write(f'Patient ID: `{patient_id}`')
    patient_name = str(dicom_plan.PatientName)
    patient_name = utl_patient.convert_patient_name(patient_name)
    st.write(f'Patient Name: `{patient_name}`')
    rt_plan_name = str(dicom_plan.RTPlanName)
    st.write(f'Plan Name: `{rt_plan_name}`')
    try:
        # Extract deliveries for every fraction group; the user picks one below.
        deliveries_all_fractions = pymedphys.Delivery.from_dicom(dicom_plan, fraction_group_number='all')
    except AttributeError:
        st.write(_exceptions.WrongFileType('Does not appear to be a photon DICOM plan'))
        return {}
    except ValueError as e:
        st.warning('While extracting the delivery information out of the\n DICOM file the following error occurred\n ')
        st.write(e)
        st.stop()
    fraction_groups = list(deliveries_all_fractions.keys())
    if (len(fraction_groups) == 1):
        delivery = deliveries_all_fractions[fraction_groups[0]]
    else:
        # NOTE(review): 'Perscription' is a user-visible typo, but it is kept
        # byte-for-byte so streamlit widget keys/labels stay stable.
        fraction_group_choices = {}
        for (fraction, delivery) in deliveries_all_fractions.items():
            rounded_mu = round(delivery.mu[(- 1)], 1)
            fraction_group_choices[f'Perscription {fraction} with {rounded_mu} MU'] = fraction
        fraction_group_selection = st.radio('Select relevant perscription', list(fraction_group_choices.keys()), key=f'{key_namespace}_dicom_perscription_chooser')
        fraction_group_number = fraction_group_choices[fraction_group_selection]
        delivery = deliveries_all_fractions[fraction_group_number]
    deliveries = [delivery]
    identifier = f'DICOM ({rt_plan_name})'
    return {'site': monaco_site, 'patient_id': patient_id, 'patient_name': patient_name, 'data_paths': data_paths, 'identifier': identifier, 'deliveries': deliveries}
class Window(OperatorMixin):
    """A sliding window over a streaming dataframe.

    The window is either count-based (``n`` rows) or value/time-based
    (``value``, e.g. a Timedelta span over a datetime index), and exposes
    pandas-like aggregations that stream updated results.
    """

    def __init__(self, sdf, n=None, value=None, with_state=False, start=None):
        # A string or Timedelta passed as ``n`` really describes a value window.
        if ((value is None) and isinstance(n, (str, pd.Timedelta))):
            n, value = None, n
        self.n = n
        self.root = sdf
        # Promote a string span to a Timedelta when the index is datetime-like.
        if (isinstance(value, str) and isinstance(self.root.example.index, pd.DatetimeIndex)):
            value = pd.Timedelta(value)
        self.value = value
        self.with_state = with_state
        self.start = start

    def __getitem__(self, key):
        """Project columns, keeping the window configuration."""
        projected = self.root[key]
        return type(self)(projected, n=self.n, value=self.value, with_state=self.with_state, start=self.start)

    def __getattr__(self, key):
        """Column access by attribute; empty-column frames accept any name."""
        if (len(self.root.columns) and (key not in self.root.columns)):
            raise AttributeError(f'{type(self)} has no attribute {key}')
        return self[key]

    def map_partitions(self, func, *args, **kwargs):
        """Apply ``func`` per partition, unwrapping any Window arguments."""
        unwrapped = []
        for arg in args:
            unwrapped.append(arg.root if isinstance(arg, type(self)) else arg)
        new_root = self.root.map_partitions(func, *unwrapped, **kwargs)
        return type(self)(new_root, n=self.n, value=self.value, with_state=self.with_state, start=self.start)

    def index(self):
        # Stream of the underlying index.
        return self.map_partitions((lambda x: x.index), self)

    def columns(self):
        # Column labels of the underlying frame.
        return self.root.columns

    def dtypes(self):
        # Dtypes of the underlying frame.
        return self.root.dtypes

    def example(self):
        # Example (empty) frame describing the stream's schema.
        return self.root.example

    def reset_index(self):
        # NOTE: with_state/start are not propagated here (mirrors upstream).
        return type(self)(self.root.reset_index(), n=self.n, value=self.value)

    def aggregate(self, agg):
        """Run ``agg`` over the window via the accumulating partition stream."""
        if (self.n is not None):
            # Count-based window: trim by integer position.
            diff, window = aggregations.diff_iloc, self.n
        elif (self.value is not None):
            # Value-based window: trim by index value.
            diff, window = aggregations.diff_loc, self.value
        return self.root.accumulate_partitions(aggregations.window_accumulator, diff=diff, window=window, agg=agg, start=self.start, returns_state=True, stream_type='updating', with_state=self.with_state)

    def full(self):
        """The raw windowed data itself."""
        return self.aggregate(aggregations.Full())

    def apply(self, func):
        """Apply an arbitrary function over each emitted window."""
        windowed = self.aggregate(aggregations.Full())
        return windowed.map_partitions(func, windowed)

    def sum(self):
        """Windowed sum."""
        return self.aggregate(aggregations.Sum())

    def count(self):
        """Windowed count."""
        return self.aggregate(aggregations.Count())

    def mean(self):
        """Windowed mean."""
        return self.aggregate(aggregations.Mean())

    def var(self, ddof=1):
        """Windowed variance with ``ddof`` delta degrees of freedom."""
        return self.aggregate(aggregations.Var(ddof=ddof))

    def std(self, ddof=1):
        """Windowed standard deviation (square root of var)."""
        return (self.var(ddof=ddof) ** 0.5)

    def size(self):
        """Windowed size."""
        return self.aggregate(aggregations.Size())

    def value_counts(self):
        """Windowed value counts."""
        return self.aggregate(aggregations.ValueCounts())

    def groupby(self, other):
        """Group the windowed data by ``other``."""
        return WindowedGroupBy(self.root, other, None, self.n, self.value, self.with_state, self.start)
class ModelBuilder():
    """Incrementally assembles a simulation model: particles, springs,
    cloth/FEM elements, rigid bodies, joints, collision shapes and muscles
    are accumulated into plain Python lists for later finalization."""
    # Default cloth-triangle material parameters
    # (stretch, area, damping, drag, lift).
    default_tri_ke = 100.0
    default_tri_ka = 100.0
    default_tri_kd = 10.0
    default_tri_drag = 0.0
    default_tri_lift = 0.0
    # Default bending-edge stiffness and damping.
    default_edge_ke = 100.0
    default_edge_kd = 0.0
    def __init__(self):
        """Create an empty builder; every model quantity starts as an empty list."""
        # Particles: positions, velocities, masses (mass 0 = fixed).
        self.particle_q = []
        self.particle_qd = []
        self.particle_mass = []
        # Collision shapes: transform, owning body, geometry and material.
        self.shape_transform = []
        self.shape_body = []
        self.shape_geo_type = []
        self.shape_geo_scale = []
        self.shape_geo_src = []
        self.shape_materials = []
        self.geo_meshes = []
        self.geo_sdfs = []
        # Springs: flat (i, j) index pairs plus per-spring parameters.
        self.spring_indices = []
        self.spring_rest_length = []
        self.spring_stiffness = []
        self.spring_damping = []
        self.spring_control = []
        # FEM cloth triangles.
        self.tri_indices = []
        self.tri_poses = []
        self.tri_activations = []
        self.tri_materials = []
        # Bending edges.
        self.edge_indices = []
        self.edge_rest_angle = []
        self.edge_bending_properties = []
        # FEM tetrahedra.
        self.tet_indices = []
        self.tet_poses = []
        self.tet_activations = []
        self.tet_materials = []
        # Muscles: CSR-style start offsets into bodies/points.
        self.muscle_start = []
        self.muscle_params = []
        self.muscle_activation = []
        self.muscle_bodies = []
        self.muscle_points = []
        # Rigid bodies.
        self.body_mass = []
        self.body_inertia = []
        self.body_com = []
        self.body_q = []
        self.body_qd = []
        # Joints: topology, frames, coordinates and limit/target parameters.
        self.joint_parent = []
        self.joint_child = []
        self.joint_axis = []
        self.joint_X_p = []
        self.joint_X_c = []
        self.joint_q = []
        self.joint_qd = []
        self.joint_type = []
        self.joint_armature = []
        self.joint_target_ke = []
        self.joint_target_kd = []
        self.joint_target = []
        self.joint_limit_lower = []
        self.joint_limit_upper = []
        self.joint_limit_ke = []
        self.joint_limit_kd = []
        self.joint_act = []
        # Per-joint start offsets into the coordinate/dof arrays.
        self.joint_q_start = []
        self.joint_qd_start = []
        # Articulations: joint index where each articulation begins.
        self.articulation_start = []
        # Running totals used to offset indices when merging articulations.
        self.joint_count = 0
        self.joint_dof_count = 0
        self.joint_coord_count = 0
    def add_articulation(self):
        """Start a new articulation: record the joint index where it begins."""
        self.articulation_start.append(self.joint_count)
    def add_rigid_articulation(self, articulation, xform=None):
        """Merge another builder's rigid articulation into this builder.

        All body/joint/coordinate indices from ``articulation`` are offset by
        this builder's current counts so cross-references stay valid.

        Args:
            articulation: a ModelBuilder-like object holding one articulation.
            xform: optional root transform; applied either to the free joint's
                coordinates or to the root joint's parent frame.
        """
        if (xform is not None):
            if (articulation.joint_type[0] == wp.sim.JOINT_FREE):
                # Free root joint: write the pose directly into its 7
                # coordinates (3 position + 4 quaternion components).
                start = articulation.joint_q_start[0]
                articulation.joint_q[(start + 0)] = xform.p[0]
                articulation.joint_q[(start + 1)] = xform.p[1]
                articulation.joint_q[(start + 2)] = xform.p[2]
                articulation.joint_q[(start + 3)] = xform.q[0]
                articulation.joint_q[(start + 4)] = xform.q[1]
                articulation.joint_q[(start + 5)] = xform.q[2]
                articulation.joint_q[(start + 6)] = xform.q[3]
            else:
                # Otherwise move the root joint's parent frame.
                articulation.joint_X_p[0] = xform
        self.add_articulation()
        start_body_idx = len(self.body_mass)
        # Re-base joint topology and coordinate offsets; -1 (world parent)
        # is preserved as-is.
        self.joint_parent.extend([((p + self.joint_count) if (p != (- 1)) else (- 1)) for p in articulation.joint_parent])
        self.joint_child.extend([(c + self.joint_count) for c in articulation.joint_child])
        self.joint_q_start.extend([(c + self.joint_coord_count) for c in articulation.joint_q_start])
        self.joint_qd_start.extend([(c + self.joint_dof_count) for c in articulation.joint_qd_start])
        self.shape_body.extend([(b + start_body_idx) for b in articulation.shape_body])
        # Remaining attributes carry no indices and can be copied verbatim.
        rigid_articulation_attrs = ['body_inertia', 'body_mass', 'body_com', 'body_q', 'body_qd', 'joint_type', 'joint_X_p', 'joint_X_c', 'joint_armature', 'joint_axis', 'joint_q', 'joint_qd', 'joint_act', 'joint_limit_lower', 'joint_limit_upper', 'joint_limit_ke', 'joint_limit_kd', 'joint_target_ke', 'joint_target_kd', 'joint_target', 'shape_transform', 'shape_geo_type', 'shape_geo_scale', 'shape_geo_src', 'shape_materials']
        for attr in rigid_articulation_attrs:
            getattr(self, attr).extend(getattr(articulation, attr))
        self.joint_count += articulation.joint_count
        self.joint_dof_count += articulation.joint_dof_count
        self.joint_coord_count += articulation.joint_coord_count
    def add_body(self, origin: Transform, parent: int=(- 1), joint_xform: Transform=wp.transform(), joint_xform_child: Transform=wp.transform(), joint_axis: Vec3=(0.0, 0.0, 0.0), joint_type: wp.constant=JOINT_FREE, joint_target_ke: float=0.0, joint_target_kd: float=0.0, joint_limit_ke: float=100.0, joint_limit_kd: float=10.0, joint_limit_lower: float=(- 1000.0), joint_limit_upper: float=1000.0, joint_armature: float=0.0, com: Vec3=np.zeros(3), I_m: Mat33=np.zeros((3, 3)), m: float=0.0) -> int:
        """Add a rigid body connected to ``parent`` by one joint; returns the
        new body's index.

        NOTE(review): ``com`` and ``I_m`` default to shared np.zeros arrays
        (mutable default arguments); safe only while never mutated — confirm.
        """
        child = len(self.body_mass)
        # Armature contributes a diagonal term to the body inertia.
        self.body_inertia.append((I_m + (np.eye(3) * joint_armature)))
        self.body_mass.append(m)
        self.body_com.append(com)
        self.body_q.append(origin)
        self.body_qd.append(wp.spatial_vector())
        self.joint_type.append(joint_type.val)
        self.joint_parent.append(parent)
        self.joint_child.append(child)
        self.joint_X_p.append(joint_xform)
        self.joint_X_c.append(joint_xform_child)
        self.joint_armature.append(joint_armature)
        self.joint_axis.append(np.array(joint_axis))
        # Per-type counts of dofs (velocities) and coordinates (positions).
        # NOTE(review): an unrecognized joint_type leaves dof_count/coord_count
        # unbound and raises NameError below — confirm this is acceptable.
        if (joint_type == JOINT_PRISMATIC):
            dof_count = 1
            coord_count = 1
        elif (joint_type == JOINT_REVOLUTE):
            dof_count = 1
            coord_count = 1
        elif (joint_type == JOINT_BALL):
            # Ball joints use quaternion coordinates (4) but 3 dofs.
            dof_count = 3
            coord_count = 4
        elif (joint_type == JOINT_FREE):
            # Free joints: 3 translation + 4 quaternion coordinates, 6 dofs.
            dof_count = 6
            coord_count = 7
        elif (joint_type == JOINT_FIXED):
            dof_count = 0
            coord_count = 0
        elif (joint_type == JOINT_COMPOUND):
            dof_count = 3
            coord_count = 3
        elif (joint_type == JOINT_UNIVERSAL):
            dof_count = 2
            coord_count = 2
        # Broadcast scalar gains/limits to one entry per dof.
        joint_target_ke = np.resize(np.atleast_1d(joint_target_ke), dof_count)
        joint_target_kd = np.resize(np.atleast_1d(joint_target_kd), dof_count)
        joint_limit_ke = np.resize(np.atleast_1d(joint_limit_ke), dof_count)
        joint_limit_kd = np.resize(np.atleast_1d(joint_limit_kd), dof_count)
        joint_limit_lower = np.resize(np.atleast_1d(joint_limit_lower), dof_count)
        joint_limit_upper = np.resize(np.atleast_1d(joint_limit_upper), dof_count)
        for i in range(coord_count):
            self.joint_q.append(0.0)
        for i in range(dof_count):
            self.joint_qd.append(0.0)
            self.joint_act.append(0.0)
            self.joint_limit_lower.append(joint_limit_lower[i])
            self.joint_limit_upper.append(joint_limit_upper[i])
            self.joint_limit_ke.append(joint_limit_ke[i])
            self.joint_limit_kd.append(joint_limit_kd[i])
            self.joint_target_ke.append(joint_target_ke[i])
            self.joint_target_kd.append(joint_target_kd[i])
            self.joint_target.append(0.0)
        # Record offsets before bumping the running totals.
        self.joint_q_start.append(self.joint_coord_count)
        self.joint_qd_start.append(self.joint_dof_count)
        self.joint_count += 1
        self.joint_dof_count += dof_count
        self.joint_coord_count += coord_count
        return child
def add_muscle(self, bodies: List[int], positions: List[Vec3], f0: float, lm: float, lt: float, lmax: float, pen: float) -> float:
n = len(bodies)
self.muscle_start.append(len(self.muscle_bodies))
self.muscle_params.append((f0, lm, lt, lmax, pen))
self.muscle_activation.append(0.0)
for i in range(n):
self.muscle_bodies.append(bodies[i])
self.muscle_points.append(positions[i])
return (len(self.muscle_start) - 1)
def add_shape_plane(self, plane: Vec4=(0.0, 1.0, 0.0, 0.0), ke: float=100000.0, kd: float=1000.0, kf: float=1000.0, mu: float=0.5):
self._add_shape((- 1), (0.0, 0.0, 0.0), (0.0, 0.0, 0.0), GEO_PLANE, plane, None, 0.0, ke, kd, kf, mu)
def add_shape_sphere(self, body, pos: Vec3=(0.0, 0.0, 0.0), rot: Quat=(0.0, 0.0, 0.0, 1.0), radius: float=1.0, density: float=1000.0, ke: float=100000.0, kd: float=1000.0, kf: float=1000.0, mu: float=0.5):
self._add_shape(body, pos, rot, GEO_SPHERE, (radius, 0.0, 0.0, 0.0), None, density, ke, kd, kf, mu)
def add_shape_box(self, body: int, pos: Vec3=(0.0, 0.0, 0.0), rot: Quat=(0.0, 0.0, 0.0, 1.0), hx: float=0.5, hy: float=0.5, hz: float=0.5, density: float=1000.0, ke: float=100000.0, kd: float=1000.0, kf: float=1000.0, mu: float=0.5):
self._add_shape(body, pos, rot, GEO_BOX, (hx, hy, hz, 0.0), None, density, ke, kd, kf, mu)
def add_shape_capsule(self, body: int, pos: Vec3=(0.0, 0.0, 0.0), rot: Quat=(0.0, 0.0, 0.0, 1.0), radius: float=1.0, half_width: float=0.5, density: float=1000.0, ke: float=100000.0, kd: float=1000.0, kf: float=1000.0, mu: float=0.5):
self._add_shape(body, pos, rot, GEO_CAPSULE, (radius, half_width, 0.0, 0.0), None, density, ke, kd, kf, mu)
def add_shape_mesh(self, body: int, pos: Vec3=(0.0, 0.0, 0.0), rot: Quat=(0.0, 0.0, 0.0, 1.0), mesh: Mesh=None, scale: Vec3=(1.0, 1.0, 1.0), density: float=1000.0, ke: float=100000.0, kd: float=1000.0, kf: float=1000.0, mu: float=0.5):
self._add_shape(body, pos, rot, GEO_MESH, (scale[0], scale[1], scale[2], 0.0), mesh, density, ke, kd, kf, mu)
def add_shape_dense_volume(self, body: int, pos: Vec3=(0.0, 0.0, 0.0), rot: Quat=(0.0, 0.0, 0.0, 1.0), volume: DenseVolume=None, scale: Vec3=(1.0, 1.0, 1.0), density: float=1000.0, ke: float=100000.0, kd: float=1000.0, kf: float=1000.0, mu: float=0.5):
assert isinstance(volume, DenseVolume)
assert np.allclose(scale[0], scale[1]), 'add_shape_volume only supports uniform scale'
assert np.allclose(scale[0], scale[2]), 'add_shape_volume only supports uniform scale'
self._add_shape(body, pos, rot, GEO_DENSE_SDF, (scale[0], scale[1], scale[2], 0.0), volume, density, ke, kd, kf, mu)
def _add_shape(self, body, pos, rot, type, scale, src, density, ke, kd, kf, mu):
self.shape_body.append(body)
self.shape_transform.append(wp.transform(pos, rot))
self.shape_geo_type.append(type.val)
self.shape_geo_scale.append((scale[0], scale[1], scale[2]))
self.shape_geo_src.append(src)
self.shape_materials.append((ke, kd, kf, mu))
(m, I) = self._compute_shape_mass(type, scale, src, density)
self._update_body_mass(body, m, I, np.array(pos), np.array(rot))
def add_particle(self, pos: Vec3, vel: Vec3, mass: float) -> int:
self.particle_q.append(pos)
self.particle_qd.append(vel)
self.particle_mass.append(mass)
return (len(self.particle_q) - 1)
def add_spring(self, i: int, j, ke: float, kd: float, control: float):
self.spring_indices.append(i)
self.spring_indices.append(j)
self.spring_stiffness.append(ke)
self.spring_damping.append(kd)
self.spring_control.append(control)
p = self.particle_q[i]
q = self.particle_q[j]
delta = np.subtract(p, q)
l = np.sqrt(np.dot(delta, delta))
self.spring_rest_length.append(l)
def add_triangle(self, i: int, j: int, k: int, tri_ke: float=default_tri_ke, tri_ka: float=default_tri_ka, tri_kd: float=default_tri_kd, tri_drag: float=default_tri_drag, tri_lift: float=default_tri_lift) -> float:
p = np.array(self.particle_q[i])
q = np.array(self.particle_q[j])
r = np.array(self.particle_q[k])
qp = (q - p)
rp = (r - p)
n = wp.normalize(wp.cross(qp, rp))
e1 = wp.normalize(qp)
e2 = wp.normalize(wp.cross(n, e1))
R = np.matrix((e1, e2))
M = np.matrix((qp, rp))
D = (R * M.T)
area = (np.linalg.det(D) / 2.0)
if (area <= 0.0):
print('inverted or degenerate triangle element')
return 0.0
else:
inv_D = np.linalg.inv(D)
self.tri_indices.append((i, j, k))
self.tri_poses.append(inv_D.tolist())
self.tri_activations.append(0.0)
self.tri_materials.append((tri_ke, tri_ka, tri_kd, tri_drag, tri_lift))
return area
    def add_tetrahedron(self, i: int, j: int, k: int, l: int, k_mu: float=1000.0, k_lambda: float=1000.0, k_damp: float=0.0) -> float:
        """Add an FEM tetrahedron over particles (i, j, k, l) with Lamé
        parameters ``k_mu``/``k_lambda`` and damping ``k_damp``.

        Returns the signed rest volume; inverted elements are reported and
        skipped (not recorded), but the volume is still returned.
        """
        p = np.array(self.particle_q[i])
        q = np.array(self.particle_q[j])
        r = np.array(self.particle_q[k])
        s = np.array(self.particle_q[l])
        qp = (q - p)
        rp = (r - p)
        sp = (s - p)
        # Rest-state deformation basis; det/6 gives the signed tet volume.
        Dm = np.matrix((qp, rp, sp)).T
        volume = (np.linalg.det(Dm) / 6.0)
        if (volume <= 0.0):
            print('inverted tetrahedral element')
        else:
            inv_Dm = np.linalg.inv(Dm)
            self.tet_indices.append((i, j, k, l))
            self.tet_poses.append(inv_Dm.tolist())
            self.tet_activations.append(0.0)
            self.tet_materials.append((k_mu, k_lambda, k_damp))
        return volume
def add_edge(self, i: int, j: int, k: int, l: int, rest: float=None, edge_ke: float=default_edge_ke, edge_kd: float=default_edge_kd):
if (rest == None):
x1 = np.array(self.particle_q[i])
x2 = np.array(self.particle_q[j])
x3 = np.array(self.particle_q[k])
x4 = np.array(self.particle_q[l])
n1 = wp.normalize(np.cross((x3 - x1), (x4 - x1)))
n2 = wp.normalize(np.cross((x4 - x2), (x3 - x2)))
e = wp.normalize((x4 - x3))
d = np.clip(np.dot(n2, n1), (- 1.0), 1.0)
angle = math.acos(d)
sign = np.sign(np.dot(np.cross(n2, n1), e))
rest = (angle * sign)
self.edge_indices.append((i, j, k, l))
self.edge_rest_angle.append(rest)
self.edge_bending_properties.append((edge_ke, edge_kd))
    def add_cloth_grid(self, pos: Vec3, rot: Quat, vel: Vec3, dim_x: int, dim_y: int, cell_x: float, cell_y: float, mass: float, reverse_winding: bool=False, fix_left: bool=False, fix_right: bool=False, fix_top: bool=False, fix_bottom: bool=False, tri_ke: float=default_tri_ke, tri_ka: float=default_tri_ka, tri_kd: float=default_tri_kd, tri_drag: float=default_tri_drag, tri_lift: float=default_tri_lift, edge_ke: float=default_edge_ke, edge_kd: float=default_edge_kd):
        """Add a regular (dim_x x dim_y)-cell cloth grid of particles,
        triangles and bending edges.

        The grid lies in the local XY plane, is rotated by ``rot`` and
        translated by ``pos``. The fix_* flags pin boundary particles by
        giving them zero mass; ``reverse_winding`` flips the quad
        triangulation diagonal.
        """
        def grid_index(x, y, dim_x):
            # Row-major index of grid vertex (x, y) for a row width of dim_x.
            return ((y * dim_x) + x)
        start_vertex = len(self.particle_q)
        start_tri = len(self.tri_indices)
        # (dim_x + 1) x (dim_y + 1) vertices for dim_x x dim_y cells.
        for y in range(0, (dim_y + 1)):
            for x in range(0, (dim_x + 1)):
                g = np.array(((x * cell_x), (y * cell_y), 0.0))
                p = (np.array(wp.quat_rotate(rot, g)) + pos)
                m = mass
                # Zero mass pins a particle in place.
                if ((x == 0) and fix_left):
                    m = 0.0
                elif ((x == dim_x) and fix_right):
                    m = 0.0
                elif ((y == 0) and fix_bottom):
                    m = 0.0
                elif ((y == dim_y) and fix_top):
                    m = 0.0
                self.add_particle(p, vel, m)
                # Each interior vertex closes one quad -> two triangles.
                if ((x > 0) and (y > 0)):
                    if reverse_winding:
                        tri1 = ((start_vertex + grid_index((x - 1), (y - 1), (dim_x + 1))), (start_vertex + grid_index(x, (y - 1), (dim_x + 1))), (start_vertex + grid_index(x, y, (dim_x + 1))))
                        tri2 = ((start_vertex + grid_index((x - 1), (y - 1), (dim_x + 1))), (start_vertex + grid_index(x, y, (dim_x + 1))), (start_vertex + grid_index((x - 1), y, (dim_x + 1))))
                        self.add_triangle(*tri1, tri_ke, tri_ka, tri_kd, tri_drag, tri_lift)
                        self.add_triangle(*tri2, tri_ke, tri_ka, tri_kd, tri_drag, tri_lift)
                    else:
                        tri1 = ((start_vertex + grid_index((x - 1), (y - 1), (dim_x + 1))), (start_vertex + grid_index(x, (y - 1), (dim_x + 1))), (start_vertex + grid_index((x - 1), y, (dim_x + 1))))
                        tri2 = ((start_vertex + grid_index(x, (y - 1), (dim_x + 1))), (start_vertex + grid_index(x, y, (dim_x + 1))), (start_vertex + grid_index((x - 1), y, (dim_x + 1))))
                        self.add_triangle(*tri1, tri_ke, tri_ka, tri_kd, tri_drag, tri_lift)
                        self.add_triangle(*tri2, tri_ke, tri_ka, tri_kd, tri_drag, tri_lift)
        end_vertex = len(self.particle_q)
        end_tri = len(self.tri_indices)
        # Create bending edges between every pair of adjacent interior faces.
        adj = wp.utils.MeshAdjacency(self.tri_indices[start_tri:end_tri], (end_tri - start_tri))
        for (k, e) in adj.edges.items():
            # Skip boundary edges (only one adjacent face).
            if ((e.f0 == (- 1)) or (e.f1 == (- 1))):
                continue
            self.add_edge(e.o0, e.o1, e.v0, e.v1, edge_ke=edge_ke, edge_kd=edge_kd)
def add_cloth_mesh(self, pos: Vec3, rot: Quat, scale: float, vel: Vec3, vertices: List[Vec3], indices: List[int], density: float, edge_callback=None, face_callback=None, tri_ke: float=default_tri_ke, tri_ka: float=default_tri_ka, tri_kd: float=default_tri_kd, tri_drag: float=default_tri_drag, tri_lift: float=default_tri_lift, edge_ke: float=default_edge_ke, edge_kd: float=default_edge_kd):
    """Add a cloth from a triangle mesh (flat index list, 3 per triangle).

    Vertices are scaled then transformed by (pos, rot). Particle masses are
    accumulated per-vertex from triangle area * density (one third of each
    incident triangle). face_callback/edge_callback, when given, are invoked
    per created face / interior edge.
    """
    num_tris = int((len(indices) / 3))
    start_vertex = len(self.particle_q)
    start_tri = len(self.tri_indices)
    # Particles start massless; mass is distributed from triangle areas below.
    for (i, v) in enumerate(vertices):
        p = (np.array(wp.quat_rotate(rot, (v * scale))) + pos)
        self.add_particle(p, vel, 0.0)
    for t in range(num_tris):
        i = (start_vertex + indices[((t * 3) + 0)])
        j = (start_vertex + indices[((t * 3) + 1)])
        k = (start_vertex + indices[((t * 3) + 2)])
        if face_callback:
            face_callback(i, j, k)
        area = self.add_triangle(i, j, k, tri_ke, tri_ka, tri_kd, tri_drag, tri_lift)
        # Lumped mass: each vertex gets a third of the triangle's mass.
        if (area > 0.0):
            self.particle_mass[i] += ((density * area) / 3.0)
            self.particle_mass[j] += ((density * area) / 3.0)
            self.particle_mass[k] += ((density * area) / 3.0)
    end_vertex = len(self.particle_q)
    end_tri = len(self.tri_indices)
    # Bending edges over interior edges only (boundary edges have f0/f1 == -1).
    adj = wp.utils.MeshAdjacency(self.tri_indices[start_tri:end_tri], (end_tri - start_tri))
    for (k, e) in adj.edges.items():
        if ((e.f0 == (- 1)) or (e.f1 == (- 1))):
            continue
        if edge_callback:
            edge_callback(e.f0, e.f1)
        self.add_edge(e.o0, e.o1, e.v0, e.v1, edge_ke=edge_ke, edge_kd=edge_kd)
def add_particle_grid(self, pos: Vec3, rot: Quat, vel: Vec3, dim_x: int, dim_y: int, dim_z: int, cell_x: float, cell_y: float, cell_z: float, mass: float, jitter: float):
    """Add a dim_x * dim_y * dim_z block of free particles.

    Each particle sits on a regular lattice with the given cell spacing,
    transformed by (pos, rot), with uniform random jitter in [0, jitter)
    added per axis to break symmetry.
    """
    for iz in range(dim_z):
        for iy in range(dim_y):
            for ix in range(dim_x):
                local = np.array(((ix * cell_x), (iy * cell_y), (iz * cell_z)))
                world = ((np.array(wp.quat_rotate(rot, local)) + pos) + (np.random.rand(3) * jitter))
                self.add_particle(world, vel, mass)
def add_soft_grid(self, pos: Vec3, rot: Quat, vel: Vec3, dim_x: int, dim_y: int, dim_z: int, cell_x: float, cell_y: float, cell_z: float, density: float, k_mu: float, k_lambda: float, k_damp: float, fix_left: bool=False, fix_right: bool=False, fix_top: bool=False, fix_bottom: bool=False, tri_ke: float=default_tri_ke, tri_ka: float=default_tri_ka, tri_kd: float=default_tri_kd, tri_drag: float=default_tri_drag, tri_lift: float=default_tri_lift):
    """Add a soft-body FEM grid of dim_x*dim_y*dim_z hexahedral cells, each
    split into 5 tetrahedra, with a triangulated surface skin.

    fix_* pins border particles via zero mass. Materials (k_mu, k_lambda,
    k_damp) apply to every tetrahedron; tri_* to the surface triangles.
    """
    start_vertex = len(self.particle_q)
    # Per-particle lumped mass from one cell's volume.
    # NOTE(review): this assigns a full cell mass to every vertex rather
    # than distributing it — confirm intended against the solver's mass model.
    mass = (((cell_x * cell_y) * cell_z) * density)
    for z in range((dim_z + 1)):
        for y in range((dim_y + 1)):
            for x in range((dim_x + 1)):
                v = np.array(((x * cell_x), (y * cell_y), (z * cell_z)))
                m = mass
                # Zero mass pins fixed border particles.
                if (fix_left and (x == 0)):
                    m = 0.0
                if (fix_right and (x == dim_x)):
                    m = 0.0
                if (fix_top and (y == dim_y)):
                    m = 0.0
                if (fix_bottom and (y == 0)):
                    m = 0.0
                p = (np.array(wp.quat_rotate(rot, v)) + pos)
                self.add_particle(p, vel, m)
    # Surface extraction: a face seen once is on the boundary; a face seen
    # twice is interior and is removed by the add/delete toggle below.
    faces = {}
    def add_face(i: int, j: int, k: int):
        key = tuple(sorted((i, j, k)))
        if (key not in faces):
            faces[key] = (i, j, k)
        else:
            del faces[key]
    def add_tet(i: int, j: int, k: int, l: int):
        self.add_tetrahedron(i, j, k, l, k_mu, k_lambda, k_damp)
        add_face(i, k, j)
        add_face(j, k, l)
        add_face(i, j, l)
        add_face(i, l, k)
    # Flattened index into the (dim_x+1) x (dim_y+1) x (dim_z+1) vertex lattice.
    def grid_index(x, y, z):
        return (((((dim_x + 1) * (dim_y + 1)) * z) + ((dim_x + 1) * y)) + x)
    for z in range(dim_z):
        for y in range(dim_y):
            for x in range(dim_x):
                # The 8 corners of this hex cell.
                v0 = (grid_index(x, y, z) + start_vertex)
                v1 = (grid_index((x + 1), y, z) + start_vertex)
                v2 = (grid_index((x + 1), y, (z + 1)) + start_vertex)
                v3 = (grid_index(x, y, (z + 1)) + start_vertex)
                v4 = (grid_index(x, (y + 1), z) + start_vertex)
                v5 = (grid_index((x + 1), (y + 1), z) + start_vertex)
                v6 = (grid_index((x + 1), (y + 1), (z + 1)) + start_vertex)
                v7 = (grid_index(x, (y + 1), (z + 1)) + start_vertex)
                # 5-tet split, mirrored on a checkerboard parity so faces of
                # neighbouring cells line up.
                if (((x & 1) ^ (y & 1)) ^ (z & 1)):
                    add_tet(v0, v1, v4, v3)
                    add_tet(v2, v3, v6, v1)
                    add_tet(v5, v4, v1, v6)
                    add_tet(v7, v6, v3, v4)
                    add_tet(v4, v1, v6, v3)
                else:
                    add_tet(v1, v2, v5, v0)
                    add_tet(v3, v0, v7, v2)
                    add_tet(v4, v7, v0, v5)
                    add_tet(v6, v5, v2, v7)
                    add_tet(v5, v2, v7, v0)
    # Skin the remaining (boundary) faces with render/collision triangles.
    for (k, v) in faces.items():
        self.add_triangle(v[0], v[1], v[2], tri_ke, tri_ka, tri_kd, tri_drag, tri_lift)
def add_soft_mesh(self, pos: Vec3, rot: Quat, scale: float, vel: Vec3, vertices: List[Vec3], indices: List[int], density: float, k_mu: float, k_lambda: float, k_damp: float, tri_ke: float=default_tri_ke, tri_ka: float=default_tri_ka, tri_kd: float=default_tri_kd, tri_drag: float=default_tri_drag, tri_lift: float=default_tri_lift):
    """Add a soft body from a tetrahedral mesh (flat index list, 4 per tet).

    Distributes mass to particles from tet volume * density (a quarter per
    vertex), and skins the boundary with triangles (faces seen by exactly
    one tet).
    """
    num_tets = int((len(indices) / 4))
    start_vertex = len(self.particle_q)
    start_tri = len(self.tri_indices)
    # Face toggle map: a face added twice is interior and removed.
    faces = {}
    def add_face(i, j, k):
        key = tuple(sorted((i, j, k)))
        if (key not in faces):
            faces[key] = (i, j, k)
        else:
            del faces[key]
    # Particles start massless; mass accumulates from tet volumes below.
    for v in vertices:
        p = (wp.quat_rotate(rot, (v * scale)) + pos)
        self.add_particle(p, vel, 0.0)
    for t in range(num_tets):
        v0 = (start_vertex + indices[((t * 4) + 0)])
        v1 = (start_vertex + indices[((t * 4) + 1)])
        v2 = (start_vertex + indices[((t * 4) + 2)])
        v3 = (start_vertex + indices[((t * 4) + 3)])
        volume = self.add_tetrahedron(v0, v1, v2, v3, k_mu, k_lambda, k_damp)
        # Lumped mass: a quarter of the tet's mass per vertex.
        if (volume > 0.0):
            self.particle_mass[v0] += ((density * volume) / 4.0)
            self.particle_mass[v1] += ((density * volume) / 4.0)
            self.particle_mass[v2] += ((density * volume) / 4.0)
            self.particle_mass[v3] += ((density * volume) / 4.0)
        add_face(v0, v2, v1)
        add_face(v1, v2, v3)
        add_face(v0, v1, v3)
        add_face(v0, v3, v2)
    # Skin boundary faces; degenerate triangles raise inside add_triangle
    # (singular rest matrix) and are deliberately skipped.
    for (k, v) in faces.items():
        try:
            self.add_triangle(v[0], v[1], v[2], tri_ke, tri_ka, tri_kd, tri_drag, tri_lift)
        except np.linalg.LinAlgError:
            continue
def compute_sphere_inertia(self, density: float, r: float) -> tuple:
    """Return (mass, 3x3 inertia tensor) of a solid sphere of radius *r*.

    Solid sphere about its center: V = 4/3 * pi * r^3, I = 2/5 * m * r^2
    about every axis, so the tensor is isotropic (diagonal).
    """
    volume = (4.0 / 3.0) * math.pi * r * r * r
    mass = density * volume
    moment = (2.0 / 5.0) * mass * r * r
    return (mass, np.diag([moment, moment, moment]))
def compute_capsule_inertia(self, density: float, r: float, l: float) -> tuple:
    """Return (mass, 3x3 inertia tensor) of a solid capsule.

    The capsule is a cylinder of radius *r* and length *l* capped by two
    hemispheres (together one full sphere), aligned with the local x axis.
    """
    sphere_mass = density * (4.0 / 3.0) * math.pi * r * r * r
    cyl_mass = density * math.pi * r * r * l
    total = sphere_mass + cyl_mass
    # Transverse moment: cylinder term plus hemisphere end-caps shifted by
    # the parallel-axis contributions.
    transverse = (cyl_mass * ((0.25 * r * r) + ((1.0 / 12.0) * l * l))
                  + sphere_mass * ((0.4 * r * r) + (0.375 * r * l) + (0.25 * l * l)))
    # Axial moment about the capsule's long (x) axis.
    axial = ((cyl_mass * 0.5) + (sphere_mass * 0.4)) * r * r
    return (total, np.diag([axial, transverse, transverse]))
def compute_box_inertia(self, density: float, w: float, h: float, d: float) -> tuple:
    """Return (mass, 3x3 inertia tensor) of a solid box of full extents
    w x h x d about its center of mass."""
    mass = density * (w * h * d)
    coeff = mass / 12.0
    # Standard cuboid moments: each axis uses the two perpendicular extents.
    ix = coeff * ((h * h) + (d * d))
    iy = coeff * ((w * w) + (d * d))
    iz = coeff * ((w * w) + (h * h))
    return (mass, np.diag([ix, iy, iz]))
def _compute_shape_mass(self, type, scale, src, density):
if (density == 0):
return (0, np.zeros((3, 3)))
if (type == GEO_SPHERE):
return self.compute_sphere_inertia(density, scale[0])
elif (type == GEO_BOX):
return self.compute_box_inertia(density, (scale[0] * 2.0), (scale[1] * 2.0), (scale[2] * 2.0))
elif (type == GEO_CAPSULE):
return self.compute_capsule_inertia(density, scale[0], (scale[1] * 2.0))
elif (type == GEO_MESH):
s = scale[0]
return (((((density * src.mass) * s) * s) * s), ((((((density * src.I) * s) * s) * s) * s) * s))
elif (type == GEO_DENSE_SDF):
if (src.mesh is not None):
return self._compute_shape_mass(GEO_MESH, scale, src.mesh, density)
else:
return self.compute_box_inertia(density, (scale[0] * 2.0), (scale[1] * 2.0), (scale[2] * 2.0))
def _transform_inertia(self, m, I, p, q):
    """Rotate inertia tensor *I* by quaternion *q* and offset it by *p*
    for a body of mass *m* (parallel-axis theorem).

    Returns the 3x3 inertia tensor expressed in the new frame.
    """
    R = np.array(wp.quat_to_matrix(q)).reshape(3, 3)
    # Fix: the original line read "(R I) R.T", which is a syntax error.
    # The intended operation is the similarity transform R @ I @ R.T,
    # plus the parallel-axis term m * (|p|^2 * Id - p p^T).
    return ((R @ I @ R.T) + (m * ((np.dot(p, p) * np.eye(3)) - np.outer(p, p))))
def _update_body_mass(self, i, m, I, p, q):
    """Accumulate a shape's mass properties (m, I at position p, rotation q)
    into body *i*, updating the body's total mass, combined inertia and
    center of mass. A no-op for i == -1 (the static world) or when the
    combined mass stays zero.
    """
    if (i == (- 1)):
        return
    new_mass = (self.body_mass[i] + m)
    # Both contributions are massless: nothing to combine.
    if (new_mass == 0.0):
        return
    # Mass-weighted average gives the new center of mass.
    new_com = (((self.body_com[i] * self.body_mass[i]) + (p * m)) / new_mass)
    com_offset = (new_com - self.body_com[i])
    shape_offset = (new_com - p)
    # Re-express both inertias about the new COM (parallel-axis via
    # _transform_inertia) and sum them.
    new_inertia = (self._transform_inertia(self.body_mass[i], self.body_inertia[i], com_offset, wp.quat_identity()) + self._transform_inertia(m, I, shape_offset, q))
    self.body_mass[i] = new_mass
    self.body_inertia[i] = new_inertia
    self.body_com[i] = new_com
def set_body_mass(self, i, m, I, com):
    """Overwrite the mass, inertia tensor and center of mass of body *i*.

    A no-op when i == -1 (the static world body).
    """
    if i != -1:
        self.body_mass[i] = m
        self.body_inertia[i] = I
        self.body_com[i] = com
def finalize(self, device: str) -> Model:
    """Convert the builder's Python-side lists into a device-resident Model.

    Computes inverse masses/inertias, appends sentinel end entries to the
    start-index arrays (muscles, joint q/qd starts, articulations) so
    consumers can form per-element ranges, uploads everything as warp
    arrays on *device*, and fills in counts and simulation defaults.
    Note: mutates self (the sentinel appends), so it should be called once.
    """
    # Inverse particle masses; 0 marks a fixed/kinematic particle.
    particle_inv_mass = []
    for m in self.particle_mass:
        if (m > 0.0):
            particle_inv_mass.append((1.0 / m))
        else:
            particle_inv_mass.append(0.0)
    # Inverse body masses and inertias (a zero inertia stays zero).
    body_inv_mass = []
    body_inv_inertia = []
    for m in self.body_mass:
        if (m > 0.0):
            body_inv_mass.append((1.0 / m))
        else:
            body_inv_mass.append(0.0)
    for i in self.body_inertia:
        if i.any():
            body_inv_inertia.append(np.linalg.inv(i))
        else:
            body_inv_inertia.append(i)
    m = Model(device)
    # --- particles ---
    m.particle_q = wp.array(self.particle_q, dtype=wp.vec3, device=device)
    m.particle_qd = wp.array(self.particle_qd, dtype=wp.vec3, device=device)
    m.particle_mass = wp.array(self.particle_mass, dtype=wp.float32, device=device)
    m.particle_inv_mass = wp.array(particle_inv_mass, dtype=wp.float32, device=device)
    # --- collision shapes ---
    m.shape_transform = wp.array(self.shape_transform, dtype=wp.transform, device=device)
    m.shape_body = wp.array(self.shape_body, dtype=wp.int32, device=device)
    m.shape_geo_type = wp.array(self.shape_geo_type, dtype=wp.int32, device=device)
    m.shape_geo_src = self.shape_geo_src
    # Finalize geometry sources to device handles (-1 for analytic shapes).
    shape_geo_id = []
    for geo in self.shape_geo_src:
        if geo:
            shape_geo_id.append(geo.finalize(device=device))
        else:
            shape_geo_id.append((- 1))
    m.shape_geo_id = wp.array(shape_geo_id, dtype=wp.uint64, device=device)
    m.shape_geo_scale = wp.array(self.shape_geo_scale, dtype=wp.vec3, device=device)
    m.shape_materials = wp.array(self.shape_materials, dtype=wp.vec4, device=device)
    # --- springs ---
    m.spring_indices = wp.array(self.spring_indices, dtype=wp.int32, device=device)
    m.spring_rest_length = wp.array(self.spring_rest_length, dtype=wp.float32, device=device)
    m.spring_stiffness = wp.array(self.spring_stiffness, dtype=wp.float32, device=device)
    m.spring_damping = wp.array(self.spring_damping, dtype=wp.float32, device=device)
    m.spring_control = wp.array(self.spring_control, dtype=wp.float32, device=device)
    # --- triangles (cloth) ---
    m.tri_indices = wp.array(self.tri_indices, dtype=wp.int32, device=device)
    m.tri_poses = wp.array(self.tri_poses, dtype=wp.mat22, device=device)
    m.tri_activations = wp.array(self.tri_activations, dtype=wp.float32, device=device)
    m.tri_materials = wp.array(self.tri_materials, dtype=wp.float32, device=device)
    # --- bending edges ---
    m.edge_indices = wp.array(self.edge_indices, dtype=wp.int32, device=device)
    m.edge_rest_angle = wp.array(self.edge_rest_angle, dtype=wp.float32, device=device)
    m.edge_bending_properties = wp.array(self.edge_bending_properties, dtype=wp.float32, device=device)
    # --- tetrahedra (soft bodies) ---
    m.tet_indices = wp.array(self.tet_indices, dtype=wp.int32, device=device)
    m.tet_poses = wp.array(self.tet_poses, dtype=wp.mat33, device=device)
    m.tet_activations = wp.array(self.tet_activations, dtype=wp.float32, device=device)
    m.tet_materials = wp.array(self.tet_materials, dtype=wp.float32, device=device)
    # --- muscles (sentinel end entry makes [start[i], start[i+1]) ranges) ---
    self.muscle_start.append(len(self.muscle_bodies))
    m.muscle_start = wp.array(self.muscle_start, dtype=wp.int32, device=device)
    m.muscle_params = wp.array(self.muscle_params, dtype=wp.float32, device=device)
    m.muscle_bodies = wp.array(self.muscle_bodies, dtype=wp.int32, device=device)
    m.muscle_points = wp.array(self.muscle_points, dtype=wp.vec3, device=device)
    m.muscle_activation = wp.array(self.muscle_activation, dtype=wp.float32, device=device)
    # --- rigid bodies ---
    m.body_q = wp.array(self.body_q, dtype=wp.transform, device=device)
    m.body_qd = wp.array(self.body_qd, dtype=wp.spatial_vector, device=device)
    m.body_inertia = wp.array(self.body_inertia, dtype=wp.mat33, device=device)
    m.body_inv_inertia = wp.array(body_inv_inertia, dtype=wp.mat33, device=device)
    m.body_mass = wp.array(self.body_mass, dtype=wp.float32, device=device)
    m.body_inv_mass = wp.array(body_inv_mass, dtype=wp.float32, device=device)
    m.body_com = wp.array(self.body_com, dtype=wp.vec3, device=device)
    # --- joints ---
    m.joint_type = wp.array(self.joint_type, dtype=wp.int32, device=device)
    m.joint_parent = wp.array(self.joint_parent, dtype=wp.int32, device=device)
    m.joint_child = wp.array(self.joint_child, dtype=wp.int32, device=device)
    m.joint_X_p = wp.array(self.joint_X_p, dtype=wp.transform, device=device)
    m.joint_X_c = wp.array(self.joint_X_c, dtype=wp.transform, device=device)
    m.joint_axis = wp.array(self.joint_axis, dtype=wp.vec3, device=device)
    m.joint_q = wp.array(self.joint_q, dtype=float, device=device)
    m.joint_qd = wp.array(self.joint_qd, dtype=float, device=device)
    m.joint_armature = wp.array(self.joint_armature, dtype=wp.float32, device=device)
    m.joint_target = wp.array(self.joint_target, dtype=wp.float32, device=device)
    m.joint_target_ke = wp.array(self.joint_target_ke, dtype=wp.float32, device=device)
    m.joint_target_kd = wp.array(self.joint_target_kd, dtype=wp.float32, device=device)
    m.joint_act = wp.array(self.joint_act, dtype=wp.float32, device=device)
    m.joint_limit_lower = wp.array(self.joint_limit_lower, dtype=wp.float32, device=device)
    m.joint_limit_upper = wp.array(self.joint_limit_upper, dtype=wp.float32, device=device)
    m.joint_limit_ke = wp.array(self.joint_limit_ke, dtype=wp.float32, device=device)
    m.joint_limit_kd = wp.array(self.joint_limit_kd, dtype=wp.float32, device=device)
    # Sentinel end entries for joint coordinate/dof/articulation ranges.
    self.joint_q_start.append(self.joint_coord_count)
    self.joint_qd_start.append(self.joint_dof_count)
    self.articulation_start.append(self.joint_count)
    m.joint_q_start = wp.array(self.joint_q_start, dtype=int, device=device)
    m.joint_qd_start = wp.array(self.joint_qd_start, dtype=int, device=device)
    m.articulation_start = wp.array(self.articulation_start, dtype=int, device=device)
    # --- soft contact buffers (fixed-capacity scratch space) ---
    m.soft_contact_max = (64 * 1024)
    m.soft_contact_count = wp.zeros(1, dtype=wp.int32, device=device)
    m.soft_contact_particle = wp.zeros(m.soft_contact_max, dtype=int, device=device)
    m.soft_contact_body = wp.zeros(m.soft_contact_max, dtype=int, device=device)
    m.soft_contact_body_pos = wp.zeros(m.soft_contact_max, dtype=wp.vec3, device=device)
    m.soft_contact_body_vel = wp.zeros(m.soft_contact_max, dtype=wp.vec3, device=device)
    m.soft_contact_normal = wp.zeros(m.soft_contact_max, dtype=wp.vec3, device=device)
    # --- element counts ---
    m.particle_count = len(self.particle_q)
    m.body_count = len(self.body_q)
    m.shape_count = len(self.shape_geo_type)
    m.tri_count = len(self.tri_poses)
    m.tet_count = len(self.tet_poses)
    m.edge_count = len(self.edge_rest_angle)
    m.spring_count = len(self.spring_rest_length)
    # -1 compensates for the sentinel entries appended above.
    m.muscle_count = (len(self.muscle_start) - 1)
    m.articulation_count = (len(self.articulation_start) - 1)
    m.joint_dof_count = self.joint_dof_count
    m.joint_coord_count = self.joint_coord_count
    m.contact_count = 0
    m.geo_meshes = self.geo_meshes
    m.geo_sdfs = self.geo_sdfs
    # --- simulation defaults ---
    m.ground = True
    # Ground plane (normal xyz, offset w): y-up at height 0.
    m.ground_plane = np.array((0.0, 1.0, 0.0, 0.0))
    m.enable_tri_collisions = False
    return m
def verify_online_player(caller, value):
    """Return the online character whose key matches *value* (case-insensitive).

    Messages *caller* and returns False when no logged-in session is
    puppeting a matching character. If several match, the last one found wins.
    """
    wanted = value.lower()
    found = None
    for session in SESSIONS.get_sessions():
        if not session.logged_in:
            continue
        puppet = session.get_puppet()
        if wanted == puppet.key.lower():
            found = puppet
    if not found:
        caller.msg("No character matching '%s' is online." % value)
        return False
    return found
def main():
    """Install the pinned mkdocs-material build via pip, masking TOKEN.

    Skips when no token is configured; otherwise streams pip's combined
    stdout/stderr with every occurrence of the token redacted, and exits
    with pip's return code (or 1 if the process could not be started).
    """
    if not TOKEN:
        print('No token is set, skipping')
        return
    interpreter = shutil.which('python')
    requirement = f'mkdocs-material[imaging] {DEP_REF}{GIT_REF}'
    command = [interpreter, '-m', 'pip', 'install', '--disable-pip-version-check', requirement]
    try:
        proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8')
    except Exception as exc:
        # Never leak the token, even through exception text.
        print(str(exc).replace(TOKEN, '*****'))
        sys.exit(1)
    with proc:
        for line in iter(proc.stdout.readline, ''):
            print(line.replace(TOKEN, '*****'), end='')
    sys.exit(proc.returncode)
def test_from_string__wkt_with_proj():
    """A WKT string carrying a PROJ4 EXTENSION node resolves to EPSG:3857."""
    wkt = 'PROJCS["WGS 84 / Pseudo-Mercator",GEOGCS["WGS 84", DATUM["WGS_1984", SPHEROID["WGS 84",6378137,298., AUTHORITY["EPSG","7030"]], AUTHORITY["EPSG","6326"]], PRIMEM["Greenwich",0, AUTHORITY["EPSG","8901"]], UNIT["degree",0., AUTHORITY["EPSG","9122"]], AUTHORITY["EPSG","4326"]],PROJECTION["Mercator_1SP"],PARAMETER["central_meridian",0],PARAMETER["scale_factor",1],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1, AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH],EXTENSION["PROJ4","+proj=merc +a=6378137 +b=6378137 +lat_ts=0 +lon_0=0 +x_0=0 +y_0=0 +k=1 +units=m +nadgrids= +wktext +no_defs"],AUTHORITY["EPSG","3857"]] '
    crs = CRS.from_string(wkt)
    assert crs.to_epsg() == 3857
class ProviderValidator():
    """Validate that a payload names a registered issue provider and that
    its options match the provider's declared fields (keys valid, required
    keys present)."""

    def __call__(self, data):
        provider = get_plugin('PROJECT_ISSUE_PROVIDERS', data.get('provider_key'))
        if provider is None:
            raise ValidationError({'provider_key': 'Please provide a valid provider.'})
        try:
            options = {entry.get('key'): entry.get('value') for entry in data.get('options', [])}
        except KeyError as exc:
            raise ValidationError({'options': 'Options need to be of the form "{"key": "": "value": ""}".'}) from exc
        # Every supplied key must be one the provider declares.
        for key in options:
            if key not in [field.get('key') for field in provider.fields]:
                raise ValidationError({'options': f'Key "{key}" is not valid.'})
        # Every required declared field must be supplied.
        for field in provider.fields:
            if field.get('required', True) and field.get('key') not in options:
                raise ValidationError({'options': 'Key "{}" is required.'.format(field.get('key'))})
@pytest.mark.parametrize('option_api, backend', [('pyqt5', 'PyQt5'), ('pyqt6', 'PyQt6'), ('pyside2', 'PySide2'), ('pyside6', 'PySide6')])
def test_already_loaded_backend(monkeypatch, option_api, backend):
    """When a Qt binding is already loaded, auto-detection must select it.

    Builds a minimal fake of the binding's modules, patches __import__ and
    the loaded-library probe so only *backend* appears present, then checks
    set_qt_api(None) picks the matching api name.

    Fix: the decorator line had been truncated to '.parametrize(...)'
    (a syntax error); restored the standard '@pytest.mark.parametrize'.
    """
    import builtins
    class Mock():
        pass
    qtcore = Mock()
    # No-op the Qt message/log hooks the plugin installs.
    for method_name in ('qInstallMessageHandler', 'qDebug', 'qWarning', 'qCritical', 'qFatal'):
        setattr(qtcore, method_name, (lambda *_: None))
    if (backend in ('PyQt5', 'PyQt6')):
        # Minimum PYQT_VERSION the plugin accepts, per binding, plus one.
        pyqt_version = (330496 if (backend == 'PyQt5') else 393216)
        qtcore.PYQT_VERSION = (pyqt_version + 1)
        qtcore.pyqtSignal = object()
        qtcore.pyqtSlot = object()
        qtcore.pyqtProperty = object()
    else:
        qtcore.Signal = object()
        qtcore.Slot = object()
        qtcore.Property = object()
    qtwidgets = Mock()
    qapplication = Mock()
    qapplication.instance = (lambda *_: None)
    qtwidgets.QApplication = qapplication
    qbackend = Mock()
    qbackend.QtCore = qtcore
    qbackend.QtGui = object()
    qbackend.QtTest = object()
    qbackend.QtWidgets = qtwidgets
    import_orig = builtins.__import__
    # Serve the fake package for *backend*; defer everything else.
    def _fake_import(name, *args, **kwargs):
        if (name == backend):
            return qbackend
        return import_orig(name, *args, **kwargs)
    def _fake_is_library_loaded(name, *args):
        return (name == backend)
    monkeypatch.delenv('PYTEST_QT_API', raising=False)
    monkeypatch.setattr(qt_compat, '_is_library_loaded', _fake_is_library_loaded)
    monkeypatch.setattr(builtins, '__import__', _fake_import)
    qt_api.set_qt_api(api=None)
    assert (qt_api.pytest_qt_api == option_api)
def parse_davis2017_splits():
    """Read the DAVIS 2017 train/val/test-dev image-set files.

    Returns a one-element tuple holding (train, val, test) lists, where
    each entry is a (video_name, index) pair in file order.
    """
    def _read_split(name):
        path = 'data/davis/DAVIS/ImageSets/2017/%s.txt' % name
        with open(path) as handle:
            return [(line.rstrip(), pos) for (pos, line) in enumerate(handle)]
    train_list = _read_split('train')
    val_list = _read_split('val')
    test_list = _read_split('test-dev')
    return ((train_list, val_list, test_list),)
class MobileViTDeepLabV3(nn.Module):
    """DeepLabV3-style semantic-segmentation head for MobileViT.

    Runs ASPP over the last (lowest-resolution) encoder feature map, applies
    dropout, and projects to per-pixel class logits with a 1x1 convolution.
    """

    def __init__(self, config: MobileViTConfig) -> None:
        super().__init__()
        self.aspp = MobileViTASPP(config)
        self.dropout = nn.Dropout2d(config.classifier_dropout_prob)
        self.classifier = MobileViTConvLayer(config, in_channels=config.aspp_out_channels, out_channels=config.num_labels, kernel_size=1, use_normalization=False, use_activation=False, bias=True)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Only the final feature map feeds the ASPP module.
        pooled = self.aspp(hidden_states[-1])
        pooled = self.dropout(pooled)
        return self.classifier(pooled)
class ACLExtractor(meta.FlatExtractor):
    """Extract ACL entries from a flat metadata file.

    Each record begins with a '# file: <quoted path>' line; the remainder of
    the record is the ACL text for that path.
    """

    # NOTE(review): the original signature was `_record_to_object(record)`
    # with no `self`, so any instance call (self._record_to_object(rec))
    # would mis-bind the instance as `record` and fail. The body never uses
    # instance state, so @staticmethod restores correct binding for both
    # instance and class call sites — confirm against FlatExtractor's contract.
    @staticmethod
    def _record_to_object(record):
        newline_pos = record.find(b'\n')
        first_line = record[:newline_pos]
        if (not first_line.startswith(b'# file: ')):
            raise meta.ParsingError(('Bad record beginning: %r' % first_line))
        # Strip the b'# file: ' prefix (8 bytes) to get the quoted path.
        filename = first_line[8:]
        if (filename == b'.'):
            # '.' denotes the repository root: empty index tuple.
            index = ()
        else:
            unquoted_filename = C.acl_unquote(filename)
            index = tuple(unquoted_filename.split(b'/'))
        return get_meta_object(index, os.fsdecode(record[newline_pos:]))
def test_redirect_both(capfd):
    """Simultaneous stdout and stderr redirection captures each C-level
    stream in its own Python buffer, leaving the real file descriptors empty."""
    out_msg = 'StdOut'
    err_msg = 'StdErr'
    out_buf = StringIO()
    err_buf = StringIO()
    with redirect_stdout(out_buf), redirect_stderr(err_buf), m.ostream_redirect():
        m.raw_output(out_msg)
        m.raw_err(err_msg)
    (stdout, stderr) = capfd.readouterr()
    # Nothing should reach the real descriptors...
    assert stdout == ''
    assert stderr == ''
    # ...everything lands in the redirected buffers instead.
    assert out_buf.getvalue() == out_msg
    assert err_buf.getvalue() == err_msg
class Graph(pg.GraphItem):
    """Interactive pyqtgraph GraphItem whose nodes can be dragged with the
    left mouse button and carry optional text labels.
    """
    def __init__(self):
        # Drag state: the grabbed point and its offset from the press position.
        self.dragPoint = None
        self.dragOffset = None
        self.textItems = []
        pg.GraphItem.__init__(self)
        self.scatter.sigClicked.connect(self.clicked)
    def setData(self, **kwds):
        """Store graph data; 'text' (per-node labels) is handled separately
        from the keywords forwarded to GraphItem."""
        self.text = kwds.pop('text', [])
        self.data = kwds
        if ('pos' in self.data):
            # Attach each scatter point's row index so drags can map a
            # picked point back to its position entry.
            npts = self.data['pos'].shape[0]
            self.data['data'] = np.empty(npts, dtype=[('index', int)])
            self.data['data']['index'] = np.arange(npts)
        self.setTexts(self.text)
        self.updateGraph()
    def setTexts(self, text):
        """Replace all node labels with TextItems parented to this item."""
        # Remove previous labels from the scene before rebuilding.
        for i in self.textItems:
            i.scene().removeItem(i)
        self.textItems = []
        for t in text:
            item = pg.TextItem(t)
            self.textItems.append(item)
            item.setParentItem(self)
    def updateGraph(self):
        """Push current data to the base GraphItem and reposition labels."""
        pg.GraphItem.setData(self, **self.data)
        for (i, item) in enumerate(self.textItems):
            item.setPos(*self.data['pos'][i])
    def mouseDragEvent(self, ev):
        """Left-button drag moves the node under the initial press point."""
        if (ev.button() != QtCore.Qt.MouseButton.LeftButton):
            ev.ignore()
            return
        if ev.isStart():
            # Grab the point under the original press position (not the
            # current cursor), and remember its offset for smooth dragging.
            pos = ev.buttonDownPos()
            pts = self.scatter.pointsAt(pos)
            if (len(pts) == 0):
                ev.ignore()
                return
            self.dragPoint = pts[0]
            ind = pts[0].data()[0]
            self.dragOffset = (self.data['pos'][ind] - pos)
        elif ev.isFinish():
            self.dragPoint = None
            return
        elif (self.dragPoint is None):
            # Mid-drag events with no grabbed point (start missed a node).
            ev.ignore()
            return
        # Move the grabbed node with the cursor, preserving the grab offset.
        ind = self.dragPoint.data()[0]
        self.data['pos'][ind] = (ev.pos() + self.dragOffset)
        self.updateGraph()
        ev.accept()
    def clicked(self, pts):
        print(('clicked: %s' % pts))
class FusedLeakyReLUFunction(Function):
    """Autograd wrapper for the fused bias-add + leaky-ReLU CUDA kernel.

    Fix: torch.autograd.Function requires forward/backward to be static
    methods; without @staticmethod, Function.apply would pass the instance
    where `ctx` is expected and the custom op would fail.
    """

    @staticmethod
    def forward(ctx, input, bias, negative_slope, scale):
        # act=3 selects leaky-ReLU in the extension; grad=0 means forward pass.
        empty = input.new_empty(0)
        out = fused_act_ext.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale)
        # Only the output is needed to recompute gradients.
        ctx.save_for_backward(out)
        ctx.negative_slope = negative_slope
        ctx.scale = scale
        return out

    @staticmethod
    def backward(ctx, grad_output):
        (out,) = ctx.saved_tensors
        (grad_input, grad_bias) = FusedLeakyReLUFunctionBackward.apply(grad_output, out, ctx.negative_slope, ctx.scale)
        # negative_slope and scale are non-tensor args: no gradient.
        return (grad_input, grad_bias, None, None)
# NOTE(review): '_arg_scope' below appears to be a truncated decorator line
# (likely '@slim.add_arg_scope' or similar) — confirm against the original
# source before shipping; as written it is not valid Python.
_arg_scope
def split_separable_conv2d(input_tensor, num_outputs, scope=None, normalizer_fn=None, stride=1, rate=1, endpoints=None, use_explicit_padding=False):
    """Apply a 3x3 depthwise conv followed by a 1x1 pointwise conv.

    Records both intermediate tensors in *endpoints* under the depthwise and
    pointwise scope names. With use_explicit_padding, the input is padded
    manually and the convs run with VALID padding (for exact-shape export).
    Returns the pointwise output tensor.
    """
    with _v1_compatible_scope_naming(scope) as scope:
        dw_scope = (scope + 'depthwise')
        endpoints = (endpoints if (endpoints is not None) else {})
        kernel_size = [3, 3]
        padding = 'SAME'
        if use_explicit_padding:
            # Pad explicitly so the VALID conv reproduces SAME output shapes.
            padding = 'VALID'
            input_tensor = _fixed_padding(input_tensor, kernel_size, rate)
        # Depthwise 3x3 (depth_multiplier=1 keeps channel count unchanged).
        net = slim.separable_conv2d(input_tensor, None, kernel_size, depth_multiplier=1, stride=stride, rate=rate, normalizer_fn=normalizer_fn, padding=padding, scope=dw_scope)
        endpoints[dw_scope] = net
        pw_scope = (scope + 'pointwise')
        # Pointwise 1x1 projects to num_outputs channels.
        net = slim.conv2d(net, num_outputs, [1, 1], stride=1, normalizer_fn=normalizer_fn, scope=pw_scope)
        endpoints[pw_scope] = net
        return net
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.