code stringlengths 281 23.7M |
|---|
def cleanup_numbered_dir(root: Path, prefix: str, keep: int, consider_lock_dead_if_created_before: float) -> None:
    """Prune stale numbered directories under *root*.

    Removes prefix-numbered directories beyond the *keep* newest, any
    leftover ``garbage-*`` directories, and dead symlinks.  Locks created
    before *consider_lock_dead_if_created_before* are treated as dead.
    """
    if not root.exists():
        return
    deadline = consider_lock_dead_if_created_before
    for candidate in cleanup_candidates(root, prefix, keep):
        try_cleanup(candidate, deadline)
    # Directories renamed to garbage-* by a previous, interrupted cleanup.
    for leftover in root.glob('garbage-*'):
        try_cleanup(leftover, deadline)
    cleanup_dead_symlinks(root)
def test_mouse_press_event_topleft_scale(view, item):
    """Left-click outside the top-left corner of a selected item starts a
    scale operation anchored at that corner."""
    view.scene.addItem(item)
    item.setSelected(True)
    event = MagicMock()
    event.configure_mock(**{
        'pos.return_value': QtCore.QPointF(2, 2),
        'scenePos.return_value': QtCore.QPointF(-1, -1),
        'button.return_value': Qt.MouseButton.LeftButton,
    })
    rect = QtCore.QRectF(0, 0, 100, 80)
    with patch.object(item, 'bounding_rect_unselected', return_value=rect):
        item.mousePressEvent(event)
    assert item.scale_active is True
    assert item.event_start == QtCore.QPointF(-1, -1)
    # Click above/left of the item => scaling pulls toward negative x and y.
    assert item.event_direction.x() < 0
    assert item.event_direction.y() < 0
    assert item.scale_orig_factor == 1
    event.accept.assert_called_once_with()
def check_not_deprecated(file, metadata_is=None, metadata_keys_contain=None, compare_as_close=None, current_version=None, last_compatible_version=radis.config['OLDEST_COMPATIBLE_VERSION'], engine='guess'):
    """Check that a cache file's metadata is present, version-compatible and
    matches the expected values.

    Parameters
    ----------
    file: str
        path of the cache file to check.
    metadata_is: dict, optional
        expected metadata key/value pairs (compared with ``compare_dict``).
    metadata_keys_contain: list, optional
        keys that must be present in the file metadata.
    compare_as_close: list, optional
        keys compared with a tolerance instead of strict equality
        (forwarded to ``compare_dict``).
    current_version: str, optional
        version to compare against; defaults to ``radis.__version__``.
    last_compatible_version: str, optional
        oldest accepted file version.  NOTE(review): the default is
        evaluated once at import time from ``radis.config``.
    engine: str
        file engine; ``'guess'`` infers it from the file.

    Returns
    -------
    bool-like
        result of the metadata comparison (1/True when metadata matches).

    Raises
    ------
    DeprecatedFileWarning
        missing metadata/version, version older than
        ``last_compatible_version``, or metadata mismatch.
    ValueError
        file generated by a version newer than ``current_version``.
    """
    # BUG FIX: mutable default arguments ({} / []) replaced with None
    # sentinels so call sites can never share state across calls.
    if metadata_is is None:
        metadata_is = {}
    if metadata_keys_contain is None:
        metadata_keys_contain = []
    if compare_as_close is None:
        compare_as_close = []
    if engine == 'guess':
        engine = DataFileManager.guess_engine(file)
    manager = DataFileManager(engine)
    try:
        file_metadata = manager.read_metadata(file)
    except AttributeError as err:
        if "Attribute 'metadata' does not exist" in str(err):
            raise DeprecatedFileWarning('File {0} is deprecated : '.format(file) + 'Metadata is missing. Delete it to regenerate it on next run')
        raise
    try:
        file_version = file_metadata.pop('version')
    except KeyError:
        raise DeprecatedFileWarning('File {0} is deprecated : '.format(file) + 'RADIS version missing in metadata. Delete it to regenerate it on next run')
    if current_version is None:
        current_version = radis.__version__
    if parse(file_version) < parse(last_compatible_version):
        raise DeprecatedFileWarning('File {0} has been generated in a deprecated '.format(file) + 'version ({0}). Oldest compatible version is {1}. '.format(file_version, last_compatible_version) + 'Delete the file to regenerate it on next run')
    if parse(current_version) > parse(file_version):
        warn(DeprecationWarning('File {0} has been generated in '.format(file) + 'a deprecated version ({0}) compared to current ({1})'.format(file_version, current_version) + '. Delete it to regenerate it on next run'))
        out = False
    elif parse(current_version) == parse(file_version):
        out = True
    else:
        raise ValueError('Cache file ({0}) generated with a future version ({1} > {2})? '.format(file, file_version, current_version) + 'Do you own a DeLorean? Delete the file manually if you understand what happened')
    for k in metadata_keys_contain:
        if k not in file_metadata:
            raise DeprecatedFileWarning("Metadata in file {0} doesn't contain the expected key `{1}`. ".format(file, k))
    metadata_is = _h5_compatible(metadata_is)
    # Only compare the keys the caller asked about.
    file_metadata = {k: v for (k, v) in file_metadata.items() if k in metadata_is}
    # NOTE(review): `out` from the version check above is overwritten here --
    # the version result only drives the warning; confirm this is intended.
    (out, compare_string) = compare_dict(metadata_is, file_metadata, compare_as_close=compare_as_close, verbose=False, return_string=True, df1_str='Expected', df2_str='Got')
    if out != 1:
        raise DeprecatedFileWarning('Metadata in file {0} dont match '.format(file) + 'expected values. See comparison below:' + '\n\tExpected\tFile\n{0}'.format(compare_string))
    return out
def make_trident_res_layer(block, inplanes, planes, num_blocks, stride=1, trident_dilations=(1, 2, 3), style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, plugins=None, test_branch_idx=(- 1)):
    """Build one stage of Trident residual blocks.

    The first block carries the stride and (when the shape changes) a
    1x1-conv + norm downsample path; only the last block concatenates the
    branch outputs.
    """
    downsample = None
    if stride != 1 or inplanes != planes * block.expansion:
        # Projection shortcut: match spatial size and channel count.
        downsample = nn.Sequential(
            build_conv_layer(conv_cfg, inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
            build_norm_layer(norm_cfg, planes * block.expansion)[1],
        )
    layers = []
    for idx in range(num_blocks):
        first = idx == 0
        layers.append(block(
            inplanes=inplanes,
            planes=planes,
            stride=stride if first else 1,
            trident_dilations=trident_dilations,
            downsample=downsample if first else None,
            style=style,
            with_cp=with_cp,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            dcn=dcn,
            plugins=plugins,
            test_branch_idx=test_branch_idx,
            concat_output=idx == num_blocks - 1,
        ))
        inplanes = planes * block.expansion
    return nn.Sequential(*layers)
class CiderD:
    """CIDEr-D metric: wraps a CiderScorer and feeds it hypothesis /
    reference pairs gathered from the result and ground-truth structures."""

    def __init__(self, n=4, sigma=6.0, df='corpus'):
        # n-gram order and document-frequency mode drive the scorer; sigma
        # is stored but only used by the underlying scorer implementation.
        self._n = n
        self._sigma = sigma
        self._df = df
        self.cider_scorer = CiderScorer(n=self._n, df_mode=self._df)

    def compute_score(self, gts, res):
        """Score each entry of `res` against its references in `gts`.

        `res` is an iterable of dicts with 'image_id' and a single-element
        'caption' list; `gts` maps image_id to a non-empty reference list.
        Returns (corpus score, per-image scores).
        """
        self.cider_scorer.clear()
        for entry in res:
            candidate = entry['caption']
            references = gts[entry['image_id']]
            # Sanity checks on the expected input structure.
            assert type(candidate) is list
            assert len(candidate) == 1
            assert type(references) is list
            assert len(references) > 0
            self.cider_scorer += (candidate[0], references)
        (mean_score, per_image_scores) = self.cider_scorer.compute_score()
        return (mean_score, per_image_scores)

    def method(self):
        """Name of this metric."""
        return 'CIDEr-D'
class G_D(nn.Module):
    """Wraps a generator G and a discriminator D into one module so a single
    forward pass can produce D's outputs for generated (and optionally real)
    samples.

    NOTE(review): assumes G exposes ``shared`` (class-embedding lookup) and
    an ``fp16`` flag, and D exposes ``fp16`` -- confirm against the model
    classes.
    """

    def __init__(self, G, D):
        super(G_D, self).__init__()
        self.G = G
        self.D = D

    def forward(self, z, gy, x=None, dy=None, train_G=False, return_G_z=False, split_D=False, return_bn=False):
        # Gradients flow through G only when training the generator.
        with torch.set_grad_enabled(train_G):
            G_z = self.G(z, self.G.shared(gy), return_bn=return_bn)
            if return_bn:
                # G returned (samples, batch-norm activations).
                activations = G_z[1]
                G_z = G_z[0]
            # Align precision between G's output and D's expected input.
            if (self.G.fp16 and (not self.D.fp16)):
                G_z = G_z.float()
            if (self.D.fp16 and (not self.G.fp16)):
                G_z = G_z.half()
        if split_D:
            # Run D separately on fake and (optionally) real samples.
            D_fake = self.D(G_z, gy)
            if (x is not None):
                D_real = self.D(x, dy)
                return (D_fake, D_real)
            elif (return_G_z and return_bn):
                return (D_fake, G_z, activations)
            elif return_G_z:
                return (D_fake, G_z)
            else:
                return D_fake
        else:
            # Single D pass over the concatenated fake+real batch (shares
            # batch statistics across both halves).
            D_input = (torch.cat([G_z, x], 0) if (x is not None) else G_z)
            D_class = (torch.cat([gy, dy], 0) if (dy is not None) else gy)
            D_out = self.D(D_input, D_class)
            if (x is not None):
                # Split back into (fake scores, real scores).
                return torch.split(D_out, [G_z.shape[0], x.shape[0]])
            elif return_G_z:
                return (D_out, G_z)
            else:
                return D_out
# NOTE(review): the decorator prefix appears to have been stripped during
# extraction -- presumably `@pytest.mark.parametrize(...)`; restore it
# before running.
.parametrize(('use_comm', 'use_framer'), [('tcp', 'socket'), ('tcp', 'rtu'), ('tls', 'tls'), ('udp', 'socket'), ('udp', 'rtu'), ('serial', 'rtu')])
class TestClientServerAsyncExamples():
    """End-to-end checks of the async client/server examples across every
    supported comm/framer combination."""

    # NOTE(review): presumably `@pytest.fixture(name='use_port')` -- the
    # decorator lost its prefix during extraction; confirm upstream.
    (name='use_port')
    def get_port_in_class(base_ports):
        # Hand out a fresh port per test by bumping this class's counter.
        base_ports[__class__.__name__] += 1
        return base_ports[__class__.__name__]

    async def test_combinations(self, mock_server, mock_clc):
        """Example main() runs cleanly against the mocked server."""
        assert mock_server
        (await main(cmdline=mock_clc))

    async def test_client_exception(self, mock_server, mock_clc):
        """Client run copes with a ModbusIOException raised mid-call."""
        assert mock_server
        test_client = setup_async_client(cmdline=mock_clc)
        test_client.read_holding_registers = mock.AsyncMock(side_effect=ModbusIOException('test'))
        (await run_async_client(test_client, modbus_calls=run_a_few_calls))

    async def test_server_no_client(self, mock_server):
        """Server starts fine with no client attached."""
        assert mock_server

    async def test_server_client_twice(self, mock_server, use_comm, mock_clc):
        """The same client can run calls twice with a pause in between."""
        assert mock_server
        if (use_comm == 'serial'):
            # Serial case skipped -- presumably re-running over the mocked
            # serial transport is unsupported; confirm upstream.
            return
        test_client = setup_async_client(cmdline=mock_clc)
        (await run_async_client(test_client, modbus_calls=run_a_few_calls))
        (await asyncio.sleep(0.5))
        (await run_async_client(test_client, modbus_calls=run_a_few_calls))

    async def test_client_no_server(self, mock_clc):
        """Connecting without a server fails with assertion or timeout."""
        test_client = setup_async_client(cmdline=mock_clc)
        with pytest.raises((AssertionError, asyncio.TimeoutError)):
            (await run_async_client(test_client, modbus_calls=run_a_few_calls))
def phrase_event(callbacks, parameters):
    """Select the callbacks whose trigger words appear in a spoken phrase.

    The phrase is lower-cased, common punctuation is blanked out, and the
    remaining words are stripped of quotes/spaces.  A callback matches when
    its comma-separated 'parameters' keys are empty (always fires) or any
    key appears among the phrase's words.  Returns matching callbacks in
    their original order.
    """
    text = parameters.strip().lower()
    for mark in ',.";?!':
        text = text.replace(mark, ' ')
    tokens = [t.strip("' ") for t in text.split() if t.strip("' ")]
    matched = []
    for cb in callbacks:
        keys = cb['parameters']
        if not keys:
            matched.append(cb)
        elif any(k.strip().lower() in tokens for k in keys.split(',')):
            matched.append(cb)
    return matched
def get_nuc_g_factor(symb_or_charge, mass=None):
    """Look up the nuclear g-factor of an element/isotope.

    Parameters
    ----------
    symb_or_charge : str or int
        Element symbol (resolved via ``mole.charge``) or nuclear charge.
    mass : int, optional
        Isotope mass number.  If omitted, the first entry of
        ``ISOTOPE_GYRO[Z]`` is used (presumably the default isotope --
        TODO confirm against the table's ordering).

    Returns
    -------
    Nuclear g-factor of the selected isotope.

    Raises
    ------
    ValueError
        If ``mass`` matches no tabulated isotope of the element.
    """
    if isinstance(symb_or_charge, str):
        Z = mole.charge(symb_or_charge)
    else:
        Z = symb_or_charge
    if (mass is None):
        # Default isotope: spin and g-factor from the first table entry.
        (nuc_spin, g_nuc) = ISOTOPE_GYRO[Z][0][1:3]
    else:
        # Linear scan for the requested mass number; the for/else raises
        # only when the loop completes without a match.
        for (isotop_mass, nuc_spin, g_nuc) in ISOTOPE_GYRO[Z]:
            if (isotop_mass == mass):
                break
        else:
            raise ValueError(('mass=%s not found in isotopes of %s' % (mass, symb_or_charge)))
    return g_nuc
def duplicate_states_loss(player):
    """Sum of cross-entropy losses pushing the policy toward the recorded
    optimal action for each duplicated state in the episode.

    NOTE(review): assumes ``player.duplicate_states_actions`` maps a step
    index to an optimal action id and ``player.probs[i]`` holds the policy
    output for that step -- confirm against the Player class.
    """
    episode_loss = torch.tensor(0)
    with torch.cuda.device(player.gpu_id):
        episode_loss = episode_loss.cuda()
    for i in player.duplicate_states_actions:
        # Target action as a 1-element long tensor, the format
        # F.cross_entropy expects for class targets.
        step_optimal_action = torch.tensor(player.duplicate_states_actions[i]).reshape([1]).long()
        with torch.cuda.device(player.gpu_id):
            step_optimal_action = step_optimal_action.cuda()
        step_loss = F.cross_entropy(player.probs[i], step_optimal_action)
        episode_loss = (episode_loss + step_loss)
    return episode_loss
class Decoder(nn.Module):
    """Single decoder stage of a U-Net-style network: upsample ``x``, join
    it with the matching encoder features (concatenation or summation), and
    apply the stage's convolutional block.

    With ``upsample='default'`` the mode follows the block type: nearest
    interpolation + concatenation for DoubleConv, transposed convolution +
    summation for the ResNet-style blocks.
    """

    def __init__(self, in_channels, out_channels, conv_kernel_size=3, scale_factor=2, basic_module=DoubleConv, conv_layer_order='gcr', num_groups=8, padding=1, upsample='default', dropout_prob=0.1, is3d=True):
        super(Decoder, self).__init__()
        concat = True
        adapt_channels = False
        if upsample is not None and upsample != 'none':
            if upsample == 'default':
                if basic_module == DoubleConv:
                    # Interpolation keeps channel count; join by concat.
                    upsample = 'nearest'
                    concat = True
                    adapt_channels = False
                elif basic_module == ResNetBlock or basic_module == ResNetBlockSE:
                    # Transposed conv adapts channels; join by summation.
                    upsample = 'deconv'
                    concat = False
                    adapt_channels = True
            if upsample == 'deconv':
                self.upsampling = TransposeConvUpsampling(in_channels=in_channels, out_channels=out_channels, kernel_size=conv_kernel_size, scale_factor=scale_factor, is3d=is3d)
            else:
                self.upsampling = InterpolateUpsampling(mode=upsample)
        else:
            self.upsampling = NoUpsampling()
        # BUG FIX: removed a duplicate assignment that hard-coded concat=True
        # and was immediately overwritten; the join mode must follow `concat`.
        self.joining = partial(self._joining, concat=concat)
        if adapt_channels:
            # Summation join: the upsampled tensor already has out_channels.
            in_channels = out_channels
        self.basic_module = basic_module(in_channels, out_channels, encoder=False, kernel_size=conv_kernel_size, order=conv_layer_order, num_groups=num_groups, padding=padding, dropout_prob=dropout_prob, is3d=is3d)

    def forward(self, encoder_features, x):
        x = self.upsampling(encoder_features=encoder_features, x=x)
        x = self.joining(encoder_features, x)
        x = self.basic_module(x)
        return x

    @staticmethod
    def _joining(encoder_features, x, concat):
        # BUG FIX: must be a staticmethod -- as a plain instance method,
        # partial(self._joining, concat=...) bound `self` into the
        # `encoder_features` slot and every forward raised a TypeError.
        if concat:
            return torch.cat((encoder_features, x), dim=1)
        else:
            return (encoder_features + x)
def test_get_transparent_pixel(ntg1, ntg2, ntg3, ntg_no_fill_value):
    """Transparent pixel index is an int derived from the fill value, and
    -1 when the grid defines no fill value."""
    transparent = ntg1.get_transparent_pixel()
    assert isinstance(transparent, int)
    assert transparent == 255
    assert ntg2.get_transparent_pixel() == 0
    assert ntg3.get_transparent_pixel() == 255
    assert ntg_no_fill_value.get_transparent_pixel() == -1
# NOTE(review): the `@pytest.mark` prefixes of these two decorators appear
# to have been stripped during extraction; restore them before running.
.parametrize('main_schema, other_schema_data, instance, expect_err', [(CASE1_MAIN_SCHEMA, {'title_schema.json': CASE1_TITLE_SCHEMA}, CASE1_FAILING_DOCUMENT, None), (CASE2_MAIN_SCHEMA, {'values.json': CASE2_VALUES_SCHEMA}, CASE2_FAILING_DOCUMENT, "{'foo': 'bar'} is not of type 'string'")])
.parametrize('with_file_scheme', [True, False])
def test_local_ref_schema_failure_case(run_line, tmp_path, main_schema, other_schema_data, instance, expect_err, with_file_scheme):
    """check-jsonschema exits 1 when a document fails a schema using local
    $ref files; the expected error text (when given) appears on stdout."""
    (main_schemafile, doc) = _prep_files(tmp_path, main_schema, other_schema_data, instance)
    if with_file_scheme:
        # Also exercise the file:// URI form of the schema path.
        schemafile = main_schemafile.resolve().as_uri()
    else:
        schemafile = str(main_schemafile)
    res = run_line(['check-jsonschema', '--schemafile', schemafile, str(doc)])
    # Validation failure => non-zero exit code.
    assert (res.exit_code == 1)
    if (expect_err is not None):
        assert (expect_err in res.stdout)
class SmallEncoder(nn.Module):
    """Compact convolutional feature encoder: a strided 7x7 stem followed by
    three bottleneck stages and a 1x1 projection to ``output_dim`` channels.

    ``forward`` also accepts a tuple/list of two equally-sized batches,
    which are concatenated for a single pass and split again on return.
    """

    def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0):
        super(SmallEncoder, self).__init__()
        self.norm_fn = norm_fn
        # Normalisation applied right after the stem convolution.
        if self.norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=8, num_channels=32)
        elif self.norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(32)
        elif self.norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(32)
        elif self.norm_fn == 'none':
            self.norm1 = nn.Sequential()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=7, stride=2, padding=3)
        self.relu1 = nn.ReLU(inplace=True)
        self.in_planes = 32
        self.layer1 = self._make_layer(32, stride=1)
        self.layer2 = self._make_layer(64, stride=2)
        self.layer3 = self._make_layer(96, stride=2)
        self.dropout = nn.Dropout2d(p=dropout) if dropout > 0 else None
        self.conv2 = nn.Conv2d(96, output_dim, kernel_size=1)
        # Kaiming init for convolutions; unit weight / zero bias for norms.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(module, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
                if module.weight is not None:
                    nn.init.constant_(module.weight, 1)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def _make_layer(self, dim, stride=1):
        # Two bottleneck blocks; only the first may change stride/width.
        blocks = (
            BottleneckBlock(self.in_planes, dim, self.norm_fn, stride=stride),
            BottleneckBlock(dim, dim, self.norm_fn, stride=1),
        )
        self.in_planes = dim
        return nn.Sequential(*blocks)

    def forward(self, x):
        batched_pair = isinstance(x, (tuple, list))
        if batched_pair:
            # Fuse the two inputs into one batch for a single pass.
            batch_dim = x[0].shape[0]
            x = torch.cat(x, dim=0)
        for stage in (self.conv1, self.norm1, self.relu1, self.layer1, self.layer2, self.layer3, self.conv2):
            x = stage(x)
        if self.training and self.dropout is not None:
            x = self.dropout(x)
        if batched_pair:
            x = torch.split(x, [batch_dim, batch_dim], dim=0)
        return x
def evaluate_js(window):
    """Inject a heading and dark styling into the page via JavaScript, then
    print the script's result (the browser user-agent string)."""
    script = "\n    var h1 = document.createElement('h1')\n    var text = document.createTextNode('Hello pywebview')\n    h1.appendChild(text)\n    document.body.appendChild(h1)\n\n    document.body.style.backgroundColor = '#212121'\n    document.body.style.color = '#f2f2f2'\n\n    // Return user agent\n    'User agent:\\n' + navigator.userAgent;\n    "
    result = window.evaluate_js(script)
    print(result)
class Effect508(BaseEffect):
    """Ship bonus: boosts the damage multiplier of modules requiring the
    Small Projectile Turret skill by the hull's ``shipBonusMF`` attribute,
    scaled by the Minmatar Frigate skill."""

    # Passive effect -- applied without module activation.
    type = 'passive'

    def handler(fit, ship, context, projectionRange, **kwargs):
        # NOTE(review): no `self` parameter -- presumably invoked unbound by
        # the effect framework's handler convention; confirm upstream.
        fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Small Projectile Turret')), 'damageMultiplier', ship.getModifiedItemAttr('shipBonusMF'), skill='Minmatar Frigate', **kwargs)
def is_trivial_bound(tp: ProperType, allow_tuple: bool=False) -> bool:
    """True if `tp` is builtins.object, or (when allow_tuple) a
    builtins.tuple whose first argument is itself a trivial bound."""
    if not isinstance(tp, Instance):
        return False
    if tp.type.fullname == 'builtins.tuple':
        # tuple[...] counts only when permitted and its element is trivial.
        return allow_tuple and is_trivial_bound(get_proper_type(tp.args[0]))
    return tp.type.fullname == 'builtins.object'
class GlobalContextVitBlock(nn.Module):
    """Transformer block with windowed attention (optionally conditioned on
    a global query) followed by an MLP, each wrapped in a pre-norm residual
    with optional layer scale and drop path."""

    def __init__(self, dim: int, feat_size: Tuple[(int, int)], num_heads: int, window_size: int=7, mlp_ratio: float=4.0, use_global: bool=True, qkv_bias: bool=True, layer_scale: Optional[float]=None, proj_drop: float=0.0, attn_drop: float=0.0, drop_path: float=0.0, attn_layer: Callable=WindowAttentionGlobal, act_layer: Callable=nn.GELU, norm_layer: Callable=nn.LayerNorm):
        super().__init__()
        feat_size = to_2tuple(feat_size)
        window_size = to_2tuple(window_size)
        self.window_size = window_size
        # Number of non-overlapping windows tiling the feature map.
        self.num_windows = int(((feat_size[0] // window_size[0]) * (feat_size[1] // window_size[1])))
        self.norm1 = norm_layer(dim)
        self.attn = attn_layer(dim, num_heads=num_heads, window_size=window_size, use_global=use_global, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=proj_drop)
        # Optional per-channel layer scale; identity when disabled.
        self.ls1 = (LayerScale(dim, layer_scale) if (layer_scale is not None) else nn.Identity())
        self.drop_path1 = (DropPath(drop_path) if (drop_path > 0.0) else nn.Identity())
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int((dim * mlp_ratio)), act_layer=act_layer, drop=proj_drop)
        self.ls2 = (LayerScale(dim, layer_scale) if (layer_scale is not None) else nn.Identity())
        self.drop_path2 = (DropPath(drop_path) if (drop_path > 0.0) else nn.Identity())

    def _window_attn(self, x, q_global: Optional[torch.Tensor]=None):
        # x is unpacked as (B, H, W, C) -- channels-last feature map.
        (B, H, W, C) = x.shape
        x_win = window_partition(x, self.window_size)
        # Flatten each window into a token sequence for attention.
        x_win = x_win.view((- 1), (self.window_size[0] * self.window_size[1]), C)
        attn_win = self.attn(x_win, q_global)
        x = window_reverse(attn_win, self.window_size, (H, W))
        return x

    def forward(self, x, q_global: Optional[torch.Tensor]=None):
        # Pre-norm residual blocks: windowed attention, then MLP.
        x = (x + self.drop_path1(self.ls1(self._window_attn(self.norm1(x), q_global))))
        x = (x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))))
        return x
class ObjectsBoundingBoxConditionalBuilder(ObjectsCenterPointsConditionalBuilder):
    """Builds and decodes conditioning sequences in which each object is a
    (class-representation, bbox-token, bbox-token) triple."""

    def object_descriptor_length(self) -> int:
        # One class token plus two bbox-corner tokens per object.
        return 3

    def _make_object_descriptors(self, annotations: List[Annotation]) -> List[Tuple[(int, ...)]]:
        # One triple per annotation, padded with `none` triples up to the
        # fixed maximum object count.
        object_triples = [(self.object_representation(ann), *self.token_pair_from_bbox(ann.bbox)) for ann in annotations]
        empty_triple = (self.none, self.none, self.none)
        object_triples = pad_list(object_triples, empty_triple, self.no_max_objects)
        return object_triples

    def inverse_build(self, conditional: LongTensor) -> Tuple[(List[Tuple[(int, BoundingBox)]], Optional[BoundingBox])]:
        """Decode a conditional sequence back into (representation, bbox)
        pairs plus the optional trailing crop bbox."""
        conditional_list = conditional.tolist()
        crop_coordinates = None
        if self.encode_crop:
            # The last two tokens encode the crop rectangle.
            crop_coordinates = self.bbox_from_token_pair(conditional_list[(- 2)], conditional_list[(- 1)])
            conditional_list = conditional_list[:(- 2)]
        object_triples = grouper(conditional_list, 3)
        assert (conditional.shape[0] == self.embedding_dim)
        # Drop padding triples (those whose class token is `none`).
        return ([(object_triple[0], self.bbox_from_token_pair(object_triple[1], object_triple[2])) for object_triple in object_triples if (object_triple[0] != self.none)], crop_coordinates)

    def plot(self, conditional: LongTensor, label_for_category_no: Callable[([int], str)], figure_size: Tuple[(int, int)], line_width: int=3, font_size: Optional[int]=None) -> Tensor:
        """Render the decoded objects (and crop, if any) as labelled boxes;
        returns the image as a tensor scaled to [-1, 1].

        NOTE(review): hard-codes a Lato font path -- fails on systems
        without that font installed.
        """
        plot = pil_image.new('RGB', figure_size, WHITE)
        draw = pil_img_draw.Draw(plot)
        font = ImageFont.truetype('/usr/share/fonts/truetype/lato/Lato-Regular.ttf', size=get_plot_font_size(font_size, figure_size))
        (width, height) = plot.size
        (description, crop_coordinates) = self.inverse_build(conditional)
        for ((representation, bbox), color) in zip(description, cycle(COLOR_PALETTE)):
            annotation = self.representation_to_annotation(representation)
            class_label = ((label_for_category_no(annotation.category_no) + ' ') + additional_parameters_string(annotation))
            bbox = absolute_bbox(bbox, width, height)
            draw.rectangle(bbox, outline=color, width=line_width)
            draw.text(((bbox[0] + line_width), (bbox[1] + line_width)), class_label, anchor='la', fill=BLACK, font=font)
        if (crop_coordinates is not None):
            draw.rectangle(absolute_bbox(crop_coordinates, width, height), outline=GRAY_75, width=line_width)
        return ((convert_pil_to_tensor(plot) / 127.5) - 1.0)
class EntropyStatCollector(diamond.collector.Collector):
    """Publishes the kernel's currently available entropy as a gauge."""

    # Kernel interface exposing the entropy pool size.
    PROC = '/proc/sys/kernel/random/entropy_avail'

    def get_default_config(self):
        """Return the default collector settings (metric path 'entropy')."""
        config = super(EntropyStatCollector, self).get_default_config()
        config.update({'path': 'entropy'})
        return config

    def collect(self):
        """Read and publish the available entropy; no-op when unreadable."""
        if (not os.access(self.PROC, os.R_OK)):
            return None
        # BUG FIX (resource safety): use a context manager so the handle is
        # closed even if read() raises.
        with open(self.PROC) as entropy_file:
            entropy = entropy_file.read().strip()
        self.publish_gauge('available', entropy)
def run(func, *args, backend=None, backend_options=None):
    """Run *func* via anyio, selecting the backend from the argument or the
    PURERPC_BACKEND environment variable (default 'asyncio').

    The pseudo-backend 'uvloop' maps to asyncio with use_uvloop=True; in
    that case uvloop is imported eagerly so a missing install fails early.
    """
    if backend is None:
        backend = os.getenv('PURERPC_BACKEND', 'asyncio')
    _log.info(f'purerpc.run() selected {backend} backend')
    if backend == 'uvloop':
        backend = 'asyncio'
        uvloop_options = dict(use_uvloop=True)
        # Preserve any caller-supplied options, forcing use_uvloop on.
        if backend_options is None:
            backend_options = uvloop_options
        else:
            backend_options.update(uvloop_options)
    if backend == 'asyncio' and backend_options and backend_options.get('use_uvloop'):
        import uvloop
    return anyio.run(func, *args, backend=backend, backend_options=backend_options)
# NOTE(review): decorator prefix stripped during extraction -- presumably
# `@pytest.mark.parametrize('case', [...])`; restore before running.
.parametrize('case', [CaseReducesInx3OutComp, CaseIfBasicComp, CaseIfDanglingElseInnerComp, CaseIfDanglingElseOutterComp, CaseElifBranchComp, CaseNestedIfComp, CaseForRangeLowerUpperStepPassThroughComp, CaseIfExpInForStmtComp, CaseIfExpBothImplicitComp, CaseIfBoolOpInForStmtComp, CaseIfTmpVarInForStmtComp, CaseFixedSizeSliceComp, CaseLambdaConnectComp, CaseLambdaConnectWithListComp])
def test_yosys_behavioral_L2(case):
    """Run each behavioral test case's DUT through the shared test runner."""
    run_test(case, case.DUT())
def test_entityref():
    """EntityRef: equality semantics, XML round-trip, and validity across
    OpenSCENARIO versions 0-2."""
    ref_a = OSC.EntityRef('ref_str')
    ref_b = OSC.EntityRef('ref_str')
    ref_other = OSC.EntityRef('ref_str2')
    prettyprint(ref_a.get_element())
    # Same entity name => equal; different name => unequal.
    assert ref_a == ref_b
    assert ref_a != ref_other
    # Parsing the serialized element reproduces an equal object.
    parsed = OSC.EntityRef.parse(ref_a.get_element())
    assert ref_a == parsed
    for osc_version in (0, 1, 2):
        assert version_validation('EntityRef', ref_a, osc_version) == ValidationResponse.OK
class TestFindWebengineResources():
    """Tests for pakjoy._find_webengine_resources(): env-var override, Qt
    data path (with/without resources/ subfolder), application dir,
    per-user fallback, and the no-match ParseError.

    NOTE(review): the three path methods below appear to have lost their
    `@pytest.fixture` decorators during extraction, and the two
    `.parametrize(...)` lines lost their `@pytest.mark` prefix; restore
    them before running.
    """

    def qt_data_path(self, monkeypatch: pytest.MonkeyPatch, tmp_path: pathlib.Path):
        # Fixture: fake Qt data dir patched into qtutils.library_path.
        qt_data_path = (tmp_path / 'qt_data')
        qt_data_path.mkdir()
        monkeypatch.setattr(pakjoy.qtutils, 'library_path', (lambda _which: qt_data_path))
        return qt_data_path

    def application_dir_path(self, monkeypatch: pytest.MonkeyPatch, tmp_path: pathlib.Path, qt_data_path: pathlib.Path):
        # Fixture: fake application dir; depends on qt_data_path so the
        # earlier lookup location exists but stays empty.
        app_dir_path = (tmp_path / 'app_dir')
        app_dir_path.mkdir()
        monkeypatch.setattr(pakjoy.objects.qapp, 'applicationDirPath', (lambda : app_dir_path))
        return app_dir_path

    def fallback_path(self, monkeypatch: pytest.MonkeyPatch, tmp_path: pathlib.Path, qt_data_path: pathlib.Path, application_dir_path: pathlib.Path):
        # Fixture: per-user ~/.<appname> fallback with HOME redirected.
        home_path = (tmp_path / 'home')
        monkeypatch.setattr(pakjoy.pathlib.Path, 'home', (lambda : home_path))
        app_path = (home_path / f'.{pakjoy.objects.qapp.applicationName()}')
        app_path.mkdir(parents=True)
        return app_path

    # NOTE(review): presumably `@pytest.mark.parametrize(...)`.
    .parametrize('create_file', [True, False])
    def test_overridden(self, monkeypatch: pytest.MonkeyPatch, tmp_path: pathlib.Path, create_file: bool):
        # The env-var override wins whether or not the pak file exists.
        override_path = (tmp_path / 'override')
        override_path.mkdir()
        monkeypatch.setenv(pakjoy.RESOURCES_ENV_VAR, str(override_path))
        if create_file:
            (override_path / pakjoy.PAK_FILENAME).touch()
        assert (pakjoy._find_webengine_resources() == override_path)

    # NOTE(review): presumably `@pytest.mark.parametrize(...)`.
    .parametrize('with_subfolder', [True, False])
    def test_qt_data_path(self, qt_data_path: pathlib.Path, with_subfolder: bool):
        # Pak file directly in the Qt data dir or in its resources/ subdir.
        resources_path = qt_data_path
        if with_subfolder:
            resources_path /= 'resources'
            resources_path.mkdir()
        (resources_path / pakjoy.PAK_FILENAME).touch()
        assert (pakjoy._find_webengine_resources() == resources_path)

    def test_application_dir_path(self, application_dir_path: pathlib.Path):
        (application_dir_path / pakjoy.PAK_FILENAME).touch()
        assert (pakjoy._find_webengine_resources() == application_dir_path)

    def test_fallback_path(self, fallback_path: pathlib.Path):
        (fallback_path / pakjoy.PAK_FILENAME).touch()
        assert (pakjoy._find_webengine_resources() == fallback_path)

    def test_nowhere(self, fallback_path: pathlib.Path):
        # No pak file anywhere -> the lookup raises a ParseError.
        with pytest.raises(binparsing.ParseError, match="Couldn't find webengine resources dir"):
            pakjoy._find_webengine_resources()
# NOTE(review): given the `field(...)` usage this class was almost certainly
# declared with `@dataclass` upstream and the decorator was stripped during
# extraction; restore it before instantiating.
class TokenizerTrainingArguments():
    """Configuration for training a new tokenizer from a base tokenizer."""

    # Tokenizer whose algorithm/special tokens the new one builds on.
    base_tokenizer: Optional[str] = field(default='gpt2', metadata={'help': 'Base tokenizer to build new tokenizer from.'})
    dataset_name: Optional[str] = field(default='transformersbook/codeparrot-train', metadata={'help': 'Dataset to train tokenizer on.'})
    text_column: Optional[str] = field(default='content', metadata={'help': 'Column containing text data to process.'})
    # BUG FIX: help text previously duplicated n_examples' description
    # ('Number of examples to train tokenizer on.') instead of describing
    # the vocabulary size.
    vocab_size: Optional[int] = field(default=200000, metadata={'help': 'Target vocabulary size of the new tokenizer.'})
    n_examples: Optional[int] = field(default=32768, metadata={'help': 'Number of examples to train the tokenizer on.'})
    tokenizer_name: Optional[str] = field(default='codeparrot', metadata={'help': 'Name of new tokenizer.'})
    push_to_hub: Optional[bool] = field(default=True, metadata={'help': 'Push saved tokenizer to the hub.'})
class ForumTopicEdited(TelegramObject):
    """Service message object: a forum topic was edited.

    Only the fields that changed are set; both may be ``None``.  Equality
    is keyed on ``(name, icon_custom_emoji_id)`` via ``_id_attrs``.
    """

    __slots__ = ('name', 'icon_custom_emoji_id')

    def __init__(self, name: Optional[str]=None, icon_custom_emoji_id: Optional[str]=None, *, api_kwargs: Optional[JSONDict]=None):
        super().__init__(api_kwargs=api_kwargs)
        # New topic name, if it was changed.
        self.name: Optional[str] = name
        # New custom-emoji icon id, if it was changed.
        self.icon_custom_emoji_id: Optional[str] = icon_custom_emoji_id
        self._id_attrs = (self.name, self.icon_custom_emoji_id)
        # Called last -- presumably makes the instance immutable per the
        # TelegramObject convention; confirm against the base class.
        self._freeze()
class SwiGLUFFNFused(SwiGLU):
    """SwiGLU feed-forward whose hidden width is rescaled by 2/3 (keeping
    parameter count comparable to a plain MLP) and rounded up to the next
    multiple of 8."""

    def __init__(self, in_features: int, hidden_features: Optional[int]=None, out_features: Optional[int]=None, act_layer: Callable[(..., nn.Module)]=None, drop: float=0.0, bias: bool=True) -> None:
        # NOTE(review): `act_layer` and `drop` are accepted but not forwarded
        # to SwiGLU -- presumably fixed by the fused implementation; confirm.
        out_features = (out_features or in_features)
        hidden_features = (hidden_features or in_features)
        # 2/3 scaling, then round up to a multiple of 8.
        hidden_features = (((int(((hidden_features * 2) / 3)) + 7) // 8) * 8)
        super().__init__(in_features=in_features, hidden_features=hidden_features, out_features=out_features, bias=bias)
class IpPool(db.Model, AuditTimeMixin):
    """Pool of fixed IP addresses, keyed by the address itself."""

    __tablename__ = 'tb_ippool'

    # NOTE(review): `id` is not the primary key -- `fixed_ip` is.
    id = db.Column(db.Integer)
    fixed_ip = db.Column(db.String(256), primary_key=True)
    region = db.Column(db.String(50), nullable=False)
    allocated = db.Column(db.Boolean, nullable=False, default=True)
    is_ipv6 = db.Column(db.Boolean, nullable=False, default=False)

    def __repr__(self):
        # BUG FIX: referenced nonexistent `self.ip`; the column is `fixed_ip`,
        # so repr() previously raised AttributeError.
        return ('<IpPool> [fixed_ip: %s, region: %s]' % (self.fixed_ip, self.region))
class PyramidFeatures(nn.Module):
    """Hybrid CNN (ResNet) + Swin Transformer feature pyramid.

    Runs ResNet stages and Swin stages side by side, fusing features at
    three scales, and returns two token sequences each prefixed with an
    average-pooled CLS-style token.

    NOTE(review): relies on a module-level `device` and on config fields
    (swin_pretrained_path, cnn_backbone, resnet_pretrained, cnn_pyramid_fm,
    swin_pyramid_fm, image_size, patch_size) -- confirm against the config
    class.
    """

    def __init__(self, config, img_size=224, in_channels=3):
        super().__init__()
        model_path = config.swin_pretrained_path
        self.swin_transformer = SwinTransformer(img_size, in_chans=3)
        # Pretrained Swin checkpoint; incompatible keys are removed below
        # before load_state_dict.
        checkpoint = torch.load(model_path, map_location=torch.device(device))['model']
        unexpected = ['patch_embed.proj.weight', 'patch_embed.proj.bias', 'patch_embed.norm.weight', 'patch_embed.norm.bias', 'head.weight', 'head.bias', 'layers.0.downsample.norm.weight', 'layers.0.downsample.norm.bias', 'layers.0.downsample.reduction.weight', 'layers.1.downsample.norm.weight', 'layers.1.downsample.norm.bias', 'layers.1.downsample.reduction.weight', 'layers.2.downsample.norm.weight', 'layers.2.downsample.norm.bias', 'layers.2.downsample.reduction.weight', 'norm.weight', 'norm.bias']
        # NOTE(review): eval() builds the torchvision constructor call from
        # config strings -- safe only for trusted configuration.
        resnet = eval(f'torchvision.models.{config.cnn_backbone}(pretrained={config.resnet_pretrained})')
        self.resnet_layers = nn.ModuleList(resnet.children())[:7]
        # Level 1: 1x1 conv to match Swin channels; patch merging seeded
        # in-place from the checkpoint's stage-0 downsample weights.
        self.p1_ch = nn.Conv2d(config.cnn_pyramid_fm[0], config.swin_pyramid_fm[0], kernel_size=1)
        self.p1_pm = PatchMerging(((config.image_size // config.patch_size), (config.image_size // config.patch_size)), config.swin_pyramid_fm[0])
        self.p1_pm.state_dict()['reduction.weight'][:] = checkpoint['layers.0.downsample.reduction.weight']
        self.p1_pm.state_dict()['norm.weight'][:] = checkpoint['layers.0.downsample.norm.weight']
        self.p1_pm.state_dict()['norm.bias'][:] = checkpoint['layers.0.downsample.norm.bias']
        self.norm_1 = nn.LayerNorm(config.swin_pyramid_fm[0])
        self.avgpool_1 = nn.AdaptiveAvgPool1d(1)
        # Level 2: next ResNet stage plus matching channel adapter and
        # patch merging seeded from stage-1 downsample weights.
        self.p2 = self.resnet_layers[5]
        self.p2_ch = nn.Conv2d(config.cnn_pyramid_fm[1], config.swin_pyramid_fm[1], kernel_size=1)
        self.p2_pm = PatchMerging((((config.image_size // config.patch_size) // 2), ((config.image_size // config.patch_size) // 2)), config.swin_pyramid_fm[1])
        self.p2_pm.state_dict()['reduction.weight'][:] = checkpoint['layers.1.downsample.reduction.weight']
        self.p2_pm.state_dict()['norm.weight'][:] = checkpoint['layers.1.downsample.norm.weight']
        self.p2_pm.state_dict()['norm.bias'][:] = checkpoint['layers.1.downsample.norm.bias']
        # Level 3: final ResNet stage used here plus channel adapter.
        self.p3 = self.resnet_layers[6]
        self.p3_ch = nn.Conv2d(config.cnn_pyramid_fm[2], config.swin_pyramid_fm[2], kernel_size=1)
        self.norm_2 = nn.LayerNorm(config.swin_pyramid_fm[2])
        self.avgpool_2 = nn.AdaptiveAvgPool1d(1)
        # Drop keys already consumed above, plus the unused 4th Swin stage,
        # then load the remainder into the Swin transformer.
        for key in list(checkpoint.keys()):
            if ((key in unexpected) or ('layers.3' in key)):
                del checkpoint[key]
        self.swin_transformer.load_state_dict(checkpoint)

    def forward(self, x):
        # ResNet stem + early stages (children 0..4).
        for i in range(5):
            x = self.resnet_layers[i](x)
        # Level 1: adapt channels, flatten to tokens, run Swin stage 0 with
        # a residual skip from the CNN tokens.
        fm1 = x
        fm1_ch = self.p1_ch(x)
        fm1_reshaped = Rearrange('b c h w -> b (h w) c')(fm1_ch)
        sw1 = self.swin_transformer.layers[0](fm1_reshaped)
        sw1_skipped = (fm1_reshaped + sw1)
        norm1 = self.norm_1(sw1_skipped)
        # CLS-style token: average over the token dimension.
        sw1_CLS = self.avgpool_1(norm1.transpose(1, 2))
        sw1_CLS_reshaped = Rearrange('b c 1 -> b 1 c')(sw1_CLS)
        fm1_sw1 = self.p1_pm(sw1_skipped)
        fm1_sw2 = self.swin_transformer.layers[1](fm1_sw1)
        # Level 2: CNN stage output fused with the Swin stream.
        fm2 = self.p2(fm1)
        fm2_ch = self.p2_ch(fm2)
        fm2_reshaped = Rearrange('b c h w -> b (h w) c')(fm2_ch)
        fm2_sw2_skipped = (fm2_reshaped + fm1_sw2)
        fm2_sw2 = self.p2_pm(fm2_sw2_skipped)
        fm2_sw3 = self.swin_transformer.layers[2](fm2_sw2)
        # Level 3: final fusion and second CLS-style token.
        fm3 = self.p3(fm2)
        fm3_ch = self.p3_ch(fm3)
        fm3_reshaped = Rearrange('b c h w -> b (h w) c')(fm3_ch)
        fm3_sw3_skipped = (fm3_reshaped + fm2_sw3)
        norm2 = self.norm_2(fm3_sw3_skipped)
        sw3_CLS = self.avgpool_2(norm2.transpose(1, 2))
        sw3_CLS_reshaped = Rearrange('b c 1 -> b 1 c')(sw3_CLS)
        return [torch.cat((sw1_CLS_reshaped, sw1_skipped), dim=1), torch.cat((sw3_CLS_reshaped, fm3_sw3_skipped), dim=1)]
class CoLightAgent(Agent):
def __init__(self, dic_agent_conf=None, dic_traffic_env_conf=None, dic_path=None, cnt_round=None, best_round=None, bar_round=None, intersection_id='0'):
super(CoLightAgent, self).__init__(dic_agent_conf, dic_traffic_env_conf, dic_path, intersection_id)
self.att_regulatization = dic_agent_conf['att_regularization']
self.CNN_layers = dic_agent_conf['CNN_layers']
self.num_agents = dic_traffic_env_conf['NUM_INTERSECTIONS']
self.num_neighbors = min(dic_traffic_env_conf['TOP_K_ADJACENCY'], self.num_agents)
self.vec = np.zeros((1, self.num_neighbors))
self.vec[0][0] = 1
self.num_actions = len(self.dic_traffic_env_conf['PHASE'][self.dic_traffic_env_conf['SIMULATOR_TYPE']])
self.num_lanes = np.sum(np.array(list(self.dic_traffic_env_conf['LANE_NUM'].values())))
self.len_feature = self.compute_len_feature()
self.memory = self.build_memory()
if (cnt_round == 0):
self.q_network = self.build_network()
if os.listdir(self.dic_path['PATH_TO_MODEL']):
self.q_network.load_weights(os.path.join(self.dic_path['PATH_TO_MODEL'], 'round_0_inter_{0}.h5'.format(intersection_id)), by_name=True)
self.q_network_bar = self.build_network_from_copy(self.q_network)
else:
try:
if best_round:
self.load_network('round_{0}_inter_{1}'.format(best_round, self.intersection_id))
if (bar_round and (bar_round != best_round) and (cnt_round > 10)):
self.load_network_bar('round_{0}_inter_{1}'.format(bar_round, self.intersection_id))
elif ('UPDATE_Q_BAR_EVERY_C_ROUND' in self.dic_agent_conf):
if self.dic_agent_conf['UPDATE_Q_BAR_EVERY_C_ROUND']:
self.load_network_bar('round_{0}'.format(max((((best_round - 1) // self.dic_agent_conf['UPDATE_Q_BAR_FREQ']) * self.dic_agent_conf['UPDATE_Q_BAR_FREQ']), 0), self.intersection_id))
else:
self.load_network_bar('round_{0}_inter_{1}'.format(max((best_round - self.dic_agent_conf['UPDATE_Q_BAR_FREQ']), 0), self.intersection_id))
else:
self.load_network_bar('round_{0}_inter_{1}'.format(max((best_round - self.dic_agent_conf['UPDATE_Q_BAR_FREQ']), 0), self.intersection_id))
else:
self.load_network('round_{0}_inter_{1}'.format((cnt_round - 1), self.intersection_id))
if ('UPDATE_Q_BAR_EVERY_C_ROUND' in self.dic_agent_conf):
if self.dic_agent_conf['UPDATE_Q_BAR_EVERY_C_ROUND']:
self.load_network_bar('round_{0}_inter_{1}'.format(max((((cnt_round - 1) // self.dic_agent_conf['UPDATE_Q_BAR_FREQ']) * self.dic_agent_conf['UPDATE_Q_BAR_FREQ']), 0), self.intersection_id))
else:
self.load_network_bar('round_{0}_inter_{1}'.format(max((cnt_round - self.dic_agent_conf['UPDATE_Q_BAR_FREQ']), 0), self.intersection_id))
else:
self.load_network_bar('round_{0}_inter_{1}'.format(max((cnt_round - self.dic_agent_conf['UPDATE_Q_BAR_FREQ']), 0), self.intersection_id))
except:
print('fail to load network, current round: {0}'.format(cnt_round))
'\n "EPSILON": 0.8,\n "EPSILON_DECAY": 0.95,\n "MIN_EPSILON": 0.2,\n '
if os.path.exists(os.path.join(self.dic_path['PATH_TO_MODEL'], 'round_-1_inter_{0}.h5'.format(intersection_id))):
self.dic_agent_conf['EPSILON'] = self.dic_agent_conf['MIN_EPSILON']
print(('round%d, EPSILON:%.4f' % (cnt_round, self.dic_agent_conf['EPSILON'])))
else:
decayed_epsilon = (self.dic_agent_conf['EPSILON'] * pow(self.dic_agent_conf['EPSILON_DECAY'], cnt_round))
self.dic_agent_conf['EPSILON'] = max(decayed_epsilon, self.dic_agent_conf['MIN_EPSILON'])
def compute_len_feature(self):
from functools import reduce
len_feature = tuple()
for feature_name in self.dic_traffic_env_conf['LIST_STATE_FEATURE']:
if ('adjacency' in feature_name):
continue
elif ('phase' in feature_name):
len_feature += self.dic_traffic_env_conf['DIC_FEATURE_DIM'][('D_' + feature_name.upper())]
elif (feature_name == 'lane_num_vehicle'):
len_feature += ((self.dic_traffic_env_conf['DIC_FEATURE_DIM'][('D_' + feature_name.upper())][0] * self.num_lanes),)
return sum(len_feature)
'\n components of the network\n 1. MLP encoder of features\n 2. CNN layers\n 3. q network\n '
def MLP(self, In_0, layers=[128, 128]):
for (layer_index, layer_size) in enumerate(layers):
if (layer_index == 0):
h = Dense(layer_size, activation='relu', kernel_initializer='random_normal', name=('Dense_embed_%d' % layer_index))(In_0)
else:
h = Dense(layer_size, activation='relu', kernel_initializer='random_normal', name=('Dense_embed_%d' % layer_index))(h)
return h
    def MultiHeadsAttModel(self, In_agent, In_neighbor, l=5, d=128, dv=16, dout=128, nv=8, suffix=(- 1)):
        """Multi-head attention of each agent over its neighborhood.

        Args (shapes inferred from the Reshape calls below -- confirm
        against callers):
            In_agent: (batch, num_agents, d) agent embeddings.
            In_neighbor: (batch, num_agents, num_neighbors, num_agents)
                neighbor-selection tensor (presumably one-hot rows).
            l: neighborhood size (unused directly; num_neighbors is read
                from self).
            d: input embedding dim; dv: per-head dim; dout: output dim;
            nv: number of attention heads; suffix: layer-name suffix.

        Returns:
            (out, att_record):
                out: (batch, num_agents, dout) aggregated relation embedding.
                att_record: (batch, num_agents, nv, num_neighbors) weights.
        """
        print('In_agent.shape,In_neighbor.shape,l, d, dv, dout, nv', In_agent.shape, In_neighbor.shape, l, d, dv, dout, nv)
        # query side: (batch, num_agents, 1, d)
        agent_repr = Reshape((self.num_agents, 1, d))(In_agent)
        # tile embeddings so each agent row sees all agents: (batch, num_agents, num_agents, d)
        neighbor_repr = RepeatVector3D(self.num_agents)(In_agent)
        print('neighbor_repr.shape', neighbor_repr.shape)
        # pick each agent's neighbors via batched matmul with the selection tensor
        # -> (batch, num_agents, num_neighbors, d)
        neighbor_repr = Lambda((lambda x: K.batch_dot(x[0], x[1])))([In_neighbor, neighbor_repr])
        print('neighbor_repr.shape', neighbor_repr.shape)
        # project query to nv heads of dv each
        agent_repr_head = Dense((dv * nv), activation='relu', kernel_initializer='random_normal', name=('agent_repr_%d' % suffix))(agent_repr)
        agent_repr_head = Reshape((self.num_agents, 1, dv, nv))(agent_repr_head)
        # move heads before the (1, dv) query slot: (batch, num_agents, nv, 1, dv)
        agent_repr_head = Lambda((lambda x: K.permute_dimensions(x, (0, 1, 4, 2, 3))))(agent_repr_head)
        # project keys to nv heads of dv each
        neighbor_repr_head = Dense((dv * nv), activation='relu', kernel_initializer='random_normal', name=('neighbor_repr_%d' % suffix))(neighbor_repr)
        print('DEBUG', neighbor_repr_head.shape)
        print('self.num_agents,self.num_neighbors,dv,nv', self.num_agents, self.num_neighbors, dv, nv)
        neighbor_repr_head = Reshape((self.num_agents, self.num_neighbors, dv, nv))(neighbor_repr_head)
        # (batch, num_agents, nv, num_neighbors, dv)
        neighbor_repr_head = Lambda((lambda x: K.permute_dimensions(x, (0, 1, 4, 2, 3))))(neighbor_repr_head)
        # scaled-dot-free attention: softmax over query.key along dv (axis 4)
        att = Lambda((lambda x: K.softmax(K.batch_dot(x[0], x[1], axes=[4, 4]))))([agent_repr_head, neighbor_repr_head])
        # flatten the singleton query slot for the returned attention record
        att_record = Reshape((self.num_agents, nv, self.num_neighbors))(att)
        # value projection, same head layout as the keys
        neighbor_hidden_repr_head = Dense((dv * nv), activation='relu', kernel_initializer='random_normal', name=('neighbor_hidden_repr_%d' % suffix))(neighbor_repr)
        neighbor_hidden_repr_head = Reshape((self.num_agents, self.num_neighbors, dv, nv))(neighbor_hidden_repr_head)
        neighbor_hidden_repr_head = Lambda((lambda x: K.permute_dimensions(x, (0, 1, 4, 2, 3))))(neighbor_hidden_repr_head)
        # weighted sum of values, then mean over heads (axis 2)
        out = Lambda((lambda x: K.mean(K.batch_dot(x[0], x[1]), axis=2)))([att, neighbor_hidden_repr_head])
        out = Reshape((self.num_agents, dv))(out)
        out = Dense(dout, activation='relu', kernel_initializer='random_normal', name=('MLP_after_relation_%d' % suffix))(out)
        return (out, att_record)
def adjacency_index2matrix(self, adjacency_index):
adjacency_index_new = np.sort(adjacency_index, axis=(- 1))
l = to_categorical(adjacency_index_new, num_classes=self.num_agents)
return l
def action_att_predict(self, state, total_features=[], total_adjs=[], bar=False):
batch_size = len(state)
if ((total_features == []) and (total_adjs == [])):
(total_features, total_adjs) = (list(), list())
for i in range(batch_size):
feature = []
adj = []
for j in range(self.num_agents):
observation = []
for feature_name in self.dic_traffic_env_conf['LIST_STATE_FEATURE']:
if ('adjacency' in feature_name):
continue
if (feature_name == 'cur_phase'):
if (len(state[i][j][feature_name]) == 1):
observation.extend(self.dic_traffic_env_conf['PHASE'][self.dic_traffic_env_conf['SIMULATOR_TYPE']][state[i][j][feature_name][0]])
else:
observation.extend(state[i][j][feature_name])
elif (feature_name == 'lane_num_vehicle'):
observation.extend(state[i][j][feature_name])
feature.append(observation)
adj.append(state[i][j]['adjacency_matrix'])
total_features.append(feature)
total_adjs.append(adj)
total_features = np.reshape(np.array(total_features), [batch_size, self.num_agents, (- 1)])
total_adjs = self.adjacency_index2matrix(np.array(total_adjs))
if bar:
all_output = self.q_network_bar.predict([total_features, total_adjs])
else:
all_output = self.q_network.predict([total_features, total_adjs])
(action, attention) = (all_output[0], all_output[1])
if (len(action) > 1):
return (total_features, total_adjs, action, attention)
max_action = np.expand_dims(np.argmax(action, axis=(- 1)), axis=(- 1))
random_action = np.reshape(np.random.randint(self.num_actions, size=(1 * self.num_agents)), (1, self.num_agents, 1))
possible_action = np.concatenate([max_action, random_action], axis=(- 1))
selection = np.random.choice([0, 1], size=(batch_size * self.num_agents), p=[(1 - self.dic_agent_conf['EPSILON']), self.dic_agent_conf['EPSILON']])
act = possible_action.reshape(((batch_size * self.num_agents), 2))[(np.arange((batch_size * self.num_agents)), selection)]
act = np.reshape(act, (batch_size, self.num_agents))
return (act, attention)
def choose_action(self, count, state):
(act, attention) = self.action_att_predict([state])
return (act[0], attention[0])
    def prepare_Xs_Y(self, memory, dic_exp_conf):
        """Build training inputs (Xs) and Q-learning targets (Y) from replay memory.

        Keeps at most MAX_MEMORY_LEN recent transitions, samples SAMPLE_SIZE
        of them (whole memory when pretraining/aggregating), then computes
        one-step TD targets with the target network (``bar=True``).
        Side effects: sets ``self.Xs``, ``self.Y`` and ``self.Y_total``
        (Q-targets plus the next-state attention, matching the two-output
        network).  Returns None.
        """
        ind_end = len(memory)
        print('memory size before forget: {0}'.format(ind_end))
        if (dic_exp_conf['PRETRAIN'] or dic_exp_conf['AGGREGATE']):
            # pretrain/aggregate: train on the whole memory, no sampling
            sample_slice = memory
        else:
            # forget oldest entries beyond MAX_MEMORY_LEN, then subsample
            ind_sta = max(0, (ind_end - self.dic_agent_conf['MAX_MEMORY_LEN']))
            memory_after_forget = memory[ind_sta:ind_end]
            print('memory size after forget:', len(memory_after_forget))
            sample_size = min(self.dic_agent_conf['SAMPLE_SIZE'], len(memory_after_forget))
            sample_slice = random.sample(memory_after_forget, sample_size)
            print('memory samples number:', sample_size)
        # unpack transitions into per-sample, per-agent lists
        # memory layout assumed: memory[i][j] = (state, action, next_state, reward, _)
        _state = []
        _next_state = []
        _action = []
        _reward = []
        for i in range(len(sample_slice)):
            _state.append([])
            _next_state.append([])
            _action.append([])
            _reward.append([])
            for j in range(self.num_agents):
                (state, action, next_state, reward, _) = sample_slice[i][j]
                _state[i].append(state)
                _next_state[i].append(next_state)
                _action[i].append(action)
                _reward[i].append(reward)
        # current Q-values (to be partially overwritten with TD targets)
        (_features, _adjs, q_values, _) = self.action_att_predict(_state)
        # next-state attention from the online network (used as second label)
        (_next_features, _next_adjs, _, attention) = self.action_att_predict(_next_state)
        # next-state Q-values from the target network (bar=True); reuses the
        # already-built next-state tensors to skip re-encoding
        (_, _, target_q_values, _) = self.action_att_predict(_next_state, total_features=_next_features, total_adjs=_next_adjs, bar=True)
        for i in range(len(sample_slice)):
            for j in range(self.num_agents):
                # one-step TD target: r/NORMAL_FACTOR + GAMMA * max_a' Q_target
                q_values[i][j][_action[i][j]] = ((_reward[i][j] / self.dic_agent_conf['NORMAL_FACTOR']) + (self.dic_agent_conf['GAMMA'] * np.max(target_q_values[i][j])))
        self.Xs = [_features, _adjs]
        self.Y = q_values.copy()
        # Y_total pairs the Q-targets with the attention record, one label
        # per network output head
        self.Y_total = [q_values.copy()]
        self.Y_total.append(attention)
        return
    def build_network(self, MLP_layers=[32, 32], Output_layers=[]):
        """Assemble and compile the attention Q-network.

        Inputs: per-agent feature vectors and the one-hot neighbor adjacency
        tensor.  Features are embedded by an MLP, passed through stacked
        multi-head attention layers (``self.CNN_layers``), and mapped to
        per-action Q-values.  Outputs: [Q-values, attention record].

        NOTE(review): ``MLP_layers``/``Output_layers`` are mutable defaults;
        they are only read here, so this is safe but fragile if ever mutated.
        """
        CNN_layers = self.CNN_layers
        # one attention head per layer by default
        CNN_heads = ([1] * len(CNN_layers))
        start_time = time.time()
        assert (len(CNN_layers) == len(CNN_heads))
        In = list()
        In.append(Input(shape=[self.num_agents, self.len_feature], name='feature'))
        In.append(Input(shape=(self.num_agents, self.num_neighbors, self.num_agents), name='adjacency_matrix'))
        Input_end_time = time.time()
        # 1. MLP feature embedding
        feature = self.MLP(In[0], MLP_layers)
        Embedding_end_time = time.time()
        att_record_all_layers = list()
        print('CNN_heads:', CNN_heads)
        # 2. stacked relation (attention) layers
        for (CNN_layer_index, CNN_layer_size) in enumerate(CNN_layers):
            print('CNN_heads[CNN_layer_index]:', CNN_heads[CNN_layer_index])
            if (CNN_layer_index == 0):
                (h, att_record) = self.MultiHeadsAttModel(feature, In[1], l=self.num_neighbors, d=MLP_layers[(- 1)], dv=CNN_layer_size[0], dout=CNN_layer_size[1], nv=CNN_heads[CNN_layer_index], suffix=CNN_layer_index)
            else:
                (h, att_record) = self.MultiHeadsAttModel(h, In[1], l=self.num_neighbors, d=MLP_layers[(- 1)], dv=CNN_layer_size[0], dout=CNN_layer_size[1], nv=CNN_heads[CNN_layer_index], suffix=CNN_layer_index)
            att_record_all_layers.append(att_record)
        if (len(CNN_layers) > 1):
            att_record_all_layers = Concatenate(axis=1)(att_record_all_layers)
        else:
            att_record_all_layers = att_record_all_layers[0]
        att_record_all_layers = Reshape((len(CNN_layers), self.num_agents, CNN_heads[(- 1)], self.num_neighbors))(att_record_all_layers)
        # 3. Q-value head (optional hidden Output_layers, then linear actions)
        for (layer_index, layer_size) in enumerate(Output_layers):
            h = Dense(layer_size, activation='relu', kernel_initializer='random_normal', name=('Dense_q_%d' % layer_index))(h)
        out = Dense(self.num_actions, kernel_initializer='random_normal', name='action_layer')(h)
        model = Model(inputs=In, outputs=[out, att_record_all_layers])
        if self.att_regulatization:
            # regularize attention toward a prior via KL divergence
            model.compile(optimizer=RMSprop(lr=self.dic_agent_conf['LEARNING_RATE']), loss=[self.dic_agent_conf['LOSS_FUNCTION'], 'kullback_leibler_divergence'], loss_weights=[1, self.dic_agent_conf['rularization_rate']])
        else:
            # attention output carries zero loss weight (label is ignored)
            model.compile(optimizer=RMSprop(lr=self.dic_agent_conf['LEARNING_RATE']), loss=self.dic_agent_conf['LOSS_FUNCTION'], loss_weights=[1, 0])
        model.summary()
        network_end = time.time()
        print('build_Input_end_time:', (Input_end_time - start_time))
        print('embedding_time:', (Embedding_end_time - Input_end_time))
        print('total time:', (network_end - start_time))
        return model
def build_memory(self):
return []
def train_network(self, dic_exp_conf):
if (dic_exp_conf['PRETRAIN'] or dic_exp_conf['AGGREGATE']):
epochs = 1000
else:
epochs = self.dic_agent_conf['EPOCHS']
batch_size = min(self.dic_agent_conf['BATCH_SIZE'], len(self.Y))
early_stopping = EarlyStopping(monitor='val_loss', patience=self.dic_agent_conf['PATIENCE'], verbose=0, mode='min')
hist = self.q_network.fit(self.Xs, self.Y_total, batch_size=batch_size, epochs=epochs, shuffle=False, verbose=2, validation_split=0.3, callbacks=[early_stopping, TensorBoard(log_dir='./temp.tensorboard')])
def build_network_from_copy(self, network_copy):
network_structure = network_copy.to_json()
network_weights = network_copy.get_weights()
network = model_from_json(network_structure, custom_objects={'RepeatVector3D': RepeatVector3D})
network.set_weights(network_weights)
if self.att_regulatization:
network.compile(optimizer=RMSprop(lr=self.dic_agent_conf['LEARNING_RATE']), loss=([self.dic_agent_conf['LOSS_FUNCTION'] for i in range(self.num_agents)] + ['kullback_leibler_divergence']), loss_weights=[1, self.dic_agent_conf['rularization_rate']])
else:
network.compile(optimizer=RMSprop(lr=self.dic_agent_conf['LEARNING_RATE']), loss=self.dic_agent_conf['LOSS_FUNCTION'], loss_weights=[1, 0])
return network
def load_network(self, file_name, file_path=None):
if (file_path == None):
file_path = self.dic_path['PATH_TO_MODEL']
self.q_network = load_model(os.path.join(file_path, ('%s.h5' % file_name)), custom_objects={'RepeatVector3D': RepeatVector3D})
print(('succeed in loading model %s' % file_name))
def load_network_bar(self, file_name, file_path=None):
if (file_path == None):
file_path = self.dic_path['PATH_TO_MODEL']
self.q_network_bar = load_model(os.path.join(file_path, ('%s.h5' % file_name)), custom_objects={'RepeatVector3D': RepeatVector3D})
print(('succeed in loading model %s' % file_name))
def save_network(self, file_name):
self.q_network.save(os.path.join(self.dic_path['PATH_TO_MODEL'], ('%s.h5' % file_name)))
def save_network_bar(self, file_name):
self.q_network_bar.save(os.path.join(self.dic_path['PATH_TO_MODEL'], ('%s.h5' % file_name))) |
class GeneralGraph(Graph, ABC):
def __init__(self, nodes: List[Node]):
self.nodes: List[Node] = nodes
self.num_vars: int = len(nodes)
node_map: Dict[(Node, int)] = {}
for i in range(self.num_vars):
node = nodes[i]
node_map[node] = i
self.node_map: Dict[(Node, int)] = node_map
self.graph: ndarray = np.zeros((self.num_vars, self.num_vars), np.dtype(int))
self.dpath: ndarray = np.zeros((self.num_vars, self.num_vars), np.dtype(int))
self.reconstitute_dpath([])
self.ambiguous_triples: List[Tuple[(Node, Node, Node)]] = []
self.underline_triples: List[Tuple[(Node, Node, Node)]] = []
self.dotted_underline_triples: List[Tuple[(Node, Node, Node)]] = []
self.attributes = {}
self.pattern = False
self.pag = False
def adjust_dpath(self, i: int, j: int):
dpath = self.dpath
dpath[(j, i)] = 1
for k in range(self.num_vars):
if (dpath[(i, k)] == 1):
dpath[(j, k)] = 1
if (dpath[(k, j)] == 1):
dpath[(k, i)] = 1
self.dpath = dpath
def reconstitute_dpath(self, edges: List[Edge]):
self.dpath = np.zeros((self.num_vars, self.num_vars), np.dtype(int))
for i in range(self.num_vars):
self.adjust_dpath(i, i)
while (len(edges) > 0):
edge = edges.pop()
node1 = edge.get_node1()
node2 = edge.get_node2()
i = self.node_map[node1]
j = self.node_map[node2]
if self.is_parent_of(node1, node2):
self.adjust_dpath(i, j)
elif self.is_parent_of(node2, node1):
self.adjust_dpath(j, i)
def collect_ancestors(self, node: Node, ancestors: List[Node]):
if (node in ancestors):
return
ancestors.append(node)
parents = self.get_parents(node)
if parents:
for parent in parents:
self.collect_ancestors(parent, ancestors)
def add_directed_edge(self, node1: Node, node2: Node):
i = self.node_map[node1]
j = self.node_map[node2]
self.graph[(j, i)] = 1
self.graph[(i, j)] = (- 1)
self.adjust_dpath(i, j)
def add_edge(self, edge: Edge):
node1 = edge.get_node1()
node2 = edge.get_node2()
endpoint1 = str(edge.get_endpoint1())
endpoint2 = str(edge.get_endpoint2())
i = self.node_map[node1]
j = self.node_map[node2]
e1 = self.graph[(i, j)]
e2 = self.graph[(j, i)]
bidirected = ((e2 == 1) and (e1 == 1))
existing_edge = ((not bidirected) and ((e2 != 0) or (e1 != 0)))
if (endpoint1 == 'TAIL'):
if existing_edge:
return False
if (endpoint2 == 'TAIL'):
if bidirected:
self.graph[(j, i)] = Endpoint.TAIL_AND_ARROW.value
self.graph[(i, j)] = Endpoint.TAIL_AND_ARROW.value
else:
self.graph[(j, i)] = (- 1)
self.graph[(i, j)] = (- 1)
elif (endpoint2 == 'ARROW'):
if bidirected:
self.graph[(j, i)] = Endpoint.ARROW_AND_ARROW.value
self.graph[(i, j)] = Endpoint.TAIL_AND_ARROW.value
else:
self.graph[(j, i)] = 1
self.graph[(i, j)] = (- 1)
self.adjust_dpath(i, j)
elif (endpoint2 == 'CIRCLE'):
if bidirected:
return False
else:
self.graph[(j, i)] = 2
self.graph[(i, j)] = (- 1)
else:
return False
else:
if (endpoint1 == 'ARROW'):
if (endpoint2 == 'ARROW'):
if existing_edge:
if ((e1 == 2) or (e2 == 2)):
return False
if (self.graph[(j, i)] == Endpoint.ARROW.value):
self.graph[(j, i)] = Endpoint.ARROW_AND_ARROW.value
else:
self.graph[(j, i)] = Endpoint.TAIL_AND_ARROW.value
if (self.graph[(i, j)] == Endpoint.ARROW.value):
self.graph[(i, j)] = Endpoint.ARROW_AND_ARROW.value
else:
self.graph[(i, j)] = Endpoint.TAIL_AND_ARROW.value
else:
self.graph[(j, i)] = Endpoint.ARROW.value
self.graph[(i, j)] = Endpoint.ARROW.value
else:
return False
elif (endpoint1 == 'CIRCLE'):
if existing_edge:
return False
if (endpoint2 == 'ARROW'):
if bidirected:
return False
else:
self.graph[(j, i)] = 1
self.graph[(i, j)] = 2
elif (endpoint2 == 'CIRCLE'):
if bidirected:
return False
else:
self.graph[(j, i)] = 2
self.graph[(i, j)] = 2
else:
return False
else:
return False
return True
def add_node(self, node: Node) -> bool:
if (node in self.nodes):
return False
nodes = self.nodes
nodes.append(node)
self.nodes = nodes
self.num_vars = (self.num_vars + 1)
self.node_map[node] = (self.num_vars - 1)
row = np.zeros((self.num_vars - 1))
graph = np.vstack((self.graph, row))
dpath = np.vstack((self.dpath, row))
col = np.zeros(self.num_vars)
graph = np.column_stack((graph, col))
dpath = np.column_stack((dpath, col))
self.graph = graph
self.dpath = dpath
self.adjust_dpath((self.num_vars - 1), (self.num_vars - 1))
return True
def clear(self):
self.nodes = []
self.num_vars = 0
self.node_map = {}
self.graph = np.zeros((self.num_vars, self.num_vars), np.dtype(int))
self.dpath = np.zeros((self.num_vars, self.num_vars), np.dtype(int))
def contains_edge(self, edge: Edge) -> bool:
endpoint1 = str(edge.get_endpoint1())
endpoint2 = str(edge.get_endpoint2())
node1 = edge.get_node1()
node2 = edge.get_node2()
i = self.node_map[node1]
j = self.node_map[node2]
e1 = self.graph[(i, j)]
e2 = self.graph[(j, i)]
if (endpoint1 == 'TAIL'):
if (endpoint2 == 'TAIL'):
if (((e2 == (- 1)) and (e1 == (- 1))) or ((e2 == Endpoint.TAIL_AND_ARROW.value) and (e1 == Endpoint.TAIL_AND_ARROW.value))):
return True
else:
return False
elif (endpoint2 == 'ARROW'):
if (((e1 == (- 1)) and (e2 == 1)) or ((e1 == Endpoint.TAIL_AND_ARROW.value) and (e2 == Endpoint.ARROW_AND_ARROW.value))):
return True
else:
return False
elif (endpoint2 == 'CIRCLE'):
if ((e1 == (- 1)) and (e2 == 2)):
return True
else:
return False
else:
return False
elif (endpoint1 == 'ARROW'):
if (endpoint2 == 'ARROW'):
if (((e1 == Endpoint.ARROW.value) and (e2 == Endpoint.ARROW.value)) or ((e1 == Endpoint.TAIL_AND_ARROW.value) and (e2 == Endpoint.TAIL_AND_ARROW.value)) or ((e1 == Endpoint.ARROW_AND_ARROW.value) or (e2 == Endpoint.ARROW_AND_ARROW.value))):
return True
else:
return False
else:
return False
elif (endpoint1 == 'CIRCLE'):
if (endpoint2 == 'ARROW'):
if ((e1 == 2) and (e2 == 1)):
return True
else:
return False
elif (endpoint2 == 'CIRCLE'):
if ((e1 == 2) and (e2 == 2)):
return True
else:
return False
else:
return False
else:
return False
def contains_node(self, node: Node) -> bool:
node_list = self.nodes
return (node in node_list)
def exists_directed_cycle(self) -> bool:
utils = GraphUtils()
for node in self.nodes:
if utils.exists_directed_path_from_to_breadth_first(node, node, self):
return True
return False
def exists_trek(self, node1: Node, node2: Node) -> bool:
for node in self.nodes:
if (self.is_ancestor_of(node, node1) and self.is_ancestor_of(node, node2)):
return True
return False
def __eq__(self, other):
if isinstance(other, GeneralGraph):
sorted_list = self.nodes.sort()
if ((sorted_list == other.nodes.sort()) and np.array_equal(self.graph, other.graph)):
return True
else:
return False
else:
return False
def get_adjacent_nodes(self, node: Node) -> List[Node]:
j = self.node_map[node]
adj_list: List[Node] = []
for i in range(self.num_vars):
if ((not (self.graph[(j, i)] == 0)) and (not (self.graph[(i, j)] == 0))):
node2 = self.nodes[i]
adj_list.append(node2)
return adj_list
def get_parents(self, node) -> List[Node]:
j = self.node_map[node]
parents: List[Node] = []
for i in range(self.num_vars):
if (((self.graph[(i, j)] == (- 1)) and (self.graph[(j, i)] == 1)) or ((self.graph[(i, j)] == Endpoint.TAIL_AND_ARROW.value) and (self.graph[(j, i)] == Endpoint.ARROW_AND_ARROW.value))):
node2 = self.nodes[i]
parents.append(node2)
return parents
def get_ancestors(self, nodes: List[Node]) -> List[Node]:
if (not isinstance(nodes, list)):
raise TypeError('Must be a list of nodes')
ancestors: List[Node] = []
for node in nodes:
self.collect_ancestors(node, ancestors)
return ancestors
def get_children(self, node: Node) -> List[Node]:
i = self.node_map[node]
children: List[Node] = []
for j in range(self.num_vars):
if (((self.graph[(j, i)] == 1) and (self.graph[(i, j)] == (- 1))) or ((self.graph[(j, i)] == Endpoint.ARROW_AND_ARROW.value) and (self.graph[(i, j)] == Endpoint.TAIL_AND_ARROW.value))):
node2 = self.nodes[j]
children.append(node2)
return children
def get_indegree(self, node: Node) -> int:
i = self.node_map[node]
indegree = 0
for j in range(self.num_vars):
if (self.graph[(i, j)] == 1):
indegree = (indegree + 1)
elif (self.graph[(i, j)] == Endpoint.ARROW_AND_ARROW.value):
indegree = (indegree + 2)
return indegree
def get_outdegree(self, node: Node) -> int:
i = self.node_map[node]
outdegree = 0
for j in range(self.num_vars):
if ((self.graph[(i, j)] == (- 1)) or (self.graph[(i, j)] == Endpoint.TAIL_AND_ARROW.value)):
outdegree = (outdegree + 1)
return outdegree
def get_degree(self, node: Node) -> int:
i = self.node_map[node]
degree = 0
for j in range(self.num_vars):
if ((self.graph[(i, j)] == 1) or (self.graph[(i, j)] == (- 1)) or (self.graph[(i, j)] == 2)):
degree = (degree + 1)
elif (self.graph[(i, j)] != 0):
degree = (degree + 2)
return degree
def get_max_degree(self) -> int:
nodes = self.nodes
max_degree = (- 1)
for node in nodes:
deg = self.get_degree(node)
if (deg > max_degree):
max_degree = deg
return max_degree
def get_node(self, name: str) -> (Node | None):
for node in self.nodes:
if (node.get_name() == name):
return node
return None
def get_nodes(self) -> List[Node]:
return self.nodes
def get_node_names(self) -> List[str]:
node_names: List[str] = []
for node in self.nodes:
node_names.append(node.get_name())
return node_names
def get_num_edges(self) -> int:
edges = 0
for i in range(self.num_vars):
for j in range((i + 1), self.num_vars):
if ((self.graph[(i, j)] == 1) or (self.graph[(i, j)] == (- 1)) or (self.graph[(i, j)] == 2)):
edges = (edges + 1)
elif (self.graph[(i, j)] != 0):
edges = (edges + 2)
return edges
def get_num_connected_edges(self, node: Node) -> int:
i = self.node_map[node]
edges = 0
for j in range(self.num_vars):
if ((self.graph[(j, i)] == 1) or (self.graph[(j, i)] == (- 1)) or (self.graph[(j, i)] == 2)):
edges = (edges + 1)
elif (self.graph[(j, i)] != 0):
edges = (edges + 2)
return edges
def get_num_nodes(self) -> int:
return self.num_vars
def is_adjacent_to(self, node1: Node, node2: Node) -> bool:
i = self.node_map[node1]
j = self.node_map[node2]
return (self.graph[(j, i)] != 0)
def is_ancestor_of(self, node1: Node, node2: Node) -> bool:
i = self.node_map[node1]
j = self.node_map[node2]
return (self.dpath[(j, i)] == 1)
def is_child_of(self, node1: Node, node2: Node) -> bool:
i = self.node_map[node1]
j = self.node_map[node2]
return (((self.graph[(j, i)] == Endpoint.TAIL.value) and (self.graph[(i, j)] == Endpoint.ARROW.value)) or (self.graph[(j, i)] == Endpoint.TAIL_AND_ARROW.value))
def is_parent_of(self, node1: Node, node2: Node) -> bool:
i = self.node_map[node1]
j = self.node_map[node2]
return (((self.graph[(j, i)] == Endpoint.ARROW.value) and (self.graph[(i, j)] == Endpoint.TAIL.value)) or (self.graph[(i, j)] == Endpoint.TAIL_AND_ARROW.value))
def is_proper_ancestor_of(self, node1: Node, node2: Node) -> bool:
return (self.is_ancestor_of(node1, node2) and (not (node1 == node2)))
def is_proper_descendant_of(self, node1: Node, node2: Node) -> bool:
return (self.is_descendant_of(node1, node2) and (not (node1 == node2)))
def is_descendant_of(self, node1: Node, node2: Node) -> bool:
return self.is_ancestor_of(node2, node1)
def get_edge(self, node1: Node, node2: Node) -> (Edge | None):
i = self.node_map[node1]
j = self.node_map[node2]
end_1 = self.graph[(i, j)]
end_2 = self.graph[(j, i)]
if (end_1 == 0):
return None
edge = Edge(node1, node2, Endpoint(end_1), Endpoint(end_2))
return edge
def get_directed_edge(self, node1: Node, node2: Node) -> (Edge | None):
i = self.node_map[node1]
j = self.node_map[node2]
end_1 = self.graph[(i, j)]
end_2 = self.graph[(j, i)]
if ((end_1 > 1) or (end_1 == 0) or ((end_1 == (- 1)) and (end_2 == (- 1)))):
return None
edge = Edge(node1, node2, Endpoint(end_1), Endpoint(end_2))
return edge
def get_node_edges(self, node: Node) -> List[Edge]:
i = self.node_map[node]
edges: List[Edge] = []
for j in range(self.num_vars):
node2 = self.nodes[j]
if ((self.graph[(j, i)] == 1) or (self.graph[(j, i)] == (- 1)) or (self.graph[(j, i)] == 2)):
edges.append(self.get_edge(node, node2))
elif ((self.graph[(j, i)] == Endpoint.TAIL_AND_ARROW.value) and (self.graph[(i, j)] == Endpoint.ARROW_AND_ARROW.value)):
edges.append(Edge(node, node2, Endpoint.ARROW, Endpoint.TAIL))
edges.append(Edge(node, node2, Endpoint.ARROW, Endpoint.ARROW))
elif ((self.graph[(j, i)] == Endpoint.ARROW_AND_ARROW.value) and (self.graph[(i, j)] == Endpoint.TAIL_AND_ARROW.value)):
edges.append(Edge(node, node2, Endpoint.TAIL, Endpoint.ARROW))
edges.append(Edge(node, node2, Endpoint.ARROW, Endpoint.ARROW))
elif ((self.graph[(j, i)] == Endpoint.TAIL_AND_ARROW.value) and (self.graph[(i, j)] == Endpoint.TAIL_AND_ARROW.value)):
edges.append(Edge(node, node2, Endpoint.TAIL, Endpoint.TAIL))
edges.append(Edge(node, node2, Endpoint.ARROW, Endpoint.ARROW))
return edges
def get_graph_edges(self) -> List[Edge]:
edges: List[Edge] = []
for i in range(self.num_vars):
node = self.nodes[i]
for j in range((i + 1), self.num_vars):
node2 = self.nodes[j]
if ((self.graph[(j, i)] == 1) or (self.graph[(j, i)] == (- 1)) or (self.graph[(j, i)] == 2)):
edges.append(self.get_edge(node, node2))
elif ((self.graph[(j, i)] == Endpoint.TAIL_AND_ARROW.value) and (self.graph[(i, j)] == Endpoint.ARROW_AND_ARROW.value)):
edges.append(Edge(node, node2, Endpoint.ARROW, Endpoint.TAIL))
edges.append(Edge(node, node2, Endpoint.ARROW, Endpoint.ARROW))
elif ((self.graph[(j, i)] == Endpoint.ARROW_AND_ARROW.value) and (self.graph[(i, j)] == Endpoint.TAIL_AND_ARROW.value)):
edges.append(Edge(node, node2, Endpoint.TAIL, Endpoint.ARROW))
edges.append(Edge(node, node2, Endpoint.ARROW, Endpoint.ARROW))
elif ((self.graph[(j, i)] == Endpoint.TAIL_AND_ARROW.value) and (self.graph[(i, j)] == Endpoint.TAIL_AND_ARROW.value)):
edges.append(Edge(node, node2, Endpoint.TAIL, Endpoint.TAIL))
edges.append(Edge(node, node2, Endpoint.ARROW, Endpoint.ARROW))
return edges
def get_endpoint(self, node1: Node, node2: Node) -> (Endpoint | None):
edge = self.get_edge(node1, node2)
if edge:
return edge.get_proximal_endpoint(node2)
else:
return None
def is_def_noncollider(self, node1: Node, node2: Node, node3: Node) -> bool:
edges = self.get_node_edges(node2)
circle12 = False
circle23 = False
for edge in edges:
_node1 = (edge.get_distal_node(node2) == node1)
_node3 = (edge.get_distal_node(node2) == node3)
if (_node1 and edge.points_toward(node1)):
return True
if (_node3 and edge.points_toward(node3)):
return True
if (_node1 and (edge.get_proximal_endpoint(node2) == Endpoint.CIRCLE)):
circle12 = True
if (_node3 and (edge.get_proximal_endpoint(node2) == Endpoint.CIRCLE)):
circle23 = True
if (circle12 and circle23 and (not self.is_adjacent_to(node1, node2))):
return True
return False
def is_def_collider(self, node1: Node, node2: Node, node3: Node) -> bool:
edge1 = self.get_edge(node1, node2)
edge2 = self.get_edge(node2, node3)
if ((edge1 is None) or (edge2 is None)):
return False
return ((str(edge1.get_proximal_endpoint(node2)) == 'ARROW') and (str(edge2.get_proximal_endpoint(node2)) == 'ARROW'))
def is_def_unshielded_collider(self, node1: Node, node2: Node, node3: Node) -> bool:
return (self.is_def_collider(node1, node2, node3) and (not self.is_directly_connected_to(node1, node3)))
def is_dconnected_to(self, node1: Node, node2: Node, z: List[Node]) -> bool:
utils = GraphUtils()
return utils.is_dconnected_to(node1, node2, z, self)
def is_dseparated_from(self, node1: Node, node2: Node, z: List[Node]) -> bool:
return (not self.is_dconnected_to(node1, node2, z))
def is_pattern(self) -> bool:
return self.pattern
def set_pattern(self, pat: bool):
self.pattern = pat
def is_pag(self) -> bool:
return self.pag
def set_pag(self, pag: bool):
self.pag = pag
def is_directed_from_to(self, node1: Node, node2: Node) -> bool:
i = self.node_map[node1]
j = self.node_map[node2]
return ((self.graph[(j, i)] == 1) and (self.graph[(i, j)] == (- 1)))
def is_undirected_from_to(self, node1: Node, node2: Node) -> bool:
i = self.node_map[node1]
j = self.node_map[node2]
return ((self.graph[(j, i)] == (- 1)) and (self.graph[(i, j)] == (- 1)))
def is_directly_connected_to(self, node1: Node, node2: Node) -> bool:
i = self.node_map[node1]
j = self.node_map[node2]
return (not ((self.graph[(j, i)] == 0) and (self.graph[(i, j)] == 0)))
def is_exogenous(self, node: Node) -> bool:
return (self.get_indegree(node) == 0)
def get_nodes_into(self, node: Node, endpoint: Endpoint) -> List[Node]:
i = self.node_map[node]
nodes: List[Node] = []
if (str(endpoint) == 'ARROW'):
for j in range(self.num_vars):
if ((self.graph[(i, j)] == 1) or (self.graph[(i, j)] == Endpoint.ARROW_AND_ARROW.value)):
node2 = self.nodes[j]
nodes.append(node2)
elif (str(endpoint) == 'TAIL'):
for j in range(self.num_vars):
if ((self.graph[(i, j)] == (- 1)) or (self.graph[(i, j)] == Endpoint.TAIL_AND_ARROW.value)):
node2 = self.nodes[j]
nodes.append(node2)
elif (str(endpoint) == 'CIRCLE'):
for j in range(self.num_vars):
if (self.graph[(i, j)] == 2):
node2 = self.nodes[j]
nodes.append(node2)
return nodes
def get_nodes_out_of(self, node: Node, endpoint: Endpoint) -> List[Node]:
i = self.node_map[node]
nodes: List[Node] = []
if (str(endpoint) == 'ARROW'):
for j in range(self.num_vars):
if ((self.graph[(j, i)] == 1) or (self.graph[(j, i)] == Endpoint.ARROW_AND_ARROW.value)):
node2 = self.nodes[j]
nodes.append(node2)
elif (str(endpoint) == 'TAIL'):
for j in range(self.num_vars):
if ((self.graph[(j, i)] == (- 1)) or (self.graph[(j, i)] == Endpoint.TAIL_AND_ARROW.value)):
node2 = self.nodes[j]
nodes.append(node2)
elif (str(endpoint) == 'CIRCLE'):
for j in range(self.num_vars):
if (self.graph[(j, i)] == 2):
node2 = self.nodes[j]
nodes.append(node2)
return nodes
def remove_edge(self, edge: Edge):
node1 = edge.get_node1()
node2 = edge.get_node2()
i = self.node_map[node1]
j = self.node_map[node2]
out_of = self.graph[(j, i)]
in_to = self.graph[(i, j)]
end1 = edge.get_numerical_endpoint1()
end2 = edge.get_numerical_endpoint2()
is_fully_directed = (self.is_parent_of(node1, node2) or self.is_parent_of(node2, node1))
if ((out_of == Endpoint.TAIL_AND_ARROW.value) and (in_to == Endpoint.TAIL_AND_ARROW.value)):
if (end1 == Endpoint.ARROW.value):
self.graph[(j, i)] = (- 1)
self.graph[(i, j)] = (- 1)
elif (end1 == (- 1)):
self.graph[(i, j)] = Endpoint.ARROW.value
self.graph[(j, i)] = Endpoint.ARROW.value
elif ((out_of == Endpoint.ARROW_AND_ARROW.value) and (in_to == Endpoint.TAIL_AND_ARROW.value)):
if (end1 == Endpoint.ARROW.value):
self.graph[(j, i)] = 1
self.graph[(i, j)] = (- 1)
elif (end1 == (- 1)):
self.graph[(j, i)] = Endpoint.ARROW.value
self.graph[(i, j)] = Endpoint.ARROW.value
elif ((out_of == Endpoint.TAIL_AND_ARROW.value) and (in_to == Endpoint.ARROW_AND_ARROW.value)):
if (end1 == Endpoint.ARROW.value):
self.graph[(j, i)] = (- 1)
self.graph[(i, j)] = 1
elif (end1 == (- 1)):
self.graph[(j, i)] = Endpoint.ARROW.value
self.graph[(i, j)] = Endpoint.ARROW.value
elif ((end1 == in_to) and (end2 == out_of)):
self.graph[(j, i)] = 0
self.graph[(i, j)] = 0
if is_fully_directed:
self.reconstitute_dpath(self.get_graph_edges())
def remove_connecting_edge(self, node1: Node, node2: Node):
i = self.node_map[node1]
j = self.node_map[node2]
self.graph[(j, i)] = 0
self.graph[(i, j)] = 0
def remove_connecting_edges(self, node1: Node, node2: Node):
i = self.node_map[node1]
j = self.node_map[node2]
self.graph[(j, i)] = 0
self.graph[(i, j)] = 0
def remove_edges(self, edges: List[Edge]):
for edge in edges:
self.remove_edge(edge)
def remove_node(self, node: Node):
i = self.node_map[node]
graph = self.graph
graph = np.delete(graph, i, axis=0)
graph = np.delete(graph, i, axis=1)
self.graph = graph
nodes = self.nodes
nodes.remove(node)
self.nodes = nodes
node_map = {}
for (i, node) in enumerate(self.nodes):
node_map[node] = i
self.node_map = node_map
self.num_vars -= 1
self.reconstitute_dpath(self.get_graph_edges())
def remove_nodes(self, nodes: List[Node]):
for node in nodes:
self.remove_node(node)
def subgraph(self, nodes: List[Node]):
subgraph = GeneralGraph(nodes)
graph = self.graph
nodes_to_delete = []
for i in range(self.num_vars):
if (not (self.nodes[i] in nodes)):
nodes_to_delete.append(i)
graph = np.delete(graph, nodes_to_delete, axis=0)
graph = np.delete(graph, nodes_to_delete, axis=1)
subgraph.graph = graph
subgraph.reconstitute_dpath(subgraph.get_graph_edges())
return subgraph
def __str__(self):
utils = GraphUtils()
return utils.graph_string(self)
def transfer_nodes_and_edges(self, graph):
for node in graph.nodes:
self.add_node(node)
for edge in graph.get_graph_edges():
self.add_edge(edge)
def transfer_attributes(self, graph):
graph.attributes = self.attributes
def get_ambiguous_triples(self) -> List[Tuple[(Node, Node, Node)]]:
return self.ambiguous_triples
def get_underlines(self) -> List[Tuple[(Node, Node, Node)]]:
return self.underline_triples
def get_dotted_underlines(self) -> List[Tuple[(Node, Node, Node)]]:
return self.dotted_underline_triples
def is_ambiguous_triple(self, node1: Node, node2: Node, node3: Node) -> bool:
return ((node1, node2, node3) in self.ambiguous_triples)
def is_underline_triple(self, node1: Node, node2: Node, node3: Node) -> bool:
return ((node1, node2, node3) in self.underline_triples)
def is_dotted_underline_triple(self, node1: Node, node2: Node, node3: Node) -> bool:
return ((node1, node2, node3) in self.dotted_underline_triples)
def add_ambiguous_triple(self, node1: Node, node2: Node, node3: Node):
self.ambiguous_triples.append((node1, node2, node3))
def add_underline_triple(self, node1: Node, node2: Node, node3: Node):
    """Record (node1, node2, node3) as an underlined triple."""
    self.underline_triples.append((node1, node2, node3))
def add_dotted_underline_triple(self, node1: Node, node2: Node, node3: Node):
    """Record (node1, node2, node3) as a dotted-underlined triple."""
    self.dotted_underline_triples.append((node1, node2, node3))
def remove_ambiguous_triple(self, node1: Node, node2: Node, node3: Node):
    """Drop the ambiguous triple; raises ValueError if it is absent."""
    self.ambiguous_triples.remove((node1, node2, node3))
def remove_underline_triple(self, node1: Node, node2: Node, node3: Node):
    """Drop the underlined triple; raises ValueError if it is absent."""
    self.underline_triples.remove((node1, node2, node3))
def remove_dotted_underline_triple(self, node1: Node, node2: Node, node3: Node):
    """Drop the dotted-underlined triple; raises ValueError if absent."""
    self.dotted_underline_triples.remove((node1, node2, node3))
def set_ambiguous_triples(self, triples: List[Tuple[(Node, Node, Node)]]):
    """Replace the ambiguous-triple list wholesale (no copy is made)."""
    self.ambiguous_triples = triples
def set_underline_triples(self, triples: List[Tuple[(Node, Node, Node)]]):
    """Replace the underlined-triple list wholesale (no copy is made)."""
    self.underline_triples = triples
def set_dotted_underline_triples(self, triples: List[Tuple[(Node, Node, Node)]]):
    """Replace the dotted-underlined-triple list wholesale (no copy)."""
    self.dotted_underline_triples = triples
def get_causal_ordering(self) -> List[Node]:
    """Return the nodes in a causal order, computed by GraphUtils."""
    return GraphUtils().get_causal_order(self)
def is_parameterizable(self, node: Node) -> bool:
    """Always True: every node of this graph type is parameterizable."""
    return True
def is_time_lag_model(self) -> bool:
    """Always False: this graph type does not model time lags."""
    return False
def get_sepset(self, node1: Node, node2: Node) -> List[Node]:
    """Return a separating set for *node1* and *node2*, via GraphUtils."""
    return GraphUtils().get_sepset(node1, node2, self)
def set_nodes(self, nodes: List[Node]):
    """Replace the node list; the count must match the current graph size."""
    if len(nodes) != self.num_vars:
        raise ValueError('Sorry, there is a mismatch in the number of variables you are trying to set.')
    self.nodes = nodes
def get_all_attributes(self):
    """Return the full attribute dict (the live object, not a copy)."""
    return self.attributes
def get_attribute(self, key):
    """Return the attribute stored under *key*; raises KeyError if missing."""
    return self.attributes[key]
def remove_attribute(self, key):
    """Delete *key* from the attribute dict; raises KeyError if missing.

    Bug fix: the original wrote ``self.attributes.pop[key]`` — subscripting
    the bound ``pop`` method — which raised TypeError and never removed
    anything. It must be a call, ``pop(key)``.
    """
    self.attributes.pop(key)
def add_attribute(self, key, value):
    """Store *value* under *key*, overwriting any existing entry."""
    self.attributes[key] = value
def get_node_map(self) -> Dict[(Node, int)]:
    """Return the node -> adjacency-index map (the live dict, not a copy)."""
    return self.node_map
def load_pos_conv_layer(full_name, value, pos_conv_embeddings, unused_weights):
    """Route one positional-convolution checkpoint tensor into the model.

    The checkpoint name looks like ``...pos_conv.<layer>.<type>.<param>``.
    Only type id 0 (the conv itself) is mapped; any other type id is
    recorded in *unused_weights* and skipped.
    """
    suffix = full_name.split('pos_conv.')[-1]
    parts = suffix.split('.')
    layer_id = int(parts[0])
    if int(parts[1]) != 0:
        unused_weights.append(full_name)
        return
    # parts[-1] is the parameter name (e.g. 'weight' or 'bias').
    set_weights(full_name, pos_conv_embeddings, value, f'layers.{layer_id}.conv.{parts[-1]}')
def infer_typing_attr(node: Subscript, ctx: (context.InferenceContext | None)=None) -> Iterator[ClassDef]:
    """Infer a subscripted ``typing`` attribute (e.g. ``typing.Generic[T]``).

    Raises UseInferenceDefault whenever the subscripted value is not a
    typing name this brain handles, so astroid falls back to its default
    inference.
    """
    try:
        value = next(node.value.infer())
    except (InferenceError, StopIteration) as exc:
        raise UseInferenceDefault from exc
    # Only handle names from the typing module that are not plain aliases.
    if ((not value.qname().startswith('typing.')) or (value.qname() in TYPING_ALIAS)):
        raise UseInferenceDefault
    if (isinstance(value, ClassDef) and (value.qname() in {'typing.Generic', 'typing.Annotated', 'typing_extensions.Annotated'})):
        # Give the class a __class_getitem__ so subscripting it succeeds.
        func_to_add = _extract_single_node(CLASS_GETITEM_TEMPLATE)
        value.locals['__class_getitem__'] = [func_to_add]
        # If this subscript is a base of a class whose slots were cached,
        # invalidate that cache entry (keyed on the bound slots method).
        if (isinstance(node.parent, ClassDef) and (node in node.parent.bases) and getattr(node.parent, '__cache', None)):
            cache = node.parent.__cache
            if (cache.get(node.parent.slots) is not None):
                del cache[node.parent.slots]
        # Pin inference for this node to the resolved typing class.
        node._explicit_inference = (lambda node, context: iter([value]))
        return iter([value])
    # Otherwise synthesize a placeholder class from the template and infer it.
    node = extract_node(TYPING_TYPE_TEMPLATE.format(value.qname().split('.')[(- 1)]))
    return node.infer(context=ctx)
# NOTE(review): the bare string below is almost certainly a stripped
# `@patch(...)` mock decorator (it supplies `dialog_mock`) — confirm
# against the original test module.
('PyQt6.QtWidgets.QFileDialog.getOpenFileName')
def test_on_action_open(dialog_mock, view, qtbot):
    """Opening a .bee file loads exactly one unselected item into the scene."""
    root = os.path.dirname(__file__)
    filename = os.path.join(root, 'assets', 'test1item.bee')
    # Make the (patched) file dialog return our fixture file.
    dialog_mock.return_value = (filename, None)
    view.on_loading_finished = MagicMock()
    view.scene.cancel_crop_mode = MagicMock()
    view.on_action_open()
    # Loading is asynchronous; wait until the finished callback has fired.
    qtbot.waitUntil((lambda : (view.on_loading_finished.called is True)))
    assert (len(view.scene.items()) == 1)
    item = view.scene.items()[0]
    assert (item.isSelected() is False)
    assert item.pixmap()
    view.on_loading_finished.assert_called_once_with(filename, [])
    view.scene.cancel_crop_mode.assert_called_once_with()
def RegisterPythonwin(register=True):
    """Create (or delete) Windows registry entries for Pythonwin.

    When *register* is true, adds an App Paths entry and 'Edit with
    Pythonwin' shell commands for .py files; otherwise deletes those keys.
    Always notifies the shell that file associations changed.
    """
    import os
    lib_dir = distutils.sysconfig.get_python_lib(plat_specific=1)
    classes_root = get_root_hkey()
    pythonwin_exe = os.path.join(lib_dir, 'Pythonwin', 'Pythonwin.exe')
    pythonwin_edit_command = (pythonwin_exe + ' /edit "%1"')
    # (key, optional sub-key, default value) triples to create or remove.
    keys_vals = [('Software\\Microsoft\\Windows\\CurrentVersion\\App Paths\\Pythonwin.exe', '', pythonwin_exe), ('Software\\Classes\\Python.File\\shell\\Edit with Pythonwin', 'command', pythonwin_edit_command), ('Software\\Classes\\Python.NoConFile\\shell\\Edit with Pythonwin', 'command', pythonwin_edit_command)]
    try:
        if register:
            for (key, sub_key, val) in keys_vals:
                hkey = winreg.CreateKey(classes_root, key)
                if sub_key:
                    hkey = winreg.CreateKey(hkey, sub_key)
                # Set the key's default (unnamed) value.
                winreg.SetValueEx(hkey, None, 0, winreg.REG_SZ, val)
                hkey.Close()
        else:
            for (key, sub_key, val) in keys_vals:
                try:
                    winreg.DeleteKey(classes_root, key)
                except OSError as why:
                    winerror = getattr(why, 'winerror', why.errno)
                    # winerror 2 (file/key not found) means it is already
                    # gone — ignore; re-raise anything else.
                    if (winerror != 2):
                        raise
    finally:
        # Tell Explorer the file associations changed.
        from win32com.shell import shell, shellcon
        shell.SHChangeNotify(shellcon.SHCNE_ASSOCCHANGED, shellcon.SHCNF_IDLIST, None, None)
def test_update_if_modified_field_changed(sqldb):
    """update_if_modified() is False when clean and True after a field edit."""
    cursor = sqldb.cursor()
    # Seed one rule and one device binding directly into the DB.
    rules_db.RulesRow(RuleID=501, Name='Long Press Rule', Type=MOCK_RULE_TYPE, State=1).update_db(cursor)
    rules_db.RuleDevicesRow(RuleDevicePK=1, RuleID=501, DeviceID=MOCK_UDN).update_db(cursor)
    db = rules_db.RulesDb(sqldb, MOCK_UDN, MOCK_NAME)
    (rule, device) = db.rules_for_device()[0]
    # Freshly loaded rows are unmodified.
    assert (db.update_if_modified() is False)
    rule.State = 0
    # Mutating a loaded row must be detected as a modification.
    assert (db.update_if_modified() is True)
def get_act_fn(name='relu'):
    """Look up an activation function by *name*.

    Preference order depends on the scripting/export flags: memory-efficient
    custom versions first (plain eager only), then the exportable swish
    special case, then JIT variants, finally the default table. Returns
    None for a falsy *name*; raises KeyError for an unknown one.
    """
    if not name:
        return None
    # Custom autograd (memory-efficient) impls are unsafe under JIT/export.
    if not (is_no_jit() or is_exportable() or is_scriptable()) and name in _ACT_FN_ME:
        return _ACT_FN_ME[name]
    if is_exportable() and name in ('silu', 'swish'):
        return swish
    if not (is_no_jit() or is_exportable()) and name in _ACT_FN_JIT:
        return _ACT_FN_JIT[name]
    return _ACT_FN_DEFAULT[name]
class APEv2File(AudioFile):
    """AudioFile backed by an APEv2 tag (read/written via mutagen)."""

    # Tag keys never exposed to the user nor written back.
    IGNORE = ['file', 'index', 'introplay', 'dummy']
    # APEv2 key (lowercased) -> internal key; SNART is the reverse map.
    TRANS = {'subtitle': 'version', 'track': 'tracknumber', 'disc': 'discnumber', 'catalog': 'labelid', 'year': 'date', 'record location': 'location', 'album artist': 'albumartist', 'debut album': 'originalalbum', 'record date': 'recordingdate', 'original artist': 'originalartist', 'mixartist': 'remixer'}
    SNART = {v: k for (k, v) in TRANS.items()}
    can_change_images = True

    def __init__(self, filename, audio=None):
        """Populate values from *audio*'s tags if given, else from *filename*."""
        if audio:
            tag = (audio.tags or {})
        else:
            with translate_errors():
                try:
                    tag = mutagen.apev2.APEv2(filename)
                except mutagen.apev2.APENoHeaderError:
                    tag = {}
        for (key, value) in tag.items():
            if (get_cover_type(key, value) is not None):
                self.has_images = True
            key = self.TRANS.get(key.lower(), key.lower())
            # Only text values are imported; multi-values join on newlines.
            if ((value.kind == mutagen.apev2.TEXT) and (key not in self.IGNORE)):
                self[key] = '\n'.join(list(value))
        self.sanitize(filename)

    @staticmethod
    def __titlecase(key):
        """Normalize *key* to APEv2 casing (known acronyms stay upper).

        Bug fix: this helper takes no ``self`` but is invoked as
        ``self.__titlecase(key)``; without @staticmethod the instance was
        passed as *key* and every call raised TypeError (the decorator was
        presumably lost in extraction).
        """
        if (key.lower() in ['isrc', 'isbn', 'ean/upc']):
            return key.upper()
        else:
            return key.title()

    def can_change(self, key=None):
        """True if *key* (or any key, when None) may be edited and saved."""
        if (key is None):
            return True
        else:
            return (super().can_change(key) and (key.lower() not in self.IGNORE) and (key.lower() not in self.TRANS) and mutagen.apev2.is_valid_apev2_key(self.__titlecase(key)))

    def write(self):
        """Write current values back to the file's APEv2 tag."""
        with translate_errors():
            try:
                tag = mutagen.apev2.APEv2(self['~filename'])
            except mutagen.apev2.APENoHeaderError:
                tag = mutagen.apev2.APEv2()
        # Clear existing editable text entries before re-writing.
        for key in list(tag.keys()):
            value = tag[key]
            if ((value.kind == mutagen.apev2.TEXT) and (key.lower() not in self.IGNORE)):
                del tag[key]
        lower = self.as_lowercased()
        for key in lower.realkeys():
            new_key = self.SNART.get(key, key)
            if (new_key in self.IGNORE):
                continue
            new_key = self.__titlecase(new_key)
            tag[new_key] = lower.list(key)
        with translate_errors():
            tag.save(self['~filename'])
        self.sanitize()

    def get_primary_image(self):
        """Return the front cover if present, else any cover, else None."""
        try:
            tag = mutagen.apev2.APEv2(self['~filename'])
        except Exception:
            return
        primary = None
        for (key, value) in tag.items():
            primary = (key, value)
            cover_type = get_cover_type(key, value)
            if (cover_type == APICType.COVER_FRONT):
                break
        if (primary is not None):
            return parse_cover(*primary)

    def get_images(self):
        """Return all embedded images, sorted by their cover sort key."""
        try:
            tag = mutagen.apev2.APEv2(self['~filename'])
        except Exception:
            return []
        images = []
        for (key, value) in tag.items():
            image = parse_cover(key, value)
            if (image is not None):
                images.append(image)
        images.sort(key=(lambda c: c.sort_key))
        return images

    def clear_images(self):
        """Remove every embedded image from the tag and save."""
        with translate_errors():
            try:
                tag = mutagen.apev2.APEv2(self['~filename'])
            except mutagen.apev2.APENoHeaderError:
                return
            for (key, value) in tag.items():
                cover_type = get_cover_type(key, value)
                if (cover_type is not None):
                    del tag[key]
            tag.save()
        self.has_images = False

    def set_image(self, image):
        """Replace all embedded images with *image* and save."""
        with translate_errors():
            try:
                tag = mutagen.apev2.APEv2(self['~filename'])
            except mutagen.apev2.APENoHeaderError:
                tag = mutagen.apev2.APEv2()
        for (key, value) in tag.items():
            cover_type = get_cover_type(key, value)
            if (cover_type is not None):
                del tag[key]
        (key, value) = write_cover(image)
        tag[key] = value
        with translate_errors():
            tag.save(self['~filename'])
        self.has_images = True
class Minor(nn.Module):
    """Small MLP network with BigGAN-style configuration plumbing.

    Despite the large constructor surface (shared with the main generator),
    the forward pass only uses the eight linear layers: four produce a
    noise-space vector and four a shared-embedding-space vector.
    """
    def __init__(self, G_ch=64, dim_z=128, bottom_width=4, resolution=128, G_kernel_size=3, G_attn='64', n_classes=1000, num_G_SVs=1, num_G_SV_itrs=1, G_shared=True, shared_dim=0, hier=False, cross_replica=False, mybn=False, G_activation=nn.ReLU(inplace=False), G_lr=5e-05, G_B1=0.0, G_B2=0.999, adam_eps=1e-08, BN_eps=1e-05, SN_eps=1e-12, G_mixed_precision=False, G_fp16=False, G_init='ortho', skip_init=False, no_optim=False, G_param='SN', norm_style='bn', **kwargs):
        # NOTE(review): G_activation=nn.ReLU(...) is a mutable default
        # shared across instances — harmless for a stateless ReLU, but
        # worth confirming upstream.
        super(Minor, self).__init__()
        self.ch = G_ch
        self.dim_z = dim_z
        self.bottom_width = bottom_width
        self.resolution = resolution
        self.kernel_size = G_kernel_size
        self.attention = G_attn
        self.n_classes = n_classes
        self.G_shared = G_shared
        # shared_dim <= 0 means "use the latent dimension".
        self.shared_dim = (shared_dim if (shared_dim > 0) else dim_z)
        self.hier = hier
        self.cross_replica = cross_replica
        self.mybn = mybn
        self.activation = G_activation
        self.init = G_init
        self.G_param = G_param
        self.norm_style = norm_style
        self.BN_eps = BN_eps
        self.SN_eps = SN_eps
        self.fp16 = G_fp16
        self.arch = M_arch(self.ch, self.attention)[resolution]
        # Hierarchical z: split the latent into per-block chunks.
        if self.hier:
            self.num_slots = (len(self.arch['in_channels']) + 1)
            self.z_chunk_size = (self.dim_z // self.num_slots)
            self.dim_z = (self.z_chunk_size * self.num_slots)
        else:
            self.num_slots = 1
            self.z_chunk_size = 0
        # 'SN' selects spectrally-normalized conv/linear layers.
        if (self.G_param == 'SN'):
            self.which_conv = functools.partial(layers.SNConv2d, kernel_size=3, padding=1, eps=self.SN_eps)
            self.which_linear = functools.partial(layers.SNLinear, num_svs=num_G_SVs, num_itrs=num_G_SV_itrs, eps=self.SN_eps)
        else:
            self.which_conv = functools.partial(nn.Conv2d, kernel_size=3, padding=1)
            self.which_linear = nn.Linear
        self.which_embedding = nn.Embedding
        bn_linear = (functools.partial(self.which_linear, bias=False) if self.G_shared else self.which_embedding)
        self.which_bn = functools.partial(layers.ccbn, which_linear=bn_linear, cross_replica=self.cross_replica, mybn=self.mybn, input_size=((self.shared_dim + self.z_chunk_size) if self.G_shared else self.n_classes), norm_style=self.norm_style, eps=self.BN_eps)
        self.shared = (self.which_embedding(n_classes, self.shared_dim) if G_shared else layers.identity())
        # Two 4-layer stacks (see forward): linear..linear4 for the noise
        # head, linear5..linear8 for the shared-embedding head.
        self.linear = self.which_linear(self.dim_z, self.dim_z)
        self.linear2 = self.which_linear(self.dim_z, self.dim_z)
        self.linear3 = self.which_linear(self.dim_z, self.dim_z)
        self.linear4 = self.which_linear(self.dim_z, self.dim_z)
        self.linear5 = self.which_linear(self.dim_z, self.dim_z)
        self.linear6 = self.which_linear(self.dim_z, self.dim_z)
        self.linear7 = self.which_linear(self.dim_z, self.dim_z)
        self.linear8 = self.which_linear(self.dim_z, self.shared_dim)
        if (not skip_init):
            self.init_weights()
        if no_optim:
            return
        (self.lr, self.B1, self.B2, self.adam_eps) = (G_lr, G_B1, G_B2, adam_eps)
        if G_mixed_precision:
            print('Using fp16 adam in G...')
            import utils
            self.optim = utils.Adam16(params=self.parameters(), lr=self.lr, betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)
        else:
            self.optim = optim.Adam(params=self.parameters(), lr=self.lr, betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)
    def init_weights(self):
        """Initialize conv/linear/embedding weights per ``self.init`` style."""
        self.param_count = 0
        for module in self.modules():
            if (isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear) or isinstance(module, nn.Embedding)):
                if (self.init == 'ortho'):
                    init.orthogonal_(module.weight)
                elif (self.init == 'N02'):
                    init.normal_(module.weight, 0, 0.02)
                elif (self.init in ['glorot', 'xavier']):
                    init.xavier_uniform_(module.weight)
                else:
                    print('Init style not recognized...')
                self.param_count += sum([p.data.nelement() for p in module.parameters()])
        print(('Param count for Ms initialized parameters: %d' % self.param_count))
    def forward(self, z):
        """Map latent *z* to a (noise-space, shared-embedding-space) pair."""
        h_noise = self.linear(z)
        h_noise = self.linear2(h_noise)
        h_noise = self.linear3(h_noise)
        h_noise = self.linear4(h_noise)
        h_label = self.linear5(z)
        h_label = self.linear6(h_label)
        h_label = self.linear7(h_label)
        h_label = self.linear8(h_label)
        return (h_noise, h_label)
class Source(object):
    """Cursor over a pattern string with optional verbose-mode skipping.

    When ``ignore_space`` is set, whitespace and ``#``-to-end-of-line
    comments are transparently skipped while reading (regex verbose mode).

    Bug fix: ``match`` used the Python 2-only builtin ``xrange``, which
    raises NameError on Python 3; replaced with ``range`` (identical
    behavior on both major versions for this usage).
    """

    def __init__(self, s):
        self.pos = 0
        self.s = s
        self.ignore_space = False

    def at_end(self):
        """True if only skippable characters (or nothing) remain."""
        s = self.s
        pos = self.pos
        if self.ignore_space:
            while True:
                if (pos >= len(s)):
                    break
                elif is_space(ord(s[pos])):
                    pos += 1
                elif (s[pos] == u'#'):
                    # Skip a comment through the end of the line.
                    pos = s.find(u'\n', pos)
                    if (pos < 0):
                        pos = len(s)
                else:
                    break
        return (pos >= len(s))

    def get(self):
        """Consume and return the next character, or '' at end of input."""
        s = self.s
        pos = self.pos
        if self.ignore_space:
            while True:
                if (pos >= len(s)):
                    return u''
                elif is_space(ord(s[pos])):
                    pos += 1
                elif (s[pos] == u'#'):
                    pos = s.find(u'\n', pos)
                    if (pos < 0):
                        pos = len(s)
                else:
                    break
        try:
            ch = s[pos]
            self.pos = (pos + 1)
            return ch
        except IndexError:
            self.pos = pos
            return u''
        except ValueError:
            self.pos = len(s)
            return u''

    def match(self, substr, consume=True):
        """True if *substr* comes next; advances past it when *consume*."""
        s = self.s
        pos = self.pos
        if self.ignore_space:
            # Verbose mode: match character by character, skipping space
            # and comments between pattern characters.
            for c in substr:
                while True:
                    if (pos >= len(s)):
                        return False
                    elif is_space(ord(s[pos])):
                        pos += 1
                    elif (s[pos] == u'#'):
                        pos = s.find(u'\n', pos)
                        if (pos < 0):
                            pos = len(s)
                    else:
                        break
                if (s[pos] != c):
                    return False
                pos += 1
            if consume:
                self.pos = pos
            return True
        else:
            if ((pos + len(substr)) <= len(s)):
                matches = True
                # Fix: was ``xrange`` (Python 2 builtin) — NameError on Python 3.
                for i in range(len(substr)):
                    if (s[(pos + i)] != substr[i]):
                        matches = False
            else:
                matches = False
            if (not matches):
                return False
            if consume:
                self.pos = (pos + len(substr))
            return True

    def expect(self, substr):
        """Consume *substr*, or raise RegexpError if it is not next."""
        if (not self.match(substr)):
            raise RegexpError(('Missing %s' % str(substr)))
def sanity_check_dependencies():
    """Log a warning when numpy or requests are below gym's minimum versions."""
    import numpy
    import requests
    import six
    loose = distutils.version.LooseVersion
    if loose(numpy.__version__) < loose('1.10.4'):
        logger.warn("You have 'numpy' version %s installed, but 'gym' requires at least 1.10.4. HINT: upgrade via 'pip install -U numpy'.", numpy.__version__)
    if loose(requests.__version__) < loose('2.0'):
        logger.warn("You have 'requests' version %s installed, but 'gym' requires at least 2.0. HINT: upgrade via 'pip install -U requests'.", requests.__version__)
class TwoInputsModel(torch.nn.Module):
    """Toy CNN taking two image inputs whose feature maps are summed.

    Branch A: conv1 -> bn1 (16 channels). Branch B: conv2 -> bn2 -> conv3
    -> adaptive pool to 18x18 (16 channels). The element-wise sum goes
    through ReLU, max-pool, flatten, and a linear classifier.
    """

    def __init__(self, num_classes=3):
        super(TwoInputsModel, self).__init__()
        self.conv1 = torch.nn.Conv2d(3, 16, kernel_size=2, stride=2, padding=2, bias=False)
        self.bn1 = torch.nn.BatchNorm2d(16)
        self.conv2 = torch.nn.Conv2d(3, 8, kernel_size=3, stride=2, padding=2)
        self.bn2 = torch.nn.BatchNorm2d(8)
        self.conv3 = torch.nn.Conv2d(8, 16, kernel_size=3, stride=2, padding=2)
        self.ada = torch.nn.AdaptiveAvgPool2d(18)
        self.relu1 = torch.nn.ReLU(inplace=True)
        self.maxpool = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=1)
        # 16 channels * 10 * 10 after max-pooling the 18x18 sum.
        self.fc = torch.nn.Linear(1600, num_classes)

    def forward(self, *inputs):
        """Return class logits for the pair (inputs[0], inputs[1])."""
        branch_a = self.bn1(self.conv1(inputs[0]))
        branch_b = self.ada(self.conv3(self.bn2(self.conv2(inputs[1]))))
        merged = self.maxpool(self.relu1(branch_a + branch_b))
        flat = merged.view(merged.size(0), -1)
        return self.fc(flat)
def test_importorskip_dev_module(monkeypatch) -> None:
    """importorskip accepts a dev-version module and skips a missing one."""
    try:
        # Fake an installed module carrying a dev-style version string.
        fake = types.ModuleType('mockmodule')
        fake.__version__ = '0.13.0.dev-43290'
        monkeypatch.setitem(sys.modules, 'mockmodule', fake)
        imported = pytest.importorskip('mockmodule', minversion='0.12.0')
        assert (imported == fake)
        # A module that does not exist must raise the skip exception.
        with pytest.raises(pytest.skip.Exception):
            pytest.importorskip('mockmodule1', minversion='0.14.0')
    except pytest.skip.Exception:
        assert False, f'spurious skip: {ExceptionInfo.from_current()}'
class ResNet(nn.Module):
    """ResNet backbone whose classifier is a cosine-similarity linear layer."""

    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, last_phase=True)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = modified_linear.CosineLinear((512 * block.expansion), num_classes)
        # He init for convs; BatchNorm affine params start at identity.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1, last_phase=False):
        """Build one residual stage of *blocks* blocks.

        When *last_phase* is set, the final block is constructed with
        ``last=True`` (its meaning is defined by the *block* type).
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion))
        stage = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        if last_phase:
            stage.extend(block(self.inplanes, planes) for _ in range(1, blocks - 1))
            stage.append(block(self.inplanes, planes, last=True))
        else:
            stage.extend(block(self.inplanes, planes) for _ in range(1, blocks))
        return nn.Sequential(*stage)

    def forward(self, x):
        """Return cosine-classifier logits for image batch *x*."""
        out = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        out = self.layer4(self.layer3(self.layer2(self.layer1(out))))
        out = self.avgpool(out)
        return self.fc(out.view(out.size(0), -1))
class LSTM(nn.Module):
    """LSTM encoder (bidirectional by default) over padded token embeddings."""

    def __init__(self, word_embedding_dimension: int, hidden_dim: int, num_layers: int=1, dropout: float=0, bidirectional: bool=True):
        nn.Module.__init__(self)
        # Keys serialized by get_config_dict()/save().
        self.config_keys = ['word_embedding_dimension', 'hidden_dim', 'num_layers', 'dropout', 'bidirectional']
        self.word_embedding_dimension = word_embedding_dimension
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.dropout = dropout
        self.bidirectional = bidirectional
        # Both directions are concatenated, doubling the output width.
        self.embeddings_dimension = (2 * hidden_dim) if self.bidirectional else hidden_dim
        self.encoder = nn.LSTM(word_embedding_dimension, hidden_dim, num_layers=num_layers, dropout=dropout, bidirectional=bidirectional, batch_first=True)

    def forward(self, features):
        """Encode features['token_embeddings'] in place, honoring lengths."""
        embeddings = features['token_embeddings']
        # Clamp so zero-length sentences do not break packing.
        lengths = torch.clamp(features['sentence_lengths'], min=1)
        packed_input = nn.utils.rnn.pack_padded_sequence(embeddings, lengths, batch_first=True, enforce_sorted=False)
        encoded = self.encoder(packed_input)
        padded = nn.utils.rnn.pad_packed_sequence(encoded[0], batch_first=True)[0]
        features.update({'token_embeddings': padded})
        return features

    def get_word_embedding_dimension(self) -> int:
        """Width of the token embeddings produced by forward()."""
        return self.embeddings_dimension

    def tokenize(self, text: str) -> List[int]:
        raise NotImplementedError()

    def save(self, output_path: str):
        """Write config (JSON) and model weights into *output_path*."""
        with open(os.path.join(output_path, 'lstm_config.json'), 'w') as config_file:
            json.dump(self.get_config_dict(), config_file, indent=2)
        torch.save(self.state_dict(), os.path.join(output_path, 'pytorch_model.bin'))

    def get_config_dict(self):
        """Constructor arguments as a plain dict."""
        return {key: self.__dict__[key] for key in self.config_keys}

    def load(input_path: str):
        """Rebuild an LSTM from a directory written by save()."""
        with open(os.path.join(input_path, 'lstm_config.json'), 'r') as config_file:
            config = json.load(config_file)
        state = torch.load(os.path.join(input_path, 'pytorch_model.bin'))
        model = LSTM(**config)
        model.load_state_dict(state)
        return model
def extract_and_save_image(dataset, save_dir, discard, label2name):
    """Dump dataset images into per-class folders under *save_dir*.

    Does nothing if *save_dir* already exists. Samples whose label equals
    *discard* are dropped. Class folders are named ``<new_label>_<class>``
    using the module-level ``new_name2label`` mapping.
    """
    if osp.exists(save_dir):
        print('Folder "{}" already exists'.format(save_dir))
        return
    print('Extracting images to "{}" ...'.format(save_dir))
    mkdir_if_missing(save_dir)
    for index in range(len(dataset)):
        (image, label) = dataset[index]
        if (label == discard):
            continue
        class_name = label2name[label]
        relabeled = new_name2label[class_name]
        class_dir = osp.join(save_dir, ((str(relabeled).zfill(3) + '_') + class_name))
        mkdir_if_missing(class_dir)
        image.save(osp.join(class_dir, (str((index + 1)).zfill(5) + '.jpg')))
class cvode(IntegratorBase):
    """SciPy-style integrator backed by the SUNDIALS CVODE solver (_cvode)."""
    # Mapping of public option strings to CVODE enum constants.
    valid_methods = {'adams': _cvode.CV_ADAMS, 'bdf': _cvode.CV_BDF}
    valid_iterations = {'functional': _cvode.CV_FUNCTIONAL, 'newton': _cvode.CV_NEWTON}
    def __init__(self, method='adams', iteration='functional', rtol=1e-06, atol=1e-12):
        """Validate and store solver options; no CVODE state is created yet."""
        if (method not in cvode.valid_methods):
            raise Exception(('%s is not a valid value for method -- please use one of the following: %s' % (method, [m for m in cvode.valid_methods])))
        if (iteration not in cvode.valid_iterations):
            raise Exception(('%s is not a valid value for iteration -- please use one of the following: %s' % (iteration, [m for m in cvode.valid_iterations])))
        self.method = method
        self.iteration = iteration
        self.rtol = rtol
        self.atol = atol
        self.first_step = True
    def reset(self, n, has_jac):
        """Allocate CVODE memory for an *n*-dimensional system."""
        if has_jac:
            raise Exception('has_jac not yet supported')
        self.success = 1
        self.y = _cvode.NVector(([0] * n))
        self.first_step = True
        self.cvode_mem = _cvode.CVodeCreate(cvode.valid_methods[self.method], cvode.valid_iterations[self.iteration])
        # Scalar relative / scalar absolute tolerances (CV_SS).
        _cvode.CVodeMalloc(self.cvode_mem, cvode_rhs_func, 0.0, self.y, _cvode.CV_SS, self.rtol, self.atol)
        _cvode.CVDense(self.cvode_mem, n)
    def run(self, f, jac, y0, t0, t1, f_params, jac_params):
        """Integrate from t0 to t1 and return (y, t1); sets success=0 on error."""
        if self.first_step:
            # Seed the state vector and re-initialize the solver at t0.
            self.y[:] = y0[:]
            _cvode.CVodeReInit(self.cvode_mem, cvode_rhs_func, t0, self.y, _cvode.CV_SS, self.rtol, self.atol)
        # Pass (f, f_params) through to the RHS callback as opaque user data.
        f_data = ctypes.cast(ctypes.pointer(ctypes.py_object((f, f_params))), ctypes.c_void_p)
        _cvode.CVodeSetFdata(self.cvode_mem, f_data)
        tret = _cvode.realtype()
        flag = _cvode.CVode(self.cvode_mem, t1, self.y, tret, _cvode.CV_NORMAL)
        if (flag < 0):
            self.success = 0
            print(('cvodes error: %d (see SUNDIALS manual for more information)' % flag))
        return (self.y, t1)
def run(config):
    """Load a trained generator and run the sampling tasks selected in *config*.

    Depending on config flags, this saves an .npz of samples, conditional
    sample sheets, interpolation sheets, random sample grids, and/or
    computes Inception Score / FID (optionally across a truncation sweep).

    Bug fix: removed a leftover ``pdb.set_trace()`` debugger breakpoint
    that halted every npz-sampling run.
    """
    state_dict = {'itr': 0, 'epoch': 0, 'save_num': 0, 'save_best_num': 0, 'best_IS': 0, 'best_FID': 999999, 'config': config}
    # Optionally adopt the config stored with the checkpoint (except a few
    # sampling-time overrides).
    if config['config_from_name']:
        utils.load_weights(None, None, state_dict, config['weights_root'], config['experiment_name'], config['load_weights'], None, strict=False, load_optim=False)
        for item in state_dict['config']:
            if (item not in ['z_var', 'base_root', 'batch_size', 'G_batch_size', 'use_ema', 'G_eval_mode']):
                config[item] = state_dict['config'][item]
    config['resolution'] = utils.imsize_dict[config['dataset']]
    config['n_classes'] = utils.nclass_dict[config['dataset']]
    config['G_activation'] = utils.activation_dict[config['G_nl']]
    config['D_activation'] = utils.activation_dict[config['D_nl']]
    config = utils.update_config_roots(config)
    # We only sample: skip weight init and optimizer construction.
    config['skip_init'] = True
    config['no_optim'] = True
    device = 'cuda'
    utils.seed_rng(config['seed'])
    torch.backends.cudnn.benchmark = True
    model = __import__(config['model'])
    experiment_name = (config['experiment_name'] if config['experiment_name'] else utils.name_from_config(config))
    print(('Experiment name is %s' % experiment_name))
    G = model.Generator(**config).cuda()
    utils.count_parameters(G)
    print('Loading weights...')
    # Load either the raw or the EMA copy of the generator weights.
    utils.load_weights((G if (not config['use_ema']) else None), None, state_dict, config['weights_root'], experiment_name, config['load_weights'], (G if (config['ema'] and config['use_ema']) else None), strict=False, load_optim=False)
    G_batch_size = max(config['G_batch_size'], config['batch_size'])
    (z_, y_) = utils.prepare_z_y(G_batch_size, G.dim_z, config['n_classes'], device=device, fp16=config['G_fp16'], z_var=config['z_var'])
    if config['G_eval_mode']:
        print('Putting G in eval mode..')
        G.eval()
    else:
        print(('G is in %s mode...' % ('training' if G.training else 'eval')))
    sample = functools.partial(utils.sample, G=G, z_=z_, y_=y_, config=config)
    if config['accumulate_stats']:
        print(('Accumulating standing stats across %d accumulations...' % config['num_standing_accumulations']))
        utils.accumulate_standing_stats(G, z_, y_, config['n_classes'], config['num_standing_accumulations'])
    if config['sample_npz']:
        (x, y) = ([], [])
        print(('Sampling %d images and saving them to npz...' % config['sample_num_npz']))
        for i in trange(int(np.ceil((config['sample_num_npz'] / float(G_batch_size))))):
            with torch.no_grad():
                (images, labels) = sample()
            # Map images from [-1, 1] to uint8 [0, 255].
            x += [np.uint8(((255 * (images.cpu().numpy() + 1)) / 2.0))]
            y += [labels.cpu().numpy()]
        x = np.concatenate(x, 0)[:config['sample_num_npz']]
        y = np.concatenate(y, 0)[:config['sample_num_npz']]
        print(('Images shape: %s, Labels shape: %s' % (x.shape, y.shape)))
        npz_filename = ('%s/%s/samples.npz' % (config['samples_root'], experiment_name))
        print(('Saving npz to %s...' % npz_filename))
        np.savez(npz_filename, **{'x': x, 'y': y})
    if config['sample_sheets']:
        print('Preparing conditional sample sheets...')
        utils.sample_sheet(G, classes_per_sheet=utils.classes_per_sheet_dict[config['dataset']], num_classes=config['n_classes'], samples_per_class=10, parallel=config['parallel'], samples_root=config['samples_root'], experiment_name=experiment_name, folder_number=config['sample_sheet_folder_num'], z_=z_)
    if config['sample_interps']:
        print('Preparing interp sheets...')
        for (fix_z, fix_y) in zip([False, False, True], [False, True, False]):
            utils.interp_sheet(G, num_per_sheet=16, num_midpoints=8, num_classes=config['n_classes'], parallel=config['parallel'], samples_root=config['samples_root'], experiment_name=experiment_name, folder_number=config['sample_sheet_folder_num'], sheet_number=0, fix_z=fix_z, fix_y=fix_y, device='cuda')
    if config['sample_random']:
        print('Preparing random sample sheet...')
        (images, labels) = sample()
        torchvision.utils.save_image(images.float(), ('%s/%s/random_samples.jpg' % (config['samples_root'], experiment_name)), nrow=int((G_batch_size ** 0.5)), normalize=True)
    get_inception_metrics = inception_utils.prepare_inception_metrics(config['dataset'], config['parallel'], config['no_fid'])
    def get_metrics():
        # Compute IS/FID with the current z variance and print a summary line.
        sample = functools.partial(utils.sample, G=G, z_=z_, y_=y_, config=config)
        (IS_mean, IS_std, FID) = get_inception_metrics(sample, config['num_inception_images'], num_splits=10, prints=False)
        outstring = ('Using %s weights ' % ('ema' if config['use_ema'] else 'non-ema'))
        outstring += ('in %s mode, ' % ('eval' if config['G_eval_mode'] else 'training'))
        outstring += ('with noise variance %3.3f, ' % z_.var)
        outstring += ('over %d images, ' % config['num_inception_images'])
        if (config['accumulate_stats'] or (not config['G_eval_mode'])):
            outstring += ('with batch size %d, ' % G_batch_size)
        if config['accumulate_stats']:
            outstring += ('using %d standing stat accumulations, ' % config['num_standing_accumulations'])
        outstring += ('Itr %d: PYTORCH UNOFFICIAL Inception Score is %3.3f +/- %3.3f, PYTORCH UNOFFICIAL FID is %5.4f' % (state_dict['itr'], IS_mean, IS_std, FID))
        print(outstring)
    if config['sample_inception_metrics']:
        print('Calculating Inception metrics...')
        get_metrics()
    if config['sample_trunc_curves']:
        # Sweep the truncation (noise variance) range "start_step_end".
        (start, step, end) = [float(item) for item in config['sample_trunc_curves'].split('_')]
        print(('Getting truncation values for variance in range (%3.3f:%3.3f:%3.3f)...' % (start, step, end)))
        for var in np.arange(start, (end + step), step):
            z_.var = var
            if config['accumulate_stats']:
                utils.accumulate_standing_stats(G, z_, y_, config['n_classes'], config['num_standing_accumulations'])
            get_metrics()
def mol_data_from_csv(csv_name: str):
    """Read molecule configurations from *csv_name*, keyed by 'name'.

    Empty cells become None, except 'multiplicity' which defaults to 1
    (non-empty values are parsed via float then truncated to int). The
    'name' column is removed from each per-molecule dict. Duplicate names
    keep the last row.
    """
    with open(csv_name, 'r') as csv_file:
        records = {}
        for raw in csv.DictReader(csv_file):
            entry = dict(raw)
            entry['smiles'] = entry['smiles'] or None
            entry['multiplicity'] = int(float(entry['multiplicity'])) if entry['multiplicity'] else 1
            entry['config_file'] = entry['config_file'] or None
            entry['restart'] = entry['restart'] or None
            entry['end'] = entry['end'] or None
            records[entry.pop('name')] = entry
    return records
class LiltConfig(PretrainedConfig):
    """Configuration for the LiLT (layout-aware language) model."""

    model_type = 'lilt'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', classifier_dropout=None, channel_shrink_ratio=4, max_2d_position_embeddings=1024, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # Transformer backbone.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        # Regularization and initialization.
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        # Embedding tables and layout-specific options.
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.position_embedding_type = position_embedding_type
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
def test_push_pull_manifest_list_duplicate_manifest(v22_protocol, basic_images, liveserver_session, app_reloader, data_model):
    """A manifest list referencing the same manifest twice round-trips."""
    credentials = ('devtable', 'password')
    options = ProtocolOptions()
    blobs = {}
    manifest = v22_protocol.build_schema2(basic_images, blobs, options)
    # Add the SAME manifest under two architectures to exercise dedup.
    builder = DockerSchema2ManifestListBuilder()
    builder.add_manifest(manifest, 'amd64', 'linux')
    builder.add_manifest(manifest, 'amd32', 'linux')
    manifestlist = builder.build()
    v22_protocol.push_list(liveserver_session, 'devtable', 'newrepo', 'latest', manifestlist, [manifest], blobs, credentials=credentials, options=options)
    v22_protocol.pull_list(liveserver_session, 'devtable', 'newrepo', 'latest', manifestlist, credentials=credentials, options=options)
# NOTE(review): the bare "()" below is almost certainly a stripped
# fixture decorator (e.g. `@pytest.fixture()`) — confirm against the
# original test module.
()
def no_qt(monkeypatch):
    """Fixture: simulate an environment without a usable Qt binding."""
    need_reload = False
    if _check_qt_installed():
        # Force qtpy to fail its backend lookup, then reload pyvistaqt.
        need_reload = True
        monkeypatch.setenv('QT_API', 'bad_name')
        sys.modules.pop('qtpy')
        importlib.reload(pyvistaqt)
        assert ('qtpy' not in sys.modules)
    (yield)
    # Restore the environment and the original module state.
    monkeypatch.undo()
    if need_reload:
        importlib.reload(pyvistaqt)
        assert ('qtpy' in sys.modules)
def analyze_enum_class_attribute_access(itype: Instance, name: str, mx: MemberContext) -> (Type | None):
    """Resolve access to an attribute on an Enum class object.

    Removed properties are reported as missing; dunder names fall through
    to regular analysis; everything else becomes a Literal of the member.
    """
    if name in ENUM_REMOVED_PROPS:
        return report_missing_attribute(mx.original_type, itype, name, mx)
    # Skip dunders such as __members__ (but not names that are all
    # underscores) so they get the default member analysis.
    is_dunder = name.startswith('__') and name.endswith('__') and name.replace('_', '') != ''
    if is_dunder:
        return None
    return itype.copy_modified(last_known_value=LiteralType(name, fallback=itype))
class SemanticAnalyzerPluginInterface():
    """Interface exposed to plugins during semantic analysis.

    Every method raises NotImplementedError; the semantic analyzer
    provides the concrete implementation.

    Bug fix: ``parse_str_literal`` had no body (its
    ``raise NotImplementedError`` was missing), which is a syntax error.
    """

    modules: dict[(str, MypyFile)]
    options: Options
    cur_mod_id: str
    msg: MessageBuilder

    def named_type(self, fullname: str, args: (list[Type] | None)=None) -> Instance:
        raise NotImplementedError

    def builtin_type(self, fully_qualified_name: str) -> Instance:
        raise NotImplementedError

    def named_type_or_none(self, fullname: str, args: (list[Type] | None)=None) -> (Instance | None):
        raise NotImplementedError

    def basic_new_typeinfo(self, name: str, basetype_or_fallback: Instance, line: int) -> TypeInfo:
        raise NotImplementedError

    def parse_bool(self, expr: Expression) -> (bool | None):
        raise NotImplementedError

    def parse_str_literal(self, expr: Expression) -> (str | None):
        raise NotImplementedError

    def fail(self, msg: str, ctx: Context, serious: bool=False, *, blocker: bool=False, code: (ErrorCode | None)=None) -> None:
        raise NotImplementedError

    def anal_type(self, t: Type, *, tvar_scope: (TypeVarLikeScope | None)=None, allow_tuple_literal: bool=False, allow_unbound_tvars: bool=False, report_invalid_types: bool=True, third_pass: bool=False) -> (Type | None):
        raise NotImplementedError

    def class_type(self, self_type: Type) -> Type:
        raise NotImplementedError

    def lookup_fully_qualified(self, name: str) -> SymbolTableNode:
        raise NotImplementedError

    def lookup_fully_qualified_or_none(self, name: str) -> (SymbolTableNode | None):
        raise NotImplementedError

    def lookup_qualified(self, name: str, ctx: Context, suppress_errors: bool=False) -> (SymbolTableNode | None):
        raise NotImplementedError

    def add_plugin_dependency(self, trigger: str, target: (str | None)=None) -> None:
        raise NotImplementedError

    def add_symbol_table_node(self, name: str, stnode: SymbolTableNode) -> Any:
        raise NotImplementedError

    def qualified_name(self, n: str) -> str:
        raise NotImplementedError

    def defer(self) -> None:
        raise NotImplementedError

    def final_iteration(self) -> bool:
        raise NotImplementedError

    def is_stub_file(self) -> bool:
        raise NotImplementedError

    def analyze_simple_literal_type(self, rvalue: Expression, is_final: bool) -> (Type | None):
        raise NotImplementedError
class Event(object):
    """Console input event; attributes (type, char, x, y, ...) are assigned
    externally by the console backend after construction."""

    def __init__(self, console, input):
        # Attributes are filled in by the caller; nothing to initialise here.
        pass

    def __repr__(self):
        """Render a human-readable summary based on the event type."""
        kind = self.type
        if kind in ('KeyPress', 'KeyRelease'):
            return "%s char='%s'%d keysym='%s' keycode=%d:%x state=%x keyinfo=%s" % (kind, self.char, ord(self.char), self.keysym, self.keycode, self.keycode, self.state, self.keyinfo)
        if kind in ('Motion', 'Button'):
            return '%s x=%d y=%d state=%x' % (kind, self.x, self.y, self.state)
        if kind == 'Configure':
            return '%s w=%d h=%d' % (kind, self.width, self.height)
        if kind in ('FocusIn', 'FocusOut'):
            return kind
        if kind == 'Menu':
            return '%s state=%x' % (kind, self.state)
        return 'unknown event type'
def _parse_pmt(payload):
    """Return the PID of the metadata elementary stream in a PMT, or None.

    Walks an MPEG-TS Program Map Table section payload and returns the PID
    of the first stream of type 6 (private data) whose descriptor loop has
    byte 0x52 at offset 0 and 0x87 at offset 2 (presumably the descriptor
    tags marking ID3/metadata carriage -- confirm against the TS spec).
    """
    table_id = payload[0]
    if (table_id != _TABLE_PMT):
        return None
    # section_length: low 4 bits of byte 1 plus all of byte 2.
    length = (((payload[1] & 15) << 8) | payload[2])
    # Skip the fixed header fields; the section ends at offset 3 + length.
    data = payload[8:(3 + length)]
    # Drop the trailing 4-byte CRC32.
    data = data[:(- 4)]
    # 12-bit program_info_length, then skip the program descriptor loop.
    meta_length = (((data[2] & 15) << 8) | data[3])
    stream = data[(4 + meta_length):]
    # Elementary-stream loop: type (1 byte), 13-bit PID, 12-bit info length,
    # then that many descriptor bytes.
    while stream:
        stream_type = stream[0]
        pid = (((stream[1] & 31) << 8) | stream[2])
        nbytes = (((stream[3] & 15) << 8) | stream[4])
        if (stream_type == 6):
            ptr = stream[5:(5 + nbytes)]
            # Descriptor loop: tag (1 byte), length (1 byte), payload.
            while ptr:
                if ((ptr[0] == 82) and (ptr[2] == 135)):
                    return pid
                ptr = ptr[(2 + ptr[1]):]
        stream = stream[(5 + nbytes):]
    return None
def read_freq_cpu(path, type_freq):
    """Read CPU frequency values from a sysfs-style cpufreq directory.

    Returns a dict with 'min' and 'max' (always read) and 'cur' when the
    corresponding `*_cur_freq` file exists and contains a plain integer.
    `type_freq` is the file-name prefix (e.g. 'scaling' or 'cpuinfo').
    """
    base = '{path}/cpufreq/{type_freq}'.format(path=path, type_freq=type_freq)
    freq = {}
    with open(base + '_min_freq', 'r') as handle:
        freq['min'] = int(handle.read())
    with open(base + '_max_freq', 'r') as handle:
        freq['max'] = int(handle.read())
    cur_file = base + '_cur_freq'
    if os.path.isfile(cur_file):
        with open(cur_file, 'r') as handle:
            raw = handle.read().strip()
            # Some drivers report non-numeric placeholders; skip those.
            if raw.isdigit():
                freq['cur'] = int(raw)
    return freq
def get_help(cmd: Optional[str]) -> str:
    """Capture ``pipx [cmd] --help`` output as a markdown code block.

    The interpreter's scripts directory is prepended to PATH so the right
    `pipx` resolves, and the user's home directory is replaced by '~' to
    keep the generated docs reproducible.
    """
    cli = ['pipx'] + ([cmd] if cmd else []) + ['--help']
    env = os.environ.copy()
    env['PATH'] = os.pathsep.join([str(Path(sys.executable).parent)] + env['PATH'].split(os.pathsep))
    help_text = check_output(cli, text=True, env=env)
    help_text = help_text.replace(str(Path('~').expanduser()), '~')
    return f'''
```
{' '.join(cli[2:])}
{help_text}
```
'''
class SwitchMetric(Metric):
    """Token- and sentence-level accuracy for switch/reordering predictions.

    `mode` selects the reported granularities ('all', 'token' or 'sentence').
    Ground-truth labels are clipped to [0, args.padding_size] before
    comparison.  When `use_lm` is set, the prediction at each sequence's
    separator position is forced back to 0 first.
    """
    def __init__(self, args: Namespace, mode='all'):
        super(SwitchMetric, self).__init__(args)
        self.args = args
        self.mode = mode  # 'all' | 'token' | 'sentence'
        self.amax = args.padding_size  # upper clip bound for labels
        self.use_lm = args.use_lm  # whether LM separator repair applies
    def __call__(self, gts: list, preds: list, mask: list) -> dict:
        """Return {'token': acc} and/or {'sentence': acc} according to mode."""
        ret = {}
        assert (len(gts) == len(preds))
        preds = self._repairlm_sep(preds, mask)
        if (self.mode == 'all'):
            ret['token'] = self._cal_token_level(gts, preds, mask)
            ret['sentence'] = self._cal_sentence_level(gts, preds)
        elif (self.mode == 'token'):
            ret['token'] = self._cal_token_level(gts, preds, mask)
        elif (self.mode == 'sentence'):
            ret['sentence'] = self._cal_sentence_level(gts, preds)
        else:
            raise Exception('SwitchMetric.__call__ occure some errors, invalid params `mode`.')
        return ret
    def _repairlm_sep(self, preds: list, mask: list):
        """Force the prediction at each sequence's last real position to 0.

        The position is derived from the attention mask: the first zero
        minus one, or the tail of the mask when it is fully set.
        NOTE(review): assumes numpy 0/1 mask rows -- confirm with callers.
        """
        if self.use_lm:
            seq_lens = [((np.where((mk == 0))[0][0] - 1) if (mk[(- 2)] != 1) else ((len(mk) - 2) if (mk[(- 1)] != 1) else (len(mk) - 1))) for mk in mask]
            for insid in range(len(seq_lens)):
                preds[insid][seq_lens[insid]] = 0
        return preds
    def _cal_token_level(self, gts: list, preds: list, mask: list) -> float:
        """Accuracy over real tokens only (positions where gts > 0)."""
        gts = np.clip(gts, a_min=0, a_max=self.amax)
        total_token = len(np.where((gts > 0))[0])
        # Padding positions trivially match; subtract them from the tally.
        externel_token = (np.array(mask).size - total_token)
        correct_token = (np.sum((np.array(gts) == np.array(preds))) - externel_token)
        return ((correct_token * 1.0) / total_token)
    def _cal_sentence_level(self, gts: list, preds: list, mask: list=None) -> float:
        """Fraction of sequences whose full prediction matches exactly."""
        total_sentence = len(gts)
        gts = np.clip(gts, a_min=0, a_max=self.amax)
        correct_sentence = sum([(1 if op.eq(gts[ins_idx].tolist(), preds[ins_idx].tolist()) else 0) for ins_idx in range(len(gts))])
        return ((correct_sentence * 1.0) / total_sentence)
def rand_throw():
vp = np.random.uniform(low=0, high=360)
goal = np.array([np.random.uniform(low=(- 0.3), high=0.3), np.random.uniform(low=(- 0.3), high=0.3)])
return dict(vp=vp, imsize=(64, 64), name='throw', goal=goal.tolist(), modelname='model/model_70000_3007.74_2728.77_268.42', modeldata='model/vdata_train.npy') |
class LidResults(Enum):
    """Indices of LID (low impact development) result fields.

    NOTE(review): the ordering appears to mirror a SWMM LID results layout
    (water-balance totals first, then per-layer flux terms) -- confirm
    against the SWMM toolkit headers before relying on field meanings.
    """
    inflow = 0
    evap = 1
    infil = 2
    surfFlow = 3
    drainFlow = 4
    initVol = 5
    finalVol = 6
    surfDepth = 7
    paveDepth = 8
    soilMoist = 9
    storDepth = 10
    dryTime = 11
    oldDrainFlow = 12
    newDrainFlow = 13
    pervArea = 14
    flowToPerv = 15
    evapRate = 16
    nativeInfil = 17
    surfInflow = 18
    surfInfil = 19
    surfEvap = 20
    surfOutflow = 21
    paveEvap = 22
    pavePerc = 23
    soilEvap = 24
    soilPerc = 25
    storInflow = 26
    storExfil = 27
    storEvap = 28
    storDrain = 29
class SawyerDisassembleV1Policy(Policy):
    """Scripted policy for the Sawyer disassemble-v1 task: grasp the wrench,
    lift it off the peg, then pull it away."""

    # FIX: the bare `_fully_parsed` line here was the remnant of stripped
    # decorators; `_parse_obs` is a static method validated by
    # `assert_fully_parsed`, matching the other scripted policies.
    @staticmethod
    @assert_fully_parsed
    def _parse_obs(obs):
        """Split the flat observation vector into named components."""
        return {'hand_pos': obs[:3], 'wrench_pos': obs[3:6], 'peg_pos': obs[9:], 'unused_info': obs[6:9]}

    def get_action(self, obs):
        """Return the action array: move toward the desired position and set
        the gripper effort."""
        o_d = self._parse_obs(obs)
        action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
        action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=10.0)
        action['grab_effort'] = self._grab_effort(o_d)
        return action.array

    @staticmethod
    def _desired_pos(o_d):
        """Waypoint logic: hover over the wrench, descend, lift off the peg,
        then retreat in -y."""
        pos_curr = o_d['hand_pos']
        pos_wrench = (o_d['wrench_pos'] + np.array([0.01, (- 0.01), 0.01]))
        pos_peg = (o_d['peg_pos'] + np.array([0.07, 0.0, 0.15]))
        if (np.linalg.norm((pos_curr[:2] - pos_wrench[:2])) > 0.02):
            return (pos_wrench + np.array([0.0, 0.0, 0.12]))
        elif (abs((pos_curr[2] - pos_wrench[2])) > 0.03):
            return pos_wrench
        elif (pos_wrench[2] < 0.12):
            return (pos_peg + np.array([0.0, 0.0, 0.1]))
        else:
            return (pos_curr + np.array([0.0, (- 0.1), 0.0]))

    @staticmethod
    def _grab_effort(o_d):
        """Close the gripper only once the hand is aligned with the wrench."""
        pos_curr = o_d['hand_pos']
        pos_wrench = (o_d['wrench_pos'] + np.array([0.01, 0.0, 0.0]))
        if ((np.linalg.norm((pos_curr[:2] - pos_wrench[:2])) > 0.02) or (abs((pos_curr[2] - pos_wrench[2])) > 0.08)):
            return 0.0
        else:
            return 0.8
def locate_cuda():
    """Locate the CUDA installation and return its key paths.

    Uses $CUDA_PATH when set; otherwise searches $PATH (plus the default
    /usr/local/cuda/bin) for the nvcc binary.  Returns a dict with 'home',
    'nvcc', 'include' and 'lib64' entries.

    Raises EnvironmentError when nvcc cannot be found or any resolved path
    does not exist.
    """
    if ('CUDA_PATH' in os.environ):
        home = os.environ['CUDA_PATH']
        print(('home = %s\n' % home))
        nvcc = pjoin(home, 'bin', nvcc_bin)
    else:
        default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
        nvcc = find_in_path(nvcc_bin, ((os.environ['PATH'] + os.pathsep) + default_path))
        if (nvcc is None):
            raise EnvironmentError('The nvcc binary could not be located in your $PATH. Either add it to your path, or set $CUDA_PATH')
        home = os.path.dirname(os.path.dirname(nvcc))
        print(('home = %s, nvcc = %s\n' % (home, nvcc)))
    cudaconfig = {'home': home, 'nvcc': nvcc, 'include': pjoin(home, 'include'), 'lib64': pjoin(home, lib_dir)}
    # BUG FIX: dict.iteritems() is Python 2 only and raises AttributeError
    # on Python 3; use items() instead.
    for (k, v) in cudaconfig.items():
        if (not os.path.exists(v)):
            raise EnvironmentError(('The CUDA %s path could not be located in %s' % (k, v)))
    return cudaconfig
def pytest_addoption(parser: Parser) -> None:
    """Register doctest ini options and command-line flags with pytest."""
    # Ini options shared by all doctest runs.
    parser.addini('doctest_optionflags', 'Option flags for doctests', type='args', default=['ELLIPSIS'])
    parser.addini('doctest_encoding', 'Encoding used for doctest files', default='utf-8')
    # Command-line flags live in the 'collect' group.
    group = parser.getgroup('collect')
    group.addoption('--doctest-modules', action='store_true', default=False, help='Run doctests in all .py modules', dest='doctestmodules')
    group.addoption('--doctest-report', type=str.lower, default='udiff', help='Choose another output format for diffs on doctest failure', choices=DOCTEST_REPORT_CHOICES, dest='doctestreport')
    group.addoption('--doctest-glob', action='append', default=[], metavar='pat', help='Doctests file matching pattern, default: test*.txt', dest='doctestglob')
    group.addoption('--doctest-ignore-import-errors', action='store_true', default=False, help='Ignore doctest ImportErrors', dest='doctest_ignore_import_errors')
    group.addoption('--doctest-continue-on-failure', action='store_true', default=False, help='For a given doctest, continue to run after the first failure', dest='doctest_continue_on_failure')
from unittest.mock import patch

# FIX: the two bare expressions that preceded this test were the remnants of
# stripped decorators; restored so the mock arguments are actually injected.
@patch('pypyr.moduleloader.get_module')
@patch.object(Step, 'invoke_step')
def test_run_pipeline_steps_with_retries(mock_invoke_step, mock_get_module):
    """A step with a retry block reruns invoke_step until it succeeds and
    leaves retryCounter in the context."""
    step = Step({'name': 'step1', 'retry': {'max': 0}})
    context = get_test_context()
    original_len = len(context)
    # First call fails, second succeeds -> two invocations expected.
    mock_invoke_step.side_effect = [ValueError('arb'), None]
    with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
        step.run_step(context)
    mock_logger_debug.assert_any_call('done')
    assert (mock_invoke_step.call_count == 2)
    mock_invoke_step.assert_called_with({'key1': 'value1', 'key2': 'value2', 'key3': 'value3', 'key4': [{'k4lk1': 'value4', 'k4lk2': 'value5'}, {'k4lk1': 'value6', 'k4lk2': 'value7'}], 'key5': False, 'key6': True, 'key7': 77, 'retryCounter': 2})
    # Only retryCounter was added to the context.
    assert (len(context) == (original_len + 1))
class BaseDB(DatabaseAPI):
    """Convenience mixin implementing the DatabaseAPI helpers on top of the
    mapping primitives (__getitem__/__setitem__/__delitem__)."""

    def set(self, key: bytes, value: bytes) -> None:
        """Store *value* under *key*."""
        self[key] = value

    def exists(self, key: bytes) -> bool:
        """Return whether *key* is present."""
        return key in self

    def __contains__(self, key: bytes) -> bool:
        # Subclasses may provide a faster `_exists` hook; fall back to the
        # default membership protocol otherwise.
        if hasattr(self, '_exists'):
            return self._exists(key)
        return super().__contains__(key)

    def delete(self, key: bytes) -> None:
        """Remove *key*; deleting an absent key is a no-op."""
        try:
            del self[key]
        except KeyError:
            return

    def __iter__(self) -> Iterator[bytes]:
        raise NotImplementedError('By default, DB classes cannot be iterated.')

    def __len__(self) -> int:
        raise NotImplementedError('By default, DB classes cannot return the total number of keys.')
class JSONOutputFormat(KVWriter):
    """Write each key/value batch as one JSON object per line (JSON Lines)."""

    def __init__(self, filename):
        self.file = open(filename, 'wt')

    def writekvs(self, kvs):
        """Serialise *kvs* to one JSON line and flush immediately."""
        for key, value in sorted(kvs.items()):
            # numpy scalars (anything carrying a dtype) are not JSON
            # serialisable; coerce them to plain floats in place.
            if hasattr(value, 'dtype'):
                kvs[key] = float(value)
        self.file.write(json.dumps(kvs) + '\n')
        self.file.flush()

    def close(self):
        self.file.close()
# FIX: the original first line `(params=[...])` was a bare keyword-argument
# tuple -- a syntax error left by a stripped decorator; restored as a
# parametrized pytest fixture.
@pytest.fixture(params=[lazy_fixture('example_git_ssh_url')])
def git_repo_factory(request, example_project):
    """Return a factory that initialises the example project as a git repo
    with test identity, no GPG signing, and an 'origin' remote taken from
    the fixture parameter."""
    def git_repo():
        repo = Repo.init(example_project.resolve())
        # Ensure the default branch name is deterministic across git versions.
        repo.git.branch('-M', 'main')
        with repo.config_writer('repository') as config:
            config.set_value('user', 'name', 'semantic release testing')
            # NOTE(review): this email value looks truncated by extraction;
            # confirm the intended address upstream.
            config.set_value('user', 'email', 'not_a_')
            config.set_value('commit', 'gpgsign', False)
        repo.create_remote(name='origin', url=request.param)
        return repo
    return git_repo
class TestToyDictionary():
    """Tests for parsing and compiling the toy OpenCorpora XML dictionary.

    NOTE(review): many string literals below are empty (''); they appear to
    be Cyrillic words stripped by a lossy extraction step -- the assertions
    are not meaningful as written and should be restored from upstream.
    """
    # Path to the toy dictionary shipped with the dev data.
    XML_PATH = os.path.join(os.path.dirname(__file__), '..', 'dev_data', 'toy_dict.xml')
    def test_parse_xml(self):
        """Parse the XML and spot-check version, links, grammemes, lexemes."""
        dct = parse_opencorpora_xml(self.XML_PATH)
        assert (dct.version == '0.92')
        assert (dct.revision == '389440')
        assert (dct.links[0] == ('5', '6', '1'))
        assert (len(dct.links) == 13)
        assert (dct.grammemes[1] == ('NOUN', 'POST', '', ' '))
        assert (len(dct.grammemes) == 114)
        assert (dct.lexemes['14'] == [('', 'INFN,impf,intr')])
        assert (dct.lexemes['111111'] == [])
        assert (dct.lexemes['222222'] == [])
    def test_convert_to_pymorphy2(self, tmpdir):
        """Compile the XML into pymorphy2 dicts and exercise the analyzer."""
        try:
            assert_can_create()
        except NotImplementedError as e:
            # Compilation is unsupported on this interpreter; skip the test.
            raise pytest.skip(str(e))
        out_path = str(tmpdir.join('dicts'))
        # Thresholds relaxed so the tiny toy dictionary survives compilation.
        options = {'min_paradigm_popularity': 0, 'min_ending_freq': 0, 'paradigm_prefixes': lang.ru.PARADIGM_PREFIXES}
        convert_to_pymorphy2(self.XML_PATH, out_path, source_name='toy', language_code='ru', overwrite=True, compile_options=options)
        morph = pymorphy2.MorphAnalyzer(out_path)
        assert (morph.tag('') == [morph.TagClass('INFN,impf,intr')])
        assert (morph.tag('')[0] == morph.tag('-')[0])
        assert ('Init' in morph.tag('')[0])
        assert ('Init' not in morph.tag('')[0])
        assert (morph.normal_forms('') == [''])
        assert (morph.normal_forms('') == [''])
def merge_two_slices(fgraph, slice1, len1, slice2, len2):
    """Merge two consecutive subtensor indexing operations into one.

    `slice1` is applied first to a dimension of length `len1`, producing a
    result of length `len2` to which `slice2` (a slice or an integer index)
    is applied.  Returns an equivalent single symbolic index (when `slice2`
    is an integer) or a `slice` of symbolic scalars.

    Out-of-range integer indices are canonicalised to `len1 + 1` (or
    `-len1 - 1`) so that the downstream bounds check still raises.
    `fgraph` is not used in the computation (kept for interface parity).
    """
    if (not isinstance(slice1, slice)):
        raise ValueError('slice1 should be of type `slice`')
    # Canonical forms: positive-step slices plus a `reverse` flag.
    (sl1, reverse1) = get_canonical_form_slice(slice1, len1)
    (sl2, reverse2) = get_canonical_form_slice(slice2, len2)
    if (not isinstance(sl2, slice)):
        # slice2 is an integer index into the result of slice1.
        if (reverse1 is None):
            # Forward slice1: translate the index through start/step.
            val = (sl1.start + (sl2 * sl1.step))
            # Guard empty result, index past the end, and negative index.
            val = switch(le(len2, 0), (len1 + 1), val)
            val = switch(ge(sl2, len2), (len1 + 1), val)
            val = switch(lt(sl2, 0), ((- len1) - 1), val)
            if sl1.step:
                val = switch(eq(sl1.step, 0), (len1 + 1), val)
            return val
        else:
            # slice1 may be reversed: pick between forward/backward mapping.
            p_val = (sl1.start + (sl2 * sl1.step))
            n_val = ((sl1.stop - 1) - (sl2 * sl1.step))
            val = switch(lt(reverse1, 0), n_val, p_val)
            val = switch(le(len2, 0), (len1 + 1), val)
            val = switch(ge(sl2, len2), (len1 + 1), val)
            val = switch(lt(sl2, 0), ((- len1) - 1), val)
            if sl1.step:
                val = switch(eq(sl1.step, 0), (len1 + 1), val)
            return val
    else:
        # slice2 is itself a slice: compose start/stop/step for all four
        # forward/backward combinations (pp, pn, np, nn) and select at
        # runtime based on the reverse flags.
        flen = (sl2.stop - sl2.start)
        p_step = (sl1.step * sl2.step)
        n_step = ((sl1.step * sl2.step) * (- 1))
        pp_start = minimum((sl1.start + (sl2.start * sl1.step)), sl1.stop)
        pp_stop = minimum((sl1.start + (sl2.stop * sl1.step)), sl1.stop)
        pn_stop = (sl1.start + ((sl2.start - 1) * sl1.step))
        pn_stop = switch(and_(lt(pn_stop, 0), gt(flen, 0)), ((- len1) - 1), minimum(pn_stop, sl1.stop))
        pn_start = (sl1.start + ((sl2.stop - 1) * sl1.step))
        pn_start = minimum(pn_start, sl1.stop)
        pn_start = maximum(pn_start, 0)
        np_stop = ((sl1.stop - (sl2.stop * sl1.step)) - 1)
        np_stop = switch(and_(lt(np_stop, 0), gt(flen, 0)), ((- len1) - 1), maximum((sl1.start - 1), np_stop))
        np_start = maximum(sl1.start, ((sl1.stop - (sl2.stop * sl1.step)) - 1))
        nn_start = maximum(sl1.start, ((sl1.stop - 1) - ((sl2.stop - 1) * sl1.step)))
        nn_stop = maximum(sl1.start, (sl1.stop - (sl2.start * sl1.step)))
        start = switch(lt((reverse2 * reverse1), 0), switch(lt(reverse1, 0), np_start, pn_start), switch(lt(reverse1, 0), nn_start, pp_start))
        stop = switch(lt((reverse2 * reverse1), 0), switch(lt(reverse1, 0), np_stop, pn_stop), switch(lt(reverse1, 0), nn_stop, pp_stop))
        step = switch(lt((reverse2 * reverse1), 0), n_step, p_step)
        # An empty intermediate result collapses to an empty slice.
        start = switch(le(flen, 0), 0, start)
        stop = switch(le(flen, 0), 0, stop)
        return slice(start, stop, step)
class ChainChoiceType(click.Choice):
    """click Choice that accepts a known network name or a numeric chain id,
    always converting to the integer chain id."""

    def convert(self, value, param, ctx):
        # Already an int (e.g. a programmatic default): pass through.
        if isinstance(value, int):
            return value
        if isinstance(value, str) and value.isnumeric():
            # str.isnumeric admits digits int() rejects (e.g. superscripts),
            # hence the explicit guard.
            try:
                return int(value)
            except ValueError:
                self.fail(f'invalid numeric network id: {value}', param, ctx)
        else:
            # Fall back to name-based matching, then map name -> chain id.
            network_name = super().convert(value, param, ctx)
            return CHAINNAME_TO_ID[network_name]
class DevDataset(Dataset):
    """TabFact dev split: linearises each (table, statement) pair into
    seq2seq fields, with an on-disk cache of the processed examples."""
    def __init__(self, args, raw_datasets, cache_root):
        self.raw_datasets = raw_datasets
        cache_path = os.path.join(cache_root, 'tab_fact_dev.cache')
        if (os.path.exists(cache_path) and args.dataset.use_cache):
            # Reuse previously processed examples.
            self.extended_data = torch.load(cache_path)
        else:
            # Truncation/linearisation pipeline driven by the BERT tokenizer.
            self.tab_processor = get_default_processor(max_cell_length=15, tokenizer=AutoTokenizer.from_pretrained(args.bert.location, use_fast=False), max_input_length=args.seq2seq.table_truncation_max_length)
            self.extended_data = []
            for raw_data in tqdm(self.raw_datasets):
                extend_data = deepcopy(raw_data)
                statement = extend_data['statement'].lower()
                # Map the integer label to its string form for seq2seq output.
                label_str = label_id2label_str[extend_data['label']]
                table_context = {'header': extend_data['table']['header'], 'rows': extend_data['table']['rows']}
                # Truncation functions modify table_context in place.
                for truncate_func in self.tab_processor.table_truncate_funcs:
                    truncate_func.truncate_table(table_context, statement, [])
                linear_table = self.tab_processor.table_linearize_func.process_table(table_context)
                extend_data.update({'struct_in': linear_table.lower(), 'text_in': statement.lower(), 'seq_out': label_str.lower()})
                self.extended_data.append(extend_data)
            if args.dataset.use_cache:
                torch.save(self.extended_data, cache_path)
    def __getitem__(self, index) -> T_co:
        return self.extended_data[index]
    def __len__(self):
        return len(self.extended_data)
def asin_list_from_csv(mf):
    """Load (asin_list, file_list) from the tracking CSV at *mf*.

    When the file exists, returns the ASINs from column 1 (skipping the
    '* NONE *' placeholder) and the file paths from column 7; rows too short
    to carry an ASIN are skipped entirely.  When the file does not exist, it
    is created with a header row and two empty lists are returned.
    """
    if os.path.isfile(mf):
        with open(mf) as f:
            csvread = csv.reader(f, delimiter=';', quotechar='"', quoting=csv.QUOTE_ALL)
            asinlist = []
            filelist = []
            for row in csvread:
                try:
                    if (row[0] != '* NONE *'):
                        asinlist.append(row[0])
                except IndexError:
                    # Blank/short row: no ASIN and no file path to record.
                    continue
                filelist.append(row[6])
            return (asinlist, filelist)
    else:
        # BUG FIX: the file was opened 'wb'; csv.writer requires a text-mode
        # file on Python 3 (writerow raised TypeError).  newline='' per the
        # csv module docs.
        with open(mf, 'w', newline='') as o:
            csvwrite = csv.writer(o, delimiter=';', quotechar='"', quoting=csv.QUOTE_ALL)
            csvwrite.writerow(['asin', 'lang', 'author', 'title', 'pages', 'is_real', 'file_path'])
        return ([], [])
class SponsorshipPackageManagerTests(TestCase):
    """Tests for the SponsorshipPackage queryset helpers."""
    def test_filter_packages_by_current_year(self):
        """from_current_year() returns only packages for the current year."""
        current_year = SponsorshipCurrentYear.get_year()
        active_package = baker.make(SponsorshipPackage, year=current_year)
        # A package from a previous year must be excluded.
        baker.make(SponsorshipPackage, year=(current_year - 1))
        qs = SponsorshipPackage.objects.all().from_current_year()
        self.assertEqual(1, qs.count())
        self.assertIn(active_package, qs)
class MaxLengthCriteria(StoppingCriteria):
    """Stopping criterion: halt generation once the sequence length reaches
    `max_length` (prompt tokens included)."""

    def __init__(self, max_length: int):
        self.max_length = max_length

    # FIX: the bare `_start_docstrings(...)` call here was the remnant of a
    # stripped decorator; restored as the docstring-injecting decorator.
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return (input_ids.shape[(- 1)] >= self.max_length)
def get_dist_run_id(cfg, num_nodes):
    """Return the distributed-run identifier for the configured init method.

    - tcp with RUN_ID 'auto': single node only; binds a free local port.
    - file: creates a temp file when RUN_ID is unset or 'auto' (warned
      against for multi-node runs).
    - env: single node only; the configured RUN_ID is returned as-is.
    """
    method = cfg.DISTRIBUTED.INIT_METHOD
    run_id = cfg.DISTRIBUTED.RUN_ID
    if method == 'tcp' and cfg.DISTRIBUTED.RUN_ID == 'auto':
        assert num_nodes == 1, 'cfg.DISTRIBUTED.RUN_ID=auto is allowed for 1 machine only.'
        run_id = f'localhost:{find_free_tcp_port()}'
    elif method == 'file':
        if num_nodes > 1:
            logging.warning('file is not recommended to use for distributed training on > 1 node')
        if not run_id or run_id == 'auto':
            # mkstemp returns (fd, path); the path becomes the run id.
            unused_fno, run_id = tempfile.mkstemp()
    elif method == 'tcp' and cfg.DISTRIBUTED.NUM_NODES > 1:
        assert cfg.DISTRIBUTED.RUN_ID, 'please specify RUN_ID for tcp'
    elif method == 'env':
        assert num_nodes == 1, "can not use 'env' init method for multi-node. Use tcp"
    return run_id
class DirectPalette(AbstractPalette):
    """Palette that maps block states directly to global registry ids."""
    registry = BLOCK_STATES

    # FIX: these methods take no `self`/`cls` yet were plain functions --
    # their @staticmethod decorators were evidently stripped; restored.
    @staticmethod
    def get_bits_per_block():
        """Bits needed to address every registered block state."""
        return math.ceil(math.log2(sum((len(b['states']) for b in BLOCK_STATES.data.values()))))

    @staticmethod
    def encode(block: str, props: dict=None) -> int:
        """Return the global state id for *block* with *props*.

        With no props the block's default state is used; otherwise the state
        whose properties match exactly.  Raises ValueError when no state
        matches.
        """
        props = ({} if (props is None) else props)
        block_data = BLOCK_STATES.encode(block)
        for state in block_data['states']:
            if ((not props) and state.get('default')):
                return state['id']
            state_props = state.get('properties')
            if (state_props and (dict(state_props.items()) == dict(props))):
                return state['id']
        raise ValueError(f"{block} doesn't have a state with those properties.")

    @staticmethod
    def decode(state: int) -> immutables.Map:
        """Return the block-state mapping for a global state id."""
        return BLOCK_STATES.decode(state)
def get_cache_dir() -> Path:
    """Return the per-user flit cache directory for the current platform.

    Linux/other POSIX honours XDG_CACHE_HOME (default ~/.cache); macOS uses
    ~/Library/Caches; Windows uses LOCALAPPDATA (default ~\\AppData\\Local).
    """
    if sys.platform == 'darwin':
        return Path(os.path.expanduser('~'), 'Library/Caches/flit')
    if os.name == 'posix':
        base = os.environ.get('XDG_CACHE_HOME', None) or os.path.expanduser('~/.cache')
        return Path(base, 'flit')
    base = os.environ.get('LOCALAPPDATA', None) or os.path.expanduser('~\\AppData\\Local')
    return Path(base, 'flit')
def anonymise_cli(args):
    """CLI entry point: anonymise a DICOM file or every file in a directory.

    Translates the parsed argparse flags into keyword arguments for
    `anonymise_file` / `anonymise_directory`.

    Raises FileNotFoundError when `args.input_path` is neither a file nor a
    directory.
    """
    # Tri-state: True = delete unknown tags, False = ignore them,
    # None = neither flag given (downstream default applies).
    if args.delete_unknown_tags:
        handle_unknown_tags = True
    elif args.ignore_unknown_tags:
        handle_unknown_tags = False
    else:
        handle_unknown_tags = None
    if (not args.keywords_to_leave_unchanged):
        keywords_to_leave_unchanged = ()
    else:
        keywords_to_leave_unchanged = args.keywords_to_leave_unchanged
    # No alternative replacement strategy is exposed via the CLI yet.
    replacement_strategy = None
    if isfile(args.input_path):
        anonymise_file(dicom_filepath=args.input_path, output_filepath=args.output_path, delete_original_file=args.delete_original_files, anonymise_filename=(not args.preserve_filenames), replace_values=(not args.clear_values), keywords_to_leave_unchanged=keywords_to_leave_unchanged, delete_private_tags=(not args.keep_private_tags), delete_unknown_tags=handle_unknown_tags, replacement_strategy=replacement_strategy)
    elif isdir(args.input_path):
        anonymise_directory(dicom_dirpath=args.input_path, output_dirpath=args.output_path, delete_original_files=args.delete_original_files, anonymise_filenames=(not args.preserve_filenames), replace_values=(not args.clear_values), keywords_to_leave_unchanged=keywords_to_leave_unchanged, delete_private_tags=(not args.keep_private_tags), delete_unknown_tags=handle_unknown_tags, replacement_strategy=replacement_strategy)
    else:
        raise FileNotFoundError('No file or directory was found at the supplied input path.')
def _split_text(asr, audio, speech2text):
    """Split an ASR transcript into time-aligned segments at long pauses.

    Returns a list of (start, end, text) tuples.  The whole utterance is
    returned as a single segment when the transcript is shorter than two
    characters or when per-character timing extraction fails.
    """
    if (len(asr) < 2):
        return [(0, len(audio), asr)]
    try:
        timings = _get_timings(asr, audio, speech2text)
    except Exception:
        # Best-effort: without timings, fall back to one unsplit segment.
        return [(0, len(audio), asr)]
    # Split threshold: 98th percentile of the inter-character gaps.
    # NOTE(review): `interpolation=` is deprecated in newer numpy in favour
    # of `method=` -- confirm the pinned numpy version before changing.
    threshold = np.percentile((timings[1:] - timings[:(- 1)]), 98, interpolation='nearest')
    (text, start, prev) = ('', timings[0], timings[0])
    remain = len(asr)
    ret = []
    for (char, curr) in zip(asr, timings):
        # Close the current segment at a large gap, but never produce a
        # segment shorter than 2 chars nor split off the final character.
        if ((len(text) > 1) and (remain > 1) and ((curr - prev) > threshold)):
            ret.append((start, curr, text))
            (start, text) = (curr, '')
        prev = curr
        text += char
        remain -= 1
    if text:
        ret.append((start, curr, text))
    return ret
def _show_fixtures_per_test(config: Config, session: Session) -> None:
    """Implement `--fixtures-per-test`: print, for each collected test item,
    the fixtures it uses with their location and first docstring line."""
    import _pytest.config
    session.perform_collect()
    curdir = Path.cwd()
    tw = _pytest.config.create_terminal_writer(config)
    verbose = config.getvalue('verbose')
    def get_best_relpath(func) -> str:
        """Location of *func* relative to the current directory."""
        loc = getlocation(func, str(curdir))
        return bestrelpath(curdir, Path(loc))
    def write_fixture(fixture_def: fixtures.FixtureDef[object]) -> None:
        """Print one fixture's name, path and docstring summary."""
        argname = fixture_def.argname
        # Private fixtures are hidden unless -v was given.
        if ((verbose <= 0) and argname.startswith('_')):
            return
        prettypath = _pretty_fixture_path(fixture_def.func)
        tw.write(f'{argname}', green=True)
        tw.write(f' -- {prettypath}', yellow=True)
        tw.write('\n')
        fixture_doc = inspect.getdoc(fixture_def.func)
        if fixture_doc:
            # Non-verbose mode shows only the first docstring paragraph.
            write_docstring(tw, (fixture_doc.split('\n\n')[0] if (verbose <= 0) else fixture_doc))
        else:
            tw.line(' no docstring available', red=True)
    def write_item(item: nodes.Item) -> None:
        """Print the fixtures used by one collected test item."""
        info: Optional[FuncFixtureInfo] = getattr(item, '_fixtureinfo', None)
        if ((info is None) or (not info.name2fixturedefs)):
            # Doctest items (and others without fixture info) are skipped.
            return
        tw.line()
        tw.sep('-', f'fixtures used by {item.name}')
        tw.sep('-', f'({get_best_relpath(item.function)})')
        for (_, fixturedefs) in sorted(info.name2fixturedefs.items()):
            assert (fixturedefs is not None)
            if (not fixturedefs):
                continue
            # The last FixtureDef is the one that actually applies.
            write_fixture(fixturedefs[(- 1)])
    for session_item in session.items:
        write_item(session_item)
# FIX: the original first line began with a bare `.parametrize(...)` -- a
# syntax error left by a stripped decorator; restored as the pytest marker.
@pytest.mark.parametrize('vectorize', [True, False])
def test_vf_ground_sky_2d_integ(test_system_fixed_tilt, vectorize):
    """Integrated ground-to-sky view factor agrees with trapezoidal
    integration of the pointwise values (to 10% relative tolerance)."""
    (ts, pts, vfs_gnd_sky) = test_system_fixed_tilt
    vf_integ = utils.vf_ground_sky_2d_integ(ts['rotation'], ts['gcr'], ts['height'], ts['pitch'], max_rows=1, npoints=3, vectorize=vectorize)
    # Reference value: numerically integrate the sampled view factors.
    expected_vf_integ = np.trapz(vfs_gnd_sky, pts, axis=0)
    assert np.isclose(vf_integ, expected_vf_integ, rtol=0.1)
class ResourceCache():
    """In-memory mapping from URI to `referencing.Resource` objects."""

    def __init__(self) -> None:
        self._cache: t.Dict[(str, referencing.Resource[Schema])] = {}

    def __setitem__(self, uri: str, data: t.Any) -> referencing.Resource[Schema]:
        """Wrap *data* as a Resource (draft 2020-12 default), store and
        return it -- note the non-standard return value for a __setitem__."""
        res = referencing.Resource.from_contents(data, default_specification=DRAFT202012)
        self._cache[uri] = res
        return res

    def __getitem__(self, uri: str) -> referencing.Resource[Schema]:
        return self._cache[uri]

    def __contains__(self, uri: str) -> bool:
        return uri in self._cache
class QueryBuilder(object):
    """Builder for ServiceNow `sysparm_query` strings.

    Chain `field()` with a condition (equals, contains, between, ...) and
    join expressions with AND()/OR()/NQ(); `str(builder)` yields the encoded
    query.  Condition methods validate operand types and raise
    QueryTypeError / QueryMissingField / QueryMultipleExpressions /
    QueryExpressionError / QueryEmpty on misuse.
    """
    def __init__(self):
        self._query = []           # encoded query fragments joined by __str__
        self.current_field = None  # field awaiting a condition
        self.c_oper = None         # name of the last condition method applied
        self.l_oper = None         # name of the last logical operator applied
    def field(self, field):
        """Select the field the next condition applies to."""
        self.current_field = field
        return self
    def order_descending(self):
        """Order the results descending by the current field."""
        self._query.append('ORDERBYDESC{0}'.format(self.current_field))
        self.c_oper = inspect.currentframe().f_back.f_code.co_name
        return self
    def order_ascending(self):
        """Order the results ascending by the current field."""
        self._query.append('ORDERBY{0}'.format(self.current_field))
        self.c_oper = inspect.currentframe().f_back.f_code.co_name
        return self
    def starts_with(self, starts_with):
        """Field value starts with the given string."""
        return self._add_condition('STARTSWITH', starts_with, types=[str])
    def ends_with(self, ends_with):
        """Field value ends with the given string."""
        return self._add_condition('ENDSWITH', ends_with, types=[str])
    def contains(self, contains):
        """Field value contains the given string."""
        return self._add_condition('LIKE', contains, types=[str])
    def not_contains(self, not_contains):
        """Field value does not contain the given string."""
        return self._add_condition('NOT LIKE', not_contains, types=[str])
    def is_empty(self):
        """Field has no value."""
        return self._add_condition('ISEMPTY', '', types=[str, int])
    def is_not_empty(self):
        """Field has a value."""
        return self._add_condition('ISNOTEMPTY', '', types=[str, int])
    def equals(self, data):
        """Field equals the value, or is IN the given list of values."""
        if isinstance(data, six.string_types):
            return self._add_condition('=', data, types=[int, str])
        elif isinstance(data, list):
            return self._add_condition('IN', ','.join(map(str, data)), types=[str])
        raise QueryTypeError(('Expected value of type `str` or `list`, not %s' % type(data)))
    def not_equals(self, data):
        """Field differs from the value, or is NOT IN the given list."""
        if isinstance(data, six.string_types):
            return self._add_condition('!=', data, types=[int, str])
        elif isinstance(data, list):
            # CONSISTENCY FIX: stringify elements as `equals` does, so lists
            # of ints no longer raise TypeError in join().
            return self._add_condition('NOT IN', ','.join(map(str, data)), types=[str])
        raise QueryTypeError(('Expected value of type `str` or `list`, not %s' % type(data)))
    def greater_than(self, greater_than):
        """Field is greater than an int or a datetime (sent as UTC)."""
        if hasattr(greater_than, 'strftime'):
            greater_than = datetime_as_utc(greater_than).strftime('%Y-%m-%d %H:%M:%S')
        elif isinstance(greater_than, six.string_types):
            raise QueryTypeError(('Expected value of type `int` or instance of `datetime`, not %s' % type(greater_than)))
        return self._add_condition('>', greater_than, types=[int, str])
    def greater_than_or_equal(self, greater_than):
        """Field is greater than or equal to an int or a datetime."""
        if hasattr(greater_than, 'strftime'):
            greater_than = datetime_as_utc(greater_than).strftime('%Y-%m-%d %H:%M:%S')
        elif isinstance(greater_than, six.string_types):
            raise QueryTypeError(('Expected value of type `int` or instance of `datetime`, not %s' % type(greater_than)))
        return self._add_condition('>=', greater_than, types=[int, str])
    def less_than(self, less_than):
        """Field is less than an int or a datetime (sent as UTC)."""
        if hasattr(less_than, 'strftime'):
            less_than = datetime_as_utc(less_than).strftime('%Y-%m-%d %H:%M:%S')
        elif isinstance(less_than, six.string_types):
            raise QueryTypeError(('Expected value of type `int` or instance of `datetime`, not %s' % type(less_than)))
        return self._add_condition('<', less_than, types=[int, str])
    def less_than_or_equal(self, less_than):
        """Field is less than or equal to an int or a datetime."""
        if hasattr(less_than, 'strftime'):
            less_than = datetime_as_utc(less_than).strftime('%Y-%m-%d %H:%M:%S')
        elif isinstance(less_than, six.string_types):
            raise QueryTypeError(('Expected value of type `int` or instance of `datetime`, not %s' % type(less_than)))
        return self._add_condition('<=', less_than, types=[int, str])
    def between(self, start, end):
        """Field is between two datetimes or two ints (inclusive range)."""
        if (hasattr(start, 'strftime') and hasattr(end, 'strftime')):
            dt_between = ('javascript:gs.dateGenerate("%(start)s"):gs.dateGenerate("%(end)s")' % {'start': start.strftime('%Y-%m-%d %H:%M:%S'), 'end': end.strftime('%Y-%m-%d %H:%M:%S')})
        elif (isinstance(start, int) and isinstance(end, int)):
            # BUG FIX: the format string was '%%d' (a literal '%d'), which
            # raised "not all arguments converted" for the (start, end)
            # tuple.  ServiceNow expects 'start@end'.
            dt_between = ('%d@%d' % (start, end))
        else:
            raise QueryTypeError(('Expected `start` and `end` of type `int` or instance of `datetime`, not %s and %s' % (type(start), type(end))))
        return self._add_condition('BETWEEN', dt_between, types=[str])
    def AND(self):
        """Join the previous and next expressions with AND."""
        return self._add_logical_operator('^')
    def OR(self):
        """Join the previous and next expressions with OR."""
        return self._add_logical_operator('^OR')
    def NQ(self):
        """Start a new query (ServiceNow NQ operator)."""
        return self._add_logical_operator('^NQ')
    def _add_condition(self, operator, operand, types):
        """Append `<field><operator><operand>`; validates state and types."""
        if (not self.current_field):
            raise QueryMissingField('Conditions requires a field()')
        elif (not (type(operand) in types)):
            caller = inspect.currentframe().f_back.f_code.co_name
            raise QueryTypeError(('Invalid type passed to %s() , expected: %s' % (caller, types)))
        elif self.c_oper:
            raise QueryMultipleExpressions('Expected logical operator after expression')
        self.c_oper = inspect.currentframe().f_back.f_code.co_name
        self._query.append(('%(current_field)s%(operator)s%(operand)s' % {'current_field': self.current_field, 'operator': operator, 'operand': operand}))
        return self
    def _add_logical_operator(self, operator):
        """Append a logical operator; must follow a completed expression."""
        if (not self.c_oper):
            raise QueryExpressionError('Logical operators must be preceded by an expression')
        # Reset state: the next call must be field() + condition.
        self.current_field = None
        self.c_oper = None
        self.l_oper = inspect.currentframe().f_back.f_code.co_name
        self._query.append(operator)
        return self
    def __str__(self):
        """Render the encoded query; validates it is complete."""
        if (len(self._query) == 0):
            raise QueryEmpty('At least one condition is required')
        elif (self.current_field is None):
            raise QueryMissingField('Logical operator expects a field()')
        elif (self.c_oper is None):
            raise QueryExpressionError('field() expects an expression')
        return str().join(self._query)
def assert_device_map(device_map, num_blocks):
    """Validate that *device_map* assigns each of `num_blocks` attention
    blocks to exactly one device.

    Raises ValueError when a block is assigned more than once, when a block
    is missing, or when the map references blocks the model does not have.
    """
    expected = list(range(0, num_blocks))
    assigned = [block for device_blocks in device_map.values() for block in device_blocks]
    duplicates = []
    for block in assigned:
        if assigned.count(block) > 1 and block not in duplicates:
            duplicates.append(block)
    missing = [block for block in expected if block not in assigned]
    extra = [block for block in assigned if block not in expected]
    if duplicates:
        raise ValueError(('Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device. These attention blocks were specified more than once: ' + str(duplicates)))
    if missing:
        raise ValueError(('There are attention blocks for this model that are not specified in the device_map. Add these attention blocks to a device on the device_map: ' + str(missing)))
    if extra:
        raise ValueError(('The device_map contains more attention blocks than this model has. Remove these from the device_map:' + str(extra)))
def gcs_test_credential() -> Generator[(None, None, None)]:
    """Fixture-style generator: ensure GOOGLE_APPLICATION_CREDENTIALS is set.

    If the env var is already present, yields immediately.  Otherwise, when
    GOOGLE_APPLICATION_CREDENTIALS_JSON holds the credentials JSON, writes
    it to a temp file, points GOOGLE_APPLICATION_CREDENTIALS at it for the
    duration of the test, then removes the env var (the temp file is deleted
    when the `with` block exits).  With neither variable set this yields
    nothing, so consuming code gets StopIteration -- presumably the caller
    skips in that case; confirm against the test harness.
    """
    if ('GOOGLE_APPLICATION_CREDENTIALS' in os.environ):
        (yield)
        return
    if ('GOOGLE_APPLICATION_CREDENTIALS_JSON' in os.environ):
        with tempfile.NamedTemporaryFile('w') as f:
            f.write(os.environ['GOOGLE_APPLICATION_CREDENTIALS_JSON'])
            # Flush so the credentials are on disk before the test reads them.
            f.flush()
            os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = f.name
            (yield)
            del os.environ['GOOGLE_APPLICATION_CREDENTIALS']
def init_params(opt, ClothWarper, data_loader):
    """Initialise training bookkeeping for the cloth-warper training loop.

    Restores the (epoch, iteration) counters from `iter.txt` when resuming,
    adjusts the learning rate past the initial `niter` epochs, and derives
    the per-run constants (GPU count, temporal window sizes, channel counts,
    print frequency, total step counter).
    """
    iter_path = os.path.join(opt.checkpoints_dir, opt.name, 'iter.txt')
    (start_epoch, epoch_iter) = (1, 0)
    if opt.continue_train:
        if os.path.exists(iter_path):
            (start_epoch, epoch_iter) = np.loadtxt(iter_path, delimiter=',', dtype=int)
        print(('Resuming from epoch %d at iteration %d' % (start_epoch, epoch_iter)))
        # Past the constant-LR phase: apply the decayed learning rate.
        if (start_epoch > opt.niter):
            ClothWarper.module.update_learning_rate_cloth((start_epoch - 1))
    # With batchSize 1 the generator GPUs are used; otherwise a single GPU.
    n_gpus = (opt.n_gpus_gen if (opt.batchSize == 1) else 1)
    tG = opt.n_frames_G
    tD = opt.n_frames_D
    t_scales = opt.n_scales_temporal
    input_nc_1 = opt.input_nc_T_2
    input_nc_2 = opt.input_nc_S_2
    input_nc_3 = opt.input_nc_P_2
    # Align the print frequency with the batch size so logging lands on
    # batch boundaries; round the resumed step count down likewise.
    print_freq = lcm(opt.print_freq, opt.batchSize)
    total_steps = (((start_epoch - 1) * len(data_loader)) + epoch_iter)
    total_steps = ((total_steps // print_freq) * print_freq)
    return (n_gpus, tG, input_nc_1, input_nc_2, input_nc_3, start_epoch, epoch_iter, print_freq, total_steps, iter_path, tD, t_scales)
def _decode(data):
code = ''
for c in data:
if (c in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']):
code += c
elif (c not in [' ', '\n']):
raise rse.BadParameter(("Cannot decode '%s' in '%s'" % (c, data)))
return bytes.fromhex(code).decode('utf-8') |
class StubAttr(StubBase):
    """Stub bound to a specific attribute of an object.

    State is written through `__dict__` directly, bypassing any
    `__setattr__` the stub machinery defines.
    """

    def __init__(self, obj, attr_name):
        self.__dict__['_obj'] = obj
        self.__dict__['_attr_name'] = attr_name

    # FIX: `__str__` below reads `self.obj` / `self.attr_name` without
    # calling them, so these accessors must be properties -- their
    # @property decorators were evidently stripped; restored.
    @property
    def obj(self):
        """The object whose attribute is stubbed."""
        return self.__dict__['_obj']

    @property
    def attr_name(self):
        """The name of the stubbed attribute."""
        return self.__dict__['_attr_name']

    def __str__(self):
        return ('StubAttr(%s, %s)' % (str(self.obj), str(self.attr_name)))
def get_tweets():
    """Collect parsed tweets from every active, non-editing twitter source."""
    collected = []
    sources = (AutoImportResource.objects
               .filter(type_res='twitter')
               .exclude(in_edit=True)
               .exclude(is_active=False))
    for src in sources:
        print('Process twitter', src)
        try:
            raw = get_tweets_by_url(src.link)
            collected.extend(_parse_tweets_data(raw, src))
        except Exception as e:
            # Best-effort: a failing source must not abort the whole run.
            print(e)
    return collected
class FileItem(BrowserItem):
    """Tree item representing a single file in the file browser.

    In 'normal' mode, ``.py`` files get a placeholder child that is replaced
    by the file's high-level structure (classes/functions) once the worker
    tasks pushed in ``onExpanded`` finish.
    """

    def __init__(self, parent, pathProxy, mode='normal'):
        BrowserItem.__init__(self, parent, pathProxy)
        self._mode = mode
        self._timeSinceLastDocString = 0
        # Python files are expandable: show a placeholder until parsed.
        if ((self._mode == 'normal') and self.path().lower().endswith('.py')):
            self._createDummyItem('Loading high level structure ...')
    def setFileIcon(self):
        """Set this item's icon via an empty dummy file with the same extension.

        The dummy file lets the platform icon provider resolve a file-type
        icon without touching the real (possibly remote/slow) path.
        """
        dummy_filename = op.join(cleanpath(pyzo.appDataDir), 'dummyFiles', ('dummy' + ext(self.path())))
        # Create the dummy file (and its directory) on first use.
        if (not op.isfile(dummy_filename)):
            if (not isdir(op.dirname(dummy_filename))):
                os.makedirs(op.dirname(dummy_filename))
            with open(dummy_filename, 'wb'):
                pass
        # NOTE(review): with a non-system Qt on Linux, QFileInfo-based icon
        # lookup appears unreliable, so a generic file icon is used instead
        # -- confirm against the original author's intent.
        if (sys.platform.startswith('linux') and (not QtCore.__file__.startswith('/usr/'))):
            icon = iconprovider.icon(iconprovider.File)
        else:
            icon = iconprovider.icon(QtCore.QFileInfo(dummy_filename))
        icon = addIconOverlays(icon)
        self.setIcon(0, icon)
    def searchContents(self, needle, **kwargs):
        """Hide this item and ask the proxy to search the file for *needle*."""
        self.setHidden(True)
        self._proxy.setSearch(needle, **kwargs)
    def onActivated(self):
        """Open the file in the editor, except for known binary extensions."""
        path = self.path()
        if (ext(path) not in ['.pyc', '.pyo', '.png', '.jpg', '.ico']):
            pyzo.editors.loadFile(path)
            pyzo.editors.getCurrentEditor().setFocus()
    def onExpanded(self):
        # Parse docstring and structure lazily, only when the item is expanded.
        if (self._mode == 'normal'):
            if self.path().lower().endswith('.py'):
                self._proxy.pushTask(tasks.DocstringTask())
                self._proxy.pushTask(tasks.PeekTask())
    def onCollapsed(self):
        # Drop children and restore the placeholder so a re-expand re-parses.
        if (self._mode == 'normal'):
            self.clear()
            if self.path().lower().endswith('.py'):
                self._createDummyItem('Loading high level structure ...')
    def onChanged(self):
        pass
    def onTaskFinished(self, task):
        """Populate children from finished DocstringTask/PeekTask results;
        delegate any other task type to the base class."""
        if isinstance(task, tasks.DocstringTask):
            result = task.result()
            self.clear()
            if result:
                DocstringItem(self, result)
        elif isinstance(task, tasks.PeekTask):
            result = task.result()
            if result:
                for r in result:
                    SubFileItem(self, *r)
            else:
                self._createDummyItem('No classes or functions found.')
        else:
            BrowserItem.onTaskFinished(self, task)
def task(reindexed_root_dir, dataset, index):
    """Re-index one dataset entry: dump each example to its own ``.npz``
    file and return a mapping from instance id to lightweight metadata."""
    image_id = dataset._ids[index]
    id_to_meta = {}
    for example_index, example in enumerate(dataset.get_example(index)):
        instance_id = f'{image_id}/{example_index:08d}'
        out_file = reindexed_root_dir / f'{instance_id}.npz'
        out_file.parent.makedirs_p()
        np.savez_compressed(out_file, **example)
        # Only the cheap-to-load fields go into the meta index.
        id_to_meta[instance_id] = {
            'class_id': int(example['class_id']),
            'visibility': float(example['visibility']),
        }
    return id_to_meta
class SharedAdam(optim.Optimizer):
    """Adam/AMSGrad optimizer whose state tensors can live in shared memory,
    so several processes (e.g. A3C workers) can update the same statistics.

    Interface matches the original: same constructor defaults,
    ``share_memory()`` moves state tensors to shared memory (best effort),
    ``step(closure=None)`` performs one update and returns the closure loss.
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=0.001, weight_decay=0, amsgrad=True):
        # Bug fix: defaults must be a plain dict -- the original passed a
        # collections.defaultdict, which torch.optim.Optimizer does not expect.
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
        super(SharedAdam, self).__init__(params, defaults)
        # Eagerly create the state so it exists before share_memory() is called.
        for group in self.param_groups:
            for p in group['params']:
                if p.requires_grad:
                    state = self.state[p]
                    state['step'] = torch.zeros(1)
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    state['max_exp_avg_sq'] = torch.zeros_like(p.data)
        print('initialized optimizer.')

    def share_memory(self):
        """Move all optimizer state tensors into shared memory (best effort)."""
        print('attempting to share memory.')
        try:
            for group in self.param_groups:
                for p in group['params']:
                    state = self.state[p]
                    state['step'].share_memory_()
                    state['exp_avg'].share_memory_()
                    state['exp_avg_sq'].share_memory_()
                    state['max_exp_avg_sq'].share_memory_()
        except Exception as e:
            # Deliberately best-effort: log and continue if sharing fails.
            print(e)
        print('sharing memory.')

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and
                returns the loss.

        Returns:
            The loss returned by *closure*, or None.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']
                state = self.state[p]
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                if group['weight_decay'] != 0:
                    # Modern keyword signature; the positional-alpha form
                    # grad.add(scalar, tensor) is removed in current torch.
                    grad = grad.add(p.data, alpha=group['weight_decay'])
                # Exponential moving averages of the gradient and its square.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # Track the running max of exp_avg_sq for AMSGrad.
                    max_exp_avg_sq = state['max_exp_avg_sq']
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                bias_correction1 = 1 - beta1 ** state['step'].item()
                bias_correction2 = 1 - beta2 ** state['step'].item()
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
                p.data.addcdiv_(exp_avg, denom, value=-step_size)
        return loss
def test_geodesic_fwd_inv_inplace():
    """inv()/fwd() with inplace=True must write results into the input arrays."""
    geod = Geod(ellps='clrk66')
    lon1 = numpy.zeros(1, dtype=numpy.float64)
    lat1 = numpy.zeros(1, dtype=numpy.float64)
    lon2 = numpy.ones(1, dtype=numpy.float64)
    lat2 = numpy.ones(1, dtype=numpy.float64)
    az12, az21, dist = geod.inv(lon1, lat1, lon2, lat2, inplace=True)
    # inv() reuses the first three input buffers as its outputs.
    assert az12 is lon1
    assert az21 is lat1
    assert dist is lon2
    endlon, endlat, backaz = geod.fwd(lon1, lat1, az12, dist, inplace=True)
    # fwd() likewise aliases its outputs onto the inputs.
    assert endlon is lon1
    assert endlat is lat1
    assert backaz is az12
class TrainRegSet(torch.utils.data.Dataset):
    """ImageFolder-backed dataset for regression training: each sample is a
    normalized image tensor plus a dummy zero 'keypoints' tensor."""

    def __init__(self, data_root, image_size):
        super().__init__()
        # Resize, convert to tensor, then map pixel values into [-1, 1].
        self.transform = transforms.Compose([
            transforms.Resize(image_size),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])
        self.imgs = torchvision.datasets.ImageFolder(root=data_root, transform=self.transform)

    def __getitem__(self, idx):
        # The folder label is discarded; only the image is used.
        image, _label = self.imgs[idx]
        return {'img': image, 'keypoints': torch.tensor(0)}

    def __len__(self):
        return len(self.imgs)
class DiscCentroidsLoss(nn.Module):
    """Discriminative centroids loss: attracts features to their class
    centroid and repels them from other centroids (hinge with margin).

    Args:
        num_classes: number of learnable class centroids.
        feat_dim: dimensionality of the input features.
        size_average: if True, the attraction term is averaged over the batch.
    """

    def __init__(self, num_classes, feat_dim, size_average=True):
        super(DiscCentroidsLoss, self).__init__()
        self.num_classes = num_classes
        self.centroids = nn.Parameter(torch.randn(num_classes, feat_dim))
        self.disccentroidslossfunc = DiscCentroidsLossFunc.apply
        self.feat_dim = feat_dim
        self.size_average = size_average

    def forward(self, feat, label):
        """Return attraction + 0.01 * repulsion loss for a feature batch."""
        batch_size = feat.size(0)
        feat = feat.view(batch_size, -1)
        if feat.size(1) != self.feat_dim:
            raise ValueError("Center's dim: {0} should be equal to input feature's dim: {1}".format(self.feat_dim, feat.size(1)))
        batch_size_tensor = feat.new_empty(1).fill_(batch_size if self.size_average else 1)
        loss_attract = self.disccentroidslossfunc(feat, label, self.centroids, batch_size_tensor).squeeze()
        # Pairwise squared distances ||f - c||^2 via ||f||^2 + ||c||^2 - 2 f.c
        distmat = (torch.pow(feat, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes)
                   + torch.pow(self.centroids, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t())
        # Bug fix: use the keyword (beta/alpha) signature; the positional
        # form addmm_(1, -2, ...) is deprecated/removed in current torch.
        distmat.addmm_(feat, self.centroids.t(), beta=1, alpha=-2)
        # Generalized: follow the feature tensor's device instead of
        # hard-coding .cuda(), so the module also runs on CPU.
        classes = torch.arange(self.num_classes, dtype=torch.long, device=feat.device)
        labels_expand = label.unsqueeze(1).expand(batch_size, self.num_classes)
        mask = labels_expand.eq(classes.expand(batch_size, self.num_classes))
        # Zero out same-class entries so only other-class distances repel.
        distmat_neg = distmat
        distmat_neg[mask] = 0.0
        margin = 10.0
        loss_repel = torch.clamp(margin - distmat_neg.sum() / (batch_size * self.num_classes), 0.0, 1000000.0)
        return loss_attract + 0.01 * loss_repel
def writegen(fnfn, generator, header=None, sep=','):
    """Stream dicts produced by *generator* (a zero-arg callable yielding
    dicts) into a delimited text file at *fnfn*.

    The header row comes from *header*, else from the first dict's 'header'
    key, else from its sorted keys. String values are wrapped in double
    quotes; whole-number floats are written as ints; missing keys become ''.

    Bug fixes vs. the original: the file is now closed deterministically
    (context manager instead of a never-closed codecs handle), the Py2-era
    ``type(v) in [str, str]`` check is a real isinstance test, and bytes
    values are decoded instead of being repr()'d by ``str``.
    """
    with open(fnfn, 'w', encoding='utf-8') as of:
        header_written = False
        for dx in generator():
            if not header_written:
                if not header:
                    # Explicit 'header' key wins; otherwise use sorted keys.
                    header = dx['header'] if 'header' in dx else sorted(dx.keys())
                of.write(sep.join('"' + x + '"' for x in header) + '\n')
                header_written = True
            vals = []
            for h in header:
                v = dx.get(h, '')
                is_str = isinstance(v, (str, bytes))
                # Collapse float-valued integers (3.0 -> 3) for cleaner output.
                if type(v) in [float, int] and int(v) == v:
                    v = int(v)
                if isinstance(v, bytes):
                    o = v.decode('utf-8', errors='ignore')
                else:
                    o = str(v)
                if is_str and v:
                    o = '"' + o + '"'
                vals.append(o)
            of.write(sep.join(vals) + '\n')
class IsHasAccessOrReadOnly(permissions.BasePermission):
    """Allow safe (read-only) requests for everyone; allow writes only for
    the object's creator, staff users, or members of the manager group."""

    def has_object_permission(self, request, view, obj):
        # Reads are always permitted.
        if request.method in permissions.SAFE_METHODS:
            return True
        requester = request.user
        manages = requester.groups.filter(name=MANAGER_GROUP).exists()
        return requester == obj.creator or requester.is_staff or manages
class SecretSerializer(SerializationBase):
    """(De)serialize ``_DecryptedSecret`` objects to/from JSON bytes using a
    marshmallow schema generated from the dataclass.

    NOTE(review): the methods take no ``self``/``cls`` and are not marked
    ``@staticmethod`` -- presumably ``SerializationBase`` (or its metaclass)
    arranges static dispatch; confirm before calling them on instances.
    """
    def serialize(obj: _DecryptedSecret) -> bytes:
        """Serialize a _DecryptedSecret to JSON-encoded bytes.

        Raises:
            SerializationError: if *obj* is not a _DecryptedSecret, or if
                the schema dump / JSON encoding fails.
        """
        if (not isinstance(obj, _DecryptedSecret)):
            raise SerializationError(f'Can only serialize {_DecryptedSecret.__name__} objects')
        try:
            schema = class_schema(_DecryptedSecret, base_schema=BaseSchema)()
            data = schema.dump(obj)
            data = json.dumps(data).encode()
            return data
        except (AttributeError, TypeError, ValidationError, ValueError, JSONDecodeError) as ex:
            raise SerializationError(f"Can't serialize: {obj}") from ex
    def deserialize(data: bytes) -> _DecryptedSecret:
        """Reconstruct a _DecryptedSecret from JSON-encoded bytes.

        Raises:
            SerializationError: on malformed JSON or schema validation failure.
        """
        try:
            obj = json.loads(data.decode())
            schema = class_schema(_DecryptedSecret, base_schema=BaseSchema)()
            # deepcopy guards the loaded dict against mutation by schema.load.
            return schema.load(deepcopy(obj))
        except (ValueError, TypeError, ValidationError, JSONDecodeError) as ex:
            raise SerializationError(f"Can't deserialize: {data!r}") from ex
def window_accumulator(acc, new, diff=None, window=None, agg=None, with_state=False):
    """Advance a windowed streaming aggregation by one step.

    Args:
        acc: accumulator dict with 'dfs' (retained frames) and 'state'
            (aggregator state), or None on the first call.
        new: the newly arrived frame; may be None for a pure-expiry tick.
        diff: callable ``(dfs, new, window=...) -> (kept_dfs, expired)``.
        window: window specification forwarded to *diff*.
        agg: aggregation object providing ``initial``/``on_new``/``on_old``.
        with_state: accepted for interface compatibility; not used here.

    Returns:
        (new_accumulator, result) -- result is None when neither a new frame
        nor an expired frame produced one.
    """
    if acc is None:
        acc = {'dfs': [], 'state': agg.initial(new)}
    dfs = acc['dfs']
    state = acc['state']
    dfs, old = diff(dfs, new, window=window)
    # Bug fix: `result` was unbound (NameError) when new is None and nothing
    # expired from the window; default it to None instead.
    result = None
    if new is not None:
        state, result = agg.on_new(state, new)
    for expired in old:
        # Skip empty frames; on_old only sees non-empty expirations.
        if len(expired):
            state, result = agg.on_old(state, expired)
    acc2 = {'dfs': dfs, 'state': state}
    return acc2, result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.