class LinearsolverResult(AlgorithmResult):
    @property
    def solution(self) -> np.ndarray:
        return self.get('solution')
    @solution.setter
    def solution(self, value: np.ndarray) -> None:
        self.data['solution'] = value
    @staticmethod
    def from_dict(a_dict: Dict) -> 'LinearsolverResult':
        return LinearsolverResult(a_dict) |
class TestAdaroundOptimizer(unittest.TestCase):
def _optimize_layer_rounding(self, warm_start):
AimetLogger.set_level_for_all_areas(logging.DEBUG)
model = TinyModel().eval()
dummy_input = torch.randn(1, 3, 32, 32)
sim = QuantizationSimModel(model, dummy_input=dummy_input, quant_scheme='tf_enhanced', default_param_bw=4)
module = model.conv1
quant_module = sim.model.conv1
nearest_encoding = unittest.mock.MagicMock()
nearest_encoding.bw = 4
nearest_encoding.offset = (- 127.0)
nearest_encoding.delta = 0.
quant_module.param_quantizers['weight'].encoding = nearest_encoding
with Adaround._replace_tensor_quantizer(quant_module):
alpha = torch.randn(quant_module._module_to_wrap.weight.shape, requires_grad=True)
quant_module.param_quantizers['weight'].alpha = alpha
before_opt = alpha.clone()
dataset_size = 50
batch_size = 10
image_size = (3, 32, 32)
data_loader = create_fake_data_loader(dataset_size, batch_size, image_size)
def forward_fn(model, inputs):
(inputs, _) = inputs
model(inputs)
path = './tmp/cached_dataset/'
try:
cached_dataset = CachedDataset(data_loader, (dataset_size // batch_size), path)
opt_params = AdaroundHyperParameters(num_iterations=10, reg_param=0.01, beta_range=(20, 2), warm_start=warm_start)
AdaroundOptimizer.adaround_module(module, quant_module, model, sim.model, None, cached_dataset, forward_fn, opt_params)
after_opt = quant_module.param_quantizers['weight'].alpha
self.assertFalse(np.array_equal(to_numpy(before_opt), to_numpy(after_opt)))
self.assertTrue((quant_module.param_quantizers['weight'].alpha.grad is not None))
finally:
if os.path.isdir(path):
shutil.rmtree(path)
def test_optimize_rounding_with_only_recons_loss(self):
warm_start = 1.0
self._optimize_layer_rounding(warm_start)
def test_optimize_rounding_with_combined_loss(self):
warm_start = 0.2
self._optimize_layer_rounding(warm_start)
def test_compute_recons_metrics(self):
np.random.seed(0)
torch.manual_seed(0)
quant_scheme = QuantScheme.post_training_tf_enhanced
weight_bw = 8
activation_bw = 8
weight_data = np.random.rand(4, 4, 1, 1).astype(dtype='float32')
encoding_dict = compute_encoding_for_given_bitwidth(weight_data, weight_bw, quant_scheme, False, QuantizationDataType.int)
(encoding, _) = create_encoding_from_dict(encoding_dict)
print(encoding_dict['scale'], encoding_dict['max'])
self.assertAlmostEqual(encoding_dict['scale'], 0., places=3)
conv1 = torch.nn.Conv2d(4, 4, 1, bias=False)
conv1.weight.data = torch.from_numpy(weight_data)
quant_module = StaticGridQuantWrapper(conv1, weight_bw, activation_bw, round_mode='nearest', quant_scheme=quant_scheme)
quant_module.param_quantizers['weight'].encoding = encoding
inp_data = np.random.rand(1, 4, 10, 10).astype(dtype='float32')
inp_data = torch.from_numpy(inp_data)
out_data = np.random.rand(1, 4, 10, 10).astype(dtype='float32')
out_data = torch.from_numpy(out_data)
with Adaround._replace_tensor_quantizer(quant_module):
(recons_err_hard, recons_err_soft) = AdaroundOptimizer._compute_recons_metrics(quant_module, None, inp_data, out_data)
print(recons_err_hard, recons_err_soft)
self.assertAlmostEqual(recons_err_hard, 0., places=3)
self.assertAlmostEqual(recons_err_soft, 0., places=3)
    @pytest.mark.cuda
def test_compute_output_with_adarounded_weights(self):
np.random.seed(0)
torch.manual_seed(0)
quant_scheme = QuantScheme.post_training_tf_enhanced
weight_bw = 8
activation_bw = 8
weight_data = np.random.rand(4, 4, 1, 1).astype(dtype='float32')
conv1 = torch.nn.Conv2d(4, 4, 1, bias=False).to(torch.device('cuda'))
conv1.weight.data = torch.from_numpy(weight_data).to(torch.device('cuda'))
quant_module = StaticGridQuantWrapper(conv1, weight_bw, activation_bw, round_mode='nearest', quant_scheme=quant_scheme)
quant_module.param_quantizers['weight'].update_encoding_stats(conv1.weight.data)
quant_module.param_quantizers['weight'].compute_encoding()
encoding = quant_module.param_quantizers['weight'].encoding
print(encoding.max, encoding.min)
inp_data = np.random.rand(1, 4, 10, 10).astype(dtype='float32')
inp_data = torch.from_numpy(inp_data).to(torch.device('cuda'))
out_data = np.random.rand(1, 4, 10, 10).astype(dtype='float32')
out_tensor = torch.from_numpy(out_data).to(torch.device('cuda'))
with Adaround._replace_tensor_quantizer(quant_module):
adaround_out_tensor = AdaroundOptimizer._compute_output_with_adarounded_weights(quant_module, inp_data)
mse_loss = functional.mse_loss(adaround_out_tensor, out_tensor)
print(mse_loss.detach().cpu().numpy())
self.assertAlmostEqual(mse_loss.detach().cpu().numpy(), 0.6107949, places=2)
recon_loss = AdaroundLoss.compute_recon_loss(adaround_out_tensor, out_tensor)
print(recon_loss.detach().cpu().numpy())
self.assertAlmostEqual(recon_loss.detach().cpu().numpy(), 2.4431798, places=2) |
class PathSeparatorTest(TestCase):
def test_os_path_sep_matches_fake_filesystem_separator(self):
filesystem = fake_filesystem.FakeFilesystem(path_separator='!')
fake_os_module = fake_os.FakeOsModule(filesystem)
self.assertEqual('!', fake_os_module.sep)
self.assertEqual('!', fake_os_module.path.sep) |
def calculate_sentence_transformer_embedding(examples, embedding_model, mean_normal=False):
text_to_encode = [f"The topic is {raw_item['activity_label']}. {raw_item['ctx_a']} {raw_item['ctx_b']} | {raw_item['endings'][0]} | {raw_item['endings'][1]} | {raw_item['endings'][2]} | {raw_item['endings'][3]}" for raw_item in examples]
num = len(text_to_encode)
emb_model = SentenceTransformer(embedding_model)
embeddings = []
    for i in tqdm(range(0, num, 20), desc='calculate embeddings'):
        embeddings += emb_model.encode(text_to_encode[i:(i + 20)]).tolist()
embeddings = torch.tensor(embeddings)
if mean_normal:
mean_embeddings = torch.mean(embeddings, 0, True)
embeddings = (embeddings - mean_embeddings)
return embeddings |
def is_literal_type_like(t: (Type | None)) -> bool:
t = get_proper_type(t)
if (t is None):
return False
elif isinstance(t, LiteralType):
return True
elif isinstance(t, UnionType):
return any((is_literal_type_like(item) for item in t.items))
elif isinstance(t, TypeVarType):
return (is_literal_type_like(t.upper_bound) or any((is_literal_type_like(item) for item in t.values)))
else:
return False |
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[(str, Mapping[(int, str)])]:
common_inputs = OrderedDict()
common_inputs['input_ids'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
common_inputs['attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
common_inputs['encoder_hidden_states'] = {0: 'batch', 1: 'encoder_sequence'}
return common_inputs
def generate_dummy_inputs(self, tokenizer: 'PreTrainedTokenizerBase', batch_size: int=(- 1), seq_length: int=(- 1), is_pair: bool=False, framework: Optional['TensorType']=None) -> Mapping[(str, Any)]:
import torch
common_inputs = OrderedDict()
dummy_input = super().generate_dummy_inputs(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
(batch, encoder_sequence) = dummy_input['input_ids'].shape
encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
common_inputs['input_ids'] = dummy_input.pop('input_ids')
common_inputs['attention_mask'] = dummy_input.pop('attention_mask')
common_inputs['encoder_hidden_states'] = torch.zeros(encoder_hidden_states_shape)
return common_inputs |
class GeneralDataset(data.Dataset):
    def __init__(self, root, transform=None, retname=True):
        self.root = root
        print(root)
        self.transform = transform
        self.retname = retname
        self.cam_intrinsic = np.float32(np.array([[582.64, 0, 313.04], [0, 582.69, 238.44], [0, 0, 1]]))
        self.sample_ids = natsorted(os.listdir(self.root))
print('Number of dataset images: {:d}'.format(len(self.sample_ids)))
def load_color_image(self, filepath):
if filepath.endswith('npy'):
image = np.load(filepath)
elif filepath.endswith('png'):
image = cv2.imread(filepath, (- 1))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
else:
raise Exception(('not supported data format, file: ' + filepath))
return np.uint8(image)
def get_one_sample(self, index):
sample = {}
_img = self.load_color_image(os.path.join(self.root, self.sample_ids[index]))
sample['image'] = _img
sample['original_image'] = _img
sample['intrinsic'] = self.cam_intrinsic
if self.retname:
sample['meta'] = {'image': str(self.sample_ids[index]), 'im_size': (_img.shape[0], _img.shape[1])}
if (self.transform is not None):
sample = self.transform(sample)
return sample
def __getitem__(self, index):
sample = self.get_one_sample(index)
return sample
def __len__(self):
return len(self.sample_ids) |
class TransparentCheckBox(QtWidgets.QCheckBox):
def enterEvent(self, e):
if (self.window().showhelp is True):
QtWidgets.QToolTip.showText(e.globalPos(), '<h3>Frame transparency</h3>Toggle the transparency of the axis-frame.<p>If checked, the map will be exported with a transparent background.<p><b>NOTE:</b> The current value is also used for clipboard-export! (<code>ctrl+c</code>)') |
class WriteSingleRegisterResponse(ModbusResponse):
function_code = 6
_rtu_frame_size = 8
def __init__(self, address=None, value=None, **kwargs):
super().__init__(**kwargs)
self.address = address
self.value = value
def encode(self):
return struct.pack('>HH', self.address, self.value)
def decode(self, data):
(self.address, self.value) = struct.unpack('>HH', data)
def get_response_pdu_size(self):
return (((1 + 2) + 2) + 2)
def __str__(self):
params = (self.address, self.value)
return ('WriteRegisterResponse %d => %d' % params) |
def get_shortcuts_folder():
if (get_root_hkey() == winreg.HKEY_LOCAL_MACHINE):
try:
fldr = get_special_folder_path('CSIDL_COMMON_PROGRAMS')
except OSError:
fldr = get_special_folder_path('CSIDL_PROGRAMS')
else:
fldr = get_special_folder_path('CSIDL_PROGRAMS')
try:
install_group = winreg.QueryValue(get_root_hkey(), (root_key_name + '\\InstallPath\\InstallGroup'))
except OSError:
vi = sys.version_info
install_group = ('Python %d.%d' % (vi[0], vi[1]))
return os.path.join(fldr, install_group) |
class AdaptiveBN(nn.Module):
def __init__(self, max_nc, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True):
super().__init__()
num_features = max_nc
self.num_features = max_nc
self.eps = eps
self.momentum = momentum
self.affine = affine
self.track_running_stats = track_running_stats
if self.affine:
self.weight = Parameter(torch.Tensor(num_features))
self.bias = Parameter(torch.Tensor(num_features))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
if self.track_running_stats:
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))
else:
self.register_parameter('running_mean', None)
self.register_parameter('running_var', None)
self.register_parameter('num_batches_tracked', None)
self.reset_parameters()
def reset_running_stats(self):
if self.track_running_stats:
self.running_mean.zero_()
self.running_var.fill_(1)
self.num_batches_tracked.zero_()
def reset_parameters(self):
self.reset_running_stats()
if self.affine:
init.ones_(self.weight)
init.zeros_(self.bias)
def forward(self, input):
if (self.momentum is None):
exponential_average_factor = 0.0
else:
exponential_average_factor = self.momentum
training = self.training
if (training and self.track_running_stats):
if (self.num_batches_tracked is not None):
self.num_batches_tracked += 1
if (self.momentum is None):
exponential_average_factor = (1.0 / float(self.num_batches_tracked))
else:
exponential_average_factor = self.momentum
(batch_size, nc, embed_dim) = input.size()
assert (self.num_features >= nc)
output = F.batch_norm(input, self.running_mean[:nc], self.running_var[:nc], self.weight[:nc], self.bias[:nc], (self.training or (not self.track_running_stats)), exponential_average_factor, self.eps)
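        # Numerically a no-op: presumably keeps the full-width weight/bias (and running stats) tied into the graph, e.g. so unused-parameter checks (as in DDP) pass when only the first nc channels are sliced.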
output += (0.0 * (((torch.min(self.running_mean) + torch.min(self.running_var)) + torch.min(self.weight)) + torch.min(self.bias)))
return output |
def _notify_superusers(key):
notification_metadata = {'name': key.name, 'kid': key.kid, 'service': key.service, 'jwk': key.jwk, 'metadata': key.metadata, 'created_date': timegm(key.created_date.utctimetuple())}
if (key.expiration_date is not None):
notification_metadata['expiration_date'] = timegm(key.expiration_date.utctimetuple())
if (len(config.app_config['SUPER_USERS']) > 0):
superusers = User.select().where((User.username << config.app_config['SUPER_USERS']))
for superuser in superusers:
create_notification('service_key_submitted', superuser, metadata=notification_metadata, lookup_path='/service_key_approval/{0}/{1}'.format(key.kid, superuser.id)) |
class ContactLabelGenerator(object):
def __init__(self):
pass
def get_contact_labels(self, smpl, obj, num_samples, thres=0.02):
object_points = obj.sample(num_samples)
(dist, _, vertices) = igl.signed_distance(object_points, smpl.vertices, smpl.faces, return_normals=False)
return (object_points, (dist < thres), vertices)
def to_trimesh(self, mesh):
tri = trimesh.Trimesh(mesh.v, mesh.f, process=False)
return tri |
class TextButton(DemoItem):
BUTTON_WIDTH = 180
BUTTON_HEIGHT = 19
(LEFT, RIGHT) = range(2)
(SIDEBAR, PANEL, UP, DOWN) = range(4)
(ON, OFF, HIGHLIGHT, DISABLED) = range(4)
def __init__(self, text, align=LEFT, userCode=0, parent=None, type=SIDEBAR):
super(TextButton, self).__init__(parent)
from menumanager import MenuManager
self._menu_manager = MenuManager.instance()
self.menuString = text
self.buttonLabel = text
self.alignment = align
self.buttonType = type
self.userCode = userCode
self.scanAnim = None
self.bgOn = None
self.bgOff = None
self.bgHighlight = None
self.bgDisabled = None
self.state = TextButton.OFF
self.setAcceptHoverEvents(True)
self.setCursor(Qt.PointingHandCursor)
if (type in (TextButton.SIDEBAR, TextButton.PANEL)):
self.logicalSize = QSize(TextButton.BUTTON_WIDTH, TextButton.BUTTON_HEIGHT)
else:
self.logicalSize = QSize(int(((TextButton.BUTTON_WIDTH / 2.0) - 5)), int((TextButton.BUTTON_HEIGHT * 1.5)))
self._prepared = False
def setMenuString(self, menu):
self.menuString = menu
def prepare(self):
if (not self._prepared):
self.setupHoverText()
self.setupScanItem()
self.setupButtonBg()
self._prepared = True
def boundingRect(self):
return QRectF(0, 0, self.logicalSize.width(), self.logicalSize.height())
def setupHoverText(self):
if (not self.buttonLabel):
return
textItem = DemoTextItem(self.buttonLabel, Colors.buttonFont(), Colors.buttonText, (- 1), self)
textItem.setZValue((self.zValue() + 2))
textItem.setPos(16, 0)
def setupScanItem(self):
if Colors.useButtonBalls:
scanItem = ScanItem(self)
scanItem.setZValue((self.zValue() + 1))
self.scanAnim = DemoItemAnimation(scanItem)
x = 1.0
y = 1.5
stop = ((TextButton.BUTTON_WIDTH - scanItem.boundingRect().width()) - x)
if (self.alignment == TextButton.LEFT):
self.scanAnim.setDuration(2500)
self.scanAnim.setKeyValueAt(0.0, QPointF(x, y))
self.scanAnim.setKeyValueAt(0.5, QPointF(x, y))
self.scanAnim.setKeyValueAt(0.7, QPointF(stop, y))
self.scanAnim.setKeyValueAt(1.0, QPointF(x, y))
scanItem.setPos(QPointF(x, y))
else:
self.scanAnim.setKeyValueAt(0.0, QPointF(stop, y))
self.scanAnim.setKeyValueAt(0.5, QPointF(x, y))
self.scanAnim.setKeyValueAt(1.0, QPointF(stop, y))
scanItem.setPos(QPointF(stop, y))
def setState(self, state):
self.state = state
self.bgOn.setRecursiveVisible((state == TextButton.ON))
self.bgOff.setRecursiveVisible((state == TextButton.OFF))
self.bgHighlight.setRecursiveVisible((state == TextButton.HIGHLIGHT))
self.bgDisabled.setRecursiveVisible((state == TextButton.DISABLED))
if (state == TextButton.DISABLED):
self.setCursor(Qt.ArrowCursor)
else:
self.setCursor(Qt.PointingHandCursor)
def setupButtonBg(self):
self.bgOn = ButtonBackground(self.buttonType, True, True, self.logicalSize, self)
self.bgOff = ButtonBackground(self.buttonType, False, False, self.logicalSize, self)
self.bgHighlight = ButtonBackground(self.buttonType, True, False, self.logicalSize, self)
self.bgDisabled = ButtonBackground(self.buttonType, True, True, self.logicalSize, self)
self.setState(TextButton.OFF)
def hoverEnterEvent(self, event):
if ((not self.isEnabled()) or (self.state == TextButton.DISABLED)):
return
if (self.state == TextButton.OFF):
self.setState(TextButton.HIGHLIGHT)
if (Colors.noAnimations and Colors.useButtonBalls):
self.scanAnim.setDuration(1000)
self.scanAnim.setKeyValueAt(0.2, self.scanAnim.posAt(0))
if ((self._menu_manager.window.fpsMedian > 10) or Colors.noAdapt or Colors.noTimerUpdate):
if Colors.useButtonBalls:
self.scanAnim.play(True, True)
def hoverLeaveEvent(self, event):
if (self.state == TextButton.DISABLED):
return
self.setState(TextButton.OFF)
if (Colors.noAnimations and Colors.useButtonBalls):
self.scanAnim.stop()
def mousePressEvent(self, event):
if (self.state == TextButton.DISABLED):
return
if ((self.state == TextButton.HIGHLIGHT) or (self.state == TextButton.OFF)):
self.setState(TextButton.ON)
def mouseReleaseEvent(self, event):
if (self.state == TextButton.ON):
self.setState(TextButton.OFF)
if (self.isEnabled() and self.boundingRect().contains(event.pos())):
self._menu_manager.itemSelected(self.userCode, self.menuString)
def animationStarted(self, _):
if (self.state == TextButton.DISABLED):
return
self.setState(TextButton.OFF) |
class HourGlassNetMultiScaleInt(nn.Module):
def __init__(self, in_nc=3, out_nc=3, upscale=4, nf=64, res_type='res', n_mid=2, n_tail=2, n_HG=6, act_type='leakyrelu', inter_supervis=True, mscale_inter_super=False, share_upsample=False):
super(HourGlassNetMultiScaleInt, self).__init__()
self.n_HG = n_HG
self.inter_supervis = inter_supervis
if (upscale == 3):
ksize = 3
else:
ksize = 1
self.conv_in = B.conv_block(in_nc, nf, kernel_size=3, norm_type=None, act_type=None)
def make_upsample_block(upscale=4, in_ch=64, out_nc=3, kernel_size=3):
n_upscale = (1 if (upscale == 3) else int(math.log(upscale, 2)))
LR_conv = B.conv_block(nf, nf, kernel_size=3, norm_type=None, act_type=None, mode='CNA')
HR_conv0 = B.conv_block(nf, nf, kernel_size=3, norm_type=None, act_type='leakyrelu')
HR_conv1 = B.conv_block(nf, out_nc, kernel_size=kernel_size, norm_type=None, act_type=None)
if (upscale == 1):
return nn.Sequential(LR_conv, HR_conv0, HR_conv1)
elif (upscale == 3):
                upsampler = [B.upconv_blcok(nf, nf, 3, act_type=act_type)]
else:
upsampler = [B.upconv_blcok(nf, nf, act_type=act_type) for _ in range(n_upscale)]
return nn.Sequential(LR_conv, *upsampler, HR_conv0, HR_conv1)
self.flat_map = make_upsample_block(upscale=upscale, kernel_size=ksize)
self.edge_map = make_upsample_block(upscale=upscale, kernel_size=ksize)
self.corner_map = make_upsample_block(upscale=upscale, kernel_size=ksize)
self.upsample_flat = make_upsample_block(upscale=upscale)
self.upsample_edge = make_upsample_block(upscale=upscale)
self.upsample_corner = make_upsample_block(upscale=upscale)
for i in range(n_HG):
if (i != (n_HG - 1)):
HG_block = HourGlassBlock(res_type=res_type, n_mid=n_mid, n_tail=n_tail)
else:
HG_block = HourGlassBlock(res_type=res_type, n_mid=n_mid, n_tail=0)
setattr(self, ('HG_%d' % i), HG_block)
def forward(self, x):
x = self.conv_in(x)
SR_map = []
result = []
out = x
super_block_idx = [1, (self.n_HG // 2), (self.n_HG - 1)]
for i in range(self.n_HG):
(out, out_inter) = getattr(self, ('HG_%d' % i))(out)
if (i in super_block_idx):
if (i == (self.n_HG - 1)):
sr_feature = (out.mul(0.2) + x)
else:
sr_feature = out_inter
if (super_block_idx.index(i) == 0):
srout_flat = self.upsample_flat(sr_feature)
flat_map = self.flat_map(sr_feature)
result.append(srout_flat)
elif (super_block_idx.index(i) == 1):
srout_edge = self.upsample_edge(sr_feature)
edge_map = self.edge_map(sr_feature)
result.append(srout_edge)
elif (super_block_idx.index(i) == 2):
srout_corner = self.upsample_corner(sr_feature)
corner_map = self.corner_map(sr_feature)
result.append(srout_corner)
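        # Fuse the three branch outputs: per color channel, a softmax across the (flat, edge, corner) maps yields pixel-wise mixing weights.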
(flat_r, flat_g, flat_b) = flat_map.split(split_size=1, dim=1)
(edge_r, edge_g, edge_b) = edge_map.split(split_size=1, dim=1)
(corner_r, corner_g, corner_b) = corner_map.split(split_size=1, dim=1)
r_map = torch.cat((flat_r, edge_r, corner_r), dim=1)
g_map = torch.cat((flat_g, edge_g, corner_g), dim=1)
b_map = torch.cat((flat_b, edge_b, corner_b), dim=1)
r_map = F.softmax(r_map, dim=1)
g_map = F.softmax(g_map, dim=1)
b_map = F.softmax(b_map, dim=1)
(flat_r, edge_r, corner_r) = r_map.split(split_size=1, dim=1)
(flat_g, edge_g, corner_g) = g_map.split(split_size=1, dim=1)
(flat_b, edge_b, corner_b) = b_map.split(split_size=1, dim=1)
flat_map = torch.cat((flat_r, flat_g, flat_b), dim=1)
edge_map = torch.cat((edge_r, edge_g, edge_b), dim=1)
corner_map = torch.cat((corner_r, corner_g, corner_b), dim=1)
srout = (((flat_map * srout_flat) + (edge_map * srout_edge)) + (corner_map * srout_corner))
result.append(srout)
SR_map.append(torch.mean(flat_map, dim=1, keepdim=True))
SR_map.append(torch.mean(edge_map, dim=1, keepdim=True))
SR_map.append(torch.mean(corner_map, dim=1, keepdim=True))
return (result, SR_map) |
def resolve_inout(input=None, output=None, files=None, overwrite=False, num_inputs=None):
resolved_output = (output or (files[(- 1)] if files else None))
if ((not overwrite) and resolved_output and os.path.exists(resolved_output)):
raise FileOverwriteError("file exists and won't be overwritten without use of the `--overwrite` option.")
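    # Inputs: the explicit `input` if given; otherwise all positional files, dropping the last one when it doubles as the output.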
resolved_inputs = ([input] if input else (([] + list(files[:((- 1) if (not output) else None)])) if files else []))
if (num_inputs is not None):
if (len(resolved_inputs) < num_inputs):
raise click.BadParameter('Insufficient inputs')
elif (len(resolved_inputs) > num_inputs):
raise click.BadParameter('Too many inputs')
return (resolved_output, resolved_inputs) |
def pack_dbobj(item):
_init_globals()
obj = item
natural_key = _FROM_MODEL_MAP[(hasattr(obj, 'id') and hasattr(obj, 'db_date_created') and hasattr(obj, '__dbclass__') and obj.__dbclass__.__name__.lower())]
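    # Pack as ("__packed_dbobj__", natural_key, creation-date string, id); fall back to the raw item when no natural key is found.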
return ((natural_key and ('__packed_dbobj__', natural_key, _TO_DATESTRING(obj), _GA(obj, 'id'))) or item) |
def get_datasets(input_size: int, split_cache_path='split_cache.pkl'):
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
train_transform = transforms.Compose([transforms.Resize((input_size, input_size)), transforms.ToTensor(), transforms.Normalize(mean, std)])
test_transform = transforms.Compose([transforms.Resize((input_size, input_size)), transforms.ToTensor(), transforms.Normalize(mean, std)])
full_dataset = (datasets.StanfordCars(root=dataset_path, split='train', download=True) + datasets.StanfordCars(root=dataset_path, split='test', download=True))
(train_indices, test_indices) = (None, None)
if ((not split_cache_path) or (not os.path.exists(split_cache_path))):
train_categories = np.random.choice(a=196, size=(196 // 2), replace=False)
labels_list = np.array([label for (_, label) in tqdm.tqdm(full_dataset)])
labels_mask = np.isin(labels_list, train_categories)
train_indices = np.argwhere(labels_mask).squeeze()
test_indices = np.argwhere(np.logical_not(labels_mask)).squeeze()
if ((train_indices is None) or (test_indices is None)):
(train_indices, test_indices) = pickle.load(open(split_cache_path, 'rb'))
else:
pickle.dump((train_indices, test_indices), open(split_cache_path, 'wb'))
train_dataset = CarsDataset(Subset(full_dataset, train_indices), transform=train_transform)
test_dataset = CarsDataset(Subset(full_dataset, test_indices), transform=test_transform)
return (train_dataset, test_dataset) |
class FennelLexer(RegexLexer):
name = 'Fennel'
    url = 'https://fennel-lang.org'
aliases = ['fennel', 'fnl']
filenames = ['*.fnl']
version_added = '2.3'
special_forms = ('#', '%', '*', '+', '-', '->', '->>', '-?>', '-?>>', '.', '..', '/', '//', ':', '<', '<=', '=', '>', '>=', '?.', '^', 'accumulate', 'and', 'band', 'bnot', 'bor', 'bxor', 'collect', 'comment', 'do', 'doc', 'doto', 'each', 'eval-compiler', 'for', 'hashfn', 'icollect', 'if', 'import-macros', 'include', 'length', 'let', 'lshift', 'lua', 'macrodebug', 'match', 'not', 'not=', 'or', 'partial', 'pick-args', 'pick-values', 'quote', 'require-macros', 'rshift', 'set', 'set-forcibly!', 'tset', 'values', 'when', 'while', 'with-open', '~=')
    declarations = ('fn', 'global', 'lambda', 'local', 'macro', 'macros', 'var', 'λ')
builtins = ('_G', '_VERSION', 'arg', 'assert', 'bit32', 'collectgarbage', 'coroutine', 'debug', 'dofile', 'error', 'getfenv', 'getmetatable', 'io', 'ipairs', 'load', 'loadfile', 'loadstring', 'math', 'next', 'os', 'package', 'pairs', 'pcall', 'print', 'rawequal', 'rawget', 'rawlen', 'rawset', 'require', 'select', 'setfenv', 'setmetatable', 'string', 'table', 'tonumber', 'tostring', 'type', 'unpack', 'xpcall')
valid_name = '[a-zA-Z_!$%&*+/:<=>?^~|-][\\w!$%&*+/:<=>?^~|\\.-]*'
tokens = {'root': [(';.*$', Comment.Single), (',+', Text), ('\\s+', Whitespace), ('-?\\d+\\.\\d+', Number.Float), ('-?\\d+', Number.Integer), ('"(|\\\\[^\\\\]|[^"\\\\])*"', String), ('(true|false|nil)', Name.Constant), ((':' + valid_name), String.Symbol), (words(special_forms, suffix=' '), Keyword), (words(declarations, suffix=' '), Keyword.Declaration), (words(builtins, suffix=' '), Name.Builtin), ('\\.\\.\\.', Name.Variable), (valid_name, Name.Variable), ('(\\(|\\))', Punctuation), ('(\\[|\\])', Punctuation), ('(\\{|\\})', Punctuation), ('#', Punctuation)]} |
class ExperimentPlanner3D_v21_customTargetSpacing_2x2x2(ExperimentPlanner3D_v21):
def __init__(self, folder_with_cropped_data, preprocessed_output_folder):
        super(ExperimentPlanner3D_v21_customTargetSpacing_2x2x2, self).__init__(folder_with_cropped_data, preprocessed_output_folder)
self.data_identifier = 'nnFormerData_plans_v2.1_trgSp_2x2x2'
self.plans_fname = join(self.preprocessed_output_folder, 'nnFormerPlansv2.1_trgSp_2x2x2_plans_3D.pkl')
def get_target_spacing(self):
return np.array([2.0, 2.0, 2.0]) |
def activation(act_type, inplace=True, neg_slope=0.05, n_prelu=1):
act_type = act_type.lower()
if (act_type == 'relu'):
layer = nn.ReLU(inplace)
elif (act_type == 'lrelu'):
layer = nn.LeakyReLU(neg_slope, inplace)
elif (act_type == 'prelu'):
layer = nn.PReLU(num_parameters=n_prelu, init=neg_slope)
else:
raise NotImplementedError('activation layer [{:s}] is not found'.format(act_type))
return layer |
class While(_base_nodes.MultiLineWithElseBlockNode, _base_nodes.Statement):
_astroid_fields = ('test', 'body', 'orelse')
_multi_line_block_fields = ('body', 'orelse')
test: NodeNG
body: list[NodeNG]
orelse: list[NodeNG]
def postinit(self, test: NodeNG, body: list[NodeNG], orelse: list[NodeNG]) -> None:
self.test = test
self.body = body
self.orelse = orelse
    @cached_property
def blockstart_tolineno(self):
return self.test.tolineno
def block_range(self, lineno: int) -> tuple[(int, int)]:
return self._elsed_block_range(lineno, self.orelse)
def get_children(self):
(yield self.test)
(yield from self.body)
(yield from self.orelse)
def _get_yield_nodes_skip_functions(self):
(yield from self.test._get_yield_nodes_skip_functions())
(yield from super()._get_yield_nodes_skip_functions())
def _get_yield_nodes_skip_lambdas(self):
(yield from self.test._get_yield_nodes_skip_lambdas())
(yield from super()._get_yield_nodes_skip_lambdas()) |
def require_deepspeed_aio(test_case):
if (not is_deepspeed_available()):
return unittest.skip('test requires deepspeed')(test_case)
import deepspeed
from deepspeed.ops.aio import AsyncIOBuilder
if (not deepspeed.ops.__compatible_ops__[AsyncIOBuilder.NAME]):
return unittest.skip('test requires deepspeed async-io')(test_case)
else:
return test_case |
@dataclasses.dataclass(frozen=True)
class StandardPickupDefinition(JsonDataclass):
game: RandovaniaGame = dataclasses.field(metadata={'init_from_extra': True})
name: str = dataclasses.field(metadata={'init_from_extra': True})
pickup_category: PickupCategory = dataclasses.field(metadata={'init_from_extra': True})
broad_category: PickupCategory = dataclasses.field(metadata={'init_from_extra': True})
model_name: str
offworld_models: frozendict[(RandovaniaGame, str)]
progression: tuple[(str, ...)]
default_shuffled_count: int
default_starting_count: int
preferred_location_category: LocationCategory
ammo: tuple[(str, ...)] = dataclasses.field(default_factory=tuple, metadata=EXCLUDE_DEFAULT)
unlocks_ammo: bool = dataclasses.field(default=False, metadata=EXCLUDE_DEFAULT)
additional_resources: frozendict[(str, int)] = dataclasses.field(default_factory=frozendict, metadata=EXCLUDE_DEFAULT)
hide_from_gui: bool = dataclasses.field(default=False, metadata=EXCLUDE_DEFAULT)
must_be_starting: bool = dataclasses.field(default=False, metadata=EXCLUDE_DEFAULT)
original_location: (PickupIndex | None) = dataclasses.field(default=None, metadata=EXCLUDE_DEFAULT)
probability_offset: float = dataclasses.field(default=0.0, metadata=EXCLUDE_DEFAULT)
probability_multiplier: float = dataclasses.field(default=1.0, metadata=EXCLUDE_DEFAULT)
description: (str | None) = dataclasses.field(default=None, metadata=EXCLUDE_DEFAULT)
extra: frozendict = dataclasses.field(default_factory=frozendict, metadata=EXCLUDE_DEFAULT)
def __post_init__(self) -> None:
if ((not self.progression) and (not self.ammo)):
raise ValueError(f'Standard Pickup {self.name} has no progression nor ammo.')
    @classmethod
    def from_json_with_categories(cls, name: str, game: RandovaniaGame, pickup_categories: dict[(str, PickupCategory)], value: dict) -> Self:
return cls.from_json(value, game=game, name=name, pickup_category=pickup_categories[value['pickup_category']], broad_category=pickup_categories[value['broad_category']])
    @property
    def as_json(self) -> dict:
        return {'pickup_category': self.pickup_category.name, 'broad_category': self.broad_category.name, **super().as_json} |
def get_survey_project_request_handler(request_type: str) -> Callable:
handlers_dict = {'formEventMapping': handle_simple_project_form_event_mapping_request, 'metadata': handle_simple_project_metadata_request, 'record': handle_survey_project_records_request}
return handlers_dict[request_type] |
class FeatureListNet(FeatureDictNet):
def __init__(self, model, out_indices=(0, 1, 2, 3, 4), out_map=None, feature_concat=False, flatten_sequential=False):
super(FeatureListNet, self).__init__(model, out_indices=out_indices, out_map=out_map, feature_concat=feature_concat, flatten_sequential=flatten_sequential)
def forward(self, x) -> List[torch.Tensor]:
return list(self._collect(x).values()) |
def simplify_responses(responses):
def unpack_multi(responses):
for resp in responses:
if isinstance(resp, MultiplyResponse):
for sub in unpack_multi(resp.responses):
(yield sub)
else:
(yield resp)
def cancel_pzs(poles, zeros):
poles_new = []
zeros_new = list(zeros)
for p in poles:
try:
zeros_new.pop(zeros_new.index(p))
except ValueError:
poles_new.append(p)
return (poles_new, zeros_new)
def combine_pzs(responses):
poles = []
zeros = []
constant = 1.0
out = []
for resp in responses:
if isinstance(resp, PoleZeroResponse):
poles.extend(resp.poles)
zeros.extend(resp.zeros)
constant *= resp.constant
else:
out.append(resp)
(poles, zeros) = cancel_pzs(poles, zeros)
if (poles or zeros):
out.insert(0, PoleZeroResponse(poles=poles, zeros=zeros, constant=constant))
elif (constant != 1.0):
out.insert(0, Gain(constant=constant))
return out
def split(xs, condition):
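        # condition(x) is used as a 0/1 tuple index: falsy items land in out[0], truthy items in out[1].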
out = ([], [])
for x in xs:
out[condition(x)].append(x)
return out
def combine_gains(responses):
(non_scalars, scalars) = split(responses, (lambda resp: resp.is_scalar()))
if scalars:
factor = num.prod([resp.get_scalar() for resp in scalars])
(yield Gain(constant=factor))
for resp in non_scalars:
(yield resp)
return list(combine_gains(combine_pzs(unpack_multi(responses)))) |
@dataclass(frozen=True)
class EventPaymentReceivedSuccess(Event):
token_network_registry_address: TokenNetworkRegistryAddress
token_network_address: TokenNetworkAddress
identifier: PaymentID
amount: PaymentAmount
initiator: InitiatorAddress
def __post_init__(self) -> None:
if (self.amount < 0):
raise ValueError('transferred_amount cannot be negative')
if (self.amount > UINT256_MAX):
raise ValueError('transferred_amount is too large')
def __repr__(self) -> str:
return f'{self.__class__.__name__}< token_network_address: {to_checksum_address(self.token_network_address)} identifier: {self.identifier} amount: {self.amount} initiator: {to_checksum_address(self.initiator)} >' |
def PQDescPath(coll, f, lcs):
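    # f is evaluated as a key expression against the caller's locals; '_' means match any key.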
f = (eval(f, globals(), lcs) if (f != '_') else None)
stack = []
if isList(coll):
stack = [i for i in flatten(coll)]
elif isMap(coll):
stack = [map_tuple(k, v) for (k, v) in coll.items()]
while stack:
i = stack.pop()
if isinstance(i, map_tuple):
if (f is None):
(yield i.value)
elif (f and (i.key == f)):
(yield i.value)
i = i.value
if isList(i):
it = iter(i)
frst = next(it)
[stack.append(j) for j in it]
if isList(frst):
stack.extend([ci for ci in frst])
elif isMap(frst):
stack.extend([map_tuple(k, v) for (k, v) in frst.items()])
elif isMap(i):
keys = list(i.keys())
[stack.append(map_tuple(j, i[j])) for j in keys] |
def formatv(fwid, plusstr, pkstr, thresh, val):
    # Clamp tiny magnitudes to zero before formatting.
    if (abs(val) < thresh):
        val1 = 0.0
    else:
        val1 = val
    s = (pkstr % val1).strip()
    # Normalize a negative zero ("-0.000...") to its unsigned form.
    if re.match('^-0\\.0*$', s):
        s = s[1:]
    if (s[0] != '-'):
        s = (plusstr + s)
    # Fortran-style exponent marker.
    s = s.replace('e', 'D')
    return (fwid % s) |
@pytest.mark.skipif((not dependencies.lvm.is_available), reason='lvm not available')
def test_lvm_mount():
parser = ImageParser([fullpath('images/lvm.raw')])
volumes = []
for v in parser.init():
volumes.append(v)
assert (len(volumes) == 2)
assert (volumes[0].mountpoint is not None)
assert (volumes[0].flag == 'alloc')
assert (volumes[0].filesystem.type == 'ext')
assert (volumes[0].index == '0.0')
parser.force_clean() |
class MypycNativeIntTests(TestCase):
def test_construction(self):
for native_int in native_int_types:
self.assert_same(native_int(), 0)
self.assert_same(native_int(0), 0)
self.assert_same(native_int(1), 1)
self.assert_same(native_int((- 3)), (- 3))
self.assert_same(native_int((2 ** 64)), (2 ** 64))
self.assert_same(native_int((- (2 ** 64))), (- (2 ** 64)))
self.assert_same(native_int(1.234), 1)
self.assert_same(native_int(2.634), 2)
self.assert_same(native_int((- 1.234)), (- 1))
self.assert_same(native_int((- 2.634)), (- 2))
self.assert_same(native_int('0'), 0)
self.assert_same(native_int('123'), 123)
self.assert_same(native_int('abc', 16), 2748)
self.assert_same(native_int('-101', base=2), (- 5))
def test_isinstance(self):
for native_int in native_int_types:
assert isinstance(0, native_int)
assert isinstance(1234, native_int)
assert isinstance(True, native_int)
assert (not isinstance(1.0, native_int))
def test_docstring(self):
for native_int in native_int_types:
assert native_int.__doc__
def assert_same(self, x, y):
assert (type(x) is type(y))
assert (x == y) |
def _check_shape(name, M, n, m, square=False, symmetric=False):
if (square and (M.shape[0] != M.shape[1])):
raise ControlDimension(('%s must be a square matrix' % name))
if (symmetric and (not _is_symmetric(M))):
raise ControlArgument(('%s must be a symmetric matrix' % name))
if ((M.shape[0] != n) or (M.shape[1] != m)):
raise ControlDimension(('Incompatible dimensions of %s matrix' % name)) |
class FancyFormatter():
def __init__(self, f_out: IO[str], f_err: IO[str], hide_error_codes: bool) -> None:
self.hide_error_codes = hide_error_codes
if (sys.platform not in ('linux', 'darwin', 'win32', 'emscripten')):
self.dummy_term = True
return
if ((not should_force_color()) and ((not f_out.isatty()) or (not f_err.isatty()))):
self.dummy_term = True
return
if (sys.platform == 'win32'):
self.dummy_term = (not self.initialize_win_colors())
elif (sys.platform == 'emscripten'):
self.dummy_term = (not self.initialize_vt100_colors())
else:
self.dummy_term = (not self.initialize_unix_colors())
if (not self.dummy_term):
self.colors = {'red': self.RED, 'green': self.GREEN, 'blue': self.BLUE, 'yellow': self.YELLOW, 'none': ''}
def initialize_vt100_colors(self) -> bool:
assert (sys.platform in ('win32', 'emscripten'))
self.BOLD = '\x1b[1m'
self.UNDER = '\x1b[4m'
self.BLUE = '\x1b[94m'
self.GREEN = '\x1b[92m'
self.RED = '\x1b[91m'
self.YELLOW = '\x1b[93m'
self.NORMAL = '\x1b[0m'
self.DIM = '\x1b[2m'
return True
def initialize_win_colors(self) -> bool:
assert (sys.platform == 'win32')
if (sys.platform == 'win32'):
winver = sys.getwindowsversion()
if ((winver.major < MINIMUM_WINDOWS_MAJOR_VT100) or (winver.build < MINIMUM_WINDOWS_BUILD_VT100)):
return False
import ctypes
kernel32 = ctypes.windll.kernel32
ENABLE_PROCESSED_OUTPUT = 1
ENABLE_WRAP_AT_EOL_OUTPUT = 2
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4
STD_OUTPUT_HANDLE = (- 11)
kernel32.SetConsoleMode(kernel32.GetStdHandle(STD_OUTPUT_HANDLE), ((ENABLE_PROCESSED_OUTPUT | ENABLE_WRAP_AT_EOL_OUTPUT) | ENABLE_VIRTUAL_TERMINAL_PROCESSING))
self.initialize_vt100_colors()
return True
return False
def initialize_unix_colors(self) -> bool:
if ((sys.platform == 'win32') or (not CURSES_ENABLED)):
return False
try:
try:
fd = sys.stdout.fileno()
except io.UnsupportedOperation:
with open('/dev/null', 'rb') as f:
curses.setupterm(fd=f.fileno())
else:
curses.setupterm(fd=fd)
except curses.error:
return False
bold = curses.tigetstr('bold')
under = curses.tigetstr('smul')
set_color = curses.tigetstr('setaf')
set_eseq = curses.tigetstr('cup')
normal = curses.tigetstr('sgr0')
if (not (bold and under and set_color and set_eseq and normal)):
return False
self.NORMAL = normal.decode()
self.BOLD = bold.decode()
self.UNDER = under.decode()
self.DIM = parse_gray_color(set_eseq)
self.BLUE = curses.tparm(set_color, curses.COLOR_BLUE).decode()
self.GREEN = curses.tparm(set_color, curses.COLOR_GREEN).decode()
self.RED = curses.tparm(set_color, curses.COLOR_RED).decode()
self.YELLOW = curses.tparm(set_color, curses.COLOR_YELLOW).decode()
return True
def style(self, text: str, color: Literal[('red', 'green', 'blue', 'yellow', 'none')], bold: bool=False, underline: bool=False, dim: bool=False) -> str:
if self.dummy_term:
return text
if bold:
start = self.BOLD
else:
start = ''
if underline:
start += self.UNDER
if dim:
start += self.DIM
return (((start + self.colors[color]) + text) + self.NORMAL)
def fit_in_terminal(self, messages: list[str], fixed_terminal_width: (int | None)=None) -> list[str]:
width = (fixed_terminal_width or get_terminal_width())
new_messages = messages.copy()
for (i, error) in enumerate(messages):
if (': error:' in error):
(loc, msg) = error.split('error:', maxsplit=1)
msg = soft_wrap(msg, width, first_offset=(len(loc) + len('error: ')))
new_messages[i] = ((loc + 'error:') + msg)
if (error.startswith((' ' * DEFAULT_SOURCE_OFFSET)) and ('^' not in error)):
error = error[DEFAULT_SOURCE_OFFSET:]
marker_line = messages[(i + 1)]
marker_column = marker_line.index('^')
column = (marker_column - DEFAULT_SOURCE_OFFSET)
if ('~' not in marker_line):
marker = '^'
else:
marker = marker_line[marker_column:(marker_line.rindex('~') + 1)]
max_len = ((width - DEFAULT_SOURCE_OFFSET) - 6)
(source_line, offset) = trim_source_line(error, max_len, column, MINIMUM_WIDTH)
new_messages[i] = ((' ' * DEFAULT_SOURCE_OFFSET) + source_line)
new_marker_line = ((' ' * ((DEFAULT_SOURCE_OFFSET + column) - offset)) + marker)
if ((len(new_marker_line) > len(new_messages[i])) and (len(marker) > 3)):
new_marker_line = (new_marker_line[:(len(new_messages[i]) - 3)] + '...')
new_messages[(i + 1)] = new_marker_line
return new_messages
def colorize(self, error: str) -> str:
if (': error:' in error):
(loc, msg) = error.split('error:', maxsplit=1)
if self.hide_error_codes:
return ((loc + self.style('error:', 'red', bold=True)) + self.highlight_quote_groups(msg))
codepos = msg.rfind('[')
if (codepos != (- 1)):
code = msg[codepos:]
msg = msg[:codepos]
else:
code = ''
return (((loc + self.style('error:', 'red', bold=True)) + self.highlight_quote_groups(msg)) + self.style(code, 'yellow'))
elif (': note:' in error):
(loc, msg) = error.split('note:', maxsplit=1)
formatted = self.highlight_quote_groups(self.underline_link(msg))
return ((loc + self.style('note:', 'blue')) + formatted)
elif error.startswith((' ' * DEFAULT_SOURCE_OFFSET)):
if ('^' not in error):
return self.style(error, 'none', dim=True)
return self.style(error, 'red')
else:
return error
def highlight_quote_groups(self, msg: str) -> str:
if (msg.count('"') % 2):
return msg
parts = msg.split('"')
out = ''
for (i, part) in enumerate(parts):
if ((i % 2) == 0):
out += self.style(part, 'none')
else:
out += self.style((('"' + part) + '"'), 'none', bold=True)
return out
def underline_link(self, note: str) -> str:
        match = re.search('https?://\\S*', note)
if (not match):
return note
start = match.start()
end = match.end()
return ((note[:start] + self.style(note[start:end], 'none', underline=True)) + note[end:])
def format_success(self, n_sources: int, use_color: bool=True) -> str:
msg = f'Success: no issues found in {n_sources} source file{plural_s(n_sources)}'
if (not use_color):
return msg
return self.style(msg, 'green', bold=True)
def format_error(self, n_errors: int, n_files: int, n_sources: int, *, blockers: bool=False, use_color: bool=True) -> str:
msg = f'Found {n_errors} error{plural_s(n_errors)} in {n_files} file{plural_s(n_files)}'
if blockers:
msg += ' (errors prevented further checking)'
else:
msg += f' (checked {n_sources} source file{plural_s(n_sources)})'
if (not use_color):
return msg
return self.style(msg, 'red', bold=True) |
def pass_orin_nano(engine):
return [add_engine_in_list('APE', engine, 'APE', 'APE'), (add_engine_in_list('NVENC', engine, 'NVENC', 'NVENC') + add_engine_in_list('NVDEC', engine, 'NVDEC', 'NVDEC')), (add_engine_in_list('NVJPG', engine, 'NVJPG', 'NVJPG') + add_engine_in_list('NVJPG1', engine, 'NVJPG', 'NVJPG1')), (add_engine_in_list('SE', engine, 'SE', 'SE') + add_engine_in_list('VIC', engine, 'VIC', 'VIC'))] |
def parse_arguments():
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--data', help='path to dataset base directory', default='dataset/')
parser.add_argument('--optimizer', help='Which optimizer to use', default='sgd')
parser.add_argument('--set', help='name of dataset', type=str, default='ImageNet')
parser.add_argument('-a', '--arch', metavar='ARCH', default='ResNet18', help='model architecture')
parser.add_argument('--config', help='Config file to use (see configs dir)', default=None)
parser.add_argument('--log-dir', help='Where to save the runs. If None use ./runs', default=None)
parser.add_argument('-j', '--workers', default=20, type=int, metavar='N', help='number of data loading workers (default: 20)')
parser.add_argument('--epochs', default=90, type=int, metavar='N', help='number of total epochs to run')
parser.add_argument('--start-epoch', default=None, type=int, metavar='N', help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N', help='mini-batch size (default: 256), this is the total batch size of all GPUs on the current node when using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', default=0.006, type=float, help='initial learning rate')
parser.add_argument('--warmup_length', default=0, type=int, help='Number of warmup iterations')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')
parser.add_argument('--wd', '--weight-decay', default=0.0001, type=float, metavar='W', help='weight decay (default: 1e-4)', dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int, metavar='N', help='print frequency (default: 10)')
parser.add_argument('--num-classes', default=10, type=int)
parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('--resume_train_weights', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', default=None, type=str, help='use pre-trained model')
parser.add_argument('--seed', default=None, type=int, help='seed for initializing training. ')
parser.add_argument('--multigpu', default=None, type=(lambda x: [int(a) for a in x.split(',')]), help='Which GPUs to use for multigpu training')
parser.add_argument('--lr-policy', default='cosine_lr', help='Policy for the learning rate.')
parser.add_argument('--lr-adjust', default=30, type=int, help='Interval to drop lr')
    parser.add_argument('--lr-gamma', default=0.1, type=float, help='Multistep multiplier')
parser.add_argument('--name', default=None, type=str, help='Experiment name to append to filepath')
parser.add_argument('--save-every', default=(- 1), type=int, help='Save every ___ epochs')
parser.add_argument('--prune-rate', default=0.0, help='Amount of pruning to do during sparse training', type=float)
parser.add_argument('--pr_start', default=1.0, help='Amount of pruning rate for start', type=float)
parser.add_argument('--low-data', default=1, help='Amount of data to use', type=float)
parser.add_argument('--width-mult', default=1.0, help='How much to vary the width of the network.', type=float)
parser.add_argument('--nesterov', default=False, action='store_true', help='Whether or not to use nesterov for SGD')
parser.add_argument('--random-subnet', action='store_true', help='Whether or not to use a random subnet when fine tuning for lottery experiments')
parser.add_argument('--one-batch', action='store_true', help='One batch train set for debugging purposes (test overfitting)')
parser.add_argument('--conv-type', type=str, default=None, help='What kind of sparsity to use')
parser.add_argument('--freeze-weights', action='store_true', help='Whether or not to train only subnet (this freezes weights)')
parser.add_argument('--mode', default='fan_in', help='Weight initialization mode')
parser.add_argument('--nonlinearity', default='relu', help='Nonlinearity used by initialization')
parser.add_argument('--bn-type', default=None, help='BatchNorm type')
parser.add_argument('--init', default='kaiming_normal', help='Weight initialization modifications')
parser.add_argument('--no-bn-decay', action='store_true', default=False, help='No batchnorm decay')
parser.add_argument('--scale-fan', action='store_true', default=False, help='scale fan')
parser.add_argument('--first-layer-dense', action='store_true', help='First layer dense or sparse')
parser.add_argument('--last-layer-dense', action='store_true', help='Last layer dense or sparse')
parser.add_argument('--label-smoothing', type=float, help='Label smoothing to use, default 0.0', default=None)
parser.add_argument('--first-layer-type', type=str, default=None, help='Conv type of first layer')
parser.add_argument('--trainer', type=str, default='default', help='cs, ss, or standard training')
parser.add_argument('--score-init-constant', type=float, default=1)
parser.add_argument('--K', type=int, default=2, help='Sample K nets')
parser.add_argument('--T', type=float, default=1, help='Temperature Annealing Parameter')
parser.add_argument('--TA', default=True, action='store_true', help='Temperature Annealing')
parser.add_argument('--init_weights', type=str, default='', help='init weights loc')
parser.add_argument('--use_running_stats', default=False, action='store_true', help='Whether use bn running stats')
parser.add_argument('--iterative', default=True, action='store_true', help='Whether use iterative pruning')
parser.add_argument('--train_weights_at_the_same_time', default=True, action='store_true', help='Whether train_weights at the same time')
parser.add_argument('--sample_from_training_set', default=True, action='store_true', help='Whether sample from training set')
parser.add_argument('--finetune', default=True, action='store_true', help='Whether finetune')
parser.add_argument('--weight_opt_lr', type=float, default=0.1, help='lr for weight training at the same time')
parser.add_argument('--ts', type=float, default=0.16, help='ts')
parser.add_argument('--te', type=float, default=0.6, help='te')
args = parser.parse_args()
if (len(sys.argv) > 1):
get_config(args)
return args |
@cachier(backend='memory', stale_after=timedelta(seconds=1), next_time=True)
def _error_throwing_func(arg1):
if (not hasattr(_error_throwing_func, 'count')):
_error_throwing_func.count = 0
_error_throwing_func.count += 1
if (_error_throwing_func.count > 1):
raise ValueError('Tiny Rick!')
return 7 |
@register_criterion('cross_entropy')
class CrossEntropyCriterion(FairseqCriterion):
def __init__(self, args, task):
super().__init__(args, task)
def forward(self, model, sample, reduce=True):
net_output = model(**sample['net_input'])
(loss, _, sample_status) = self.compute_loss(model, net_output, sample, reduce=reduce)
sample_size = (sample['target'].size(0) if self.args.sentence_avg else sample['ntokens'])
logging_output = {'loss': (utils.item(loss.data) if reduce else loss.data), 'nll_loss': (utils.item(loss.data) if reduce else loss.data), 'ntokens': sample['ntokens'], 'nsentences': sample['target'].size(0), 'sample_size': sample_size, 'sample_status': sample_status}
return (loss, sample_size, logging_output)
def compute_loss(self, model, net_output, sample, reduce=True):
lprobs = model.get_normalized_probs(net_output, log_probs=True)
origin_target = model.get_targets(sample, net_output)
sample_status = None
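        # At eval time, collect the per-sample log-prob of each target token (padding positions filled with 2.0, an impossible log-prob, as a sentinel), keyed by sample id.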
if (not model.training):
_lprobs = lprobs
_target = origin_target.unsqueeze((- 1))
_pad_mask = _target.eq(self.padding_idx)
target_lprob = _lprobs.gather(dim=(- 1), index=_target)
mtarget_lprob = target_lprob.masked_fill(_pad_mask, 2.0)
sample_lprob_list = mtarget_lprob.squeeze((- 1)).tolist()
sample_id_list = sample['id'].squeeze().tolist()
sample_status = list(zip(sample_id_list, sample_lprob_list))
lprobs = lprobs.view((- 1), lprobs.size((- 1)))
target = origin_target.view((- 1))
loss = F.nll_loss(lprobs, target, ignore_index=self.padding_idx, reduction=('sum' if reduce else 'none'))
return (loss, loss, sample_status)
    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
loss_sum = sum((log.get('loss', 0) for log in logging_outputs))
ntokens = sum((log.get('ntokens', 0) for log in logging_outputs))
nsentences = sum((log.get('nsentences', 0) for log in logging_outputs))
sample_size = sum((log.get('sample_size', 0) for log in logging_outputs))
sample_status = [log.get('sample_status', 0) for log in logging_outputs]
agg_output = {'loss': (((loss_sum / sample_size) / math.log(2)) if (sample_size > 0) else 0.0), 'ntokens': ntokens, 'nsentences': nsentences, 'sample_size': sample_size, 'sample_status': sample_status}
if (sample_size != ntokens):
agg_output['nll_loss'] = ((loss_sum / ntokens) / math.log(2))
return agg_output |
@pytest.mark.end_to_end()
@pytest.mark.parametrize('node_def', ["PathNode(path=Path('file.txt'))", "Path('file.txt')"])
def test_return_with_task_decorator(runner, tmp_path, node_def):
source = f'''
from pathlib import Path
from typing_extensions import Annotated
from pytask import task, PathNode
@task(produces={node_def})
def task_example():
return "Hello, World!"
'''
tmp_path.joinpath('task_module.py').write_text(textwrap.dedent(source))
result = runner.invoke(cli, [tmp_path.as_posix()])
assert (result.exit_code == ExitCode.OK)
assert (tmp_path.joinpath('file.txt').read_text() == 'Hello, World!') |
@admin.register(models.Proposal)
class ProposalAdmin(TimeAuditAdmin, SimpleHistoryAdmin, ExportMixin):
list_display = ('proposal_info', 'author_info', 'author_email', 'conference', 'status', 'review_status')
list_filter = ['proposal_section__name', 'proposal_type', 'target_audience', 'conference', 'status', 'review_status']
formfield_overrides = {TextField: {'widget': AdminPagedownWidget}}
actions = ['export_as_csv']
def proposal_info(self, obj):
return ('%s (%s)' % (obj.title, obj.proposal_type))
def author_email(self, obj):
if obj.author:
return obj.author.email
def author_info(self, obj):
if obj.author:
return ('%s (%s)' % (obj.author.get_full_name(), obj.author.username))
def get_queryset(self, request):
qs = super(ProposalAdmin, self).get_queryset(request)
if request.user.is_superuser:
return qs
moderators = service.list_conference_moderator(user=request.user)
return qs.filter(conference__in=[m.conference for m in moderators]) |
@pytest.mark.parametrize('screenshot_manager', [{}, {'type': 'box'}, {'type': 'line'}, {'type': 'line', 'line_width': 1}, {'start_pos': 'top'}], indirect=True)
def ss_hddgraph(screenshot_manager):
widget = screenshot_manager.c.widget['hddgraph']
widget.eval(f'self.values={values}')
widget.eval('self.maxvalue=400')
widget.eval('self.draw()')
screenshot_manager.take_screenshot() |
class SchemaValidator(KeywordValidator):
def __init__(self, registry: 'KeywordValidatorRegistry'):
super().__init__(registry)
self.schema_ids_registry: Optional[List[int]] = []
    @property
    def default_validator(self) -> ValueValidator:
return cast(ValueValidator, self.registry['default'])
def __call__(self, schema: SchemaPath, require_properties: bool=True) -> Iterator[ValidationError]:
if (not hasattr(schema.content(), '__getitem__')):
return
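        # Track visited schema object ids to avoid infinite recursion on cyclic or self-referencing schemas.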
assert (self.schema_ids_registry is not None)
schema_id = id(schema.content())
if (schema_id in self.schema_ids_registry):
return
self.schema_ids_registry.append(schema_id)
nested_properties = []
if ('allOf' in schema):
all_of = (schema / 'allOf')
for inner_schema in all_of:
(yield from self(inner_schema, require_properties=False))
if ('properties' not in inner_schema):
continue
inner_schema_props = (inner_schema / 'properties')
inner_schema_props_keys = inner_schema_props.keys()
nested_properties += list(inner_schema_props_keys)
if ('anyOf' in schema):
any_of = (schema / 'anyOf')
for inner_schema in any_of:
(yield from self(inner_schema, require_properties=False))
if ('oneOf' in schema):
one_of = (schema / 'oneOf')
for inner_schema in one_of:
(yield from self(inner_schema, require_properties=False))
if ('not' in schema):
not_schema = (schema / 'not')
(yield from self(not_schema, require_properties=False))
if ('items' in schema):
array_schema = (schema / 'items')
(yield from self(array_schema, require_properties=False))
if ('properties' in schema):
props = (schema / 'properties')
for (_, prop_schema) in props.items():
(yield from self(prop_schema, require_properties=False))
required = schema.getkey('required', [])
properties = schema.get('properties', {}).keys()
if ('allOf' in schema):
extra_properties = list(((set(required) - set(properties)) - set(nested_properties)))
else:
extra_properties = list((set(required) - set(properties)))
if (extra_properties and require_properties):
(yield ExtraParametersError(f'Required list has not defined properties: {extra_properties}'))
if ('default' in schema):
default = schema['default']
nullable = schema.get('nullable', False)
if ((default is not None) or (nullable is not True)):
(yield from self.default_validator(schema, default)) |
def load_mnist():
(train, test) = tf.keras.datasets.mnist.load_data()
(train_data, train_labels) = train
(test_data, test_labels) = test
train_data = (np.array(train_data, dtype=np.float32) / 255)
test_data = (np.array(test_data, dtype=np.float32) / 255)
train_labels = np.array(train_labels, dtype=np.int32)
test_labels = np.array(test_labels, dtype=np.int32)
assert (train_data.min() == 0.0)
assert (train_data.max() == 1.0)
assert (test_data.min() == 0.0)
assert (test_data.max() == 1.0)
assert (train_labels.ndim == 1)
assert (test_labels.ndim == 1)
return (train_data, train_labels, test_data, test_labels) |
def test_build_with_multiple_readme_files(fixture_dir: FixtureDirGetter, tmp_path: Path, tmp_venv: VirtualEnv, command_tester_factory: CommandTesterFactory) -> None:
source_dir = fixture_dir('with_multiple_readme_files')
target_dir = (tmp_path / 'project')
shutil.copytree(str(source_dir), str(target_dir))
poetry = Factory().create_poetry(target_dir)
tester = command_tester_factory('build', poetry, environment=tmp_venv)
tester.execute()
build_dir = (target_dir / 'dist')
assert build_dir.exists()
sdist_file = (build_dir / 'my_package-0.1.tar.gz')
assert sdist_file.exists()
assert (sdist_file.stat().st_size > 0)
(wheel_file,) = build_dir.glob('my_package-0.1-*.whl')
assert wheel_file.exists()
assert (wheel_file.stat().st_size > 0)
with tarfile.open(sdist_file) as tf:
sdist_content = tf.getnames()
assert ('my_package-0.1/README-1.rst' in sdist_content)
assert ('my_package-0.1/README-2.rst' in sdist_content) |
class ReferenceFinder(mypy.mixedtraverser.MixedTraverserVisitor):
def __init__(self) -> None:
self.refs: set[str] = set()
def visit_block(self, block: Block) -> None:
if (not block.is_unreachable):
super().visit_block(block)
def visit_name_expr(self, e: NameExpr) -> None:
self.refs.add(e.name)
def visit_instance(self, t: Instance) -> None:
self.add_ref(t.type.name)
super().visit_instance(t)
def visit_unbound_type(self, t: UnboundType) -> None:
if t.name:
self.add_ref(t.name)
def visit_tuple_type(self, t: TupleType) -> None:
for item in t.items:
item.accept(self)
def visit_callable_type(self, t: CallableType) -> None:
for arg in t.arg_types:
arg.accept(self)
t.ret_type.accept(self)
def add_ref(self, fullname: str) -> None:
self.refs.add(fullname)
while ('.' in fullname):
fullname = fullname.rsplit('.', 1)[0]
self.refs.add(fullname) |
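# Illustrative sketch (an assumption, not from the source): collect referenced
# names from a parsed tree, where `tree` is a mypy.nodes.MypyFile obtained from
# a mypy build; note add_ref() also records every dotted prefix of a qualified name.
finder = ReferenceFinder()
tree.accept(finder)
print(sorted(finder.refs)) |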
class MountainCarEnv(gym.Env):
metadata = {'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 30}
def __init__(self):
self.min_position = (- 1.2)
self.max_position = 0.6
self.max_speed = 0.07
self.goal_position = 0.5
self.low = np.array([self.min_position, (- self.max_speed)])
self.high = np.array([self.max_position, self.max_speed])
self.viewer = None
self.action_space = spaces.Discrete(3)
self.observation_space = spaces.Box(self.low, self.high)
self._seed()
self.reset()
def _seed(self, seed=None):
(self.np_random, seed) = seeding.np_random(seed)
return [seed]
def _step(self, action):
assert self.action_space.contains(action), ('%r (%s) invalid' % (action, type(action)))
(position, velocity) = self.state
velocity += (((action - 1) * 0.001) + (math.cos((3 * position)) * (- 0.0025)))
velocity = np.clip(velocity, (- self.max_speed), self.max_speed)
position += velocity
position = np.clip(position, self.min_position, self.max_position)
if ((position == self.min_position) and (velocity < 0)):
velocity = 0
done = bool((position >= self.goal_position))
reward = (- 1.0)
self.state = (position, velocity)
return (np.array(self.state), reward, done, {})
def _reset(self):
self.state = np.array([self.np_random.uniform(low=(- 0.6), high=(- 0.4)), 0])
return np.array(self.state)
def _height(self, xs):
return ((np.sin((3 * xs)) * 0.45) + 0.55)
def _render(self, mode='human', close=False):
if close:
if (self.viewer is not None):
self.viewer.close()
self.viewer = None
return
screen_width = 600
screen_height = 400
world_width = (self.max_position - self.min_position)
scale = (screen_width / world_width)
carwidth = 40
carheight = 20
if (self.viewer is None):
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(screen_width, screen_height)
xs = np.linspace(self.min_position, self.max_position, 100)
ys = self._height(xs)
xys = list(zip(((xs - self.min_position) * scale), (ys * scale)))
self.track = rendering.make_polyline(xys)
self.track.set_linewidth(4)
self.viewer.add_geom(self.track)
clearance = 10
(l, r, t, b) = (((- carwidth) / 2), (carwidth / 2), carheight, 0)
car = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
car.add_attr(rendering.Transform(translation=(0, clearance)))
self.cartrans = rendering.Transform()
car.add_attr(self.cartrans)
self.viewer.add_geom(car)
frontwheel = rendering.make_circle((carheight / 2.5))
frontwheel.set_color(0.5, 0.5, 0.5)
frontwheel.add_attr(rendering.Transform(translation=((carwidth / 4), clearance)))
frontwheel.add_attr(self.cartrans)
self.viewer.add_geom(frontwheel)
backwheel = rendering.make_circle((carheight / 2.5))
backwheel.add_attr(rendering.Transform(translation=(((- carwidth) / 4), clearance)))
backwheel.add_attr(self.cartrans)
backwheel.set_color(0.5, 0.5, 0.5)
self.viewer.add_geom(backwheel)
flagx = ((self.goal_position - self.min_position) * scale)
flagy1 = (self._height(self.goal_position) * scale)
flagy2 = (flagy1 + 50)
flagpole = rendering.Line((flagx, flagy1), (flagx, flagy2))
self.viewer.add_geom(flagpole)
flag = rendering.FilledPolygon([(flagx, flagy2), (flagx, (flagy2 - 10)), ((flagx + 25), (flagy2 - 5))])
flag.set_color(0.8, 0.8, 0)
self.viewer.add_geom(flag)
pos = self.state[0]
self.cartrans.set_translation(((pos - self.min_position) * scale), (self._height(pos) * scale))
self.cartrans.set_rotation(math.cos((3 * pos)))
return self.viewer.render(return_rgb_array=(mode == 'rgb_array')) |
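# Minimal random-rollout sketch against the legacy gym API used above (where
# step() returns (observation, reward, done, info) and gym of that era
# forwards env.step/env.reset to the underscored _step/_reset methods).
env = MountainCarEnv()
obs = env.reset()
done = False
while (not done):
    (obs, reward, done, info) = env.step(env.action_space.sample()) |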
@pytest.mark.parametrize('selection_bitsize', [3, 4])
@pytest.mark.parametrize('target_bitsize', [3, 5, 6])
def test_arctan(selection_bitsize, target_bitsize):
gate = ArcTan(selection_bitsize, target_bitsize)
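    # For each selection value x, the target register should end up holding
    # y = -2*arctan(x)/pi encoded as a signed fixed-point number over
    # (target_bitsize + 1) bits: one sign bit followed by the magnitude bits.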
maps = {}
for x in range((2 ** selection_bitsize)):
inp = f'0b_{x:0{selection_bitsize}b}_0_{0:0{target_bitsize}b}'
y = (((- 2) * np.arctan(x)) / np.pi)
bits = [*iter_bits_fixed_point(y, (target_bitsize + 1), signed=True)]
(sign, y_bin) = (bits[0], bits[1:])
y_bin_str = ''.join((str(b) for b in y_bin))
out = f'0b_{x:0{selection_bitsize}b}_{sign}_{y_bin_str}'
maps[int(inp, 2)] = int(out, 2)
num_qubits = gate.num_qubits()
op = gate.on(*cirq.LineQubit.range(num_qubits))
circuit = cirq.Circuit(op)
cirq.testing.assert_equivalent_computational_basis_map(maps, circuit)
circuit += (op ** (- 1))
cirq.testing.assert_allclose_up_to_global_phase(circuit.unitary(), np.diag(([1] * (2 ** num_qubits))), atol=1e-08) |
class ChangeEmail(LoginRequiredMixin, PasswordConfirmMixin, FormView):
template_name = 'dictionary/user/preferences/email.html'
form_class = ChangeEmailForm
success_url = reverse_lazy('user_preferences')
def form_valid(self, form):
send_email_confirmation(self.request.user, form.cleaned_data.get('email1'))
notifications.info(self.request, _('your e-mail will be changed after the confirmation.'), extra_tags='persistent')
return redirect(self.success_url) |
def emit_yield(builder: IRBuilder, val: Value, line: int) -> Value:
retval = builder.coerce(val, builder.ret_types[(- 1)], line)
cls = builder.fn_info.generator_class
next_block = BasicBlock()
next_label = len(cls.continuation_blocks)
cls.continuation_blocks.append(next_block)
builder.assign(cls.next_label_target, Integer(next_label), line)
builder.add(Return(retval))
builder.activate_block(next_block)
add_raise_exception_blocks_to_generator_class(builder, line)
assert (cls.send_arg_reg is not None)
return cls.send_arg_reg |
def calc_face_dimensions(face):
horizontal_edges = filter_horizontal_edges(face.edges)
vertical_edges = filter_vertical_edges(face.edges)
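    # Assuming a quad-like face: the horizontal and vertical edge sets each
    # hold an opposite pair, so halving the summed lengths averages the pair.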
width = (sum((e.calc_length() for e in horizontal_edges)) / 2)
height = (sum((e.calc_length() for e in vertical_edges)) / 2)
return (round(width, 4), round(height, 4)) |
def monitor(steps):
import re
with open('train_log/DQN-REALDATA/mean_score.log', 'r') as file:
c = file.read().splitlines()
i = (- 1)
while True:
if ('Start Epoch' in c[i]):
break
i -= 1
assert ('Start Epoch' in c[i])
current_epoch = int(re.findall('Epoch (.*) \\.\\.\\.', c[i])[0])
assert (current_epoch > 0)
print('Start monitoring at epoch {}'.format(current_epoch))
next_epoch = (current_epoch + steps)
tic = time.time()
print_flag = True
while True:
toc = time.time()
interval = (toc - tic)
if (((int(interval) % 60) == 0) and print_flag):
print('Epoch {}/{}/{} runs {} minutes...'.format(current_epoch, (current_epoch + 1), (current_epoch + 2), (interval // 60)))
print_flag = False
if (((int(interval) % 60) == 1) and (not print_flag)):
print_flag = True
with open('train_log/DQN-REALDATA/mean_score.log', 'r') as file:
content = file.read()
if ('Start Epoch {}'.format(next_epoch) in content):
tic = time.time()
restart_game()
click(cf_offline.start_botton_pos[0], cf_offline.start_botton_pos[1])
current_epoch = next_epoch
print('Start monitoring at epoch {}'.format(current_epoch))
next_epoch = (current_epoch + steps)
if (interval > ((60 * 60) * 3)):
tic = time.time()
restart_game()
click(cf_offline.start_botton_pos[0], cf_offline.start_botton_pos[1])
print('Restart game at epoch {}'.format(current_epoch)) |
def test_determine_ignored_lines():
f = incremental_coverage.determine_ignored_lines
assert (f('a = 0 # coverage: ignore') == {1})
assert (f('\n a = 0 # coverage: ignore\n b = 0\n ') == {2})
assert (f('\n a = 0 \n b = 0 # coverage: ignore\n ') == {3})
assert (f('\n a = 0 # coverage: ignore \n b = 0 # coverage: ignore\n ') == {2, 3})
assert (f('\n if True:\n a = 0 # coverage: ignore\n\n b = 0\n ') == {3})
assert (f('\n if True:\n # coverage: ignore\n a = 0\n\n b = 0\n ') == {3, 4, 5, 6, 7})
assert (f('\n if True:\n # coverage: ignore\n a = 0\n\n b = 0\n stop = 1\n ') == {3, 4, 5, 6})
assert (f('\n if True:\n # coverage: ignore\n a = 0\n\n b = 0\n else:\n c = 0\n ') == {3, 4, 5, 6})
assert (f('\n if True:\n while False:\n # coverage: ignore\n a = 0\n\n b = 0\n else:\n c = 0 # coverage: ignore\n ') == {4, 5, 6, 9})
assert (f('\n a = 2#coverage:ignore\n a = 3 #coverage:ignore\n a = 4# coverage:ignore\n a = 5#coverage :ignore\n a = 6#coverage: ignore\n a = 7#coverage: ignore\t\n a = 8#coverage:\tignore\t\n \n b = 1 # no cover\n b = 2 # coverage: definitely\n b = 3 # lint: ignore\n ') == {2, 3, 4, 5, 6, 7, 8}) |
class TableModel(BaseTableModel):
def __init__(self, client: Client, attachment: bool=True, **kwargs: Any):
        if attachment:
            self._attachment = AttachmentModel(client, table_name=kwargs.get('table_name'))
        else:
            self._attachment = None
super(TableModel, self).__init__(client, **kwargs)
def _api_url(self) -> Any:
return f'{self._client.base_url}/api/now/table/{self._table_name}'
async def upload_file(self, selection: Union[(Selector, Condition, str)], path: str) -> Response:
path_parts = os.path.split(path)
record_id = (await self.get_object_id(selection))
return (await self._attachment.upload((self._table_name or ''), record_id, file_name=path_parts[(- 1)], dir_name=os.path.join(*path_parts[:(- 1)])))
async def download_file(self, selection: Union[(Selector, Condition, str)], dst_dir: str='.') -> FileHandler:
return (await self._attachment.download(selection, dst_dir))
async def get_attachments(self, selection: Union[(Selector, Condition, str)]=None, **kwargs: Any) -> Response:
return (await self._attachment.get(selection, params=dict(table_name=self._table_name), **kwargs))
async def get_attachment(self, selection: Union[(Selector, Condition, str)]=None, **kwargs: Any) -> Response:
return (await self._attachment.get_one(selection, params=dict(table_name=self._table_name), **kwargs))
async def _close_session(self) -> None:
(await self._close_self())
if self._attachment:
(await self._attachment._close_session()) |
class ContextAuth():
clusterCertificate: str = None
clusterCertificateData: str = None
clusterHost: str = None
clientCertificate: str = None
clientCertificateData: str = None
clientKey: str = None
clientKeyData: str = None
clusterName: str = None
username: str = None
password: str = None
bearerToken: str = None
def clusterCertificateDataBase64(self):
if (self.clusterCertificateData is not None):
return base64.b64encode(bytes(self.clusterCertificateData, 'utf8')).decode('ascii')
return
def clientCertificateDataBase64(self):
if (self.clientCertificateData is not None):
return base64.b64encode(bytes(self.clientCertificateData, 'utf8')).decode('ascii')
return
def clientKeyDataBase64(self):
if (self.clientKeyData is not None):
return base64.b64encode(bytes(self.clientKeyData, 'utf-8')).decode('ascii')
return
    def fetch_auth_data(self, kubeconfig: dict):
context_username = None
current_context = kubeconfig['current-context']
if (current_context is None):
raise Exception('no current-context found in kubeconfig')
for context in kubeconfig['contexts']:
if (context['name'] == current_context):
context_username = context['context']['user']
self.clusterName = context['context']['cluster']
if (context_username is None):
raise Exception('user not found for context {0}'.format(current_context))
if (self.clusterName is None):
raise Exception('cluster not found for context {0}'.format(current_context))
cluster_id = None
user_id = None
for (index, user) in enumerate(kubeconfig['users']):
if (user['name'] == context_username):
user_id = index
if (user_id is None):
raise Exception('user {0} not found in kubeconfig users'.format(context_username))
for (index, cluster) in enumerate(kubeconfig['clusters']):
if (cluster['name'] == self.clusterName):
cluster_id = index
if (cluster_id is None):
            raise Exception('no cluster {0} found in kubeconfig clusters'.format(self.clusterName))
user = kubeconfig['users'][user_id]['user']
cluster = kubeconfig['clusters'][cluster_id]['cluster']
self.clusterHost = cluster['server']
if ('client-key' in user):
try:
self.clientKey = user['client-key']
self.clientKeyData = self.read_file(user['client-key'])
except Exception as e:
raise e
if ('client-key-data' in user):
try:
self.clientKeyData = base64.b64decode(user['client-key-data']).decode('utf-8')
except Exception as e:
raise Exception('impossible to decode client-key-data')
if ('client-certificate' in user):
try:
self.clientCertificate = user['client-certificate']
self.clientCertificateData = self.read_file(user['client-certificate'])
except Exception as e:
raise e
if ('client-certificate-data' in user):
try:
self.clientCertificateData = base64.b64decode(user['client-certificate-data']).decode('utf-8')
except Exception as e:
raise Exception('impossible to decode client-certificate-data')
if ('certificate-authority' in cluster):
try:
self.clusterCertificate = cluster['certificate-authority']
self.clusterCertificateData = self.read_file(cluster['certificate-authority'])
except Exception as e:
raise e
if ('certificate-authority-data' in cluster):
try:
self.clusterCertificateData = base64.b64decode(cluster['certificate-authority-data']).decode('utf-8')
except Exception as e:
raise Exception('impossible to decode certificate-authority-data')
if ('username' in user):
self.username = user['username']
if ('password' in user):
self.password = user['password']
if ('token' in user):
self.bearerToken = user['token']
def read_file(self, filename: str) -> str:
if (not os.path.exists(filename)):
raise Exception('file not found {0} '.format(filename))
with open(filename, 'rb') as file_stream:
return file_stream.read().decode('utf-8') |
def local_property(name=None):
if name:
depr('local_property() is deprecated and will be removed.')
ls = threading.local()
def fget(self):
try:
return ls.var
except AttributeError:
raise RuntimeError('Request context not initialized.')
def fset(self, value):
ls.var = value
def fdel(self):
del ls.var
return property(fget, fset, fdel, 'Thread-local property') |
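# Hedged usage sketch (Bottle-style): expose a thread-local slot as a class
# attribute; each thread then sees its own value, and reading before
# assignment raises RuntimeError('Request context not initialized.').
class Request(object):
    environ = local_property() |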
def parse_args(input_args=None):
parser = argparse.ArgumentParser(description='Simple example of a training script.')
parser.add_argument('--train', type=str, default='True', choices=['True', 'False'])
parser.add_argument('--edit', type=str, default='True', choices=['True', 'False'])
parser.add_argument('--save', type=str, default='False', choices=['True', 'False'])
parser.add_argument('--interpolation', type=str, default='vs', choices=['vs', 'vp'])
if (input_args is not None):
args = parser.parse_args(input_args)
else:
args = parser.parse_args()
return args |
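# Usage sketch: the boolean-ish flags are string-typed, so callers must
# compare against the literal 'True'/'False' choices.
args = parse_args(['--train', 'False', '--interpolation', 'vp'])
do_train = (args.train == 'True') |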
def main():
pp.connect(use_gui=True)
pp.add_data_path()
p.resetDebugVisualizerCamera(cameraDistance=2, cameraPitch=(- 20), cameraYaw=80, cameraTargetPosition=[0, 0, 0])
p.loadURDF('plane.urdf')
ri = reorientbot.pybullet.PandaRobotInterface()
cube = pp.create_box(0.05, 0.05, 0.05, mass=0.1, color=(0, 1, 0, 1))
ee_to_world = ri.get_pose('tipLink')
obj_to_ee = ([0, 0, 0.025], [0, 0, 0, 1])
obj_to_world = pp.multiply(ee_to_world, obj_to_ee)
p.resetBasePositionAndOrientation(cube, *obj_to_world)
p.createConstraint(parentBodyUniqueId=ri.robot, parentLinkIndex=ri.ee, childBodyUniqueId=cube, childLinkIndex=(- 1), jointType=p.JOINT_FIXED, jointAxis=(0, 0, 0), parentFramePosition=obj_to_ee[0], parentFrameOrientation=obj_to_ee[1], childFramePosition=(0, 0, 0))
coord = reorientbot.geometry.Coordinate(*pp.get_link_pose(ri.robot, ri.ee))
coord.translate([0.5, 0, (- 0.5)], wrt='world')
robot_model = ri.get_skrobot()
while True:
joint_positions = robot_model.inverse_kinematics(coord.skrobot_coords, move_target=robot_model.tipLink)
for _ in ri.movej(joint_positions):
p.stepSimulation()
time.sleep((1 / 240))
for _ in ri.movej(ri.homej):
p.stepSimulation()
time.sleep((1 / 240))
pp.disconnect() |
@patch('evennia.server.portal.amp.amp.BinaryBoxProtocol.transport')
class TestAMPClientRecv(_TestAMP):
def test_msgportal2server(self, mocktransport):
self._connect_server(mocktransport)
self.amp_server.send_MsgPortal2Server(self.session, text={'foo': 'bar'})
wire_data = self._catch_wire_read(mocktransport)[0]
self._connect_client(mocktransport)
self.amp_client.dataReceived(wire_data)
self.server.sessions.data_in.assert_called_with(self.session, text={'foo': 'bar'})
def test_adminportal2server(self, mocktransport):
self._connect_server(mocktransport)
self.amp_server.send_AdminPortal2Server(self.session, operation=amp.PDISCONNALL)
wire_data = self._catch_wire_read(mocktransport)[0]
self._connect_client(mocktransport)
self.server.sessions.portal_disconnect_all = MagicMock()
self.amp_client.dataReceived(wire_data)
self.server.sessions.portal_disconnect_all.assert_called() |
def test_ellipsoidal2dcs_to_cf():
ecs = Ellipsoidal2DCS(axis=Ellipsoidal2DCSAxis.LATITUDE_LONGITUDE)
assert (ecs.to_cf() == [{'standard_name': 'latitude', 'long_name': 'latitude coordinate', 'units': 'degrees_north', 'axis': 'Y'}, {'standard_name': 'longitude', 'long_name': 'longitude coordinate', 'units': 'degrees_east', 'axis': 'X'}]) |
class Effect3201(BaseEffect):
type = 'overheat'
def handler(fit, module, context, projectionRange, **kwargs):
module.boostItemAttr('duration', module.getModifiedItemAttr('overloadSelfDurationBonus'))
module.boostItemAttr('shieldBonus', module.getModifiedItemAttr('overloadShieldBonus'), stackingPenalties=True, **kwargs) |
def test_shared_ptr_from_this_and_references():
s = m.SharedFromThisRef()
stats = ConstructorStats.get(m.B)
assert (stats.alive() == 2)
ref = s.ref
assert (stats.alive() == 2)
assert s.set_ref(ref)
assert s.set_holder(ref)
bad_wp = s.bad_wp
assert (stats.alive() == 2)
assert s.set_ref(bad_wp)
with pytest.raises(RuntimeError) as excinfo:
assert s.set_holder(bad_wp)
assert ('Unable to cast from non-held to held instance' in str(excinfo.value))
copy = s.copy
assert (stats.alive() == 3)
assert s.set_ref(copy)
assert s.set_holder(copy)
holder_ref = s.holder_ref
assert (stats.alive() == 3)
assert s.set_ref(holder_ref)
assert s.set_holder(holder_ref)
holder_copy = s.holder_copy
assert (stats.alive() == 3)
assert s.set_ref(holder_copy)
assert s.set_holder(holder_copy)
del ref, bad_wp, copy, holder_ref, holder_copy, s
assert (stats.alive() == 0)
z = m.SharedFromThisVirt.get()
y = m.SharedFromThisVirt.get()
assert (y is z) |
def test_protocol() -> None:
tv = TypedValue(Proto)
def fn() -> None:
pass
assert_cannot_assign(tv, KnownValue(fn))
fn.asynq = (lambda : None)
assert_can_assign(tv, KnownValue(fn))
class X():
def asynq(self) -> None:
pass
assert_can_assign(tv, TypedValue(X))
assert_can_assign(tv, KnownValue(X())) |
def join_simple(declaration: (Type | None), s: Type, t: Type) -> ProperType:
declaration = get_proper_type(declaration)
s = get_proper_type(s)
t = get_proper_type(t)
if ((s.can_be_true, s.can_be_false) != (t.can_be_true, t.can_be_false)):
s = mypy.typeops.true_or_false(s)
t = mypy.typeops.true_or_false(t)
if isinstance(s, AnyType):
return s
if isinstance(s, ErasedType):
return t
if is_proper_subtype(s, t, ignore_promotions=True):
return t
if is_proper_subtype(t, s, ignore_promotions=True):
return s
if isinstance(declaration, UnionType):
return mypy.typeops.make_simplified_union([s, t])
if (isinstance(s, NoneType) and (not isinstance(t, NoneType))):
(s, t) = (t, s)
if (isinstance(s, UninhabitedType) and (not isinstance(t, UninhabitedType))):
(s, t) = (t, s)
(s, t) = normalize_callables(s, t)
if (isinstance(s, UnionType) and (not isinstance(t, UnionType))):
(s, t) = (t, s)
value = t.accept(TypeJoinVisitor(s))
if ((declaration is None) or is_subtype(value, declaration)):
return value
return declaration |
def compute_sec_ver(remediations, packages: Dict[(str, Package)], secure_vulns_by_user, db_full):
for pkg_name in remediations.keys():
pkg: Package = packages.get(pkg_name, None)
secure_versions = []
if pkg:
secure_versions = pkg.secure_versions
analyzed = set(remediations[pkg_name].keys())
if (not is_using_api_key()):
continue
for analyzed_requirement in analyzed:
rem = remediations[pkg_name][analyzed_requirement]
spec = rem.get('requirement').specifier
version = rem['version']
if (not secure_vulns_by_user):
secure_v = sorted(secure_versions, key=(lambda ver: parse_version(ver)), reverse=True)
else:
secure_v = compute_sec_ver_for_user(package=pkg, secure_vulns_by_user=secure_vulns_by_user, db_full=db_full)
rem['closest_secure_version'] = get_closest_ver(secure_v, version, spec)
upgrade = rem['closest_secure_version'].get('upper', None)
downgrade = rem['closest_secure_version'].get('lower', None)
recommended_version = None
if upgrade:
recommended_version = upgrade
elif downgrade:
recommended_version = downgrade
rem['recommended_version'] = recommended_version
rem['other_recommended_versions'] = [other_v for other_v in secure_v if (other_v != str(recommended_version))]
spec = str(rem['requirement'].specifier)
base_url = rem['more_info_url']
rem['more_info_url'] = build_remediation_info_url(base_url=base_url, version=version, spec=spec, target_version=recommended_version) |
def _orthographic__to_cf(conversion):
params = _to_dict(conversion)
return {'grid_mapping_name': 'orthographic', 'latitude_of_projection_origin': params['latitude_of_natural_origin'], 'longitude_of_projection_origin': params['longitude_of_natural_origin'], 'false_easting': params['false_easting'], 'false_northing': params['false_northing']} |
class BsonConverter(Converter):
def dumps(self, obj: Any, unstructure_as: Any=None, check_keys: bool=False, codec_options: CodecOptions=DEFAULT_CODEC_OPTIONS) -> bytes:
return encode(self.unstructure(obj, unstructure_as=unstructure_as), check_keys=check_keys, codec_options=codec_options)
def loads(self, data: bytes, cl: Type[T], codec_options: CodecOptions=DEFAULT_CODEC_OPTIONS) -> T:
return self.structure(decode(data, codec_options=codec_options), cl) |
class BellState(Bloq):
    @property
def signature(self) -> 'Signature':
return Signature([Register('q0', 1, side=Side.RIGHT), Register('q1', 1, side=Side.RIGHT)])
def build_composite_bloq(self, bb):
q0 = bb.add(PlusState())
q1 = bb.add(ZeroState())
(q0, q1) = bb.add(CNOT(), ctrl=q0, target=q1)
return {'q0': q0, 'q1': q1} |
@pytest.mark.parametrize('url, expected_matches', [(' 1), (' 0), (' 0)])
def test_regex_includes_scripts_for(gm_manager, url, expected_matches):
    gh_dark_example = textwrap.dedent('\n // ==UserScript==\n // /^ // / // @run-at document-start\n // ==/UserScript==\n ')
_save_script(gh_dark_example, 'test.user.js')
gm_manager.load_scripts()
scripts = gm_manager.scripts_for(QUrl(url))
assert (len(((scripts.start + scripts.end) + scripts.idle)) == expected_matches) |
class SemiDataset(Dataset):
def __init__(self, name, root, mode, size=None, id_path=None, nsample=None):
self.name = name
self.root = root
self.mode = mode
self.size = size
if ((mode == 'train_l') or (mode == 'train_u')):
with open(id_path, 'r') as f:
self.ids = f.read().splitlines()
if ((mode == 'train_l') and (nsample is not None)):
self.ids *= math.ceil((nsample / len(self.ids)))
random.shuffle(self.ids)
self.ids = self.ids[:nsample]
else:
with open(('partitions/%s/val.txt' % name), 'r') as f:
self.ids = f.read().splitlines()
def __getitem__(self, item):
id = self.ids[item]
img = Image.open(os.path.join(self.root, id.split(' ')[0])).convert('RGB')
mask = Image.fromarray(np.array(Image.open(os.path.join(self.root, id.split(' ')[1]))))
img = transforms.ToTensor()(img)
mask = torch.from_numpy(np.array(mask)).long()
return (img, mask, id)
def __len__(self):
return len(self.ids) |
class ClusterRedisContextFactory(ContextFactory):
def __init__(self, connection_pool: rediscluster.ClusterConnectionPool, name: str='redis', redis_client_name: str=''):
self.connection_pool = connection_pool
self.name = name
self.redis_client_name = redis_client_name
def report_runtime_metrics(self, batch: metrics.Client) -> None:
if (not isinstance(self.connection_pool, rediscluster.ClusterBlockingConnectionPool)):
return
size = self.connection_pool.max_connections
open_connections_num = len(self.connection_pool._connections)
MAX_CONNECTIONS.labels(self.name).set(size)
OPEN_CONNECTIONS.labels(self.name).set(open_connections_num)
batch.gauge('pool.size').replace(size)
batch.gauge('pool.open_connections').replace(open_connections_num)
def make_object_for_context(self, name: str, span: Span) -> 'MonitoredRedisClusterConnection':
return MonitoredRedisClusterConnection(name, span, self.connection_pool, getattr(self.connection_pool, 'track_key_reads_sample_rate', 0), getattr(self.connection_pool, 'track_key_writes_sample_rate', 0), self.redis_client_name) |
def sampling(imps, ratio=4):
pos = []
neg = []
for imp in imps.split():
if (imp[(- 1)] == '1'):
pos.append(imp)
else:
neg.append(imp)
n_neg = (ratio * len(pos))
if (n_neg <= len(neg)):
neg = random.sample(neg, n_neg)
else:
neg = random.sample((neg * ((n_neg // len(neg)) + 1)), n_neg)
random.shuffle(neg)
res = (pos + neg)
random.shuffle(res)
return ' '.join(res) |
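# Usage sketch (hypothetical impression string): ids ending in '1' are
# positives; with ratio=4 each positive keeps at most four sampled negatives.
print(sampling('n1-1 n2-0 n3-0 n4-0 n5-0 n6-0', ratio=4)) |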
class Adam(Optimizer):
def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0, **kwargs):
super(Adam, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
self.decay = K.variable(decay, name='decay')
self.epsilon = epsilon
self.initial_decay = decay
    @interfaces.legacy_get_updates_support
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [K.update_add(self.iterations, 1)]
lr = self.lr
if (self.initial_decay > 0):
lr *= (1.0 / (1.0 + (self.decay * K.cast(self.iterations, K.dtype(self.decay)))))
t = (K.cast(self.iterations, K.floatx()) + 1)
lr_t = (lr * (K.sqrt((1.0 - K.pow(self.beta_2, t))) / (1.0 - K.pow(self.beta_1, t))))
ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
self.weights = (([self.iterations] + ms) + vs)
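        # Bias-corrected Adam update (Kingma & Ba), as implemented below:
        #   m_t = beta_1*m + (1 - beta_1)*g
        #   v_t = beta_2*v + (1 - beta_2)*g**2
        #   p_t = p - lr_t*m_t / (sqrt(v_t) + epsilon)
        # with lr_t = lr * sqrt(1 - beta_2**t) / (1 - beta_1**t).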
for (p, g, m, v) in zip(params, grads, ms, vs):
m_t = ((self.beta_1 * m) + ((1.0 - self.beta_1) * g))
v_t = ((self.beta_2 * v) + ((1.0 - self.beta_2) * K.square(g)))
p_t = (p - ((lr_t * m_t) / (K.sqrt(v_t) + self.epsilon)))
self.updates.append(K.update(m, m_t))
self.updates.append(K.update(v, v_t))
new_p = p_t
if (getattr(p, 'constraint', None) is not None):
new_p = p.constraint(new_p)
self.updates.append(K.update(p, new_p))
return self.updates
def get_config(self):
config = {'lr': float(K.get_value(self.lr)), 'beta_1': float(K.get_value(self.beta_1)), 'beta_2': float(K.get_value(self.beta_2)), 'decay': float(K.get_value(self.decay)), 'epsilon': self.epsilon}
base_config = super(Adam, self).get_config()
return dict((list(base_config.items()) + list(config.items()))) |
def send_key_click(widget, accel, recursive=False):
(key, mods) = Gtk.accelerator_parse(accel)
assert (key is not None)
assert (mods is not None)
assert isinstance(widget, Gtk.Widget)
handled = _send_key_click_event(widget, state=mods, keyval=key)
if recursive:
if isinstance(widget, Gtk.Container):
for child in widget.get_children():
handled += send_key_click(child, accel, recursive)
return handled |
def loadDataFile_with_groupseglabel(filename):
    f = h5py.File(filename, 'r')
data = f['data'][:]
group = f['pid'][:]
if ('groupcategory' in f):
cate = f['groupcategory'][:]
else:
cate = 0
seg = ((- 1) * np.ones_like(group))
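    # Map each point's group id to that group's category; points whose group
    # or category is -1 keep the -1 "unlabeled" marker set above.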
for i in range(group.shape[0]):
for j in range(group.shape[1]):
if ((group[(i, j, 0)] != (- 1)) and (cate[(i, group[(i, j, 0)], 0)] != (- 1))):
seg[(i, j, 0)] = cate[(i, group[(i, j, 0)], 0)]
return (data, group, cate, seg) |
@pytest.mark.parametrize('genotype, expect', [([(- 1), 0], (- 1)), ([0, (- 1)], (- 1)), ([0, 0], 0), ([0, 1], 1), ([1, 0], 1), ([1, 1], 2), ([0, 0, 0], 0), ([0, 1, 0], 1), ([1, 1, 1], 3), ([0, 0, 0, 0], 0), ([0, 1, 0, 1], 2), ([1, 1, 0, 1], 3)])
def test__biallelic_genotype_index(genotype, expect):
genotype = np.array(genotype)
assert (_biallelic_genotype_index(genotype) == expect) |
class MLPBlockFC(nn.Module):
def __init__(self, d_points, d_model, p_dropout):
super(MLPBlockFC, self).__init__()
self.mlp = nn.Sequential(nn.Linear(d_points, d_model, bias=False), nn.BatchNorm1d(d_model), nn.LeakyReLU(negative_slope=0.2), nn.Dropout(p=p_dropout))
def forward(self, x):
return self.mlp(x) |
def main():
args = set_args()
init(args)
Tokenizer = eval_object(model_dict[args.model][0])
bert_path_or_name = model_dict[args.model][(- 1)]
tokenizer = Tokenizer.from_pretrained(bert_path_or_name)
print((20 * '='), ' Preparing for training ', (20 * '='))
print('\t* Loading training data...')
train_data = DataPrecessForSentence(tokenizer, args)
labels = train_data.labels
if args.use_sample:
from collections import Counter
count_dict = Counter(labels)
count_dict = {k: (1 - (v / len(train_data))) for (k, v) in count_dict.items()}
print(count_dict)
sampler = WeightedRandomSampler([count_dict[int(i)] for i in labels], args.batch_size, replacement=True)
train_loader = DataLoader(train_data, shuffle=False, batch_size=args.batch_size, sampler=sampler)
else:
train_loader = DataLoader(train_data, shuffle=True, batch_size=args.batch_size)
print('\t* Loading validation data...')
dev_data = DataPrecessForSentence(tokenizer, args, type='dev')
dev_loader = DataLoader(dev_data, shuffle=True, batch_size=args.batch_size)
print('\t* Building model...')
model = get_model(args)
for (name, para) in model.named_parameters():
if (len(para.size()) < 2):
continue
if ('classifier' in name):
nn.init.xavier_normal_(para)
if args.freeze_bert_head:
if ('classifier' in name):
para.requires_grad = True
else:
para.requires_grad = False
param_optimizer = list(model.named_parameters())
param_optimizer = [(i, k) for (i, k) in param_optimizer if k.requires_grad]
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{'params': [p for (n, p) in param_optimizer if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': 0.01}, {'params': [p for (n, p) in param_optimizer if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.85, patience=0)
best_score = 0.0
start_epoch = 1
epochs_count = []
train_losses = []
valid_losses = []
(train_acc_list, dev_acc_list) = ([], [])
if args.checkpoint:
checkpoint_save = torch.load(args.checkpoint)
print(checkpoint_save.keys())
start_epoch = (checkpoint_save['epoch'] + 1)
best_score = checkpoint_save['best_score']
print('\t* Training will continue on existing model from epoch {}...'.format(start_epoch))
model.load_state_dict(checkpoint_save['model'])
epochs_count = checkpoint_save['epochs_count']
train_losses = checkpoint_save['train_losses']
valid_losses = checkpoint_save['valid_losses']
(_, valid_loss, valid_accuracy) = validate(model, dev_loader, args)
print('\t* Validation loss before training: {:.4f}, accuracy: {:.4f}%'.format(valid_loss, (valid_accuracy * 100)))
print('\n', (20 * '='), 'Training model on device: {}'.format(args.device), (20 * '='))
patience_counter = 0
for epoch in range(start_epoch, (args.epochs + 1)):
epochs_count.append(epoch)
print('* Training epoch {}:'.format(epoch))
(epoch_time, epoch_loss, train_epoch_accuracy) = train(model, train_loader, optimizer, args)
train_losses.append(epoch_loss)
print('-> Training time: {:.4f}s, loss = {:.4f}, accuracy: {:.4f}%'.format(epoch_time, epoch_loss, (train_epoch_accuracy * 100)))
print('* Validation for epoch {}:'.format(epoch))
(epoch_time, epoch_loss, epoch_accuracy) = validate(model, dev_loader, args)
valid_losses.append(epoch_loss)
print('-> Valid. time: {:.4f}s, loss: {:.4f}, accuracy: {:.4f}% \n'.format(epoch_time, epoch_loss, (epoch_accuracy * 100)))
scheduler.step(epoch_accuracy)
if (epoch_accuracy < best_score):
patience_counter += 1
else:
print('save data')
best_score = epoch_accuracy
patience_counter = 0
tokenizer.save_pretrained(args.pretrain_dir)
model.save_pretrained(args.pretrain_dir)
torch.save({'epoch': epoch, 'model': model.state_dict(), 'best_score': best_score, 'epochs_count': epochs_count, 'train_losses': train_losses, 'valid_losses': valid_losses}, args.target_file)
train_acc_list.append(train_epoch_accuracy)
dev_acc_list.append(epoch_accuracy)
my_plot(train_acc_list, dev_acc_list, train_losses, args)
if (patience_counter >= args.patience):
print('-> Early stopping: patience limit reached, stopping...')
break |
class nnUNetTrainerDAOrd0(nnUNetTrainer):
def get_dataloaders(self):
patch_size = self.configuration_manager.patch_size
dim = len(patch_size)
deep_supervision_scales = self._get_deep_supervision_scales()
(rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes) = self.configure_rotation_dummyDA_mirroring_and_inital_patch_size()
tr_transforms = self.get_training_transforms(patch_size, rotation_for_DA, deep_supervision_scales, mirror_axes, do_dummy_2d_data_aug, order_resampling_data=0, order_resampling_seg=0, use_mask_for_norm=self.configuration_manager.use_mask_for_norm, is_cascaded=self.is_cascaded, foreground_labels=self.label_manager.all_labels, regions=(self.label_manager.foreground_regions if self.label_manager.has_regions else None), ignore_label=self.label_manager.ignore_label)
val_transforms = self.get_validation_transforms(deep_supervision_scales, is_cascaded=self.is_cascaded, foreground_labels=self.label_manager.all_labels, regions=(self.label_manager.foreground_regions if self.label_manager.has_regions else None), ignore_label=self.label_manager.ignore_label)
(dl_tr, dl_val) = self.get_plain_dataloaders(initial_patch_size, dim)
allowed_num_processes = get_allowed_n_proc_DA()
if (allowed_num_processes == 0):
mt_gen_train = SingleThreadedAugmenter(dl_tr, tr_transforms)
mt_gen_val = SingleThreadedAugmenter(dl_val, val_transforms)
else:
mt_gen_train = LimitedLenWrapper(self.num_iterations_per_epoch, dl_tr, tr_transforms, allowed_num_processes, 6, None, True, 0.02)
mt_gen_val = LimitedLenWrapper(self.num_val_iterations_per_epoch, dl_val, val_transforms, max(1, (allowed_num_processes // 2)), 3, None, True, 0.02)
return (mt_gen_train, mt_gen_val) |
def prepare_ocp(biorbd_model_path, n_shooting, tf, ode_solver=OdeSolver.RK4(), use_sx=True, expand_dynamics=True):
bio_model = BiorbdModel(biorbd_model_path)
dynamics = Dynamics(DynamicsFcn.TORQUE_DRIVEN, expand_dynamics=expand_dynamics)
x_bounds = BoundsList()
x_bounds['q'] = bio_model.bounds_from_ranges('q')
x_bounds['qdot'] = bio_model.bounds_from_ranges('qdot')
(tau_min, tau_max) = ((- 100), 100)
u_bounds = BoundsList()
u_bounds['tau'] = (([tau_min] * bio_model.nb_tau), ([tau_max] * bio_model.nb_tau))
return OptimalControlProgram(bio_model, dynamics, n_shooting, tf, x_bounds=x_bounds, u_bounds=u_bounds, ode_solver=ode_solver, use_sx=use_sx) |
class SetData(namedtuple('SetData', 'path data version')):
type = 5
def serialize(self):
b = bytearray()
b.extend(write_string(self.path))
b.extend(write_buffer(self.data))
b.extend(int_struct.pack(self.version))
return b
    @classmethod
    def deserialize(cls, bytes, offset):
return ZnodeStat._make(stat_struct.unpack_from(bytes, offset)) |
class OptUx(object):
def __init__(self, formula, solver='g3', adapt=False, cover=None, dcalls=False, exhaust=False, minz=False, puresat=False, unsorted=False, trim=False, verbose=0):
assert ((not puresat) or unsorted), "'unsorted' needs to be True for pure SAT mode"
self.verbose = verbose
self.formula = WCNFPlus()
self.formula.hard = formula.hard[:]
self.formula.wght = formula.wght[:]
self.formula.topw = formula.topw
self.formula.nv = formula.nv
if (isinstance(formula, WCNFPlus) and formula.atms):
self.formula.atms = formula.atms[:]
self.topv = formula.nv
self._process_soft(formula)
self.formula.nv = self.topv
unweighted = self.formula.copy()
unweighted.wght = [1 for w in unweighted.wght]
(to_hit, self.units) = self._disjoint(unweighted, solver, adapt, exhaust, minz, trim)
if (self.verbose > 2):
print('c mcses: {0} unit, {1} disj'.format(len(self.units), (len(to_hit) + len(self.units))))
if (not unsorted):
self.hitman = Hitman(bootstrap_with=to_hit, weights=self.weights, solver=solver, htype='sorted', mxs_adapt=adapt, mxs_exhaust=exhaust, mxs_minz=minz, mxs_trim=trim)
elif (not puresat):
self.hitman = Hitman(bootstrap_with=to_hit, weights=self.weights, solver=solver, htype='lbx', mcs_usecld=dcalls)
else:
self.hitman = Hitman(bootstrap_with=to_hit, weights=self.weights, solver=puresat, htype='sat')
self.cover = (cover is not None)
if cover:
m = (lambda l: (Atom(l, sign=True) if ((- l) not in self.weights) else Atom((- l), sign=False)))
for cl in cover:
if ((len(cl) != 2) or (not (type(cl[0]) in (list, tuple, set)))):
cl = [m(l) for l in cl]
else:
cl = [[m(l) for l in cl[0]], cl[1]]
self.hitman.add_hard(cl, weights=self.weights)
self.oracle = Solver(name=solver, bootstrap_with=(unweighted.hard + [[mcs] for mcs in self.units]))
if unweighted.atms:
            assert self.oracle.supports_atmost(), '{0} does not support native cardinality constraints. Make sure you use the right type of formula.'.format(solver)
for atm in unweighted.atms:
self.oracle.add_atmost(*atm)
def __del__(self):
self.delete()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.delete()
def delete(self):
if self.hitman:
self.hitman.delete()
self.hitman = None
if self.oracle:
self.oracle.delete()
self.oracle = None
def _process_soft(self, formula):
self.sels = []
self.smap = {}
processed_dups = set()
for cl in formula.soft:
selv = cl[0]
if (len(cl) > 1):
self.topv += 1
selv = self.topv
self.formula.hard.append((cl + [(- selv)]))
elif (selv in self.smap):
if (selv not in processed_dups):
self.topv += 1
nsel = self.topv
self.sels[(self.smap[selv] - 1)] = nsel
self.formula.hard.append((self.formula.soft[(self.smap[selv] - 1)] + [(- nsel)]))
self.formula.soft[(self.smap[selv] - 1)] = [nsel]
self.smap[nsel] = self.smap[selv]
processed_dups.add(selv)
self.topv += 1
selv = self.topv
self.formula.hard.append((cl + [(- selv)]))
self.sels.append(selv)
self.formula.soft.append([selv])
self.smap[selv] = len(self.sels)
for selv in processed_dups:
del self.smap[selv]
assert (len(self.sels) == len(self.smap) == len(self.formula.wght))
self.weights = {l: w for (l, w) in zip(self.sels, self.formula.wght)}
def _disjoint(self, formula, solver, adapt, exhaust, minz, trim):
(to_hit, units) = ([], [])
with RC2(formula, solver=solver, adapt=adapt, exhaust=exhaust, minz=minz, trim=trim, verbose=0) as oracle:
while True:
model = oracle.compute()
if (model is None):
break
falsified = list(filter((lambda l: (model[(abs(l) - 1)] == (- l))), self.sels))
if (len(falsified) > 1):
to_hit.append(falsified)
else:
units.append(falsified[0])
for l in falsified:
oracle.add_clause([l])
if (self.verbose > 3):
print('c mcs: {0} 0'.format(' '.join([str(self.smap[s]) for s in falsified])))
self.disj_time = oracle.oracle_time()
return (to_hit, units)
def compute(self):
units_cost = sum(map((lambda l: self.weights[l]), self.units))
while True:
hs = self.hitman.get()
if (hs is None):
break
self.oracle.set_phases(self.sels)
res = self.oracle.solve(assumptions=hs)
if (res == False):
self.hitman.block(hs)
self.cost = (sum(map((lambda l: self.weights[l]), hs)) + units_cost)
if (self.units and self.cover):
return sorted(map((lambda s: self.smap[s]), sorted(set((self.units + hs)))))
else:
return sorted(map((lambda s: self.smap[s]), (self.units + hs)))
else:
model = self.oracle.get_model()
cs = list(filter((lambda l: (model[(abs(l) - 1)] == (- l))), self.sels))
self.hitman.hit(cs, weights=self.weights)
def enumerate(self):
done = False
while (not done):
mus = self.compute()
if (mus != None):
(yield mus)
else:
done = True
def oracle_time(self):
return ((self.disj_time + self.hitman.oracle_time()) + self.oracle.time_accum()) |
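# Hedged usage sketch: enumerate smallest-cost MUSes of a PySAT WCNF formula
# ('formula.wcnf' is a hypothetical input file).
from pysat.formula import WCNF
wcnf = WCNF(from_file='formula.wcnf')
with OptUx(wcnf) as optux:
    for mus in optux.enumerate():
        print('mus:', mus, 'cost:', optux.cost) |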
class VideoChunkIterator():
def __init__(self, video_features: np.ndarray, chunk_frames: int, num_border_frames: int) -> None:
self.chunk_features_expanded = None
self.valid_chunk_size = None
self._output_start = None
self._output_end = None
self._result_start = None
self._result_end = None
self._is_last = False
self._chunk_start = 0
self._chunk_frames = chunk_frames
self._num_border_frames = num_border_frames
self._video_features_expanded = np.expand_dims(video_features, axis=(- 1))
self._num_frames = self._video_features_expanded.shape[0]
def prepare_input_batch(self) -> Tuple[(np.ndarray, List[int])]:
input_chunk_batch_list = []
valid_chunk_sizes = []
while self.has_next():
self.next()
input_chunk_batch_list.append(self.chunk_features_expanded)
valid_chunk_sizes.append(self.valid_chunk_size)
return (np.concatenate(input_chunk_batch_list), valid_chunk_sizes)
def accumulate_chunk_outputs(self, accumulated_output: np.ndarray, output_chunks: List[np.ndarray]) -> None:
chunk_index = 0
while self.has_next():
self.next()
self.accumulate(accumulated_output, output_chunks[chunk_index])
chunk_index += 1
def has_next(self) -> bool:
return (not self._is_last)
def next(self) -> None:
self.valid_chunk_size = min(self._chunk_frames, (self._video_features_expanded.shape[0] - self._chunk_start))
chunk_end = (self._chunk_start + self.valid_chunk_size)
if (self.valid_chunk_size == self._chunk_frames):
chunk_features_expanded = self._video_features_expanded[self._chunk_start:chunk_end]
else:
chunk_features_expanded = np.zeros((self._chunk_frames, self._video_features_expanded.shape[1], self._video_features_expanded.shape[2]))
chunk_features_expanded[0:self.valid_chunk_size] = self._video_features_expanded[self._chunk_start:chunk_end]
self.chunk_features_expanded = np.expand_dims(chunk_features_expanded, axis=0)
is_first = (self._chunk_start == 0)
if is_first:
self._output_start = 0
else:
self._output_start = self._num_border_frames
self._result_start = (self._chunk_start + self._output_start)
self._is_last = (self._chunk_start >= (self._num_frames - self._chunk_frames))
if self._is_last:
self._output_end = self.valid_chunk_size
else:
self._output_end = (self.valid_chunk_size - self._num_border_frames)
self._result_end = (self._chunk_start + self._output_end)
self._chunk_start += (self._chunk_frames - (2 * self._num_border_frames))
if (self._chunk_start > (self._num_frames - self._chunk_frames)):
self._chunk_start = (self._num_frames - self._chunk_frames)
def accumulate(self, accumulated_output: np.ndarray, output_chunk: np.ndarray) -> None:
result_start = self._result_start
result_end = self._result_end
output_start = self._output_start
output_end = self._output_end
accumulated_output[result_start:result_end] = output_chunk[output_start:output_end] |
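# Hedged usage sketch: the iterator is single-pass (has_next() turns False
# after prepare_input_batch), so output accumulation needs a fresh instance.
# `video_features` and `model` are assumptions, with `model` mapping the
# stacked chunk batch to an iterable of per-chunk, frames-first outputs.
it = VideoChunkIterator(video_features, chunk_frames=64, num_border_frames=8)
(batch, valid_sizes) = it.prepare_input_batch()
output_chunks = list(model(batch))
it = VideoChunkIterator(video_features, chunk_frames=64, num_border_frames=8)
accumulated = np.zeros((video_features.shape[0],) + output_chunks[0].shape[1:])
it.accumulate_chunk_outputs(accumulated, output_chunks) |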
def test_interpolation():
interp = Interpolate((0, 100), (0, 100))
for i in range(101):
assert (interp(i) == i)
interp = Interpolate((0, 50, 100), (0, 100, 200))
for i in range(101):
assert (interp(i) == (2 * i))
interp = Interpolate((0, 50, 100), (0, (- 50), 50))
assert (interp(40) == (- 40))
assert (interp(60) == (- 30))
assert (interp(90) == 30)
assert (interp(99) == 48)
interp = Interpolate((0, 100), (Fraction('12.35'), Fraction('67.2')))
assert (interp(0) == Fraction('12.35'))
assert (interp(50) == pytest.approx(((12.35 + 67.2) / 2)))
assert (interp(100) == Fraction('67.2')) |
def burn_eth(rpc_client: JSONRPCClient, amount_to_leave: int=0) -> None:
address = rpc_client.address
web3 = rpc_client.web3
gas_price = web3.eth.gas_price
amount_to_leave = (TRANSACTION_INTRINSIC_GAS + amount_to_leave)
amount_to_burn = (web3.eth.get_balance(address) - (gas_price * amount_to_leave))
burn_transfer = EthTransfer(to_address=Address(HOP1), value=amount_to_burn, gas_price=gas_price)
transaction_hash = rpc_client.transact(burn_transfer)
rpc_client.poll_transaction(transaction_hash) |
class DebianControlLexer(RegexLexer):
name = 'Debian Control file'
url = '
aliases = ['debcontrol', 'control']
filenames = ['control']
version_added = '0.9'
tokens = {'root': [('^(Description)', Keyword, 'description'), ('^(Maintainer|Uploaders)(:\\s*)', bygroups(Keyword, Text), 'maintainer'), ('^((?:Build-|Pre-)?Depends(?:-Indep|-Arch)?)(:\\s*)', bygroups(Keyword, Text), 'depends'), ('^(Recommends|Suggests|Enhances)(:\\s*)', bygroups(Keyword, Text), 'depends'), ('^((?:Python-)?Version)(:\\s*)(\\S+)$', bygroups(Keyword, Text, Number)), ('^((?:Installed-)?Size)(:\\s*)(\\S+)$', bygroups(Keyword, Text, Number)), ('^(MD5Sum|SHA1|SHA256)(:\\s*)(\\S+)$', bygroups(Keyword, Text, Number)), ('^([a-zA-Z\\-0-9\\.]*?)(:\\s*)(.*?)$', bygroups(Keyword, Whitespace, String))], 'maintainer': [('<[^>]+>$', Generic.Strong, '#pop'), ('<[^>]+>', Generic.Strong), (',\\n?', Text), ('[^,<]+$', Text, '#pop'), ('[^,<]+', Text)], 'description': [('(.*)(Homepage)(: )(\\S+)', bygroups(Text, String, Name, Name.Class)), (':.*\\n', Generic.Strong), (' .*\\n', Text), default('#pop')], 'depends': [('(\\$)(\\{)(\\w+\\s*:\\s*\\w+)(\\})', bygroups(Operator, Text, Name.Entity, Text)), ('\\(', Text, 'depend_vers'), ('\\|', Operator), (',\\n', Text), ('\\n', Text, '#pop'), ('[,\\s]', Text), ('[+.a-zA-Z0-9-]+', Name.Function), ('\\[.*?\\]', Name.Entity)], 'depend_vers': [('\\)', Text, '#pop'), ('([><=]+)(\\s*)([^)]+)', bygroups(Operator, Text, Number))]} |
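# Hedged usage sketch via pygments' standard highlight API; the control-file
# path is an assumption.
from pygments import highlight
from pygments.formatters import TerminalFormatter
with open('debian/control') as f:
    print(highlight(f.read(), DebianControlLexer(), TerminalFormatter())) |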
def test_upload_photos(requests_mock):
requests_mock.post(f'{API_V0}/observation_photos', json=load_sample_data('post_observation_photos.json'), status_code=200)
response = upload_photos(1234, BytesIO(), access_token='token')
assert (response[0]['id'] == 1234)
assert (response[0]['created_at'] == '2020-09-24T21:06:16.964-05:00')
assert (response[0]['photo']['native_username'] == 'username') |
class NoOptionError(Error):
def __init__(self, option: str, *, all_names: List[str]=None, deleted: bool=False, renamed: str=None) -> None:
if deleted:
assert (renamed is None)
suffix = ' (this option was removed from qutebrowser)'
elif (renamed is not None):
suffix = ' (this option was renamed to {!r})'.format(renamed)
elif all_names:
matches = difflib.get_close_matches(option, all_names, n=1)
if matches:
suffix = f' (did you mean {matches[0]!r}?)'
else:
suffix = ''
else:
suffix = ''
super().__init__('No option {!r}{}'.format(option, suffix))
self.option = option |
class TaggerModel(nn.Module):
def __init__(self, args: Namespace, device: torch.device):
super(TaggerModel, self).__init__()
self.modelid = 'tagger_baseline'
self.args = args
self.device = device
self.max_token = (args.max_generate + 1)
self._encoder = PLM(args, device, use_encoder=True, pooler_output=False)
self._hidden2tag = Linear(args.lm_hidden_size, args.tagger_classes)
self._hidden2t = Linear(args.lm_hidden_size, self.max_token)
self._lm_dropout = nn.Dropout(args.dropout)
def forward(self, input: torch.Tensor, attention_mask: torch.Tensor=None):
encoded = self._encoder(input, attention_mask=attention_mask)
encoded = self._lm_dropout(encoded)
tagger_logits = self._hidden2tag(encoded)
t_logits = self._hidden2t(encoded)
return (tagger_logits, t_logits) |
def test_tc_bit_defers_last_response_missing():
zc = Zeroconf(interfaces=['127.0.0.1'])
_wait_for_start(zc)
type_ = '_knowndefer._tcp.local.'
name = 'knownname'
name2 = 'knownname2'
name3 = 'knownname3'
registration_name = f'{name}.{type_}'
registration2_name = f'{name2}.{type_}'
registration3_name = f'{name3}.{type_}'
desc = {'path': '/~paulsm/'}
server_name = 'ash-2.local.'
server_name2 = 'ash-3.local.'
server_name3 = 'ash-4.local.'
info = r.ServiceInfo(type_, registration_name, 80, 0, 0, desc, server_name, addresses=[socket.inet_aton('10.0.1.2')])
info2 = r.ServiceInfo(type_, registration2_name, 80, 0, 0, desc, server_name2, addresses=[socket.inet_aton('10.0.1.2')])
info3 = r.ServiceInfo(type_, registration3_name, 80, 0, 0, desc, server_name3, addresses=[socket.inet_aton('10.0.1.2')])
zc.registry.async_add(info)
zc.registry.async_add(info2)
zc.registry.async_add(info3)
protocol = zc.engine.protocols[0]
now = r.current_time_millis()
_clear_cache(zc)
source_ip = '203.0.113.12'
generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
question = r.DNSQuestion(type_, const._TYPE_PTR, const._CLASS_IN)
generated.add_question(question)
for _ in range(300):
generated.add_answer_at_time(info.dns_pointer(), now)
generated.add_answer_at_time(info2.dns_pointer(), now)
generated.add_answer_at_time(info3.dns_pointer(), now)
packets = generated.packets()
assert (len(packets) == 4)
expected_deferred = []
next_packet = r.DNSIncoming(packets.pop(0))
expected_deferred.append(next_packet)
threadsafe_query(zc, protocol, next_packet, source_ip, const._MDNS_PORT, Mock(), ())
assert (protocol._deferred[source_ip] == expected_deferred)
timer1 = protocol._timers[source_ip]
next_packet = r.DNSIncoming(packets.pop(0))
expected_deferred.append(next_packet)
threadsafe_query(zc, protocol, next_packet, source_ip, const._MDNS_PORT, Mock(), ())
assert (protocol._deferred[source_ip] == expected_deferred)
timer2 = protocol._timers[source_ip]
assert timer1.cancelled()
assert (timer2 != timer1)
threadsafe_query(zc, protocol, next_packet, source_ip, const._MDNS_PORT, Mock(), ())
assert (protocol._deferred[source_ip] == expected_deferred)
assert (source_ip in protocol._timers)
timer3 = protocol._timers[source_ip]
assert (not timer3.cancelled())
assert (timer3 == timer2)
next_packet = r.DNSIncoming(packets.pop(0))
expected_deferred.append(next_packet)
threadsafe_query(zc, protocol, next_packet, source_ip, const._MDNS_PORT, Mock(), ())
assert (protocol._deferred[source_ip] == expected_deferred)
assert (source_ip in protocol._timers)
timer4 = protocol._timers[source_ip]
assert timer3.cancelled()
assert (timer4 != timer3)
for _ in range(8):
time.sleep(0.1)
if ((source_ip not in protocol._timers) and (source_ip not in protocol._deferred)):
break
assert (source_ip not in protocol._deferred)
assert (source_ip not in protocol._timers)
zc.registry.async_remove(info)
zc.close() |
class GP(SingleTaskGP):
def __init__(self, train_x, train_y, likelihood, lengthscale_constraint, outputscale_constraint, ard_dims, hyper=1.0, saas=True):
covar_module = _prepare_covar_module(ard_dims, lengthscale_constraint, outputscale_constraint, hyper=hyper, saas=saas)
super(GP, self).__init__(train_x, train_y.view((- 1), 1), likelihood, outcome_transform=Standardize(1), covar_module=covar_module)
self.ard_dims = ard_dims
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return MultivariateNormal(mean_x, covar_x) |
class GaussianBlur(object):
def __init__(self, kernel_size, min=0.1, max=2.0):
self.min = min
self.max = max
self.kernel_size = kernel_size
def __call__(self, sample):
sample = np.array(sample)
prob = np.random.random_sample()
if (prob < 0.5):
sigma = (((self.max - self.min) * np.random.random_sample()) + self.min)
sample = cv2.GaussianBlur(sample, (self.kernel_size, self.kernel_size), sigma)
return sample |
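# Usage sketch: the blur fires with probability 0.5; sigma is drawn uniformly
# from [min, max], and the input is converted to a numpy array either way
# (`image` below is an assumed PIL.Image or ndarray; kernel_size must be odd).
blur = GaussianBlur(kernel_size=5)
augmented = blur(image) |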
class FM(object):
def __init__(self, formula, enc=EncType.pairwise, solver='m22', verbose=1):
self.verbose = verbose
self.solver = solver
self.time = 0.0
self.topv = self.orig_nv = formula.nv
self.hard = copy.deepcopy(formula.hard)
self.soft = copy.deepcopy(formula.soft)
self.wght = formula.wght[:]
self.cenc = enc
self.cost = 0
if (isinstance(formula, WCNFPlus) and formula.atms):
self.atm1 = copy.deepcopy(formula.atms)
else:
self.atm1 = None
self.init(with_soft=False)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.delete()
def init(self, with_soft=True):
self.oracle = Solver(name=self.solver, bootstrap_with=self.hard, use_timer=True)
if self.atm1:
            assert self.oracle.supports_atmost(), '{0} does not support native cardinality constraints. Make sure you use the right type of formula.'.format(self.solver)
for am in self.atm1:
self.oracle.add_atmost(*am)
if with_soft:
for (cl, cpy) in zip(self.soft, self.scpy):
if cpy:
self.oracle.add_clause(cl)
def delete(self):
if self.oracle:
self.time += self.oracle.time_accum()
self.oracle.delete()
self.oracle = None
def reinit(self):
self.delete()
self.init()
def compute(self):
if self.oracle.solve():
(self.sels, self.vmap) = ([], {})
self.scpy = [True for cl in self.soft]
for i in range(len(self.soft)):
self.topv += 1
self.soft[i].append((- self.topv))
self.sels.append(self.topv)
self.oracle.add_clause(self.soft[i])
self.vmap[self.topv] = i
self._compute()
return True
else:
return False
def _compute(self):
while True:
if self.oracle.solve(assumptions=self.sels):
self.model = self.oracle.get_model()
self.model = list(filter((lambda l: (abs(l) <= self.orig_nv)), self.model))
return
else:
self.treat_core()
if (self.verbose > 1):
print('c cost: {0}; core sz: {1}'.format(self.cost, len(self.core)))
self.reinit()
def treat_core(self):
self.core = [self.vmap[sel] for sel in self.oracle.get_core()]
minw = min(map((lambda i: self.wght[i]), self.core))
self.cost += minw
self.split_core(minw)
self.relax_core()
def split_core(self, minw):
for clid in self.core:
sel = self.sels[clid]
if (self.wght[clid] > minw):
self.topv += 1
cl_new = []
for l in self.soft[clid]:
if (l != (- sel)):
cl_new.append(l)
else:
cl_new.append((- self.topv))
self.sels.append(self.topv)
self.vmap[self.topv] = len(self.soft)
self.soft.append(cl_new)
self.wght.append((self.wght[clid] - minw))
self.wght[clid] = minw
self.scpy.append(True)
def relax_core(self):
if (len(self.core) > 1):
rels = []
for clid in self.core:
self.topv += 1
rels.append(self.topv)
self.soft[clid].append(self.topv)
am1 = CardEnc.atmost(lits=rels, top_id=self.topv, encoding=self.cenc)
for cl in am1.clauses:
self.hard.append(cl)
for am in am1.atmosts:
self.atm1.append(am)
self.topv = am1.nv
elif (len(self.core) == 1):
self.remove_unit_core()
def remove_unit_core(self):
self.scpy[self.core[0]] = False
for l in self.soft[self.core[0]]:
self.hard.append([(- l)])
def oracle_time(self):
self.time += self.oracle.time_accum()
return self.time |
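# Hedged usage sketch (Fu&Malik-style MaxSAT): compute() relaxes unsatisfiable
# cores until the soft clauses become satisfiable, accumulating each core's
# minimum weight into self.cost ('formula.wcnf' is a hypothetical input file).
from pysat.formula import WCNF
wcnf = WCNF(from_file='formula.wcnf')
with FM(wcnf, verbose=0) as fm:
    if fm.compute():
        print('cost:', fm.cost) |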
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
centralWidget = QWidget()
self.setCentralWidget(centralWidget)
self.glWidget = GLWidget()
self.pixmapLabel = QLabel()
self.glWidgetArea = QScrollArea()
self.glWidgetArea.setWidget(self.glWidget)
self.glWidgetArea.setWidgetResizable(True)
self.glWidgetArea.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.glWidgetArea.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.glWidgetArea.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)
self.glWidgetArea.setMinimumSize(50, 50)
self.pixmapLabelArea = QScrollArea()
self.pixmapLabelArea.setWidget(self.pixmapLabel)
self.pixmapLabelArea.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)
self.pixmapLabelArea.setMinimumSize(50, 50)
xSlider = self.createSlider(self.glWidget.xRotationChanged, self.glWidget.setXRotation)
ySlider = self.createSlider(self.glWidget.yRotationChanged, self.glWidget.setYRotation)
zSlider = self.createSlider(self.glWidget.zRotationChanged, self.glWidget.setZRotation)
self.createActions()
self.createMenus()
centralLayout = QGridLayout()
centralLayout.addWidget(self.glWidgetArea, 0, 0)
centralLayout.addWidget(self.pixmapLabelArea, 0, 1)
centralLayout.addWidget(xSlider, 1, 0, 1, 2)
centralLayout.addWidget(ySlider, 2, 0, 1, 2)
centralLayout.addWidget(zSlider, 3, 0, 1, 2)
centralWidget.setLayout(centralLayout)
xSlider.setValue((15 * 16))
ySlider.setValue((345 * 16))
zSlider.setValue((0 * 16))
self.setWindowTitle('Grabber')
self.resize(400, 300)
def grabFrameBuffer(self):
image = self.glWidget.grabFramebuffer()
self.setPixmap(QPixmap.fromImage(image))
def clearPixmap(self):
self.setPixmap(QPixmap())
def about(self):
QMessageBox.about(self, 'About Grabber', 'The <b>Grabber</b> example demonstrates two approaches for rendering OpenGL into a Qt pixmap.')
def createActions(self):
self.grabFrameBufferAct = QAction('&Grab Frame Buffer', self, shortcut='Ctrl+G', triggered=self.grabFrameBuffer)
self.clearPixmapAct = QAction('&Clear Pixmap', self, shortcut='Ctrl+L', triggered=self.clearPixmap)
self.exitAct = QAction('E&xit', self, shortcut='Ctrl+Q', triggered=self.close)
self.aboutAct = QAction('&About', self, triggered=self.about)
self.aboutQtAct = QAction('About &Qt', self, triggered=QApplication.instance().aboutQt)
def createMenus(self):
self.fileMenu = self.menuBar().addMenu('&File')
self.fileMenu.addAction(self.grabFrameBufferAct)
self.fileMenu.addAction(self.clearPixmapAct)
self.fileMenu.addSeparator()
self.fileMenu.addAction(self.exitAct)
self.helpMenu = self.menuBar().addMenu('&Help')
self.helpMenu.addAction(self.aboutAct)
self.helpMenu.addAction(self.aboutQtAct)
def createSlider(self, changedSignal, setterSlot):
slider = QSlider(Qt.Horizontal)
slider.setRange(0, (360 * 16))
slider.setSingleStep(16)
slider.setPageStep((15 * 16))
slider.setTickInterval((15 * 16))
slider.setTickPosition(QSlider.TicksRight)
slider.valueChanged.connect(setterSlot)
changedSignal.connect(slider.setValue)
return slider
def setPixmap(self, pixmap):
self.pixmapLabel.setPixmap(pixmap)
size = pixmap.size()
if ((size - QSize(1, 0)) == self.pixmapLabelArea.maximumViewportSize()):
size -= QSize(1, 0)
self.pixmapLabel.resize(size) |
class Effect5927(BaseEffect):
runTime = 'early'
type = ('projected', 'passive')
def handler(fit, beacon, context, projectionRange, **kwargs):
fit.modules.filteredChargeMultiply((lambda mod: mod.charge.requiresSkill('Bomb Deployment')), 'scanRadarStrengthBonus', beacon.getModifiedItemAttr('smartbombDamageMultiplier'), stackingPenalties=True, penaltyGroup='postMul', **kwargs) |
@with_fixtures(WebFixture, ChoicesFixture)
def test_choices_layout_applied_to_checkbox(web_fixture, choices_fixture):
fixture = choices_fixture
stacked_container = Div(web_fixture.view).use_layout(ChoicesLayout())
stacked_container.layout.add_choice(PrimitiveCheckboxInput(fixture.form, fixture.boolean_field))
stacked_container_classes = stacked_container.children[0].get_attribute('class').split(' ')
assert ('custom-control' in stacked_container_classes)
assert ('custom-checkbox' in stacked_container_classes)
[checkbox_input, label] = stacked_container.children[0].children
[description_widget] = label.children
assert (label.tag_name == 'label')
assert (checkbox_input.html_representation.input_type == 'checkbox')
assert (description_widget.value == 'field') |
class Trainer(TrainerBase):
def __init__(self, args, train_loader=None, val_loader=None, test_loader=None, train=True):
super().__init__(args, train_loader=train_loader, val_loader=val_loader, test_loader=test_loader, train=train)
if (not self.verbose):
set_global_logging_level(logging.ERROR, ['transformers'])
from vqa_model import FewVLMVQA
model_kwargs = {}
if ('t5' in args.backbone):
model_class = FewVLMVQA
config = self.create_config()
self.tokenizer = self.create_tokenizer()
self.model = self.create_model(model_class, config, **model_kwargs)
if ('t5' in self.args.tokenizer):
self.model.resize_token_embeddings(self.tokenizer.vocab_size)
self.model.tokenizer = self.tokenizer
self.start_epoch = None
if (args.load is not None):
ckpt_path = (args.load + '.pth')
self.load_checkpoint(ckpt_path)
if self.args.from_scratch:
self.init_weights()
print(f'Model Launching at GPU {self.args.gpu}')
if self.verbose:
from time import time
start = time()
self.model = self.model.to(args.gpu)
print('num grad param:', count_parameters(self.model))
print('num total elements:', sum((p.numel() for p in self.model.parameters())))
if train:
(self.optim, self.lr_scheduler) = self.create_optimizer_and_scheduler()
if (self.args.fp16 and _use_native_amp):
self.scaler = torch.cuda.amp.GradScaler()
elif _use_apex:
(self.model, self.optim) = amp.initialize(self.model, self.optim, opt_level='O1', verbosity=self.verbose)
if args.multiGPU:
if args.distributed:
self.model = DDP(self.model, device_ids=[args.gpu], find_unused_parameters=True)
if self.verbose:
print(f'It took {(time() - start):.1f}s')
def train(self):
if self.verbose:
loss_meter = LossMeter()
best_valid = 0.0
best_epoch = 0
if self.args.distributed:
dist.barrier()
global_step = 0
if (not self.args.test_only):
for epoch in range(self.args.epochs):
if (self.start_epoch is not None):
epoch += self.start_epoch
self.model.train()
if self.args.distributed:
self.train_loader.sampler.set_epoch(epoch)
if self.verbose:
pbar = tqdm(total=len(self.train_loader), ncols=120)
epoch_results = {'loss': 0.0}
quesid2ans = {}
for (step_i, batch) in enumerate(self.train_loader):
if (self.args.fp16 and _use_native_amp):
with autocast():
if self.args.distributed:
results = self.model.module.train_step(batch)
else:
results = self.model.train_step(batch)
elif self.args.distributed:
results = self.model.module.train_step(batch)
else:
results = self.model.train_step(batch)
loss = results['loss']
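# Backward pass (comment added for clarity): native AMP scales the loss via
# GradScaler, apex wraps it in amp.scale_loss, otherwise plain backward.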
if (self.args.fp16 and _use_native_amp):
self.scaler.scale(loss).backward()
elif (self.args.fp16 and _use_apex):
with amp.scale_loss(loss, self.optim) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
loss = loss.detach()
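# Gradient clipping (comment added for clarity): under native AMP the
# gradients must be unscaled first; apex clips its FP32 master params instead.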
if (self.args.clip_grad_norm > 0):
if (self.args.fp16 and _use_native_amp):
self.scaler.unscale_(self.optim)
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip_grad_norm)
elif (self.args.fp16 and _use_apex):
torch.nn.utils.clip_grad_norm_(amp.master_params(self.optim), self.args.clip_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip_grad_norm)
if (self.args.fp16 and _use_native_amp):
self.scaler.step(self.optim)
self.scaler.update()
else:
self.optim.step()
if self.lr_scheduler:
self.lr_scheduler.step()
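# Setting grads to None instead of calling optimizer.zero_grad() skips the
# zero-fill kernel and lets the allocator reuse the memory (comment added
# for clarity).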
for param in self.model.parameters():
param.grad = None
global_step += 1
for (k, v) in results.items():
if (k in epoch_results):
epoch_results[k] += v.item()
if self.lr_scheduler:
if (version.parse(torch.__version__) >= version.parse('1.4')):
lr = self.lr_scheduler.get_last_lr()[0]
else:
lr = self.lr_scheduler.get_lr()[0]
else:
try:
lr = self.optim.get_lr()[0]
except AttributeError:
lr = self.args.lr
if self.verbose:
loss_meter.update(loss.item())
desc_str = f'Epoch {epoch} | LR {lr:.6f}'
desc_str += f' | Loss {loss_meter.val:4f}'
pbar.set_description(desc_str)
pbar.update(1)
if self.args.distributed:
dist.barrier()
if self.verbose:
pbar.close()
score_dict = self.evaluate(self.val_loader)
if self.verbose:
valid_score = (score_dict['topk_score'] * 100.0)
valid_score_raw = score_dict['overall']
if ((valid_score_raw >= best_valid) or (epoch == 0)):
best_valid = valid_score_raw
best_epoch = epoch
self.save('BEST')
log_str = ''
log_str += ('\nEpoch %d: Valid Raw %0.2f Topk %0.2f' % (epoch, valid_score_raw, valid_score))
log_str += ('\nEpoch %d: Best Raw %0.2f\n' % (best_epoch, best_valid))
print(log_str)
if self.args.distributed:
dist.barrier()
if self.verbose:
self.save('LAST')
if (not self.args.test_only):
best_path = os.path.join(self.args.output, 'BEST')
self.load(best_path)
quesid2ans = self.predict(self.test_loader)
if self.verbose:
evaluator = self.test_loader.evaluator
score_dict = evaluator.evaluate(quesid2ans)
evaluator.dump_result(quesid2ans, 'result.txt')
acc_dict_all = evaluator.evaluate_raw(quesid2ans)
acc_dict_answerable = evaluator.evaluate_raw(quesid2ans, is_topk_optimal=True)
acc_dict_unanswerable = evaluator.evaluate_raw(quesid2ans, is_topk_optimal=False)
log_dict = {}
log_dict['Test/overall'] = acc_dict_all['overall']
log_dict['Test/topk_optimal'] = acc_dict_answerable['overall']
log_dict['Test/topk_not_optimal'] = acc_dict_unanswerable['overall']
for (qtype, score) in acc_dict_all['perQuestionType'].items():
log_dict[f'Test_Qtypes/{qtype}'] = score
for (atype, score) in acc_dict_all['perAnswerType'].items():
if (atype == 'yes/no'):
atype = 'yes_no'
log_dict[f'Test_Atypes/{atype}'] = score
print(log_dict)
if self.args.submit:
if (not os.path.isdir(self.args.output)):
os.makedirs(self.args.output, exist_ok=True)
dump_path = os.path.join(self.args.output, 'submit.json')
self.predict(self.submit_test_loader, dump_path)
if self.args.distributed:
dist.barrier()
exit()
def predict(self, loader, dump_path=None):
self.model.eval()
with torch.no_grad():
quesid2ans = {}
if self.verbose:
pbar = tqdm(total=len(loader), ncols=120, desc='Prediction')
for (i, batch) in enumerate(loader):
if self.args.distributed:
results = self.model.module.test_step(batch)
else:
results = self.model.test_step(batch)
pred_ans = results['pred_ans']
ques_ids = batch['question_ids']
for (qid, ans) in zip(ques_ids, pred_ans):
quesid2ans[qid] = ans
if self.verbose:
pbar.update(1)
if self.verbose:
pbar.close()
if self.args.distributed:
dist.barrier()
qid2ans_list = dist_utils.all_gather(quesid2ans)
if self.verbose:
quesid2ans = {}
for qid2ans in qid2ans_list:
for (k, v) in qid2ans.items():
quesid2ans[k] = v
if (dump_path is not None):
evaluator = loader.evaluator
evaluator.dump_result(quesid2ans, dump_path)
return quesid2ans
def evaluate(self, loader, dump_path=None):
quesid2ans = self.predict(loader, dump_path)
if self.verbose:
evaluator = loader.evaluator
acc_dict = evaluator.evaluate_raw(quesid2ans)
topk_score = evaluator.evaluate(quesid2ans)
acc_dict['topk_score'] = topk_score
return acc_dict |
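# Editor's driver sketch (hypothetical, not from the original source): the
# helpers build_args() and build_loader() below are placeholders for the
# project's own argument parsing and data-loading code.
if __name__ == '__main__':
    args = build_args()                         # hypothetical argparse helper
    train_loader = build_loader(args, 'train')  # hypothetical loader factory
    val_loader = build_loader(args, 'val')
    test_loader = build_loader(args, 'test')
    trainer = Trainer(args, train_loader, val_loader, test_loader, train=True)
    trainer.train()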
class AsyncWorker():
_terminator = object()
def __init__(self, shutdown_timeout=DEFAULT_TIMEOUT):
check_threads()
self._queue = Queue((- 1))
self._lock = threading.Lock()
self._thread = None
self._thread_for_pid = None
self.options = {'shutdown_timeout': shutdown_timeout}
self.start()
def is_alive(self):
if (self._thread_for_pid != os.getpid()):
return False
return (self._thread and self._thread.is_alive())
def _ensure_thread(self):
if self.is_alive():
return
self.start()
def main_thread_terminated(self):
with self._lock:
if (not self.is_alive()):
return
self._queue.put_nowait(self._terminator)
timeout = self.options['shutdown_timeout']
initial_timeout = min(0.1, timeout)
if (not self._timed_queue_join(initial_timeout)):
size = self._queue.qsize()
print(('Sentry is attempting to send %i pending error messages' % size))
print(('Waiting up to %s seconds' % timeout))
if (os.name == 'nt'):
print('Press Ctrl-Break to quit')
else:
print('Press Ctrl-C to quit')
self._timed_queue_join((timeout - initial_timeout))
self._thread = None
def _timed_queue_join(self, timeout):
deadline = (time() + timeout)
queue = self._queue
queue.all_tasks_done.acquire()
try:
while queue.unfinished_tasks:
delay = (deadline - time())
if (delay <= 0):
return False
queue.all_tasks_done.wait(timeout=delay)
return True
finally:
queue.all_tasks_done.release()
def start(self):
self._lock.acquire()
try:
if (not self.is_alive()):
self._thread = threading.Thread(target=self._target, name='raven.AsyncWorker')
self._thread.setDaemon(True)
self._thread.start()
self._thread_for_pid = os.getpid()
finally:
self._lock.release()
atexit.register(self.main_thread_terminated)
def stop(self, timeout=None):
with self._lock:
if self._thread:
self._queue.put_nowait(self._terminator)
self._thread.join(timeout=timeout)
self._thread = None
self._thread_for_pid = None
def queue(self, callback, *args, **kwargs):
self._ensure_thread()
self._queue.put_nowait((callback, args, kwargs))
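# Worker loop (descriptive comment added by the editor): drain the queue
# until the terminator sentinel arrives, marking each item done so that
# _timed_queue_join() can track unfinished tasks.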
def _target(self):
while True:
record = self._queue.get()
try:
if (record is self._terminator):
break
(callback, args, kwargs) = record
try:
callback(*args, **kwargs)
except Exception:
logger.error('Failed processing job', exc_info=True)
finally:
self._queue.task_done()
sleep(0) |
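# Editor's usage sketch (an addition, not original code): enqueue a callback
# on the background thread, then flush and stop. Assumes the module-level
# imports this class already relies on (threading, os, atexit, Queue, time).
worker = AsyncWorker(shutdown_timeout=2.0)
worker.queue(print, 'sending pending message')
worker.stop(timeout=2.0)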
@keras_test
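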
def test_merge_average():
i1 = layers.Input(shape=(4, 5))
i2 = layers.Input(shape=(4, 5))
o = layers.average([i1, i2])
assert (o._keras_shape == (None, 4, 5))
model = models.Model([i1, i2], o)
avg_layer = layers.Average()
o2 = avg_layer([i1, i2])
assert (avg_layer.output_shape == (None, 4, 5))
x1 = np.random.random((2, 4, 5))
x2 = np.random.random((2, 4, 5))
out = model.predict([x1, x2])
assert (out.shape == (2, 4, 5))
assert_allclose(out, (0.5 * (x1 + x2)), atol=0.0001) |
def test_offxml_combine_no_polar_lj(tmpdir, methanol, rfree_data, vs):
with tmpdir.as_cwd():
alpha = rfree_data.pop('alpha')
beta = rfree_data.pop('beta')
lj = LennardJones612(free_parameters=rfree_data, alpha=alpha, beta=beta, lj_on_polar_h=False)
lj.run(methanol)
rfree_data['alpha'] = alpha
rfree_data['beta'] = beta
_combine_molecules_offxml(molecules=[methanol], parameters=elements, rfree_data=rfree_data, filename='no_polar_h.offxml', water_model='tip4p-fb', h_constraints=True, lj_on_polar_h=False)
ff = ForceField('no_polar_h.offxml', load_plugins=True, allow_cosmetic_attributes=True)
vdw = ff.get_parameter_handler('QUBEKitvdWTS')
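# Comment added by the editor: the attribute appears to round-trip through
# the offxml file as a string, hence the comparison with 'False' below.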
assert (vdw.lj_on_polar_h == 'False')
assert ('parameterize' in vdw._cosmetic_attribs)
assert ('xfree' not in getattr(vdw, '_parameterize').split(','))
off_mol = Molecule.from_rdkit(methanol.to_rdkit())
openmm_system = ff.create_openmm_system(off_mol.to_topology())
forces = dict(((force.__class__.__name__, force) for force in openmm_system.getForces()))
nonbonded_force: openmm.NonbondedForce = forces['NonbondedForce']
for i in range(methanol.n_atoms):
ref_params = methanol.NonbondedForce[(i,)]
(charge, sigma, epsilon) = nonbonded_force.getParticleParameters(i)
assert (charge.value_in_unit(unit.elementary_charge) == float(ref_params.charge))
assert (sigma.value_in_unit(unit.nanometers) == ref_params.sigma)
assert (epsilon.value_in_unit(unit.kilojoule_per_mole) == ref_params.epsilon) |
def test_python_cmdline(testcase: DataDrivenTestCase, step: int) -> None:
assert (testcase.old_cwd is not None), 'test was not properly set up'
program = '_program.py'
program_path = os.path.join(test_temp_dir, program)
with open(program_path, 'w', encoding='utf8') as file:
for s in testcase.input:
file.write(f'{s}\n')
args = parse_args(testcase.input[0])
custom_cwd = (parse_cwd(testcase.input[1]) if (len(testcase.input) > 1) else None)
args.append('--show-traceback')
if ('--error-summary' not in args):
args.append('--no-error-summary')
if ('--show-error-codes' not in args):
args.append('--hide-error-codes')
if ('--disallow-empty-bodies' not in args):
args.append('--allow-empty-bodies')
if ('--no-force-uppercase-builtins' not in args):
args.append('--force-uppercase-builtins')
if ('--no-force-union-syntax' not in args):
args.append('--force-union-syntax')
fixed = [python3_path, '-m', 'mypy']
env = os.environ.copy()
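# COLUMNS is dropped presumably so mypy's output wrapping does not depend on
# the width of the invoking terminal (comment added by the editor).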
env.pop('COLUMNS', None)
extra_path = os.path.join(os.path.abspath(test_temp_dir), 'pypath')
env['PYTHONPATH'] = PREFIX
if os.path.isdir(extra_path):
env['PYTHONPATH'] += (os.pathsep + extra_path)
cwd = os.path.join(test_temp_dir, (custom_cwd or ''))
args = [arg.replace('$CWD', os.path.abspath(cwd)) for arg in args]
process = subprocess.Popen((fixed + args), stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd, env=env)
(outb, errb) = process.communicate()
result = process.returncode
out = [s.rstrip('\n\r') for s in str(outb, 'utf8').splitlines()]
err = [s.rstrip('\n\r') for s in str(errb, 'utf8').splitlines()]
if ('PYCHARM_HOSTED' in os.environ):
for (pos, line) in enumerate(err):
if line.startswith('pydev debugger: '):
del err[pos:(pos + 2)]
break
os.remove(program_path)
if testcase.output_files:
if (err or result):
raise AssertionError(('Expected zero status and empty stderr%s, got %d and\n%s' % (((' on step %d' % step) if testcase.output2 else ''), result, '\n'.join((err + out)))))
check_test_output_files(testcase, step)
else:
if testcase.normalize_output:
out = normalize_error_messages((err + out))
obvious_result = (1 if out else 0)
if (obvious_result != result):
out.append(f'== Return code: {result}')
expected_out = (testcase.output if (step == 1) else testcase.output2[step])
expected_out = [s.replace(('tmp' + os.sep), '') for s in expected_out]
assert_string_arrays_equal(expected_out, out, 'Invalid output ({}, line {}){}'.format(testcase.file, testcase.line, ((' on step %d' % step) if testcase.output2 else ''))) |