code stringlengths 281 23.7M |
|---|
def _next_cfg(stride_mode='dw', pool_type='avg2', conv_norm_layer='layernorm2d', conv_norm_layer_cl='layernorm', transformer_norm_layer='layernorm2d', transformer_norm_layer_cl='layernorm', window_size=None, init_values=1e-06, rel_pos_type='mlp', rel_pos_dim=512):
    """Build the conv/transformer config pair for a 'next'-style MaxxVit variant.

    ``init_values`` may be a scalar or a 2-tuple; the first entry goes to the
    conv stage, the second to the transformer stage.
    """
    conv_init, transformer_init = to_2tuple(init_values)
    conv_cfg = MaxxVitConvCfg(
        block_type='convnext',
        stride_mode=stride_mode,
        pool_type=pool_type,
        expand_output=False,
        init_values=conv_init,
        norm_layer=conv_norm_layer,
        norm_layer_cl=conv_norm_layer_cl,
    )
    transformer_cfg = MaxxVitTransformerCfg(
        expand_first=False,
        pool_type=pool_type,
        window_size=window_size,
        init_values=transformer_init,
        norm_layer=transformer_norm_layer,
        norm_layer_cl=transformer_norm_layer_cl,
        rel_pos_type=rel_pos_type,
        rel_pos_dim=rel_pos_dim,
    )
    return dict(conv_cfg=conv_cfg, transformer_cfg=transformer_cfg)
def define_and_solve_sims(model, experiments, parameter_values):
    """Create and solve one pybamm Simulation per experiment.

    ``experiments`` maps a C-rate key to an experiment; the returned dict maps
    the same keys to solved Simulation objects (eSOH calculation disabled).
    """
    solved = {}
    for c_rate, experiment in experiments.items():
        simulation = pybamm.Simulation(model, experiment=experiment, parameter_values=parameter_values)
        simulation.solve(calc_esoh=False)
        solved[c_rate] = simulation
    return solved
class TokenizerUtilsTest(unittest.TestCase):
    """Smoke tests that pretrained tokenizers load and expose sane special tokens."""

    def check_tokenizer_from_pretrained(self, tokenizer_class):
        """Load the first known checkpoint of ``tokenizer_class`` and sanity-check it."""
        known_checkpoints = list(tokenizer_class.max_model_input_sizes.keys())
        for checkpoint in known_checkpoints[:1]:
            tokenizer = tokenizer_class.from_pretrained(checkpoint)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, tokenizer_class)
            self.assertIsInstance(tokenizer, PreTrainedTokenizer)
            # Every special token must be text (unicode under py2) and map to an int id.
            for token in tokenizer.all_special_tokens:
                if six.PY2:
                    self.assertIsInstance(token, unicode)
                else:
                    self.assertIsInstance(token, str)
                token_id = tokenizer.convert_tokens_to_ids(token)
                self.assertIsInstance(token_id, int)

    def test_pretrained_tokenizers(self):
        self.check_tokenizer_from_pretrained(GPT2Tokenizer)
def build_language_model(opt, dicts):
    """Construct a TransformerXL language model from parsed options.

    ``dicts`` must provide a 'tgt' vocabulary and, when
    ``opt.use_language_embedding`` is set, a 'langs' mapping.
    """
    opt = backward_compatible(opt)
    # Publish per-model settings through the global onmt constants namespace.
    onmt.constants.layer_norm = opt.layer_norm
    onmt.constants.weight_norm = opt.weight_norm
    onmt.constants.activation_layer = opt.activation_layer
    onmt.constants.version = 1.0
    onmt.constants.attention_out = opt.attention_out
    onmt.constants.residual_type = opt.residual_type
    from onmt.models.transformer_xl import TransformerXL
    tgt_embedding = nn.Embedding(dicts['tgt'].size(), opt.model_size, padding_idx=onmt.constants.TGT_PAD)
    if opt.use_language_embedding:
        print(('* Create language embeddings with %d languages' % len(dicts['langs'])))
        language_embeddings = nn.Embedding(len(dicts['langs']), opt.model_size)
    else:
        language_embeddings = None
    generator = onmt.modules.base_seq2seq.Generator(opt.model_size, dicts['tgt'].size())
    model = TransformerXL(opt, tgt_embedding, nn.ModuleList([generator]), language_embeddings=language_embeddings)
    model.tgt_dict = dicts['tgt']
    if opt.tie_weights:
        print('* Joining the weights of decoder input and output embeddings')
        model.tie_weights()
    return model
class SqueezeboxPlaylistPlugin(PlaylistPlugin, SqueezeboxPluginMixin):
    """Exports a Quod Libet playlist to a Logitech Squeezebox server playlist."""
    PLUGIN_ID = 'Export to Squeezebox Playlist'
    PLUGIN_NAME = _('Export to Squeezebox')
    PLUGIN_DESC_MARKUP = ((_('Dynamically exports a playlist to Logitech Squeezebox playlist, provided both share a directory structure.') + '\n') + (_('Shares configuration with <a href="%(plugin_link)s">Squeezebox Sync plugin</a>.') % {'plugin_link': 'quodlibet:///prefs/plugins/Squeezebox Output'}))
    PLUGIN_ICON = Icons.NETWORK_WORKGROUP
    ELLIPSIZE_NAME = True
    # Extra task-progress units reserved for the save/clear/restore bookkeeping steps.
    _PERSIST_FUDGE = 100
    # Scratch playlist used to back up and restore the current server state.
    TEMP_PLAYLIST = '_quodlibet'

    def __add_songs(self, task, songs, name):
        """Copool generator: export ``songs`` to Squeezebox playlist ``name``.

        Backs up the current server playlist, adds each song, saves under
        ``name``, then restores playback state. Yields after each unit of
        work so the UI stays responsive; honours cancellation via
        ``self.__cancel``.
        """
        print_d('Backing up current Squeezebox playlist. This can take a while if your current playlist is big...')
        self.__cancel = False
        task_total = float(((len(songs) + (2 * self._PERSIST_FUDGE)) + (3 * 2)))
        self.server.playlist_save(self.TEMP_PLAYLIST)
        task.update((self._PERSIST_FUDGE / task_total))
        (yield True)
        self.server.playlist_clear()
        # BUG FIX: was floor division (//), which truncated this fractional
        # progress value to 0.0 and froze the progress indicator here.
        task.update(((self._PERSIST_FUDGE + 2.0) / task_total))
        (yield True)
        stopped = self.server.is_stopped()
        total = len(songs)
        print_d(('Adding %d song(s) to Squeezebox playlist. This might take a while...' % total))
        for (i, song) in enumerate(songs):
            if self.__cancel:
                print_d('Cancelled squeezebox export')
                self.__cancel = False
                break
            self.server.playlist_add(self.get_sb_path(song))
            task.update((float(i) / task_total))
            (yield True)
        print_d(('Saving Squeezebox playlist "%s"' % name))
        self.server.playlist_save(name)
        task.update(((task_total - 2) / task_total))
        (yield True)
        # Restore whatever was playing before the export started.
        self.server.playlist_resume(self.TEMP_PLAYLIST, (not stopped), True)
        task.finish()

    def __cancel_add(self):
        """Request cancellation of an in-flight export (checked per song)."""
        self.__cancel = True

    def __get_playlist_name(self, name='Quod Libet playlist'):
        """Prompt the user for the target playlist name; returns None if cancelled.

        BUG FIX: ``self`` was missing from the signature, so the bound call in
        ``plugin_playlist`` raised TypeError (self was bound to ``name`` while
        ``name=`` was also passed as a keyword).
        """
        dialog = GetStringDialog(None, _('Export playlist to Squeezebox'), _('Playlist name (will overwrite existing)'), button_label=_('_Save'), button_icon=Icons.DOCUMENT_SAVE)
        name = dialog.run(text=name)
        return name

    def plugin_playlist(self, playlist):
        """Entry point: export ``playlist`` to the configured Squeezebox server."""
        self.init_server()
        if (not self.server.is_connected):
            qltk.ErrorMessage(app.window, _('Error finding Squeezebox server'), (_('Error finding %s. Please check settings') % self.server.config)).run()
        else:
            name = self.__get_playlist_name(name=playlist.name)
            if name:
                task = Task('Squeezebox', _('Export to Squeezebox playlist'), stop=self.__cancel_add)
                copool.add(self.__add_songs, task, playlist.songs, name, funcid='squeezebox-playlist-save')
class ResNet(nn.Module):
    """Torchvision-style ResNet backbone.

    Builds the stem (7x7 conv + maxpool), four residual stages, global
    average pooling and a linear classifier head.
    """
    def __init__(self, block: Type[Union[(BasicBlock, Bottleneck)]], layers: List[int], num_classes: int=1000, zero_init_residual: bool=False, groups: int=1, width_per_group: int=64, replace_stride_with_dilation: Optional[List[bool]]=None, norm_layer: Optional[Callable[(..., nn.Module)]]=None) -> None:
        """Construct the network.

        block: residual block class (BasicBlock or Bottleneck).
        layers: number of blocks in each of the four stages.
        replace_stride_with_dilation: per-stage flag to trade stride for dilation.
        norm_layer: normalization factory; defaults to nn.BatchNorm2d.
        """
        super().__init__()
        _log_api_usage_once(self)
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        # Running channel count; mutated by each _make_layer call in order.
        self.inplanes = 64
        self.dilation = 1
        if (replace_stride_with_dilation is None):
            replace_stride_with_dilation = [False, False, False]
        if (len(replace_stride_with_dilation) != 3):
            raise ValueError(f'replace_stride_with_dilation should be None or a 3-element tuple, got {replace_stride_with_dilation}')
        self.groups = groups
        self.base_width = width_per_group
        # Stem: 7x7/2 conv then 3x3/2 maxpool -> 4x total downsampling.
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear((512 * block.expansion), num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero the last BN in each residual branch so blocks start as identity
        # (improves early training; see "Bag of Tricks", He et al.).
        if zero_init_residual:
            for m in self.modules():
                if (isinstance(m, Bottleneck) and (m.bn3.weight is not None)):
                    nn.init.constant_(m.bn3.weight, 0)
                elif (isinstance(m, BasicBlock) and (m.bn2.weight is not None)):
                    nn.init.constant_(m.bn2.weight, 0)
    def _make_layer(self, block: Type[Union[(BasicBlock, Bottleneck)]], planes: int, blocks: int, stride: int=1, dilate: bool=False) -> nn.Sequential:
        """Build one residual stage of ``blocks`` blocks.

        The first block may downsample (stride/1x1 projection); the rest keep
        shape. Mutates self.inplanes and self.dilation, so call order matters.
        """
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Replace spatial stride with dilation (keeps resolution).
            self.dilation *= stride
            stride = 1
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            # Projection shortcut to match shape of the residual branch.
            downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))
        self.inplanes = (planes * block.expansion)
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
        return nn.Sequential(*layers)
    def _forward_impl(self, x: Tensor) -> Tensor:
        """Stem -> 4 stages -> global avg pool -> flatten -> classifier logits."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x
    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)
def test_structure_flattening(debug_ctx, debug_trail, trail_select):
    """Loader must flatten the nested input crown into Gauge fields a..f,
    collect unknown keys into ``extra`` (ExtraTargets), and, on bad input,
    raise the error at the trail expected for the active debug_trail policy.
    """
    loader_getter = make_loader_getter(shape=COMPLEX_STRUCTURE_SHAPE, name_layout=InputNameLayout(crown=COMPLEX_STRUCTURE_CROWN, extra_move=ExtraTargets(('extra',))), debug_trail=debug_trail, debug_ctx=debug_ctx)
    loader = loader_getter()
    # Happy path: nested dict/list crown flattens into flat Gauge fields.
    assert (loader({'z': {'y': 1, 'x': 2}, 'w': 3, 'v': [4, {'u': 5}, [6]]}) == gauge(a=1, b=2, c=3, d=4, e=5, f=6, extra={'z': {}, 'v': [{}, {}, [{}]]}))
    # Unknown keys at every nesting level are routed into ``extra``.
    assert (loader({'z': {'y': 1, 'x': 2, 'extra_1': 3}, 'w': 4, 'v': [5, {'u': 6, 'extra_2': 7}, [8]], 'extra_3': 9}) == gauge(a=1, b=2, c=4, d=5, e=6, f=8, extra={'z': {'extra_1': 3}, 'v': [{}, {'extra_2': 7}, [{}]], 'extra_3': 9}))
    # Wrong type where a mapping is expected: error trail is ['z'].
    raises_exc(trail_select(disable=TypeLoadError(CollectionsMapping, 'this is not a dict'), first=with_trail(TypeLoadError(CollectionsMapping, 'this is not a dict'), ['z']), all=AggregateLoadError(f'while loading model {Gauge}', [with_trail(TypeLoadError(CollectionsMapping, 'this is not a dict'), ['z'])])), (lambda : loader({'z': 'this is not a dict', 'w': 3, 'v': [4, {'u': 5}, [6]]})))
    # None where a sequence is expected: error trail is ['v'].
    raises_exc(trail_select(disable=TypeLoadError(CollectionsSequence, None), first=with_trail(TypeLoadError(CollectionsSequence, None), ['v']), all=AggregateLoadError(f'while loading model {Gauge}', [with_trail(TypeLoadError(CollectionsSequence, None), ['v'])])), (lambda : loader({'z': {'y': 1, 'x': 2}, 'w': 3, 'v': None})))
    # str is sequence-like but explicitly excluded for the list crown.
    raises_exc(trail_select(disable=ExcludedTypeLoadError(CollectionsSequence, str, 'this is not a list'), first=with_trail(ExcludedTypeLoadError(CollectionsSequence, str, 'this is not a list'), ['v']), all=AggregateLoadError(f'while loading model {Gauge}', [with_trail(ExcludedTypeLoadError(CollectionsSequence, str, 'this is not a list'), ['v'])])), (lambda : loader({'z': {'y': 1, 'x': 2}, 'w': 3, 'v': 'this is not a list'})))
class ActionScriptLexer(RegexLexer):
    """Pygments lexer for ActionScript source code."""
    name = 'ActionScript'
    aliases = ['actionscript', 'as']
    filenames = ['*.as']
    mimetypes = ['application/x-actionscript', 'text/x-actionscript', 'text/actionscript']
    # BUG FIX: this literal was unterminated ("url = '"), a syntax error that
    # made the whole module unimportable; restored to the upstream value.
    url = 'https://en.wikipedia.org/wiki/ActionScript'
    version_added = '0.9'
    flags = re.DOTALL
    tokens = {'root': [('\\s+', Whitespace), ('//.*?\\n', Comment.Single), ('/\\*.*?\\*/', Comment.Multiline), ('/(|\\\\[^\\\\]|[^/\\\\\\n])*/[gim]*', String.Regex), ('[~^*!%&<>|+=:;,/?\\\\-]+', Operator), ('[{}\\[\\]();.]+', Punctuation), (words(('case', 'default', 'for', 'each', 'in', 'while', 'do', 'break', 'return', 'continue', 'if', 'else', 'throw', 'try', 'catch', 'var', 'with', 'new', 'typeof', 'arguments', 'instanceof', 'this', 'switch'), suffix='\\b'), Keyword), (words(('class', 'public', 'final', 'internal', 'native', 'override', 'private', 'protected', 'static', 'import', 'extends', 'implements', 'interface', 'intrinsic', 'return', 'super', 'dynamic', 'function', 'const', 'get', 'namespace', 'package', 'set'), suffix='\\b'), Keyword.Declaration), ('(true|false|null|NaN|Infinity|-Infinity|undefined|Void)\\b', Keyword.Constant), (words(('Accessibility', 'AccessibilityProperties', 'ActionScriptVersion', 'ActivityEvent', 'AntiAliasType', 'ApplicationDomain', 'AsBroadcaster', 'Array', 'AsyncErrorEvent', 'AVM1Movie', 'BevelFilter', 'Bitmap', 'BitmapData', 'BitmapDataChannel', 'BitmapFilter', 'BitmapFilterQuality', 'BitmapFilterType', 'BlendMode', 'BlurFilter', 'Boolean', 'ByteArray', 'Camera', 'Capabilities', 'CapsStyle', 'Class', 'Color', 'ColorMatrixFilter', 'ColorTransform', 'ContextMenu', 'ContextMenuBuiltInItems', 'ContextMenuEvent', 'ContextMenuItem', 'ConvultionFilter', 'CSMSettings', 'DataEvent', 'Date', 'DefinitionError', 'DeleteObjectSample', 'Dictionary', 'DisplacmentMapFilter', 'DisplayObject', 'DisplacmentMapFilterMode', 'DisplayObjectContainer', 'DropShadowFilter', 'Endian', 'EOFError', 'Error', 'ErrorEvent', 'EvalError', 'Event', 'EventDispatcher', 'EventPhase', 'ExternalInterface', 'FileFilter', 'FileReference', 'FileReferenceList', 'FocusDirection', 'FocusEvent', 'Font', 'FontStyle', 'FontType', 'FrameLabel', 'FullScreenEvent', 'Function', 'GlowFilter', 'GradientBevelFilter', 'GradientGlowFilter', 'GradientType', 'Graphics', 'GridFitType', 'HTTPStatusEvent', 'IBitmapDrawable', 'ID3Info', 'IDataInput', 'IDataOutput', 'IDynamicPropertyOutputIDynamicPropertyWriter', 'IEventDispatcher', 'IExternalizable', 'IllegalOperationError', 'IME', 'IMEConversionMode', 'IMEEvent', 'int', 'InteractiveObject', 'InterpolationMethod', 'InvalidSWFError', 'InvokeEvent', 'IOError', 'IOErrorEvent', 'JointStyle', 'Key', 'Keyboard', 'KeyboardEvent', 'KeyLocation', 'LineScaleMode', 'Loader', 'LoaderContext', 'LoaderInfo', 'LoadVars', 'LocalConnection', 'Locale', 'Math', 'Matrix', 'MemoryError', 'Microphone', 'MorphShape', 'Mouse', 'MouseEvent', 'MovieClip', 'MovieClipLoader', 'Namespace', 'NetConnection', 'NetStatusEvent', 'NetStream', 'NewObjectSample', 'Number', 'Object', 'ObjectEncoding', 'PixelSnapping', 'Point', 'PrintJob', 'PrintJobOptions', 'PrintJobOrientation', 'ProgressEvent', 'Proxy', 'QName', 'RangeError', 'Rectangle', 'ReferenceError', 'RegExp', 'Responder', 'Sample', 'Scene', 'ScriptTimeoutError', 'Security', 'SecurityDomain', 'SecurityError', 'SecurityErrorEvent', 'SecurityPanel', 'Selection', 'Shape', 'SharedObject', 'SharedObjectFlushStatus', 'SimpleButton', 'Socket', 'Sound', 'SoundChannel', 'SoundLoaderContext', 'SoundMixer', 'SoundTransform', 'SpreadMethod', 'Sprite', 'StackFrame', 'StackOverflowError', 'Stage', 'StageAlign', 'StageDisplayState', 'StageQuality', 'StageScaleMode', 'StaticText', 'StatusEvent', 'String', 'StyleSheet', 'SWFVersion', 'SyncEvent', 'SyntaxError', 'System', 'TextColorType', 'TextField', 'TextFieldAutoSize', 'TextFieldType', 'TextFormat', 'TextFormatAlign', 'TextLineMetrics', 'TextRenderer', 'TextSnapshot', 'Timer', 'TimerEvent', 'Transform', 'TypeError', 'uint', 'URIError', 'URLLoader', 'URLLoaderDataFormat', 'URLRequest', 'URLRequestHeader', 'URLRequestMethod', 'URLStream', 'URLVariabeles', 'VerifyError', 'Video', 'XML', 'XMLDocument', 'XMLList', 'XMLNode', 'XMLNodeType', 'XMLSocket', 'XMLUI'), suffix='\\b'), Name.Builtin), (words(('decodeURI', 'decodeURIComponent', 'encodeURI', 'escape', 'eval', 'isFinite', 'isNaN', 'isXMLName', 'clearInterval', 'fscommand', 'getTimer', 'getURL', 'getVersion', 'parseFloat', 'parseInt', 'setInterval', 'trace', 'updateAfterEvent', 'unescape'), suffix='\\b'), Name.Function), ('[$a-zA-Z_]\\w*', Name.Other), ('[0-9][0-9]*\\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), ('0x[0-9a-f]+', Number.Hex), ('[0-9]+', Number.Integer), ('"(|\\\\[^\\\\]|[^"\\\\])*"', String.Double), ("'(|\\\\[^\\\\]|[^'\\\\])*'", String.Single)]}
    def analyse_text(text):
        """No content-based detection; rely on filename/mimetype only."""
        return 0
class LdjsonWriterTest(Ldjson, WriterTest, TestCase):
    """Tests for the line-delimited JSON (ldjson) writer transformation."""
    # NOTE(review): the bare "()" statements below look like decorators whose
    # "@name" part was stripped by a source transformation (they are no-op
    # empty-tuple expressions as written) — confirm against upstream source.
    ()
    def test_fields(self, context):
        # Explicit input fields: positional tuples map onto the field names.
        context.set_input_fields(['foo', 'bar'])
        context.write_sync(('a', 'b'), ('c', 'd'))
        context.stop()
        assert (self.readlines() == ('{"foo": "a", "bar": "b"}', '{"foo": "c", "bar": "d"}'))
    ()
    def test_fields_from_type(self, context):
        # Field names inferred from a namedtuple input type.
        context.set_input_type(namedtuple('Point', 'x y'))
        context.write_sync((1, 2), (3, 4))
        context.stop()
        assert (self.readlines() == ('{"x": 1, "y": 2}', '{"x": 3, "y": 4}'))
    ()
    def test_nofields_multiple_args(self, context):
        # Without fields, each arg in a tuple becomes its own output line.
        context.write_sync((FOOBAR, FOOBAR), (OD_ABC, FOOBAR), (FOOBAZ, FOOBAR))
        context.stop()
        assert (self.readlines() == ('{"foo": "bar"}', '{"foo": "bar"}', '{"a": "A", "b": "B", "c": "C"}', '{"foo": "bar"}', '{"foo": "baz"}', '{"foo": "bar"}'))
    ()
    def test_nofields_multiple_args_length_mismatch(self, context):
        # Mixing tuple-wrapped and bare args must be rejected.
        with pytest.raises(TypeError):
            context.write_sync((FOOBAR, FOOBAR), OD_ABC)
    ()
    def test_nofields_single_arg(self, context):
        context.write_sync(FOOBAR, OD_ABC, FOOBAZ)
        context.stop()
        assert (self.readlines() == ('{"foo": "bar"}', '{"a": "A", "b": "B", "c": "C"}', '{"foo": "baz"}'))
    ()
    def test_nofields_empty_args(self, context):
        # Empty payloads produce no output lines.
        context.write_sync(EMPTY, EMPTY, EMPTY)
        context.stop()
        assert (self.readlines() == ())
def test_no_argument_provided(runner):
    """Every value-taking CLI option must fail cleanly when no value follows it."""
    value_options = ['--deffile', '--profile', '--prefix', '--output', '--defdir', '--iocfile', '--ioctype', '--query', '--hostname', '--days', '--minutes', '--username', '--limit']
    for option in value_options:
        outcome = runner.invoke(cli, [option])
        assert f"Option '{option}' requires an argument" in outcome.output
        assert outcome.exit_code != 0
class ISDALoss(nn.Module):
    """Implicit Semantic Data Augmentation (ISDA) style loss.

    Maintains per-class feature covariance via an EstimatorCV and augments
    the logits with a covariance-dependent term before cross entropy.
    Requires CUDA (tensors are moved with .cuda()).
    """
    def __init__(self, feature_num, class_num):
        # feature_num: feature dimensionality A; class_num: number of classes C.
        super(ISDALoss, self).__init__()
        self.estimator = EstimatorCV(feature_num, class_num)
        self.class_num = class_num
        self.cross_entropy = nn.CrossEntropyLoss()
    def isda_aug(self, fc, features, y, labels, cv_matrix, beta, index_tail, sth):
        """Return logits ``y`` augmented with 0.5 * beta * sigma^2 per class.

        For each sample i, sigma2[i, c] measures the covariance-weighted
        squared distance between class c's classifier weights and the weights
        of the sample's own label. ``index_tail`` and ``sth`` are unused here
        (kept for signature compatibility with callers).
        """
        N = features.size(0)
        C = self.class_num
        A = features.size(1)
        # fc weight matrix, shape (C, A).
        weight_m = list(fc.parameters())[0]
        # Per-sample covariance rows selected by label, shape (N, A).
        CV_temp = cv_matrix[labels]
        sigma2 = torch.zeros(N, C).cuda()
        # NOTE(review): per-sample Python loop; appears vectorizable but the
        # gather/broadcast semantics are kept as-is to preserve behavior.
        for i in range(N):
            # W_kj: the label-class weight row broadcast to (C, A).
            W_kj = torch.gather(weight_m, 0, labels[i].view(1, 1).expand(C, A))
            sigma2[i] = (beta * (weight_m - W_kj).pow(2).mul(CV_temp[i].view(1, A).expand(C, A)).sum(1))
        aug_result = (y + (0.5 * sigma2))
        del sigma2
        return aug_result
    def forward(self, model, x, target_x, alpha, weights, kg_sigma, index_tail, beta, sth, args):
        """Update covariance stats, rescale tail-class covariance by kg_sigma,
        then compute weighted cross entropy on the augmented logits.

        Returns (loss, raw_logits). ``alpha`` is unused in this implementation.
        """
        (y, features) = model(x)
        self.estimator.update_CV(features.detach(), target_x)
        cv_var = self.get_cv()
        cv_matrix_temp = cv_var.cuda(args.gpu)
        # Knowledge-guided rescaling of tail-class covariance rows.
        cv_var_new = torch.matmul(kg_sigma[index_tail], cv_matrix_temp).cuda(args.gpu)
        cv_var[index_tail] = cv_var_new
        self.estimator.CoVariance = cv_var
        isda_aug_y = self.isda_aug(model.module.fc, features, y, target_x, self.estimator.CoVariance.detach(), beta, index_tail, sth)
        loss = F.cross_entropy(isda_aug_y, target_x, weight=weights)
        return (loss, y)
    def get_cv(self):
        """Current per-class covariance matrix held by the estimator."""
        return self.estimator.CoVariance
def main(args):
    """Train an MLP site classifier on one ABIDE site and evaluate on all four.

    Loads per-site correlation matrices (NYU/UM/USM/UCLA), normalizes them
    (per-site, or with the training site's statistics), trains an MLP with
    NLL loss for args.epochs, then saves model weights and per-site
    outputs/predictions/targets to HDF5 files.
    """
    # Create output directories on demand.
    if (not os.path.exists(args.res_dir)):
        os.mkdir(args.res_dir)
    if (not os.path.exists(os.path.join(args.res_dir, args.trainsite))):
        os.mkdir(os.path.join(args.res_dir, args.trainsite))
    if (not os.path.exists(args.model_dir)):
        os.mkdir(args.model_dir)
    torch.manual_seed(args.seed)
    # One .h5 file per acquisition site, each with 'data' and 'label' arrays.
    data1 = dd.io.load(os.path.join(args.vec_dir, 'NYU_correlation_matrix.h5'))
    data2 = dd.io.load(os.path.join(args.vec_dir, 'UM_correlation_matrix.h5'))
    data3 = dd.io.load(os.path.join(args.vec_dir, 'USM_correlation_matrix.h5'))
    data4 = dd.io.load(os.path.join(args.vec_dir, 'UCLA_correlation_matrix.h5'))
    x1 = torch.from_numpy(data1['data']).float()
    y1 = torch.from_numpy(data1['label']).long()
    x2 = torch.from_numpy(data2['data']).float()
    y2 = torch.from_numpy(data2['label']).long()
    x3 = torch.from_numpy(data3['data']).float()
    y3 = torch.from_numpy(data3['label']).long()
    x4 = torch.from_numpy(data4['data']).float()
    y4 = torch.from_numpy(data4['label']).long()
    if args.sepnorm:
        # Each site standardized with its own mean/std.
        mean = x1.mean(0, keepdim=True)
        dev = x1.std(0, keepdim=True)
        x1 = ((x1 - mean) / dev)
        mean = x2.mean(0, keepdim=True)
        dev = x2.std(0, keepdim=True)
        x2 = ((x2 - mean) / dev)
        mean = x3.mean(0, keepdim=True)
        dev = x3.std(0, keepdim=True)
        x3 = ((x3 - mean) / dev)
        mean = x4.mean(0, keepdim=True)
        dev = x4.std(0, keepdim=True)
        x4 = ((x4 - mean) / dev)
    else:
        # All sites standardized with the TRAINING site's statistics.
        if (args.trainsite == 'NYU'):
            mean = x1.mean(0, keepdim=True)
            dev = x1.std(0, keepdim=True)
        elif (args.trainsite == 'UM'):
            mean = x2.mean(0, keepdim=True)
            dev = x2.std(0, keepdim=True)
        elif (args.trainsite == 'USM'):
            mean = x3.mean(0, keepdim=True)
            dev = x3.std(0, keepdim=True)
        elif (args.trainsite == 'UCLA'):
            mean = x4.mean(0, keepdim=True)
            dev = x4.std(0, keepdim=True)
        x1 = ((x1 - mean) / dev)
        x2 = ((x2 - mean) / dev)
        x3 = ((x3 - mean) / dev)
        x4 = ((x4 - mean) / dev)
    datas = [TensorDataset(x1, y1), TensorDataset(x2, y2), TensorDataset(x3, y3), TensorDataset(x4, y4)]
    if (args.trainsite == 'NYU'):
        train_loader = DataLoader(datas[0], batch_size=args.batch_size, shuffle=True)
    elif (args.trainsite == 'UM'):
        train_loader = DataLoader(datas[1], batch_size=args.batch_size, shuffle=True)
    elif (args.trainsite == 'USM'):
        train_loader = DataLoader(datas[2], batch_size=args.batch_size, shuffle=True)
    elif (args.trainsite == 'UCLA'):
        train_loader = DataLoader(datas[3], batch_size=args.batch_size, shuffle=True)
    test_loader1 = DataLoader(datas[0], batch_size=args.test_batch_size1, shuffle=False)
    test_loader2 = DataLoader(datas[1], batch_size=args.test_batch_size2, shuffle=False)
    test_loader3 = DataLoader(datas[2], batch_size=args.test_batch_size3, shuffle=False)
    test_loader4 = DataLoader(datas[3], batch_size=args.test_batch_size4, shuffle=False)
    # 6105 input features = upper triangle of the correlation matrix; 2 classes.
    model = MLP(6105, args.dim, 2).to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=0.05)
    print(model)
    nnloss = nn.NLLLoss()
    def train(data_loader, epoch):
        """One training epoch; returns (mean loss, model)."""
        model.train()
        # NOTE(review): both branches halve the lr every 20 epochs — the
        # epoch<=50 / epoch>50 split is redundant as written (also fires at
        # epoch 0 since 0 % 20 == 0); confirm intent upstream.
        if ((epoch <= 50) and ((epoch % 20) == 0)):
            for param_group1 in optimizer.param_groups:
                param_group1['lr'] = (0.5 * param_group1['lr'])
        elif ((epoch > 50) and ((epoch % 20) == 0)):
            for param_group1 in optimizer.param_groups:
                param_group1['lr'] = (0.5 * param_group1['lr'])
        loss_all1 = 0
        for (data, target) in data_loader:
            optimizer.zero_grad()
            data = data.to(device)
            target = target.to(device)
            output1 = model(data)
            loss1 = nnloss(output1, target)
            loss1.backward()
            loss_all1 += (loss1.item() * target.size(0))
            optimizer.step()
        return ((loss_all1 / len(data_loader.dataset)), model)
    def test(data_loader, train=False):
        """Evaluate on a loader; returns (loss, accuracy, targets, outputs, preds).

        Uses ``federated_model``, the closure variable rebound each epoch by the
        outer training loop — valid only after the first call to train().
        """
        model.eval()
        test_loss = 0
        correct = 0
        outputs = []
        preds = []
        targets = []
        for (data, target) in data_loader:
            data = data.to(device)
            # NOTE(review): only target[0] of each batch is recorded — this
            # presumes the test batch covers the whole site; confirm.
            targets.append(target[0].detach().numpy())
            target = target.to(device)
            output = federated_model(data)
            outputs.append(output.detach().cpu().numpy())
            test_loss += (nnloss(output, target).item() * target.size(0))
            pred = output.data.max(1)[1]
            preds.append(pred.detach().cpu().numpy())
            correct += pred.eq(target.view((- 1))).sum().item()
        test_loss /= len(data_loader.dataset)
        correct /= len(data_loader.dataset)
        if train:
            print('Train set: Average loss: {:.4f}, Average acc: {:.4f}'.format(test_loss, correct))
        else:
            print('Test set: Average loss: {:.4f}, Average acc: {:.4f}'.format(test_loss, correct))
        return (test_loss, correct, targets, outputs, preds)
    for epoch in range(args.epochs):
        start_time = time.time()
        print(f'Epoch Number {(epoch + 1)}')
        (l1, federated_model) = train(train_loader, epoch)
        print(' L1 loss: {:.4f}'.format(l1))
        print('===NYU===')
        (_, acc1, targets1, outputs1, preds1) = test(test_loader1, train=False)
        print('===UM===')
        (_, acc2, targets2, outputs2, preds2) = test(test_loader2, train=False)
        print('===USM===')
        (_, acc3, targets3, outputs3, preds3) = test(test_loader3, train=False)
        print('===UCLA===')
        (_, acc4, targets4, outputs4, preds4) = test(test_loader4, train=False)
        total_time = (time.time() - start_time)
        print('Communication time over the network', round(total_time, 2), 's\n')
    # Persist final weights and the last epoch's per-site evaluation artifacts.
    model_wts = copy.deepcopy(model.state_dict())
    torch.save(model_wts, os.path.join(args.model_dir, (args.trainsite + '.pth')))
    dd.io.save(os.path.join(args.res_dir, args.trainsite, 'NYU.h5'), {'outputs': outputs1, 'preds': preds1, 'targets': targets1})
    dd.io.save(os.path.join(args.res_dir, args.trainsite, 'UM.h5'), {'outputs': outputs2, 'preds': preds2, 'targets': targets2})
    dd.io.save(os.path.join(args.res_dir, args.trainsite, 'USM.h5'), {'outputs': outputs3, 'preds': preds3, 'targets': targets3})
    dd.io.save(os.path.join(args.res_dir, args.trainsite, 'UCLA.h5'), {'outputs': outputs4, 'preds': preds4, 'targets': targets4})
def count_conv2d(m, x, y):
    """FLOP-counting hook for nn.Conv2d modules.

    Counts, per output element: kh*kw*cin multiplies, kh*kw*cin - 1 adds,
    plus one add if a bias is present; accumulates into ``m.total_ops``.

    m: the conv module (provides in_channels, groups, kernel_size, bias).
    x: tuple of hook inputs (unused in the count).
    y: output tensor; its numel() gives the number of output elements.
    """
    # Channels per group actually convolved for each output element.
    cin = (m.in_channels // m.groups)
    (kh, kw) = m.kernel_size
    kernel_mul = ((kh * kw) * cin)
    kernel_add = (kernel_mul - 1)
    bias_ops = (1 if (m.bias is not None) else 0)
    ops = ((kernel_mul + kernel_add) + bias_ops)
    # Removed dead locals from the original (cout, batch_size) — neither
    # contributed to the result.
    total_ops = ((y.numel() * ops) * m.groups)
    m.total_ops += torch.Tensor([int(total_ops)])
def upsample_flops_counter_hook(module: nn.Module, input: tuple, output: torch.Tensor) -> None:
    """Forward hook that adds the element count of ``output[0]`` to ``module.__flops__``."""
    first = output[0]
    element_count = 1
    # Product of all dimensions of output[0]'s shape.
    for dim in first.shape:
        element_count *= dim
    module.__flops__ += int(element_count)
class Configurable():
    """Mixin for objects whose hyperparameters live in ``self._hp``."""

    def _override_defaults(self, params):
        """Write ``params`` into self._hp, rejecting values equal to the default.

        Pass ``identical_default_ok=True`` in ``params`` to allow re-setting a
        value that equals the current default. Raises ValueError otherwise.
        The caller's dict is not mutated.
        """
        params = copy.copy(params)
        # BUG FIX: the flag's value was ignored (mere presence enabled it);
        # now identical_default_ok=False behaves like the flag being absent.
        identical_default_ok = params.pop('identical_default_ok', False)
        for (name, value) in params.items():
            if ((value == getattr(self._hp, name)) and (not identical_default_ok)):
                # BUG FIX: message read "attribute is {} is identical ...".
                raise ValueError('attribute {} is identical to default value {}!!'.format(name, value))
            self._hp[name] = value

    def _default_hparams(self):
        """Subclasses override to supply their default hyperparameters."""
        return AttrDict()
class StsbProcessor(DataProcessor):
    """GLUE STS-B data processor (regression task, hence no closed label set)."""

    def __init__(self, task_name):
        self.task_name = task_name

    def get_example_from_tensor_dict(self, tensor_dict):
        """Convert one TFDS tensor dict into an InputExample."""
        return InputExample(
            tensor_dict['idx'].numpy(),
            tensor_dict['sentence1'].numpy().decode('utf-8'),
            tensor_dict['sentence2'].numpy().decode('utf-8'),
            str(tensor_dict['label'].numpy()),
        )

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev')

    def get_test_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'test.tsv')), 'test')

    def get_labels(self):
        """STS-B is a regression task; there is no discrete label set."""
        return [None]

    def _create_examples(self, lines, set_type):
        """Build InputExamples from TSV rows, skipping the header row."""
        examples = []
        for row in lines[1:]:
            guid = ('%s-%s' % (set_type, row[0]))
            examples.append(InputExample(guid=guid, text_a=row[7], text_b=row[8], label=row[-1]))
        return examples
_macro(shortcut='Ctrl+Alt+Q')
def optimize2():
    """Run Nelder-Mead on the objective, seeded from cells C11:C12 of the workbook."""
    xl = xl_app()
    initial_guess = np.array([row[0] for row in xl.Range('C11:C12').Value])
    saved_calc_mode = xl.Calculation
    try:
        # Suspend recalculation and redraw while the optimizer drives the sheet.
        xl.Calculation = constants.xlManual
        xl.ScreenUpdating = False
        minimize(obj_func, initial_guess, method='nelder-mead')
    finally:
        # Always restore Excel's interactive state, even if minimize raises.
        xl.ScreenUpdating = True
        xl.Calculation = saved_calc_mode
def count_commits_on_date(dt: datetime.datetime) -> int:
    """Return the number of git commits in the current repo after the end of
    the day preceding ``dt`` (i.e. commits on dt's date or later).

    Requires running inside a git working tree.
    """
    # Cutoff: end of the previous day (23:59:59.999999) in dt's timezone.
    cutoff = datetime.datetime.combine((dt - datetime.timedelta(days=1)), dt.max.time(), dt.tzinfo)
    args = ['git', 'log', '--oneline', '--after', str(cutoff.timestamp())]
    stdout = subprocess.check_output(args, text=True)
    # BUG FIX: counting '\n' + 1 reported one phantom commit when git printed
    # nothing; splitlines() yields [] for empty output.
    return len(stdout.strip().splitlines())
def get_room(df_objects, df_receptacles, debug=False):
    """Score every object against each candidate room.

    Returns a dict with the room list, the object list, and a
    (num_objects, num_rooms) score matrix stacked from per-object
    probabilities.
    """
    rooms = get_list(df_receptacles, 'room', remove_none_dup=True, insert_spaces=True, append_list=living_room_syns)
    objects = get_list(df_objects, 'entity')
    target_rooms = get_list(df_objects, 'room')
    per_object_scores = []
    for obj_name, _target in tqdm(zip(objects, target_rooms), desc='Matching rooms given objects', total=len(objects)):
        _choices, room_probs = get_room_receptacle(obj_name, None, rooms, rr_type='room')
        per_object_scores.append(room_probs.squeeze())
    return {'rooms': rooms, 'objects': objects, 'scores': np.stack(per_object_scores, axis=0)}
def setUpModule():
    """Build the shared N2/STO-3G RHF reference used by the tests in this module."""
    global mol, mf
    mol = gto.Mole()
    # Molecule definition: N2 with D2h symmetry, quiet logging.
    settings = {
        'atom': '\n N 0. 0. 0.\n N 0. 0. 1.\n ',
        'basis': 'sto-3g',
        'symmetry': 'D2h',
        'charge': 0,
        'spin': 0,
        'verbose': 0,
    }
    for attr, value in settings.items():
        setattr(mol, attr, value)
    mol.build(0, 0)
    # Throwaway chkfile keeps the test run from littering the working directory.
    mf = mol.RHF(chkfile=tempfile.NamedTemporaryFile().name).run()
# BUG FIX: the decorator had lost its "@pytest.mark" prefix, leaving a bare
# ".parametrize(...)" statement — a syntax error.
@pytest.mark.parametrize('position', [OSC.WorldPosition(), OSC.RelativeWorldPosition('target', 0, 1, 0), OSC.RelativeObjectPosition('target', 1, 1), OSC.RoadPosition(10, 20, 0, orientation=OSC.Orientation(1, 1, 1, OSC.ReferenceContext.absolute)), OSC.RelativeRoadPosition(10, 0, 'ego', orientation=OSC.Orientation(1, 1, 1, OSC.ReferenceContext.relative)), OSC.LanePosition(10, 1, (- 1), 1, orientation=OSC.Orientation(1, 1, 1, OSC.ReferenceContext.relative)), OSC.RelativeLanePosition((- 1), 'target', 0, None, 0.1, orientation=OSC.Orientation(1, 1, 1, OSC.ReferenceContext.relative)), OSC.GeoPosition(10.11, 12.001), OSC.TrajectoryPosition(traj, 10), OSC.RoutePositionOfCurrentEntity(route, 'Ego'), OSC.RoutePositionInRoadCoordinates(route, 1, 3), OSC.RoutePositionInLaneCoordinates(route, 1, 1, 2)])
def test_position_factory(position):
    """Each position type must round-trip through its XML element and the factory."""
    factoryoutput = OSC.position._PositionFactory.parse_position(position.get_element())
    prettyprint(position)
    prettyprint(factoryoutput)
    assert (position == factoryoutput)
class TestInclude():
    """Tests for the attrs ``include`` filter factory."""
    # BUG FIX: both decorators below had lost their "@pytest.mark" prefix,
    # leaving bare ".parametrize(...)" statements — syntax errors.
    @pytest.mark.parametrize(('incl', 'value'), [((int,), 42), ((str,), 'hello'), ((str, fields(C).a), 42), ((str, fields(C).b), 'hello'), (('a',), 42), (('a',), 'hello'), (('a', str), 42), (('a', fields(C).b), 'hello')])
    def test_allow(self, incl, value):
        """Matching type, attribute, or name lets the value through."""
        i = include(*incl)
        assert (i(fields(C).a, value) is True)
    @pytest.mark.parametrize(('incl', 'value'), [((str,), 42), ((int,), 'hello'), ((str, fields(C).b), 42), ((int, fields(C).b), 'hello'), (('b',), 42), (('b',), 'hello'), (('b', str), 42), (('b', fields(C).b), 'hello')])
    def test_drop_class(self, incl, value):
        """Non-matching criteria reject the value."""
        i = include(*incl)
        assert (i(fields(C).a, value) is False)
def get_eval_loaders(opt):
    """Build meta-test and meta-val DataLoaders for the dataset named in opt.

    Returns (meta_testloader, meta_valloader, n_cls) where n_cls is the number
    of base (training) classes for the chosen dataset/split. Each dataset
    family asserts the transform preset it was trained with.
    """
    (train_trans, test_trans) = transforms_options[opt.transform]
    if (opt.dataset == 'miniImageNet'):
        assert (opt.transform == 'A')
        meta_testloader = DataLoader(MetaImageNet(args=opt, partition='test', train_transform=train_trans, test_transform=test_trans, fix_seed=False), batch_size=opt.test_batch_size, shuffle=False, drop_last=False, num_workers=opt.num_workers)
        meta_valloader = DataLoader(MetaImageNet(args=opt, partition='val', train_transform=train_trans, test_transform=test_trans, fix_seed=False), batch_size=opt.test_batch_size, shuffle=False, drop_last=False, num_workers=opt.num_workers)
        # 64 base classes, or 80 when training on train+val.
        if opt.use_trainval:
            n_cls = 80
        else:
            n_cls = 64
    elif (opt.dataset == 'tieredImageNet'):
        assert (opt.transform == 'A')
        meta_testloader = DataLoader(MetaTieredImageNet(args=opt, partition='test', train_transform=train_trans, test_transform=test_trans, fix_seed=False), batch_size=opt.test_batch_size, shuffle=False, drop_last=False, num_workers=opt.num_workers)
        meta_valloader = DataLoader(MetaTieredImageNet(args=opt, partition='val', train_transform=train_trans, test_transform=test_trans, fix_seed=False), batch_size=opt.test_batch_size, shuffle=False, drop_last=False, num_workers=opt.num_workers)
        if opt.use_trainval:
            n_cls = 448
        else:
            n_cls = 351
    elif ((opt.dataset == 'CIFAR-FS') or (opt.dataset == 'FC100')):
        assert (opt.transform == 'D')
        meta_testloader = DataLoader(MetaCIFAR100(args=opt, partition='test', train_transform=train_trans, test_transform=test_trans, fix_seed=False), batch_size=opt.test_batch_size, shuffle=False, drop_last=False, num_workers=opt.num_workers)
        meta_valloader = DataLoader(MetaCIFAR100(args=opt, partition='val', train_transform=train_trans, test_transform=test_trans, fix_seed=False), batch_size=opt.test_batch_size, shuffle=False, drop_last=False, num_workers=opt.num_workers)
        if opt.use_trainval:
            n_cls = 80
        elif (opt.dataset == 'CIFAR-FS'):
            n_cls = 64
        elif (opt.dataset == 'FC100'):
            n_cls = 60
        else:
            raise NotImplementedError('dataset not supported: {}'.format(opt.dataset))
    elif (opt.dataset in ['cub', 'cars', 'places', 'plantae']):
        # Cross-domain evaluation datasets; note 'novel' partition for test.
        train_classes = {'cub': 100, 'cars': 98, 'places': 183, 'plantae': 100}
        assert (opt.transform == 'C')
        assert (not opt.use_trainval), f'Train val option not possible for dataset {opt.dataset}'
        meta_testloader = DataLoader(MetaCUB(args=opt, partition='novel', train_transform=train_trans, test_transform=test_trans, fix_seed=False), batch_size=opt.test_batch_size, shuffle=False, drop_last=False, num_workers=opt.num_workers)
        meta_valloader = DataLoader(MetaCUB(args=opt, partition='val', train_transform=train_trans, test_transform=test_trans, fix_seed=False), batch_size=opt.test_batch_size, shuffle=False, drop_last=False, num_workers=opt.num_workers)
        n_cls = train_classes[opt.dataset]
    else:
        raise NotImplementedError(opt.dataset)
    return (meta_testloader, meta_valloader, n_cls)
class ScenarioReport():
    """Accumulates per-step reports for a single executed scenario."""

    def __init__(self, scenario: Scenario) -> None:
        self.scenario: Scenario = scenario
        self.step_reports: list[StepReport] = []

    @property
    def current_step_report(self) -> StepReport:
        """The most recently added step report.

        BUG FIX: this was a plain method, but ``fail`` accessed it without
        calling (``self.current_step_report.finalize``), which raised
        AttributeError; it must be a property.
        """
        return self.step_reports[(- 1)]

    def add_step_report(self, step_report: StepReport) -> None:
        """Append a finished (or in-progress) step report."""
        self.step_reports.append(step_report)

    def serialize(self) -> dict[(str, Any)]:
        """Serialize the scenario, its steps and its enclosing feature to a dict."""
        scenario = self.scenario
        feature = scenario.feature
        return {'steps': [step_report.serialize() for step_report in self.step_reports], 'name': scenario.name, 'line_number': scenario.line_number, 'tags': sorted(scenario.tags), 'feature': {'name': feature.name, 'filename': feature.filename, 'rel_filename': feature.rel_filename, 'line_number': feature.line_number, 'description': feature.description, 'tags': sorted(feature.tags)}}

    def fail(self) -> None:
        """Mark the current step failed and fail every remaining unexecuted step."""
        self.current_step_report.finalize(failed=True)
        remaining_steps = self.scenario.steps[len(self.step_reports):]
        for step in remaining_steps:
            report = StepReport(step=step)
            report.finalize(failed=True)
            self.add_step_report(report)
def seg_from_api(data):
    """POST *data* to the remote segmentation service at SEGURL.

    Returns the decoded JSON response as a dict, or None when the request
    or the JSON decoding fails (best-effort, as before, but with a useful
    log message instead of a placeholder and a bare ``except``).
    """
    payload = {'text': data}
    headers = {'Content-Type': 'application/json'}
    try:
        res = requests.post(SEGURL, data=json.dumps(payload), headers=headers)
        return json.loads(res.text)
    except (requests.RequestException, ValueError) as exc:
        # ValueError also covers json.JSONDecodeError (its subclass).
        print('seg_from_api failed: %s' % exc)
        return None
def test_only_target(local_client, grpc_client):
    """Discovery with only a target point must agree across client backends."""

    def discover_by_target(client: QdrantBase, **kwargs: Dict[str, Any]) -> List[models.ScoredPoint]:
        # Target-only discovery against the 'image' vector, payload included.
        return client.discover(
            collection_name=COLLECTION_NAME,
            target=10,
            with_payload=True,
            limit=10,
            using='image',
        )

    compare_client_results(grpc_client, discover_by_target)
    compare_client_results(local_client, discover_by_target)
def test_connect_rd_x_conn_A_b_wr_A_mark_writer():
    # Regression test: when struct field s.A.b is connected to wire s.x and an
    # update block writes the whole struct s.A, the struct side must be marked
    # as the writer of the net.
    class Top(ComponentLevel3):

        def construct(s):
            s.x = Wire(Bits32)
            s.A = Wire(SomeMsg)
            # Net: s.A.b <-> s.x; the writer is inferred from the update blocks.
            connect(s.A.b, s.x)

            # NOTE(review): PyMTL3 update blocks are normally decorated with
            # @update -- the decorators appear to have been stripped here; verify.
            def up_wr_A():
                s.A = SomeMsg(12, 123)

            def up_rd_x():
                z = s.x
    _test_model(Top)
class BenefitFeatureConfiguration(PolymorphicModel):
    """Polymorphic base model for sponsor benefit feature configurations."""
    objects = BenefitFeatureQuerySet.as_manager()
    benefit = models.ForeignKey('sponsors.SponsorshipBenefit', on_delete=models.CASCADE)
    # Plain manager that skips polymorphic downcasting; used as base manager.
    non_polymorphic = models.Manager()

    class Meta():
        verbose_name = 'Benefit Feature Configuration'
        verbose_name_plural = 'Benefit Feature Configurations'
        base_manager_name = 'non_polymorphic'

    def benefit_feature_class(self):
        # Subclasses must provide the concrete BenefitFeature class.
        # NOTE(review): accessed without parentheses in get_benefit_feature(),
        # which suggests this was originally a @property -- verify upstream.
        raise NotImplementedError

    def get_cfg_kwargs(self, **kwargs):
        """Collect subclass-specific field values as kwargs.

        Skips fields inherited from this base class, relations pointing back
        to the configuration, and anything the caller already supplied.
        """
        base_fields = set(BenefitFeatureConfiguration._meta.get_fields())
        benefit_fields = (set(self._meta.get_fields()) - base_fields)
        for field in benefit_fields:
            if (BenefitFeatureConfiguration is getattr(field, 'related_model', None)):
                continue
            elif (field.name in kwargs):
                continue
            kwargs[field.name] = getattr(self, field.name)
        return kwargs

    def get_benefit_feature_kwargs(self, **kwargs):
        # Hook for subclasses; defaults to the raw configuration kwargs.
        return self.get_cfg_kwargs(**kwargs)

    def get_clone_kwargs(self, new_benefit):
        # Same kwargs as this configuration, re-pointed at the new benefit.
        kwargs = self.get_cfg_kwargs()
        kwargs['benefit'] = new_benefit
        return kwargs

    def get_benefit_feature(self, **kwargs):
        """Instantiate (unsaved) the concrete benefit feature, or None."""
        BenefitFeatureClass = self.benefit_feature_class
        kwargs = self.get_benefit_feature_kwargs(**kwargs)
        if (kwargs is None):
            return None
        return BenefitFeatureClass(**kwargs)

    def display_modifier(self, name, **kwargs):
        # Default: the benefit's display name is unmodified.
        return name

    def create_benefit_feature(self, sponsor_benefit, **kwargs):
        """Create and persist the benefit feature for sponsor_benefit (or None)."""
        feature = self.get_benefit_feature(sponsor_benefit=sponsor_benefit, **kwargs)
        if (feature is not None):
            feature.save()
        return feature

    def clone(self, sponsorship_benefit):
        # Returns the (object, created) pair from get_or_create.
        cfg_kwargs = self.get_clone_kwargs(sponsorship_benefit)
        return self.__class__.objects.get_or_create(**cfg_kwargs)
class LocalConnectionTest(unittest.TestCase):
    """Tests for attribute access through the local connection proxy."""
    lc = Globals.local_connection

    def testGetAttrs(self):
        """Reading existing attrs works; missing attrs raise NameError/KeyError."""
        self.assertIsInstance(self.lc.LocalConnection, type)
        try:
            self.lc.asotnuhaoseu
        except (NameError, KeyError):
            pass
        else:
            # BUG FIX: the unittest module has no `fail`; use the TestCase method.
            self.fail('NameError or KeyError should be raised')

    def testSetattrs(self):
        """Attributes can be set and re-set through the connection."""
        self.lc.x = 5
        self.assertEqual(self.lc.x, 5)
        self.lc.x = 7
        self.assertEqual(self.lc.x, 7)

    def testDelattrs(self):
        """Deleted attributes become unreadable again."""
        self.lc.x = 5
        del self.lc.x
        try:
            self.lc.x
        except (NameError, KeyError):
            pass
        else:
            # BUG FIX: `unittest.fail` does not exist; use self.fail.
            self.fail('No exception raised')

    def testReval(self):
        # Remote evaluation of a builtin through the connection.
        self.assertEqual(self.lc.reval('pow', 2, 3), 8)
class Normalize():
    """Channel-wise normalisation transform: (x - mean) / std.

    mean and std are stored as 1-D tensors and broadcast over NCHW batches
    by reshaping to (1, 3, 1, 1) at call time.
    """

    def __init__(self, mean, std):
        super().__init__()
        self.mean = torch.tensor(mean)
        self.std = torch.tensor(std)

    def __call__(self, x):
        mu = self.mean.reshape((1, 3, 1, 1))
        sigma = self.std.reshape((1, 3, 1, 1))
        return (x - mu) / sigma
class Calendar(ContentManageable):
    """An events calendar, optionally backed by external iCal/RSS/Twitter feeds."""
    url = models.URLField('URL iCal', blank=True, null=True)
    rss = models.URLField('RSS Feed', blank=True, null=True)
    embed = models.URLField('URL embed', blank=True, null=True)
    twitter = models.URLField('Twitter feed', blank=True, null=True)
    name = models.CharField(max_length=100)
    slug = models.SlugField(unique=True)
    description = models.CharField(max_length=255, null=True, blank=True)

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        return reverse('events:event_list', kwargs={'calendar_slug': self.slug})

    def import_events(self):
        """Import events from the calendar's iCal URL; requires `url` to be set."""
        if (self.url is None):
            raise ValueError('calendar must have a url field set')
        # Imported lazily to avoid a circular import with .importer.
        from .importer import ICSImporter
        importer = ICSImporter(calendar=self)
        importer.import_events()
def sentence_bleu(hypothesis, reference):
    """Compute a smoothed sentence-level BLEU score.

    Statistics are gathered as for corpus BLEU on the single pair, then one
    matched/total n-gram is added for orders 2-4 (indices 1-3; index 0 is
    unigrams) so higher-order precisions are never zero, and the score is
    recomputed with exponential smoothing.
    """
    bleu = _corpus_bleu(hypothesis, reference)
    # Add-one smoothing for n-gram orders 2..4 only.
    for i in range(1, 4):
        bleu.counts[i] += 1
        bleu.totals[i] += 1
    bleu = compute_bleu(bleu.counts, bleu.totals, bleu.sys_len, bleu.ref_len, smooth='exp', smooth_floor=0.0)
    return bleu.score
class LastFMSyncCache():
    """Local cache of a Last.fm user's listening statistics.

    Downloads the user's weekly charts via the Last.fm web API and keeps
    per-song stats (playcount / lastplayed / added) keyed by MusicBrainz
    track id and by (artist, title) pairs; update_songs() merges them back
    into local library songs.
    """
    # Unix timestamp the account was registered (0 = not yet fetched).
    registered = 0
    # Unix timestamp of the last chart-list refresh (None = never).
    lastupdated = None

    def __init__(self, username):
        self.username = username
        # {(from_ts, to_ts): needs_fetch} for each weekly chart.
        self.charts = {}
        # {key: stats-dict}; several keys may alias the same stats dict.
        self.songs = {}

    def update_charts(self, progress=None):
        """Fetch all pending weekly charts.

        progress, if given, is called as progress(message, fraction) and may
        return False to cancel. Returns True on success, False when already
        up-to-date, cancelled, or on error.
        """

        def prog(msg, frac):
            if progress:
                if not progress(msg, frac):
                    # Cancellation is unwound through the ValueError handler below.
                    raise ValueError()
        try:
            if not self.registered:
                resp = apicall('user.getinfo', user=self.username)
                self.registered = int(resp['user']['registered']['unixtime'])
            now = time.time()
            # Refresh the chart list at most once per 24 hours.
            if (not self.lastupdated) or ((self.lastupdated + (24 * 60 * 60)) < now):
                prog(_('Updating chart list.'), 0)
                resp = apicall('user.getweeklychartlist', user=self.username)
                charts = resp['weeklychartlist']['chart']
                for chart in charts:
                    (fro, to) = (int(chart[s]) for s in ('from', 'to'))
                    if to < self.registered:
                        # Chart predates the account; nothing to fetch.
                        continue
                    # Mark as pending unless already tracked.
                    self.charts.setdefault((fro, to), True)
                self.lastupdated = now
            elif not [v for v in self.charts.values() if v]:
                prog(_('Already up-to-date.'), 1.0)
                return False
            new_charts = [k for (k, v) in self.charts.items() if v]
            for (idx, (fro, to)) in enumerate(sorted(new_charts)):
                chart_week = date.fromtimestamp(fro).isoformat()
                prog(_('Fetching chart for week of %s.') % chart_week,
                     (idx + 1.0) / (len(new_charts) + 2.0))
                args = {'user': self.username, 'from': fro, 'to': to}
                try:
                    resp = apicall('user.getweeklytrackchart', **args)
                except OSError as err:
                    # NOTE(review): err.code implies a urllib HTTPError is
                    # expected here (plain OSError has no .code) -- verify.
                    msg = 'HTTP error %d, retrying in %d seconds.'
                    print_w(msg % (err.code, max_wait))
                    for i in range(max_wait, 0, -1):
                        time.sleep(1)
                        prog(msg % (err.code, i), None)
                    resp = apicall('user.getweeklytrackchart', **args)
                try:
                    tracks = resp['weeklytrackchart']['track']
                except KeyError:
                    tracks = []
                # A single-track chart comes back as a dict, not a list.
                if isinstance(tracks, dict):
                    tracks = [tracks]
                for track in tracks:
                    self._update_stats(track, fro, to)
                # Chart successfully merged; no longer pending.
                self.charts[(fro, to)] = False
            prog(_('Sync complete.'), 1.0)
        except ValueError:
            # Raised by prog() when the caller cancels; fall through.
            pass
        except Exception as e:
            util.print_exc()
            prog(_('Error during sync (%s)') % e, None)
            return False
        return True

    def _update_stats(self, track, chart_fro, chart_to):
        """Merge one chart entry into self.songs.

        The same stats dict is aliased under the track mbid (if any) and
        under (artist-mbid, title) / (artist-name, title) pairs so a later
        lookup by any key reaches the same dict.
        """
        keys = []
        if track['mbid']:
            keys.append(track['mbid'])
        for artist in (track['artist']['mbid'], track['artist']['#text']):
            if artist:
                keys.append((artist.lower(), track['name'].lower()))
        # Collect any stats dicts already stored under these keys.
        stats = list(filter(None, map(self.songs.get, keys)))
        if stats:
            # Consolidate possibly-diverged aliases into the first dict.
            plays = max(d.get('playcount', 0) for d in stats)
            last = max(d.get('lastplayed', 0) for d in stats)
            added = max(d.get('added', chart_to) for d in stats)
            stats = stats[0]
            stats.update({'playcount': plays, 'lastplayed': last, 'added': added})
        else:
            stats = {'playcount': 0, 'lastplayed': 0, 'added': chart_to}
        stats['playcount'] = stats['playcount'] + int(track['playcount'])
        # Chart start bounds the last play time from below; chart end bounds
        # when the song was first seen ("added") from above.
        stats['lastplayed'] = max(stats['lastplayed'], chart_fro)
        stats['added'] = min(stats['added'], chart_to)
        for key in keys:
            self.songs[key] = stats

    def update_songs(self, songs):
        """Write cached stats back into library songs (in place).

        Songs are matched by MusicBrainz track id, (artist mbid, title) and
        (artist name, title); songs with no cached stats are left untouched.
        Local statistics are never decreased.
        """
        for song in songs:
            keys = []
            if 'musicbrainz_trackid' in song:
                keys.append(song['musicbrainz_trackid'].lower())
            # BUG FIX: the key was misspelled 'musiscbrainz_artistid', so this
            # lookup never matched (and would have raised KeyError had the
            # misspelled tag ever been present).
            if 'musicbrainz_artistid' in song:
                keys.append((song['musicbrainz_artistid'].lower(), song.get('title', '').lower()))
            keys.append((song.get('artist', '').lower(), song.get('title', '').lower()))
            stats = list(filter(None, map(self.songs.get, keys)))
            if not stats:
                continue
            stats = stats[0]
            playcount = max(song.get('~#playcount', 0), stats['playcount'])
            if playcount != 0:
                song['~#playcount'] = playcount
            lastplayed = max(song.get('~#lastplayed', 0), stats['lastplayed'])
            if lastplayed != 0:
                song['~#lastplayed'] = lastplayed
            song['~#added'] = min(song['~#added'], stats['added'])
class BackendTestCases(unittest.TestCase):
    """Sanity checks for the backend registry."""

    def setUp(self):
        # Every test starts from the default win32 backend.
        backend.activate('win32')

    def test_register(self):
        """Registering wrong element/wrapper types must raise TypeError."""
        bad_registrations = (
            ('dummy', object, HwndWrapper),
            ('dummy', HwndElementInfo, object),
        )
        for registration in bad_registrations:
            self.assertRaises(TypeError, backend.register, *registration)

    def test_backend_attrs(self):
        """The active backend exposes its name and its two classes."""
        self.assertEqual('win32', backend.name())
        self.assertEqual(HwndElementInfo, backend.element_class())
        self.assertEqual(HwndWrapper, backend.wrapper_class())

    def test_activate(self):
        """Activating an unknown backend name must raise ValueError."""
        self.assertRaises(ValueError, backend.activate, 'invalid backend')
class chamferFunction(Function):
    # Custom autograd Function wrapping the compiled `chamfer` CUDA extension.
    # NOTE(review): autograd Function forward/backward are normally
    # @staticmethod -- the decorators appear stripped in this copy; verify.

    def forward(ctx, xyz1, xyz2):
        # xyz1: (B, n, C) and xyz2: (B, m, C) point clouds -- presumably C == 3;
        # confirm against the CUDA kernel's expectations.
        (batchsize, n, _) = xyz1.size()
        (_, m, _) = xyz2.size()
        # Output buffers: per-point nearest distances and nearest-neighbour indices.
        dist1 = torch.zeros(batchsize, n)
        dist2 = torch.zeros(batchsize, m)
        idx1 = torch.zeros(batchsize, n).type(torch.IntTensor)
        idx2 = torch.zeros(batchsize, m).type(torch.IntTensor)
        # The extension requires CUDA tensors.
        dist1 = dist1.cuda()
        dist2 = dist2.cuda()
        idx1 = idx1.cuda()
        idx2 = idx2.cuda()
        chamfer.forward(xyz1, xyz2, dist1, dist2, idx1, idx2)
        # Indices are needed again for the backward pass.
        ctx.save_for_backward(xyz1, xyz2, idx1, idx2)
        return (dist1, dist2, idx1, idx2)

    def backward(ctx, graddist1, graddist2, idx1_, idx2_):
        # Incoming gradients for idx1/idx2 (integer outputs) are ignored.
        (xyz1, xyz2, idx1, idx2) = ctx.saved_tensors
        graddist1 = graddist1.contiguous()
        graddist2 = graddist2.contiguous()
        gradxyz1 = torch.zeros(xyz1.size())
        gradxyz2 = torch.zeros(xyz2.size())
        gradxyz1 = gradxyz1.cuda()
        gradxyz2 = gradxyz2.cuda()
        chamfer.backward(xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2)
        return (gradxyz1, gradxyz2)
class PresetPrimeChaos(PresetTab, Ui_PresetPrimeChaos):
    """Preset-editor tab for the experimental 'chaos' settings."""

    def __init__(self, editor: PresetEditor, game_description: GameDescription, window_manager: WindowManager):
        super().__init__(editor, game_description, window_manager)
        self.setupUi(self)
        # Strip the hard-coded link colour so the label follows the theme.
        self.chaos_label.setText(self.chaos_label.text().replace('color:#0000ff;', ''))
        self.room_rando_combo.setItemData(0, RoomRandoMode.NONE)
        self.room_rando_combo.setItemData(1, RoomRandoMode.ONE_WAY)
        self.room_rando_combo.setItemData(2, RoomRandoMode.TWO_WAY)
        signal_handling.on_combo(self.room_rando_combo, self._on_room_rando_changed)
        # Simple boolean fields persist straight into the configuration.
        for f in _FIELDS:
            self._add_persist_option(getattr(self, f'{f}_check'), f)
        # small/large samus are mutually exclusive, so they need custom handlers.
        signal_handling.on_checked(self.small_samus_check, self._on_small_samus_changed)
        signal_handling.on_checked(self.large_samus_check, self._on_large_samus_changed)
        self.superheated_slider.valueChanged.connect(self._on_slider_changed)
        self.submerged_slider.valueChanged.connect(self._on_slider_changed)

    # NOTE(review): tab_title/is_experimental/uses_patches_tab take `cls`,
    # suggesting stripped @classmethod decorators -- verify upstream.
    def tab_title(cls) -> str:
        return 'Chaos Settings'

    def is_experimental(cls) -> bool:
        return True

    def uses_patches_tab(cls) -> bool:
        return True

    def _add_persist_option(self, check: QtWidgets.QCheckBox, attribute_name: str):
        """Wire a checkbox so toggling writes its value to the configuration."""

        def persist(value: bool):
            with self._editor as editor:
                editor.set_configuration_field(attribute_name, value)
        signal_handling.on_checked(check, persist)

    def _on_small_samus_changed(self, value: bool):
        with self._editor as editor:
            editor.set_configuration_field('small_samus', value)
            if value:
                # Mutually exclusive with large_samus.
                editor.set_configuration_field('large_samus', False)

    def _on_large_samus_changed(self, value: bool):
        with self._editor as editor:
            editor.set_configuration_field('large_samus', value)
            if value:
                # Mutually exclusive with small_samus.
                editor.set_configuration_field('small_samus', False)

    def _on_room_rando_changed(self, value: RoomRandoMode):
        with self._editor as editor:
            editor.set_configuration_field('room_rando', value)

    def on_preset_changed(self, preset: Preset):
        """Refresh every widget from a freshly-loaded preset."""
        config = preset.configuration
        for f in _FIELDS:
            typing.cast(QtWidgets.QCheckBox, getattr(self, f'{f}_check')).setChecked(getattr(config, f))
        signal_handling.set_combo_with_value(self.room_rando_combo, config.room_rando)
        self.superheated_slider.setValue(preset.configuration.superheated_probability)
        self.submerged_slider.setValue(preset.configuration.submerged_probability)
        self._on_slider_changed()

    def _update_editor(self):
        # Persist both slider values into the configuration.
        with self._editor as editor:
            editor.set_configuration_field('superheated_probability', self.superheated_slider.value())
            editor.set_configuration_field('submerged_probability', self.submerged_slider.value())

    def _on_slider_changed(self):
        # Sliders store tenths of a percent: a value of 10 displays as 1.0%.
        self.superheated_slider_label.setText(f'{(self.superheated_slider.value() / 10.0):.1f}%')
        self.submerged_slider_label.setText(f'{(self.submerged_slider.value() / 10.0):.1f}%')
        self._update_editor()
def get_backbone_cfg(backbone):
    """Translate a short backbone identifier into a backbone config dict.

    Supports SegFormer MiT backbones 'mitb1'..'mitb5' (with an optional
    '-del' suffix that also deletes the inherited config entry) plus a
    fixed table of ResNet/ResNeXt/ResNeSt variants. Raises KeyError for
    unknown identifiers.
    """
    for depth in (1, 2, 3, 4, 5):
        if backbone == f'mitb{depth}':
            return dict(type=f'mit_b{depth}')
        if backbone == f'mitb{depth}-del':
            return dict(_delete_=True, type=f'mit_b{depth}')
    resnet_like = {
        'r50v1c': {'depth': 50},
        'r101v1c': {'depth': 101},
        'x50-32': {'type': 'ResNeXt', 'depth': 50, 'groups': 32, 'base_width': 4},
        'x101-32': {'type': 'ResNeXt', 'depth': 101, 'groups': 32, 'base_width': 4},
        's50': {'type': 'ResNeSt', 'depth': 50, 'stem_channels': 64, 'radix': 2, 'reduction_factor': 4, 'avg_down_stride': True},
        's101': {'type': 'ResNeSt', 'depth': 101, 'stem_channels': 128, 'radix': 2, 'reduction_factor': 4, 'avg_down_stride': True},
        's200': {'type': 'ResNeSt', 'depth': 200, 'stem_channels': 128, 'radix': 2, 'reduction_factor': 4, 'avg_down_stride': True},
    }
    return resnet_like[backbone]
def _gen_mixnet_s(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
    """Create a MixNet Small model.

    Each arch_def string encodes one block: type (ds/ir), repeats, kernel
    sizes (dot-separated for mixed kernels), stride, expansion, channels,
    SE ratio and activation; decode_arch_def turns it into block args.
    channel_multiplier scales channel counts per stage.
    """
    arch_def = [['ds_r1_k3_s1_e1_c16'], ['ir_r1_k3_a1.1_p1.1_s2_e6_c24', 'ir_r1_k3_a1.1_p1.1_s1_e3_c24'], ['ir_r1_k3.5.7_s2_e6_c40_se0.5_nsw', 'ir_r3_k3.5_a1.1_p1.1_s1_e6_c40_se0.5_nsw'], ['ir_r1_k3.5.7_p1.1_s2_e6_c80_se0.25_nsw', 'ir_r2_k3.5_p1.1_s1_e6_c80_se0.25_nsw'], ['ir_r1_k3.5.7_a1.1_p1.1_s1_e6_c120_se0.5_nsw', 'ir_r2_k3.5.7.9_a1.1_p1.1_s1_e3_c120_se0.5_nsw'], ['ir_r1_k3.5.7.9.11_s2_e6_c200_se0.5_nsw', 'ir_r2_k3.5.7.9_p1.1_s1_e6_c200_se0.5_nsw']]
    # norm_layer may be overridden via kwargs; otherwise BatchNorm2d with
    # any BN args extracted from kwargs is used.
    model_kwargs = dict(block_args=decode_arch_def(arch_def), num_features=1536, stem_size=16, round_chs_fn=partial(round_channels, multiplier=channel_multiplier), norm_layer=(kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs))), **kwargs)
    model = _create_effnet(variant, pretrained, **model_kwargs)
    return model
def test_cf_rotated_latlon():
    """Round-trip a rotated-pole CF grid mapping through a pyproj CRS."""
    crs = CRS.from_cf(dict(grid_mapping_name='rotated_latitude_longitude', grid_north_pole_latitude=32.5, grid_north_pole_longitude=170.0))
    expected_cf = {'semi_major_axis': 6378137.0, 'semi_minor_axis': crs.ellipsoid.semi_minor_metre, 'inverse_flattening': crs.ellipsoid.inverse_flattening, 'reference_ellipsoid_name': 'WGS 84', 'longitude_of_prime_meridian': 0.0, 'prime_meridian_name': 'Greenwich', 'horizontal_datum_name': 'World Geodetic System 1984 ensemble', 'grid_mapping_name': 'rotated_latitude_longitude', 'grid_north_pole_latitude': 32.5, 'grid_north_pole_longitude': 170.0, 'north_pole_grid_longitude': 0.0, 'geographic_crs_name': 'undefined'}
    cf_dict = crs.to_cf()
    # The full WKT text is environment-dependent, so only its prefix is checked.
    assert cf_dict.pop('crs_wkt').startswith('GEOGCRS[')
    assert (cf_dict == expected_cf)
    _test_roundtrip(expected_cf, 'GEOGCRS[')
    # Coordinate-system axes must map to the CF rotated-grid conventions.
    assert (crs.cs_to_cf() == [{'standard_name': 'grid_longitude', 'long_name': 'longitude in rotated pole grid', 'units': 'degrees', 'axis': 'X'}, {'standard_name': 'grid_latitude', 'long_name': 'latitude in rotated pole grid', 'units': 'degrees', 'axis': 'Y'}])
    # Conversion to a proj dict is lossy, hence the expected UserWarning.
    with pytest.warns(UserWarning):
        proj_dict = crs.to_dict()
    assert (proj_dict == {'proj': 'ob_tran', 'o_proj': 'longlat', 'o_lat_p': 32.5, 'o_lon_p': 0, 'lon_0': 350, 'datum': 'WGS84', 'no_defs': None, 'type': 'crs'})
def restoreSplitter(w, s):
    """Restore a QSplitter's pane sizes from a saved state.

    *s* may be a list of integer sizes or a percent-encoded state string
    (as produced by QSplitter.saveState); anything else is reported and
    ignored. After restoring, if every pane ended up with zero size the
    splitter is reset to equal sizes so no pane is irrecoverably hidden.
    """
    # isinstance (rather than `type(s) is ...`) also accepts subclasses.
    if isinstance(s, list):
        w.setSizes(s)
    elif isinstance(s, str):
        w.restoreState(QtCore.QByteArray.fromPercentEncoding(s.encode()))
    else:
        print("Can't configure QSplitter using object of type", type(s))
    # Guard: if the restored state collapsed every pane, fall back to equal sizes.
    if w.count() > 0 and not any(size > 0 for size in w.sizes()):
        w.setSizes([50] * w.count())
class TestSplitValueFunc(unittest.TestCase):
    """Behavioural tests for misc.split_key_value."""

    def test_with_default_args(self):
        # By default both key and value are whitespace-stripped.
        result = misc.split_key_value(" key = 'value here' ")
        self.assertEqual(('key', "'value here'"), result)

    def test_with_whitespace_stripping_disabled(self):
        # strip_whitespace=False keeps the surrounding spaces intact.
        result = misc.split_key_value(" key = 'value here' ", strip_whitespace=False)
        self.assertEqual((' key ', " 'value here' "), result)

    def test_with_colon_as_separator(self):
        # A custom separator character splits on ':' instead of '='.
        result = misc.split_key_value(" key : 'value here' ", sep=':')
        self.assertEqual(('key', "'value here'"), result)
# NOTE(review): the two bare parenthesised strings below appear to be
# @patch(...) mock decorators whose '@patch' prefix was stripped -- verify.
('pypyr.cache.loadercache.Loader.get_pipeline')
('pypyr.cache.stepcache.step_cache.get_step')
def test_stop_all_for(mock_step_cache, mock_get_pipe):
    """Raising Stop inside a for-loop step halts the whole pipeline run."""
    nothing_mock = DeepCopyMagicMock()
    mock312 = DeepCopyMagicMock()

    def step31(context):
        # Third resolved step: records the context, then raises Stop on the
        # second loop iteration (i == 'two').
        mock312(context)
        if (context['i'] == 'two'):
            raise Stop()
    mock_step_cache.side_effect = [nothing_mock, nothing_mock, step31]
    mock_get_pipe.return_value = get_for_step_pipeline()
    context = Context()
    pipeline = Pipeline('arb pipe', groups=['sg2', 'sg3', 'sg4', 'sg1'], success_group='sg5', failure_group=None)
    pipeline.run(context)
    assert (not context.is_in_pipeline_scope)
    # Stop aborts before sg3.step2 / sg4 / sg1 and skips the success group.
    assert (mock_step_cache.mock_calls == [call('sg2.step1'), call('sg2.step2'), call('sg3.step1')])
    assert (nothing_mock.mock_calls == [call({}), call({})])
    # The looping step saw both iterations before stopping.
    assert (mock312.mock_calls == [call({'i': 'one'}), call({'i': 'two'})])
def _symlink_package_resource(dest_dir: Path, path: Path, *, force: bool, suffix: str='', executable: bool=False) -> None:
    """Create dest_dir/<name+suffix> as a symlink pointing at *path*.

    With force=True any pre-existing file/dir at the link location is removed
    first. An existing symlink already pointing at *path* is left alone; one
    pointing elsewhere is preserved and only a warning is logged. For
    executables, a warning is emitted if another binary of the same name
    already shadows the new link on PATH.
    """
    name_suffixed = add_suffix(path.name, suffix)
    symlink_path = Path((dest_dir / name_suffixed))
    if (not symlink_path.parent.is_dir()):
        mkdir(symlink_path.parent)
    if force:
        logger.info(f'Force is true. Removing {str(symlink_path)}.')
        try:
            symlink_path.unlink()
        except FileNotFoundError:
            pass
        except IsADirectoryError:
            # unlink() cannot remove directories; use rmdir instead.
            rmdir(symlink_path)
    # For a dangling symlink exists() is False while is_symlink() is True.
    exists = symlink_path.exists()
    is_symlink = symlink_path.is_symlink()
    if exists:
        if symlink_path.samefile(path):
            # Already linked to the right target; nothing to do.
            logger.info(f'Same path {str(symlink_path)} and {str(path)}')
        else:
            logger.warning(pipx_wrap(f'''
                    {hazard} File exists at {str(symlink_path)} and points
                    to {symlink_path.resolve()}, not {str(path)}. Not
                    modifying.
                    ''', subsequent_indent=(' ' * 4)))
        return
    if (is_symlink and (not exists)):
        # Dangling symlink left behind by a removed target: clean it up.
        logger.info(f'Removing existing symlink {str(symlink_path)} since it pointed non-existent location')
        symlink_path.unlink()
    if executable:
        # Resolve PATH shadowing BEFORE creating the new link.
        existing_executable_on_path = which(name_suffixed)
    else:
        existing_executable_on_path = None
    symlink_path.symlink_to(path)
    if (executable and existing_executable_on_path):
        logger.warning(pipx_wrap(f'''
                    {hazard} Note: {name_suffixed} was already on your
                    PATH at {existing_executable_on_path}
                    ''', subsequent_indent=(' ' * 4)))
class TrackCurrentModel(ObjectStore):
    """An ObjectStore that additionally tracks a 'current' row."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # TreeIter of the current row, or None.
        self.__iter = None
    # Last object that was current; survives a set()/refill.
    last_current: (Any | None) = None

    def set(self, songs: Sequence[Any]):
        """Replace the model contents, re-locating the previous current song."""
        print_d(('Filling view model with %d songs.' % len(songs)))
        self.clear()
        self.__iter = None
        oldsong = self.last_current
        for (iter_, song) in zip(self.iter_append_many(songs), songs, strict=False):
            if (song is oldsong):
                # Identity (not equality) match restores the current marker.
                self.__iter = iter_

    def get(self) -> list[Any]:
        return list(self.itervalues())

    def current(self) -> (Any | None):
        # NOTE(review): `current` is used without parentheses elsewhere in this
        # class (find(), the setter), which suggests @property decorators were
        # stripped from this copy -- verify upstream.
        return (self.__iter and self.get_value(self.__iter, 0))

    def current_path(self):
        return (self.__iter and self.get_path(self.__iter))

    def current_iter(self):
        return self.__iter
    # NOTE(review): the next line appears to be a mangled
    # '@current_iter.setter' decorator; as written, `_iter` is a NameError at
    # class creation time -- verify upstream.
    _iter.setter
    def current_iter(self, iter_):
        if (iter_ == self.__iter):
            return
        # Emit row-changed for both the old and the new current rows.
        for it in filter(None, (self.__iter, iter_)):
            self.row_changed(self.get_path(it), it)
        self.__iter = iter_
        self.last_current = self.current

    def find(self, song: Any):
        """Return the iter for *song* (fast path: the current row), or None."""
        if (self.current == song):
            return self.current_iter
        for (iter_, value) in self.iterrows():
            if (value == song):
                return iter_
        return

    def find_all(self, songs: Iterable[Any]):
        """Return iters for every row whose value is in *songs*."""
        songs = set(songs)
        return [iter_ for (iter_, value) in self.iterrows() if (value in songs)]

    def remove(self, iter_):
        # Removing the current row clears the current marker first.
        if (self.__iter and (self[iter_].path == self[self.__iter].path)):
            self.__iter = None
        super().remove(iter_)

    def clear(self):
        self.__iter = None
        super().clear()

    def __contains__(self, song):
        return bool(self.find(song))
def get_available_reporting_integrations():
    """Return the names of reporting integrations usable in this environment.

    The order is fixed: azure_ml, comet_ml, mlflow, tensorboard, wandb,
    codecarbon -- only those whose availability check passes are included.
    """
    candidates = (
        ('azure_ml', is_azureml_available),
        ('comet_ml', is_comet_available),
        ('mlflow', is_mlflow_available),
        ('tensorboard', is_tensorboard_available),
        ('wandb', is_wandb_available),
        ('codecarbon', is_codecarbon_available),
    )
    return [name for name, available in candidates if available()]
class SelectiveKernelBottleneck(nn.Module):
    """ResNet-style bottleneck block using a Selective Kernel conv as the 3x3 stage."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, sk_kwargs=None, reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, drop_block=None, drop_path=None):
        super(SelectiveKernelBottleneck, self).__init__()
        sk_kwargs = (sk_kwargs or {})
        conv_kwargs = dict(act_layer=act_layer, norm_layer=norm_layer)
        # ResNeXt-style width scaling by base_width and cardinality.
        width = int((math.floor((planes * (base_width / 64))) * cardinality))
        first_planes = (width // reduce_first)
        outplanes = (planes * self.expansion)
        first_dilation = (first_dilation or dilation)
        self.conv1 = ConvNormAct(inplanes, first_planes, kernel_size=1, **conv_kwargs)
        # The selective-kernel conv replaces the usual 3x3 and carries the stride.
        self.conv2 = SelectiveKernel(first_planes, width, stride=stride, dilation=first_dilation, groups=cardinality, aa_layer=aa_layer, drop_layer=drop_block, **conv_kwargs, **sk_kwargs)
        # Final 1x1 projection; activation applied after the residual add.
        self.conv3 = ConvNormAct(width, outplanes, kernel_size=1, apply_act=False, **conv_kwargs)
        self.se = create_attn(attn_layer, outplanes)
        self.act = act_layer(inplace=True)
        self.downsample = downsample
        self.drop_path = drop_path

    def zero_init_last(self):
        # Zero the last BN gamma so the block starts as an identity mapping.
        nn.init.zeros_(self.conv3.bn.weight)

    def forward(self, x):
        shortcut = x
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        if (self.se is not None):
            x = self.se(x)
        if (self.drop_path is not None):
            x = self.drop_path(x)
        if (self.downsample is not None):
            shortcut = self.downsample(shortcut)
        x += shortcut
        x = self.act(x)
        return x
class Test_isdiag():
    """Tests for _data.isdiag on Dense-backed matrices."""

    # NOTE(review): the leading '.parametrize(...)' looks like a stripped
    # '@pytest.mark.parametrize' decorator, and `datatype` presumably comes
    # from another (missing) parametrisation -- verify upstream.
    .parametrize('shape', [(10, 1), (2, 5), (5, 2), (5, 5)])
    def test_isdiag(self, shape, datatype):
        # All-zero matrices count as diagonal.
        mat = np.zeros(shape)
        data = _data.to(datatype, _data.Dense(mat))
        assert _data.isdiag(data)
        # A single on-diagonal entry keeps the matrix diagonal.
        mat[(0, 0)] = 1
        data = _data.to(datatype, _data.Dense(mat))
        assert _data.isdiag(data)
        # Any off-diagonal entry makes it non-diagonal.
        mat[(1, 0)] = 1
        data = _data.to(datatype, _data.Dense(mat))
        assert (not _data.isdiag(data))
def main():
    """Streamlit app: upload DICOM RT Dose files, sum them, offer a download."""
    (left_column, right_column) = st.columns(2)
    with left_column:
        st.write('## Upload DICOM RT Dose files')
        files: Sequence[BinaryIO] = st.file_uploader("Upload at least two DICOM RT Dose files whose doses you'd like to add together. The first file uploaded will be used as a template for the summed DICOM RT Dose file.", ['dcm'], accept_multiple_files=True)
    if (not files):
        # Nothing uploaded yet; halt the script until the next rerun.
        st.stop()
    try:
        datasets = _load_and_check_files_valid(files)
    except ValueError as e:
        # Validation failure: show the message and stop this rerun.
        st.write(e)
        st.stop()
    if st.button('Click to Sum Doses'):
        with right_column:
            st.write(f'''
                ## Details
                * Patient ID: `{datasets[0].PatientID}`
                * Patient Name: `{pretty_patient_name(datasets[0])}`
                ''')
        if (len(datasets) < 2):
            raise ValueError('Please upload at least two DICOM RT Dose files.')
        st.write('---')
        st.write('Summing doses...')
        ds_summed = sum_doses_in_datasets(datasets)
        _save_dataset_to_downloads_dir(ds_summed)
        st.write('Done!')
        st.markdown('*Download the summed DICOM dose file from [downloads/RD.summed.dcm](downloads/RD.summed.dcm)*')
class TestStatsMetadata():
    """Tests for conversion/validation of sampler-stats metadata."""

    def test_infer_warn_stats_info(self):
        """Deprecated list-of-dicts stats_dtypes converts to the new dict form (with warning)."""
        with pytest.warns(DeprecationWarning, match='to specify'):
            (old, new) = infer_warn_stats_info([{'a': int, 'b': object}], {}, 'bla')
        assert isinstance(old, list)
        assert (len(old) == 1)
        assert (old[0] == {'a': int, 'b': object})
        assert isinstance(new, dict)
        # Unknown shapes are filled in as None.
        assert (new['a'] == (int, None))
        assert (new['b'] == (object, None))
        # The new dict form back-fills the legacy list form without warning.
        (old, new) = infer_warn_stats_info([], {'a': (int, []), 'b': (float, [2])}, 'bla')
        assert isinstance(old, list)
        assert (len(old) == 1)
        assert (old[0] == {'a': int, 'b': float})
        assert isinstance(new, dict)
        assert (new['a'] == (int, []))
        assert (new['b'] == (float, [2]))
        # Supplying both forms at once is rejected.
        with pytest.raises(TypeError, match='Only one of'):
            infer_warn_stats_info([{'a': float}], {'b': (int, [])}, 'bla')

    def test_stats_from_steps(self):
        """Compound steps expose per-sampler stats under sampler_<i>__<name> keys."""
        with pm.Model():
            s1 = pm.NUTS(pm.Normal('n'))
            s2 = pm.Metropolis(pm.Bernoulli('b', 0.5))
            cs = pm.CompoundStep([s1, s2])
        # Class-level legacy attributes stay empty...
        assert (pm.NUTS.stats_dtypes == [])
        assert (pm.Metropolis.stats_dtypes == [])
        # ...while per-step stats are merged with per-sampler prefixes.
        sds = get_stats_dtypes_shapes_from_steps([s1, s2])
        assert ('sampler_0__step_size' in sds)
        assert ('sampler_1__accepted' in sds)
        assert (len(cs.stats_dtypes) == 2)
        assert (cs.stats_dtypes_shapes == sds)
class GDBWatch(GDBBreakpoint):
    """A GDB watchpoint on an arbitrary expression."""

    def __init__(self, exp):
        self.exp = exp
        # Watchpoints have no file/line; register with a sentinel line of -1.
        super(GDBWatch, self).__init__(None, -1)

    def insert(self):
        """Install the watchpoint via GDB/MI and remember its breakpoint number."""
        output = run_cmd('-break-watch %s' % self.exp, True)
        parsed = parse_result_line(output)
        if get_result(output) == 'error':
            # GDB rejected the expression; leave the watchpoint unnumbered.
            return
        self.number = int(parsed['wpt']['number'])

    def format(self):
        """One-line human-readable description of this watchpoint."""
        return '%d - watch: %s\n' % (self.number, self.exp)
def enrich_ctypes_redefined_types():
    """Build an AST defining simplified pure-Python stand-ins for ctypes scalar types.

    Each generated class mimics the ctypes _SimpleCData interface: it stores
    a coerced `value` attribute and the one-character type code in `_type_`.
    """
    # (ctypes class name, builtin used to coerce the value, struct-style type code)
    c_class_to_type = (('c_byte', 'int', 'b'), ('c_char', 'bytes', 'c'), ('c_double', 'float', 'd'), ('c_float', 'float', 'f'), ('c_int', 'int', 'i'), ('c_int16', 'int', 'h'), ('c_int32', 'int', 'i'), ('c_int64', 'int', 'l'), ('c_int8', 'int', 'b'), ('c_long', 'int', 'l'), ('c_longdouble', 'float', 'g'), ('c_longlong', 'int', 'l'), ('c_short', 'int', 'h'), ('c_size_t', 'int', 'L'), ('c_ssize_t', 'int', 'l'), ('c_ubyte', 'int', 'B'), ('c_uint', 'int', 'I'), ('c_uint16', 'int', 'H'), ('c_uint32', 'int', 'I'), ('c_uint64', 'int', 'L'), ('c_uint8', 'int', 'B'), ('c_ulong', 'int', 'L'), ('c_ulonglong', 'int', 'L'), ('c_ushort', 'int', 'H'), ('c_wchar', 'str', 'u'))
    # c_bool is special-cased: this stub hard-wires its value to True.
    src = ["\nfrom _ctypes import _SimpleCData\n\nclass c_bool(_SimpleCData):\n    def __init__(self, value):\n        self.value = True\n        self._type_ = '?'\n    "]
    for (c_type, builtin_type, type_code) in c_class_to_type:
        src.append(f'''
class {c_type}(_SimpleCData):
    def __init__(self, value):
        self.value = {builtin_type}(value)
        self._type_ = '{type_code}'
    ''')
    return parse('\n'.join(src))
class AdaroundParameters():
    """Configuration parameters for Adaround (adaptive rounding) optimization."""

    def __init__(self, data_set: tf.data.Dataset, num_batches: int, default_num_iterations: int=10000, default_reg_param: float=0.01, default_beta_range: Tuple=(20, 2), default_warm_start: float=0.2):
        """
        :param data_set: Calibration data set used during rounding optimization.
        :param num_batches: Number of batches to draw from the data set.
        :param default_num_iterations: Optimization iterations per layer.
        :param default_reg_param: Trade-off between reconstruction loss and rounding loss.
        :param default_beta_range: (start, end) beta for the annealing schedule.
        :param default_warm_start: Fraction of iterations with rounding loss disabled.
        """
        self.data_set = data_set
        self.num_batches = num_batches
        self.num_iterations = default_num_iterations
        self.reg_param = default_reg_param
        self.beta_range = default_beta_range
        self.warm_start = default_warm_start

    def __eq__(self, other: 'AdaroundParameters'):
        # BUG FIX: comparing against a non-AdaroundParameters object used to
        # raise AttributeError; returning NotImplemented lets Python fall back
        # to the other operand / identity comparison instead.
        if not isinstance(other, AdaroundParameters):
            return NotImplemented
        return ((self.data_set == other.data_set) and (self.num_batches == other.num_batches) and (self.num_iterations == other.num_iterations) and (self.reg_param == other.reg_param) and (self.beta_range == other.beta_range) and (self.warm_start == other.warm_start))
# NOTE(review): the three parenthesised lines below appear to be stripped nox
# decorators (@nox.session(python=...) and two @nox.parametrize(...)) -- verify.
(python=USE_PYTHON_VERSIONS)
('command_a', install_commands)
('command_b', install_commands)
def session_cross_pep420_pkgutil(session, command_a, command_b):
    """Install a native and a pkgutil-style namespace package with mixed install commands, then verify imports."""
    session.install('--upgrade', 'setuptools', 'pip')
    install_packages(session, 'native/pkg_a', 'pkgutil/pkg_b', command_a, command_b)
    session.run('python', 'verify_packages.py')
class AttnSkipUpBlock2D(nn.Module):
    """UNet up-block: ResNet layers with attention, plus a FIR-upsampled skip branch."""

    def __init__(self, in_channels: int, prev_output_channel: int, out_channels: int, temb_channels: int, dropout: float=0.0, num_layers: int=1, resnet_eps: float=1e-06, resnet_time_scale_shift: str='default', resnet_act_fn: str='swish', resnet_pre_norm: bool=True, attn_num_head_channels=1, attention_type='default', output_scale_factor=np.sqrt(2.0), upsample_padding=1, add_upsample=True):
        super().__init__()
        self.attentions = nn.ModuleList([])
        self.resnets = nn.ModuleList([])
        self.attention_type = attention_type
        for i in range(num_layers):
            # Each layer consumes one skip connection from the down path.
            res_skip_channels = (in_channels if (i == (num_layers - 1)) else out_channels)
            resnet_in_channels = (prev_output_channel if (i == 0) else out_channels)
            # NOTE(review): `groups=min((resnet_in_channels + (res_skip_channels // 4)), 32)`
            # divides only res_skip_channels by 4; comparable implementations use
            # (resnet_in_channels + res_skip_channels) // 4 -- suspected paren slip, verify.
            self.resnets.append(ResnetBlock(in_channels=(resnet_in_channels + res_skip_channels), out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=min((resnet_in_channels + (res_skip_channels // 4)), 32), groups_out=min((out_channels // 4), 32), dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm))
            self.attentions.append(AttentionBlockNew(out_channels, num_head_channels=attn_num_head_channels, rescale_output_factor=output_scale_factor, eps=resnet_eps))
        self.upsampler = FirUpsample2D(in_channels, out_channels=out_channels)
        if add_upsample:
            # Upsampling resnet plus the projection/norm for the skip-sample branch.
            self.resnet_up = ResnetBlock(in_channels=out_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=min((out_channels // 4), 32), groups_out=min((out_channels // 4), 32), dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, use_nin_shortcut=True, up=True, kernel='fir')
            self.skip_conv = nn.Conv2d(out_channels, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            self.skip_norm = torch.nn.GroupNorm(num_groups=min((out_channels // 4), 32), num_channels=out_channels, eps=resnet_eps, affine=True)
            self.act = nn.SiLU()
        else:
            self.resnet_up = None
            self.skip_conv = None
            self.skip_norm = None
            self.act = None

    def forward(self, hidden_states, res_hidden_states_tuple, temb=None, skip_sample=None):
        for resnet in self.resnets:
            # Pop the most recent skip connection and concatenate along channels.
            res_hidden_states = res_hidden_states_tuple[(- 1)]
            res_hidden_states_tuple = res_hidden_states_tuple[:(- 1)]
            hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
            hidden_states = resnet(hidden_states, temb)
        # NOTE(review): only attentions[0] is applied even though one attention
        # module per layer is created in __init__ -- verify against the
        # reference model before changing.
        hidden_states = self.attentions[0](hidden_states)
        if (skip_sample is not None):
            skip_sample = self.upsampler(skip_sample)
        else:
            skip_sample = 0
        if (self.resnet_up is not None):
            # Project the current features into the 3-channel skip-sample space.
            skip_sample_states = self.skip_norm(hidden_states)
            skip_sample_states = self.act(skip_sample_states)
            skip_sample_states = self.skip_conv(skip_sample_states)
            skip_sample = (skip_sample + skip_sample_states)
            hidden_states = self.resnet_up(hidden_states, temb)
        return (hidden_states, skip_sample)
# NOTE(review): the '.skipif(...)' line appears to be a stripped
# '@pytest.mark.skipif' decorator -- verify upstream.
.skipif((PY2 or (not LINUX) or (not CI)), reason='tested on linux and python 3 only')
def test_jedi_completion_environment(workspace):
    """Jedi completions must honour the configured python environment path."""
    doc_content = 'import logh\n'
    doc = Document(DOC_URI, workspace, doc_content)
    # Cursor right after 'logh' on the first line.
    com_position = {'line': 0, 'character': 11}
    # Presumably the CI environment provides /tmp/pyenv with `loghub`
    # installed -- verify against the CI setup.
    assert os.path.isdir('/tmp/pyenv/')
    # With no environment configured, 'logh' has no completion.
    settings = {'pylsp': {'plugins': {'jedi': {'environment': None}}}}
    doc.update_config(settings)
    completions = pylsp_jedi_completions(doc._config, doc, com_position)
    assert (completions is None)
    # Pointing jedi at the virtualenv makes its packages completable.
    env_path = '/tmp/pyenv/bin/python'
    settings = {'pylsp': {'plugins': {'jedi': {'environment': env_path}}}}
    doc.update_config(settings)
    completions = pylsp_jedi_completions(doc._config, doc, com_position)
    assert (completions[0]['label'] == 'loghub')
    resolved = pylsp_jedi_completion_item_resolve(doc._config, completions[0], doc)
    assert ('changelog generator' in resolved['documentation']['value'].lower())
# NOTE(review): the two leading dotted lines look like stripped pytest
# decorators (e.g. '@pytest.mark.xfail' and a custom marker) -- verify.
.xfail(reason='causing issues in CI, to be fixed later')
.spark_functions
def test_update_where_column_dne(dataframe, spark_dataframe):
    """update_where on the Spark frame must match the pandas reference result."""
    assert_frame_equal(spark_dataframe.update_where(conditions="\n        `decorated-elephant` = 1 AND `#$%^` = 'rabbit'\n    ", target_column_name='c', target_val=10).toPandas(), dataframe.update_where(((dataframe['decorated-elephant'] == 1) & (dataframe['#$%^'] == 'rabbit')), 'c', 10))
class Solution(object):
    """LeetCode 17: letter combinations of a phone number."""

    def letterCombinations(self, digits):
        """Return all letter strings the digit string can spell.

        Uses the module-level `dmap` digit->letters table. Combinations are
        produced with the first digit's letter varying slowest, matching the
        order of the recursive formulation. Empty input yields [].
        """
        if not digits:
            return []
        combos = ['']
        for digit in digits:
            # Extend every prefix by each letter mapped to this digit.
            combos = [prefix + letter for prefix in combos for letter in dmap[digit]]
        return combos
def _format_cycles(dag: nx.DiGraph, cycles: list[tuple[(str, ...)]]) -> str:
    """Render the nodes participating in dependency cycles as a vertical chain.

    NOTE(review): this assumes ``cycles`` is a list of 2-tuples (edges):
    keeping every even index of the flattened edge list walks the edge
    sources, and the last edge's destination closes the chain — confirm
    against the networkx call producing ``cycles``.
    """
    # Every second element of the flattened edges: the chain of sources.
    chain = [x for (i, x) in enumerate(itertools.chain.from_iterable(cycles)) if ((i % 2) == 0)]
    chain += [cycles[(- 1)][1]]
    lines: list[str] = []
    for x in chain:
        # A graph node carries either a 'task' or a plain 'node' payload.
        node = (dag.nodes[x].get('task') or dag.nodes[x].get('node'))
        # NOTE(review): ``short_name`` is unbound if ``node`` is neither a
        # PTask nor a PNode — presumably every node is one of the two.
        if isinstance(node, PTask):
            short_name = format_task_name(node, editor_url_scheme='no_link').plain
        elif isinstance(node, PNode):
            short_name = node.name
        lines.extend((short_name, (' ' + ARROW_DOWN_ICON)))
    # Drop the trailing arrow line.
    return '\n'.join(lines[:(- 1)])
class ConstrainedVar(Var):
    """A logic variable that additionally carries a constraint.

    Instances are interned through the ``Var._refs`` cache: constructing a
    ConstrainedVar with the same ``(token, constraint)`` pair returns the
    same object.
    """

    __slots__ = ('constraint',)

    def __new__(cls, constraint, token=None, prefix=''):
        if (token is None):
            # Auto-generate a token from the class-wide counter, as Var does.
            token = f'{prefix}_{Var._id}'
            Var._id += 1
        key = (token, constraint)
        obj = cls._refs.get(key, None)
        if (obj is None):
            obj = object.__new__(cls)
            obj.token = token
            obj.constraint = constraint
            cls._refs[key] = obj
        return obj

    def __eq__(self, other):
        # Exact type identity (``is``, the idiomatic check) so a
        # ConstrainedVar never compares equal to a plain Var with the same
        # token; previously this used ``==`` on the types.
        if (type(self) is type(other)):
            return ((self.token == other.token) and (self.constraint == other.constraint))
        return NotImplemented

    def __hash__(self):
        # Consistent with __eq__: type, token and constraint all participate.
        return hash((type(self), self.token, self.constraint))

    def __str__(self):
        return f'~{self.token} [{self.constraint}]'

    def __repr__(self):
        return f'{type(self).__name__}({repr(self.constraint)}, {self.token})'
def test_issue_594_random_parametrize(pytester: pytest.Pytester) -> None:
    """pytest-xdist must detect non-deterministic parametrization.

    The generated module shuffles its parametrize values at collection time,
    so each worker collects a different order; the run must fail with the
    'Different tests were collected' message instead of mis-assigning tests.
    """
    p1 = pytester.makepyfile("\n        import pytest\n        import random\n\n        xs = list(range(10))\n        random.shuffle(xs)\n        .parametrize('x', xs)\n        def test_foo(x):\n            assert 1\n    ")
    result = pytester.runpytest(p1, '-v', '-n4')
    # Exit code 1: the collection mismatch is reported as a failure.
    assert (result.ret == 1)
    result.stdout.fnmatch_lines(['Different tests were collected between gw* and gw*'])
((not _optionals.HAS_PYSCF), 'pyscf not available.')
class TestElectronicDipoleMoment(PropertyTest):
    """Tests for the electronic dipole moment property from PySCFDriver.

    NOTE(review): the bare tuple above the class and the one before
    ``test_second_q_ops`` look like ``@unittest.skipIf`` / ``ddt @data``
    decorator arguments whose ``@`` prefixes were lost during extraction.
    """

    def setUp(self):
        # Build the dipole-moment property from a default PySCF driver run.
        super().setUp()
        driver = PySCFDriver()
        self.prop = driver.run().properties.electronic_dipole_moment

    (('XDipole', {}), ('YDipole', {}), ('ZDipole', {'+_0 -_0': 0., '+_0 -_1': 0., '+_1 -_0': 0., '+_1 -_1': 0., '+_2 -_2': 0., '+_2 -_3': 0., '+_3 -_2': 0., '+_3 -_3': 0.}))
    def test_second_q_ops(self, key: str, expected_op_data: dict[(str, float)]):
        """Each dipole component operator matches the expected coefficients."""
        op = self.prop.second_q_ops()[key]
        self.assertEqual(len(op), len(expected_op_data))
        # Compare term by term; only magnitudes are checked since signs may
        # differ by convention.
        for ((key1, val1), (key2, val2)) in zip(op.items(), expected_op_data.items()):
            self.assertEqual(key1, key2)
            self.assertTrue(np.isclose(np.abs(val1), val2))
def test_n_steps_type_error():
    """ScalarLoop must reject a non-integer ``n_steps`` argument.

    Builds a minimal one-variable loop (x <- x0 + const) and checks that
    calling it with a float64 step count raises TypeError with the exact
    documented message.
    """
    x0 = float64('x0')
    const = float64('const')
    x = (x0 + const)
    op = ScalarLoop(init=[x0], constant=[const], update=[x])
    with pytest.raises(TypeError, match=re.escape('(n_steps) must be of integer type. Got float64')):
        op(float64('n_steps'), x0, const)
(suppress_health_check=[HealthCheck.function_scoped_fixture], deadline=None)
(args=arglists(anything_pickleable_and_hashable()), kwargs=map_reduce_kwargs_iterators())
.filterwarnings('ignore:.*:pytest.PytestUnraisableExceptionWarning')
def test_map_with_iterators(ray_context, func, args, kwargs):
    """Parallel MapReduce over iterators matches the builtin ``map``.

    NOTE(review): the three bare lines above are hypothesis ``@settings`` /
    ``@given`` and pytest ``@pytest.mark`` decorators whose ``@`` prefixes
    were lost during extraction.
    """
    # Two independent copies of the same iterators: one consumed by the
    # reference ``map``, the other by the parallel implementation.
    (iterables1, iterables2) = args
    expected = list(map(func, *iterables1))
    actual = parallel.MapReduce(func, *iterables2, parallel=True, **kwargs).run()
    if kwargs['ordered']:
        assert (expected == actual)
    else:
        # Unordered mode only guarantees the same multiset of results.
        assert (set(expected) == set(actual))
class SurveyMonkeyOAuth2(BaseOAuth2):
    """SurveyMonkey OAuth2 authentication backend.

    NOTE(review): AUTHORIZATION_URL and ACCESS_TOKEN_URL below are truncated,
    unterminated string literals — their URL values were lost during
    extraction and must be restored from the original source.
    """
    name = 'surveymonkey'
    AUTHORIZATION_URL = '
    ACCESS_TOKEN_URL = '
    ACCESS_TOKEN_METHOD = 'POST'
    # Path of the "current user" endpoint, relative to the per-account
    # access_url returned with the token response.
    USER_DATA_URL = '/v3/users/me'
    STATE_PARAMETER = False
    REDIRECT_STATE = False
    # Persist the per-account API host alongside the token.
    EXTRA_DATA = [('access_url', 'access_url')]

    def get_user_details(self, response):
        """Synthesize a 'name' field from first/last name and return the data."""
        response['name'] = ((response['first_name'] + ' ') + response['last_name'])
        return response

    def user_data(self, access_token, *args, **kwargs):
        """Fetch the current user's profile from the account-specific API host."""
        base_url = kwargs['response']['access_url']
        return self.get_json((base_url + self.USER_DATA_URL), headers={'Authorization': ('bearer ' + access_token)})
.route('/')
def index() -> None:
    """Render the Kodi plugin's root directory listing.

    Without an access token only a 'login' entry is shown; once authorized,
    every main-menu item flagged as displayed is listed.

    NOTE(review): the ``.route('/')`` line above is a routing decorator whose
    object prefix (presumably ``@plugin.routing``) was lost during extraction.
    """
    if (not plugin.settings.access_token):
        # Not authorized yet: show only the activation/login entry.
        li = plugin.list_item(name=localize(32018), iconImage=plugin.routing.build_icon_path('activate'))
        xbmcplugin.addDirectoryItem(plugin.handle, plugin.routing.build_url('login/'), li, False)
    else:
        for menu_item in plugin.main_menu_items:
            if menu_item.is_displayed:
                li = plugin.list_item(name=menu_item.title, iconImage=menu_item.icon, thumbnailImage=menu_item.icon)
                xbmcplugin.addDirectoryItem(plugin.handle, menu_item.url, li, menu_item.is_dir)
    # Finalize the directory listing for Kodi.
    xbmcplugin.endOfDirectory(plugin.handle)
def _compute_cross_entropy_norm(mean_label: torch.Tensor, pos_labels: torch.Tensor, neg_labels: torch.Tensor, eta: float) -> torch.Tensor:
mean_label = mean_label.double()
mean_label.clamp_(min=eta, max=(1 - eta))
return (((- pos_labels) * torch.log2(mean_label)) - (neg_labels * torch.log2((1.0 - mean_label)))) |
class BaseModel(nn.Module):
    """Checkpointing base class for experiment models.

    Tracks training progress (iteration, best evaluation result) and
    saves/loads every registered sub-module plus optimizer and LR-scheduler
    state under ``<config.PATH>/ckp/<name>/<config.exp>``.  Sub-modules whose
    name contains an entry of ``skip_names`` are excluded from checkpointing.
    """

    def __init__(self, name, config):
        super(BaseModel, self).__init__()
        self.name = name
        self.config = config
        self.exp = config.exp
        self.epoch = (- 1)
        self.iteration = 0
        # Best evaluation metric (IoU) seen so far.
        self.eva_res = 0
        self.best_suffix = '_best.pth'
        self.suffix = '.pth'
        # Sub-modules containing these substrings are not checkpointed.
        self.skip_names = ['loss']
        self.saving_pth = os.path.join(config.PATH, 'ckp', name, self.exp)
        Path(self.saving_pth).mkdir(parents=True, exist_ok=True)
        self.config_path = os.path.join(self.saving_pth, 'config')

    def saveConfig(self, path):
        """Persist bookkeeping state (iteration, best eval result) to ``path``."""
        torch.save({'iteration': self.iteration, 'eva_res': self.eva_res}, path)

    def loadConfig(self, path):
        """Return ``(iteration, eva_res)`` stored at ``path``; ``(0, 0)`` if absent."""
        if (not os.path.exists(path)):
            return (0, 0)
        if torch.cuda.is_available():
            data = torch.load(path)
        else:
            # Remap GPU-saved tensors onto the CPU.
            data = torch.load(path, map_location=(lambda storage, loc: storage))
        try:
            eva_res = data['eva_res']
        except KeyError:
            # Older checkpoints may predate the eva_res field; previously this
            # was a bare ``except`` that would also hide unrelated errors.
            print('Target saving config file does not contain eva_res!')
            eva_res = 0
        return (data['iteration'], eva_res)

    def _should_skip(self, name, skip_names):
        """True when ``name`` contains any of the skip substrings."""
        return any(((k in name) for k in skip_names))

    def save(self):
        """Save all checkpointable sub-modules plus optimizer/scheduler state.

        If the current evaluation result beats the stored best model, the
        files are written with the best-model suffix; otherwise as a regular
        checkpoint.
        """
        print(('\nSaving %s...' % self.name))
        if (not os.path.exists((self.config_path + self.best_suffix))):
            print('No previous best model found. Saving this as the best.\n')
            suffix = self.best_suffix
        else:
            print('Found the previous best model.')
            (_, eva_res) = self.loadConfig((self.config_path + self.best_suffix))
            print('current v.s. previous: {:1.3f} {:1.3f}'.format(self.eva_res, eva_res))
            if (self.eva_res > eva_res):
                print('Current IoU is better. Update best model.\n')
                suffix = self.best_suffix
            else:
                print('Previous IoU is better, save this one as checkpoint.\n')
                suffix = self.suffix
        self.saveConfig((self.config_path + suffix))
        for (name, model) in self._modules.items():
            if (not self._should_skip(name, self.skip_names)):
                self.saveWeights(model, os.path.join(self.saving_pth, (name + suffix)))
        torch.save({'optimizer': self.optimizer.state_dict()}, os.path.join(self.saving_pth, ('optimizer' + suffix)))
        torch.save({'lr_scheduler': self.lr_scheduler.state_dict()}, os.path.join(self.saving_pth, ('lr_scheduler' + suffix)))

    def load(self, best=False):
        """Restore model, optimizer and scheduler state.

        Chooses between the best-model file and the latest checkpoint
        (preferring the larger stored iteration) unless ``best`` forces the
        best model.  Returns True when every sub-module loaded successfully.
        """
        print(('\nLoading %s model...' % self.name))
        loaded = True
        best_exists = os.path.exists((self.config_path + self.best_suffix))
        ckpt_exists = os.path.exists((self.config_path + self.suffix))
        if best:
            suffix = self.best_suffix
        elif ((not ckpt_exists) and best_exists):
            print('\tNo checkpoints, but has saved best model. Load the best model')
            suffix = self.best_suffix
        elif (ckpt_exists and best_exists):
            # Both exist: keep whichever file is further along in training.
            print('\tFound checkpoint model and the best model. Comparing itertaion')
            (iteration, _) = self.loadConfig((self.config_path + self.suffix))
            (iteration_best, _) = self.loadConfig((self.config_path + self.best_suffix))
            if (iteration > iteration_best):
                print('\tcheckpoint has larger iteration value. Load checkpoint')
                suffix = self.suffix
            else:
                print('\tthe best model has larger iteration value. Load the best model')
                suffix = self.best_suffix
        elif ckpt_exists:
            print('\tLoad checkpoint')
            suffix = self.suffix
        else:
            print('\tNo saved model found')
            return False
        (self.iteration, self.eva_res) = self.loadConfig((self.config_path + suffix))
        for (name, model) in self._modules.items():
            if (not self._should_skip(name, self.skip_names)):
                loaded &= self.loadWeights(model, os.path.join(self.saving_pth, (name + suffix)))
        opt_path = os.path.join(self.saving_pth, ('optimizer' + suffix))
        if os.path.exists(opt_path):
            data = torch.load(opt_path)
            self.optimizer.load_state_dict(data['optimizer'])
            print(f'resume optimizer from {suffix}', flush=True)
        sched_path = os.path.join(self.saving_pth, ('lr_scheduler' + suffix))
        if os.path.exists(sched_path):
            data = torch.load(sched_path)
            self.lr_scheduler.load_state_dict(data['lr_scheduler'])
            print(f'resume lr scehduler from {suffix}', flush=True)
        if loaded:
            print('\tmodel loaded!\n')
        else:
            print('\tmodel loading failed!\n')
        return loaded

    def load_pretrain_model(self, path, skip_names=('predictor',), is_freeze=True):
        """Load pretrained sub-module weights, optionally freezing them.

        Sub-modules whose name contains an entry of ``skip_names`` are left
        untouched.  The default is a tuple rather than the previous mutable
        list default.
        """
        loaded = True
        for (name, model) in self._modules.items():
            if self._should_skip(name, skip_names):
                continue
            loaded &= self.loadWeights(model, os.path.join(path, (name + '_best.pth')))
            if is_freeze:
                for (k, v) in model.named_parameters():
                    v.requires_grad = False
        if loaded:
            print('\tmodel loaded!\n')
        else:
            print('\tmodel loading failed!\n')

    def saveWeights(self, model, path):
        """Save a sub-module's state dict, unwrapping DataParallel."""
        if isinstance(model, nn.DataParallel):
            torch.save({'model': model.module.state_dict()}, path)
        else:
            torch.save({'model': model.state_dict()}, path)

    def loadWeights(self, model, path):
        """Load a sub-module state dict, adapting the 'module.' prefix.

        Returns True on success, False when ``path`` does not exist.
        Checkpoints may or may not carry the DataParallel 'module.' prefix,
        so every key is remapped to match the target model.  (The previous
        implementation built the remapped dict but then loaded the raw one
        in the non-DataParallel case, and dropped already-prefixed keys in
        the DataParallel case.)
        """
        if (not os.path.exists(path)):
            return False
        if torch.cuda.is_available():
            data = torch.load(path)
        else:
            data = torch.load(path, map_location=(lambda storage, loc: storage))
        new_dict = collections.OrderedDict()
        wrap = isinstance(model, nn.DataParallel)
        for (k, v) in data['model'].items():
            has_prefix = k.startswith('module.')
            if (wrap and (not has_prefix)):
                new_dict[('module.' + k)] = v
            elif ((not wrap) and has_prefix):
                new_dict[k[7:]] = v
            else:
                new_dict[k] = v
        model.load_state_dict(new_dict)
        return True
def _get_health_state_cache(filename):
last_error_file = ('cache/last-error-state_' + os.path.basename(filename).rstrip('.yaml'))
if os.path.exists(last_error_file):
with open(last_error_file, 'rb') as f:
last_error_state_cache = pickle.load(f)
return last_error_state_cache |
def test_bloch_redfield_tensor_spectral_string():
    """bloch_redfield_tensor accepts a string-valued spectral density.

    With ``fock_basis=False`` the function returns the tensor in the
    eigenbasis together with the transformation matrix; both must be Qobj.
    """
    N = 5
    H = qutip.num(N)
    a = qutip.destroy(N)
    A_op = (a + a.dag())
    # Spectral density given as a coefficient string in the variable ``w``.
    spectra = '(w>0) * 0.5'
    (R_eigs, evecs) = bloch_redfield_tensor(H=H, a_ops=[(A_op, spectra)], c_ops=[(a ** 2)], fock_basis=False)
    assert isinstance(R_eigs, qutip.Qobj)
    assert isinstance(evecs, qutip.Qobj)
def close_db_filter(_):
    """Request-teardown hook: close the primary DB connection and any read replicas.

    The unused positional argument is the response/exception object the web
    framework passes to teardown callbacks.
    """
    if db.obj is not None and not db.is_closed():
        logger.debug('Disconnecting from database.')
        db.close()
    if read_only_config.obj is not None:
        for read_replica in read_only_config.obj.read_replicas:
            if read_replica.is_closed():
                continue
            logger.debug('Disconnecting from read replica.')
            read_replica.close()
class BasePersistence(Generic[(UD, CD, BD)], ABC):
    """Abstract interface for persisting bot, chat, user and callback data.

    NOTE(review): the ``@property`` / ``@abstractmethod`` decorators and the
    abstract-method bodies appear to have been stripped during extraction
    (see the bare ``_interval.setter`` line below); the methods here document
    the interface only.
    """
    __slots__ = ('bot', 'store_data', '_update_interval')

    def __init__(self, store_data: Optional[PersistenceInput]=None, update_interval: float=60):
        # What to store; defaults to whatever PersistenceInput enables.
        self.store_data: PersistenceInput = (store_data or PersistenceInput())
        # Seconds between persistence runs; immutable after construction.
        self._update_interval: float = update_interval
        # Attached later via set_bot().
        self.bot: Bot = None

    def update_interval(self) -> float:
        """Seconds to wait between two consecutive persistence runs."""
        return self._update_interval

    _interval.setter
    def update_interval(self, value: object) -> NoReturn:
        """Reject any post-init mutation of the update interval."""
        raise AttributeError('You can not assign a new value to update_interval after initialization.')

    def set_bot(self, bot: Bot) -> None:
        """Attach the bot instance; storing callback_data requires ExtBot."""
        if (self.store_data.callback_data and (not isinstance(bot, ExtBot))):
            raise TypeError('callback_data can only be stored when using telegram.ext.ExtBot.')
        self.bot = bot

    async def get_user_data(self) -> Dict[(int, UD)]:
        """Return the stored user data keyed by user id."""
    async def get_chat_data(self) -> Dict[(int, CD)]:
        """Return the stored chat data keyed by chat id."""
    async def get_bot_data(self) -> BD:
        """Return the stored bot-wide data object."""
    async def get_callback_data(self) -> Optional[CDCData]:
        """Return the stored callback data, if any."""
    async def get_conversations(self, name: str) -> ConversationDict:
        """Return the stored conversation states for handler ``name``."""
    async def update_conversation(self, name: str, key: ConversationKey, new_state: Optional[object]) -> None:
        """Persist a single conversation state change."""
    async def update_user_data(self, user_id: int, data: UD) -> None:
        """Persist the data for one user."""
    async def update_chat_data(self, chat_id: int, data: CD) -> None:
        """Persist the data for one chat."""
    async def update_bot_data(self, data: BD) -> None:
        """Persist the bot-wide data object."""
    async def update_callback_data(self, data: CDCData) -> None:
        """Persist the callback data."""
    async def drop_chat_data(self, chat_id: int) -> None:
        """Delete the stored data for one chat."""
    async def drop_user_data(self, user_id: int) -> None:
        """Delete the stored data for one user."""
    async def refresh_user_data(self, user_id: int, user_data: UD) -> None:
        """Refresh ``user_data`` in place from the backend before use."""
    async def refresh_chat_data(self, chat_id: int, chat_data: CD) -> None:
        """Refresh ``chat_data`` in place from the backend before use."""
    async def refresh_bot_data(self, bot_data: BD) -> None:
        """Refresh ``bot_data`` in place from the backend before use."""
    async def flush(self) -> None:
        """Write any pending state to the backend (called on shutdown)."""
def test_cache_classifier():
    """Cache wrappers must reuse fitted results only for identical inputs.

    A wrapper hits the cache iff its cache name, the features, the target,
    the sample weights and the estimator parameters all match a previous
    fit; any change must force a refit.
    """
    cache_helper.clear_cache()
    for (Wrapper, Model) in [(CacheClassifier, LogisticRegression), (CacheRegressor, LinearRegression)]:
        (X, y, weights) = generate_classification_data(n_classes=2)
        # First fit: nothing cached yet.
        clf = Wrapper('first', Model()).fit(X, y)
        assert (clf._used_cache == False)
        # Numerically identical data (X + 0, y + 0) must hit the cache.
        clf = Wrapper('first', Model()).fit((X + 0), (y + 0))
        assert (clf._used_cache == True)
        # A different cache name is a different key.
        clf = Wrapper('second', Model()).fit(X, y)
        assert (clf._used_cache == False)
        # Changing a single feature value invalidates the cache.
        X_new = X.copy()
        X_new.iloc[(0, 0)] += 1
        clf = Wrapper('first', Model()).fit(X_new, y)
        assert (clf._used_cache == False)
        # Changing a single label invalidates the cache.
        y_new = y.copy()
        y_new[0] += 1
        clf = Wrapper('first', Model()).fit(X, y_new)
        assert (clf._used_cache == False)
        # Passing sample_weight (even None) changes the fit signature.
        clf = Wrapper('first', Model()).fit(X, y, sample_weight=None)
        assert (clf._used_cache == False)
        # Different estimator params (n_jobs): refit once, then cached.
        clf = Wrapper('first', Model(n_jobs=2)).fit(X, y)
        assert (clf._used_cache == False)
        clf = Wrapper('first', Model(n_jobs=2)).fit(X, y)
        assert (clf._used_cache == True)
    cache_helper.clear_cache()
def get_job_status(batch_cli, name, namespace='default'):
    """Return the status of the named Kubernetes Job.

    Args:
        batch_cli: a BatchV1Api-like client.
        name: job name.
        namespace: Kubernetes namespace (defaults to 'default').

    Raises:
        Re-raises whatever the API client raised, after logging it.
    """
    try:
        return batch_cli.read_namespaced_job_status(name=name, namespace=namespace)
    except Exception as e:
        # Log at the boundary and propagate so callers can decide how to
        # react.  Lazy %-args instead of eager string formatting.
        logging.error('Exception when calling BatchV1Api->read_namespaced_job_status: %s', e)
        raise
def test_validate_without_strict_fails_only_non_strict() -> None:
    """Factory.validate (non-strict) reports only the base errors.

    The fixture project intentionally lacks the fields required in package
    mode; non-strict validation must surface exactly that error and no
    strict-only findings or warnings.
    """
    project_failing_strict_validation = ((fixtures_dir / 'project_failing_strict_validation') / 'pyproject.toml')
    with project_failing_strict_validation.open('rb') as f:
        doc = tomllib.load(f)
    content = doc['tool']['poetry']
    assert (Factory.validate(content) == {'errors': ["The fields ['authors', 'description', 'name', 'version'] are required in package mode."], 'warnings': []})
def tune_test(path, num_trials, num_workers, num_boost_rounds, num_files=0, regression=False, use_gpu=False, fake_data=False, smoke_test=False):
    """Release test: run a Ray Tune sweep over distributed XGBoost training.

    Each trial trains with ``num_workers`` actors, records which node IPs
    its workers ran on, and the final check asserts that no node hosted
    workers from more than one trial (placement isolation).

    NOTE(review): the inner ``local_train`` reads the module-global ``args``
    (``args.num_workers``) instead of the ``num_workers`` parameter when
    generating smoke-test data — confirm this is intentional.
    """
    ray_params = RayParams(elastic_training=False, max_actor_restarts=0, num_actors=num_workers, cpus_per_actor=1, gpus_per_actor=(0 if (not use_gpu) else 1))

    def local_train(config):
        # Per-trial training function executed by Ray Tune.
        temp_dir = None
        if (fake_data or smoke_test):
            # Generate a small synthetic parquet dataset for smoke tests.
            temp_dir = '/tmp/release_test_data'
            if os.path.exists(temp_dir):
                shutil.rmtree(temp_dir)
            os.makedirs(temp_dir, 493)  # 493 == 0o755
            local_path = os.path.join(temp_dir, 'smoketest.parquet')
            create_parquet(filename=local_path, num_rows=(args.num_workers * 500), num_features=4, num_classes=2, num_partitions=(args.num_workers * 10))
        else:
            if (not os.path.exists(path)):
                raise ValueError(f'''Benchmarking data not found: {path}.
FIX THIS by running `python create_test_data.py` on all nodes first.''')
            local_path = path
        xgboost_params = {'tree_method': ('hist' if (not use_gpu) else 'gpu_hist')}
        xgboost_params.update({'objective': 'binary:logistic', 'eval_metric': ['logloss', 'error']})
        # Tune-sampled hyperparameters override the static ones.
        xgboost_params.update(config)
        additional_results = {}
        (bst, time_taken) = train_ray(path=local_path, num_workers=num_workers, num_boost_rounds=num_boost_rounds, num_files=num_files, regression=regression, use_gpu=use_gpu, smoke_test=smoke_test, ray_params=ray_params, xgboost_params=xgboost_params, additional_results=additional_results, callbacks=[PlacementCallback(), TuneReportCallback()])
        bst.save_model('tuned.xgb')
        # Flatten the per-rank worker IPs reported by PlacementCallback.
        trial_ips = []
        for (rank, ips) in enumerate(additional_results['callback_returns']):
            for ip in ips:
                trial_ips.append(ip)
        tune_trial = get_trial_id()
        # Persist the IP list in a checkpoint so the driver can read it back.
        with tune.checkpoint_dir((num_boost_rounds + 1)) as checkpoint_dir:
            with open(os.path.join(checkpoint_dir, 'callback_returns.json'), 'wt') as f:
                json.dump({tune_trial: trial_ips}, f)
        if temp_dir:
            shutil.rmtree(temp_dir)

    search_space = {'eta': tune.loguniform(0.0001, 0.1), 'subsample': tune.uniform(0.5, 1.0), 'max_depth': tune.randint(1, 9)}
    analysis = tune.run(local_train, config=search_space, num_samples=num_trials, sync_config=tune.SyncConfig(sync_to_driver=DockerSyncer), resources_per_trial=ray_params.get_tune_resources())
    # Invert the mapping: node IP -> trial ids whose workers ran there.
    ip_to_trials = defaultdict(list)
    for trial in analysis.trials:
        trial = trial
        with open(os.path.join(trial.checkpoint.value, 'callback_returns.json'), 'rt') as f:
            trial_to_ips = json.load(f)
        for (tune_trial, ips) in trial_to_ips.items():
            for node_ip in ips:
                ip_to_trials[node_ip].append(tune_trial)
    # Placement check: every node must have served exactly one trial.
    fail = False
    for (ip, trial_ids) in ip_to_trials.items():
        print(f'For IP {ip} got trial IDs {trial_ids}')
        fail = (fail or any(((trial_id != trial_ids[0]) for trial_id in trial_ids)))
    if fail:
        raise ValueError('Different trial IDs found on same node.')
    else:
        print('Success.')
class TestClassyTestCase(unittest.TestCase):
    """Meta-tests for the ClassyTestCase helper assertions."""

    def test_assert_torch_all_close(self):
        """assertTorchAllClose passes for close tensors and fails otherwise.

        It must also reject non-tensor inputs on either side rather than
        coercing them.
        """
        test_fixture = ClassyTestCase()
        data = [1.1, 2.2]
        tensor_1 = torch.Tensor(data)
        # Identical tensors compare close.
        tensor_2 = tensor_1
        test_fixture.assertTorchAllClose(tensor_1, tensor_2)
        # Numerically different tensors must raise.
        tensor_2 = (tensor_1 / 2)
        with self.assertRaises(AssertionError):
            test_fixture.assertTorchAllClose(tensor_1, tensor_2)
        # A plain list on either side must raise.
        tensor_2 = data
        with self.assertRaises(AssertionError):
            test_fixture.assertTorchAllClose(tensor_1, tensor_2)
        tensor_1 = data
        tensor_2 = torch.Tensor(data)
        with self.assertRaises(AssertionError):
            test_fixture.assertTorchAllClose(tensor_1, tensor_2)
def custom_debugger_hook():
    """Fixture: install a recording debugger class as ``_pytest._CustomDebugger``.

    Yields the list of recorded call names so a test can assert which
    debugger entry points (init/reset/interaction/set_trace) were exercised,
    then removes the attribute on teardown.

    NOTE(review): presumably decorated with ``@pytest.fixture`` in the
    original source (the decorator appears to have been stripped).
    """
    called = []

    class _CustomDebugger():
        """Minimal pdb-compatible stand-in that only records invocations."""

        def __init__(self, *args, **kwargs):
            called.append('init')

        def reset(self):
            called.append('reset')

        def interaction(self, *args):
            called.append('interaction')

        def set_trace(self, frame):
            print('**CustomDebugger**')
            called.append('set_trace')

    # Expose the class where e.g. --pdbcls=_pytest:_CustomDebugger finds it.
    _pytest._CustomDebugger = _CustomDebugger
    (yield called)
    # Teardown: remove the attribute so other tests see a clean module.
    del _pytest._CustomDebugger
def get_config():
    """Return the NCSN++ configuration for continuous VP-SDE training with ODE sampling."""
    cfg = get_default_configs()

    # Training: continuous-time VP SDE with mean-reduced loss.
    trn = cfg.training
    trn.sde = 'vpsde'
    trn.continuous = True
    trn.reduce_mean = True

    # Sampling: probability-flow ODE integration.
    smp = cfg.sampling
    smp.method = 'ode'
    smp.smallest_time = 0.001

    # Data: inputs centered around zero.
    cfg.data.centered = True

    # Model: NCSN++ backbone hyper-parameters.
    mdl = cfg.model
    mdl.name = 'ncsnpp'
    mdl.scale_by_sigma = False
    mdl.ema_rate = 0.9999
    mdl.normalization = 'GroupNorm'
    mdl.nonlinearity = 'swish'
    mdl.nf = 128
    mdl.ch_mult = (1, 2, 2, 2)
    mdl.num_res_blocks = 4
    mdl.attn_resolutions = (16,)
    mdl.resamp_with_conv = True
    mdl.conditional = True
    mdl.fir = False
    mdl.fir_kernel = [1, 3, 3, 1]
    mdl.skip_rescale = True
    mdl.resblock_type = 'biggan'
    mdl.progressive = 'none'
    mdl.progressive_input = 'none'
    mdl.progressive_combine = 'sum'
    mdl.attention_type = 'ddpm'
    mdl.init_scale = 0.0
    mdl.embedding_type = 'positional'
    mdl.fourier_scale = 16
    mdl.conv_size = 3

    return cfg
def render_policy(policy, log_dir, total_timesteps, eval_episodes=5):
    """Roll out the policy in the global ``env`` and save the frames as a video.

    Runs ``eval_episodes`` full episodes, collecting an RGB frame per step
    (plus the initial frame of each episode), then writes the clip to
    ``<log_dir>/<total_timesteps>.mp4``.
    """
    frames = []
    for _ in range(eval_episodes):
        obs = env.reset()
        policy.reset()
        frames.append(env.render(mode='rgb_array'))
        done = False
        while not done:
            action = policy.select_action(np.array(obs))
            obs, reward, done, _ = env.step(action)
            frames.append(env.render(mode='rgb_array'))
    # Normalize each frame to [0, 1] float tensors before saving.
    clip = [torch.tensor(frame.copy()).float() / 255 for frame in frames]
    utils.save_gif('{}/{}.mp4'.format(log_dir, total_timesteps), clip, color_last=True)
class BaseImageHeader():
    """Base class for image-header parsers.

    Stores pixel dimensions and DPI values; subclasses supply the MIME
    content type and the default file extension.
    """

    def __init__(self, px_width, px_height, horz_dpi, vert_dpi):
        self._px_width = px_width
        self._px_height = px_height
        self._horz_dpi = horz_dpi
        self._vert_dpi = vert_dpi

    def content_type(self):
        """MIME type of this image format; subclasses must override."""
        raise NotImplementedError(
            'content_type property must be implemented by all subclasses of BaseImageHeader'
        )

    def default_ext(self):
        """Default filename extension for this format; subclasses must override."""
        raise NotImplementedError(
            'default_ext property must be implemented by all subclasses of BaseImageHeader'
        )

    def px_width(self):
        """Image width in pixels."""
        return self._px_width

    def px_height(self):
        """Image height in pixels."""
        return self._px_height

    def horz_dpi(self):
        """Horizontal resolution in dots per inch."""
        return self._horz_dpi

    def vert_dpi(self):
        """Vertical resolution in dots per inch."""
        return self._vert_dpi
def save_weights(G, D, E1, A1, state_dict, weights_root, experiment_name, name_suffix=None, G_ema=None, copy=False):
    """Save generator/discriminator/encoder/attention weights and optimizers.

    Writes each network's state dict (and its optimizer's) plus the training
    ``state_dict`` under ``weights_root/experiment_name`` — or under a
    per-iteration subdirectory when ``copy`` is True.  ``G_ema`` is saved
    when provided.
    """
    if copy:
        root = '/'.join([weights_root, experiment_name, str(state_dict['itr'])])
    else:
        root = '/'.join([weights_root, experiment_name])
    # os.makedirs creates intermediate directories too; the previous
    # os.mkdir failed whenever the parent directory did not already exist
    # (always the case for the per-iteration copy path).
    os.makedirs(root, exist_ok=True)
    if name_suffix:
        print(('Saving weights to %s/%s...' % (root, name_suffix)))
    else:
        print(('Saving weights to %s...' % root))

    def _save(obj, label):
        # One state-dict file per network / optimizer / training state.
        torch.save(obj, ('%s/%s.pth' % (root, join_strings('_', [label, name_suffix]))))

    _save(G.state_dict(), 'G')
    _save(G.optim.state_dict(), 'G_optim')
    _save(D.state_dict(), 'D')
    _save(D.optim.state_dict(), 'D_optim')
    _save(E1.state_dict(), 'E1')
    _save(E1.optim.state_dict(), 'E1_optim')
    _save(A1.state_dict(), 'A1')
    _save(A1.optim.state_dict(), 'A1_optim')
    _save(state_dict, 'state_dict')
    if (G_ema is not None):
        _save(G_ema.state_dict(), 'G_ema')
def _get_gdal_info():
    """Collect rasterio/GDAL/PROJ/GEOS versions and data paths as a dict."""
    import rasterio
    return {
        'rasterio': rasterio.__version__,
        'GDAL': rasterio.__gdal_version__,
        'PROJ': rasterio.__proj_version__,
        'GEOS': rasterio.__geos_version__,
        # Search paths joined with the platform path separator.
        'PROJ DATA': os.pathsep.join(rasterio._env.get_proj_data_search_paths()),
        'GDAL DATA': rasterio._env.get_gdal_data(),
    }
def main():
    """CLI entry point: launch an interactive retro-gym session."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--game', default='StreetFighterIISpecialChampionEdition-Genesis')
    parser.add_argument('--state', default=retro.State.DEFAULT)
    parser.add_argument('--scenario', default='scenario')
    opts = parser.parse_args()
    RetroInteractive(game=opts.game, state=opts.state, scenario=opts.scenario).run()
class Effect7173(BaseEffect):
    """Passive hull bonus for mutadaptive remote armor repairers.

    Boosts the 'armorDamageAmount' attribute of fitted modules in the
    'Mutadaptive Remote Armor Repairer' group by the source item's
    'eliteBonusLogistics2' attribute, scaled by the 'Logistics Cruisers'
    skill.
    """
    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        fit.modules.filteredItemBoost((lambda mod: (mod.item.group.name == 'Mutadaptive Remote Armor Repairer')), 'armorDamageAmount', src.getModifiedItemAttr('eliteBonusLogistics2'), skill='Logistics Cruisers', **kwargs)
class ReportMgr(ReportMgrBase):
    """Training report manager that can mirror statistics to TensorBoard."""

    def __init__(self, report_every, start_time=(- 1.0), tensorboard_writer=None):
        """
        Args:
            report_every: report training stats every this many steps.
            start_time: wall-clock start time; -1.0 means "not started yet".
            tensorboard_writer: optional SummaryWriter; None disables
                TensorBoard logging.
        """
        super(ReportMgr, self).__init__(report_every, start_time)
        self.tensorboard_writer = tensorboard_writer

    def maybe_log_tensorboard(self, stats, prefix, learning_rate, step):
        # No-op when no writer was configured.
        if (self.tensorboard_writer is not None):
            stats.log_tensorboard(prefix, self.tensorboard_writer, learning_rate, step)

    def _report_training(self, step, num_steps, learning_rate, report_stats):
        """Emit in-progress stats and return a fresh accumulator."""
        report_stats.output(step, num_steps, learning_rate, self.start_time)
        # self.progress_step is presumably maintained by the base class —
        # it is not set in this class.
        self.maybe_log_tensorboard(report_stats, 'progress', learning_rate, self.progress_step)
        # Reset the statistics between reporting windows.
        report_stats = onmt.utils.Statistics()
        return report_stats

    def _report_step(self, lr, step, train_stats=None, valid_stats=None):
        """Log end-of-step train/validation perplexity and accuracy."""
        if (train_stats is not None):
            self.log(('Train perplexity: %g' % train_stats.ppl()))
            self.log(('Train accuracy: %g' % train_stats.accuracy()))
            self.maybe_log_tensorboard(train_stats, 'train', lr, step)
        if (valid_stats is not None):
            self.log(('Validation perplexity: %g' % valid_stats.ppl()))
            self.log(('Validation accuracy: %g' % valid_stats.accuracy()))
            self.maybe_log_tensorboard(valid_stats, 'valid', lr, step)
class TagHint(BaseEntry):
    """Search entry representing a ``/tag`` hint with a templated message.

    NOTE(review): ``display_name``, ``short_name``, ``description`` and
    ``inline_keyboard`` read like ``@property`` accessors whose decorators
    were lost during extraction.
    """

    def __init__(self, tag: str, message: str, description: str, default_query: str=None, inline_keyboard: InlineKeyboardMarkup=None, group_command: bool=False):
        # Tag keyword, without the leading slash.
        self.tag = tag
        # Message template; '{query}' is filled in by html_markup().
        self._message = message
        self._default_query = default_query
        self._description = description
        self._inline_keyboard = inline_keyboard
        self.group_command = group_command

    def display_name(self) -> str:
        return f'Tag hint: {self.short_name}'

    def short_name(self) -> str:
        return f'/{self.tag}'

    def description(self) -> str:
        return self._description

    def html_markup(self, search_query: str=None) -> str:
        """Fill the message template with the text after the tag word.

        Falls back to the default query when the search query has no second
        word.
        """
        parts = (search_query.split(maxsplit=1) if search_query else [])
        insert = (parts[1] if (len(parts) > 1) else None)
        return self._message.format(query=(insert or self._default_query))

    def html_insertion_markup(self, search_query: str=None) -> str:
        return self.html_markup(search_query=search_query)

    def compare_to_query(self, search_query: str) -> float:
        """Fuzzy-match the first query word (slashes stripped) against the tag."""
        parts = search_query.lstrip('/').split(maxsplit=1)
        if parts:
            return fuzz.ratio(self.tag, parts[0])
        return 0

    def inline_keyboard(self) -> Optional[InlineKeyboardMarkup]:
        return self._inline_keyboard
def obj_func_cell_cycle(trajectory):
    """Estimate an oscillation-frequency objective from a simulated trajectory.

    Detects sign changes (negative -> positive) in the first difference of
    the trajectory, records the corresponding times from the module-global
    ``tspan``, and returns twice the mean crossing time divided by the
    crossing count.
    """
    timestep = tspan[:-1]
    # First difference (note: earlier minus later sample).
    y = trajectory[:-1] - trajectory[1:]
    crossing_times = []
    prev = y[0]
    for idx in range(1, len(y)):
        if y[idx] > 0 > prev:
            crossing_times.append(timestep[idx])
        prev = y[idx]
    crossing_times = np.array(crossing_times)
    return (np.average(crossing_times) / len(crossing_times)) * 2
def test_ineichen_series_perez_enhancement():
    """Ineichen clear-sky model with the Perez enhancement over a day series.

    NOTE(review): several numeric literals in this test were lost during
    extraction — ``np.array([, 113., ...])`` and the rows containing bare
    commas are syntactically invalid and must be restored from the original
    test before this can run.
    """
    times = pd.date_range(start='2014-06-24', end='2014-06-25', freq='3h', tz='America/Phoenix')
    apparent_zenith = pd.Series(np.array([, 113., 82., 46.0467599, 10., 34., 72., 105., 124.]), index=times)
    am = pd.Series(np.array([nan, nan, 6., 1., 0., 1., 3., nan, nan]), index=times)
    expected = pd.DataFrame(np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [91.1249279, 321., 51.], [716., , 99.], [1053., 953., ], [863., 922., ], [271., 655., 73.], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]), columns=['ghi', 'dni', 'dhi'], index=times)
    out = clearsky.ineichen(apparent_zenith, am, 3, perez_enhancement=True)
    assert_frame_equal(expected, out)
def test_mercator_a_operation__defaults():
    """A default-constructed Mercator (variant A) conversion has zeroed parameters."""
    conv = MercatorAConversion()
    assert conv.name == 'unknown'
    assert conv.method_name == 'Mercator (variant A)'
    expected_params = {
        'Latitude of natural origin': 0.0,
        'Longitude of natural origin': 0.0,
        'False easting': 0.0,
        'False northing': 0.0,
        'Scale factor at natural origin': 1.0,
    }
    assert _to_dict(conv) == expected_params
def _str2bool(value):
    """Interpret common true/false spellings for argparse; raise otherwise."""
    if isinstance(value, bool):
        return value
    lowered = value.strip().lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)


def parse_input():
    """Parse command-line arguments for the untrimmed-classification evaluation.

    Returns the argparse namespace with ground_truth_filename,
    prediction_filename, subset, verbose and check_status.
    """
    description = 'This script allows you to evaluate the ActivityNet untrimmed video classification task which is intended to evaluate the ability of algorithms to predict activities in untrimmed video sequences.'
    p = argparse.ArgumentParser(description=description)
    p.add_argument('ground_truth_filename', help='Full path to json file containing the ground truth.')
    p.add_argument('prediction_filename', help='Full path to json file containing the predictions.')
    p.add_argument('--subset', default='validation', help='String indicating subset to evaluate: (training, validation)')
    # type=bool is broken for CLI flags: bool('False') is True, so any
    # non-empty value parsed as True.  Parse the string explicitly instead.
    p.add_argument('--verbose', type=_str2bool, default=True)
    p.add_argument('--check_status', type=_str2bool, default=True)
    return p.parse_args()
class DistcheckCmd(sdist):
    """``setup.py`` command: build an sdist and sanity-check it end to end."""
    description = 'run tests on a fresh sdist'

    def _check_manifest(self):
        """Verify every git-tracked file (minus known exclusions) ships in the sdist."""
        assert self.get_archive_files()
        # Only meaningful inside a git checkout; silently skipped otherwise.
        if (subprocess.call(['git', 'status'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0):
            included_files = self.filelist.files
            assert included_files
            process = subprocess.Popen(['git', 'ls-tree', '-r', 'HEAD', '--name-only'], stdout=subprocess.PIPE, universal_newlines=True)
            (out, err) = process.communicate()
            assert (process.returncode == 0)
            tracked_files = out.splitlines()
            # Development/CI files intentionally excluded from release tarballs.
            ignore_tracked = ['dev-utils/*', '.github/*', '.ci/*', '.codecov.yml', '.editorconfig', '.git*']
            tracked_files = [p for p in tracked_files if (not any((fnmatch.fnmatch(p, i) for i in ignore_tracked)))]
            diff = (set(tracked_files) - set(included_files))
            assert (not diff), ('Not all tracked files included in tarball, check MANIFEST.in', diff)

    def _check_dist(self):
        """Extract the tarball into a scratch dir and run test/build/install."""
        assert self.get_archive_files()
        distcheck_dir = os.path.join(self.dist_dir, 'distcheck')
        # Start from a clean extraction directory every run.
        if os.path.exists(distcheck_dir):
            dir_util.remove_tree(distcheck_dir)
        self.mkpath(distcheck_dir)
        archive = self.get_archive_files()[0]
        tfile = tarfile.open(archive, 'r:gz')
        tfile.extractall(distcheck_dir)
        tfile.close()
        name = self.distribution.get_fullname()
        extract_dir = os.path.join(distcheck_dir, name)
        # Run the standard build/test/install pipeline inside the extracted tree.
        old_pwd = os.getcwd()
        os.chdir(extract_dir)
        self.spawn([sys.executable, 'setup.py', 'test'])
        self.spawn([sys.executable, 'setup.py', 'build'])
        self.spawn([sys.executable, 'setup.py', 'build_sphinx'])
        self.spawn([sys.executable, 'setup.py', 'install', '--root', '../prefix', '--record', '../log.txt'])
        os.chdir(old_pwd)

    def run(self):
        """Build the sdist, then validate its manifest and installability."""
        sdist.run(self)
        self._check_manifest()
        self._check_dist()
_torch
class SplinterModelIntegrationTest(unittest.TestCase):
    """Integration test running the pretrained Splinter QA checkpoint.

    NOTE(review): the bare ``_torch`` line above is presumably a
    ``@require_torch``-style decorator mangled during extraction.
    """

    def test_splinter_question_answering(self):
        """Start/end logits have shape (1, 16) and peak at tokens 10 and 12."""
        model = SplinterForQuestionAnswering.from_pretrained('tau/splinter-base-qass')
        # Pre-tokenized input ids (presumably containing the model's
        # question token — verify against the tokenizer).
        input_ids = torch.tensor([[101, 7796, 1108, 1255, 1107, 104, 119, 1124, 1608, 1106, 1103, 1244, 2325, 1224, 119, 102]])
        output = model(input_ids)
        expected_shape = torch.Size((1, 16))
        self.assertEqual(output.start_logits.shape, expected_shape)
        self.assertEqual(output.end_logits.shape, expected_shape)
        # The predicted answer span is tokens 10..12.
        self.assertEqual(torch.argmax(output.start_logits), 10)
        self.assertEqual(torch.argmax(output.end_logits), 12)
class CustomObjectProperty(bpy.types.PropertyGroup, SizeOffsetGetSet, ArrayGetSet):
    """Blender property group for a custom object array with size/offset settings."""
    array: PointerProperty(type=ArrayProperty)
    size_offset: PointerProperty(type=SizeOffsetProperty)

    def init(self, wall_dimensions):
        """Record the owning wall's dimensions and seed the per-item size/offset.

        The default item width is the wall width divided by the array count.
        """
        self['wall_dimensions'] = wall_dimensions
        self.size_offset.init(((self['wall_dimensions'][0] / self.count), self['wall_dimensions'][1]), default_size=(1.0, 1.0), default_offset=(0.0, 0.0))

    def draw(self, context, layout):
        """Draw the size/offset box and the array count in the given layout."""
        box = layout.box()
        self.size_offset.draw(context, box)
        layout.prop(self.array, 'count')
def meet_similar_callables(t: CallableType, s: CallableType) -> CallableType:
    """Meet two callables with matching argument counts.

    Argument types are joined (arguments are contravariant) while the return
    type is met; the fallback instance prefers whichever side is more
    specific than plain 'builtins.function'.
    """
    from mypy.join import safe_join
    arg_types: list[Type] = [
        safe_join(t.arg_types[i], s.arg_types[i]) for i in range(len(t.arg_types))
    ]
    fallback = t.fallback if t.fallback.type.fullname != 'builtins.function' else s.fallback
    return t.copy_modified(
        arg_types=arg_types,
        ret_type=meet_types(t.ret_type, s.ret_type),
        fallback=fallback,
        name=None,
    )
.parametrize('learned_grid', [False, True])
def test_qc_rnn_learned_grid_mode(tmp_path, learned_grid):
    """QuantizationSimModel over GRU layers: calibrate, train, export, verify.

    With ``learned_grid`` the sim must switch both GRU wrappers into
    LEARN_ENCODINGS mode with learned-grid quantizers; training must update
    every GRU parameter; and the exported ONNX + encodings files must cover
    all GRU input/output tensors.

    NOTE(review): the ``.parametrize`` line above is a ``@pytest.mark``
    decorator whose prefix was lost during extraction.
    """
    torch.manual_seed(0)
    model = GruModel()
    input_shape = (4, 3, 4)
    dummy_input = (torch.rand(input_shape, requires_grad=True).to('cpu'), torch.rand((1, 3, 4), requires_grad=True).to('cpu'))
    quant_scheme = (QuantScheme.training_range_learning_with_tf_enhanced_init if learned_grid else QuantScheme.post_training_tf_enhanced)
    sim = QuantizationSimModel(model, dummy_input=dummy_input, quant_scheme=quant_scheme)
    sim.model.train()
    # Enable every input/param/output quantizer on both GRU wrappers.
    for module in [sim.model.gru1, sim.model.gru2]:
        for input_quantizer in module.input_quantizers.values():
            input_quantizer.enabled = True
        for (name, param) in module.named_parameters(recurse=False):
            module.param_quantizers[name].enabled = True
        for output_quantizer in module.output_quantizers.values():
            output_quantizer.enabled = True

    def forward_pass(model, args):
        # Calibration pass used by compute_encodings (ignores ``args``).
        model.eval()
        with torch.no_grad():
            output = model(*dummy_input)
        return output

    print(sim)
    sim.compute_encodings(forward_pass, None)
    print(sim)
    if learned_grid:
        assert (sim.model.gru1._mode == QcQuantizeOpMode.LEARN_ENCODINGS)
        assert (sim.model.gru2._mode == QcQuantizeOpMode.LEARN_ENCODINGS)
        assert all((isinstance(q, LearnedGridTensorQuantizer) for module in [sim.model.gru1, sim.model.gru2] for quantizers in [module.input_quantizers.values(), module.param_quantizers.values(), module.output_quantizers.values()] for q in quantizers))
    # Snapshot the GRU parameters, train a few steps, and confirm that every
    # parameter changed.
    params = {f'{i}.{name}': param.clone().detach() for (i, module) in enumerate([sim.model.gru1, sim.model.gru2]) for (name, param) in module.named_parameters(recurse=False)}
    optimizer = torch.optim.SGD(sim.model.parameters(), lr=0.05, momentum=0.5)
    for i in range(10):
        dummy_input = (torch.rand(input_shape, requires_grad=True).to('cpu'), torch.rand((1, 3, 4), requires_grad=True).to('cpu'))
        (o_qc_rnn, h_qc_rnn) = sim.model(*dummy_input)
        loss = (o_qc_rnn.flatten().sum() + h_qc_rnn.flatten().sum())
        loss.backward()
        optimizer.step()
    learned_params = {f'{i}.{name}': param.clone().detach() for (i, module) in enumerate([sim.model.gru1, sim.model.gru2]) for (name, param) in module.named_parameters(recurse=False)}
    for name in params:
        assert (not torch.equal(params[name], learned_params[name]))
    # Export and verify that the encodings file covers all GRU tensors; only
    # the optional-initializer placeholder '' may remain uncovered.
    sim.export(tmp_path, 'gru_learned', dummy_input)
    onnx_model = onnx.load(os.path.join(tmp_path, 'gru_learned.onnx'))
    for node in onnx_model.graph.node:
        if (node.op_type == 'GRU'):
            with open(os.path.join(tmp_path, 'gru_learned.encodings'), 'r') as encodings_file:
                encodings = json.load(encodings_file)
            encoding_tensors = set([*encodings['activation_encodings'].keys(), *encodings['param_encodings']])
            assert ((set([*node.input, *node.output]) - encoding_tensors) == {''})
def pca(mat):
    """Principal component analysis of an attribute matrix.

    Args:
        mat: 2-D array of shape (n_samples, n_features).

    Returns:
        (eig_vals, eig_vecs): eigenvalue magnitudes sorted in descending
        order and the matching eigenvectors (one per row), or (None, None)
        when n_samples < n_features (complex eigenvalues; skipped for now).
    """
    if (mat.shape[0] >= mat.shape[1]):
        (eig_vals, eig_vecs) = np.linalg.eig(np.cov(mat.T))
        # BUG FIX: zip() returns an iterator in Python 3 and has no .sort().
        # Sort explicitly, keyed on the eigenvalue magnitude only — keying on
        # the whole pair would compare ndarray eigenvectors on ties, which
        # raises in NumPy.
        eig_pairs = sorted(zip(np.abs(eig_vals), eig_vecs.T),
                           key=lambda pair: pair[0], reverse=True)
        (eig_vals, eig_vecs) = zip(*eig_pairs)
        eig_vals = np.asarray(eig_vals)
        eig_vecs = np.asarray(eig_vecs)
        info(('attribute matrix shape: %s' % (mat.shape,)))
        info(('Eigenvalues \n%s' % (np.asarray(eig_vals),)))
        info(('Cum Explained Variance \n%s' % (np.cumsum((eig_vals / np.sum(eig_vals))),)))
    else:
        info('#data < dimension. PCA has complex eigenvalues. ignore for now')
        (eig_vals, eig_vecs) = (None, None)
    return (eig_vals, eig_vecs)
class NNPolicy(Policy, Serializable):
    """Policy backed by a TF graph: actions are produced by running the
    action tensor with observations fed through a placeholder."""

    def __init__(self, env_spec, observation_ph, actions, scope_name=None):
        Serializable.quick_init(self, locals())
        self._observations_ph = observation_ph
        self._actions = actions
        # Fall back to the current TF variable scope when no name was given.
        if scope_name:
            self._scope_name = scope_name
        else:
            self._scope_name = tf.get_variable_scope().name
        super(NNPolicy, self).__init__(env_spec)

    def get_action(self, observation):
        # Single observation: add a batch axis, take the first action back out.
        return (self.get_actions(observation[None])[0], {})

    def get_actions(self, observations, currentdropoutpi=1.0, isbnpitrainmode=False):
        feeds = {self._observations_ph: observations}
        # Feed the dropout / batch-norm controls only when this policy was
        # configured with them.
        if ('todropoutpi' in self.__dict__.keys()) and self.todropoutpi:
            feeds[self.dropoutpi_placeholder] = currentdropoutpi
        if ('batchnormpi' in self.__dict__.keys()) and self.batchnormpi:
            feeds[self.isbnpitrainmode] = isbnpitrainmode
        return tf.get_default_session().run(self._actions, feeds)

    def log_diagnostics(self, paths):
        pass

    def get_params_internal(self, **tags):
        if tags:
            raise NotImplementedError
        prefix = self._scope_name
        # Non-empty scopes need a trailing slash to match collection names.
        if prefix != '':
            prefix = prefix + '/'
        return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, prefix)
class AdvertiserFlightReportView(AdvertiserAccessMixin, BaseReportView):
    """Report page for a single flight of an advertiser."""

    export_view = 'flight_report_export'
    template_name = 'adserver/reports/advertiser-flight.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)

        advertiser = get_object_or_404(
            Advertiser, slug=kwargs.get('advertiser_slug', ''))
        flight = get_object_or_404(
            Flight,
            slug=kwargs.get('flight_slug', ''),
            campaign__advertiser=advertiser,
        )

        queryset = self.get_queryset(
            advertiser=advertiser,
            flight=flight,
            start_date=context['start_date'],
            end_date=context['end_date'],
        )
        report = AdvertiserReport(queryset)
        report.generate()

        # Per-ad breakdown; only ads with at least one view are listed, but
        # every ad still gets a report attached for template access.
        advertisements = []
        for advertisement in flight.advertisements.prefetch_related('ad_types'):
            per_ad_report = AdvertiserReport(queryset.filter(advertisement=advertisement))
            per_ad_report.generate()
            advertisement.report = per_ad_report
            if per_ad_report.total['views']:
                advertisements.append(advertisement)

        context.update({
            'advertiser': advertiser,
            'flight': flight,
            'report': report,
            'advertisements': advertisements,
            'export_url': self.get_export_url(
                advertiser_slug=advertiser.slug, flight_slug=flight.slug),
        })
        return context
def unlock_view(ModelAdmin, request, pk):
    """Admin view that unlocks a sponsorship after an explicit POST confirmation.

    GET (or unconfirmed POST) renders the confirmation page; a confirmed POST
    attempts the unlock, reports success or failure via the admin messages
    framework, and redirects back to the sponsorship change page.
    """
    sponsorship = get_object_or_404(ModelAdmin.get_queryset(request), pk=pk)

    confirmed = (request.method.upper() == 'POST'
                 and request.POST.get('confirm') == 'yes')
    if confirmed:
        try:
            sponsorship.locked = False
            sponsorship.save(update_fields=['locked'])
            ModelAdmin.message_user(request, 'Sponsorship is now unlocked!', messages.SUCCESS)
        except InvalidStatusException as e:
            ModelAdmin.message_user(request, str(e), messages.ERROR)
        return redirect(reverse('admin:sponsors_sponsorship_change', args=[sponsorship.pk]))

    return render(request, 'sponsors/admin/unlock.html', context={'sponsorship': sponsorship})
class WaitLoadBar(WaitLoadBase, Gtk.HBox):
    """Horizontal wait bar: ellipsized label, progress text, pause/cancel buttons."""

    def __init__(self):
        super().__init__()

        self._label.set_alignment(0.0, 0.5)
        self._label.set_ellipsize(Pango.EllipsizeMode.END)

        # Replace each button's stock child with a small themed icon.
        self._cancel_button.remove(self._cancel_button.get_child())
        self._cancel_button.add(
            Gtk.Image.new_from_icon_name(Icons.PROCESS_STOP, Gtk.IconSize.MENU))
        self._pause_button.remove(self._pause_button.get_child())
        self._pause_button.add(
            Gtk.Image.new_from_icon_name(Icons.MEDIA_PLAYBACK_PAUSE, Gtk.IconSize.MENU))

        # Pack order matters: expanding label, then progress, pause, cancel.
        self.pack_start(self._label, True, True, 0)
        self.pack_start(self._progress, False, True, 6)
        self.pack_start(self._pause_button, False, True, 0)
        self.pack_start(self._cancel_button, False, True, 0)

        for widget in self.get_children():
            widget.show_all()

    def step(self, **values):
        result = super().step(**values)
        counts = {
            'current': format_int_locale(self.current),
            'all': format_int_locale(self.count),
        }
        self._progress.set_text(_('%(current)s of %(all)s') % counts)
        return result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.