code stringlengths 281 23.7M |
|---|
def test_unsupported_dtypes():
    """64-bit scalar dtypes are not valid texture formats and must be rejected."""
    for dtype in (np.float64, np.int64, np.uint64):
        data = np.zeros((10, 10), dtype)
        with pytest.raises(ValueError):
            gfx.Texture(data, dim=2)
def create_session():
    """Build a global TF session with the inference graph and cache it.

    Side effects: resets the default TF graph and stores the session and the
    graph's input/output tensors on DeepSpeechGlobalSession class attributes.
    """
    initialize_globals()
    early_training_checks()
    # Start from a clean graph; create_inference_graph adds to the default graph.
    tfv1.reset_default_graph()
    session = tfv1.Session(config=Config.session_config)
    # batch_size=1, n_steps=-1: single sample, variable-length sequences.
    (inputs, outputs, _) = create_inference_graph(batch_size=1, n_steps=(- 1))
    # Presumably restores the trained weights into `session` — see helper.
    load_graph_for_evaluation(session)
    DeepSpeechGlobalSession.session = session
    DeepSpeechGlobalSession.inputs = inputs
    DeepSpeechGlobalSession.outputs = outputs
class TestCurrentFunctions(TestCase):
    """Tests for processing the 'Current function [A]' parameter."""
    def test_constant_current(self):
        # A constant current should process down to a plain Scalar symbol.
        param = pybamm.electrical_parameters
        current = param.current_with_time
        parameter_values = pybamm.ParameterValues({'Current function [A]': 2})
        processed_current = parameter_values.process_symbol(current)
        self.assertIsInstance(processed_current, pybamm.Scalar)
    def test_get_current_data(self):
        # Use the US06 drive cycle (time/current CSV) as an interpolated current.
        current_data = pd.read_csv(os.path.join(pybamm.__path__[0], 'input', 'drive_cycles', 'US06.csv'), comment='#', names=['Time [s]', 'Current [A]'])
        (t, I) = (current_data['Time [s]'].values, current_data['Current [A]'].values)
        parameter_values = pybamm.ParameterValues({'Current function [A]': pybamm.Interpolant(t, I, pybamm.t, 'US06')})
        current_eval = parameter_values.process_symbol(pybamm.electrical_parameters.current_with_time)
        def current(t):
            # Evaluate the processed current symbol at time t.
            return current_eval.evaluate(t=t)
        standard_tests = StandardCurrentFunctionTests([current], always_array=True)
        standard_tests.test_all()
    def test_user_current(self):
        # User-supplied current A*sin(2*pi*omega*t), with omega a Parameter.
        def my_fun(t, A, omega):
            return (A * pybamm.sin((((2 * np.pi) * omega) * t)))
        param = pybamm.electrical_parameters
        A = 5
        omega = pybamm.Parameter('omega')
        # NOTE: `current` is first the user function used as the parameter value,
        # and is rebound to the symbolic current a few lines below.
        def current(t):
            return my_fun(t, A, omega)
        parameter_values = pybamm.ParameterValues({'omega': 3, 'Current function [A]': current})
        current = param.current_with_time
        current_eval = parameter_values.process_symbol(current)
        def user_current(t):
            return current_eval.evaluate(t=t)
        standard_tests = StandardCurrentFunctionTests([user_current])
        standard_tests.test_all()
        time = np.linspace(0, 3600, 600)
        # With omega=3 and A=5 the evaluated current must be 5*sin(2*pi*3*t).
        np.testing.assert_array_almost_equal(user_current(time), (5 * np.sin((((2 * np.pi) * 3) * time))))
class FlagZeroAsFailure(Bloq):
    """Bloq that flags the value nu = -0 (signed/sign-magnitude zero) as a failure.

    Attributes:
        num_bits_p: number of bits per signed component of nu.
        adjoint: when True this is the uncomputation, which costs no Toffolis.
    """
    num_bits_p: int
    adjoint: bool = False
    # Fix: the decorator was mangled to `_property` in this copy; `signature` is
    # a computed attribute, not a method call. (Upstream likely uses
    # @cached_property — confirm against the file's imports.)
    @property
    def signature(self) -> Signature:
        # 'nu' is three (num_bits_p + 1)-bit registers; the flag qubit is
        # allocated by this bloq (RIGHT side only).
        return Signature([Register('nu', (self.num_bits_p + 1), shape=(3,)), Register('flag_minus_zero', 1, side=Side.RIGHT)])
    def short_name(self) -> str:
        return '$\\nu\\ne -0$'
    def build_call_graph(self, ssa: 'SympySymbolAllocator') -> Set['BloqCountT']:
        # The adjoint uncomputes for free; the forward direction costs
        # 3*num_bits_p + 2 Toffolis.
        if self.adjoint:
            return {}
        else:
            return {(Toffoli(), ((3 * self.num_bits_p) + 2))}
def main():
    """Convert the training and test splits' annotations into label files."""
    args = parse_args()
    root_path = args.root_path
    img_dir = osp.join(root_path, 'imgs')
    gt_dir = osp.join(root_path, 'annotations')
    # Map each split to the label file it produces; both splits must exist.
    set_name = {}
    for split in ('training', 'test'):
        set_name[split] = split + '_label' + '.txt'
        assert osp.exists(osp.join(img_dir, split))
    for split, ann_name in set_name.items():
        print(f'Converting {split} into {ann_name}')
        with mmcv.Timer(print_tmpl='It takes {}s to convert totaltext annotation'):
            files = collect_files(osp.join(img_dir, split), osp.join(gt_dir, split))
            image_infos = collect_annotations(files, nproc=args.nproc)
            generate_ann(root_path, split, image_infos)
def _print_usage(error_message=None):
if error_message:
print(error_message)
print(('\nUsage: %s [OPTION]... PATH\n Delete PATH from a rdiff-backup repository including the current\n mirror and all its history.\nOptions:\n h, --help\n Display this help text and exit\n d, --dry-run\n Run the script without doing modifications to the repository.\n PATH\n A relative or absolute path to be deleted. This path must be\n inside a rdiff-backup repository.\n' % (sys.argv[0],)))
sys.exit((1 if error_message else 0)) |
class MoveShard(BaseModel, extra='forbid'):
    """Request to move a shard from one peer to another (unknown fields rejected)."""
    shard_id: int = Field(..., description='')
    to_peer_id: int = Field(..., description='')
    from_peer_id: int = Field(..., description='')
    method: Optional['ShardTransferMethod'] = Field(default=None, description='Method for transferring the shard from one node to another')
def fill_template_strings_from_tree(template_strings: dict[str, list[str]], tree: 'ConditionalMessageTree') -> None:
    """Append comma-joined enabled messages from *tree* into *template_strings*.

    For every category in *tree*, each entry contributes one string built from
    the messages whose flag is truthy; entries with no enabled message add
    nothing (but the category key is still created).  *template_strings* is
    mutated in place.
    """
    for category, entries in tree.items():
        # setdefault replaces the original check-then-assign; the project type
        # annotation is quoted so the module doesn't need it at import time.
        bucket = template_strings.setdefault(category, [])
        for entry in entries:
            enabled = [message for message, flag in entry.items() if flag]
            if enabled:
                bucket.append(', '.join(enabled))
class RealUrlExtractor():
    """Base class that resolves and periodically refreshes a room's stream URL.

    Subclasses override _is_url_valid (and whatever populates self.real_url).
    A class-level lock serializes extraction across threads.
    """
    __metaclass__ = ABCMeta
    lock = Lock()

    def __init__(self, room, auto_refresh_interval):
        self.room = room
        self.real_url = None
        self.last_valid_real_url = None
        self._extracting_real_url = False
        self.auto_refresh_interval = auto_refresh_interval
        self.last_refresh_time = datetime.min
        # Timer is created but not started here; reset_refresh_timer starts it.
        if self.auto_refresh_interval > 0:
            self.refresh_timer = Timer(self.auto_refresh_interval, self.refresh_real_url)

    def reset_refresh_timer(self, failover):
        """Restart the refresh timer; retry twice as fast after a failure."""
        if self.auto_refresh_interval > 0:
            self.refresh_timer.cancel()
            interval = (self.auto_refresh_interval / 2) if failover else self.auto_refresh_interval
            self.refresh_timer = Timer(interval, self.refresh_real_url)
            self.refresh_timer.start()

    def refresh_real_url(self):
        """Timer callback: re-extract the real URL under the shared lock."""
        # Fix: use the lock as a context manager so it is always released, and
        # narrow the bare `except:` to Exception (don't swallow KeyboardInterrupt).
        with RealUrlExtractor.lock:
            try:
                self._extract_real_url()
            except Exception:
                # Best-effort refresh; the next timer tick retries.
                pass

    def _extract_real_url(self):
        failover = True
        if self._is_url_valid(self.real_url):
            self.last_valid_real_url = self.real_url
            failover = False
        elif self.last_valid_real_url is not None:
            # Fall back to the last URL known to work.
            self.real_url = self.last_valid_real_url
        self.last_refresh_time = datetime.now()
        self.reset_refresh_timer(failover)
        if failover:
            log.logger.info('failed to extract real url')
        else:
            log.logger.info('extracted url: %s', self.real_url)

    def _is_url_valid(self, url):
        # Subclasses override; the base implementation accepts nothing.
        return False

    def get_real_url(self, bit_rate):
        """Extract the real URL when missing or when bit_rate == 'refresh'.

        NOTE(review): no value is returned; callers presumably read
        self.real_url afterwards — confirm.
        """
        if (self.real_url is None) or (bit_rate == 'refresh'):
            if not self._extracting_real_url:
                with RealUrlExtractor.lock:
                    self._extracting_real_url = True
                    try:
                        self._extract_real_url()
                    except Exception:
                        pass
                    finally:
                        # Fix: always clear the flag so waiters don't spin forever.
                        self._extracting_real_url = False
            else:
                # Another thread is extracting: wait for it to finish.
                while self._extracting_real_url:
                    sleep(100)
class TestMimicTPW2Reader(unittest.TestCase):
    """Tests for the low-resolution MIMIC TPW2 netCDF reader."""
    # Reader YAML definition used to locate the reader configuration.
    yaml_file = 'mimicTPW2_comp.yaml'
    def setUp(self):
        """Swap the real NetCDF base class for the fake in-memory handler."""
        from satpy._config import config_search_paths
        from satpy.readers.mimic_TPW2_nc import MimicTPW2FileHandler
        self.reader_configs = config_search_paths(os.path.join('readers', self.yaml_file))
        self.p = mock.patch.object(MimicTPW2FileHandler, '__bases__', (FakeNetCDF4FileHandlerMimicLow,))
        self.fake_handler = self.p.start()
        self.p.is_local = True
    def tearDown(self):
        """Stop the base-class patch."""
        self.p.stop()
    def test_init(self):
        """Reader matches the expected filename and creates file handlers."""
        from satpy.readers import load_reader
        r = load_reader(self.reader_configs)
        loadables = r.select_files_from_pathnames(['comp.130000.nc'])
        assert (len(loadables) == 1)
        r.create_filehandlers(loadables)
        assert r.file_handlers
    def test_load_mimic_float(self):
        """Float variables load with the expected platform/sensor/units/area attrs."""
        from satpy.readers import load_reader
        r = load_reader(self.reader_configs)
        with mock.patch('satpy.readers.mimic_TPW2_nc.netCDF4.Variable', xr.DataArray):
            loadables = r.select_files_from_pathnames(['comp.130000.nc'])
            r.create_filehandlers(loadables)
            ds = r.load(float_variables)
            assert (len(ds) == len(float_variables))
            for d in ds.values():
                assert (d.attrs['platform_shortname'] == 'aggregated microwave')
                assert (d.attrs['sensor'] == 'mimic')
                assert (d.attrs['units'] == 'mm')
                assert ('area' in d.attrs)
                assert (d.attrs['area'] is not None)
    def test_load_mimic_timedelta(self):
        """Date variables load in minutes and keep the file dtype."""
        from satpy.readers import load_reader
        r = load_reader(self.reader_configs)
        with mock.patch('satpy.readers.mimic_TPW2_nc.netCDF4.Variable', xr.DataArray):
            loadables = r.select_files_from_pathnames(['comp.130000.nc'])
            r.create_filehandlers(loadables)
            ds = r.load(date_variables)
            assert (len(ds) == len(date_variables))
            for d in ds.values():
                assert (d.attrs['platform_shortname'] == 'aggregated microwave')
                assert (d.attrs['sensor'] == 'mimic')
                assert (d.attrs['units'] == 'minutes')
                assert ('area' in d.attrs)
                assert (d.attrs['area'] is not None)
                assert (d.dtype == DEFAULT_FILE_DTYPE)
    def test_load_mimic_ubyte(self):
        """Ubyte variables load as uint8 and record their source key."""
        from satpy.readers import load_reader
        r = load_reader(self.reader_configs)
        with mock.patch('satpy.readers.mimic_TPW2_nc.netCDF4.Variable', xr.DataArray):
            loadables = r.select_files_from_pathnames(['comp.130000.nc'])
            r.create_filehandlers(loadables)
            ds = r.load(ubyte_variables)
            assert (len(ds) == len(ubyte_variables))
            for d in ds.values():
                assert (d.attrs['platform_shortname'] == 'aggregated microwave')
                assert (d.attrs['sensor'] == 'mimic')
                assert ('source_key' in d.attrs)
                assert ('area' in d.attrs)
                assert (d.attrs['area'] is not None)
                assert (d.dtype == np.uint8)
def comp_coverage(src_data, tgt_data, ali_data):
    """Mean fraction of source/target tokens covered by 's-t' alignment pairs.

    Returns (mean_source_coverage, mean_target_coverage).
    """
    src_ratios = []
    tgt_ratios = []
    for idx in tqdm(range(len(src_data))):
        src_tokens = src_data[idx].strip('\n').split()
        tgt_tokens = tgt_data[idx].strip('\n').split()
        pairs = ali_data[idx].strip('\n').split()
        aligned_src = set()
        aligned_tgt = set()
        for pair in pairs:
            s_idx, t_idx = pair.split('-')
            aligned_src.add(s_idx)
            aligned_tgt.add(t_idx)
        src_ratios.append(len(aligned_src) / len(src_tokens))
        tgt_ratios.append(len(aligned_tgt) / len(tgt_tokens))
    return (np.mean(src_ratios), np.mean(tgt_ratios))
class MyUnit(TrainUnit[Iterator[Batch]], EvalUnit[Iterator[Batch]]):
    """torchtnt train/eval unit driving a TorchRec sparse-dist train pipeline.

    Logs loss (and train AUROC) to TensorBoard every `log_every_n_steps` steps.
    """
    def __init__(self, module: torch.nn.Module, optimizer: torch.optim.Optimizer, device: torch.device, tb_logger: TensorBoardLogger, train_auroc: BinaryAUROC, log_every_n_steps: int) -> None:
        super().__init__()
        self.module = module
        # Pipeline overlaps sparse comms with dense compute for the module.
        self.pipeline: TrainPipelineSparseDist = TrainPipelineSparseDist(module, optimizer, device, execute_all_batches=True)
        self.optimizer = optimizer
        self.device = device
        self.train_auroc = train_auroc
        self.tb_logger = tb_logger
        self.log_every_n_steps = log_every_n_steps
    def train_step(self, state: State, data: Iterator[Batch]) -> None:
        """Advance the pipeline one batch; periodically log loss and AUROC."""
        step = self.train_progress.num_steps_completed
        (loss, logits, labels) = self.pipeline.progress(data)
        preds = torch.sigmoid(logits)
        self.train_auroc.update(preds, labels)
        if ((step % self.log_every_n_steps) == 0):
            # sync_and_compute aggregates the metric across ranks.
            accuracy = sync_and_compute(self.train_auroc)
            self.tb_logger.log('train_auroc', accuracy, step)
            self.tb_logger.log('loss', loss, step)
    def on_train_epoch_end(self, state: State) -> None:
        """Reset the AUROC metric so each epoch is scored independently."""
        super().on_train_epoch_end(state)
        self.train_auroc.reset()
    def eval_step(self, state: State, data: Iterator[Batch]) -> None:
        """Advance the pipeline one eval batch; periodically log eval loss."""
        step = self.eval_progress.num_steps_completed
        (loss, _, _) = self.pipeline.progress(data)
        if ((step % self.log_every_n_steps) == 0):
            self.tb_logger.log('evaluation_loss', loss, step)
class SoftmaxBlurBlock(nn.Module):
    """Temperature-scaled channel softmax followed by ReLU and a blur filter."""

    def __init__(self, in_filters, temp=10.0, sfilter=(1, 1), pad_mode='constant', **kwargs):
        super(SoftmaxBlurBlock, self).__init__()
        self.temp = temp
        self.relu = layers.relu()
        self.softmax = nn.Softmax(dim=1)
        self.blur = layers.blur(in_filters, sfilter=sfilter, pad_mode=pad_mode)

    def forward(self, x):
        # softmax over channels at temperature `temp`, rescaled back by `temp`.
        scaled = self.softmax(x / self.temp) * self.temp
        return self.blur(self.relu(scaled))

    def extra_repr(self):
        return 'temp=%.3e' % self.temp
class DSBUFFERDESC(ctypes.Structure):
    """ctypes layout of the DirectSound DSBUFFERDESC structure."""
    _fields_ = [('dwSize', DWORD), ('dwFlags', DWORD), ('dwBufferBytes', DWORD), ('dwReserved', DWORD), ('lpwfxFormat', LPWAVEFORMATEX)]
    def __repr__(self):
        # dwReserved is not shown; lpwfxFormat is dereferenced when non-NULL.
        return 'DSBUFFERDESC(dwSize={}, dwFlags={}, dwBufferBytes={}, lpwfxFormat={})'.format(self.dwSize, self.dwFlags, self.dwBufferBytes, (self.lpwfxFormat.contents if self.lpwfxFormat else None))
def compute_ne_helper(ce_sum: torch.Tensor, weighted_num_samples: torch.Tensor, pos_labels: torch.Tensor, neg_labels: torch.Tensor, eta: float) -> torch.Tensor:
    """Normalized entropy: cross-entropy sum divided by the base-rate entropy norm."""
    base_rate = pos_labels / weighted_num_samples
    normalizer = _compute_cross_entropy_norm(base_rate, pos_labels, neg_labels, eta)
    return ce_sum / normalizer
def get_token_network_by_address(chain_state: 'ChainState', token_network_address: 'TokenNetworkAddress') -> Optional['TokenNetworkState']:
    """Search every registry in *chain_state* for the token network with the
    given address; return it, or None when no registry knows the address.

    Fixes: the dangling `return token_network_state` (which could only ever be
    a falsy leftover) is replaced with an explicit `return None`; project type
    annotations are quoted so the module doesn't need them at import time.
    """
    for registry in chain_state.identifiers_to_tokennetworkregistries.values():
        token_network = registry.tokennetworkaddresses_to_tokennetworks.get(token_network_address)
        if token_network:
            return token_network
    return None
class DeviceNumberHypothesis(Hypothesis):
    """Hypothesis matching device-number specs: 'major<sep>minor' or a plain number.

    Fix: every method takes `cls` but the `@classmethod` decorators were
    stripped from this copy of the source; restored below.
    """
    @classmethod
    def _match_major_minor(cls, value):
        # 'major<non-digits>minor' -> packed device number, else falsy.
        major_minor_re = re.compile('^(?P<major>\\d+)(\\D+)(?P<minor>\\d+)$')
        match = major_minor_re.match(value)
        return (match and os.makedev(int(match.group('major')), int(match.group('minor'))))
    @classmethod
    def _match_number(cls, value):
        # Plain decimal device number, else falsy.
        number_re = re.compile('^(?P<number>\\d+)$')
        match = number_re.match(value)
        return (match and int(match.group('number')))
    @classmethod
    def match(cls, value):
        return (cls._match_major_minor(value) or cls._match_number(value))
    @classmethod
    def find_subsystems(cls, context):
        # Subsystems are the entries under <sys_path>/dev.
        sys_path = context.sys_path
        return os.listdir(os.path.join(sys_path, 'dev'))
    @classmethod
    def lookup(cls, context, key):
        """Resolve *key* as a device number in every subsystem, dropping failures."""
        func = wrap_exception(Devices.from_device_number)
        res = (func(context, s, key) for s in cls.find_subsystems(context))
        return frozenset((r for r in res if (r is not None)))
def gen_forward():
    """Generate dynamicconv_cuda_forward.cu: a switch over filter sizes that
    dispatches to templated CUDA kernels for each (kernel, padding) pair."""
    # Supported filter lengths and candidate CUDA block sizes.
    kernels = [3, 5, 7, 15, 31, 63, 127, 255]
    blocks = [32, 64, 128, 256]
    head = '\n/**\n * Copyright (c) Facebook, Inc. and its affiliates.\n *\n * This source code is licensed under the MIT license found in the\n * LICENSE file in the root directory of this source tree.\n */\n\n#include "dynamicconv_cuda.cuh"\n\nstd::vector<at::Tensor> dynamicconv_cuda_forward(at::Tensor input, at::Tensor weight, int padding_l) {\n\n    at::DeviceGuard g(input.device());\n    const auto minibatch = input.size(0);\n    const auto numFeatures = input.size(1);\n    const auto sequenceLength = input.size(2);\n\n    const auto numHeads = weight.size(1);\n    const auto filterSize = weight.size(2);\n\n    const auto numFiltersInBlock = numFeatures / numHeads;\n    const dim3 blocks(minibatch, numFeatures);\n\n    auto output = at::zeros_like(input);\n    auto stream = at::cuda::getCurrentCUDAStream();\n'
    switch = '\n    switch(filterSize) {\n'
    case_k = '\n        case {k}:\n'
    main_block = '\n            if (padding_l == {pad}) {{\n                AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "dynamicconv_forward", ([&] {{\n                    dynamicconv_forward_kernel<{k}, {b_size}, {pad}, scalar_t>\n                    <<<blocks, {b_size}, 0, stream>>>(\n                                input.data<scalar_t>(),\n                                weight.data<scalar_t>(),\n                                minibatch,\n                                sequenceLength,\n                                numFeatures,\n                                numFiltersInBlock,\n                                numHeads,\n                                output.data<scalar_t>());\n                }}));\n            }} else\n'
    bad_padding = '\n            {\n                std::cout << "WARNING: Unsupported padding size - skipping forward pass" << std::endl;\n            }\n            break;\n\n'
    end = '\n        default:\n            std::cout << "WARNING: Unsupported filter length passed - skipping forward pass" << std::endl;\n    }\n\n    return {output};\n}\n'
    with open('dynamicconv_cuda_forward.cu', 'w') as forward:
        forward.write(head)
        forward.write(switch)
        for k in kernels:
            # Smallest block size strictly larger than the filter length.
            b_size = 32
            for b in blocks:
                if (b > k):
                    b_size = b
                    break
            forward.write(case_k.format(k=k))
            # Emit branches for 'same' (k//2) and 'causal' (k-1) padding.
            for pad in [(k // 2), (k - 1)]:
                forward.write(main_block.format(k=k, b_size=b_size, pad=pad))
            forward.write(bad_padding)
        forward.write(end)
def test_memoize_key_signature():
    """Cache-key layout chosen by memoize for different callable signatures."""
    # Single fixed argument: the key is the bare argument value.
    mf = memoize((lambda x: False), cache={1: True})
    assert (mf(1) is True)
    assert (mf(2) is False)
    # Varargs: the key is the full tuple of positional arguments.
    mf = memoize((lambda x, *args: False), cache={(1,): True, (1, 2): 2})
    assert (mf(1) is True)
    assert (mf(2) is False)
    assert (mf(1, 1) is False)
    assert (mf(1, 2) == 2)
    assert (mf((1, 2)) is False)
    # Two fixed arguments: the key is the 2-tuple, so a single tuple argument
    # is a TypeError, not a cache hit.
    mf = memoize((lambda x, y: False), cache={(1, 2): True})
    assert (mf(1, 2) is True)
    assert (mf(1, 3) is False)
    assert raises(TypeError, (lambda : mf((1, 2))))
    # Zero arguments: the key is the empty tuple.
    mf = memoize((lambda : False), cache={(): True})
    assert (mf() is True)
    # Keyword-capable signature: key is (args, frozenset(kwargs)) when kwargs
    # are used, (args, None) otherwise.
    mf = memoize((lambda x, y=0: False), cache={((1,), frozenset((('y', 2),))): 2, ((1, 2), None): 3})
    assert (mf(1, y=2) == 2)
    assert (mf(1, 2) == 3)
    assert (mf(2, y=2) is False)
    assert (mf(2, 2) is False)
    assert (mf(1) is False)
    assert (mf((1, 2)) is False)
    mf = memoize((lambda x=0: False), cache={(None, frozenset((('x', 1),))): 1, ((1,), None): 2})
    assert (mf() is False)
    assert (mf(x=1) == 1)
    assert (mf(1) == 2)
def find_users_bash_config(home_dir):
    """Return the path of the first bash config file found under *home_dir*.

    Checks .bashrc, .bash_profile and .profile, in that order.

    Raises:
        RuntimeError: when none of the candidate files exists.
    """
    bash_files = ['/.bashrc', '/.bash_profile', '/.profile']
    for file in bash_files:
        candidate = home_dir + file
        if os.path.isfile(candidate):
            return candidate
    # Fix: typos in the original message ("Bummer looks, like", "~/.bashhrc").
    raise RuntimeError("Bummer, looks like we couldn't find a bash profile file. Do you have a ~/.profile or ~/.bashrc?")
class Command(BaseCommand):
    """Management command that refreshes items whose class-check is stale."""
    help = 'Update old news'
    def handle(self, *args, **options):
        """Re-run update_cls on items last checked 10 or more days ago."""
        prev_date = (datetime.datetime.now() - datetime.timedelta(days=10))
        # Items referenced by an ItemClsCheck with last_check <= cutoff.
        items = Item.objects.filter(id__in=ItemClsCheck.objects.filter(last_check__lte=prev_date).values_list('item', flat=True))
        update_cls(items)
class _Function(object):
    """Callable wrapping a TF graph computation: maps Python argument values to
    a feed dict, runs `outputs` (plus an update group), and returns the outputs."""
    def __init__(self, inputs, outputs, updates, givens):
        # Each input must be a placeholder/constant or provide make_feed_dict.
        for inpt in inputs:
            if ((not hasattr(inpt, 'make_feed_dict')) and (not ((type(inpt) is tf.Tensor) and (len(inpt.op.inputs) == 0)))):
                assert False, 'inputs should all be placeholders, constants, or have a make_feed_dict method'
        self.inputs = inputs
        # Map the last path component (without the ':0' suffix) to its tensor,
        # so inputs can also be fed by keyword.
        self.input_names = {inp.name.split('/')[(- 1)].split(':')[0]: inp for inp in inputs}
        updates = (updates or [])
        self.update_group = tf.group(*updates)
        # The update group is appended so it runs with every call; its result
        # is stripped from the returned values.
        self.outputs_update = (list(outputs) + [self.update_group])
        self.givens = ({} if (givens is None) else givens)
    def _feed_input(self, feed_dict, inpt, value):
        """Feed one input, honoring custom make_feed_dict and shape adjustment."""
        if hasattr(inpt, 'make_feed_dict'):
            feed_dict.update(inpt.make_feed_dict(value))
        else:
            feed_dict[inpt] = adjust_shape(inpt, value)
    def __call__(self, *args, **kwargs):
        assert ((len(args) + len(kwargs)) <= len(self.inputs)), 'Too many arguments provided'
        feed_dict = {}
        # Defaults from `givens` first, so explicit arguments can override them.
        for inpt in self.givens:
            feed_dict[inpt] = adjust_shape(inpt, feed_dict.get(inpt, self.givens[inpt]))
        for (inpt, value) in zip(self.inputs, args):
            self._feed_input(feed_dict, inpt, value)
        for (inpt_name, value) in kwargs.items():
            self._feed_input(feed_dict, self.input_names[inpt_name], value)
        # Drop the update-group result from the returned list.
        results = get_session().run(self.outputs_update, feed_dict=feed_dict)[:(- 1)]
        return results
def test_payee_timeout_must_be_equal_to_payer_timeout():
    """The mediator must forward the payer's lock expiration unchanged."""
    block_number = BlockNumber(5)
    pseudo_random_generator = random.Random()
    channels = mediator_make_channel_pair()
    # Incoming (payer) transfer locked until block 30.
    payer_transfer = factories.make_signed_transfer_for(channels[0], LockedTransferSignedStateProperties(expiration=BlockExpiration(30)))
    mediator_state = MediatorTransferState(UNIT_SECRETHASH, channels.get_routes())
    iteration = mediator.mediate_transfer(mediator_state, channels[0], channels.addresses_to_channel(), pseudo_random_generator, payer_transfer, block_number)
    # The outgoing SendLockedTransfer must carry the same lock expiration.
    assert search_for_item(iteration.events, SendLockedTransfer, {'transfer': {'lock': {'expiration': payer_transfer.lock.expiration}}})
@pytest.mark.parametrize('support_shape, shape, support_shape_offset, expected_support_shape, ndim_supp, consistent', [((10, 5), None, (0,), (10, 5), 1, True), ((10, 5), None, (1, 1), (10, 5), 1, True), (None, (10, 5), (0,), 5, 1, True), (None, (10, 5), (1,), 4, 1, True), (None, (10, 5, 2), (0,), 2, 1, True), (None, None, None, None, 1, True), ((10, 5), (10, 5), None, (10, 5), 2, True), ((10, 5), (11, 10, 5), None, (10, 5), 2, True), (None, (11, 10, 5), (0, 1, 0), (11, 9, 5), 3, True), ((10, 5), (10, 5, 5), (0,), (5,), 1, False), ((10, 5), (10, 5), (1, 1), (9, 4), 2, False)])
@pytest.mark.parametrize('info_source', ('shape', 'dims', 'observed'))
def test_get_support_shape(info_source, support_shape, shape, support_shape_offset, expected_support_shape, ndim_supp, consistent):
    """get_support_shape infers the same support shape from an explicit shape,
    from model dims, or from observed data — and asserts when inconsistent.

    NOTE(review): the two decorators were mangled to bare `.parametrize(...)`
    in this copy of the source; restored as `@pytest.mark.parametrize`.
    """
    if (info_source == 'shape'):
        inferred_support_shape = get_support_shape(support_shape=support_shape, shape=shape, support_shape_offset=support_shape_offset, ndim_supp=ndim_supp)
    elif (info_source == 'dims'):
        # Build dims/coords equivalent to `shape` and infer inside a Model.
        if (shape is None):
            dims = None
            coords = {}
        else:
            dims = tuple((str(i) for (i, _) in enumerate(shape)))
            coords = {str(i): range(shape) for (i, shape) in enumerate(shape)}
        with Model(coords=coords):
            inferred_support_shape = get_support_shape(support_shape=support_shape, dims=dims, support_shape_offset=support_shape_offset, ndim_supp=ndim_supp)
    elif (info_source == 'observed'):
        if (shape is None):
            observed = None
        else:
            observed = np.zeros(shape)
        inferred_support_shape = get_support_shape(support_shape=support_shape, observed=observed, support_shape_offset=support_shape_offset, ndim_supp=ndim_supp)
    if (not isinstance(inferred_support_shape, TensorVariable)):
        assert (inferred_support_shape == expected_support_shape)
    elif consistent:
        assert (inferred_support_shape.eval() == expected_support_shape).all()
    else:
        # Inconsistent inputs: the value only comes out with asserts removed,
        # and evaluating with asserts must raise.
        f = pytensor.function([], inferred_support_shape, mode=Mode().including('local_remove_all_assert'))
        assert (f() == expected_support_shape).all()
        with pytest.raises(AssertionError, match='support_shape does not match'):
            inferred_support_shape.eval()
@with_model
def test_energy():
    """Energy-based rule and pattern declarations build, and the rule's repr
    records energy=True.

    NOTE(review): the decorator was mangled to bare `_model` in this copy;
    pysb tests conventionally use `@with_model` here — confirm against the
    file's imports.
    """
    Monomer('A', ['a', 'b'])
    Monomer('B', ['a'])
    Parameter('RT', 2)
    Parameter('A_0', 10)
    Parameter('AB_0', 10)
    Parameter('phi', 0)
    # Energies are expressed in units of RT.
    Expression('E_AAB_RT', ((- 5) / RT))
    Expression('E0_AA_RT', ((- 1) / RT))
    Rule('A_dimerize', ((A(a=None) + A(a=None)) | (A(a=1) % A(a=1))), phi, E0_AA_RT, energy=True)
    EnergyPattern('epAAB', ((A(a=1) % A(a=1, b=2)) % B(a=2)), E_AAB_RT)
    Initial(A(a=None, b=None), A_0)
    Initial((A(a=None, b=1) % B(a=1)), AB_0)
    assert ('energy=True' in repr(A_dimerize))
def Lop(f: Union[(Variable, Sequence[Variable])], wrt: Union[(Variable, Sequence[Variable])], eval_points: Union[(Variable, Sequence[Variable])], consider_constant: Optional[Sequence[Variable]]=None, disconnected_inputs: Literal[('ignore', 'warn', 'raise')]='raise') -> Union[(Optional[Variable], Sequence[Optional[Variable]])]:
    """Compute the L-operator: the product of `eval_points` with the Jacobian
    of `f` with respect to `wrt` (v^T J), implemented via known_grads.

    The return value mirrors the container type of `wrt` (list, tuple, or a
    single variable).
    """
    # Normalize eval_points, f and wrt to lists of tensor variables.
    if (not isinstance(eval_points, (list, tuple))):
        _eval_points: list[Variable] = [pytensor.tensor.as_tensor_variable(eval_points)]
    else:
        _eval_points = [pytensor.tensor.as_tensor_variable(x) for x in eval_points]
    if (not isinstance(f, (list, tuple))):
        _f: list[Variable] = [pytensor.tensor.as_tensor_variable(f)]
    else:
        _f = [pytensor.tensor.as_tensor_variable(x) for x in f]
    grads = list(_eval_points)
    if (not isinstance(wrt, (list, tuple))):
        _wrt: list[Variable] = [pytensor.tensor.as_tensor_variable(wrt)]
    else:
        _wrt = [pytensor.tensor.as_tensor_variable(x) for x in wrt]
    # One evaluation point is required per output of f.
    assert (len(_f) == len(grads))
    known = dict(zip(_f, grads))
    # grad with known_grads seeds backprop with eval_points instead of a cost.
    ret = grad(cost=None, known_grads=known, consider_constant=consider_constant, wrt=_wrt, disconnected_inputs=disconnected_inputs)
    using_list = isinstance(wrt, list)
    using_tuple = isinstance(wrt, tuple)
    return as_list_or_tuple(using_list, using_tuple, ret)
class AGNewsProcessor_sep(DataProcessor):
    """AG News processor keeping title and description as separate segments."""

    def get_train_examples(self, data_dir):
        rows = pd.read_csv(os.path.join(data_dir, 'train.csv'), header=None).values
        return self._create_examples(rows, 'train')

    def get_dev_examples(self, data_dir):
        rows = pd.read_csv(os.path.join(data_dir, 'test.csv'), header=None).values
        return self._create_examples(rows, 'dev')

    def get_labels(self):
        # AG News has four topic classes, labelled 1-4 in the CSV.
        return ['1', '2', '3', '4']

    def _create_examples(self, lines, set_type):
        """Turn CSV rows (label, title, description) into InputExamples."""
        examples = []
        for i, line in enumerate(lines):
            guid = '%s-%s' % (set_type, i)
            text_a = tokenization.convert_to_unicode(line[1])
            text_b = tokenization.convert_to_unicode(line[2])
            label = tokenization.convert_to_unicode(str(line[0]))
            if i % 1000 == 0:
                # Echo a sample every 1000 rows so long conversions show progress.
                print(i)
                print('guid=', guid)
                print('text_a=', text_a)
                print('text_b=', text_b)
                print('label=', label)
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples
def visualize_sample_with_prediction(image, gt, prediction, filename=None):
    """Plot a normalized image tensor with its ground-truth and predicted masks.

    Args:
        image: CHW float tensor normalized with ImageNet mean/std — TODO confirm.
        gt: HW class-index tensor (ground truth).
        prediction: HW class-index tensor (model output).
        filename: when given, save the figure there instead of showing it.
    """
    cmap = color_map()
    image = image.cpu().numpy()
    # Undo per-channel normalization (std * x + mean).
    image[0] = ((image[0] * 0.229) + 0.485)
    image[1] = ((image[1] * 0.224) + 0.456)
    image[2] = ((image[2] * 0.225) + 0.406)
    # CHW float in [0,1] -> HWC uint8 in [0,255].
    image = np.transpose((255 * image), (1, 2, 0)).astype(np.uint8)
    gt = gt.cpu().numpy().astype(np.uint8)
    # Colorize the ground-truth mask class by class.
    array_gt = np.empty((gt.shape[0], gt.shape[1], cmap.shape[1]), dtype=cmap.dtype)
    for class_i in np.unique(gt):
        array_gt[(gt == class_i)] = cmap[class_i]
    prediction = prediction.cpu().numpy().astype(np.uint8)
    # Colorize the prediction mask the same way.
    array_pred = np.empty((prediction.shape[0], prediction.shape[1], cmap.shape[1]), dtype=cmap.dtype)
    for class_i in np.unique(prediction):
        array_pred[(prediction == class_i)] = cmap[class_i]
    (fig, axes) = plt.subplots(3)
    axes[0].imshow(image)
    axes[1].imshow(array_gt)
    axes[2].imshow(array_pred)
    plt.axis('off')
    plt.tight_layout()
    if (filename is not None):
        plt.savefig(filename)
    else:
        plt.show()
@mock.patch('aimet_common.connected_graph.connectedgraph.ConnectedGraph.__abstractmethods__', set())
def test_export_connected_graph():
    """Exported connected-graph JSON has ops plus activation/parameter products.

    NOTE(review): the decorator was mangled to a bare argument tuple in this
    copy; reconstructed as mock.patch with new=set(), which clears the abstract
    methods so the dummy ConnectedGraph can be instantiated — confirm against
    the file's imports.
    """
    conn_graph = get_dummy_connected_graph()
    connectedgraph_utils.export_connected_graph(conn_graph, '/tmp/', 'dummy_cg_export')
    with open('/tmp/dummy_cg_export.json', 'r') as cg_export_file:
        cg_export = json.load(cg_export_file)
    assert ('ops' in cg_export)
    assert ('products' in cg_export)
    assert ('activations' in cg_export['products'])
    assert ('parameters' in cg_export['products'])
    assert (len(cg_export['ops']) == 5)
    assert (len(cg_export['products']['activations']) == 5)
    assert (len(cg_export['products']['parameters']) == 3)
    # Clean up the export so repeated runs start fresh.
    if os.path.exists('/tmp/dummy_cg_export.json'):
        os.remove('/tmp/dummy_cg_export.json')
class CmdDoff(Command):
    # No class docstring on purpose: Evennia surfaces __doc__ as in-game help,
    # and the original command defined none.
    key = 'doff'
    help_category = 'combat'

    def func(self):
        # Removing armor is blocked while fighting.
        caller = self.caller
        if is_in_combat(caller):
            caller.msg("You can't doff armor in a fight!")
            return
        worn = caller.db.worn_armor
        if not worn:
            caller.msg("You aren't wearing any armor!")
            return
        caller.db.worn_armor = None
        caller.location.msg_contents('%s removes %s.' % (caller, worn))
def _get_users_handler(auth_type):
    """Build a users handler backed by the test LDAP config for *auth_type*."""
    config = {
        'AUTHENTICATION_TYPE': auth_type,
        'LDAP_BASE_DN': ['dc=quay', 'dc=io'],
        'LDAP_ADMIN_DN': 'uid=testy,ou=employees,dc=quay,dc=io',
        'LDAP_ADMIN_PASSWD': 'password',
        'LDAP_USER_RDN': ['ou=employees'],
    }
    return get_users_handler(config, None, None)
def test_history_with_span_end(base_app):
    """`history :2` prints only entries 1 and 2 of the command history."""
    run_cmd(base_app, 'help')
    run_cmd(base_app, 'shortcuts')
    run_cmd(base_app, 'help history')
    (out, err) = run_cmd(base_app, 'history :2')
    expected = normalize('\n 1 help\n 2 shortcuts\n')
    assert (out == expected)
    # The last history command's result index should be 2.
    verify_hi_last_result(base_app, 2)
def find_code_in_transformers(object_name):
    """Return the source code of a class or function in the transformers library.

    Args:
        object_name: dotted path, e.g. 'models.bert.modeling_bert.BertModel'.

    Raises:
        ValueError: when no module file or no matching object can be found.
    """
    parts = object_name.split('.')
    i = 0
    # Find the longest prefix of `parts` that names a module file on disk.
    module = parts[i]
    while ((i < len(parts)) and (not os.path.isfile(os.path.join(TRANSFORMERS_PATH, f'{module}.py')))):
        i += 1
        if (i < len(parts)):
            module = os.path.join(module, parts[i])
    if (i >= len(parts)):
        raise ValueError(f'`object_name` should begin with the name of a module of transformers but got {object_name}.')
    with open(os.path.join(TRANSFORMERS_PATH, f'{module}.py'), 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Descend into nested class/def scopes, one remaining name at a time.
    indent = ''
    line_index = 0
    for name in parts[(i + 1):]:
        # Fix: raw f-string so `\s` is a regex class, not an (invalid) string
        # escape that would warn now and break on future Python versions.
        while ((line_index < len(lines)) and (re.search(rf'^{indent}(class|def)\s+{name}(\(|:)', lines[line_index]) is None)):
            line_index += 1
        # NOTE(review): upstream uses a four-space indent step here; this copy
        # shows a single space — confirm against the repo's formatting.
        indent += ' '
        line_index += 1
    if (line_index >= len(lines)):
        raise ValueError(f' {object_name} does not match any function or class in {module}.')
    # Collect the body (more-indented or blank lines), then trim trailing blanks.
    start_index = line_index
    while ((line_index < len(lines)) and _should_continue(lines[line_index], indent)):
        line_index += 1
    while (len(lines[(line_index - 1)]) <= 1):
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return ''.join(code_lines)
def wrap_text(text, font, allowed_width):
    """Greedily wrap *text* into lines at most *allowed_width* pixels wide.

    Args:
        text: the string to wrap.
        font: object with a pygame-style size(str) -> (width, height) method.
        allowed_width: maximum line width in pixels.

    Returns:
        (lines, max_line_width, max_line_height).  A single word wider than
        allowed_width is hard-cut character by character and the cut tail is
        pushed back onto the word queue.
    """
    words = text.split()
    lines = []
    max_lw = 0
    max_lh = 0
    while (len(words) > 0):
        line_words = []
        while (len(words) > 0):
            line_words.append(words.pop(0))
            if (len(line_words) == 1):
                (lw, lh) = font.size(line_words[0])
                max_lh = lh
                max_lw = max(max_lw, lw)
                if (lw > allowed_width):
                    # First word alone is too wide: trim characters off its end
                    # until it fits, re-queue the trimmed tail, end the line.
                    max_lw = allowed_width
                    cut = []
                    while (lw > allowed_width):
                        cut.append(line_words[0][(- 1)])
                        line_words[0] = line_words[0][:(- 1)]
                        (lw, lh) = font.size(line_words[0])
                    cut.reverse()
                    words.insert(0, ''.join(cut))
                    break
            else:
                # Peek: would the next word still fit on this line?
                (lw, lh) = font.size(' '.join((line_words + words[:1])))
                if (lw > allowed_width):
                    break
                max_lw = max(max_lw, lw)
        line = ' '.join(line_words)
        lines.append(line)
    return (lines, max_lw, max_lh)
class DocstringSignatureGenerator(SignatureGenerator):
    """Infer stub signatures by parsing docstrings (incl. pybind11 style)."""
    def get_function_sig(self, default_sig: FunctionSig, ctx: FunctionContext) -> (list[FunctionSig] | None):
        """Return signatures parsed from the docstring, or None when unknown."""
        inferred = infer_sig_from_docstring(ctx.docstring, ctx.name)
        if inferred:
            assert (ctx.docstring is not None)
            if is_pybind11_overloaded_function_docstring(ctx.docstring, ctx.name):
                # Drop the catch-all overload pybind11 appends at the end.
                del inferred[(- 1)]
        if ctx.class_info:
            if ((not inferred) and (ctx.name == '__init__')):
                # Constructors: fall back to signatures in the class docstring.
                inferred = infer_sig_from_docstring(ctx.class_info.docstring, ctx.class_info.name)
                if inferred:
                    inferred = [sig._replace(name='__init__') for sig in inferred]
            return self.remove_self_type(inferred, ctx.class_info.self_var)
        else:
            return inferred
    def get_property_type(self, default_type: (str | None), ctx: FunctionContext) -> (str | None):
        """Return a property's type parsed from its docstring, or None."""
        if (ctx.docstring is not None):
            # Try: anonymous '-> type', then named return type, then the
            # plain property-docstring format.
            inferred = infer_ret_type_sig_from_anon_docstring(ctx.docstring)
            if inferred:
                return inferred
            inferred = infer_ret_type_sig_from_docstring(ctx.docstring, ctx.name)
            if inferred:
                return inferred
            inferred = infer_prop_type_from_docstring(ctx.docstring)
            return inferred
        else:
            return None
class TextStyle(AnsiSequence, Enum):
    """ANSI SGR (Select Graphic Rendition) text-style codes.

    str(member) renders the complete escape sequence: CSI + code + 'm'.
    """
    RESET_ALL = 0
    # Empty parameter: terminals treat 'CSI m' the same as 'CSI 0 m' (reset).
    ALT_RESET_ALL = ''
    INTENSITY_BOLD = 1
    INTENSITY_DIM = 2
    INTENSITY_NORMAL = 22
    ITALIC_ENABLE = 3
    ITALIC_DISABLE = 23
    OVERLINE_ENABLE = 53
    OVERLINE_DISABLE = 55
    STRIKETHROUGH_ENABLE = 9
    STRIKETHROUGH_DISABLE = 29
    UNDERLINE_ENABLE = 4
    UNDERLINE_DISABLE = 24
    def __str__(self) -> str:
        """Render this style as a full ANSI escape sequence."""
        return f'{CSI}{self.value}m'
class DiffEqWrapper(nn.Module):
    """Adapt a module whose forward is f(t, y) or f(y) to the ODE-solver
    convention forward(t, y), dispatching on the wrapped forward's parameters."""

    def __init__(self, module):
        super(DiffEqWrapper, self).__init__()
        self.module = module

    def forward(self, t, y):
        params = signature(self.module.forward).parameters
        if 't' in params:
            return self.module.forward(t, y)
        if 'y' in params:
            return self.module.forward(y)
        raise ValueError('Differential equation needs to either take (t, y) or (y,) as input.')

    def __repr__(self):
        return self.module.__repr__()
def _compute_dloss_by_dx(encoding_min: tf.Variable, encoding_max: tf.Variable, inputs: tf.Tensor, op_mode: tf.Variable, grad: tf.Tensor) -> tf.Variable:
    """Straight-through input gradient: pass `grad` where
    encoding_min <= x <= encoding_max, zero elsewhere; when op_mode == 3 the
    gradient passes through entirely."""
    x = tf.cast(inputs[0], tf.float32)
    low = tf.cast(encoding_min, tf.float32)
    high = tf.cast(encoding_max, tf.float32)
    mode = tf.cast(op_mode, tf.int8)
    # 1.0 inside [low, high], 0.0 outside.
    below_high = tf.compat.v2.where(tf.less_equal(x, high), 1.0, 0.0)
    mask = tf.compat.v2.where(tf.less_equal(low, x), below_high, 0.0)
    masked_grad = mask * grad
    return tf.cond(tf.equal(mode, 3), (lambda: grad), (lambda: masked_grad))
def get_inc(rp, typestr, inc_time):
    """Return the increment path for *rp*: name.<timestamp>.<typestr>.

    Args:
        rp: repository path object to derive the increment path from.
        typestr: increment type suffix (joined as bytes).
        inc_time: increment time, encoded into the filename via Time.timetostring.

    Logs a fatal error if the computed increment path already exists.
    """
    def addtostr(s):
        # name -> name.<timestring>.<typestr>, all components fs-encoded bytes.
        return b'.'.join(map(os.fsencode, (s, Time.timetostring(inc_time), typestr)))
    if rp.index:
        # Rebuild the path with only the last index component renamed.
        incrp = rp.__class__(rp.conn, rp.base, (rp.index[:(- 1)] + (addtostr(rp.index[(- 1)]),)))
    else:
        (dirname, basename) = rp.dirsplit()
        incrp = rp.__class__(rp.conn, dirname, (addtostr(basename),))
    if incrp.lstat():
        log.Log.FatalError("New increment path '{ip}' shouldn't exist, something went really wrong.".format(ip=incrp))
    return incrp
def _get_single_hud_text(pickup_name: str, memo_data: dict[(str, str)], resources: ResourceGainTuple) -> str:
    """Format the memo template for *pickup_name* with, per resource, its
    friendly name -> |quantity| and its delta name -> 'increased'/'decreased'."""
    format_args = {}
    for resource, quantity in resources:
        format_args[item_names.resource_user_friendly_name(resource)] = abs(quantity)
    # Delta keys are filled second, matching the original merge order.
    for resource, quantity in resources:
        format_args[item_names.resource_user_friendly_delta(resource)] = 'increased' if quantity >= 0 else 'decreased'
    return memo_data[pickup_name].format(**format_args)
def _get_bpe(in_path: str, model_prefix: str, vocab_size: int):
    """Train a SentencePiece BPE model over the corpus at *in_path*."""
    arguments = ' '.join([
        f'--input={in_path}',
        f'--model_prefix={model_prefix}',
        f'--model_type=bpe',
        f'--vocab_size={vocab_size}',
        '--character_coverage=1.0',
        '--normalization_rule_name=identity',
        f'--num_threads={cpu_count()}',
    ])
    sp.SentencePieceTrainer.Train(arguments)
def _wrap_core(wrapping_key: bytes, a: bytes, r: list[bytes]) -> bytes:
    """Core of the AES key wrap algorithm (RFC 3394 section 2.2.1).

    Args:
        wrapping_key: raw AES key used as the key-encryption key.
        a: 8-byte integrity check register (the IV on the first call).
        r: list of 8-byte plaintext blocks; mutated in place into wrapped blocks.

    Returns:
        The wrapped key: the final A register followed by all R blocks.
    """
    # ECB is the raw per-block AES primitive the wrap construction requires,
    # not a data-encryption mode choice.
    encryptor = Cipher(AES(wrapping_key), ECB()).encryptor()
    n = len(r)
    for j in range(6):
        for i in range(n):
            # B = AES(K, A | R[i]); A = MSB64(B) ^ t with t = n*j + i + 1.
            b = encryptor.update((a + r[i]))
            a = (int.from_bytes(b[:8], byteorder='big') ^ (((n * j) + i) + 1)).to_bytes(length=8, byteorder='big')
            r[i] = b[(- 8):]
    # ECB has no buffered state; finalize must produce nothing.
    assert (encryptor.finalize() == b'')
    return (a + b''.join(r))
def count_parameters(model, verbose=True):
    """Return (total, trainable) parameter element counts of *model*.

    When *verbose* is true, the counts are also printed.
    """
    total = 0
    trainable = 0
    for p in model.parameters():
        numel = p.numel()
        total += numel
        if p.requires_grad:
            trainable += numel
    if verbose:
        print('Parameter Count: all {:,d}; trainable {:,d}'.format(total, trainable))
    return (total, trainable)
def PGM_feature_generation(opt):
    """Generate PGM features for every video in the dataset, splitting the
    work across opt['pgm_thread'] worker processes.

    Bug fixes vs. the original:
    - `dict.keys()` is a non-sliceable view in Python 3; materialize it as
      a list before slicing.
    - `/` produces a float in Python 3, which is not a valid slice index;
      use integer (floor) division instead.
    """
    video_dict = getDatasetDict(opt)
    video_list = list(video_dict.keys())
    num_videos = len(video_list)
    num_threads = opt['pgm_thread']
    num_videos_per_thread = num_videos // num_threads
    processes = []
    for tid in range(num_threads - 1):
        tmp_video_list = video_list[(tid * num_videos_per_thread):((tid + 1) * num_videos_per_thread)]
        p = mp.Process(target=generateFeature, args=(opt, tmp_video_list, video_dict))
        p.start()
        processes.append(p)
    # The last worker picks up the remainder of the list.
    tmp_video_list = video_list[((num_threads - 1) * num_videos_per_thread):]
    p = mp.Process(target=generateFeature, args=(opt, tmp_video_list, video_dict))
    p.start()
    processes.append(p)
    for p in processes:
        p.join()
_dataframe_method
_alias(rows='index')
def select(df: pd.DataFrame, *args, index: Any=None, columns: Any=None, axis: str='columns', invert: bool=False) -> pd.DataFrame:
    """Select rows and/or columns of *df*.

    Positional *args* are applied along *axis* ('index' or 'columns');
    alternatively pass selections through the *index*/*columns* keywords.
    *invert* flips the selection. Mixing both styles raises ValueError.
    """
    if not args:
        return _select(df, rows=index, columns=columns, invert=invert)
    check('invert', invert, [bool])
    if (index is not None) or (columns is not None):
        raise ValueError('Either provide variable args with the axis parameter, or provide arguments to the index and/or columns parameters.')
    if axis == 'index':
        return _select(df, rows=list(args), columns=columns, invert=invert)
    if axis == 'columns':
        return _select(df, columns=list(args), rows=index, invert=invert)
    raise ValueError("axis should be either 'index' or 'columns'.")
class SystemSendToChannel(COMMAND_DEFAULT_CLASS):
    """Send a message to a channel the caller is connected to.

    Input syntax (see parse): "<channelname>:<message>".
    """
    key = CMD_CHANNEL
    locks = 'cmd:all()'

    def parse(self):
        # Split only on the first colon so the message may contain colons.
        name, message = self.args.split(':', 1)
        self.args = (name.strip(), message.strip())

    def func(self):
        caller = self.caller
        channelkey, msg = self.args
        if not msg:
            caller.msg('Say what?')
            return
        channel = ChannelDB.objects.get_channel(channelkey)
        if not channel:
            caller.msg("Channel '%s' not found." % channelkey)
            return
        if not channel.has_connection(caller):
            caller.msg("You are not connected to channel '%s'." % channelkey)
            return
        if not channel.access(caller, 'send'):
            caller.msg("You are not permitted to send to channel '%s'." % channelkey)
            return
        # Prefix with channel and sender, then distribute as a Msg object.
        msg = '[%s] %s: %s' % (channel.key, caller.name, msg)
        msgobj = create.create_message(caller, msg, channels=[channel])
        channel.msg(msgobj)
class TestChangeKeyboardMapping(EndianTest):
    """Round-trip (pack/unpack) test for the X11 ChangeKeyboardMapping request."""

    def setUp(self):
        # NOTE(review): the keysym values inside the nested lists were lost
        # during extraction ("[[, , ], ...]" is not valid Python) -- the
        # original literal must be restored from the upstream test file.
        self.req_args_0 = {'first_keycode': 157, 'keysyms': [[, , ], [, , ], [, , ], [, , ], [, , ], [, , ], [, , ], [, , ], [, , ], [, , ], [, , ], [, , ], [, , ], [, , ], [, , ], [, , ], [, , ], [, , ], [, , ], [, , ]]}
        # Expected on-the-wire bytes for the request above.
        self.req_bin_0 = b"d\x14>\x00\x9d\x03\x00\x001\xfc\xf4nN\xef\x18;\x90\xe5B\x17\x15\xbao\x06R\xcdL!\xb9y\x8604\xb1\xe6).\xaf\x17\x1bl\x963:\\\x15\xd6)\xcf73+\x9a\xf9oa\x0b\x0b\x98p25\x870\xac\xe9\x9d#\x9d\xdbA`~\x0e\xc1y\xb9~|j\x18\xd6\xf55\xf6nJ h\\\xdejF\xe7{\n\xb2\x033<\xc3&{\x02\xe4\x06\xe0_\xff\xae\x0bE\xb0\xbd\xaa\x06\xc2\xfa\xfc\x7f-\x81\xd8y!\x80\xcb,\xd3O\xb3\x1c\xfb\xeb7S\x9c|\xc8\x08\xcfq\xffc\xa9U\x88zQ\xb2lS\xb8-'\x05\xb6\xb6\x8a`|\xca\xf8\x03\x92\xe0\x8cy\xb0\xfa\xa6\x1f\x9a\xa6\x84\x19\xdc\xd9kk\xfb\xcf\x9b\x02F\xed\x87O\xc5C\x81\x13\x01\xa4\x88\x1eJ\x02(\x10\x19\xa6\x9d<\xd8\xd3\xb0U8P\xa4\x0e\xb1'0H,\x1aPy\x1dC\xe6\x1b\xcc;\x86;x\x11\x88Y\xef\x97\xcc \x8e\xb7)b\x03\xf57/[\x1dz\x15"

    def testPackRequest0(self):
        # Packing the args must reproduce the reference byte string exactly.
        bin = request.ChangeKeyboardMapping._request.to_binary(*(), **self.req_args_0)
        self.assertBinaryEqual(bin, self.req_bin_0)

    def testUnpackRequest0(self):
        # Parsing the reference bytes must consume them fully and yield the args.
        (args, remain) = request.ChangeKeyboardMapping._request.parse_binary(self.req_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.req_args_0)
(simple_typed_attrs(defaults=True, kw_only=False, newtypes=False))
def test_simple_roundtrip_defaults_tuple(attr_and_vals):
    """A defaulted attrs class must round-trip through tuple (un)structuring."""
    attribute, _ = attr_and_vals
    cl = make_class('HypClass', {'a': attribute})
    converter = Converter(unstruct_strat=UnstructureStrategy.AS_TUPLE)
    inst = cl()
    # Structuring an empty mapping must fall back to the defaults.
    from_defaults = converter.structure({}, cl)
    assert converter.unstructure(from_defaults) == converter.unstructure(inst)
    # Full round-trip must reproduce the instance.
    assert inst == converter.structure(converter.unstructure(inst), cl)
class TestORegan2022(TestCase):
    """Spot-check the ORegan2022 pybamm parameter set by evaluating each
    functional parameter at a reference operating point and comparing the
    result against hard-coded expected values (to 4 decimal places)."""

    def test_functions(self):
        param = pybamm.ParameterValues('ORegan2022')
        T = pybamm.Scalar(298.15)
        # Maximum lithium concentrations, used as arguments below.
        c_p_max = param['Maximum concentration in positive electrode [mol.m-3]']
        c_n_max = param['Maximum concentration in negative electrode [mol.m-3]']
        # Mapping: parameter name -> ([arguments], expected value).
        fun_test = {'Positive electrode OCP entropic change [V.K-1]': ([0.5, c_p_max], (- 9.794e-07)), 'Positive electrode specific heat capacity [J.kg-1.K-1]': ([298.15], 902.6502), 'Positive electrode diffusivity [m2.s-1]': ([0.5, 298.15], 7.2627e-15), 'Positive electrode exchange-current density [A.m-2]': ([1000.0, 10000.0, c_p_max, 298.15], 2.1939), 'Positive electrode OCP [V]': ([0.5], 3.972), 'Positive electrode conductivity [S.m-1]': ([298.15], 0.8473), 'Positive electrode thermal conductivity [W.m-1.K-1]': ([T], 0.8047), 'Negative electrode OCP entropic change [V.K-1]': ([0.5, c_n_max], (- 2.646e-07)), 'Negative electrode specific heat capacity [J.kg-1.K-1]': ([298.15], 847.7155), 'Negative electrode diffusivity [m2.s-1]': ([0.5, 298.15], 2.8655e-16), 'Negative electrode exchange-current density [A.m-2]': ([1000.0, 10000.0, c_n_max, 298.15], 1.0372), 'Negative electrode OCP [V]': ([0.5], 0.1331), 'Negative electrode thermal conductivity [W.m-1.K-1]': ([T], 3.7695), 'Positive current collector specific heat capacity [J.kg-1.K-1]': ([T], 897.1585), 'Negative current collector specific heat capacity [J.kg-1.K-1]': ([T], 388.519), 'Negative current collector thermal conductivity [W.m-1.K-1]': ([T], 400.8491), 'Separator specific heat capacity [J.kg-1.K-1]': ([298.15], 1130.9656)}
        for (name, value) in fun_test.items():
            self.assertAlmostEqual(param.evaluate(param[name](*value[0])), value[1], places=4)
def compute_quartiles(values):
    """Return (q1, median, q3) for the non-empty sorted sequence *values*.

    For odd lengths, the quartiles are linearly interpolated between the
    two neighbouring elements with 0.25/0.75 weights depending on whether
    n % 4 is 1 or 3; for even lengths each half's median is used.
    """
    n = len(values)
    assert n > 0
    if n == 1:
        only = values[0]
        return (only, only, only)
    median = get_median(values)
    half = n // 2
    if n % 2 == 0:
        # Even length: quartiles are the medians of each half.
        q1 = get_median(values[:half])
        q3 = get_median(values[half:])
    elif n % 4 == 1:
        m = (n - 1) // 4
        q1 = 0.25 * values[m - 1] + 0.75 * values[m]
        q3 = 0.75 * values[3 * m] + 0.25 * values[3 * m + 1]
    else:
        assert n % 4 == 3
        m = (n - 3) // 4
        q1 = 0.75 * values[m] + 0.25 * values[m + 1]
        q3 = 0.25 * values[3 * m + 1] + 0.75 * values[3 * m + 2]
    return (q1, median, q3)
class Latin1TextListSpec(Spec):
    """Spec for a counted list of Latin-1 text entries.

    Wire format: a single byte holding the entry count, followed by that
    many Latin1TextSpec-encoded strings.
    """

    def __init__(self, name, default=None):
        # Bug fix: the original used a mutable default argument
        # (default=[]), which would be shared by every instance created
        # without an explicit default. Use None as the sentinel instead.
        if default is None:
            default = []
        super(Latin1TextListSpec, self).__init__(name, default)
        self._bspec = ByteSpec('entry_count', default=0)
        self._lspec = Latin1TextSpec('child_element_id')

    def read(self, header, frame, data):
        """Read the count byte, then that many entries.

        Returns (entries, remaining_data).
        """
        (count, data) = self._bspec.read(header, frame, data)
        entries = []
        for _ in range(count):
            (entry, data) = self._lspec.read(header, frame, data)
            entries.append(entry)
        return (entries, data)

    def write(self, config, frame, value):
        """Serialize len(value) as the count byte, then each entry."""
        b = self._bspec.write(config, frame, len(value))
        for v in value:
            b += self._lspec.write(config, frame, v)
        return b

    def validate(self, frame, value):
        """Validate every entry with the child text spec."""
        return [self._lspec.validate(frame, v) for v in value]
class Distribution(torch.Tensor):
    """A torch.Tensor that carries its own sampling distribution.

    Supported dist_type values: 'normal' (kwargs: mean, var) and
    'categorical' (kwargs: num_categories).
    """

    def init_distribution(self, dist_type, **kwargs):
        """Record the distribution spec on this tensor."""
        # Keep the raw kwargs so .to() can re-initialize the copy.
        self.dist_type = dist_type
        self.dist_kwargs = kwargs
        if dist_type == 'normal':
            self.mean = kwargs['mean']
            self.var = kwargs['var']
        elif dist_type == 'categorical':
            self.num_categories = kwargs['num_categories']

    def sample_(self):
        """Refill this tensor in place with a sample from its distribution."""
        if self.dist_type == 'normal':
            self.normal_(self.mean, self.var)
        elif self.dist_type == 'categorical':
            self.random_(0, self.num_categories)

    def to(self, *args, **kwargs):
        """Like Tensor.to, but the result keeps the distribution spec."""
        moved = Distribution(self)
        moved.init_distribution(self.dist_type, **self.dist_kwargs)
        moved.data = super().to(*args, **kwargs)
        return moved
_torch
class LukeTokenizerIntegrationTests(unittest.TestCase):
    """Integration tests for LukeTokenizer against the pretrained
    'studio-ousia/luke-base' checkpoint.

    Covers single texts and text pairs, with and without explicit entity
    names, plain Python-list outputs vs. padded PyTorch tensors, and the
    three task modes ('entity_classification', 'entity_pair_classification',
    'entity_span_classification'). Entity position lists are padded with -1
    up to the tokenizer's max mention length (30 in these fixtures).
    NOTE(review): requires network access to download the checkpoint.
    """
    tokenizer_class = LukeTokenizer
    from_pretrained_kwargs = {'cls_token': '<s>'}

    def setUp(self):
        super().setUp()

    def test_single_text_no_padding_or_truncation(self):
        # Known entities resolve to their vocab ids; unknown ones map to [UNK].
        tokenizer = LukeTokenizer.from_pretrained('studio-ousia/luke-base', return_token_type_ids=True)
        sentence = 'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck.'
        entities = ['Ana Ivanovic', 'Thursday', 'Dummy Entity']
        spans = [(9, 21), (30, 38), (39, 42)]
        encoding = tokenizer(sentence, entities=entities, entity_spans=spans, return_token_type_ids=True)
        self.assertEqual(tokenizer.decode(encoding['input_ids'], spaces_between_special_tokens=False), '<s>Top seed Ana Ivanovic said on Thursday she could hardly believe her luck.</s>')
        self.assertEqual(tokenizer.decode(encoding['input_ids'][3:6], spaces_between_special_tokens=False), ' Ana Ivanovic')
        self.assertEqual(tokenizer.decode(encoding['input_ids'][8:9], spaces_between_special_tokens=False), ' Thursday')
        self.assertEqual(tokenizer.decode(encoding['input_ids'][9:10], spaces_between_special_tokens=False), ' she')
        self.assertEqual(encoding['entity_ids'], [tokenizer.entity_vocab['Ana Ivanovic'], tokenizer.entity_vocab['Thursday'], tokenizer.entity_vocab['[UNK]']])
        self.assertEqual(encoding['entity_attention_mask'], [1, 1, 1])
        self.assertEqual(encoding['entity_token_type_ids'], [0, 0, 0])
        self.assertEqual(encoding['entity_position_ids'], [[3, 4, 5, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)], [8, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)], [9, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)]])

    def test_single_text_only_entity_spans_no_padding_or_truncation(self):
        # Without entity names, every span is encoded as the [MASK] entity.
        tokenizer = LukeTokenizer.from_pretrained('studio-ousia/luke-base', return_token_type_ids=True)
        sentence = 'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck.'
        spans = [(9, 21), (30, 38), (39, 42)]
        encoding = tokenizer(sentence, entity_spans=spans, return_token_type_ids=True)
        self.assertEqual(tokenizer.decode(encoding['input_ids'], spaces_between_special_tokens=False), '<s>Top seed Ana Ivanovic said on Thursday she could hardly believe her luck.</s>')
        self.assertEqual(tokenizer.decode(encoding['input_ids'][3:6], spaces_between_special_tokens=False), ' Ana Ivanovic')
        self.assertEqual(tokenizer.decode(encoding['input_ids'][8:9], spaces_between_special_tokens=False), ' Thursday')
        self.assertEqual(tokenizer.decode(encoding['input_ids'][9:10], spaces_between_special_tokens=False), ' she')
        mask_id = tokenizer.entity_vocab['[MASK]']
        self.assertEqual(encoding['entity_ids'], [mask_id, mask_id, mask_id])
        self.assertEqual(encoding['entity_attention_mask'], [1, 1, 1])
        self.assertEqual(encoding['entity_token_type_ids'], [0, 0, 0])
        self.assertEqual(encoding['entity_position_ids'], [[3, 4, 5, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)], [8, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)], [9, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)]])

    def test_single_text_padding_pytorch_tensors(self):
        # Padded tensor outputs: check shapes only.
        tokenizer = LukeTokenizer.from_pretrained('studio-ousia/luke-base', return_token_type_ids=True)
        sentence = 'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck.'
        entities = ['Ana Ivanovic', 'Thursday', 'Dummy Entity']
        spans = [(9, 21), (30, 38), (39, 42)]
        encoding = tokenizer(sentence, entities=entities, entity_spans=spans, return_token_type_ids=True, padding='max_length', max_length=30, max_entity_length=16, return_tensors='pt')
        self.assertEqual(encoding['input_ids'].shape, (1, 30))
        self.assertEqual(encoding['attention_mask'].shape, (1, 30))
        self.assertEqual(encoding['token_type_ids'].shape, (1, 30))
        self.assertEqual(encoding['entity_ids'].shape, (1, 16))
        self.assertEqual(encoding['entity_attention_mask'].shape, (1, 16))
        self.assertEqual(encoding['entity_token_type_ids'].shape, (1, 16))
        self.assertEqual(encoding['entity_position_ids'].shape, (1, 16, tokenizer.max_mention_length))

    def test_text_pair_no_padding_or_truncation(self):
        tokenizer = LukeTokenizer.from_pretrained('studio-ousia/luke-base', return_token_type_ids=True)
        sentence = 'Top seed Ana Ivanovic said on Thursday'
        sentence_pair = 'She could hardly believe her luck.'
        entities = ['Ana Ivanovic', 'Thursday']
        entities_pair = ['Dummy Entity']
        spans = [(9, 21), (30, 38)]
        spans_pair = [(0, 3)]
        encoding = tokenizer(sentence, sentence_pair, entities=entities, entities_pair=entities_pair, entity_spans=spans, entity_spans_pair=spans_pair, return_token_type_ids=True)
        self.assertEqual(tokenizer.decode(encoding['input_ids'], spaces_between_special_tokens=False), '<s>Top seed Ana Ivanovic said on Thursday</s></s>She could hardly believe her luck.</s>')
        self.assertEqual(tokenizer.decode(encoding['input_ids'][3:6], spaces_between_special_tokens=False), ' Ana Ivanovic')
        self.assertEqual(tokenizer.decode(encoding['input_ids'][8:9], spaces_between_special_tokens=False), ' Thursday')
        self.assertEqual(tokenizer.decode(encoding['input_ids'][11:12], spaces_between_special_tokens=False), 'She')
        self.assertEqual(encoding['entity_ids'], [tokenizer.entity_vocab['Ana Ivanovic'], tokenizer.entity_vocab['Thursday'], tokenizer.entity_vocab['[UNK]']])
        self.assertEqual(encoding['entity_attention_mask'], [1, 1, 1])
        self.assertEqual(encoding['entity_token_type_ids'], [0, 0, 0])
        self.assertEqual(encoding['entity_position_ids'], [[3, 4, 5, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)], [8, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)], [11, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)]])

    def test_text_pair_only_entity_spans_no_padding_or_truncation(self):
        tokenizer = LukeTokenizer.from_pretrained('studio-ousia/luke-base', return_token_type_ids=True)
        sentence = 'Top seed Ana Ivanovic said on Thursday'
        sentence_pair = 'She could hardly believe her luck.'
        spans = [(9, 21), (30, 38)]
        spans_pair = [(0, 3)]
        encoding = tokenizer(sentence, sentence_pair, entity_spans=spans, entity_spans_pair=spans_pair, return_token_type_ids=True)
        self.assertEqual(tokenizer.decode(encoding['input_ids'], spaces_between_special_tokens=False), '<s>Top seed Ana Ivanovic said on Thursday</s></s>She could hardly believe her luck.</s>')
        self.assertEqual(tokenizer.decode(encoding['input_ids'][3:6], spaces_between_special_tokens=False), ' Ana Ivanovic')
        self.assertEqual(tokenizer.decode(encoding['input_ids'][8:9], spaces_between_special_tokens=False), ' Thursday')
        self.assertEqual(tokenizer.decode(encoding['input_ids'][11:12], spaces_between_special_tokens=False), 'She')
        mask_id = tokenizer.entity_vocab['[MASK]']
        self.assertEqual(encoding['entity_ids'], [mask_id, mask_id, mask_id])
        self.assertEqual(encoding['entity_attention_mask'], [1, 1, 1])
        self.assertEqual(encoding['entity_token_type_ids'], [0, 0, 0])
        self.assertEqual(encoding['entity_position_ids'], [[3, 4, 5, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)], [8, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)], [11, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)]])

    def test_text_pair_padding_pytorch_tensors(self):
        tokenizer = LukeTokenizer.from_pretrained('studio-ousia/luke-base', return_token_type_ids=True)
        sentence = 'Top seed Ana Ivanovic said on Thursday'
        sentence_pair = 'She could hardly believe her luck.'
        entities = ['Ana Ivanovic', 'Thursday']
        entities_pair = ['Dummy Entity']
        spans = [(9, 21), (30, 38)]
        spans_pair = [(0, 3)]
        encoding = tokenizer(sentence, sentence_pair, entities=entities, entities_pair=entities_pair, entity_spans=spans, entity_spans_pair=spans_pair, return_token_type_ids=True, padding='max_length', max_length=30, max_entity_length=16, return_tensors='pt')
        self.assertEqual(encoding['input_ids'].shape, (1, 30))
        self.assertEqual(encoding['attention_mask'].shape, (1, 30))
        self.assertEqual(encoding['token_type_ids'].shape, (1, 30))
        self.assertEqual(encoding['entity_ids'].shape, (1, 16))
        self.assertEqual(encoding['entity_attention_mask'].shape, (1, 16))
        self.assertEqual(encoding['entity_token_type_ids'].shape, (1, 16))
        self.assertEqual(encoding['entity_position_ids'].shape, (1, 16, tokenizer.max_mention_length))

    def test_entity_classification_no_padding_or_truncation(self):
        # Task mode wraps the single target mention in <ent> markers.
        tokenizer = LukeTokenizer.from_pretrained('studio-ousia/luke-base', task='entity_classification')
        sentence = 'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the new world number one avoid a humiliating second- round exit at Wimbledon .'
        span = (39, 42)
        encoding = tokenizer(sentence, entity_spans=[span], return_token_type_ids=True)
        self.assertEqual(len(encoding['input_ids']), 42)
        self.assertEqual(len(encoding['attention_mask']), 42)
        self.assertEqual(len(encoding['token_type_ids']), 42)
        self.assertEqual(tokenizer.decode(encoding['input_ids'], spaces_between_special_tokens=False), '<s>Top seed Ana Ivanovic said on Thursday<ent> she<ent> could hardly believe her luck as a fortuitous netcord helped the new world number one avoid a humiliating second- round exit at Wimbledon.</s>')
        self.assertEqual(tokenizer.decode(encoding['input_ids'][9:12], spaces_between_special_tokens=False), '<ent> she<ent>')
        self.assertEqual(encoding['entity_ids'], [2])
        self.assertEqual(encoding['entity_attention_mask'], [1])
        self.assertEqual(encoding['entity_token_type_ids'], [0])
        self.assertEqual(encoding['entity_position_ids'], [[9, 10, 11, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)]])

    def test_entity_classification_padding_pytorch_tensors(self):
        tokenizer = LukeTokenizer.from_pretrained('studio-ousia/luke-base', task='entity_classification', return_token_type_ids=True)
        sentence = 'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the new world number one avoid a humiliating second- round exit at Wimbledon .'
        span = (39, 42)
        encoding = tokenizer(sentence, entity_spans=[span], return_token_type_ids=True, padding='max_length', return_tensors='pt')
        self.assertEqual(encoding['input_ids'].shape, (1, 512))
        self.assertEqual(encoding['attention_mask'].shape, (1, 512))
        self.assertEqual(encoding['token_type_ids'].shape, (1, 512))
        self.assertEqual(encoding['entity_ids'].shape, (1, 1))
        self.assertEqual(encoding['entity_attention_mask'].shape, (1, 1))
        self.assertEqual(encoding['entity_token_type_ids'].shape, (1, 1))
        self.assertEqual(encoding['entity_position_ids'].shape, (1, tokenizer.max_entity_length, tokenizer.max_mention_length))

    def test_entity_pair_classification_no_padding_or_truncation(self):
        # Task mode wraps the two mentions in <ent>/<ent2> markers.
        tokenizer = LukeTokenizer.from_pretrained('studio-ousia/luke-base', task='entity_pair_classification', return_token_type_ids=True)
        sentence = 'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck.'
        spans = [(9, 21), (39, 42)]
        encoding = tokenizer(sentence, entity_spans=spans, return_token_type_ids=True)
        self.assertEqual(tokenizer.decode(encoding['input_ids'], spaces_between_special_tokens=False), '<s>Top seed<ent> Ana Ivanovic<ent> said on Thursday<ent2> she<ent2> could hardly believe her luck.</s>')
        self.assertEqual(tokenizer.decode(encoding['input_ids'][3:8], spaces_between_special_tokens=False), '<ent> Ana Ivanovic<ent>')
        self.assertEqual(tokenizer.decode(encoding['input_ids'][11:14], spaces_between_special_tokens=False), '<ent2> she<ent2>')
        self.assertEqual(encoding['entity_ids'], [2, 3])
        self.assertEqual(encoding['entity_attention_mask'], [1, 1])
        self.assertEqual(encoding['entity_token_type_ids'], [0, 0])
        self.assertEqual(encoding['entity_position_ids'], [[3, 4, 5, 6, 7, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)], [11, 12, 13, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)]])

    def test_entity_pair_classification_padding_pytorch_tensors(self):
        tokenizer = LukeTokenizer.from_pretrained('studio-ousia/luke-base', task='entity_pair_classification', return_token_type_ids=True)
        sentence = 'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck.'
        spans = [(9, 21), (39, 42)]
        encoding = tokenizer(sentence, entity_spans=spans, return_token_type_ids=True, padding='max_length', max_length=30, return_tensors='pt')
        self.assertEqual(encoding['input_ids'].shape, (1, 30))
        self.assertEqual(encoding['attention_mask'].shape, (1, 30))
        self.assertEqual(encoding['token_type_ids'].shape, (1, 30))
        self.assertEqual(encoding['entity_ids'].shape, (1, 2))
        self.assertEqual(encoding['entity_attention_mask'].shape, (1, 2))
        self.assertEqual(encoding['entity_token_type_ids'].shape, (1, 2))
        self.assertEqual(encoding['entity_position_ids'].shape, (1, tokenizer.max_entity_length, tokenizer.max_mention_length))

    def test_entity_span_classification_no_padding_or_truncation(self):
        # Task mode additionally reports start/end token positions per span.
        tokenizer = LukeTokenizer.from_pretrained('studio-ousia/luke-base', task='entity_span_classification', return_token_type_ids=True)
        sentence = 'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck.'
        spans = [(0, 8), (9, 21), (39, 42)]
        encoding = tokenizer(sentence, entity_spans=spans, return_token_type_ids=True)
        self.assertEqual(tokenizer.decode(encoding['input_ids'], spaces_between_special_tokens=False), '<s>Top seed Ana Ivanovic said on Thursday she could hardly believe her luck.</s>')
        self.assertEqual(encoding['entity_ids'], [2, 2, 2])
        self.assertEqual(encoding['entity_attention_mask'], [1, 1, 1])
        self.assertEqual(encoding['entity_token_type_ids'], [0, 0, 0])
        self.assertEqual(encoding['entity_position_ids'], [[1, 2, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)], [3, 4, 5, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)], [9, (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1), (- 1)]])
        self.assertEqual(encoding['entity_start_positions'], [1, 3, 9])
        self.assertEqual(encoding['entity_end_positions'], [2, 5, 9])

    def test_entity_span_classification_padding_pytorch_tensors(self):
        tokenizer = LukeTokenizer.from_pretrained('studio-ousia/luke-base', task='entity_span_classification', return_token_type_ids=True)
        sentence = 'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck.'
        spans = [(0, 8), (9, 21), (39, 42)]
        encoding = tokenizer(sentence, entity_spans=spans, return_token_type_ids=True, padding='max_length', max_length=30, max_entity_length=16, return_tensors='pt')
        self.assertEqual(encoding['input_ids'].shape, (1, 30))
        self.assertEqual(encoding['attention_mask'].shape, (1, 30))
        self.assertEqual(encoding['token_type_ids'].shape, (1, 30))
        self.assertEqual(encoding['entity_ids'].shape, (1, 16))
        self.assertEqual(encoding['entity_attention_mask'].shape, (1, 16))
        self.assertEqual(encoding['entity_token_type_ids'].shape, (1, 16))
        self.assertEqual(encoding['entity_position_ids'].shape, (1, 16, tokenizer.max_mention_length))
        self.assertEqual(encoding['entity_start_positions'].shape, (1, 16))
        self.assertEqual(encoding['entity_end_positions'].shape, (1, 16))
def get_environment(cache_size=MAX_CACHE_SIZE, maxage=timedelta(seconds=0), targetID=None, use_volatile=False):
    """Fetch the target's environment via 'environment -get', going through
    the project's generic cache.

    Bug fix: the *cache_size* parameter was previously ignored (the call
    hard-coded MAX_CACHE_SIZE); it is now passed through.
    """
    env_cmd = ops.cmd.getDszCommand('environment -get')
    return ops.project.generic_cache_get(env_cmd, cache_tag=ENVIRONMENT_TAG, cache_size=cache_size, maxage=maxage, targetID=targetID, use_volatile=use_volatile)
class Sobel(nn.Module):
    """Fixed (non-trainable) Sobel edge filter.

    Maps an (N, 1, H, W) input to an (N, 2, H, W) output holding the
    horizontal and vertical gradient responses.
    """

    def __init__(self):
        super(Sobel, self).__init__()
        self.edge_conv = nn.Conv2d(1, 2, kernel_size=3, stride=1, padding=1, bias=False)
        kernel_x = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
        kernel_y = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
        kernels = torch.from_numpy(np.stack((kernel_x, kernel_y))).float()
        self.edge_conv.weight = nn.Parameter(kernels.view(2, 1, 3, 3))
        # The filter is a constant operator; freeze all parameters.
        for param in self.parameters():
            param.requires_grad = False

    def forward(self, x):
        edges = self.edge_conv(x)
        return edges.contiguous().view(-1, 2, x.size(2), x.size(3))
class Irradiance():
    """ASV-style benchmark suite timing pvlib irradiance functions against
    clear-sky data at a fixed location (lat 40, lon -80)."""

    def setup(self):
        # NOTE(review): the start= values appear to have been lost in
        # extraction (start='' is not a valid timestamp) -- restore the
        # original start dates before running these benchmarks.
        self.times = pd.date_range(start='', freq='1min', periods=14400)
        self.days = pd.date_range(start='', freq='d', periods=30)
        self.location = location.Location(40, (- 80))
        # Shared fixtures reused by every benchmark below.
        self.solar_position = self.location.get_solarposition(self.times)
        self.clearsky_irradiance = self.location.get_clearsky(self.times)
        self.tilt = 20
        self.azimuth = 180
        self.aoi = irradiance.aoi(self.tilt, self.azimuth, self.solar_position.apparent_zenith, self.solar_position.azimuth)

    def time_get_extra_radiation(self):
        irradiance.get_extra_radiation(self.days)

    def time_aoi(self):
        irradiance.aoi(self.tilt, self.azimuth, self.solar_position.apparent_zenith, self.solar_position.azimuth)

    def time_aoi_projection(self):
        irradiance.aoi_projection(self.tilt, self.azimuth, self.solar_position.apparent_zenith, self.solar_position.azimuth)

    def time_get_ground_diffuse(self):
        irradiance.get_ground_diffuse(self.tilt, self.clearsky_irradiance.ghi)

    def time_get_total_irradiance(self):
        irradiance.get_total_irradiance(self.tilt, self.azimuth, self.solar_position.apparent_zenith, self.solar_position.azimuth, self.clearsky_irradiance.dni, self.clearsky_irradiance.ghi, self.clearsky_irradiance.dhi)

    def time_disc(self):
        irradiance.disc(self.clearsky_irradiance.ghi, self.solar_position.apparent_zenith, self.times)

    def time_dirint(self):
        irradiance.dirint(self.clearsky_irradiance.ghi, self.solar_position.apparent_zenith, self.times)

    def time_dirindex(self):
        irradiance.dirindex(self.clearsky_irradiance.ghi, self.clearsky_irradiance.ghi, self.clearsky_irradiance.dni, self.solar_position.apparent_zenith, self.times)

    def time_erbs(self):
        irradiance.erbs(self.clearsky_irradiance.ghi, self.solar_position.apparent_zenith, self.times)
def assert_wrapper(__wrapped_mock_method__: Callable[(..., Any)], *args: Any, **kwargs: Any) -> None:
__tracebackhide__ = True
try:
__wrapped_mock_method__(*args, **kwargs)
return
except AssertionError as e:
if getattr(e, '_mock_introspection_applied', 0):
msg = str(e)
else:
__mock_self = args[0]
msg = str(e)
if (__mock_self.call_args is not None):
(actual_args, actual_kwargs) = __mock_self.call_args
introspection = ''
try:
assert (actual_args == args[1:])
except AssertionError as e_args:
introspection += ('\nArgs:\n' + str(e_args))
try:
assert (actual_kwargs == kwargs)
except AssertionError as e_kwargs:
introspection += ('\nKwargs:\n' + str(e_kwargs))
if introspection:
msg += ('\n\npytest introspection follows:\n' + introspection)
e = AssertionError(msg)
e._mock_introspection_applied = True
raise e |
.parametrize('input_type', [tuple, list])
def test_run_model_from_effective_irradiance_multi_array(sapm_dc_snl_ac_system_Array, location, weather, total_irrad, input_type):
    """Running from effective irradiance with the same data duplicated per
    array must produce identical DC results for both arrays."""
    sim_data = weather.copy()
    sim_data[['poa_global', 'poa_diffuse', 'poa_direct']] = total_irrad
    sim_data['effective_irradiance'] = sim_data['poa_global']
    chain = ModelChain(sapm_dc_snl_ac_system_Array, location, aoi_model='no_loss', spectral_model='no_loss')
    chain.run_model_from_effective_irradiance(input_type((sim_data, sim_data)))
    assert_frame_equal(chain.results.dc[0], chain.results.dc[1])
_criterion('cross_entropy')
class CrossEntropyCriterion(FairseqCriterion):
    """Token-level cross-entropy (negative log-likelihood) training criterion."""

    def __init__(self, args, task):
        super().__init__(args, task)

    def forward(self, model, sample, reduce=True):
        """Compute the loss for *sample*.

        Returns (loss, sample_size, logging_output); sample_size is the
        sentence count or the token count depending on args.sentence_avg.
        """
        net_output = model(**sample['net_input'])
        (loss, _) = self.compute_loss(model, net_output, sample, reduce=reduce)
        sample_size = (sample['target'].size(0) if self.args.sentence_avg else sample['ntokens'])
        logging_output = {'loss': (utils.item(loss.data) if reduce else loss.data), 'nll_loss': (utils.item(loss.data) if reduce else loss.data), 'ntokens': sample['ntokens'], 'nsentences': sample['target'].size(0), 'sample_size': sample_size}
        return (loss, sample_size, logging_output)

    def compute_loss(self, model, net_output, sample, reduce=True):
        # Flatten to (batch*time, vocab) and sum NLL over non-pad targets.
        lprobs = model.get_normalized_probs(net_output, log_probs=True)
        lprobs = lprobs.view((- 1), lprobs.size((- 1)))
        target = model.get_targets(sample, net_output).view((- 1))
        loss = F.nll_loss(lprobs, target, ignore_index=self.padding_idx, reduction=('sum' if reduce else 'none'))
        return (loss, loss)

    # NOTE(review): no `self` parameter and no @staticmethod decorator is
    # visible here -- decorators appear stripped elsewhere in this file;
    # confirm this is declared @staticmethod in the original source.
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate per-worker logging outputs; losses are reported in
        bits (divided by log 2)."""
        loss_sum = sum((log.get('loss', 0) for log in logging_outputs))
        ntokens = sum((log.get('ntokens', 0) for log in logging_outputs))
        nsentences = sum((log.get('nsentences', 0) for log in logging_outputs))
        sample_size = sum((log.get('sample_size', 0) for log in logging_outputs))
        agg_output = {'loss': (((loss_sum / sample_size) / math.log(2)) if (sample_size > 0) else 0.0), 'ntokens': ntokens, 'nsentences': nsentences, 'sample_size': sample_size}
        if (sample_size != ntokens):
            # Per-token NLL differs from per-sample loss when sentence_avg.
            agg_output['nll_loss'] = ((loss_sum / ntokens) / math.log(2))
        return agg_output
def check_args(args):
    """Open the text-input and probability-output handles on *args*
    ('-' selects stdin/stdout) and validate the log base (exits on <= 0)."""
    if args.text_in == '-':
        args.text_in_handle = sys.stdin
    else:
        args.text_in_handle = open(args.text_in, 'r')
    if args.prob_file == '-':
        args.prob_file_handle = sys.stdout
    else:
        args.prob_file_handle = open(args.prob_file, 'w')
    if args.log_base <= 0:
        sys.exit('compute_sentence_probs_arpa.py: Invalid log base (must be greater than 0)')
.parametrize('fields_to_test', [pytest.param(combination, id=','.join(combination)) for combination in itertools.combinations(randovania.interface_common.options._SERIALIZER_FOR_FIELD.keys(), 2)])
def test_load_from_disk_with_data(fields_to_test: list[str], tmp_path, mocker):
    """load_from_disk must decode each persisted field with its serializer
    and apply the decoded value through Options._set_field."""
    # Setup: patch the persistence layer and write a fake config file.
    mock_get_persisted_options_from_data: MagicMock = mocker.patch('randovania.interface_common.persisted_options.get_persisted_options_from_data', autospec=True)
    mock_set_field: MagicMock = mocker.patch('randovania.interface_common.options.Options._set_field', autospec=True)
    tmp_path.joinpath('config.json').write_text('[1, 2, 54, 69]')
    option = Options(tmp_path)
    persisted_options = {name: MagicMock() for name in fields_to_test}
    new_serializers = {name: MagicMock() for name in fields_to_test}
    mock_get_persisted_options_from_data.return_value = persisted_options
    # Run with the serializer table swapped for mocks.
    with patch.dict(randovania.interface_common.options._SERIALIZER_FOR_FIELD, new_serializers):
        option.load_from_disk()
    # Assert: raw data parsed once, each field decoded and set.
    mock_get_persisted_options_from_data.assert_called_once_with([1, 2, 54, 69])
    expected_calls = []
    for name, serializer in new_serializers.items():
        serializer.decode.assert_called_once_with(persisted_options[name])
        expected_calls.append(call(option, name, serializer.decode.return_value))
    mock_set_field.assert_has_calls(expected_calls)
def test_StandarScaler_simple_both():
    """StandarScaler(target='both') should z-score both the matrix and weights.

    NOTE(review): the expected weights divide by ``0.`` — that denominator
    looks garbled (the population std of [1, 2, 3] is ~0.8165, not 0, and
    (x - 2) / 0. would yield inf/nan). Confirm against the original test.
    """
    dm = skcriteria.mkdm(matrix=[[1, 2, 3], [4, 5, 6]], objectives=[min, max, min], weights=[1, 2, 3])
    # Matrix columns standardized with means (2.5, 3.5, 4.5) and std 1.5.
    expected = skcriteria.mkdm(matrix=[[((1 - 2.5) / 1.5), ((2 - 3.5) / 1.5), ((3 - 4.5) / 1.5)], [((4 - 2.5) / 1.5), ((5 - 3.5) / 1.5), ((6 - 4.5) / 1.5)]], objectives=[min, max, min], weights=[((1 - 2) / 0.), ((2 - 2) / 0.), ((3 - 2) / 0.)], dtypes=[float, float, float])
    scaler = StandarScaler(target='both')
    result = scaler.transform(dm)
    assert result.equals(expected)
class TaskRenderer(TasksRendererMixin, ConditionRendererMixin, AttributeRendererMixin, OptionRendererMixin, BaseXMLRenderer):
    """XML renderer for task exports; the mixins supply per-element renderers."""

    def render_document(self, xml, tasks):
        """Write the root <rdmo> element and render every task inside it.

        NOTE(review): the attribute dict below is syntactically broken — the
        'xmlns:dc' value string was evidently truncated during extraction (it
        should contain the Dublin Core namespace URL). Restore from the
        original source before use.
        """
        xml.startElement('rdmo', {'xmlns:dc': ' 'version': self.version, 'created': self.created})
        for task in tasks:
            self.render_task(xml, task)
        xml.endElement('rdmo')
def lupdate():
    """Run Qt's pylupdate on pyzo.pro to refresh the translation sources.

    Locates the pylupdate executable matching the active Qt binding (falling
    back to one on PATH) and raises RuntimeError when the tool fails.

    Raises:
        ValueError: when pyzo.pro cannot be found (not running from the repo).
        RuntimeError: when pylupdate exits with a nonzero return code.
    """
    fname = 'pyzo.pro'
    filename = os.path.realpath(os.path.join(pyzo.pyzoDir, '..', fname))
    if not os.path.isfile(filename):
        raise ValueError('Could not find {}. This function must run from the source repo.'.format(fname))
    # Prefer the pylupdate binary shipped next to the Qt binding in use.
    pysideDir = os.path.abspath(os.path.dirname(pyzo.QtCore.__file__))
    ISWIN = sys.platform.startswith('win')
    exe_ = ('pylupdate' + pyzo.QtCore.__version__[0]) + ('.exe' * ISWIN)
    exe = os.path.join(pysideDir, exe_)
    if not os.path.isfile(exe):
        exe = exe_  # not bundled with the binding; hope it is on PATH
    cmd = [exe, '-noobsolete', '-verbose', filename]
    p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # Fixed: communicate() drains stdout while waiting, so a chatty pylupdate
    # can never deadlock on a full pipe. The old poll()/sleep loop only read
    # stdout after exit, which hangs once the OS pipe buffer fills.
    stdout_data, _ = p.communicate()
    output = stdout_data.decode('utf-8')
    if p.returncode:
        raise RuntimeError('lupdate failed (%i): %s' % (p.returncode, output))
    else:
        print(output)
class VGGLoss(nn.Module):
    """Perceptual loss: weighted L1 distance between VGG-19 feature maps.

    ``gpu_ids`` is accepted for interface compatibility but unused; the VGG
    network is always moved to the default CUDA device.
    """

    def __init__(self, gpu_ids):
        super().__init__()
        self.vgg = Vgg19().cuda()
        self.criterion = nn.L1Loss()
        # Deeper layers get progressively larger weights (1/32 up to 1).
        self.weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0]

    def forward(self, x, y):
        """Return sum_i w_i * L1(vgg_i(x), vgg_i(y)); y's features are detached
        so no gradient flows into the target branch."""
        feats_x = self.vgg(x)
        feats_y = self.vgg(y)
        total = 0
        for idx, feat_x in enumerate(feats_x):
            total += self.weights[idx] * self.criterion(feat_x, feats_y[idx].detach())
        return total
class PrecertificateSignedCertificateTimestamps(ExtensionType):
    """X.509 extension holding the SCTs embedded in a precertificate."""

    oid = ExtensionOID.PRECERT_SIGNED_CERTIFICATE_TIMESTAMPS

    def __init__(self, signed_certificate_timestamps: typing.Iterable[SignedCertificateTimestamp]) -> None:
        scts = list(signed_certificate_timestamps)
        for sct in scts:
            if not isinstance(sct, SignedCertificateTimestamp):
                raise TypeError('Every item in the signed_certificate_timestamps list must be a SignedCertificateTimestamp')
        self._signed_certificate_timestamps = scts

    # Sequence protocol (len/iter/indexing) is delegated to the stored list.
    (__len__, __iter__, __getitem__) = _make_sequence_methods('_signed_certificate_timestamps')

    def __repr__(self) -> str:
        return f'<PrecertificateSignedCertificateTimestamps({list(self)})>'

    def __hash__(self) -> int:
        return hash(tuple(self._signed_certificate_timestamps))

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, PrecertificateSignedCertificateTimestamps):
            return NotImplemented
        return self._signed_certificate_timestamps == other._signed_certificate_timestamps

    def public_bytes(self) -> bytes:
        # DER encoding is performed by the Rust backend.
        return rust_x509.encode_extension_value(self)
def multiprocess_nodes(cloud_object_function, nodes):
    """Apply ``cloud_object_function`` to every node using a thread pool.

    When ``nodes`` contains ``(node_id, node_info)`` tuples the function is
    called as ``cloud_object_function(node_info, node_id)``; otherwise it is
    called with each node directly. Errors are logged rather than raised,
    keeping this best-effort like the original.
    """
    try:
        pool = ThreadPool(processes=len(nodes))
        try:
            logging.info('nodes type ' + str(type(nodes[0])))
            if isinstance(nodes[0], tuple):  # isinstance, not `type(...) is`
                node_ids = [node[0] for node in nodes]
                node_infos = [node[1] for node in nodes]
                logging.info('node id ' + str(node_ids))
                logging.info('node info' + str(node_infos))
                pool.starmap(cloud_object_function, zip(node_infos, node_ids))
            else:
                logging.info('pool type' + str(type(nodes)))
                pool.map(cloud_object_function, nodes)
        finally:
            # Fixed: always release the worker threads, even when a task
            # raises — the original leaked the pool on any exception.
            pool.close()
            pool.join()
    except Exception as e:
        # Fixed: failures were logged at INFO level, which hid them;
        # logging.exception records the traceback as well.
        logging.exception('Error on pool multiprocessing: ' + str(e))
def get_fbank(path_or_fp: Union[str, BinaryIO], n_bins=80) -> np.ndarray:
    """Extract log-mel filterbank features from an audio file or stream.

    Tries the pyKaldi backend first, then torchaudio; raises ImportError
    when neither is available.
    """
    waveform, sample_rate = get_waveform(path_or_fp, normalization=False)
    for extractor in (_get_kaldi_fbank, _get_torchaudio_fbank):
        features = extractor(waveform, sample_rate, n_bins)
        if features is not None:
            return features
    raise ImportError('Please install pyKaldi or torchaudio to enable online filterbank feature extraction')
def load_word2vec(emb_path, id_to_word, word_dim, old_weights):
    """Overwrite rows of ``old_weights`` with pretrained word2vec vectors.

    Each word is looked up directly, then lowercased, then lowercased with
    all digits collapsed to '0'. Rows without any match keep their original
    values. Note: ``old_weights`` is mutated in place and also returned.

    Args:
        emb_path: path to a whitespace-separated embedding file
            (word followed by ``word_dim`` floats per line).
        id_to_word: mapping from row index to word (indexable by 0..n-1).
        word_dim: embedding dimensionality; lines of any other width count
            as invalid and are skipped.
        old_weights: weight matrix to update, shape (n_words, word_dim).
    """
    new_weights = old_weights
    print('Loading pretrained embeddings from {}...'.format(emb_path))
    pre_trained = {}
    emb_invalid = 0
    # Fixed: use a context manager so the file handle is closed even on
    # errors (the original codecs.open handle was never closed).
    with open(emb_path, 'r', encoding='utf-8') as emb_file:
        for line in emb_file:
            parts = line.rstrip().split()
            if len(parts) == (word_dim + 1):
                pre_trained[parts[0]] = np.asarray(parts[1:], dtype=np.float32)
            else:
                emb_invalid += 1
    if emb_invalid > 0:
        print('WARNING: %i invalid lines' % emb_invalid)
    c_found = 0
    c_lower = 0
    c_zeros = 0
    n_words = len(id_to_word)
    digits_re = re.compile('\\d')  # compiled once instead of twice per word
    for i in range(n_words):
        word = id_to_word[i]
        if word in pre_trained:
            new_weights[i] = pre_trained[word]
            c_found += 1
        elif word.lower() in pre_trained:
            new_weights[i] = pre_trained[word.lower()]
            c_lower += 1
        else:
            normalized = digits_re.sub('0', word.lower())
            if normalized in pre_trained:
                new_weights[i] = pre_trained[normalized]
                c_zeros += 1
    print('Loaded %i pretrained embeddings.' % len(pre_trained))
    print(('%i / %i (%.4f%%) words have been initialized with pretrained embeddings.' % (((c_found + c_lower) + c_zeros), n_words, ((100.0 * ((c_found + c_lower) + c_zeros)) / n_words))))
    print(('%i found directly, %i after lowercasing, %i after lowercasing + zero.' % (c_found, c_lower, c_zeros)))
    return new_weights
def test_locker_properly_assigns_metadata_files(locker: Locker) -> None:
    """metadata.files must land only on non-direct-origin and file packages.

    NOTE(review): the lockfile content below contains truncated URLs
    (e.g. 'url = " = "main"'), evidently mangled during extraction; the
    assertions still exercise the metadata.files assignment logic.
    """
    content = '[[package]]\nname = "demo"\nversion = "1.0"\ndescription = ""\noptional = false\npython-versions = "*"\ndevelop = false\n\n[[package]]\nname = "demo"\nversion = "1.0"\ndescription = ""\noptional = false\npython-versions = "*"\ndevelop = false\n\n[package.source]\ntype = "git"\nurl = " = "main"\nresolved_reference = "123456"\n\n[[package]]\nname = "demo"\nversion = "1.0"\ndescription = ""\noptional = false\npython-versions = "*"\ndevelop = false\n\n[package.source]\ntype = "directory"\nurl = "./folder"\n\n[[package]]\nname = "demo"\nversion = "1.0"\ndescription = ""\noptional = false\npython-versions = "*"\ndevelop = false\n\n[package.source]\ntype = "file"\nurl = "./demo-1.0-cp39-win_amd64.whl"\n\n[[package]]\nname = "demo"\nversion = "1.0"\ndescription = ""\noptional = false\npython-versions = "*"\ndevelop = false\n\n[package.source]\ntype = "url"\nurl = " = "1.1"\npython-versions = "*"\ncontent-hash = "115cf985d932e9bf5f540555bbdd75decbb62cac81e399375fc19f6277f8c1d8"\n\n[metadata.files]\n# metadata.files are only tracked for non-direct origin and file dependencies\ndemo = [\n {file = "demo-1.0-cp39-win_amd64.whl", hash = "sha256"},\n {file = "demo-1.0.tar.gz", hash = "sha256"},\n {file = "demo-1.0-py3-none-any.whl", hash = "sha256"},\n]\n'
    with open(locker.lock, 'w', encoding='utf-8') as f:
        f.write(content)
    repository = locker.locked_repository()
    assert len(repository.packages) == 5
    assert {package.source_type for package in repository.packages} == {None, 'git', 'directory', 'file', 'url'}
    for package in repository.packages:
        if package.source_type is None:
            assert package.files == [{'file': 'demo-1.0-cp39-win_amd64.whl', 'hash': 'sha256'}, {'file': 'demo-1.0.tar.gz', 'hash': 'sha256'}, {'file': 'demo-1.0-py3-none-any.whl', 'hash': 'sha256'}]
        elif package.source_type == 'file':
            assert package.files == [{'file': 'demo-1.0-cp39-win_amd64.whl', 'hash': 'sha256'}]
        else:
            # Fixed: the original wrote `package.files = []` — an assignment
            # instead of an assertion — so direct-origin packages were never
            # actually checked to have no metadata files.
            assert package.files == []
def create_supervised_evaluator(model, metrics, device=None):
    """Build an ignite evaluation Engine that extracts re-id features.

    Each batch is pushed through ``model`` twice — as-is and horizontally
    flipped — and the two feature vectors are summed, a common test-time
    augmentation for person re-identification. The supplied metrics are
    attached to the engine under their dict keys.
    """
    if device:
        model.to(device)

    def _flip_horizontal(img):
        # Reverse the width dimension (dim 3) with an index_select.
        reversed_cols = torch.arange(img.size(3) - 1, -1, -1).long().cuda()
        return img.index_select(3, reversed_cols)

    def _inference(engine, batch):
        model.eval()
        with torch.no_grad():
            data, pids, camids = batch
            data = data.cuda()
            features = model(data)
            features = features + model(_flip_horizontal(data))
            return features, pids, camids

    engine = Engine(_inference)
    for name, metric in metrics.items():
        metric.attach(engine, name)
    return engine
class Model(OriginalModel):
    """64px variant of the original model: fixed input shape and encoder dim,
    plus a residual decoder with an optional mask branch."""

    def __init__(self, *args, **kwargs):
        # Fixed: the debug format string was missing its closing parenthesis.
        logger.debug('Initializing %s: (args: %s, kwargs: %s)', self.__class__.__name__, args, kwargs)
        kwargs['input_shape'] = (64, 64, 3)
        kwargs['encoder_dim'] = 1024
        self.kernel_initializer = RandomNormal(0, 0.02)
        super().__init__(*args, **kwargs)
        logger.debug('Initialized %s', self.__class__.__name__)

    def decoder(self):
        """Build the decoder Keras model.

        Upscales 8x8x512 features to a 3-channel sigmoid face output (and,
        when a mask_type is configured, a parallel 1-channel mask output).
        """
        input_ = Input(shape=(8, 8, 512))
        var_x = input_
        # Alternate upscales with residual blocks while halving channels.
        for filters in (512, 256, 128):
            var_x = self.blocks.upscale(var_x, filters, res_block_follows=True)
            var_x = self.blocks.res_block(var_x, filters, kernel_initializer=self.kernel_initializer)
        var_x = self.blocks.upscale(var_x, 64)
        var_x = self.blocks.conv2d(var_x, 3, kernel_size=5, padding='same', activation='sigmoid', name='face_out')
        outputs = [var_x]
        if self.config.get('mask_type', None):
            # Simpler mask branch: plain upscales, no residual blocks.
            var_y = input_
            for filters in (512, 256, 128, 64):
                var_y = self.blocks.upscale(var_y, filters)
            var_y = self.blocks.conv2d(var_y, 1, kernel_size=5, padding='same', activation='sigmoid', name='mask_out')
            outputs.append(var_y)
        return KerasModel([input_], outputs=outputs)
class AsyncRunner():
    """Drives an AsyncZeroconf instance plus a threaded ServiceBrowser.

    Relies on module-level names defined elsewhere in this file:
    ``ip_version``, ``HAP_TYPE`` and ``async_watch_services``.
    """

    def __init__(self, args: Any) -> None:
        self.args = args
        self.threaded_browser: Optional[ServiceBrowser] = None
        self.aiozc: Optional[AsyncZeroconf] = None

    async def async_run(self) -> None:
        """Start zeroconf and watch HAP services until cancelled."""
        self.aiozc = AsyncZeroconf(ip_version=ip_version)
        assert (self.aiozc is not None)

        # NOTE(review): as written, the ServiceBrowser is only constructed
        # inside this callback — but the browser is what delivers the
        # callbacks, so it would never fire. This looks like indentation
        # damaged during extraction: the ServiceBrowser construction likely
        # belongs at async_run level. Confirm against the original source.
        def on_service_state_change(zeroconf: Zeroconf, service_type: str, state_change: ServiceStateChange, name: str) -> None:
            self.threaded_browser = ServiceBrowser(self.aiozc.zeroconf, [HAP_TYPE], handlers=[on_service_state_change])
        (await async_watch_services(self.aiozc))

    async def async_close(self) -> None:
        """Stop the browser and shut zeroconf down; must follow async_run."""
        assert (self.aiozc is not None)
        assert (self.threaded_browser is not None)
        self.threaded_browser.cancel()
        (await self.aiozc.async_close())
def pytest_configure(config):
    """Reorder collection hooks so ours runs where pytest-randomly's was.

    Swaps this plugin's pytest_collection_modifyitems hookimpl with the one
    registered by the 'randomly' plugin, in place, so that pytest-randomly's
    shuffling happens in the desired order. No-op when pytest-randomly is
    not installed.
    """
    manager = config.pluginmanager
    impls = manager.hook.pytest_collection_modifyitems.get_hookimpls()
    randomly_plugin = manager.getplugin('randomly')
    target_idx = next((idx for idx, impl in enumerate(impls) if impl.plugin is randomly_plugin), None)
    if target_idx is None:
        return
    our_plugin = manager.getplugin(__file__)
    source_idx = next(idx for idx, impl in enumerate(impls) if impl.plugin is our_plugin)
    impls[target_idx], impls[source_idx] = impls[source_idx], impls[target_idx]
def test_it_works_with_the_simplest_test_items(ourtester):
    """End-to-end check that the plugin tolerates minimal custom pytest Items.

    The generated conftest collects three no-op items — two sharing a name
    and a module, one with no module at all; all three must pass under -v.
    """
    ourtester.makepyfile(conftest='\n import sys\n\n import pytest\n\n\n class MyCollector(pytest.Collector):\n def __init__(self, fspath, items, **kwargs):\n super(MyCollector, self).__init__(fspath, **kwargs)\n self.items = items\n\n def collect(self):\n return self.items\n\n\n class NoOpItem(pytest.Item):\n def __init__(self, name, parent, module=None):\n super(NoOpItem, self).__init__(name=name, parent=parent)\n if module is not None:\n self.module = module\n\n def runtest(self):\n pass\n\n\n def pytest_collect_file(path, parent):\n if not str(path).endswith(\'.py\'):\n return\n return MyCollector.from_parent(\n parent=parent,\n fspath=str(path),\n items=[\n NoOpItem.from_parent(\n name=str(path) + "1",\n parent=parent,\n module=sys.modules[__name__],\n ),\n NoOpItem.from_parent(\n name=str(path) + "1",\n parent=parent,\n module=sys.modules[__name__],\n ),\n NoOpItem.from_parent(\n name=str(path) + "2",\n parent=parent,\n ),\n ],\n )\n ')
    args = ['-v']
    out = ourtester.runpytest(*args)
    out.assert_outcomes(passed=3)
class Effect5631(BaseEffect):
    """Ship bonus: boosts cruise-missile explosive damage per skill level.

    Handlers in this effect framework are plain functions (no ``self``);
    the framework invokes them with the fit and the source item.
    """
    type = 'passive'

    def handler(fit, ship, context, projectionRange, **kwargs):
        # Applies the ship's shipBonusMB attribute (scaled by the Minmatar
        # Battleship skill) to the explosiveDamage of every charge that
        # requires the Cruise Missiles skill.
        fit.modules.filteredChargeBoost((lambda mod: mod.charge.requiresSkill('Cruise Missiles')), 'explosiveDamage', ship.getModifiedItemAttr('shipBonusMB'), skill='Minmatar Battleship', **kwargs)
def test_organization_teams_sync_bool(app):
    """The org API must report is_synced=True only for the LDAP-synced team."""
    with mock_ldap() as ldap, \
         patch('endpoints.api.organization.authentication', ldap), \
         client_with_identity('devtable', app) as cl:
        resp = conduct_api_call(cl, Organization, 'GET', {'orgname': 'sellnsmall'})
        teams = resp.json['teams']
        assert not teams['owners']['is_synced']
        assert teams['synced']['is_synced']
def split_dataset(dataset, seed):
    """Add a SPLIT column (train / test / test_scaffolds) and drop 'scaffold'.

    Every 10th scaffold — ordered by descending frequency with the scaffold
    string as tie-break — goes to 'test_scaffolds'; 10% of the remaining
    rows are then sampled (reproducibly via ``seed``) into 'test'.
    The dataframe is modified in place and returned.
    """
    logger.info('Splitting the dataset')
    # Fixed: pd.value_counts(...) is deprecated and removed in pandas 2.x;
    # Series.value_counts() is the equivalent supported spelling.
    scaffolds = dataset['scaffold'].value_counts()
    scaffolds = sorted(scaffolds.items(), key=lambda x: (-x[1], x[0]))
    test_scaffolds = {x[0] for x in scaffolds[9::10]}
    dataset['SPLIT'] = 'train'
    test_scaf_idx = [(x in test_scaffolds) for x in dataset['scaffold']]
    dataset.loc[test_scaf_idx, 'SPLIT'] = 'test_scaffolds'
    test_idx = dataset.loc[dataset['SPLIT'] == 'train'].sample(frac=0.1, random_state=seed).index
    dataset.loc[test_idx, 'SPLIT'] = 'test'
    dataset.drop('scaffold', axis=1, inplace=True)
    return dataset
class ResMLPBlock(nn.Module):
    """Two-layer MLP residual block: ReLU(x + BN(FC(ReLU(BN(FC(x)))))).

    Input and output share the shape (batch, channels).
    """

    def __init__(self, channels):
        super().__init__()
        self.fc1 = nn.Sequential(
            nn.Linear(channels, channels),
            nn.BatchNorm1d(channels),
            nn.ReLU(inplace=True),
        )
        self.fc2 = nn.Sequential(
            nn.Linear(channels, channels),
            nn.BatchNorm1d(channels),
        )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Apply both FC stages, add the skip connection, then ReLU."""
        residual = x
        out = self.fc1(x)
        out = self.fc2(out)
        out = out + residual
        return self.relu(out)
def next_step(model_output: Union[torch.FloatTensor, np.ndarray], timestep: int, sample: Union[torch.FloatTensor, np.ndarray], ddim_scheduler):
    """One reverse DDIM-inversion step.

    Maps ``sample`` at ``timestep`` to the next (noisier) timestep using the
    scheduler's cumulative-alpha schedule: predict x0 from the noise estimate,
    then re-noise it at the next step.
    """
    step_size = ddim_scheduler.config.num_train_timesteps // ddim_scheduler.num_inference_steps
    next_timestep = timestep
    timestep = min(timestep - step_size, 999)
    # final_alpha_cumprod covers the t < 0 (pre-schedule) case.
    alpha_prod_t = ddim_scheduler.alphas_cumprod[timestep] if timestep >= 0 else ddim_scheduler.final_alpha_cumprod
    alpha_prod_t_next = ddim_scheduler.alphas_cumprod[next_timestep]
    beta_prod_t = 1 - alpha_prod_t
    pred_original = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    direction = (1 - alpha_prod_t_next) ** 0.5 * model_output
    return alpha_prod_t_next ** 0.5 * pred_original + direction
class Pow(BinaryScalarOp):
    """Elementwise power scalar op (x ** y); maps to numpy's ``power``."""
    nfunc_spec = ('power', 2, 1)

    def impl(self, x, y):
        # Pure-Python reference implementation.
        return (x ** y)

    def c_code(self, node, name, inputs, outputs, sub):
        """Emit C for one scalar: z = pow(x, y). Complex types unsupported."""
        (x, y) = inputs
        (z,) = outputs
        if ((node.inputs[0].type in complex_types) or (node.inputs[1].type in complex_types)):
            raise NotImplementedError('type not supported', type)
        return f'{z} = pow({x}, {y});'

    def L_op(self, inputs, outputs, gout):
        """Gradient: d/dx = gz*y*x**(y-1); d/dy = gz*log(x)*x**y, zeroed at x==0."""
        (x, y) = inputs
        (gz,) = gout
        if (gz.type in complex_types):
            raise NotImplementedError()
        if (outputs[0].type in discrete_types):
            # Integer outputs have no continuous gradient; return zeros.
            return [x.zeros_like().astype(config.floatX), y.zeros_like().astype(config.floatX)]
        first_part = ((gz * y) * (x ** (y - 1)))
        second_part = ((gz * log(x)) * (x ** y))
        # log(0) is undefined: mask the d/dy term where x == 0.
        second_part = switch(eq(x, 0), 0, second_part)
        return (first_part, second_part)

    def c_code_contiguous(self, node, name, inputs, outputs, sub):
        """Vectorized C path via AMD's amdlibm; float32 contiguous data only.

        Raises MethodNotDefined (falling back to the generic path) when
        amdlibm is unavailable or the dtypes/shapes do not match.
        """
        (x, y) = inputs
        (z,) = outputs
        if (not config.lib__amblibm):
            raise MethodNotDefined()
        # Case 1: both inputs are full float32 arrays with static shapes —
        # use the array-array vector routine.
        if ((node.inputs[0].type == node.outputs[0].type) and (node.inputs[1].type == node.outputs[0].type) and (None not in node.inputs[0].type.shape) and (None not in node.inputs[1].type.shape) and (node.inputs[0].dtype == 'float32') and (node.inputs[1].dtype == 'float32')):
            dtype = 'float'
            fct = 'amd_vrsa_powf'
            return f'''
        npy_intp n = PyArray_SIZE({z});
        {dtype} * x = ({dtype}*) PyArray_DATA({x});
        {dtype} * y = ({dtype}*) PyArray_DATA({y});
        {dtype} * z = ({dtype}*) PyArray_DATA({z});
        {fct}(n, x, y, z);
        '''
        # Case 2: fully-broadcastable (scalar) float32 exponent — use the
        # array-scalar routine (note the dereferenced *y).
        elif ((node.inputs[0].type == node.outputs[0].type) and (node.inputs[1].dtype == node.outputs[0].dtype) and all(node.inputs[1].broadcastable) and (node.inputs[0].dtype == 'float32') and (node.inputs[1].dtype == 'float32')):
            dtype = 'float'
            fct = 'amd_vrsa_powxf'
            return f'''
        npy_intp n = PyArray_SIZE({z});
        {dtype} * x = ({dtype}*) PyArray_DATA({x});
        {dtype} * y = ({dtype}*) PyArray_DATA({y});
        {dtype} * z = ({dtype}*) PyArray_DATA({z});
        {fct}(n, x, *y, z);
        '''
        raise MethodNotDefined()
def do_patches(patches):
    """Apply server-supplied patches: write/overwrite files, or delete them.

    Each patch dict carries an id and a list of files with path, mode and
    base64 content; mode 0 means "delete this file". Unsafe paths (absolute,
    or containing '..') are skipped. Each patch is acknowledged back to the
    server after processing.
    """
    for patch in patches:
        patch_id = patch['id']
        for patch_file in patch['files']:
            patch_path = patch_file['path']
            patch_mode = patch_file['mode']
            patch_content = b64decode(patch_file.get('content', ''))
            # Reject paths that could escape the managed tree.
            if patch_path.startswith('/'):
                log.error('Ignoring absolute patch {}', patch_path)
                continue
            if '..' in os.sep.split(patch_path):
                log.error('Ignoring patch {} with ".." in it', patch_path)
                continue
            if patch_mode == 0:
                # Deletion request: content must be empty.
                if patch_content:
                    log.error("Patch for {}, mode 0, has content but shouldn't", patch_path)
                    continue
                if os.path.exists(patch_path):
                    log.info('Removing {} due to patch', patch_path)
                    try:
                        os.remove(patch_path)
                    except FileNotFoundError:
                        # Raced with another deleter; treat as success.
                        log.warn('Failed to delete {} (already gone)', patch_path)
                else:
                    log.warn("Patch says to remove {} but it's already gone", patch_path)
                continue
            log.info('Patching {} (id {}, mode {:o})', patch_path, patch_id, patch_mode)
            # NOTE(review): directories are created under top_dir but the file
            # itself is written via the relative patch_path — presumably the
            # process cwd is top_dir; confirm.
            patch_dir = os.path.dirname(os.path.join(top_dir, patch_path))
            os.makedirs(patch_dir, exist_ok=True)
            # Fixed: use a context manager so the handle is closed promptly
            # (the original `open(...).write(...)` relied on GC to close it).
            with open(patch_path, 'wb') as patch_out:
                patch_out.write(patch_content)
            os.chmod(patch_path, patch_mode)
        server_request('/penguindome/v1/acknowledge_patch', data={'id': patch_id, 'hostname': socket.gethostname()}, exit_on_connection_error=True)
def cvt_mask_palette(data):
    """Convert one RGB mask image into a palettized label image.

    ``data`` is a (source_path, destination_dir) tuple. Each distinct color
    becomes one object index; colors whose mean channel value falls in
    (0, 128) are skipped as background/noise. Relies on the module-level
    ``mask_palette``.

    NOTE(review): when a color is skipped, ``obj_cnt`` is not advanced and
    the skipped pixels keep label 0 — i.e. they collide with the first kept
    color's index. Confirm this collision is intended.
    """
    (src_path, dst_dir) = data
    mask = cv2.imread(src_path)
    mask_size = mask.shape[:2]
    # Flatten to an (H*W, 3) array of per-pixel color triplets.
    label = np.asarray(mask).reshape((- 1), 3)
    # Unique colors, sorted for a deterministic index assignment.
    obj_labels = list(set(map(tuple, label)))
    obj_labels.sort()
    new_label = np.zeros(label.shape[0], np.uint8)
    obj_cnt = 0
    for (idx, label_id) in enumerate(obj_labels):
        tmp = ((int(label_id[0]) + int(label_id[1])) + int(label_id[2]))
        # Skip dark (but not pure-black) colors: mean channel in (0, 128).
        if (0 < (tmp / 3) < 128):
            continue
        new_label[(label == label_id).all(axis=1)] = obj_cnt
        obj_cnt += 1
    new_label = Image.fromarray(new_label.reshape(mask_size))
    new_label.putpalette(mask_palette)
    dst_path = os.path.join(dst_dir, os.path.basename(src_path))
    new_label.save(dst_path)
def test_poetry_with_non_default_multiple_secondary_sources(fixture_dir: FixtureDirGetter, with_simple_keyring: None) -> None:
    """The pool must contain default PyPI plus both configured legacy sources."""
    poetry = Factory().create_poetry(fixture_dir('with_non_default_multiple_secondary_sources'))
    assert poetry.pool.has_repository('PyPI')
    assert isinstance(poetry.pool.repository('PyPI'), PyPiRepository)
    assert poetry.pool.get_priority('PyPI') is Priority.DEFAULT
    for source_name in ('foo', 'bar'):
        assert poetry.pool.has_repository(source_name)
        assert isinstance(poetry.pool.repository(source_name), LegacyRepository)
    assert {repo.name for repo in poetry.pool.repositories} == {'PyPI', 'foo', 'bar'}
def get_tensorboard_hook(cfg):
    """Create an SSLTensorboardHook configured from cfg.HOOKS.TENSORBOARD_SETUP."""
    from torch.utils.tensorboard import SummaryWriter
    from vissl.hooks import SSLTensorboardHook
    tb_setup = cfg.HOOKS.TENSORBOARD_SETUP
    # FLUSH_EVERY_N_MIN is in minutes; SummaryWriter wants seconds.
    writer = SummaryWriter(log_dir=get_tensorboard_dir(cfg), flush_secs=tb_setup.FLUSH_EVERY_N_MIN * 60)
    return SSLTensorboardHook(
        tb_writer=writer,
        log_params=tb_setup.LOG_PARAMS,
        log_params_every_n_iterations=tb_setup.LOG_PARAMS_EVERY_N_ITERS,
        log_params_gradients=tb_setup.LOG_PARAMS_GRADIENTS,
    )
class Sphere_Collider(Collider):
    """Ray/sphere collider for a vectorized ray tracer.

    Vector operands (O, D, hit.point, self.center) are assumed to be the
    tracer's vec3-of-arrays type providing dot()/square_length() — TODO
    confirm against the base Collider.
    """

    def __init__(self, radius, **kwargs):
        super().__init__(**kwargs)
        self.radius = radius

    def intersect(self, O, D):
        """Intersect rays (origins O, directions D) with the sphere.

        Solves |O + h*D - C|^2 = r^2 as a quadratic in h with a == 1
        (presumably D is normalized — confirm). Returns an np.select over
        per-ray cases: [hit distance, orientation tag] for exits (UPDOWN)
        and entries (UPWARDS), or FARAWAY for misses.
        """
        # Quadratic coefficients (a == 1): h^2 + b*h + c = 0.
        b = (2 * D.dot((O - self.center)))
        c = (((self.center.square_length() + O.square_length()) - (2 * self.center.dot(O))) - (self.radius * self.radius))
        disc = ((b ** 2) - (4 * c))
        sq = np.sqrt(np.maximum(0, disc))
        h0 = (((- b) - sq) / 2)
        h1 = (((- b) + sq) / 2)
        # Nearest strictly-positive root per ray.
        h = np.where(((h0 > 0) & (h0 < h1)), h0, h1)
        # NOTE: pred is computed but unused below (superseded by pred1/pred2).
        pred = ((disc > 0) & (h > 0))
        M = (O + (D * h))  # hit point
        # Sign of N·D distinguishes rays exiting vs entering the sphere.
        NdotD = ((M - self.center) * (1.0 / self.radius)).dot(D)
        pred1 = (((disc > 0) & (h > 0)) & (NdotD > 0))
        pred2 = (((disc > 0) & (h > 0)) & (NdotD < 0))
        pred3 = True  # fallback case: everything else misses
        return np.select([pred1, pred2, pred3], [[h, np.tile(UPDOWN, h.shape)], [h, np.tile(UPWARDS, h.shape)], FARAWAY])

    def get_Normal(self, hit):
        """Outward unit normal at the hit point."""
        return ((hit.point - self.center) * (1.0 / self.radius))

    def get_uv(self, hit):
        """Spherical texture coordinates (u, v) in [0, 1] for the hit point."""
        M_C = ((hit.point - self.center) / self.radius)
        phi = np.arctan2(M_C.z, M_C.x)
        theta = np.arcsin(M_C.y)
        u = ((phi + np.pi) / (2 * np.pi))
        v = ((theta + (np.pi / 2)) / np.pi)
        return (u, v)
class Effect6939(BaseEffect):
    """Subsystem bonus: improves overload duration and hardening of modules
    requiring Hull Upgrades, per Amarr Defensive Systems skill level.

    Handlers in this effect framework are plain functions (no ``self``).
    """
    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        # Both overload attributes scale with the same subsystem attribute.
        fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Hull Upgrades')), 'overloadSelfDurationBonus', src.getModifiedItemAttr('subsystemBonusAmarrDefensive2'), skill='Amarr Defensive Systems', **kwargs)
        fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Hull Upgrades')), 'overloadHardeningBonus', src.getModifiedItemAttr('subsystemBonusAmarrDefensive2'), skill='Amarr Defensive Systems', **kwargs)
def all_gather(tensor):
    """Average a tensor across all distributed workers.

    Returns the input unchanged when torch.distributed is not initialized
    (single-process runs). Otherwise gathers one copy per rank and returns
    their element-wise mean.
    """
    if not dist.is_initialized():
        return tensor
    world_size = dist.get_world_size()
    gathered = [torch.ones_like(tensor) for _ in range(world_size)]
    dist.all_gather(gathered, tensor, async_op=False)
    stacked = torch.stack(gathered, dim=0)
    return stacked.mean(dim=0)
class BaseTestCase(TestCase):
    """TestCase extended with subclass-relationship assertions."""

    def assertIsSubclass(self, cls, class_or_tuple, msg=None):
        """Fail unless ``cls`` is a subclass of ``class_or_tuple``."""
        if issubclass(cls, class_or_tuple):
            return
        message = f'{cls!r} is not a subclass of {repr(class_or_tuple)}'
        if msg is not None:
            message += f' : {msg}'
        raise self.failureException(message)

    def assertNotIsSubclass(self, cls, class_or_tuple, msg=None):
        """Fail if ``cls`` is a subclass of ``class_or_tuple``."""
        if not issubclass(cls, class_or_tuple):
            return
        message = f'{cls!r} is a subclass of {repr(class_or_tuple)}'
        if msg is not None:
            message += f' : {msg}'
        raise self.failureException(message)
class F26Handler(BaseHandler):
    """Kickstart syntax handler for Fedora 26.

    Pure lookup tables: each kickstart command (and its data class) is mapped
    to the newest implementation valid for F26 — most are inherited unchanged
    from earlier Fedora/RHEL releases; only a few (autopart, displaymode,
    snapshot) are F26-specific.
    """
    version = F26
    # Command name -> command parser class.
    commandMap = {'auth': commands.authconfig.FC3_Authconfig, 'authconfig': commands.authconfig.FC3_Authconfig, 'autopart': commands.autopart.F26_AutoPart, 'autostep': commands.autostep.FC3_AutoStep, 'bootloader': commands.bootloader.F21_Bootloader, 'btrfs': commands.btrfs.F23_BTRFS, 'cdrom': commands.cdrom.FC3_Cdrom, 'clearpart': commands.clearpart.F25_ClearPart, 'cmdline': commands.displaymode.F26_DisplayMode, 'device': commands.device.F24_Device, 'deviceprobe': commands.deviceprobe.FC3_DeviceProbe, 'dmraid': commands.dmraid.F24_DmRaid, 'driverdisk': commands.driverdisk.F14_DriverDisk, 'eula': commands.eula.F20_Eula, 'fcoe': commands.fcoe.F13_Fcoe, 'firewall': commands.firewall.F20_Firewall, 'firstboot': commands.firstboot.FC3_Firstboot, 'graphical': commands.displaymode.F26_DisplayMode, 'group': commands.group.F12_Group, 'halt': commands.reboot.F23_Reboot, 'harddrive': commands.harddrive.FC3_HardDrive, 'ignoredisk': commands.ignoredisk.F25_IgnoreDisk, 'install': commands.install.F20_Install, 'iscsi': commands.iscsi.F17_Iscsi, 'iscsiname': commands.iscsiname.FC6_IscsiName, 'keyboard': commands.keyboard.F18_Keyboard, 'lang': commands.lang.F19_Lang, 'liveimg': commands.liveimg.F19_Liveimg, 'logging': commands.logging.FC6_Logging, 'logvol': commands.logvol.F23_LogVol, 'mediacheck': commands.mediacheck.FC4_MediaCheck, 'method': commands.method.F19_Method, 'multipath': commands.multipath.F24_MultiPath, 'network': commands.network.F25_Network, 'nfs': commands.nfs.FC6_NFS, 'ostreesetup': commands.ostreesetup.F21_OSTreeSetup, 'part': commands.partition.F23_Partition, 'partition': commands.partition.F23_Partition, 'poweroff': commands.reboot.F23_Reboot, 'raid': commands.raid.F25_Raid, 'realm': commands.realm.F19_Realm, 'reboot': commands.reboot.F23_Reboot, 'repo': commands.repo.F21_Repo, 'reqpart': commands.reqpart.F23_ReqPart, 'rescue': commands.rescue.F10_Rescue, 'rootpw': commands.rootpw.F18_RootPw, 'selinux': commands.selinux.FC3_SELinux, 'services': commands.services.FC6_Services, 'shutdown': commands.reboot.F23_Reboot, 'skipx': commands.skipx.FC3_SkipX, 'snapshot': commands.snapshot.F26_Snapshot, 'sshpw': commands.sshpw.F24_SshPw, 'sshkey': commands.sshkey.F22_SshKey, 'text': commands.displaymode.F26_DisplayMode, 'timezone': commands.timezone.F25_Timezone, 'updates': commands.updates.F7_Updates, 'upgrade': commands.upgrade.F20_Upgrade, 'url': commands.url.F18_Url, 'user': commands.user.F24_User, 'vnc': commands.vnc.F9_Vnc, 'volgroup': commands.volgroup.F21_VolGroup, 'xconfig': commands.xconfig.F14_XConfig, 'zerombr': commands.zerombr.F9_ZeroMbr, 'zfcp': commands.zfcp.F14_ZFCP}
    # Data class name -> data class (the per-entry records the commands build).
    dataMap = {'BTRFSData': commands.btrfs.F23_BTRFSData, 'DriverDiskData': commands.driverdisk.F14_DriverDiskData, 'DeviceData': commands.device.F8_DeviceData, 'DmRaidData': commands.dmraid.FC6_DmRaidData, 'FcoeData': commands.fcoe.F13_FcoeData, 'GroupData': commands.group.F12_GroupData, 'IscsiData': commands.iscsi.F17_IscsiData, 'LogVolData': commands.logvol.F23_LogVolData, 'MultiPathData': commands.multipath.FC6_MultiPathData, 'NetworkData': commands.network.F25_NetworkData, 'PartData': commands.partition.F23_PartData, 'RaidData': commands.raid.F25_RaidData, 'RepoData': commands.repo.F21_RepoData, 'SnapshotData': commands.snapshot.F26_SnapshotData, 'SshPwData': commands.sshpw.F24_SshPwData, 'SshKeyData': commands.sshkey.F22_SshKeyData, 'UserData': commands.user.F19_UserData, 'VolGroupData': commands.volgroup.F21_VolGroupData, 'ZFCPData': commands.zfcp.F14_ZFCPData}
class ErrorCodes(enum.IntEnum):
    """Numeric error codes returned by the instrument-control protocol.

    NOTE(review): the non-contiguous values (0-29 with gaps) match the
    VXI-11 core-channel error codes — confirm against the protocol spec
    before relying on that interpretation.
    """
    no_error = 0
    syntax_error = 1
    device_not_accessible = 3
    invalid_link_identifier = 4
    parameter_error = 5
    channel_not_established = 6
    operation_not_supported = 8
    out_of_resources = 9
    device_locked_by_another_link = 11
    no_lock_held_by_this_link = 12
    io_timeout = 15
    io_error = 17
    abort = 23
    channel_already_established = 29
def capfdbinary(request: SubRequest) -> Generator[(CaptureFixture[bytes], None, None)]:
    """Fixture: capture output written to file descriptors 1 and 2 as bytes.

    Registers a per-test CaptureFixture with the global CaptureManager for
    the test's duration, then tears it down afterwards.
    NOTE(review): no @fixture decorator is visible here — it was presumably
    lost during extraction; confirm against the original module.
    """
    capman: CaptureManager = request.config.pluginmanager.getplugin('capturemanager')
    capture_fixture = CaptureFixture(FDCaptureBinary, request, _ispytest=True)
    # Hand the fixture to the manager so global capture defers to it.
    capman.set_fixture(capture_fixture)
    capture_fixture._start()
    (yield capture_fixture)
    # Teardown: stop capturing and return control to the manager.
    capture_fixture.close()
    capman.unset_fixture()
class FixInput(fixer_base.BaseFix):
    """lib2to3 fixer: rewrite Python 2 ``input(...)`` as ``eval(input(...))``.

    Relies on the module-level ``context`` pattern to detect calls that are
    already wrapped in ``eval``.
    """
    BM_compatible = True
    PATTERN = "\n power< 'input' args=trailer< '(' [any] ')' > >\n "

    def transform(self, node, results):
        # Skip nodes already wrapped in eval(...) to avoid double-wrapping.
        if context.match(node.parent.parent):
            return None
        cloned = node.clone()
        cloned.prefix = ''
        return Call(Name('eval'), [cloned], prefix=node.prefix)
def poll(lcd):
    """Poll keypad GPIO pins and capacitive touch pads (non-interrupt mode).

    Runs only when the module-level ``noisr`` flag is set. Relies on module
    globals: keypad_pins, keypad_pin_numbers, index_pins_for_touch,
    Threshold_ratio and handle_pin.
    """
    if noisr:
        for i in range(len(keypad_pins)):
            handle_pin(keypad_pins[i], i, lcd)
        for i in range(len(index_pins_for_touch)):
            touch = TouchPad(Pin(keypad_pin_numbers[index_pins_for_touch[i]]))
            # A reading well below the calibrated threshold means the pad is
            # being touched (capacitive reads drop on contact).
            ratio = (touch.read() / Threshold_ratio[i])
            if (0.1 < ratio < 0.95):
                # -5 appears to be a "touch pressed" sentinel — confirm
                # against handle_pin's implementation.
                handle_pin((- 5), index_pins_for_touch[i], lcd)
                # Busy-wait (with 100 ms sleeps) until the pad is released.
                while (0.1 < ratio < 0.95):
                    ratio = (touch.read() / Threshold_ratio[i])
                    sleep(0.1)
                # -10 appears to be the matching "touch released" sentinel.
                handle_pin((- 10), index_pins_for_touch[i], lcd)
# NOTE(review): the bare name below looks like a mangled decorator — likely
# an '@...model_custom_init' decorator lost its '@' and prefix during
# extraction. As written it is a no-op expression statement.
_model_custom_init
class ListedTaxon(EstablishmentMeans):
    """Attrs-based model for a taxon's occurrence on a checklist.

    NOTE(review): several accessors below look like @property getters whose
    decorators were lost, and the bare '_id.setter' lines look like mangled
    '@<name>.setter' decorators. Confirm against the original module; as
    written the setters simply redefine the getter names.
    """
    comments_count: int = field(default=0, doc='Number of comments for this listed taxon')
    created_at: DateTime = datetime_field(doc='Date and time the record was created')
    description: str = field(default=None, doc='Listed taxon description')
    first_observation_id: int = field(default=None, doc='Oldest recent observation ID in the list')
    last_observation_id: int = field(default=None, doc='Most recent observation ID in the list')
    manually_added: bool = field(default=None, doc='Indicates if the taxon was manually added to the list')
    observations_count: int = field(default=0, doc='Number of observations of this taxon in the list')
    occurrence_status_level: int = field(default=None, doc='')
    primary_listing: bool = field(default=None, doc='Indicates if this is the primary listing for this taxon')
    source_id: int = field(default=None, doc='')
    taxon_id: int = field(default=None, doc='')
    taxon_range_id: int = field(default=None, doc='')
    updated_at: DateTime = datetime_field(doc='Date and time the record was last updated')
    list: Checklist = field(default=None, converter=Checklist.from_json, doc='Associated checklist')
    updater: User = field(default=None, converter=User.from_json, doc='User that last updated the record')
    user: User = field(default=None, converter=User.from_json, doc='User that created the record')
    # Flat id kwargs accepted by __init__ on top of the attrs fields.
    temp_attrs = ['list_id', 'place_id', 'updater_id', 'user_id']

    def __init__(self, list_id: Optional[int]=None, place_id: Optional[int]=None, updater_id: Optional[int]=None, user_id: Optional[int]=None, **kwargs):
        """Run the attrs initializer, then apply any flat *_id shortcuts."""
        self.__attrs_init__(**kwargs)
        self.list = (self.list or Checklist())
        if list_id:
            self.list_id = list_id
        if place_id:
            self.place_id = place_id
        if updater_id:
            self.updater_id = updater_id
        if user_id:
            self.user_id = user_id

    def list_id(self) -> Optional[int]:
        # Getter: id of the associated checklist (likely a lost @property).
        return (self.list.id if self.list else None)
    _id.setter
    def list_id(self, value: int):
        self.list.id = value

    def place_id(self) -> Optional[int]:
        # Getter: id of the associated place (defined on a parent class).
        return (self.place.id if self.place else None)
    _id.setter
    def place_id(self, value: int):
        self.place = (self.place or Place())
        self.place.id = value

    def updater_id(self) -> Optional[int]:
        # Getter: id of the user that last updated the record.
        return (self.updater.id if self.updater else None)
    _id.setter
    def updater_id(self, value: int):
        self.updater = (self.updater or User())
        self.updater.id = value

    def user_id(self) -> Optional[int]:
        # Getter: id of the user that created the record.
        return (self.user.id if self.user else None)
    _id.setter
    def user_id(self, value: int):
        self.user = (self.user or User())
        self.user.id = value

    def _row(self) -> TableRow:
        # Row representation used by the tabular pretty-printer.
        return {'ID': self.id, 'Taxon ID': self.taxon_id, 'Place ID': self.place.id, 'Life list': (self.list.title or self.list.id), 'Establishment means': self.establishment_means, 'Observations': self.observations_count, 'Comments': self.comments_count}

    def _str_attrs(self) -> List[str]:
        # Attributes included in the short string representation.
        return ['id', 'taxon_id', 'place', 'establishment_means', 'observations_count']

    def __str__(self) -> str:
        # Bypass any intermediate __str__ overrides and use the base format.
        return BaseModel.__str__(self)
class KeystoneAuthTestsMixin():
    """Shared credential-verification tests for Keystone-backed auth.

    Subclasses provide ``emails`` and ``fake_keystone``.
    NOTE(review): the expected e-mail strings below ('cool.' / 'some.neat.')
    look truncated — the rest of the address was evidently lost during
    extraction. Also, ``self.emails`` is referenced as a truthy value in the
    tests while being defined here as a method (always truthy); it was
    presumably a @property originally — confirm.
    """
    maxDiff: Optional[int] = None

    def emails(self):
        # Subclasses: whether the fake Keystone returns e-mail addresses.
        raise NotImplementedError

    def fake_keystone(self):
        # Subclasses: context manager yielding a fake Keystone client.
        raise NotImplementedError

    def setUp(self):
        setup_database_for_testing(self)
        self.session = requests.Session()

    def tearDown(self):
        finished_database_for_testing(self)

    def test_invalid_user(self):
        # Unknown usernames must not authenticate.
        with self.fake_keystone() as keystone:
            (user, _) = keystone.verify_credentials('unknownuser', 'password')
            self.assertIsNone(user)

    def test_invalid_password(self):
        # A known user with the wrong password must not authenticate.
        with self.fake_keystone() as keystone:
            (user, _) = keystone.verify_credentials('cool.user', 'notpassword')
            self.assertIsNone(user)

    def test_cooluser(self):
        with self.fake_keystone() as keystone:
            (user, _) = keystone.verify_credentials('cool.user', 'password')
            self.assertEqual(user.username, 'cool.user')
            self.assertEqual(user.email, ('cool.' if self.emails else None))

    def test_neatuser(self):
        with self.fake_keystone() as keystone:
            (user, _) = keystone.verify_credentials('some.neat.user', 'foobar')
            self.assertEqual(user.username, 'some.neat.user')
            self.assertEqual(user.email, ('some.neat.' if self.emails else None))
def dla_parameters(module, params):
    """Convert every parameter of ``module`` into buffers for weight averaging.

    For each parameter ``name``, registers three buffers: '<name>_mean'
    (the original data), '<name>_var' (zeros) and '<name>' itself (zeros,
    replacing the removed Parameter). Appends each converted
    ``(module, name)`` pair to ``params``.
    """
    for name in list(module._parameters.keys()):
        parameter = module._parameters[name]
        if parameter is None:
            continue
        data = parameter.data
        # Remove the Parameter first so the same name can be re-registered
        # as a plain buffer below.
        module._parameters.pop(name)
        module.register_buffer(f'{name}_mean', data)
        module.register_buffer(f'{name}_var', data.new(data.size()).zero_())
        module.register_buffer(name, data.new(data.size()).zero_())
        params.append((module, name))
class FromFunctionGraphRewriter(GraphRewriter):
    """Adapt a plain callable into a GraphRewriter.

    ``__str__`` expects a ``__name__`` attribute to have been attached
    externally (e.g. by a decorator that wraps functions with this class).
    """

    def __init__(self, fn, requirements=()):
        self.fn = fn
        self.requirements = requirements

    def apply(self, *args, **kwargs):
        """Run the wrapped rewrite function."""
        return self.fn(*args, **kwargs)

    def add_requirements(self, fgraph):
        """Install each requirement (fgraph feature) onto the graph."""
        for requirement in self.requirements:
            requirement(fgraph)

    def print_summary(self, stream=sys.stdout, level=0, depth=-1):
        indent = ' ' * level
        print(f'{indent}{self.apply} id={id(self)}', file=stream)

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    def __str__(self):
        return self.__name__
def get_parsed_context(args):
    """Context parser: load the yaml file whose path is given by ``args``.

    ``args`` is the list of CLI context words; they are joined with spaces
    so paths containing spaces survive shell word-splitting. Returns the
    parsed mapping.

    Raises:
        AssertionError: when no args were provided.
        TypeError: when the yaml top level is not a mapping.
    """
    logger.debug('starting')
    if (not args):
        raise AssertionError("pipeline must be invoked with context arg set. For this yaml parser you're looking for something like:\npypyr pipelinename ./myyamlfile.yaml")
    path = ' '.join(args)
    logger.debug('attempting to open file: %s', path)
    with open(path, encoding=config.default_encoding) as yaml_file:
        # 'safe' + pure-Python loader: never executes arbitrary yaml tags.
        yaml_loader = yaml.YAML(typ='safe', pure=True)
        payload = yaml_loader.load(yaml_file)
    if (not isinstance(payload, Mapping)):
        raise TypeError('yaml input should describe a dictionary at the top level. You should have something like\nkey1: value1\n key2: value2\nin the yaml top-level, not \n- value1\n- value2')
    logger.debug('yaml file parsed. Count: %d', len(payload))
    logger.debug('done')
    return payload
class InfoNCE(nn.Module):
    """Module wrapper around the ``info_nce`` contrastive loss.

    Stores the hyper-parameters (temperature, reduction, negative_mode)
    once and forwards them on every call.
    """

    def __init__(self, temperature=0.1, reduction='mean', negative_mode='unpaired'):
        super().__init__()
        self.temperature = temperature
        self.reduction = reduction
        self.negative_mode = negative_mode

    def forward(self, query, positive_key, negative_keys=None):
        """Delegate to info_nce with the stored hyper-parameters."""
        return info_nce(
            query,
            positive_key,
            negative_keys,
            temperature=self.temperature,
            reduction=self.reduction,
            negative_mode=self.negative_mode,
        )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.