code stringlengths 281 23.7M |
|---|
def do_setup() -> int:
    """Write the versioneer version file and package boilerplate.

    Reads the [versioneer] configuration from setup.cfg, generates
    ``versionfile_source``, ensures the package ``__init__.py`` carries the
    version snippet, then installs the VCS hooks.

    Returns 0 on success, 1 if configuration is missing or incomplete
    (a sample config is appended to setup.cfg in that case).
    """
    root = get_root()
    try:
        cfg = get_config_from_root(root)
    except (OSError, configparser.NoSectionError, configparser.NoOptionError) as e:
        if isinstance(e, (OSError, configparser.NoSectionError)):
            # No setup.cfg or no [versioneer] section: seed a sample config
            # for the user to edit before retrying.
            print('Adding sample versioneer config to setup.cfg', file=sys.stderr)
            with open(os.path.join(root, 'setup.cfg'), 'a') as f:
                f.write(SAMPLE_CONFIG)
        print(CONFIG_ERROR, file=sys.stderr)
        return 1
    print((' creating %s' % cfg.versionfile_source))
    with open(cfg.versionfile_source, 'w') as f:
        # Render the VCS-specific version module with this project's
        # configuration values substituted in.
        LONG = LONG_VERSION_PY[cfg.VCS]
        f.write((LONG % {'DOLLAR': '$', 'STYLE': cfg.style, 'TAG_PREFIX': cfg.tag_prefix, 'PARENTDIR_PREFIX': cfg.parentdir_prefix, 'VERSIONFILE_SOURCE': cfg.versionfile_source}))
    ipy = os.path.join(os.path.dirname(cfg.versionfile_source), '__init__.py')
    maybe_ipy: Optional[str] = ipy
    if os.path.exists(ipy):
        try:
            with open(ipy, 'r') as f:
                old = f.read()
        except OSError:
            old = ''
        module = os.path.splitext(os.path.basename(cfg.versionfile_source))[0]
        snippet = INIT_PY_SNIPPET.format(module)
        if (OLD_SNIPPET in old):
            # Upgrade path: swap the legacy snippet for the current one.
            print((' replacing boilerplate in %s' % ipy))
            with open(ipy, 'w') as f:
                f.write(old.replace(OLD_SNIPPET, snippet))
        elif (snippet not in old):
            print((' appending to %s' % ipy))
            with open(ipy, 'a') as f:
                f.write(snippet)
        else:
            # Snippet already present and up to date.
            print((' %s unmodified' % ipy))
    else:
        # No package __init__.py: nothing to patch, and tell the VCS
        # installer not to track one.
        print((" %s doesn't exist, ok" % ipy))
        maybe_ipy = None
    do_vcs_install(cfg.versionfile_source, maybe_ipy)
    return 0
class KinopoiskObject(object):
    """Base class for objects parsed from kinopoisk.ru pages.

    Source pages and their URLs are registered via ``register_source``;
    the class-level registries (``_urls``, ``_sources``,
    ``_source_classes``) are intentionally shared by all instances,
    acting as a per-class source registry.

    Fixes:
    * ``get_url`` contained a truncated string literal (``(' + url)`` —
      a syntax error); the site root is restored via ``BASE_URL``.
      NOTE(review): base URL reconstructed as the kinopoisk.ru root —
      confirm against the original source.
    * ``get_parsed`` takes ``cls`` and instantiates it, so it must be a
      ``@classmethod`` (previously calling it on the class passed the
      first argument as ``cls``).
    """

    # Site root prepended to every registered relative URL.
    BASE_URL = 'https://www.kinopoisk.ru'

    id = None        # kinopoisk.ru object ID
    objects = None
    # Class-level mutables: shared by design (per-class registry).
    _urls = {}
    _sources = []
    _source_classes = {}

    def __init__(self, id=None, **kwargs):
        if id:
            self.id = id
        self.set_defaults()
        # Allow arbitrary attribute seeding via keyword arguments.
        self.__dict__.update(kwargs)

    def set_defaults(self):
        """Hook for subclasses to initialise default attribute values."""
        pass

    def parse(self, name, content):
        """Parse already-fetched ``content`` with the source named ``name``."""
        self.get_source_instance(name, instance=self, content=content).parse()

    def get_content(self, name):
        """Fetch and parse the remote page of the source named ``name``."""
        self.get_source_instance(name, instance=self).get()

    @classmethod
    def get_parsed(cls, name, content):
        """Alternate constructor: new instance populated from ``content``."""
        instance = cls()
        instance.parse(name, content)
        return instance

    def register_source(self, name, class_name):
        """Register source class ``class_name`` under ``name``."""
        try:
            self.set_url(name, class_name.url)
        except AttributeError:
            # Content-only source: no URL of its own.
            pass
        self.set_source(name)
        self._source_classes[name] = class_name

    def set_url(self, name, url):
        self._urls[name] = url

    def get_url(self, name, postfix='', **kwargs):
        """Build the absolute URL of the page ``name`` for this object.

        Raises ValueError when the page is unregistered or ``self.id``
        is unset; the object's ID is always available as ``{id}`` in the
        URL template.
        """
        url = self._urls.get(name)
        if not url:
            raise ValueError('There is no urlpage with name "%s"' % name)
        if not self.id:
            raise ValueError('ID of object is empty')
        kwargs['id'] = self.id
        return (self.BASE_URL + url).format(**kwargs) + postfix

    def set_source(self, name):
        if name not in self._sources:
            self._sources += [name]

    def get_source_instance(self, name, **kwargs):
        """Instantiate the source class registered under ``name``."""
        class_name = self._source_classes.get(name)
        if not class_name:
            raise ValueError('There is no source with name "%s"' % name)
        instance = class_name(name, **kwargs)
        return instance
def ArtistList():
    """ReactPy component: a text box and button that build a de-duplicated
    list of sculptor names."""
    artist_to_add, set_artist_to_add = use_state('')
    artists, set_artists = use_state([])

    def handle_change(event):
        # Mirror the input field into component state on each keystroke.
        set_artist_to_add(event['target']['value'])

    def handle_click(event):
        # Append only non-empty, not-yet-present names, then clear the box.
        if artist_to_add and artist_to_add not in artists:
            set_artists([*artists, artist_to_add])
            set_artist_to_add('')

    items = [html.li({'key': name}, name) for name in artists]
    return html.div(
        html.h1('Inspiring sculptors:'),
        html.input({'value': artist_to_add, 'on_change': handle_change}),
        html.button({'on_click': handle_click}, 'add'),
        html.ul(items),
    )
def run_and_save(n: int, depth: int, n_data: int, batch_size: int, n_shots: int, save_dir: str, use_engine: bool) -> None:
    """Generate scrambled and T-symmetric circuits, execute them in batches,
    and save each circuit's samples as a ``.npy`` file under ``save_dir``.

    File names encode size, depth, circuit type (0 = scrambled, 1 = tsym),
    batch offset and index within the batch.
    """
    logging.info('Beginning conventional circuit generation for Weber.')
    # Restrict the device pair list to the first n qubit pairs.
    system_pairs = run_config.qubit_pairs()[:n]
    to_run_scramb = [_build_circuit(system_pairs, False, depth) for _ in range(n_data)]
    to_run_tsym = [_build_circuit(system_pairs, True, depth) for _ in range(n_data)]
    total = len(to_run_tsym) + len(to_run_scramb)
    logging.info(f'Circuit generation complete. Generated {total} total circuits')
    # Column order used to extract measurement data for every circuit.
    qubit_order = [f'q{i}' for i in range(n)]
    for k in range(0, n_data, batch_size):
        logging.info(f'Running batch: [{k}-{k + batch_size}) / {n_data}')
        for is_tsym, circuits in ((0, to_run_scramb), (1, to_run_tsym)):
            batch = circuits[k:k + batch_size]
            results = run_config.execute_batch(batch, n_shots, use_engine)
            for j, single_circuit_samples in enumerate(results):
                name0 = f'1D-scramble-C-size-{n}-depth-{depth}-type-{is_tsym}-batch-{k}-number-{j}'
                out0 = single_circuit_samples.data[qubit_order].to_numpy()
                np.save(os.path.join(save_dir, name0), out0)
                logging.debug('Saved: ' + name0)
def make_layers(cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequential:
    """Build a VGG-style feature extractor from the layer spec ``cfg``.

    Each integer entry adds a 3x3 ``MetaConv2d`` (optionally followed by
    ``MetaBatchNorm2d``) plus ReLU; the string ``'M'`` adds a 2x2
    max-pooling layer. Input is assumed to have 3 channels.
    """
    layers = []
    in_channels = 3
    for item in cfg:
        if item == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        out_channels = cast(int, item)
        conv2d = MetaConv2d(in_channels, out_channels, kernel_size=3, padding=1)
        if batch_norm:
            layers.extend([conv2d, MetaBatchNorm2d(out_channels), nn.ReLU(inplace=True)])
        else:
            layers.extend([conv2d, nn.ReLU(inplace=True)])
        in_channels = out_channels
    return nn.Sequential(*layers)
class AdvancedSubtensor1(COp):
    """Implement ``x[ilist]``: advanced indexing of a tensor along axis 0
    by a 1-D vector of integer indices (like ``np.take(x, ilist, axis=0)``).
    """
    __props__ = ()
    _f16_ok = True
    check_input = False

    def __init__(self, sparse_grad=False):
        # When True, grad() returns a sparse gradient (2-D inputs only).
        self.sparse_grad = sparse_grad

    def make_node(self, x, ilist):
        """Validate that ``ilist`` is a 1-D integer vector and ``x`` is not
        a scalar, then build the Apply node."""
        x_ = as_tensor_variable(x)
        ilist_ = as_tensor_variable(ilist)
        if (ilist_.type.dtype not in integer_dtypes):
            raise TypeError('index must be integers')
        if (ilist_.type.ndim != 1):
            raise TypeError('index must be vector')
        if (x_.type.ndim == 0):
            raise TypeError('cannot index into a scalar')
        # Output shape: the index-vector length along axis 0, then x's
        # remaining dimensions.
        out_shape = ((ilist_.type.shape[0],) + x_.type.shape[1:])
        # Only size-1 (broadcastable) dims stay static; others are unknown.
        out_shape = tuple(((1 if (s == 1) else None) for s in out_shape))
        return Apply(self, [x_, ilist_], [TensorType(dtype=x.dtype, shape=out_shape)()])

    def perform(self, node, inp, out_):
        """Python implementation: ``np.take`` along axis 0, reusing the
        previous output buffer when its shape still matches."""
        (x, i) = inp
        (out,) = out_
        # Reuse the old output array as take's ``out`` when shapes match.
        if ((out[0] is not None) and (out[0].shape == ((len(i),) + x.shape[1:]))):
            o = out[0]
        else:
            o = None
        if (i.dtype != np.intp):
            # Cast indices to the pointer-sized int expected by take.
            i_ = _asarray(i, dtype=np.intp)
            if (not np.can_cast(i.dtype, np.intp)):
                # The cast may truncate; only error if values actually changed.
                if np.any((i != i_)):
                    raise IndexError('index contains values that are bigger than the maximum array size on this system.', i)
            i = i_
        out[0] = x.take(i, axis=0, out=o)

    def connection_pattern(self, node):
        # Gradient flows only through x; the index input is disconnected.
        rval = [[True]]
        for ipt in node.inputs[1:]:
            rval.append([False])
        return rval

    def grad(self, inputs, grads):
        """Gradient: scatter-add ``gz`` into a zero tensor at ``ilist``
        (or build a sparse gradient when ``sparse_grad`` is set)."""
        (x, ilist) = inputs
        (gz,) = grads
        assert (len(inputs) == 2)
        if self.sparse_grad:
            if (x.type.ndim != 2):
                raise TypeError(("AdvancedSubtensor1: you can't take the sparse grad from a tensor with ndim != 2. ndim is " + str(x.type.ndim)))
            rval1 = [pytensor.sparse.construct_sparse_from_list(x, gz, ilist)]
        else:
            if (x.dtype in discrete_dtypes):
                # Discrete inputs get a float zero gradient.
                gx = x.zeros_like(dtype=config.floatX)
            elif (x.dtype in complex_dtypes):
                raise NotImplementedError('No support for complex grad yet')
            else:
                gx = x.zeros_like()
            rval1 = [advanced_inc_subtensor1(gx, gz, ilist)]
        # Disconnect the index input(s) from the gradient.
        return (rval1 + ([DisconnectedType()()] * (len(inputs) - 1)))

    def R_op(self, inputs, eval_points):
        # Indexing is linear in x, so the R-op just re-applies the op.
        if (eval_points[0] is None):
            return [None]
        return self.make_node(eval_points[0], *inputs[1:]).outputs

    def infer_shape(self, fgraph, node, ishapes):
        (x, ilist) = ishapes
        # Output shape = index length followed by x's trailing dims.
        return [(ilist + x[1:])]

    def c_support_code(self, **kwargs):
        # Helper macro used by the generated C code.
        return dedent(' #ifndef MIN_LONG\n #define MIN_LONG NPY_MIN_LONG\n #endif')

    def c_code(self, node, name, input_names, output_names, sub):
        """C implementation via ``PyArray_TakeFrom``: casts indices to
        NPY_INTP with range checking, reusing the output buffer when its
        shape matches."""
        if (self.__class__ is not AdvancedSubtensor1):
            # Subclasses must provide their own c_code.
            raise MethodNotDefined('c_code defined for AdvancedSubtensor1, not for child class', type(self))
        (a_name, i_name) = (input_names[0], input_names[1])
        output_name = output_names[0]
        fail = sub['fail']
        return ('\n PyArrayObject *indices;\n int i_type = PyArray_TYPE(%(i_name)s);\n if (i_type != NPY_INTP) {\n // Cast %(i_name)s to NPY_INTP (expected by PyArray_TakeFrom),\n // if all values fit.\n if (!PyArray_CanCastSafely(i_type, NPY_INTP) &&\n PyArray_SIZE(%(i_name)s) > 0) {\n npy_int64 min_val, max_val;\n PyObject* py_min_val = PyArray_Min(%(i_name)s, NPY_MAXDIMS,\n NULL);\n if (py_min_val == NULL) {\n %(fail)s;\n }\n min_val = PyLong_AsLongLong(py_min_val);\n Py_DECREF(py_min_val);\n if (min_val == -1 && PyErr_Occurred()) {\n %(fail)s;\n }\n PyObject* py_max_val = PyArray_Max(%(i_name)s, NPY_MAXDIMS,\n NULL);\n if (py_max_val == NULL) {\n %(fail)s;\n }\n max_val = PyLong_AsLongLong(py_max_val);\n Py_DECREF(py_max_val);\n if (max_val == -1 && PyErr_Occurred()) {\n %(fail)s;\n }\n if (min_val < NPY_MIN_INTP || max_val > NPY_MAX_INTP) {\n PyErr_SetString(PyExc_IndexError,\n "Index contains values "\n "that are bigger than the maximum array "\n "size on this system.");\n %(fail)s;\n }\n }\n indices = (PyArrayObject*) PyArray_Cast(%(i_name)s, NPY_INTP);\n if (indices == NULL) {\n %(fail)s;\n }\n }\n else {\n indices = %(i_name)s;\n Py_INCREF(indices);\n }\n if (%(output_name)s != NULL) {\n npy_intp nd, i, *shape;\n nd = PyArray_NDIM(%(a_name)s) + PyArray_NDIM(indices) - 1;\n if (PyArray_NDIM(%(output_name)s) != nd) {\n Py_CLEAR(%(output_name)s);\n }\n else {\n shape = PyArray_DIMS(%(output_name)s);\n for (i = 0; i < PyArray_NDIM(indices); i++) {\n if (shape[i] != PyArray_DIMS(indices)[i]) {\n Py_CLEAR(%(output_name)s);\n break;\n }\n }\n if (%(output_name)s != NULL) {\n for (; i < nd; i++) {\n if (shape[i] != PyArray_DIMS(%(a_name)s)[\n i-PyArray_NDIM(indices)+1]) {\n Py_CLEAR(%(output_name)s);\n break;\n }\n }\n }\n }\n }\n %(output_name)s = (PyArrayObject*)PyArray_TakeFrom(\n %(a_name)s, (PyObject*)indices, 0, %(output_name)s, NPY_RAISE);\n Py_DECREF(indices);\n if (%(output_name)s == NULL) %(fail)s;\n ' % locals())

    def c_code_cache_version(self):
        # Bump when the C implementation changes, to invalidate the cache.
        return (0, 1, 2)
# NOTE(review): assumes `from dataclasses import dataclass, field` at the top
# of the file (``field`` is already used below) — confirm the import.
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer to fine-tune or
    train from scratch.

    Fixed: the ``@dataclass`` decorator was missing, so the ``field(...)``
    defaults were left as bare ``Field`` objects on the class instead of
    becoming proper instance attributes (and HfArgumentParser could not
    introspect them).
    """

    model_name_or_path: Optional[str] = field(default=None, metadata={'help': "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."})
    model_type: Optional[str] = field(default=None, metadata={'help': ('If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES))})
    config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'})
    use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'})
    dtype: Optional[str] = field(default='bfloat16', metadata={'help': 'Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`.'})
    use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `huggingface-cli login` (necessary to use this script with private models).'})
    num_partitions: int = field(default=1, metadata={'help': 'Number of partitions to split the model into.'})
# NOTE(review): assumes ``import pytest`` exists at module top — confirm.
# Fixed: the decorator line was truncated to bare ``.parametrize`` (a syntax
# error); restored the full ``@pytest.mark.parametrize`` form.
@pytest.mark.parametrize('orientation', ['vertical', 'horizontal'])
def test_clip_to_plot_data_item(orientation):
    """A LinearRegionItem with ``clipItem`` set should clip its bounds to
    the data extent of the given PlotDataItem along its orientation."""
    init_vals = (-1.5, 1.5)
    x = np.linspace(-1, 1, 10)
    y = np.linspace(1, 1.2, 10)
    p = pg.PlotWidget()
    pdi = p.plot(x=x, y=y)
    lr = pg.LinearRegionItem(init_vals, clipItem=pdi, orientation=orientation)
    p.addItem(lr)
    app.processEvents()
    if orientation == 'vertical':
        # Vertical region spans x: expect clipping to the data's x extent.
        check_region(lr, x[[0, -1]])
    else:
        # Horizontal region spans y: expect clipping to the y extent.
        check_region(lr, y[[0, -1]])
def test_call_on_instance_with_inherited_dunder_call_method() -> None:
    """Calling an instance whose ``__call__`` is inherited from a base class
    should still infer as an Instance of the subclass."""
    # NOTE(review): the embedded snippet's internal indentation (and any
    # astroid ``#@`` marker) appears mangled in this copy — confirm against
    # the original test before relying on it.
    node = extract_node('\n class Base:\n def __call__(self):\n return self\n\n class Sub(Base):\n pass\n obj = Sub()\n val = obj()\n val #\n ')
    assert isinstance(node, nodes.NodeNG)
    # Exactly one inferred value is expected for the call result.
    [val] = node.inferred()
    assert isinstance(val, Instance)
    assert (val.name == 'Sub')
class Migration(migrations.Migration):
    """Initial migration: create the custom ``User`` model (email-unique
    login, profile fields, and an inline ISO-3166 country choices table)."""

    initial = True

    # auth's proxy-permissions update must be applied first.
    dependencies = [('auth', '0011_update_proxy_permissions')]

    # Single CreateModel operation; the ``country`` field inlines the full
    # country list as its choices.
    operations = [migrations.CreateModel(name='User', fields=[('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('password', models.CharField(max_length=128, verbose_name='password')), ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')), ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')), ('username', models.CharField(blank=True, max_length=100, null=True, verbose_name='username')), ('email', models.EmailField(max_length=254, unique=True, verbose_name='email address')), ('full_name', models.CharField(blank=True, max_length=300, verbose_name='full name')), ('name', models.CharField(blank=True, max_length=300, verbose_name='name')), ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')), ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')), ('gender', models.CharField(blank=True, choices=[('male', 'Male'), ('female', 'Female')], max_length=10, verbose_name='gender')), ('date_birth', models.DateField(null=True, verbose_name='date of birth')), ('open_to_recruiting', models.BooleanField(default=False, verbose_name='open to recruiting')), ('open_to_newsletter', models.BooleanField(default=False, verbose_name='open to newsletter')), ('country', models.CharField(blank=True, choices=[{'code': 'AW', 'name': 'Aruba'}, {'code': 'AF', 'name': 'Afghanistan'}, {'code': 'AO', 'name': 'Angola'}, {'code': 'AI', 'name': 'Anguilla'}, {'code': 'AX', 'name': 'Aland Islands'}, {'code': 'AL', 'name': 'Albania'}, {'code': 'AD', 'name': 'Andorra'}, {'code': 'AE', 'name': 'United Arab Emirates'}, {'code': 'AR', 'name': 'Argentina'}, {'code': 'AM', 'name': 'Armenia'}, {'code': 'AS', 'name': 'American Samoa'}, {'code': 'AQ', 'name': 'Antarctica'}, {'code': 'TF', 'name': 'French Southern Territories'}, {'code': 'AG', 'name': 'Antigua and Barbuda'}, {'code': 'AU', 'name': 'Australia'}, {'code': 'AT', 'name': 'Austria'}, {'code': 'AZ', 'name': 'Azerbaijan'}, {'code': 'BI', 'name': 'Burundi'}, {'code': 'BE', 'name': 'Belgium'}, {'code': 'BJ', 'name': 'Benin'}, {'code': 'BQ', 'name': 'Bonaire, Sint Eustatius and Saba'}, {'code': 'BF', 'name': 'Burkina Faso'}, {'code': 'BD', 'name': 'Bangladesh'}, {'code': 'BG', 'name': 'Bulgaria'}, {'code': 'BH', 'name': 'Bahrain'}, {'code': 'BS', 'name': 'Bahamas'}, {'code': 'BA', 'name': 'Bosnia and Herzegovina'}, {'code': 'BL', 'name': 'Saint Barthelemy'}, {'code': 'BY', 'name': 'Belarus'}, {'code': 'BZ', 'name': 'Belize'}, {'code': 'BM', 'name': 'Bermuda'}, {'code': 'BO', 'name': 'Bolivia, Plurinational State of'}, {'code': 'BR', 'name': 'Brazil'}, {'code': 'BB', 'name': 'Barbados'}, {'code': 'BN', 'name': 'Brunei Darussalam'}, {'code': 'BT', 'name': 'Bhutan'}, {'code': 'BV', 'name': 'Bouvet Island'}, {'code': 'BW', 'name': 'Botswana'}, {'code': 'CF', 'name': 'Central African Republic'}, {'code': 'CA', 'name': 'Canada'}, {'code': 'CC', 'name': 'Cocos (Keeling) Islands'}, {'code': 'CH', 'name': 'Switzerland'}, {'code': 'CL', 'name': 'Chile'}, {'code': 'CN', 'name': 'China'}, {'code': 'CI', 'name': "Cote d'Ivoire"}, {'code': 'CM', 'name': 'Cameroon'}, {'code': 'CD', 'name': 'Congo, The Democratic Republic of the'}, {'code': 'CG', 'name': 'Congo'}, {'code': 'CK', 'name': 'Cook Islands'}, {'code': 'CO', 'name': 'Colombia'}, {'code': 'KM', 'name': 'Comoros'}, {'code': 'CV', 'name': 'Cabo Verde'}, {'code': 'CR', 'name': 'Costa Rica'}, {'code': 'CU', 'name': 'Cuba'}, {'code': 'CW', 'name': 'Curacao'}, {'code': 'CX', 'name': 'Christmas Island'}, {'code': 'KY', 'name': 'Cayman Islands'}, {'code': 'CY', 'name': 'Cyprus'}, {'code': 'CZ', 'name': 'Czechia'}, {'code': 'DE', 'name': 'Germany'}, {'code': 'DJ', 'name': 'Djibouti'}, {'code': 'DM', 'name': 'Dominica'}, {'code': 'DK', 'name': 'Denmark'}, {'code': 'DO', 'name': 'Dominican Republic'}, {'code': 'DZ', 'name': 'Algeria'}, {'code': 'EC', 'name': 'Ecuador'}, {'code': 'EG', 'name': 'Egypt'}, {'code': 'ER', 'name': 'Eritrea'}, {'code': 'EH', 'name': 'Western Sahara'}, {'code': 'ES', 'name': 'Spain'}, {'code': 'EE', 'name': 'Estonia'}, {'code': 'ET', 'name': 'Ethiopia'}, {'code': 'FI', 'name': 'Finland'}, {'code': 'FJ', 'name': 'Fiji'}, {'code': 'FK', 'name': 'Falkland Islands (Malvinas)'}, {'code': 'FR', 'name': 'France'}, {'code': 'FO', 'name': 'Faroe Islands'}, {'code': 'FM', 'name': 'Micronesia, Federated States of'}, {'code': 'GA', 'name': 'Gabon'}, {'code': 'GB', 'name': 'United Kingdom'}, {'code': 'GE', 'name': 'Georgia'}, {'code': 'GG', 'name': 'Guernsey'}, {'code': 'GH', 'name': 'Ghana'}, {'code': 'GI', 'name': 'Gibraltar'}, {'code': 'GN', 'name': 'Guinea'}, {'code': 'GP', 'name': 'Guadeloupe'}, {'code': 'GM', 'name': 'Gambia'}, {'code': 'GW', 'name': 'Guinea-Bissau'}, {'code': 'GQ', 'name': 'Equatorial Guinea'}, {'code': 'GR', 'name': 'Greece'}, {'code': 'GD', 'name': 'Grenada'}, {'code': 'GL', 'name': 'Greenland'}, {'code': 'GT', 'name': 'Guatemala'}, {'code': 'GF', 'name': 'French Guiana'}, {'code': 'GU', 'name': 'Guam'}, {'code': 'GY', 'name': 'Guyana'}, {'code': 'HK', 'name': 'Hong Kong'}, {'code': 'HM', 'name': 'Heard Island and McDonald Islands'}, {'code': 'HN', 'name': 'Honduras'}, {'code': 'HR', 'name': 'Croatia'}, {'code': 'HT', 'name': 'Haiti'}, {'code': 'HU', 'name': 'Hungary'}, {'code': 'ID', 'name': 'Indonesia'}, {'code': 'IM', 'name': 'Isle of Man'}, {'code': 'IN', 'name': 'India'}, {'code': 'IO', 'name': 'British Indian Ocean Territory'}, {'code': 'IE', 'name': 'Ireland'}, {'code': 'IR', 'name': 'Iran, Islamic Republic of'}, {'code': 'IQ', 'name': 'Iraq'}, {'code': 'IS', 'name': 'Iceland'}, {'code': 'IL', 'name': 'Israel'}, {'code': 'IT', 'name': 'Italy'}, {'code': 'JM', 'name': 'Jamaica'}, {'code': 'JE', 'name': 'Jersey'}, {'code': 'JO', 'name': 'Jordan'}, {'code': 'JP', 'name': 'Japan'}, {'code': 'KZ', 'name': 'Kazakhstan'}, {'code': 'KE', 'name': 'Kenya'}, {'code': 'KG', 'name': 'Kyrgyzstan'}, {'code': 'KH', 'name': 'Cambodia'}, {'code': 'KI', 'name': 'Kiribati'}, {'code': 'KN', 'name': 'Saint Kitts and Nevis'}, {'code': 'KR', 'name': 'Korea, Republic of'}, {'code': 'KW', 'name': 'Kuwait'}, {'code': 'LA', 'name': "Lao People's Democratic Republic"}, {'code': 'LB', 'name': 'Lebanon'}, {'code': 'LR', 'name': 'Liberia'}, {'code': 'LY', 'name': 'Libya'}, {'code': 'LC', 'name': 'Saint Lucia'}, {'code': 'LI', 'name': 'Liechtenstein'}, {'code': 'LK', 'name': 'Sri Lanka'}, {'code': 'LS', 'name': 'Lesotho'}, {'code': 'LT', 'name': 'Lithuania'}, {'code': 'LU', 'name': 'Luxembourg'}, {'code': 'LV', 'name': 'Latvia'}, {'code': 'MO', 'name': 'Macao'}, {'code': 'MF', 'name': 'Saint Martin (French part)'}, {'code': 'MA', 'name': 'Morocco'}, {'code': 'MC', 'name': 'Monaco'}, {'code': 'MD', 'name': 'Moldova, Republic of'}, {'code': 'MG', 'name': 'Madagascar'}, {'code': 'MV', 'name': 'Maldives'}, {'code': 'MX', 'name': 'Mexico'}, {'code': 'MH', 'name': 'Marshall Islands'}, {'code': 'MK', 'name': 'North Macedonia'}, {'code': 'ML', 'name': 'Mali'}, {'code': 'MT', 'name': 'Malta'}, {'code': 'MM', 'name': 'Myanmar'}, {'code': 'ME', 'name': 'Montenegro'}, {'code': 'MN', 'name': 'Mongolia'}, {'code': 'MP', 'name': 'Northern Mariana Islands'}, {'code': 'MZ', 'name': 'Mozambique'}, {'code': 'MR', 'name': 'Mauritania'}, {'code': 'MS', 'name': 'Montserrat'}, {'code': 'MQ', 'name': 'Martinique'}, {'code': 'MU', 'name': 'Mauritius'}, {'code': 'MW', 'name': 'Malawi'}, {'code': 'MY', 'name': 'Malaysia'}, {'code': 'YT', 'name': 'Mayotte'}, {'code': 'NA', 'name': 'Namibia'}, {'code': 'NC', 'name': 'New Caledonia'}, {'code': 'NE', 'name': 'Niger'}, {'code': 'NF', 'name': 'Norfolk Island'}, {'code': 'NG', 'name': 'Nigeria'}, {'code': 'NI', 'name': 'Nicaragua'}, {'code': 'NU', 'name': 'Niue'}, {'code': 'NL', 'name': 'Netherlands'}, {'code': 'NO', 'name': 'Norway'}, {'code': 'NP', 'name': 'Nepal'}, {'code': 'NR', 'name': 'Nauru'}, {'code': 'NZ', 'name': 'New Zealand'}, {'code': 'OM', 'name': 'Oman'}, {'code': 'PK', 'name': 'Pakistan'}, {'code': 'PA', 'name': 'Panama'}, {'code': 'PN', 'name': 'Pitcairn'}, {'code': 'PE', 'name': 'Peru'}, {'code': 'PH', 'name': 'Philippines'}, {'code': 'PW', 'name': 'Palau'}, {'code': 'PG', 'name': 'Papua New Guinea'}, {'code': 'PL', 'name': 'Poland'}, {'code': 'PR', 'name': 'Puerto Rico'}, {'code': 'KP', 'name': "Korea, Democratic People's Republic of"}, {'code': 'PT', 'name': 'Portugal'}, {'code': 'PY', 'name': 'Paraguay'}, {'code': 'PS', 'name': 'Palestine, State of'}, {'code': 'PF', 'name': 'French Polynesia'}, {'code': 'QA', 'name': 'Qatar'}, {'code': 'RE', 'name': 'Reunion'}, {'code': 'RO', 'name': 'Romania'}, {'code': 'RU', 'name': 'Russian Federation'}, {'code': 'RW', 'name': 'Rwanda'}, {'code': 'SA', 'name': 'Saudi Arabia'}, {'code': 'SD', 'name': 'Sudan'}, {'code': 'SN', 'name': 'Senegal'}, {'code': 'SG', 'name': 'Singapore'}, {'code': 'GS', 'name': 'South Georgia and the South Sandwich Islands'}, {'code': 'SH', 'name': 'Saint Helena, Ascension and Tristan da Cunha'}, {'code': 'SJ', 'name': 'Svalbard and Jan Mayen'}, {'code': 'SB', 'name': 'Solomon Islands'}, {'code': 'SL', 'name': 'Sierra Leone'}, {'code': 'SV', 'name': 'El Salvador'}, {'code': 'SM', 'name': 'San Marino'}, {'code': 'SO', 'name': 'Somalia'}, {'code': 'PM', 'name': 'Saint Pierre and Miquelon'}, {'code': 'RS', 'name': 'Serbia'}, {'code': 'SS', 'name': 'South Sudan'}, {'code': 'ST', 'name': 'Sao Tome and Principe'}, {'code': 'SR', 'name': 'Suriname'}, {'code': 'SK', 'name': 'Slovakia'}, {'code': 'SI', 'name': 'Slovenia'}, {'code': 'SE', 'name': 'Sweden'}, {'code': 'SZ', 'name': 'Eswatini'}, {'code': 'SX', 'name': 'Sint Maarten (Dutch part)'}, {'code': 'SC', 'name': 'Seychelles'}, {'code': 'SY', 'name': 'Syrian Arab Republic'}, {'code': 'TC', 'name': 'Turks and Caicos Islands'}, {'code': 'TD', 'name': 'Chad'}, {'code': 'TG', 'name': 'Togo'}, {'code': 'TH', 'name': 'Thailand'}, {'code': 'TJ', 'name': 'Tajikistan'}, {'code': 'TK', 'name': 'Tokelau'}, {'code': 'TM', 'name': 'Turkmenistan'}, {'code': 'TL', 'name': 'Timor-Leste'}, {'code': 'TO', 'name': 'Tonga'}, {'code': 'TT', 'name': 'Trinidad and Tobago'}, {'code': 'TN', 'name': 'Tunisia'}, {'code': 'TR', 'name': 'Turkey'}, {'code': 'TV', 'name': 'Tuvalu'}, {'code': 'TW', 'name': 'Taiwan, Province of China'}, {'code': 'TZ', 'name': 'Tanzania, United Republic of'}, {'code': 'UG', 'name': 'Uganda'}, {'code': 'UA', 'name': 'Ukraine'}, {'code': 'UM', 'name': 'United States Minor Outlying Islands'}, {'code': 'UY', 'name': 'Uruguay'}, {'code': 'US', 'name': 'United States'}, {'code': 'UZ', 'name': 'Uzbekistan'}, {'code': 'VA', 'name': 'Holy See (Vatican City State)'}, {'code': 'VC', 'name': 'Saint Vincent and the Grenadines'}, {'code': 'VE', 'name': 'Venezuela, Bolivarian Republic of'}, {'code': 'VG', 'name': 'Virgin Islands, British'}, {'code': 'VI', 'name': 'Virgin Islands, U.S.'}, {'code': 'VN', 'name': 'Viet Nam'}, {'code': 'VU', 'name': 'Vanuatu'}, {'code': 'WF', 'name': 'Wallis and Futuna'}, {'code': 'WS', 'name': 'Samoa'}, {'code': 'YE', 'name': 'Yemen'}, {'code': 'ZA', 'name': 'South Africa'}, {'code': 'ZM', 'name': 'Zambia'}, {'code': 'ZW', 'name': 'Zimbabwe'}], max_length=50, verbose_name='country')), ('date_joined', models.DateTimeField(auto_now_add=True, verbose_name='date joined')), ('is_active', models.BooleanField(default=True, verbose_name='active')), ('is_staff', models.BooleanField(default=False, verbose_name='is staff')), ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')), ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'))], options={'abstract': False}, managers=[('objects', users.managers.UserManager())])]
class WRNInitBlock(nn.Module):
    """WRN stem: a 7x7 stride-2 convolution followed by 3x3 stride-2
    max pooling (each step halves the spatial resolution)."""

    def __init__(self, in_channels, out_channels):
        super(WRNInitBlock, self).__init__()
        # WRNConv with activate=True applies its own activation.
        self.conv = WRNConv(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=7,
            stride=2,
            padding=3,
            activate=True,
        )
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    def forward(self, x):
        return self.pool(self.conv(x))
def infer_gsdmm_topics(gsdmm_model, texts):
    """Infer one topic per document with a fitted GSDMM model.

    Parameters:
        gsdmm_model: fitted model exposing ``score(tokens)`` returning a
            distribution over topics for one document.
        texts: list of tokenized documents (list of lists of str).

    Returns the per-document topics extracted from the score distributions
    by ``extract_topic_from_gsdmm_prediction``.

    Fixed: structural validation now uses ``isinstance`` instead of the
    ``type(x) == T`` anti-pattern, checks every document (not just the
    first), and no longer crashes with IndexError on an empty list.
    """
    assert isinstance(texts, list), 'texts must be a list of tokenized documents'
    assert all(isinstance(doc, list) for doc in texts), 'each document must be a list of tokens'
    assert all(isinstance(tok, str) for doc in texts for tok in doc), 'tokens must be strings'
    dist_over_topic = [gsdmm_model.score(t) for t in texts]
    return extract_topic_from_gsdmm_prediction(dist_over_topic)
class TagModelQuerySet(models.query.QuerySet):
    """QuerySet for tag models, with helpers for initial tags and
    tag-cloud weighting."""

    def initial(self):
        # Tags declared in the model's ``tag_options.initial`` setting.
        return self.filter(name__in=self.model.tag_options.initial)

    def filter_or_initial(self, *args, **kwargs):
        # Standard filter, but always keep the initial tags in the result.
        return self.filter((models.Q(*args, **kwargs) | models.Q(name__in=self.model.tag_options.initial)))

    def weight(self, min=settings.WEIGHT_MIN, max=settings.WEIGHT_MAX):
        """Annotate each tag with ``weight``: its ``count`` scaled into
        [min, max] relative to the largest count in the table.

        NOTE: ``min``/``max`` shadow the builtins but are kept as keyword
        names for API compatibility with existing callers.
        """
        scale = (int(max) - int(min))
        # ``or 1`` guards against dividing by zero on an empty table.
        max_count = (self.model.objects.aggregate(Max('count'))['count__max'] or 1)
        qs = self.annotate(weight=((Floor((F('count') * scale)) / max_count) + int(min)))
        return qs

    def __str__(self):
        # Render the queryset as a tag string (e.g. for form display).
        return utils.render_tags(self)
def chemcore(mol, spinorb=False):
    """Count the chemical-core orbitals of ``mol``.

    For each atom, take the tabulated core-orbital count and subtract the
    core orbitals already removed by that atom's ECP; contributions never
    go negative. When ``spinorb`` is True the total is doubled to count
    spin orbitals.
    """
    ncore = 0
    for ia in range(mol.natm):
        nelec = mol.atom_charge(ia)
        z = charge(mol.atom_symbol(ia))
        # Electrons removed by the effective core potential, as pairs.
        necp_core = (z - nelec) // 2
        # An ECP may already remove more than the tabulated core; clamp
        # the per-atom contribution at zero instead of going negative.
        ncore += max(0, chemcore_atm[z] - necp_core)
    if spinorb:
        ncore *= 2
    return ncore
def assert_plugin_add_result(tester: CommandTester, expected: str, constraint: (str | Mapping[(str, (str | list[str]))])) -> None:
    """Assert that ``plugin add`` printed ``expected`` and pinned
    ``poetry-plugin`` to ``constraint`` in Poetry's own dependencies."""
    assert (tester.io.fetch_output() == expected)
    # Dependencies of the Poetry installation itself (the `self` section).
    dependencies: dict[(str, Any)] = get_self_command_dependencies()
    assert ('poetry-plugin' in dependencies)
    assert (dependencies['poetry-plugin'] == constraint)
class TCN_GCN_unit_5(nn.Module):
    """Spatial GCN (``unit_gtcn_5``) followed by a temporal TCN, with a
    residual connection and a final ReLU."""

    def __init__(self, in_channels, out_channels, A, stride=1, residual=True):
        super(TCN_GCN_unit_5, self).__init__()
        self.gcn1 = unit_gtcn_5(in_channels, out_channels, A)
        self.tcn1 = unit_tcn(out_channels, out_channels, stride=stride)
        self.relu = nn.ReLU()
        if not residual:
            # No skip connection at all.
            self.residual = lambda x: 0
        elif in_channels == out_channels and stride == 1:
            # Shapes match: identity skip.
            self.residual = lambda x: x
        else:
            # Shapes differ: project the skip with a 1x1 temporal conv.
            self.residual = unit_tcn(in_channels, out_channels, kernel_size=1, stride=stride)

    def forward(self, x):
        out = self.tcn1(self.gcn1(x)) + self.residual(x)
        return self.relu(out)
class BaseLM(LM):
    """Base for token-level language models in the evaluation harness.

    Concrete subclasses supply tokenization, batching limits and model
    calls via the stub hooks below; this class implements the
    request-level loglikelihood and greedy-generation loops on top.
    """

    # ---- hooks: stubs here, expected to be provided by subclasses ----
    def eot_token_id(self):
        pass

    def max_length(self):
        pass

    def max_gen_toks(self):
        pass

    def batch_size(self):
        pass

    def device(self):
        pass

    def tok_encode(self, string: str):
        pass

    def tok_decode(self, tokens: Iterable[int]):
        pass

    def _model_generate(self, context, max_length, eos_token_id):
        pass

    def _model_call(self, inps):
        pass

    def loglikelihood(self, requests):
        """Tokenize (context, continuation) pairs and score them.

        An empty context is replaced by the end-of-text token so the model
        always has at least one conditioning token.
        NOTE(review): ``self.eot_token_id``/``self.max_length`` etc. are
        used as values below, which suggests subclasses implement these
        hooks as properties — confirm.
        """
        new_reqs = []
        for (context, continuation) in requests:
            if (context == ''):
                context_enc = [self.eot_token_id]
            else:
                context_enc = self.tok_encode(context)
            continuation_enc = self.tok_encode(continuation)
            new_reqs.append(((context, continuation), context_enc, continuation_enc))
        return self._loglikelihood_tokens(new_reqs)

    def loglikelihood_rolling(self, requests):
        """Score whole strings with rolling windows over long inputs."""
        loglikelihoods = []
        for (string,) in tqdm(requests):
            # Split the token stream into disjoint max-length windows,
            # each conditioned on the previous window via context_len=1.
            rolling_token_windows = list(map(utils.make_disjoint_window, utils.get_rolling_token_windows(token_list=self.tok_encode(string), prefix_token=self.eot_token_id, max_seq_len=self.max_length, context_len=1)))
            # Prepend a None cache key: rolling windows are not cached.
            rolling_token_windows = [((None,) + x) for x in rolling_token_windows]
            string_nll = self._loglikelihood_tokens(rolling_token_windows, disable_tqdm=True)
            # Drop the is-greedy flags and sum the per-window logprobs.
            string_nll = [x[0] for x in string_nll]
            string_nll = sum(string_nll)
            loglikelihoods.append(string_nll)
        return loglikelihoods

    def _loglikelihood_tokens(self, requests, disable_tqdm=False):
        """Core scorer: two passes over identically-ordered batches.

        Pass 1 builds the padded input batches and runs the model on all
        of them; pass 2 rebuilds the same batches to align each request
        with its logits and extract (logprob-sum, exact-greedy-match).
        """
        res = []
        dataset_inps = []

        def _collate(x):
            # Sort longest-first (stable by token content) so padding per
            # batch is minimized and any OOM appears immediately.
            toks = (x[1] + x[2])
            return ((- len(toks)), tuple(toks))
        re_ord = utils.Reorderer(requests, _collate)
        # ---- pass 1: build padded batches and collect model inputs ----
        for chunk in utils.chunks(tqdm(re_ord.get_reordered(), disable=disable_tqdm), self.batch_size):
            inps = []
            cont_toks_list = []
            inplens = []
            padding_length = None
            for (_, context_enc, continuation_enc) in chunk:
                assert (len(context_enc) > 0)
                assert (len(continuation_enc) > 0)
                assert (len(continuation_enc) <= self.max_length)
                # Keep the last max_length tokens, dropping the final one
                # (the model predicts it, so it is never fed as input).
                inp = torch.tensor((context_enc + continuation_enc)[(- (self.max_length + 1)):][:(- 1)], dtype=torch.long).to(self.device)
                (inplen,) = inp.shape
                cont = continuation_enc
                # First (longest) item in the chunk fixes the pad length.
                padding_length = (padding_length if (padding_length is not None) else inplen)
                inp = torch.cat([inp, torch.zeros((padding_length - inplen), dtype=torch.long).to(inp.device)], dim=0)
                inps.append(inp.unsqueeze(0))
                cont_toks_list.append(cont)
                inplens.append(inplen)
            batched_inps = torch.cat(inps, dim=0)
            dataset_inps.append(batched_inps)
        # Run the model over all collected batches at once.
        dataset_logits = self._model_logits_on_dataset(dataset_inps)
        iter = 0
        # ---- pass 2: rebuild the same batches and read off the logits ----
        for chunk in utils.chunks(tqdm(re_ord.get_reordered(), disable=disable_tqdm), self.batch_size):
            multi_logits = dataset_logits[iter]
            iter += 1
            inps = []
            cont_toks_list = []
            inplens = []
            padding_length = None
            for (_, context_enc, continuation_enc) in chunk:
                assert (len(context_enc) > 0)
                assert (len(continuation_enc) > 0)
                assert (len(continuation_enc) <= self.max_length)
                inp = torch.tensor((context_enc + continuation_enc)[(- (self.max_length + 1)):][:(- 1)], dtype=torch.long).to(self.device)
                (inplen,) = inp.shape
                cont = continuation_enc
                padding_length = (padding_length if (padding_length is not None) else inplen)
                inp = torch.cat([inp, torch.zeros((padding_length - inplen), dtype=torch.long).to(inp.device)], dim=0)
                inps.append(inp.unsqueeze(0))
                cont_toks_list.append(cont)
                inplens.append(inplen)
            for ((cache_key, _, _), logits, inp, inplen, cont_toks) in zip(chunk, multi_logits, inps, inplens, cont_toks_list):
                contlen = len(cont_toks)
                # Slice out only the positions that predict the continuation.
                logits = logits[(inplen - contlen):inplen].unsqueeze(0)
                greedy_tokens = logits.argmax(dim=(- 1))
                cont_toks = torch.tensor(cont_toks, dtype=torch.long).unsqueeze(0)
                # True iff greedy decoding would reproduce the continuation.
                max_equal = (greedy_tokens == cont_toks).all()
                # Gather the logprob of each actual continuation token.
                logits = torch.gather(logits, 2, cont_toks.unsqueeze((- 1))).squeeze((- 1))
                answer = (float(logits.sum()), bool(max_equal))
                if (cache_key is not None):
                    self.cache_hook.add_partial('loglikelihood', cache_key, answer)
                res.append(answer)
        return re_ord.get_original(res)

    def greedy_until(self, requests):
        """Generate greedily from each context until a stop sequence."""
        res = []

        def _collate(x):
            # Shortest-first so batch sizes can be tuned progressively.
            toks = self.tok_encode(x[0])
            return (len(toks), x[0])
        re_ord = utils.Reorderer(requests, _collate)
        for (context, until) in tqdm(re_ord.get_reordered()):
            if isinstance(until, str):
                until = [until]
            # Only the first stop string (assumed single-token) is passed
            # to the model as eos; the rest are trimmed post hoc below.
            (primary_until,) = self.tok_encode(until[0])
            # Leave room for max_gen_toks generated tokens in the window.
            context_enc = torch.tensor([self.tok_encode(context)[(self.max_gen_toks - self.max_length):]]).to(self.device)
            cont = self._model_generate(context_enc, (context_enc.shape[1] + self.max_gen_toks), primary_until)
            # Decode only the newly generated tokens.
            s = self.tok_decode(cont[0].tolist()[context_enc.shape[1]:])
            for term in until:
                s = s.split(term)[0]
            self.cache_hook.add_partial('greedy_until', (context, until), s)
            res.append(s)
        return re_ord.get_original(res)
class Net(nn.Module):
    """Small LeNet-style CNN for 1x28x28 inputs (e.g. MNIST): two conv
    blocks with max pooling and dropout, then two fully connected layers
    and a log-softmax over 10 classes."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        # Block 1: 28x28 -> 24x24 (conv) -> 12x12 (pool).
        out = F.relu(F.max_pool2d(self.conv1(x), 2))
        # Block 2 with spatial dropout: 12x12 -> 8x8 (conv) -> 4x4 (pool).
        out = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(out)), 2))
        # Flatten: 20 channels * 4 * 4 = 320 features.
        out = out.view(-1, 320)
        out = F.dropout(F.relu(self.fc1(out)), training=self.training)
        # Class log-probabilities.
        return F.log_softmax(self.fc2(out), dim=1)
def parse_html_form(attr_filter, html, input_names=None):
    """Find the first HTML form matching *attr_filter* and return its action
    and inputs.

    :param attr_filter: either a substring that must appear in the <form> tag,
        or a callable receiving the tag text and returning truthiness.
    :param html: the HTML document to scan.
    :param input_names: optional mapping of required input names to expected
        values (a string, a tuple of strings, or a compiled regex); only a
        form whose inputs satisfy every entry is returned.
    :return: ``(action, inputs_dict)`` for the first matching form, or
        ``(None, None)`` if no form qualifies.
    """
    # Fixed: the default was a shared mutable dict ({}); use None as sentinel.
    if input_names is None:
        input_names = {}
    attr_str = ('' if callable(attr_filter) else attr_filter)
    for form in re.finditer(f'(?P<TAG><form[^>]*{attr_str}.*?>)(?P<CONTENT>.*?)</?(form|body|html).*?>', html, (re.I | re.S)):
        if (callable(attr_filter) and (not attr_filter(form.group('TAG')))):
            continue
        inputs = {}
        action = parse_html_tag_attr_value('action', form.group('TAG'))
        # Strip HTML comments before scanning for <input>/<textarea> fields.
        for inputtag in re.finditer('(<(input|textarea).*?>)([^<]*(?=</\\2)|)', re.sub(re.compile('<!--.+?-->', (re.I | re.S)), '', form.group('CONTENT')), (re.I | re.S)):
            name = parse_html_tag_attr_value('name', inputtag.group(1))
            if name:
                value = parse_html_tag_attr_value('value', inputtag.group(1))
                if (not value):
                    # Fall back to the element's inner text (textarea body).
                    inputs[name] = (inputtag.group(3) or '')
                else:
                    inputs[name] = value
        if (not input_names):
            # No constraints: the first matching form wins.
            return (action, inputs)
        else:
            # Every required input must be present and match its constraint;
            # the for/else falls through to "return" only when no break fired.
            for (key, value) in input_names.items():
                if (key in inputs):
                    if (isinstance(value, str) and (inputs[key] == value)):
                        continue
                    elif (isinstance(value, tuple) and (inputs[key] in value)):
                        continue
                    elif (hasattr(value, 'search') and re.match(value, inputs[key])):
                        continue
                    else:
                        break
                else:
                    break
            else:
                return (action, inputs)
    return (None, None)
class WarmupCosineWithHardRestartsSchedule(WarmupCosineSchedule):
    """Linear warmup followed by ``cycles`` hard-restart cosine decays: the
    learning-rate multiplier drops from 1 to 0 within each cycle and jumps
    back to 1 at each restart."""

    def __init__(self, warmup=0.002, t_total=(- 1), cycles=1.0, **kw):
        super(WarmupCosineWithHardRestartsSchedule, self).__init__(warmup=warmup, t_total=t_total, cycles=cycles, **kw)
        assert cycles >= 1.0

    def get_lr_(self, progress):
        """Return the LR multiplier for *progress* in [0, 1]."""
        if progress < self.warmup:
            # Linear ramp from 0 to 1 during warmup.
            return progress / self.warmup
        # Rescale post-warmup progress back onto [0, 1).
        frac = (progress - self.warmup) / (1 - self.warmup)
        # (cycles * frac) % 1 restarts the cosine at every cycle boundary.
        return 0.5 * (1.0 + math.cos(math.pi * ((self.cycles * frac) % 1)))
class CoCaLoss(ClipLoss):
    """CoCa training loss: weighted CLIP contrastive loss plus weighted
    captioning cross-entropy (padding tokens ignored via ``pad_id``)."""

    def __init__(self, caption_loss_weight, clip_loss_weight, pad_id=0, local_loss=False, gather_with_grad=False, cache_labels=False, rank=0, world_size=1, use_horovod=False):
        super().__init__(local_loss=local_loss, gather_with_grad=gather_with_grad, cache_labels=cache_labels, rank=rank, world_size=world_size, use_horovod=use_horovod)
        self.clip_loss_weight = clip_loss_weight
        self.caption_loss_weight = caption_loss_weight
        self.caption_loss = nn.CrossEntropyLoss(ignore_index=pad_id)

    def forward(self, image_features, text_features, logits, labels, logit_scale, output_dict=False):
        """Return the two weighted loss terms, as a dict when ``output_dict``
        is true, otherwise as a (contrastive, caption) tuple."""
        contrastive = self.clip_loss_weight * super().forward(image_features, text_features, logit_scale)
        # CrossEntropyLoss wants (N, C, L); logits arrive as (N, L, C).
        captioning = self.caption_loss_weight * self.caption_loss(logits.permute(0, 2, 1), labels)
        if output_dict:
            return {'contrastive_loss': contrastive, 'caption_loss': captioning}
        return (contrastive, captioning)
# NOTE(review): the leading '@' of this decorator appears stripped by
# extraction — upstream this is most likely '@registry.register_task(...)';
# confirm against the original repository.
_task(name='CosRearrangementTask-v0')
class CosRearrangementTask(NavigationTask):
    """Rearrangement task: maintains bidirectional bookkeeping between
    simulator object ids, instance ids (iid), semantic ids (sid), object
    keys and rooms, and initializes scenes/episodes in the simulator."""

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        self.reset_trackers(False)
        self.load_annotations()
        # One shelf bin-packer per receptacle sim object id.
        self.rec_packers: Dict[(int, ShelfBinPacker)] = {}

    def get_translation(self, object_id):
        """World position of object_id; the agent's position if it is
        currently gripped (a gripped object moves with the agent)."""
        if (object_id != self._sim.gripped_object_id):
            obj_translation = self._sim.get_translation(object_id)
        else:
            obj_translation = self._sim.get_agent_state().position
        return obj_translation

    def get_dist(self, obj_id1, obj_id2, dist_type, ignore_y=True):
        """Distance between two objects: 'geo' (geodesic) or 'l2'
        (euclidean).  With ignore_y the vertical coordinates are equalized
        before measuring."""
        obj1_pos = self.get_translation(obj_id1)
        obj2_pos = self.get_translation(obj_id2)
        if ignore_y:
            obj1_pos[1] = obj2_pos[1]
        if (dist_type == 'geo'):
            return self._sim.geodesic_distance(obj1_pos, obj2_pos)
        elif (dist_type == 'l2'):
            return euclidean_distance(obj2_pos, obj1_pos)
        else:
            raise AssertionError

    def get_iid_from_key(self, obj_key):
        """Instance id for an object key (via its sim object id)."""
        obj_id = self.obj_key_to_sim_obj_id[obj_key]
        obj_iid = self.sim_obj_id_to_iid[obj_id]
        return obj_iid

    def reset_trackers(self, ep_reset):
        """(Re)initialize all tracking maps and the episode object list.

        :param ep_reset: value stored as the episode-was-reset flag.
        """
        self._episode_was_reset = ep_reset
        self.misc_dict = {}
        self.replay = []
        # iid 0 is implicitly reserved; instance ids start at 1.
        self.instance_id_count = 1
        self.sim_obj_id_to_iid = {}
        self.iid_to_sim_obj_id = {}
        self.iid_to_sid = {}
        self.sid_to_iid = {}
        self.sim_obj_id_to_obj_key = {}
        self.obj_key_to_sim_obj_id = {}
        self.sim_obj_id_to_type = {}
        self.obj_id_to_room = {}
        self.ep_obj_ids = []

    def detrack_objects(self, obj_ids):
        """Remove every tracker entry for the given sim object ids."""
        for obj_id in obj_ids:
            iid = self.sim_obj_id_to_iid[obj_id]
            sid = self.iid_to_sid[iid]
            ok = self.sim_obj_id_to_obj_key[obj_id]
            del self.sim_obj_id_to_iid[obj_id]
            del self.sim_obj_id_to_type[obj_id]
            del self.sim_obj_id_to_obj_key[obj_id]
            del self.obj_id_to_room[obj_id]
            del self.iid_to_sid[iid]
            del self.iid_to_sim_obj_id[iid]
            self.sid_to_iid[sid].remove(iid)
            # NOTE(review): this checks the whole dict rather than the per-sid
            # list; 'len(self.sid_to_iid[sid]) == 0' looks intended — confirm.
            if (len(self.sid_to_iid) == 0):
                del self.sid_to_iid[sid]
            del self.obj_key_to_sim_obj_id[ok]

    def delete_episode_objects(self):
        """Remove all episode-scoped objects from the simulator and from the
        trackers, keeping scene-level objects intact."""
        self._sim.remove_objects(self.ep_obj_ids)
        self.detrack_objects(self.ep_obj_ids)
        self._episode_was_reset = True
        self.misc_dict = {}
        self.replay = []
        self.ep_obj_ids = []
        # Continue instance ids after the highest remaining scene-level iid.
        self.instance_id_count = (max(list(self.iid_to_sid.keys())) + 1)
        self.assert_consistency()

    def track(self, obj_id=None, sid=None, iid=None, obj_key=None, obj_type=None, room=None, obj_level='scene'):
        """Register an object in the tracking maps.

        Only the maps for which the corresponding argument is not None are
        updated; ``obj_level='episode'`` additionally records the object for
        per-episode cleanup.
        """
        if (iid is not None):
            self.sim_obj_id_to_iid[obj_id] = iid
            self.iid_to_sim_obj_id[iid] = obj_id
            if (sid is not None):
                self.iid_to_sid[iid] = sid
                if (sid in self.sid_to_iid):
                    self.sid_to_iid[sid].append(iid)
                else:
                    self.sid_to_iid[sid] = [iid]
        if (obj_key is not None):
            self.sim_obj_id_to_obj_key[obj_id] = obj_key
            self.obj_key_to_sim_obj_id[obj_key] = obj_id
        if (obj_type is not None):
            assert (obj_type in ['rec', 'obj'])
            self.sim_obj_id_to_type[obj_id] = obj_type
        if (room is not None):
            assert isinstance(room, str)
            self.obj_id_to_room[obj_id] = room
        if (obj_level == 'episode'):
            self.ep_obj_ids.append(obj_id)

    def trackers(self, only_return=False):
        """Collect (and optionally print) all tracker state, then run the
        consistency check.  Returns the collected dict."""
        task_data = {}
        for attr in ['_episode_was_reset', 'misc_dict', 'replay', 'instance_id_count', 'sim_obj_id_to_iid', 'iid_to_sim_obj_id', 'sim_obj_id_to_obj_key', 'obj_key_to_sim_obj_id', 'iid_to_sid', 'sid_to_iid', 'sid_class_map', 'class_sid_map', 'obj_id_to_room', 'sim_obj_id_to_type']:
            task_data[attr] = getattr(self, attr)
            if (not only_return):
                print(f'{attr}: {task_data[attr]}')
        self.assert_consistency()
        return task_data

    def assert_consistency(self, episode=None):
        """Assert that related tracker maps have equal sizes and, when an
        episode is given, that its mapping matches the packers' mapping."""
        for attrs in [('sim_obj_id_to_iid', 'iid_to_sim_obj_id', 'sim_obj_id_to_obj_key', 'obj_key_to_sim_obj_id', 'iid_to_sid', 'obj_id_to_room', 'sim_obj_id_to_type'), ('sid_class_map', 'class_sid_map')]:
            attr_lens = []
            for attr in attrs:
                attr_lens.append(len(getattr(self, attr)))
            assert (len(set(attr_lens)) == 1)
        if (episode is not None):
            packer_mapping = get_packer_mapping(self.rec_packers, self)
            current_mapping = episode.get_mapping()
            assert (current_mapping == packer_mapping)

    def load_annotations(self):
        """Load object templates (cached URDF conversions plus annotated
        templates), object annotations, semantic-class maps and AMT data."""
        obj_attr_mgr = self._sim.get_object_template_manager()
        cache_handles = [h for h in glob.glob(os.path.join(URDF_OBJ_CACHE, '**'), recursive=True) if h.endswith('.object_config.json')]
        for ch in cache_handles:
            obj_attr_mgr.load_configs(ch)
        obj_attr_mgr.load_configs('data/objects')
        self.object_annotations = np.load(self._config['OBJECT_ANNOTATIONS'], allow_pickle=True).item()
        skipped_templates = 0
        for k in tqdm(self.object_annotations, desc='Loading handles'):
            if ('template' in self.object_annotations[k]):
                handle = self.object_annotations[k]['template']
                if (handle.endswith('.object_config.json') and os.path.exists(handle)):
                    obj_attr_mgr.load_configs(handle)
                else:
                    skipped_templates += 1
        print(f'Skipped {skipped_templates}/ {len(self.object_annotations)} specified object templates!')
        sem_classes_path = 'cos_eor/scripts/dump/semantic_classes_amt.yaml'
        objects_data = yaml.load(open(sem_classes_path, 'r'))
        self.sid_class_map = dict(objects_data['semantic_class_id_map'])
        # Inverse map: class name -> semantic id (tuples reversed).
        self.class_sid_map = dict([tup[::(- 1)] for tup in objects_data['semantic_class_id_map']])
        self.amt_data = aggregate_amt_annotations(only_amt=True)

    def register_object_templates(self):
        """Register dataset object templates in the simulator, applying
        per-template scales (dict form) or the unit scale (list form)."""
        obj_attr_mgr = self._sim.get_object_template_manager()
        object_templates = self._dataset.object_templates
        if isinstance(object_templates, dict):
            for (name, template_info) in object_templates.items():
                name = os.path.basename(name).split('.')[0]
                obj_handle = obj_attr_mgr.get_file_template_handles(name)[0]
                obj_template = obj_attr_mgr.get_template_by_handle(obj_handle)
                obj_template.scale = np.array(template_info['scale'])
                obj_attr_mgr.register_template(obj_template)
        elif isinstance(object_templates, list):
            # NOTE(review): this raise makes the following loop unreachable —
            # confirm whether the list branch should still register templates.
            raise AssertionError('Objects need to be scaled!')
            for name in object_templates:
                name = os.path.basename(name).split('.')[0]
                obj_handle = obj_attr_mgr.get_file_template_handles(name)[0]
                obj_template = obj_attr_mgr.get_template_by_handle(obj_handle)
                obj_template.scale = np.array([1.0, 1.0, 1.0])
                obj_attr_mgr.register_template(obj_template)

    def overwrite_sim_config(self, sim_config, episode):
        """Pass-through to the parent implementation."""
        sim_config = super().overwrite_sim_config(sim_config, episode)
        return sim_config

    def _initialize_receptacle_packers(self, episode: CosRearrangementEpisode):
        """Build one ShelfBinPacker per non-agent receptacle and restore its
        state from the episode."""
        self.rec_packers = {}
        for rec_key in episode.recs_keys:
            if ('agent' in rec_key):
                continue
            if (rec_key not in self.obj_key_to_sim_obj_id):
                # NOTE(review): debugging breakpoint left in — remove for
                # production use.
                import pdb
                pdb.set_trace()
            rec_id = self.obj_key_to_sim_obj_id[rec_key]
            self.rec_packers[rec_id] = ShelfBinPacker(get_bb_base(get_bb(self._sim, rec_id)))
            self.rec_packers[rec_id].from_dict(episode.recs_packers[rec_key], self.obj_key_to_sim_obj_id)

    def _initialize_episode_objects(self, episode):
        """Spawn the episode's movable objects (entries beyond the default
        matrix) in the simulator, converting URDFs on demand, then set up the
        receptacle packers."""
        start_idx = episode.default_matrix_shape[(- 1)]
        obj_attr_mgr = self._sim.get_object_template_manager()
        for (_, (file, key, pos, rot, cat)) in enumerate(zip(episode.objs_files[start_idx:], episode.objs_keys[start_idx:], episode.objs_pos[start_idx:], episode.objs_rot[start_idx:], episode.objs_cats[start_idx:])):
            if file.endswith('.urdf'):
                # Convert URDF to an object config (cached) and rewrite the
                # episode's file list to the converted path.
                (obj_file, already_exists) = urdf_to_obj(file, URDF_OBJ_CACHE, return_exist=True)
                episode.objs_files = [(obj_file if (f == file) else f) for f in episode.objs_files]
                file = obj_file
                if (not already_exists):
                    obj_attr_mgr.load_configs(file)
            sim_obj_id = self._sim.add_object_by_handle(file)
            if (sim_obj_id == (- 1)):
                # NOTE(review): debugging breakpoint left in.
                import pdb
                pdb.set_trace()
            self._sim.set_translation(pos, sim_obj_id)
            if isinstance(rot, list):
                rot = quat_from_coeffs(rot)
            rot = quat_to_magnum(rot)
            self._sim.set_rotation(rot, sim_obj_id)
            self._sim.set_object_motion_type(MotionType.DYNAMIC, sim_obj_id)
            sid = self.class_sid_map[cat]
            iid = self.instance_id_count
            self._sim.set_object_iid(iid, sim_obj_id)
            rec_key = episode.get_rec(key, episode.start_matrix)
            rec_id = self.obj_key_to_sim_obj_id.get(rec_key, (- 1))
            rec_room = self.obj_id_to_room.get(rec_id, 'null')
            self.track(sim_obj_id, sid, iid, key, 'obj', rec_room, 'episode')
            self.instance_id_count += 1
        self._initialize_receptacle_packers(episode)

    def _initialize_objects(self, episode: CosRearrangementEpisode):
        """Reset the scene for a new episode, reusing scene-level objects when
        the scene is unchanged, then place the agent and episode objects."""
        if (self._sim.same_scene and (self.instance_id_count > 1)):
            # Same scene: only episode-scoped objects need replacing.
            self.delete_episode_objects()
            self._sim.init_metadata_objects_same_scene(task=self, episode=episode)
        else:
            self.reset_trackers(True)
            self._sim.remove_all_objects()
            self._sim.init_metadata_objects(task=self, episode=episode)
        self._sim.set_agent_state(episode.start_position, quat_from_coeffs(episode.start_rotation))
        self._initialize_episode_objects(episode)

    def reset(self, episode: Episode):
        """Reset episode state, rebuild objects, verify consistency, then
        delegate to the parent reset."""
        episode.reset()
        self._initialize_objects(episode)
        self.assert_consistency(episode)
        return super().reset(episode)

    def step(self, action: Union[(int, Dict[(str, Any)])], episode: Type[Episode]):
        """Record the action in the replay log and step the parent task."""
        self._episode_was_reset = False
        self.replay.append(action)
        return super().step(action, episode)

    def did_episode_reset(self, *args: Any, **kwargs: Any) -> bool:
        """True if the last state change was an episode reset (not a step)."""
        return self._episode_was_reset

    def save_replay(self, episode, info={}, path='', uuid=''):
        """Dump episode replay data (trackers, actions, positions) to a JSON
        file named after the uuid/episode/scene.

        NOTE(review): ``info={}`` is a shared mutable default — harmless here
        since it is only deep-copied, but worth normalizing to None upstream.
        NOTE(review): ``sim_obj_id_to_ep_obj_id``/``ep_obj_id_to_sim_obj_id``
        are not set anywhere in this class — confirm they exist on instances.
        """
        data = {'episode_id': episode.episode_id, 'scene_id': episode.scene_id, 'agent_pos': np.array(self._sim._last_state.position).tolist(), 'current_position': {}, 'gripped_object_id': self._sim.gripped_object_id, 'info': deepcopy(info)}
        task_data = {'sim_object_id_to_objid_mapping': self.sim_obj_id_to_ep_obj_id, 'objid_to_sim_object_id_mapping': self.ep_obj_id_to_sim_obj_id, 'actions': self.replay, 'misc_dict': self.misc_dict, 'sim_obj_id_to_iid': self.sim_obj_id_to_iid, 'iid_to_sim_obj_id': self.iid_to_sim_obj_id, 'obj_key_to_sim_obj_id': self.obj_key_to_sim_obj_id, 'sim_obj_id_to_obj_key': self.sim_obj_id_to_obj_key}
        data.update(deepcopy(task_data))
        object_positions = [np.array(obj.position).tolist() for obj in episode.objects]
        rec_positions = [np.array(rec.position).tolist() for rec in episode.get_receptacles()]
        data['object_positions'] = object_positions
        data['receptacle_positions'] = rec_positions
        # JSON object keys must be strings; restringify the integer keys.
        keys = list(data['objid_to_sim_object_id_mapping'].keys())
        for k in keys:
            data['objid_to_sim_object_id_mapping'][str(k)] = data['objid_to_sim_object_id_mapping'].pop(k)
        with open(os.path.join(path, 'replays_{}_{}_{}.json'.format(uuid, episode.episode_id, episode.scene_id.split('/')[(- 1)])), 'w') as f:
            for key in data:
                try:
                    json_tricks.dump(data[key], f)
                except:
                    # NOTE(review): bare except + breakpoint — debugging aid.
                    import pdb
                    pdb.set_trace()

    def get_oid_from_sid(self, sid):
        """NOTE(review): the parameter is named sid but it indexes the
        iid->object-id map — confirm whether callers actually pass an iid."""
        return self.iid_to_sim_obj_id[sid]
class EvaluateTool(object):
    """Exact-match evaluator: compares predictions against gold 'seq_out'
    strings, bucketed by hardness and a small-test flag."""

    def __init__(self, args):
        self.args = args

    def evaluate(self, preds, golds, section):
        """Return a summary dict of exact-match rates.

        For 'train'/'dev' sections only the overall rate is reported; for
        'test' the simple/complex/small_test breakdowns are added.
        """
        all_match = []
        simple_match = []
        complex_match = []
        small_test_match = []
        for pred, gold_item in zip(preds, golds):
            hit = (pred == gold_item['seq_out'])
            all_match.append(hit)
            # Anything not marked 'simple' counts as complex.
            bucket = simple_match if gold_item['hardness'] == 'simple' else complex_match
            bucket.append(hit)
            if gold_item['small_test']:
                small_test_match.append(hit)
        summary = {}
        if section in ('train', 'dev'):
            summary['all'] = float(np.mean(all_match))
        elif section in ('test',):
            summary['all'] = float(np.mean(all_match))
            summary['simple'] = float(np.mean(simple_match))
            summary['complex'] = float(np.mean(complex_match))
            summary['small_test'] = float(np.mean(small_test_match))
        return summary
class CoreNLPTokenizer(Tokenizer):
    """Tokenizer backed by a Stanford CoreNLP pipeline driven over a pexpect
    pseudo-terminal; sends text lines to the JVM process and parses the JSON
    it prints back."""

    def __init__(self, **kwargs):
        # classpath: where the CoreNLP jars live; annotators: extra analyses
        # to request (e.g. {'pos', 'lemma', 'ner'}); mem: JVM heap size.
        self.classpath = (kwargs.get('classpath') or DEFAULTS['corenlp_classpath'])
        self.annotators = copy.deepcopy(kwargs.get('annotators', set()))
        self.mem = kwargs.get('mem', '2g')
        self._launch()

    def _launch(self):
        """Start the CoreNLP JVM under a bash/pexpect session and wait for
        its interactive 'NLP>' prompt."""
        annotators = ['tokenize', 'ssplit']
        # ner implies pos+lemma; lemma implies pos.
        if ('ner' in self.annotators):
            annotators.extend(['pos', 'lemma', 'ner'])
        elif ('lemma' in self.annotators):
            annotators.extend(['pos', 'lemma'])
        elif ('pos' in self.annotators):
            annotators.extend(['pos'])
        annotators = ','.join(annotators)
        options = ','.join(['untokenizable=noneDelete', 'invertible=true'])
        cmd = ['java', ('-mx' + self.mem), '-cp', ('"%s"' % self.classpath), 'edu.stanford.nlp.pipeline.StanfordCoreNLP', '-annotators', annotators, '-tokenize.options', options, '-outputFormat', 'json', '-prettyPrint', 'false']
        self.corenlp = pexpect.spawn('/bin/bash', maxread=100000, timeout=60)
        self.corenlp.setecho(False)
        # Disable canonical mode so long lines are not truncated by the tty.
        self.corenlp.sendline('stty -icanon')
        self.corenlp.sendline(' '.join(cmd))
        self.corenlp.delaybeforesend = 0
        self.corenlp.delayafterread = 0
        self.corenlp.expect_exact('NLP>', searchwindowsize=100)

    # NOTE(review): defined without 'self' — presumably a @staticmethod whose
    # decorator was lost; it maps Penn Treebank bracket tokens back to the
    # literal characters. Confirm against upstream. Appears unused in the
    # code visible here.
    def _convert(token):
        if (token == '-LRB-'):
            return '('
        if (token == '-RRB-'):
            return ')'
        if (token == '-LSB-'):
            return '['
        if (token == '-RSB-'):
            return ']'
        if (token == '-LCB-'):
            return '{'
        if (token == '-RCB-'):
            return '}'
        return token

    def tokenize(self, text, offsets=None):
        """Tokenize *text* through the running CoreNLP process.

        Raises if the text contains the prompt string (would corrupt the
        pexpect protocol). The literal input 'q' would quit the CoreNLP REPL,
        so it is special-cased and tokenized by hand.
        """
        if (offsets is not None):
            raise ValueError(f'No offsets as input for tokenizing in {self.__class__}! ')
        if ('NLP>' in text):
            raise RuntimeError('Bad token (NLP>) in text!')
        if (text.lower().strip() == 'q'):
            token = text.strip()
            index = text.index(token)
            data = [(token, text[index:], (index, (index + 1)), 'NN', 'q', 'O')]
            return Tokens(data, self.annotators)
        # Newlines would be interpreted as separate REPL inputs.
        clean_text = text.replace('\n', ' ')
        self.corenlp.sendline(clean_text.encode('utf-8'))
        self.corenlp.expect_exact('NLP>', searchwindowsize=100)
        output = self.corenlp.before
        # Skip any echoed noise before the JSON payload.
        start = output.find(b'{"sentences":')
        output = json.loads(output[start:].decode('utf-8'))
        data = []
        tokens = [t for s in output['sentences'] for t in s['tokens']]
        for i in range(len(tokens)):
            # Each token's "text with whitespace" runs to the next token's
            # start (or its own end for the final token).
            start_ws = tokens[i]['characterOffsetBegin']
            if ((i + 1) < len(tokens)):
                end_ws = tokens[(i + 1)]['characterOffsetBegin']
            else:
                end_ws = tokens[i]['characterOffsetEnd']
            data.append((tokens[i]['originalText'], text[start_ws:end_ws], (tokens[i]['characterOffsetBegin'], tokens[i]['characterOffsetEnd']), tokens[i].get('pos', None), tokens[i].get('lemma', None), tokens[i].get('ner', None)))
        return Tokens(data, self.annotators)
def escape_md_section(text, snob=False):
    """Backslash-escape Markdown-significant characters in *text*.

    Always escapes the base set (via md_backslash_matcher) plus ordered-list
    dots, '+' and '-' list markers; with snob=True additionally escapes the
    full Markdown character set. The backslash pass must run first so later
    passes do not double-escape. The matcher regexes are module-level
    patterns defined elsewhere in this file.
    """
    text = md_backslash_matcher.sub('\\\\\\1', text)
    if snob:
        text = md_chars_matcher_all.sub('\\\\\\1', text)
    text = md_dot_matcher.sub('\\1\\\\\\2', text)
    text = md_plus_matcher.sub('\\1\\\\\\2', text)
    text = md_dash_matcher.sub('\\1\\\\\\2', text)
    return text
def run(args):
    """GUI entry point: configure locale/logging, build the Qt application
    and run the asyncio main task on a Qt-integrated event loop until exit.

    :param args: parsed CLI namespace; uses .preview and .local_data.
    """
    locale.setlocale(locale.LC_ALL, '')
    # Must be set before the QApplication is constructed.
    QtWidgets.QApplication.setAttribute(QtCore.Qt.ApplicationAttribute.AA_EnableHighDpiScaling, True)
    is_preview = args.preview
    start_logger(args.local_data, is_preview)
    app = QtWidgets.QApplication(sys.argv)
    app.applicationStateChanged.connect(_on_application_state_changed)

    def main_done(done: asyncio.Task):
        # If the main task died with an exception, show it and exit nonzero.
        e: (Exception | None) = done.exception()
        if (e is not None):
            display_exception(e)
            app.exit(1)
    loop = create_loop(app)
    with loop:
        loop.create_task(qt_main(app, args)).add_done_callback(main_done)
        loop.run_forever()
class Test_pep440_post(unittest.TestCase, Testing_renderer_case_mixin):
    """Renderer expectations for the 'pep440-post' version style; the mixin
    supplies the test methods that consume ``style`` and ``expected``."""

    style = 'pep440-post'
    expected = {
        'tagged_0_commits_clean': 'v1.2.3',
        'tagged_0_commits_dirty': 'v1.2.3.post0.dev0+g',
        'tagged_1_commits_clean': 'v1.2.3.post1+gabc',
        'tagged_1_commits_dirty': 'v1.2.3.post1.dev0+gabc',
        'untagged_0_commits_clean': '0.post0+g',
        'untagged_0_commits_dirty': '0.post0.dev0+g',
        'untagged_1_commits_clean': '0.post1+gabc',
        'untagged_1_commits_dirty': '0.post1.dev0+gabc',
        'error_getting_parts': 'unknown',
    }
class ResizeLongest(nn.Module):
    """Resize an image so its longer side equals ``max_size``, then pad the
    bottom/right edges with ``fill`` to produce a square max_size x max_size
    output (content stays anchored at the top-left)."""

    def __init__(self, max_size, interpolation=InterpolationMode.BICUBIC, fill=0):
        super().__init__()
        if not isinstance(max_size, int):
            raise TypeError(f'Size should be int. Got {type(max_size)}')
        self.max_size = max_size
        self.interpolation = interpolation
        self.fill = fill

    def forward(self, img):
        """Accepts a CHW tensor or a PIL image; returns the padded resize."""
        # Tensor shape is (C, H, W); PIL .size is (W, H).
        if isinstance(img, torch.Tensor):
            height, width = img.shape[1:]
        else:
            width, height = img.size
        scale = self.max_size / float(max(height, width))
        new_height = round(height * scale)
        new_width = round(width * scale)
        resized = F.resize(img, [new_height, new_width], self.interpolation)
        # Padding order is (left, top, right, bottom): pad only right/bottom.
        return F.pad(resized, padding=[0, 0, self.max_size - new_width, self.max_size - new_height], fill=self.fill)
# NOTE(review): the decorator prefix looks stripped by extraction — upstream
# this is most likely '@pytest.mark.parametrize(...)'; confirm.
.parametrize('suffix', ['inline', 'display'])
def test_dark_mode_mathml(webengine_versions, quteproc_new, request, qtbot, suffix):
    """Check dark-mode rendering of MathML pages by probing pixel colors:
    the formula pixel should be inverted blue, the page margin should stay
    white (unaffected by the darkmode filter)."""
    if (not request.config.webengine):
        pytest.skip('Skipped with QtWebKit')
    args = (_base_args(request.config) + ['--temp-basedir', '-s', 'colors.webpage.darkmode.enabled', 'true', '-s', 'colors.webpage.darkmode.algorithm', 'brightness-rgb'])
    quteproc_new.start(args)
    quteproc_new.open_path(f'data/darkmode/mathml-{suffix}.html')
    quteproc_new.wait_for_js('Image loaded')
    # Expected probe color varies with the QtWebEngine major version and the
    # host architecture (slight rendering differences on ARM).
    if (webengine_versions.webengine >= utils.VersionNumber(6)):
        expected = (testutils.Color(0, 0, 214) if IS_ARM else testutils.Color(0, 0, 215))
    else:
        expected = (testutils.Color(0, 0, 206) if IS_ARM else testutils.Color(0, 0, 204))
    quteproc_new.get_screenshot(probe_pos=QPoint(105, 0), probe_color=expected)
    quteproc_new.get_screenshot(probe_pos=QPoint(4, 4), probe_color=testutils.Color(255, 255, 255))
class TestUserDetails(BaseActionTest):
    """Tests for the user_details pipeline step: field updates by default,
    and immutability via SOCIAL_AUTH_IMMUTABLE_USER_FIELDS."""

    def test_user_details(self):
        self.strategy.set_settings({})
        user = User(username='foobar')
        backend = None
        user_details(self.strategy, {'first_name': 'Test'}, backend, user)
        self.assertEqual(user.first_name, 'Test')
        # Without immutability settings, a second run overwrites the field.
        user_details(self.strategy, {'first_name': 'Test2'}, backend, user)
        self.assertEqual(user.first_name, 'Test2')

    def test_user_details_(self):
        self.strategy.set_settings({'SOCIAL_AUTH_IMMUTABLE_USER_FIELDS': ('first_name',)})
        user = User(username='foobar')
        backend = None
        user_details(self.strategy, {'first_name': 'Test'}, backend, user)
        self.assertEqual(user.first_name, 'Test')
        # first_name is declared immutable, so the new value is ignored.
        user_details(self.strategy, {'first_name': 'Test2'}, backend, user)
        self.assertEqual(user.first_name, 'Test')
class Hg(Vcs):
    """Mercurial backend for the Vcs interface: wraps 'hg' subcommands and
    parses their JSON-templated output."""

    HEAD = 'tip'
    # (hg status codes, mapped status name); a code matches if it appears in
    # the code string (e.g. 'A' or 'R' -> 'staged').
    _status_translations = (('AR', 'staged'), ('M', 'changed'), ('!', 'deleted'), ('?', 'untracked'), ('I', 'ignored'))

    def _log(self, refspec=None, maxres=None, filelist=None):
        """Return parsed 'hg log' entries (short rev, node, author, date,
        summary), limited to one revision for a refspec or to maxres entries;
        None on command failure or empty output."""
        args = ['log', '--template', 'json']
        if refspec:
            args += ['--limit', '1', '--rev', refspec]
        elif maxres:
            args += ['--limit', str(maxres)]
        if filelist:
            args += (['--'] + filelist)
        try:
            output = self._run(args)
        except VcsError:
            return None
        if (not output):
            return None
        log = []
        for entry in json.loads(output):
            new = {}
            new['short'] = entry['rev']
            new['revid'] = entry['node']
            new['author'] = entry['user']
            # hg date is [unixtime, tz-offset]; only the timestamp is used.
            new['date'] = datetime.fromtimestamp(entry['date'][0])
            new['summary'] = entry['desc']
            log.append(new)
        return log

    def _remote_url(self):
        """Default push/pull path from hg config, or None if unset/error."""
        try:
            return (self._run(['showconfig', 'paths.default']) or None)
        except VcsError:
            return None

    def _status_translate(self, code):
        """Map an hg status code to this module's status vocabulary."""
        for (code_x, status) in self._status_translations:
            if (code in code_x):
                return status
        return 'unknown'

    def action_add(self, filelist=None):
        """Stage files (all tracked-new if filelist is None)."""
        args = ['add']
        if filelist:
            args += (['--'] + filelist)
        self._run(args, catchout=False)

    def action_reset(self, filelist=None):
        """Unstage files via 'hg forget'; defaults to every known subpath."""
        args = ['forget', '--']
        if filelist:
            args += filelist
        else:
            args += self.rootvcs.status_subpaths.keys()
        self._run(args, catchout=False)

    def data_status_root(self):
        """Aggregate repository status: the highest-priority status present
        per DIRSTATUSES ordering, or 'sync' when clean."""
        statuses = set()
        for entry in json.loads(self._run(['status', '--all', '--template', 'json'])):
            if (entry['status'] == 'C'):
                # 'C' = clean/unmodified; ignore.
                continue
            statuses.add(self._status_translate(entry['status']))
        if statuses:
            for status in self.DIRSTATUSES:
                if (status in statuses):
                    return status
        return 'sync'

    def data_status_subpaths(self):
        """Per-path status map (normalized path -> status), clean files
        excluded."""
        statuses = {}
        for entry in json.loads(self._run(['status', '--all', '--template', 'json'])):
            if (entry['status'] == 'C'):
                continue
            statuses[os.path.normpath(entry['path'])] = self._status_translate(entry['status'])
        return statuses

    def data_status_remote(self):
        """'none' when no default remote is configured; otherwise 'unknown'
        (ahead/behind detection is not implemented for hg)."""
        if (self._remote_url() is None):
            return 'none'
        return 'unknown'

    def data_branch(self):
        """Current branch name, or None."""
        return (self._run(['branch']) or None)

    def data_info(self, rev=None):
        """Log info dict for *rev* (defaults to tip).

        Returns None for a missing tip; raises VcsError for a missing or
        ambiguous explicit revision.
        """
        if (rev is None):
            rev = self.HEAD
        log = self._log(refspec=rev)
        if (not log):
            if (rev == self.HEAD):
                # An empty repository has no tip; that is not an error.
                return None
            else:
                raise VcsError('Revision {0:s} does not exist'.format(rev))
        elif (len(log) == 1):
            return log[0]
        else:
            raise VcsError('More than one instance of revision {0:s}'.format(rev))
class UdemyLectureStream(Downloader):
    """One downloadable stream variant (media type / quality / format) of a
    lecture, owned by a parent lecture object.

    NOTE(review): sibling code uses ``self.url``, ``self.mediatype``,
    ``self.title`` etc. without calling them (e.g. ``requests.get(self.url,
    ...)`` in get_filesize), which strongly suggests these accessors were
    originally decorated with @property and the decorators were lost in
    extraction — confirm against upstream before changing call sites.
    """

    def __init__(self, parent):
        self._mediatype = None
        self._quality = None
        self._resolution = None
        self._dimension = None
        self._extension = None
        self._url = None
        self._parent = parent
        self._filename = None
        self._fsize = None
        self._active = False
        self._is_hls = False
        self._token = None
        Downloader.__init__(self)

    def __repr__(self):
        # Fixed: the original format string '%s:%%s' had one placeholder for a
        # 3-tuple of arguments, so repr() raised TypeError. Use one
        # placeholder per argument (mediatype:extension@quality).
        out = ('%s:%s@%s' % (self.mediatype, self.extension, self.quality))
        return out

    def _generate_filename(self):
        """Build a filesystem-safe filename from the lecture title plus the
        stream extension (illegal path characters replaced by '_')."""
        ok = re.compile('[^\\\\/:*?"<>|]')
        filename = ''.join(((x if ok.match(x) else '_') for x in self.title))
        filename += ('.' + self.extension)
        return filename

    def resolution(self):
        return self._resolution

    def quality(self):
        return self._quality

    def url(self):
        return self._url

    def is_hls(self):
        return self._is_hls

    def token(self):
        return self._token

    def id(self):
        return self._parent.id

    def dimension(self):
        return self._dimension

    def extension(self):
        return self._extension

    def filename(self):
        # Lazily generated and cached.
        if (not self._filename):
            self._filename = self._generate_filename()
        return self._filename

    def title(self):
        return self._parent.title

    def mediatype(self):
        return self._mediatype

    def get_quality(self, quality, preferred_mediatype='video'):
        """Return the parent's stream matching *quality*, preferring the
        given media type; falls back to the parent's best stream."""
        lecture = self._parent.getbest()
        _temp = {}
        for s in self._parent.streams:
            if (isinstance(quality, int) and (s.quality == quality)):
                mediatype = s.mediatype
                _temp[mediatype] = s
        if _temp:
            if (preferred_mediatype in _temp):
                lecture = _temp[preferred_mediatype]
            else:
                lecture = list(_temp.values()).pop()
        return lecture

    def get_filesize(self):
        """Fetch (once) and cache the stream size from the Content-Length
        header; 0 on HTTP failure or connection error."""
        if (not self._fsize):
            headers = {'User-Agent': HEADERS.get('User-Agent')}
            try:
                with requests.get(self.url, stream=True, headers=headers) as resp:
                    if resp.ok:
                        self._fsize = float(resp.headers.get('Content-Length', 0))
                    if (not resp.ok):
                        self._fsize = 0
            except conn_error:
                self._fsize = 0
        return self._fsize
# NOTE(review): the leading '@' of this decorator looks stripped — upstream
# this is likely a dataset registry decorator (e.g. '@DATASETS.register_module()');
# confirm.
_module()
class AudioFeatureDataset(BaseDataset):
    """Dataset of precomputed audio features; annotation lines are
    'path total_frames label[ label...]' or a JSON annotation file."""

    def __init__(self, ann_file, pipeline, suffix='.npy', **kwargs):
        # suffix: file extension appended to bare feature paths.
        self.suffix = suffix
        super().__init__(ann_file, pipeline, modality='Audio', **kwargs)

    def load_annotations(self):
        """Parse the annotation file into a list of per-clip info dicts with
        'audio_path', 'total_frames' and 'label' (one-hot when multi_class)."""
        if self.ann_file.endswith('.json'):
            return self.load_json_annotations()
        video_infos = []
        with open(self.ann_file, 'r') as fin:
            for line in fin:
                line_split = line.strip().split()
                video_info = {}
                idx = 0
                filename = line_split[idx]
                if (self.data_prefix is not None):
                    # Append the feature suffix only when it is missing.
                    if (not filename.endswith(self.suffix)):
                        filename = (osp.join(self.data_prefix, filename) + self.suffix)
                    else:
                        filename = osp.join(self.data_prefix, filename)
                video_info['audio_path'] = filename
                idx += 1
                video_info['total_frames'] = int(line_split[idx])
                idx += 1
                # Remaining fields are one or more integer labels.
                label = [int(x) for x in line_split[idx:]]
                assert label, f'missing label in line: {line}'
                if self.multi_class:
                    assert (self.num_classes is not None)
                    onehot = torch.zeros(self.num_classes)
                    onehot[label] = 1.0
                    video_info['label'] = onehot
                else:
                    assert (len(label) == 1)
                    video_info['label'] = label[0]
                video_infos.append(video_info)
        return video_infos
# NOTE(review): the decorator prefix looks stripped by extraction — upstream
# this is most likely '@pytest.mark.parametrize(...)'; confirm.
.parametrize('cfg_file', ['configs/ner/bert_softmax/bert_softmax_cluener_18e.py'])
def test_bert_softmax(cfg_file):
    """Smoke-test the bert_softmax NER detector: training forward passes
    (default loss and MaskedFocalLoss) return loss dicts, and an inference
    forward pass runs under no_grad with a dummy vocab."""
    # Dummy batch: 47 empty texts and padded 128-length token tensors.
    texts = ([''] * 47)
    img = ([31] * 47)
    labels = ([31] * 128)
    input_ids = ([0] * 128)
    attention_mask = ([0] * 128)
    token_type_ids = ([0] * 128)
    img_metas = {'texts': texts, 'labels': torch.tensor(labels).unsqueeze(0), 'img': img, 'input_ids': torch.tensor(input_ids).unsqueeze(0), 'attention_masks': torch.tensor(attention_mask).unsqueeze(0), 'token_type_ids': torch.tensor(token_type_ids).unsqueeze(0)}
    tmp_dir = tempfile.TemporaryDirectory()
    vocab_file = osp.join(tmp_dir.name, 'fake_vocab.txt')
    _create_dummy_vocab_file(vocab_file)
    model = _get_detector_cfg(cfg_file)
    model['label_convertor']['vocab_file'] = vocab_file
    detector = build_detector(model)
    losses = detector.forward(img, img_metas)
    assert isinstance(losses, dict)
    # Swap in the focal-loss variant and repeat the training pass.
    model['loss']['type'] = 'MaskedFocalLoss'
    detector = build_detector(model)
    losses = detector.forward(img, img_metas)
    assert isinstance(losses, dict)
    tmp_dir.cleanup()
    with torch.no_grad():
        batch_results = []
        result = detector.forward(None, img_metas, return_loss=False)
        batch_results.append(result)
class SharedGDict(GDict):
    """A GDict whose numpy leaves live in multiprocessing SharedMemory, so
    the same nested structure can be attached from multiple processes.

    NOTE(review): ``_create_np_from_memory`` and ``_create_shared_memory``
    take ``cls`` as first arg but carry no @classmethod decorator (likely
    stripped in extraction). As written they are only ever called through an
    instance, so ``cls`` binds to the instance and the recursive attribute
    calls still resolve — confirm the decorators against upstream.
    """

    def __init__(self, gdict=None, shape=None, dtype=None, name=None):
        # Two construction modes: from an existing all-numpy GDict (creates
        # new shared memory), or by attaching to existing segments given
        # (shape, dtype, name) metadata.
        if (gdict is not None):
            assert ((shape is None) and (dtype is None) and (name is None))
            assert (isinstance(gdict, GDict) and gdict.is_np_all)
            shape = gdict.shape.memory
            dtype = gdict.dtype.memory
            nbytes = gdict.nbytes.memory
        else:
            assert (not ((shape is None) or (dtype is None) or (name is None)))
            nbytes = None
        # Creator (name is None) is responsible for unlinking on deletion.
        self.is_new = (name is None)
        (name, self.shared_memory) = self._create_shared_memory(shape, dtype, nbytes, name)
        memory = self._create_np_from_memory(self.shared_memory, shape, dtype)
        self.shared_shape = shape
        self.shared_dtype = dtype
        self.shared_name = name
        super(SharedGDict, self).__init__(memory)

    def _create_np_from_memory(cls, shared_memory, shape, dtype):
        """Recursively wrap shared-memory segments in numpy arrays, mirroring
        the nested dict/list structure."""
        if isinstance(shared_memory, dict):
            memory = {k: cls._create_np_from_memory(shared_memory[k], shape[k], dtype[k]) for k in shared_memory}
        elif isinstance(shared_memory, list):
            memory = [cls._create_np_from_memory(shared_memory[k], shape[k], dtype[k]) for k in range(len(shared_memory))]
        else:
            if isinstance(dtype, str):
                dtype = np.dtype(dtype)
            # Zero-copy view over the shared buffer.
            memory = np.ndarray(shape, dtype=dtype, buffer=shared_memory.buf)
        return memory

    def _create_shared_memory(cls, shape, dtype, nbytes, name=None):
        """Recursively create (name is None) or attach (name given) shared
        memory segments for the nested structure; returns (names, segments)."""
        if (name is None):
            if isinstance(nbytes, dict):
                (ret_name, ret_memory) = ({}, {})
                for key in nbytes:
                    (name_k, memory_k) = cls._create_shared_memory(shape[key], dtype[key], nbytes[key], None)
                    ret_name[key] = name_k
                    ret_memory[key] = memory_k
            elif isinstance(nbytes, list):
                (ret_name, ret_memory) = ([], [])
                for key in range(len(nbytes)):
                    (name_k, memory_k) = cls._create_shared_memory(shape[key], dtype[key], nbytes[key], None)
                    ret_name.append(name_k)
                    ret_memory.append(memory_k)
            else:
                assert is_num(nbytes), f'{nbytes}'
                ret_memory = SharedMemory(create=True, size=nbytes)
                # The OS-assigned name lets other processes attach later.
                ret_name = ret_memory.name
        else:
            ret_name = name
            if isinstance(name, dict):
                ret_memory = {k: cls._create_shared_memory(shape[k], dtype[k], None, name[k])[1] for k in name}
            elif isinstance(name, list):
                ret_memory = [cls._create_shared_memory(shape[k], dtype[k], None, name[k])[1] for k in range(len(name))]
            else:
                assert isinstance(name, str), f'{name}'
                ret_memory = SharedMemory(name=name, create=False)
        return (ret_name, ret_memory)

    def get_infos(self):
        """(shape, dtype, name) metadata needed to attach from elsewhere."""
        return (self.shared_shape, self.shared_dtype, self.shared_name)

    def _unlink(self):
        """Destroy the underlying segments (creator-side cleanup)."""
        memory = self._flatten(self.shared_memory)
        if isinstance(memory, dict):
            for (k, v) in memory.items():
                v.unlink()
        else:
            memory.unlink()

    def _close(self):
        """Detach this process's mappings from the segments."""
        memory = self._flatten(self.shared_memory)
        if isinstance(memory, dict):
            for (k, v) in memory.items():
                v.close()
        elif (not callable(memory)):
            memory.close()

    def __del__(self):
        # Best-effort cleanup; interpreter shutdown can make these raise, so
        # errors are deliberately swallowed here.
        try:
            self._close()
            if self.is_new:
                self._unlink()
        except:
            pass

    def get_full_by_key(self, key):
        """Attach a new SharedGDict covering only the sub-tree at *key*."""
        ret = []
        for name in ['shared_shape', 'shared_dtype', 'shared_name']:
            ret.append(self._get_item(getattr(self, name), self._process_key(key)))
        return type(self)(None, *ret)

    def __setitem__(self, key, value):
        # Structural mutation would desynchronize the shared metadata.
        assert False, 'Please convert to GDict or Dictarray then change the value!'
class ResNetBackbone(Backbone):
    """keras-retinanet backbone wrapper for ResNet-50/101/152."""

    def __init__(self, backbone):
        super(ResNetBackbone, self).__init__(backbone)
        self.custom_objects.update(keras_resnet.custom_objects)

    def retinanet(self, *args, **kwargs):
        """Build a RetinaNet model on top of this ResNet backbone."""
        return resnet_retinanet(*args, backbone=self.backbone, **kwargs)

    def download_imagenet(self):
        """Download (and cache) the ImageNet weights for the configured depth
        and return the local file path.

        Raises ValueError for unsupported depths.
        """
        resnet_filename = 'ResNet-{}-model.keras.h5'
        # Fixed: the resource URL string was truncated to an unterminated
        # quote in this copy; restored the fizyr/keras-models release URL
        # that hosts these weight files.
        resnet_resource = 'https://github.com/fizyr/keras-models/releases/download/v0.0.1/{}'.format(resnet_filename)
        depth = int(self.backbone.replace('resnet', ''))
        filename = resnet_filename.format(depth)
        resource = resnet_resource.format(depth)
        if (depth == 50):
            checksum = '3e9f4e4f77bbe2c9bec13b53ee1c2319'
        elif (depth == 101):
            # NOTE(review): shorter than a 32-hex-char md5 — looks truncated
            # in this copy; verify against the release assets.
            checksum = '05dce5b401a9ea0348a3213c'
        elif (depth == 152):
            # NOTE(review): also shorter than a full md5 — verify.
            checksum = '6ee11ef2b135592fbb9e71'
        else:
            raise ValueError('Unknown depth')
        return get_file(filename, resource, cache_subdir='models', md5_hash=checksum)

    def validate(self):
        """Raise ValueError if the configured backbone name is unsupported."""
        allowed_backbones = ['resnet50', 'resnet101', 'resnet152']
        if (self.backbone not in allowed_backbones):
            raise ValueError("Backbone ('{}') not in allowed backbones ({}).".format(self.backbone, allowed_backbones))

    def preprocess_image(self, inputs):
        """Preprocess in 'caffe' mode (BGR channel order, mean subtraction)."""
        return preprocess_image(inputs, mode='caffe')
def test_non_async_context():
    """Yielding an async batch item inside a (non-async-aware) context
    manager must raise AssertionError; the non-yielding path must succeed.

    NOTE(review): the bare '()' lines below look like decorators whose '@'
    and name were stripped by extraction — most likely '@asynq()' from the
    asynq library; confirm against upstream.
    """
    ()
    def async_fn_with_yield(should_yield):
        with Ctx():
            if should_yield:
                # Yielding here hands control to the scheduler while Ctx is
                # still active — the case under test.
                ret = (yield ExternalCacheBatchItem(mc._batch, 'get', 'test'))
            else:
                ret = 0
        return ret
    ()
    def batch(should_yield=True):
        (ret1, ret2) = (yield (async_fn_with_yield.asynq(should_yield), async_fn_with_yield.asynq(should_yield)))
        return (ret1, ret2)
    with AssertRaises(AssertionError):
        batch()
    # Without yields inside the context the batch completes normally.
    batch(False)
class Networks(nn.Module):
    """Backbone + optional classifier head with per-purpose losses (standard,
    adversarial, natural), assembled from a cfgs object."""

    def __init__(self, cfgs, num_classes, samples_per_cls=None):
        super(Networks, self).__init__()
        self.num_classes = num_classes
        self.samples_per_cls = samples_per_cls
        # No separate classifier configured -> backbone includes its own FC.
        self.backbone_with_fc = (cfgs.classifier is None)
        self.backbone = self.build_backbone(cfgs)
        self.epoch = 0
        if (not self.backbone_with_fc):
            self.classifier = self.build_classifier(cfgs)
            # A classifier may supply its own loss; otherwise fall back to the
            # configured Losses objects.
            if hasattr(self.classifier, 'loss'):
                self.loss = self.classifier.loss
                print('use the loss by the classifier')
            else:
                self.loss = Losses(samples_per_cls, num_classes, cfgs.loss_opt)
            if hasattr(self.classifier, 'adv_loss'):
                self.adv_loss = self.classifier.adv_loss
                print('use the adv loss by the classifier for the inner maximum')
            else:
                self.adv_loss = Losses(samples_per_cls, num_classes, cfgs.adv_loss_opt)
            self.nat_loss = Losses(samples_per_cls, num_classes, cfgs.nat_loss_opt)

    def build_backbone(self, cfgs):
        """Instantiate the configured backbone; only WideResNet is known."""
        name = cfgs.backbone
        print('>> Build backbone {}'.format(name))
        backbone_opt = getattr(cfgs, 'backbone_opt', dict())
        for (k, v) in backbone_opt.items():
            print('{} : {}'.format(k, v))
        if (name == 'WideResNet'):
            net = WideResNet(num_classes=self.num_classes, use_fc=self.backbone_with_fc, **backbone_opt)
        else:
            raise NameError
        return net

    def build_classifier(self, cfgs):
        """Instantiate the classifier head matched by (sub)string on the
        configured name; raises NameError for unknown names.

        NOTE(review): matching mixes substring ('FC' in name) and equality
        ('Cos' == name) tests, and order matters (e.g. 'CosPlus' must not be
        caught by the 'Cos' case) — preserve ordering when editing.
        """
        name = cfgs.classifier
        classifier_opt = getattr(cfgs, 'classifier_opt', dict())
        for (k, v) in classifier_opt.items():
            print('{} : {}'.format(k, v))
        print('>> Build classifier {}'.format(name))
        if ('FC' in name):
            if (cfgs.loss_opt is not None):
                focal_init = ('focal' in cfgs.loss_opt)
            else:
                focal_init = False
            net = FC_Classifier(self.num_classes, samples_per_cls=self.samples_per_cls, focal_init=focal_init)
        elif ('Cos' == name):
            net = Cos_Classifier(self.num_classes, **classifier_opt)
        elif ('Dot' in name):
            net = Dot_Classifier(self.num_classes)
        elif ('PostNorm' in name):
            net = PostNorm_Classifier(self.num_classes, **classifier_opt)
        elif ('CDT' in name):
            net = CDT_Classifier(self.num_classes, samples_per_cls=self.samples_per_cls, **classifier_opt)
        elif ('TDESim' in name):
            net = TDESim_Classifier(self.num_classes, samples_per_cls=self.samples_per_cls, **classifier_opt)
        elif ('CosPlus' in name):
            net = CosPlus_Classifier(self.num_classes, samples_per_cls=self.samples_per_cls, **classifier_opt)
        elif ('PostProc' in name):
            net = PostProc_Classifier(self.num_classes, samples_per_cls=self.samples_per_cls, **classifier_opt)
        else:
            raise NameError
        return net

    def forward(self, x):
        """Backbone features, optionally passed through the classifier."""
        out = self.backbone(x)
        if (not self.backbone_with_fc):
            out = self.classifier(out)
        return out

    def on_epoch(self):
        """Forward per-epoch hooks to classifier and backbone when present."""
        if hasattr(self.classifier, 'on_epoch'):
            self.classifier.on_epoch()
            print('Classifier operation on epoch')
        if hasattr(self.backbone, 'on_epoch'):
            self.backbone.on_epoch()
            print('Backbone operation on epoch')
class ResidualBaseDecoder(nn.Module):
    """Residual decoder: stacked residual/attention blocks with shuffle upsampling."""

    def __init__(self, channel, groups):
        super().__init__()
        # Build the stage list explicitly, then wrap it in one Sequential.
        stages = [
            ResidualBlock(channel, channel, groups=groups),
            ResidualBlockShuffle(channel, channel, 2, groups=groups),
            AttentionBlock(channel, groups=groups),
            ResidualBlock(channel, channel, groups=groups),
            ResidualBlockShuffle(channel, channel, 2, groups=groups),
            ResidualBlock(channel, channel, groups=groups),
            pixelShuffle3x3(channel, 3, 2),
        ]
        self._net = nn.Sequential(*stages)

    def forward(self, x: torch.Tensor):
        return self._net(x)
_config
def test_only_wm_protocols_focus(xmanager, conn):
    """A window advertising only WM_TAKE_FOCUS (input hint off) should receive
    a take-focus message but no FocusIn event.

    NOTE(review): the bare "_config" above looks like a stripped decorator or
    marker from the original source — confirm.
    """
    w = None

    def only_wm_protocols_focus():
        nonlocal w
        w = conn.create_window(5, 5, 10, 10)
        w.set_attribute(eventmask=xcffib.xproto.EventMask.FocusChange)
        w.set_property('WM_CLASS', 'float', type='STRING', format=8)
        # WM_HINTS: set the InputHint flag with input = 0 ("No Input" model).
        hints = ([0] * 14)
        hints[0] = xcbq.HintsFlags['InputHint']
        hints[1] = 0
        w.set_property('WM_HINTS', hints, type='WM_HINTS', format=32)
        # Advertise WM_TAKE_FOCUS so the WM uses the take-focus protocol.
        conn.conn.core.ChangePropertyChecked(xcffib.xproto.PropMode.Append, w.wid, conn.atoms['WM_PROTOCOLS'], conn.atoms['ATOM'], 32, 1, [conn.atoms['WM_TAKE_FOCUS']]).check()
        w.map()
        conn.conn.flush()
    try:
        xmanager.create_window(only_wm_protocols_focus)
        assert (xmanager.c.window.info()['floating'] is True)
        (got_take_focus, got_focus_in) = wait_for_focus_events(conn)
        assert got_take_focus
        assert (not got_focus_in)
    finally:
        # Always destroy the helper window, even when an assertion fails.
        w.kill_client()
class Solution(object):
    def minIncrementForUnique(self, A):
        """Return the minimum number of +1 increments needed to make A unique.

        Sorts A in place (as the original did) and sweeps once, pushing each
        value up to at least one more than the previous kept value.

        The previous implementation tracked "holes" between min and max and
        used duplicate.pop(0), which is O(n) per pop (O(n^2) overall) and
        scanned every integer in [min+1, max). This version is the standard
        O(n log n) sweep with O(1) extra space and identical results.

        Args:
            A: list of non-negative ints (may be None or empty).
        Returns:
            Total number of single increments required.
        """
        if not A:
            # None or empty input needs no moves.
            return 0
        A.sort()
        moves = 0
        prev = A[0]
        for value in A[1:]:
            if value <= prev:
                # value must be raised to prev + 1 to stay strictly increasing.
                moves += prev + 1 - value
                prev += 1
            else:
                prev = value
        return moves
class ExportComplianceException(Exception):
    """Signal an export-compliance failure for a given user identity."""

    def __init__(self, sso_username, email, quay_username):
        # Keep the identifying fields for later reporting/logging.
        self.sso_username = sso_username
        self.email = email
        self.quay_username = quay_username

    def __str__(self):
        parts = (self.sso_username, self.email, self.quay_username)
        return '{}: {} : {}'.format(*parts)
class ImmutableStringStrategy(StringStrategy):
    """Strategy for immutable string contents.

    Reads are answered by converting to the requested representation; writes
    first promote the string to a mutable strategy, then retry the operation.
    """

    def as_charlist_ascii(self, w_str):
        return [char for char in self.as_str_ascii(w_str)]

    def as_charlist_utf8(self, w_str):
        return [char for char in self.as_str_utf8(w_str)]

    def as_unicharlist(self, w_str):
        return [char for char in self.as_unicode(w_str)]

    def setitem(self, w_str, index, unichar):
        # Promote to a mutable strategy, then delegate the write.
        self.make_mutable(w_str)
        return w_str.setitem(index, unichar)

    def setslice(self, w_str, index, w_from, fromstart, fromend):
        # Promote to a mutable strategy, then delegate the write.
        self.make_mutable(w_str)
        return w_str.setslice(index, w_from, fromstart, fromend)
class ContextMenuUnconditional(ContextMenu, metaclass=ABCMeta):
    """Context menu whose entries do not depend on the current item/selection.

    The _base* adapters drop the (mainItem, selection) pair and forward to the
    simpler selection-independent hooks that concrete menus override.
    """

    def display(self, callingWindow, context):
        """Whether the entry should be shown; subclasses must implement."""
        raise NotImplementedError

    def getBitmap(self, callingWindow, context):
        """Optional icon for the entry; default is none."""
        return None

    def getText(self, callingWindow, context):
        """Label for the entry; subclasses must implement."""
        raise NotImplementedError

    def getSubMenu(self, callingWindow, context, rootMenu, i, pitem):
        """Optional submenu; default is none."""
        return None

    def activate(self, callingWindow, fullContext, i):
        """Invoked when the entry is clicked; default does nothing."""
        return None

    def _baseDisplay(self, callingWindow, context, mainItem, selection):
        return self.display(callingWindow, context)

    def _baseGetBitmap(self, callingWindow, context, mainItem, selection):
        return self.getBitmap(callingWindow, context)

    def _baseGetText(self, callingWindow, context, mainItem, selection):
        return self.getText(callingWindow, context)

    def _baseGetSubMenu(self, callingWindow, context, mainItem, selection, rootMenu, i, pitem):
        return self.getSubMenu(callingWindow, context, rootMenu, i, pitem)

    def _baseActivate(self, callingWindow, fullContext, mainItem, selection, i):
        return self.activate(callingWindow, fullContext, i)
def test_log_filter():
    """LogFilter.should_log: the most specific logger-prefix rule decides;
    default_level applies to loggers no rule matches."""
    # Root rule '' at INFO applies to every logger.
    rules = {'': 'INFO'}
    filter_ = LogFilter(rules, default_level='INFO')
    assert (filter_.should_log('test', 'DEBUG') is False)
    assert (filter_.should_log('test', 'INFO') is True)
    assert (filter_.should_log('raiden', 'DEBUG') is False)
    assert (filter_.should_log('raiden', 'INFO') is True)
    assert (filter_.should_log('raiden.cli', 'DEBUG') is False)
    assert (filter_.should_log('raiden.cli', 'INFO') is True)
    # Root rule at WARN raises the bar for everyone.
    rules = {'': 'WARN'}
    filter_ = LogFilter(rules, default_level='INFO')
    assert (filter_.should_log('test', 'INFO') is False)
    assert (filter_.should_log('test', 'WARN') is True)
    assert (filter_.should_log('raiden', 'INFO') is False)
    assert (filter_.should_log('raiden', 'WARN') is True)
    assert (filter_.should_log('raiden.cli', 'INFO') is False)
    assert (filter_.should_log('raiden.cli', 'WARN') is True)
    # Rule only for 'test'; other loggers fall back to default_level (INFO).
    rules = {'test': 'WARN'}
    filter_ = LogFilter(rules, default_level='INFO')
    assert (filter_.should_log('test', 'INFO') is False)
    assert (filter_.should_log('test', 'WARN') is True)
    assert (filter_.should_log('raiden', 'DEBUG') is False)
    assert (filter_.should_log('raiden', 'INFO') is True)
    assert (filter_.should_log('raiden', 'WARN') is True)
    assert (filter_.should_log('raiden.cli', 'DEBUG') is False)
    assert (filter_.should_log('raiden.cli', 'INFO') is True)
    assert (filter_.should_log('raiden.cli', 'WARN') is True)
    # Prefix rule: 'raiden' at DEBUG also covers child logger 'raiden.cli'.
    rules = {'raiden': 'DEBUG'}
    filter_ = LogFilter(rules, default_level='INFO')
    assert (filter_.should_log('test', 'DEBUG') is False)
    assert (filter_.should_log('test', 'INFO') is True)
    assert (filter_.should_log('raiden', 'DEBUG') is True)
    assert (filter_.should_log('raiden.cli', 'DEBUG') is True)
    # A deeper prefix rule does not loosen the parent logger.
    rules = {'raiden.network': 'DEBUG'}
    filter_ = LogFilter(rules, default_level='INFO')
    assert (filter_.should_log('test', 'DEBUG') is False)
    assert (filter_.should_log('test', 'INFO') is True)
    assert (filter_.should_log('raiden', 'DEBUG') is False)
    assert (filter_.should_log('raiden', 'INFO') is True)
    assert (filter_.should_log('raiden.network', 'DEBUG') is True)
    # Mixed rules: the longest matching prefix wins at each level.
    rules = {'': 'WARN', 'raiden': 'DEBUG', 'raiden.network': 'INFO', 'raiden.network.transport': 'DEBUG'}
    filter_ = LogFilter(rules, default_level='INFO')
    assert (filter_.should_log('raiden.network.transport.matrix', 'DEBUG') is True)
    assert (filter_.should_log('raiden.network.transport', 'DEBUG') is True)
    assert (filter_.should_log('raiden.network', 'DEBUG') is False)
    assert (filter_.should_log('raiden.network', 'INFO') is True)
    # NOTE(review): duplicated assertion below — probably unintentional.
    assert (filter_.should_log('raiden.network', 'INFO') is True)
    assert (filter_.should_log('raiden', 'DEBUG') is True)
    assert (filter_.should_log('', 'DEBUG') is False)
    assert (filter_.should_log('', 'INFO') is False)
    assert (filter_.should_log('', 'WARN') is True)
    assert (filter_.should_log('other', 'DEBUG') is False)
    assert (filter_.should_log('other', 'WARN') is True)
    # Same rules without the root entry: unmatched loggers use default_level.
    rules = {'raiden': 'DEBUG', 'raiden.network': 'INFO', 'raiden.network.transport': 'DEBUG'}
    filter_ = LogFilter(rules, default_level='INFO')
    assert (filter_.should_log('raiden.network.transport.matrix', 'DEBUG') is True)
    assert (filter_.should_log('raiden.network.transport', 'DEBUG') is True)
    assert (filter_.should_log('raiden.network', 'DEBUG') is False)
    assert (filter_.should_log('raiden.network', 'INFO') is True)
    # NOTE(review): duplicated assertion below — probably unintentional.
    assert (filter_.should_log('raiden.network', 'INFO') is True)
    assert (filter_.should_log('raiden', 'DEBUG') is True)
    assert (filter_.should_log('', 'DEBUG') is False)
    assert (filter_.should_log('', 'INFO') is True)
    assert (filter_.should_log('', 'WARN') is True)
    assert (filter_.should_log('other', 'DEBUG') is False)
    assert (filter_.should_log('other', 'INFO') is True)
    assert (filter_.should_log('other', 'WARN') is True)
def _apodize(input, ndim, oversamp, width, beta):
output = input
for a in range((- ndim), 0):
i = output.shape[a]
os_i = ceil((oversamp * i))
idx = np.arange(i, dtype=output.dtype)
apod = (((beta ** 2) - ((((np.pi * width) * (idx - (i // 2))) / os_i) ** 2)) ** 0.5)
apod /= np.sinh(apod)
output *= apod.reshape(([i] + ([1] * ((- a) - 1))))
return output |
.parametrize('add_version_condition', [True, False])
def test_delete(add_version_condition: bool) -> None:
    """Verify the low-level DeleteItem request payload built by Model.delete.

    NOTE(review): the leading ".parametrize(...)" line looks like a stripped
    "@pytest.mark" decorator from the original source — confirm.
    """
    item = UserModel('foo', 'bar')
    # Unconditional delete: only key, capacity setting and table name are sent.
    with patch(PATCH_METHOD) as req:
        req.return_value = None
        item.delete(add_version_condition=add_version_condition)
        expected = {'Key': {'user_id': {'S': 'bar'}, 'user_name': {'S': 'foo'}}, 'ReturnConsumedCapacity': 'TOTAL', 'TableName': 'UserModel'}
        args = req.call_args[0][1]
        assert (args == expected)
    # Conditional delete: the condition is compiled into expression fields.
    with patch(PATCH_METHOD) as req:
        req.return_value = None
        item.delete((UserModel.user_id == 'bar'), add_version_condition=add_version_condition)
        expected = {'Key': {'user_id': {'S': 'bar'}, 'user_name': {'S': 'foo'}}, 'ConditionExpression': '#0 = :0', 'ExpressionAttributeNames': {'#0': 'user_id'}, 'ExpressionAttributeValues': {':0': {'S': 'bar'}}, 'ReturnConsumedCapacity': 'TOTAL', 'TableName': 'UserModel'}
        args = req.call_args[0][1]
        assert (args == expected)
    # NOTE(review): this block repeats the previous one verbatim; the original
    # probably exercised a different condition here — confirm.
    with patch(PATCH_METHOD) as req:
        req.return_value = None
        item.delete((UserModel.user_id == 'bar'), add_version_condition=add_version_condition)
        expected = {'Key': {'user_id': {'S': 'bar'}, 'user_name': {'S': 'foo'}}, 'ConditionExpression': '#0 = :0', 'ExpressionAttributeNames': {'#0': 'user_id'}, 'ExpressionAttributeValues': {':0': {'S': 'bar'}}, 'ReturnConsumedCapacity': 'TOTAL', 'TableName': 'UserModel'}
        args = req.call_args[0][1]
        assert (args == expected)
def Mine_Pattern(FP, ItemS):
    """Grow frequent patterns by pairing every prefix with every suffix.

    Mutates FP in place (appending newly frequent patterns) and recurses via
    Join_S with the expansion set. Relies on module globals: CanNum (candidate
    counter), minsup (support threshold) and the helpers ProMatching / Join_S.
    ItemS maps str(pattern) to that pattern's projected item set.
    """
    global CanNum
    ExpSet = []
    # Snapshot FP so the appends below do not extend this iteration.
    temp = FP[:]
    for pre in temp:
        for suf in temp:
            pattern = [pre, suf]
            CanNum += 1
            # ProMatching returns (support count, projection) for the candidate;
            # the projection is cached in ItemS keyed by the pattern's repr.
            (count, ItemS[str(pattern)]) = ProMatching(ItemS[str(pre)], suf)
            if (count >= int(minsup)):
                FP.append(pattern)
                ExpSet.append(pattern)
            else:
                # Infrequent candidate: discard the cached projection again.
                del ItemS[str(pattern)]
    Join_S(FP, ExpSet, ItemS)
def test_arm():
    """Solve the crackme's validate() input constraints with the ESIL solver.

    Runs either against a local IPA (local=True) or a live frida session.
    """
    local = True
    funcaddr = 0
    varaddr = 1048576
    stackaddr = 2097152
    if local:
        # Static setup: open the IPA, seek to sym._validate and initialize the
        # ESIL VM with x0 pointing at a scratch buffer.
        r2p = r2pipe.open('ipa://test/tests/crackme-level0-symbols.ipa', flags=['-2'])
        r2p.cmd('s sym._validate; aei; aeim; aer x0 = 0x100000;')
        funcaddr = int(r2p.cmd('s'), 16)
    else:
        # Dynamic setup over frida: resolve the symbol and allocate scratch and
        # stack regions inside the target process.
        r2p = r2pipe.open('frida://133ebc680e67c885e7fd8a0229bef371//com.nowsecure.crackme', flags=['-2'])
        r2p.cmd('\\dc; `\\il~:0[0]`; `\\is~validate$:0[0]`;')
        funcaddr = int(r2p.cmd('s'), 16)
        varaddr = int(r2p.cmd('\\dma 0x1000'), 16)
        stackaddr = (int(r2p.cmd('\\dma 0x2000'), 16) + 4096)
        r2p.cmd(('aei; aeip; aer x0=%d; aer sp=%d; aer fp=%d;' % (varaddr, stackaddr, stackaddr)))
    esilsolver = ESILSolver(r2p, debug=False, trace=False)
    state = esilsolver.init_state()
    # 16 symbolic input bytes constrained to lowercase letters or space.
    b = [z3.BitVec(('b%d' % x), 8) for x in range(16)]
    for x in range(16):
        state.constrain(z3.Or(z3.And((b[x] >= 97), (b[x] <= 122)), (b[x] == 32)))
    code = z3.Concat(*b)
    state.memory[varaddr] = code
    def success(state):
        # Target reached: concretize the symbolic buffer and print it.
        cs = state.evaluate_buffer(code)
        print(("CODE: '%s'" % cs.decode()))
    # funcaddr+528 is the run target; +536 and +60 are branches to avoid.
    esilsolver.register_hook((funcaddr + 528), success)
    esilsolver.run(target=(funcaddr + 528), avoid=[(funcaddr + 536), (funcaddr + 60)])
.parametrize('report_option', ['term-missing:skip-covered', 'term:skip-covered'])
def test_skip_covered_cli(pytester, testdir, report_option):
    """--cov-report=...:skip-covered should yield the skip-covered summary.

    NOTE(review): the leading ".parametrize(...)" line looks like a stripped
    "@pytest.mark" decorator from the original source — confirm.
    """
    testdir.makefile('', coveragerc=SKIP_COVERED_COVERAGERC)
    script = testdir.makepyfile(SKIP_COVERED_TEST)
    result = testdir.runpytest('-v', f'--cov={script.dirpath()}', f'--cov-report={report_option}', script)
    assert (result.ret == 0)
    result.stdout.fnmatch_lines([SKIP_COVERED_RESULT])
def test_life_list__converters():
    """LifeList accepts both the full JSON payload and its bare 'results' list."""
    from_full = LifeList.from_json(j_life_list_1)
    assert from_full.data[0] == from_full[0]
    assert len(from_full) == 10
    assert from_full.count_without_taxon == 4
    first = from_full.data[0]
    assert isinstance(first, TaxonCount) and first.id == 48460
    from_results = LifeList.from_json(j_life_list_1['results'])
    assert len(from_results) == 10
    assert from_results.count_without_taxon == 0
def output_cut(s_partition: List[cirq.Qid]) -> None:
    """Draw the working graph colored by the given cut and print its size.

    Nodes in *s_partition* are drawn blue, the rest red; edge widths follow
    the edge weights.
    """
    coloring = ['blue' if node in s_partition else 'red' for node in working_graph]
    edge_data = working_graph.edges(data=True)
    weights = [attrs['weight'] for (_, _, attrs) in edge_data]
    nx.draw_circular(working_graph, node_color=coloring, node_size=1000, with_labels=True, width=weights)
    plt.show()
    cut_value = nx.cut_size(working_graph, s_partition, weight='weight')
    print(f'Cut size: {cut_value}')
.functions
def test_add_column_iterator_repeat(dataframe):
    """add_column with a short iterable and fill_remaining=True cycles values.

    NOTE(review): the leading ".functions" line looks like a stripped decorator
    remnant (perhaps "@pytest.mark.functions") — confirm.
    """
    df = dataframe.add_column('city_pop', range(3), fill_remaining=True)
    # The three values 0, 1, 2 repeat across the rows.
    assert (df.city_pop.iloc[0] == 0)
    assert (df.city_pop.iloc[1] == 1)
    assert (df.city_pop.iloc[2] == 2)
    assert (df.city_pop.iloc[3] == 0)
    assert (df.city_pop.iloc[4] == 1)
    assert (df.city_pop.iloc[5] == 2)
class Register():
    """Registry for world generators and server/packet event handlers."""

    def __init__(self) -> None:
        self._generators = {}
        # One handler mapping per protocol state.
        self._on_packet = ({}, {}, {}, {})
        self._on_server_start = {}
        self._on_server_stop = {}

    def add_world_generator(self, name: str):
        """Class decorator registering an AbstractWorldGenerator under *name*."""
        def deco(cls):
            if not issubclass(cls, AbstractWorldGenerator):
                raise ValueError(f'Decorated class must be a subclass of AbstractWorldGenerator')
            self._generators[name] = cls
            return cls
        return deco

    def on_packet(self, state: str, packet_id: int):
        """Decorator registering a coroutine handler for (state, packet_id)."""
        state_id = STATES.encode(state)

        def deco(func):
            if not asyncio.iscoroutinefunction(func):
                raise ValueError('Decorated object must be a coroutine function.')
            if hasattr(func, '__self__'):
                # Bound methods are wrapped in an event object instead of
                # being stored in the module-level registry.
                return PacketEvent(func, state_id, packet_id)
            handler_key = f'{func.__module__}.{func.__qualname__}'
            handlers = self._on_packet[state_id].setdefault(packet_id, {})
            handlers[handler_key] = func
            return func
        return deco

    def on_server_start(self, func):
        """Decorator wrapping a coroutine to run when the server starts."""
        if not asyncio.iscoroutinefunction(func):
            raise ValueError('Decorated object must be a coroutine function.')
        return ServerStartEvent(func)

    def on_server_stop(self, func):
        """Decorator wrapping a coroutine to run when the server stops."""
        if not asyncio.iscoroutinefunction(func):
            raise ValueError('Decorated object must be a coroutine function.')
        return ServerStopEvent(func)
def transform_ptt_post_to_spacy(post: ptt.PttPost, nlp: Language, disable: Iterable[str]=('tok2vec',)) -> spacy.SpacyPttPost:
    """Serialize a PTT post's title, content and comments to spaCy doc bytes.

    Args:
        post: source post whose text fields are run through *nlp*.
        nlp: spaCy pipeline used to process the text.
        disable: pipeline components to disable. The default is now an
            immutable tuple — the previous mutable-list default was a Python
            anti-pattern (shared across calls), even though it was never
            mutated here.

    Returns:
        A SpacyPttPost mirroring *post*, with each text field replaced by the
        serialized spaCy Doc bytes.
    """
    def _to_doc_bytes(text: str) -> bytes:
        # Single place that runs the pipeline and serializes the result.
        return nlp(text, disable=disable).to_bytes()

    # Preserve the original processing order: title, content, then comments.
    title_bytes = _to_doc_bytes(post.title)
    content_bytes = _to_doc_bytes(post.content)
    comments = [
        spacy.SpacyPttComment(
            comment_id=comment.comment_id,
            post_id=comment.post_id,
            content=_to_doc_bytes(comment.content),
        )
        for comment in post.comments
    ]
    return spacy.SpacyPttPost(id=post.id, title=title_bytes, content=content_bytes, comments=comments, created_at=post.created_at, updated_at=post.updated_at)
def test_thread_cache_basics() -> None:
    """A pool task that raises must deliver its error through the Outcome."""
    results: Queue[Outcome[object]] = Queue()

    def boom() -> NoReturn:
        raise RuntimeError('hi')

    # Deliver directly into the queue; unwrap() should re-raise the error.
    start_thread_soon(boom, results.put)
    with pytest.raises(RuntimeError, match='hi'):
        results.get().unwrap()
def test__vf_ground_sky_2d(test_system_fixed_tilt):
    """vf_ground_sky_2d matches the fixture's view factors, vectorized and scalar."""
    (params, points, expected_vfs) = test_system_fixed_tilt
    result = utils.vf_ground_sky_2d(params['rotation'], params['gcr'], points, params['pitch'], params['height'], max_rows=1)
    assert np.allclose(result, expected_vfs, rtol=0.1)
    # A single point should yield the matching scalar view factor.
    scalar_vf = utils.vf_ground_sky_2d(params['rotation'], params['gcr'], points[0], params['pitch'], params['height'], max_rows=1)
    assert np.isclose(scalar_vf, expected_vfs[0])
class VhdlLexer(RegexLexer):
    """Pygments lexer for VHDL source files."""
    name = 'vhdl'
    aliases = ['vhdl']
    filenames = ['*.vhdl', '*.vhd']
    mimetypes = ['text/x-vhdl']
    # NOTE(review): the URL literal below is truncated/unterminated in this
    # copy of the file — restore it from the original source.
    url = '
    version_added = '1.5'
    flags = (re.MULTILINE | re.IGNORECASE)
    # Token table: 'root' handles comments, operators, use/library clauses and
    # entity/architecture headers; 'endblock', 'types', 'keywords' and
    # 'numbers' are shared sub-states.
    tokens = {'root': [('\\s+', Whitespace), ('(\\\\)(\\n)', bygroups(String.Escape, Whitespace)), ('--.*?$', Comment.Single), ("'(U|X|0|1|Z|W|L|H|-)'", String.Char), ('[~!%^&*+=|?:<>/-]', Operator), ("'[a-z_]\\w*", Name.Attribute), ("[()\\[\\],.;\\']", Punctuation), ('"[^\\n\\\\"]*"', String), ('(library)(\\s+)([a-z_]\\w*)', bygroups(Keyword, Whitespace, Name.Namespace)), ('(use)(\\s+)(entity)', bygroups(Keyword, Whitespace, Keyword)), ('(use)(\\s+)([a-z_][\\w.]*\\.)(all)', bygroups(Keyword, Whitespace, Name.Namespace, Keyword)), ('(use)(\\s+)([a-z_][\\w.]*)', bygroups(Keyword, Whitespace, Name.Namespace)), ('(std|ieee)(\\.[a-z_]\\w*)', bygroups(Name.Namespace, Name.Namespace)), (words(('std', 'ieee', 'work'), suffix='\\b'), Name.Namespace), ('(entity|component)(\\s+)([a-z_]\\w*)', bygroups(Keyword, Whitespace, Name.Class)), ('(architecture|configuration)(\\s+)([a-z_]\\w*)(\\s+)(of)(\\s+)([a-z_]\\w*)(\\s+)(is)', bygroups(Keyword, Whitespace, Name.Class, Whitespace, Keyword, Whitespace, Name.Class, Whitespace, Keyword)), ('([a-z_]\\w*)(:)(\\s+)(process|for)', bygroups(Name.Class, Operator, Whitespace, Keyword)), ('(end)(\\s+)', bygroups(using(this), Whitespace), 'endblock'), include('types'), include('keywords'), include('numbers'), ('[a-z_]\\w*', Name)], 'endblock': [include('keywords'), ('[a-z_]\\w*', Name.Class), ('\\s+', Whitespace), (';', Punctuation, '#pop')], 'types': [(words(('boolean', 'bit', 'character', 'severity_level', 'integer', 'time', 'delay_length', 'natural', 'positive', 'string', 'bit_vector', 'file_open_kind', 'file_open_status', 'std_ulogic', 'std_ulogic_vector', 'std_logic', 'std_logic_vector', 'signed', 'unsigned'), suffix='\\b'), Keyword.Type)], 'keywords': [(words(('abs', 'access', 'after', 'alias', 'all', 'and', 'architecture', 'array', 'assert', 'attribute', 'begin', 'block', 'body', 'buffer', 'bus', 'case', 'component', 'configuration', 'constant', 'disconnect', 'downto', 'else', 'elsif', 'end', 'entity', 'exit', 'file', 'for', 'function', 
    'generate', 'generic', 'group', 'guarded', 'if', 'impure', 'in', 'inertial', 'inout', 'is', 'label', 'library', 'linkage', 'literal', 'loop', 'map', 'mod', 'nand', 'new', 'next', 'nor', 'not', 'null', 'of', 'on', 'open', 'or', 'others', 'out', 'package', 'port', 'postponed', 'procedure', 'process', 'pure', 'range', 'record', 'register', 'reject', 'rem', 'return', 'rol', 'ror', 'select', 'severity', 'signal', 'shared', 'sla', 'sll', 'sra', 'srl', 'subtype', 'then', 'to', 'transport', 'type', 'units', 'until', 'use', 'variable', 'wait', 'when', 'while', 'with', 'xnor', 'xor'), suffix='\\b'), Keyword)], 'numbers': [('\\d{1,2}#[0-9a-f_]+#?', Number.Integer), ('\\d+', Number.Integer), ('(\\d+\\.\\d*|\\.\\d+|\\d+)E[+-]?\\d+', Number.Float), ('X"[0-9a-f_]+"', Number.Hex), ('O"[0-7_]+"', Number.Oct), ('B"[01_]+"', Number.Bin)]}
def parse_location(file_desc):
    """Split a location descriptor into (file_host, file_path, error).

    The descriptor uses "host::path" syntax (bytes, via os.fsencode), where a
    backslash escapes special characters: "\\:" keeps a literal colon and a
    doubled backslash stands for one literal backslash. Returns
    (host_or_None, normalized_path, None) on success or (None, None, message)
    on failure.
    """
    file_desc = os.fsencode(file_desc)
    # Split on "::" not preceded by an escaping backslash; captured runs of
    # doubled backslashes are kept in the list so they can be re-attached.
    file_parts = [x for x in re.split(b'(?<!\\\\)(\\\\{2})*::', file_desc) if (x is not None)]
    concat_parts = []
    keep = None
    # Walk backwards, gluing each captured backslash run onto its owning part.
    for part in reversed(file_parts):
        if re.match(b'^(\\\\{2})+$', part):
            keep = part
        else:
            if keep:
                part += keep
                keep = None
            concat_parts.append(part)
    concat_parts.reverse()
    if (len(concat_parts) > 2):
        return (None, None, "Too many parts separated by double colon in '{desc}'".format(desc=file_desc))
    elif (len(concat_parts) == 0):
        return (None, None, "No location could be identified in '{desc}'".format(desc=file_desc))
    elif (len(concat_parts) == 1):
        # Only a path was supplied; no host component.
        file_host = None
        file_path = concat_parts[0]
    else:
        if (not concat_parts[0]):
            return (None, None, "No file host in location '{lo}' starting with '::'".format(lo=file_desc))
        elif (not concat_parts[1]):
            return (None, None, "No file path in location '{lo}' ending with '::'".format(lo=file_desc))
        file_host = concat_parts[0]
        file_path = concat_parts[1]
    # sbs = single backslash byte, dbs = two backslash bytes.
    sbs = b'\\'
    dbs = b'\\\\'
    if file_host:
        # Unescape "\:" -> ":" within each doubled-backslash-delimited chunk.
        file_host = sbs.join([x.replace((sbs + b':'), b':') for x in file_host.split(dbs)])
    # A path starting with exactly two backslashes (not more) is a UNC path.
    if (file_path.startswith(dbs) and (len(file_path) > 2) and (file_path[2:3] != sbs)):
        is_unc_path = True
    else:
        is_unc_path = False
    file_path = sbs.join([x.replace((sbs + b':'), b':') for x in file_path.split(dbs)])
    # Normalize to forward slashes; restore the UNC leading slash if needed.
    file_path = file_path.replace(b'\\', b'/')
    if is_unc_path:
        file_path = (b'/' + file_path)
    return (file_host, file_path, None)
def test_internal_error_with_maxfail(pytester: pytest.Pytester) -> None:
    """--maxfail with -n1 must report the errors, not crash with INTERNALERROR.

    NOTE(review): the "(params=['1', '2'])" line inside the generated file
    looks like a stripped "@pytest.fixture" decorator — confirm against the
    original source.
    """
    pytester.makepyfile("\n    import pytest\n\n    (params=['1', '2'])\n    def crasher():\n        raise RuntimeError\n\n    def test_aaa0(crasher):\n        pass\n    def test_aaa1(crasher):\n        pass\n    ")
    result = pytester.runpytest_subprocess('--maxfail=1', '-n1')
    result.stdout.re_match_lines(['.* [12] errors? in .*'])
    assert ('INTERNALERROR' not in result.stderr.str())
def lr_schedule(lrnrate, epoch, warmupperiod=5, schedule=(100, 150, 200), max_epoch=250):
    """Piecewise-constant LR decay (x1, x0.1, x0.01, x0.001) with linear warmup.

    Args:
        lrnrate: base learning rate.
        epoch: current (0-based) epoch.
        warmupperiod: epochs over which the warmup factor ramps from ~0 to 1.
        schedule: three decay breakpoints. The default is now an immutable
            tuple (the previous mutable-list default was a Python
            anti-pattern); pass None to derive breakpoints from max_epoch.
        max_epoch: total epochs, used only when schedule is None.

    Returns:
        The learning rate for this epoch.
    """
    if schedule is None:
        # Breakpoints at roughly 37.5%, 62.5% and 87.5% of training.
        schedule = [(max_epoch // 2.667), (max_epoch // 1.6), (max_epoch // 1.142)]
    # The epsilon guards against warmupperiod == 0.
    warmupfactor = min(1, ((epoch + 1) / (1e-06 + warmupperiod)))
    if epoch < schedule[0]:
        decay = 1.0
    elif epoch < schedule[1]:
        decay = 0.1
    elif epoch < schedule[2]:
        decay = 0.01
    else:
        decay = 0.001
    return ((decay * lrnrate) * warmupfactor)
def time_serie(ts_code: int, start: str, end: str, strict: bool=False) -> pd.Series:
    """Fetch a time series from the API as a float Series indexed by date.

    Args:
        ts_code: series code to request.
        start, end: date range bounds.
        strict: when True, use the strict-range API endpoint.
    """
    fetch = api.get_data_with_strict_range if strict else api.get_data
    ts_data = fetch(ts_code, start, end)
    values = []
    index = []
    for record in ts_data:
        raw = record['valor']
        # Empty strings denote missing observations.
        values.append(np.nan if raw == '' else raw)
        index.append(to_datetime(record['data'], 'pt'))
    return pd.Series(values, index, name=ts_code, dtype=np.float64)
class StatsReporter(threading.Thread):
    """Background thread aggregating per-pool batch stats and logging them.

    Producers push (pool_uid, BatchStats) tuples via report_stats(); every
    report_interval seconds the thread drains the queue and logs throughput.
    """

    def __init__(self, report_interval: int):
        super().__init__()
        self.report_interval = report_interval
        # Set this event to terminate the reporting loop.
        self.stop = threading.Event()
        self.stats_queue = SimpleQueue()

    def run(self):
        # wait() doubles as the inter-report sleep and the stop signal.
        while (not self.stop.wait(self.report_interval)):
            pool_batch_stats = defaultdict(list)
            # Drain everything reported since the last tick, grouped by pool.
            while (not self.stats_queue.empty()):
                (pool_uid, batch_stats) = self.stats_queue.get()
                pool_batch_stats[pool_uid].append(batch_stats)
            total_processed_batches = sum((len(pool_stats) for pool_stats in pool_batch_stats.values()))
            logger.info(f'Processed {total_processed_batches} batches in last {self.report_interval} seconds:')
            for (pool_uid, pool_stats) in pool_batch_stats.items():
                total_batches = len(pool_stats)
                total_examples = sum((batch_stats.batch_size for batch_stats in pool_stats))
                avg_batch_size = mean((batch_stats.batch_size for batch_stats in pool_stats))
                total_time = sum((batch_stats.processing_time for batch_stats in pool_stats))
                # NOTE(review): total_time == 0 would raise ZeroDivisionError;
                # presumably processing_time is always positive — confirm.
                batches_to_time = (total_batches / total_time)
                # NOTE(review): when the rate is <= 1 the printed number is
                # still batches/s though labeled 's/batch' (the reciprocal
                # seems intended) — confirm.
                batch_performance = (f'{batches_to_time:.2f} ' + ('batches/s' if (batches_to_time > 1) else 's/batch'))
                examples_to_time = (total_examples / total_time)
                example_performance = (f'{examples_to_time:.2f} ' + ('examples/s' if (examples_to_time > 1) else 's/example'))
                logger.info(f'{pool_uid}: {total_batches} batches ({batch_performance}), {total_examples} examples ({example_performance}), avg batch size {avg_batch_size:.2f}')

    def report_stats(self, pool_uid, batch_size, processing_time):
        """Queue one batch's stats; safe to call from any thread."""
        batch_stats = BatchStats(batch_size, processing_time)
        self.stats_queue.put_nowait((pool_uid, batch_stats))
class ResizeObservation(gym.ObservationWrapper):
    """Resize image observations to a fixed (H, W), keeping uint8 range [0, 255]."""

    def __init__(self, env, shape):
        super().__init__(env)
        # Accept either an int (square output) or an explicit (height, width).
        self.shape = (shape, shape) if isinstance(shape, int) else tuple(shape)
        obs_shape = self.shape + self.observation_space.shape[2:]
        self.observation_space = Box(low=0, high=255, shape=obs_shape, dtype=np.uint8)

    def observation(self, observation):
        # skimage's resize returns floats in [0, 1]; rescale back to uint8.
        resized = transform.resize(observation, self.shape)
        resized = resized * 255
        return resized.astype(np.uint8)
class BaseResourceDetailsPopup(QDialog, Ui_TrickDetailsPopup):
    """Dialog listing the areas in which a given trick/resource is used.

    Each area is rendered as a data-editor:// link that opens the data
    visualizer for the clicked region/area.
    """

    def __init__(self, parent: QWidget, window_manager: WindowManager, game_description: GameDescription, areas_to_show: list[tuple[(Region, Area, list[str])]], trick_levels: (TrickLevelConfiguration | None)=None):
        super().__init__(parent)
        self.setupUi(self)
        set_default_window_icon(self)
        self._window_manager = window_manager
        self._game_description = game_description
        self._trick_levels = trick_levels
        self.area_list_label.linkActivated.connect(self._on_click_link_to_data_editor)
        self.button_box.accepted.connect(self.button_box_close)
        self.button_box.rejected.connect(self.button_box_close)
        if areas_to_show:
            # One HTML line per area: a data-editor:// link followed by each
            # usage on its own <br /> line; lines are sorted alphabetically.
            lines = [((f'<a href="data-editor://{region.correct_name(area.in_dark_aether)}/{area.name}">{region.correct_name(area.in_dark_aether)} - {area.name}</a>' + ''.join((f'''
<br />{usage}''' for usage in usages))) + '<br />') for (region, area, usages) in areas_to_show]
            self.area_list_label.setText('<br />'.join(sorted(lines)))
        else:
            self.area_list_label.setText('This trick is not used in this level.')

    def button_box_close(self):
        # Both accept and reject close paths funnel into reject().
        self.reject()

    def _on_click_link_to_data_editor(self, link: str):
        # Expected link shape: data-editor://<region>/<area>
        info = re.match('^data-editor://([^)]+)/([^)]+)$', link)
        if info:
            (region_name, area_name) = info.group(1, 2)
            self._window_manager.open_data_visualizer_at(region_name, area_name, game=self._game_description.game, trick_levels=self._trick_levels)
def _set_adlr_autoresume(args):
    """Initialize the global ADLR AutoResume handle exactly once.

    Exits the process when autoresume is requested but the userlib module
    cannot be imported.
    """
    global _GLOBAL_ADLR_AUTORESUME
    _ensure_var_is_not_initialized(_GLOBAL_ADLR_AUTORESUME, 'adlr autoresume')
    if args.adlr_autoresume:
        if (args.rank == 0):
            print('enabling autoresume ...', flush=True)
        # The AutoResume helper lives under SUBMIT_SCRIPTS on the cluster.
        sys.path.append(os.environ.get('SUBMIT_SCRIPTS', '.'))
        try:
            from userlib.auto_resume import AutoResume
        except BaseException:
            # NOTE(review): BaseException is very broad (it also catches
            # KeyboardInterrupt/SystemExit during import); presumably
            # intentional here, but confirm.
            print('ADLR autoresume is not available, exiting ...')
            sys.exit()
        _GLOBAL_ADLR_AUTORESUME = AutoResume
class TestDataPipeFSSpec(expecttest.TestCase):
    """Tests for the fsspec-backed datapipes: lister, opener and saver."""

    def setUp(self):
        # Two temp trees, each with top-level files plus one populated sub-dir.
        self.temp_dir = create_temp_dir()
        self.temp_files = create_temp_files(self.temp_dir)
        self.temp_sub_dir = create_temp_dir(self.temp_dir.name)
        self.temp_sub_files = create_temp_files(self.temp_sub_dir, 4, False)
        self.temp_dir_2 = create_temp_dir()
        self.temp_files_2 = create_temp_files(self.temp_dir_2)
        self.temp_sub_dir_2 = create_temp_dir(self.temp_dir_2.name)
        self.temp_sub_files_2 = create_temp_files(self.temp_sub_dir_2, 4, False)

    def tearDown(self):
        try:
            self.temp_sub_dir.cleanup()
            self.temp_dir.cleanup()
            self.temp_sub_dir_2.cleanup()
            self.temp_dir_2.cleanup()
        except Exception as e:
            # Best-effort cleanup: warn instead of failing the test run.
            warnings.warn(f'TestDataPipeFSSpec was not able to cleanup temp dir due to {e}')

    def _write_text_files(self):
        """Write three small .text fixture files into the main temp dir."""
        def filepath_fn(name: str) -> str:
            return os.path.join(self.temp_dir.name, os.path.basename(name))
        name_to_data = {'1.text': b'DATA', '2.text': b'DATA', '3.text': b'DATA'}
        source_dp = IterableWrapper(sorted(name_to_data.items()))
        saver_dp = source_dp.save_to_disk(filepath_fn=filepath_fn, mode='wb')
        # Drain the datapipe to actually perform the writes.
        list(saver_dp)

    def test_fsspec_file_lister_iterdatapipe(self):
        """Lister yields exactly the sub-dir's files (class and functional forms)."""
        datapipe: IterDataPipe = FSSpecFileLister(root=('file://' + self.temp_sub_dir.name))
        for path in datapipe:
            self.assertIn(path.split('://')[1], {fsspec.implementations.local.make_path_posix(file) for file in self.temp_sub_files})
        # Same check through the functional .list_files_by_fsspec() form.
        datapipe = IterableWrapper([('file://' + self.temp_sub_dir.name)])
        datapipe = datapipe.list_files_by_fsspec()
        for path in datapipe:
            self.assertIn(path.split('://')[1], {fsspec.implementations.local.make_path_posix(file) for file in self.temp_sub_files})

    def test_fsspec_file_lister_iterdatapipe_with_list(self):
        """Lister accepts a list of roots and yields the union of their files."""
        datapipe: IterDataPipe = FSSpecFileLister(root=[('file://' + self.temp_sub_dir.name), ('file://' + self.temp_sub_dir_2.name)])
        file_lister = list(map((lambda path: path.split('://')[1]), datapipe))
        file_lister.sort()
        temp_files = list(map((lambda file: fsspec.implementations.local.make_path_posix(file)), (self.temp_sub_files + self.temp_sub_files_2)))
        temp_files.sort()
        self.assertEqual(file_lister, temp_files)
        # Same check through the functional .list_files_by_fsspec() form.
        datapipe = IterableWrapper([('file://' + self.temp_sub_dir.name), ('file://' + self.temp_sub_dir_2.name)])
        datapipe = datapipe.list_files_by_fsspec()
        res = list(map((lambda path: path.split('://')[1]), datapipe))
        res.sort()
        temp_files = list(map((lambda file: fsspec.implementations.local.make_path_posix(file)), (self.temp_sub_files + self.temp_sub_files_2)))
        temp_files.sort()
        self.assertEqual(res, temp_files)

    def test_fsspec_file_loader_iterdatapipe(self):
        """Opener reads contents; wrong encoding differs; reset replays streams."""
        datapipe1 = FSSpecFileLister(root=('file://' + self.temp_sub_dir.name))
        datapipe2 = FSSpecFileOpener(datapipe1)
        datapipe3 = FSSpecFileOpener(datapipe1, kwargs_for_open={'encoding': 'cp037'})
        for (_, f) in datapipe2:
            self.assertEqual(f.read(), 'abcdef')
        for (_, f) in datapipe3:
            # cp037 (EBCDIC) decodes the same bytes to different text.
            self.assertNotEqual(f.read(), 'abcdef')
        # Reset semantics: the opener replays from the start after a reset.
        self._write_text_files()
        lister_dp = FileLister(self.temp_dir.name, '*.text')
        fsspec_file_opener_dp = lister_dp.open_files_by_fsspec(mode='rb')
        n_elements_before_reset = 2
        (res_before_reset, res_after_reset) = reset_after_n_next_calls(fsspec_file_opener_dp, n_elements_before_reset)
        self.assertEqual(2, len(res_before_reset))
        self.assertEqual(3, len(res_after_reset))
        for (_name, stream) in res_before_reset:
            self.assertEqual(b'DATA', stream.read())
        for (_name, stream) in res_after_reset:
            self.assertEqual(b'DATA', stream.read())

    def test_fsspec_saver_iterdatapipe(self):
        """Saver writes via fsspec, yields target paths, supports reset/len."""
        def filepath_fn(name: str) -> str:
            return ('file://' + os.path.join(self.temp_dir.name, os.path.basename(name)))
        name_to_data = {'1.txt': b'DATA1', '2.txt': b'DATA2', '3.txt': b'DATA3'}
        source_dp = IterableWrapper(sorted(name_to_data.items()))
        saver_dp = source_dp.save_by_fsspec(filepath_fn=filepath_fn, mode='wb')
        res_file_paths = list(saver_dp)
        expected_paths = [filepath_fn(name) for name in name_to_data.keys()]
        self.assertEqual(expected_paths, res_file_paths)
        for name in name_to_data.keys():
            p = filepath_fn(name).split('://')[1]
            with open(p) as f:
                self.assertEqual(name_to_data[name], f.read().encode())
        # Reset semantics and __len__ for the class-based saver.
        saver_dp = FSSpecSaver(source_dp, filepath_fn=filepath_fn, mode='wb')
        n_elements_before_reset = 2
        (res_before_reset, res_after_reset) = reset_after_n_next_calls(saver_dp, n_elements_before_reset)
        self.assertEqual([filepath_fn('1.txt'), filepath_fn('2.txt')], res_before_reset)
        self.assertEqual(expected_paths, res_after_reset)
        for name in name_to_data.keys():
            p = filepath_fn(name).split('://')[1]
            with open(p) as f:
                self.assertEqual(name_to_data[name], f.read().encode())
        self.assertEqual(3, len(saver_dp))

    def test_fsspec_memory_list(self):
        """Lister works on the in-memory filesystem for dirs and single files."""
        fs = fsspec.filesystem('memory')
        fs.mkdir('foo')
        fs.touch('foo/bar1')
        fs.touch('foo/bar2')
        datapipe = FSSpecFileLister(root='memory://foo')
        self.assertEqual(set(datapipe), {'memory:///foo/bar1', 'memory:///foo/bar2'})
        datapipe = FSSpecFileLister(root='memory://foo/bar1')
        self.assertEqual(set(datapipe), {'memory://foo/bar1'})

    def test_fsspec_memory_load(self):
        """Opener reads back files written to the in-memory filesystem."""
        fs = fsspec.filesystem('memory')
        with fs.open('file', 'w') as f:
            f.write('hello')
        with fs.open('file2', 'w') as f:
            f.write('hello2')
        files = ['memory://file', 'memory://file2']
        datapipe = FSSpecFileOpener(files)
        self.assertEqual([f.read() for (_, f) in datapipe], ['hello', 'hello2'])

    def test_fsspec_memory_save(self):
        """Saver writes to the in-memory filesystem and yields memory:// URIs."""
        def filepath_fn(name: str) -> str:
            return ('memory://' + name)
        name_to_data = {'1.txt': b'DATA1', '2.txt': b'DATA2'}
        source_dp = IterableWrapper(sorted(name_to_data.items()))
        saver_dp = FSSpecSaver(source_dp, filepath_fn=filepath_fn, mode='wb')
        self.assertEqual(set(saver_dp), {'memory://1.txt', 'memory://2.txt'})
def bbox2d(bboxA, bboxB):
    """Return the intersection-over-union (IoU) of two axis-aligned 2D boxes.

    Boxes are (min_x, min_y, max_x, max_y) sequences.  Returns 0.0 when the
    boxes do not overlap or when the union area is zero (degenerate boxes).
    """
    # Overlap rectangle: max of the mins, min of the maxes on each axis.
    ix_min = max(bboxA[0], bboxB[0])
    iy_min = max(bboxA[1], bboxB[1])
    ix_max = min(bboxA[2], bboxB[2])
    iy_max = min(bboxA[3], bboxB[3])
    # Clamp negative extents to zero so disjoint boxes contribute no overlap.
    inter = max(0, ix_max - ix_min) * max(0, iy_max - iy_min)
    area_a = (bboxA[2] - bboxA[0]) * (bboxA[3] - bboxA[1])
    area_b = (bboxB[2] - bboxB[0]) * (bboxB[3] - bboxB[1])
    union = float(area_a + area_b - inter)
    # Guard against division by zero for a zero-area union.
    return inter / union if union != 0.0 else 0.0
class StoryCategory(NameSlugModel):
    """A category used to group success stories."""

    class Meta:
        # Present categories alphabetically by their name.
        ordering = ('name',)
        verbose_name = 'story category'
        verbose_name_plural = 'story categories'

    def __str__(self):
        """Display the category by its human-readable name."""
        return self.name

    def get_absolute_url(self):
        """Link to the story list filtered to this category's slug."""
        return reverse('success_story_list_category', kwargs={'slug': self.slug})
class _WrappedModel:
    """Adapter that remaps respaced timestep indices before calling the wrapped model."""

    def __init__(self, model, timestep_map, original_num_steps):
        # The underlying model being wrapped.
        self.model = model
        # Sequence mapping respaced timestep indices to original timesteps.
        self.timestep_map = timestep_map
        self.original_num_steps = original_num_steps

    def __call__(self, x, ts, **kwargs):
        """Translate `ts` through the timestep map, then delegate to the model."""
        lookup = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype)
        remapped = lookup[ts]
        return self.model(x, remapped, **kwargs)
def test_multi_mass_spring_damper():
    """The default 1-DOF mass-spring-damper yields the canonical equations of motion."""
    (k0, m0, g, c0) = sm.symbols('k0, m0, g, c0')
    (x0, v0, f0) = me.dynamicsymbols('x0, v0, f0')
    sys = multi_mass_spring_damper()
    # Symbol bookkeeping: gravity and external forcing are absent by default.
    assert sys.constants_symbols == {k0, c0, m0}
    assert sys.specifieds_symbols == set()
    assert sys.coordinates == [x0]
    assert sys.speeds == [v0]
    assert sys.states == [x0, v0]
    # Expected: x0' = v0 stacked over m0*v0' = -c0*v0 - k0*x0.
    mass_matrix = sm.Matrix([[1, 0], [0, m0]])
    forcing = sm.Matrix([[v0], [-c0 * v0 - k0 * x0]])
    assert sm.simplify(sys.eom_method.mass_matrix_full - mass_matrix) == sm.zeros(2, 2)
    assert sm.simplify(sys.eom_method.forcing_full - forcing) == sm.zeros(2, 1)
class FileUploader(Container):
    """Widget that lets the user upload files to the server.

    NOTE(review): several lines below read like decorators that lost their
    leading '@' (e.g. `_attribute_decorator(...)`, `_selection_allowed.setter`,
    `_set_on_listener(...)`, `_event`); as written they are bare expression
    statements with no effect. Confirm against the original widget toolkit.
    """
    # NOTE(review): presumably an @-decorator marking the accessor below — confirm.
    _attribute_decorator('WidgetSpecific', 'If True multiple files can be \n selected at the same time', bool, {})
    def multiple_selection_allowed(self):
        # The HTML 'multiple' attribute is stored directly in __dict__.
        return ('multiple' in self.__dict__.keys())
    _selection_allowed.setter
    def multiple_selection_allowed(self, value):
        # Toggle the HTML 'multiple' attribute on or off.
        if value:
            self.__dict__['multiple'] = 'multiple'
        elif ('multiple' in self.__dict__.keys()):
            del self.__dict__['multiple']
    _attribute_decorator('WidgetSpecific', 'Defines the path where to save the file', str, {})
    def savepath(self):
        return self._savepath
    # NOTE(review): this second `savepath` def shadows the getter above; it
    # reads like a property setter missing its decorator — confirm.
    def savepath(self, value):
        self._savepath = value
    def __init__(self, savepath='./', multiple_selection_allowed=False, accepted_files='*.*', *args, **kwargs):
        """Build an <input type="file"> element.

        Args:
            savepath: directory where uploaded files are written by `ondata`.
            multiple_selection_allowed: allow selecting several files at once.
            accepted_files: value of the HTML 'accept' attribute (e.g. '*.*').
        """
        super(FileUploader, self).__init__(*args, **kwargs)
        self._savepath = savepath
        self._multiple_selection_allowed = multiple_selection_allowed
        self.type = 'input'
        self.attributes['type'] = 'file'
        if multiple_selection_allowed:
            self.attributes['multiple'] = 'multiple'
        self.attributes['accept'] = accepted_files
        # Event names referenced by the client-side upload JavaScript below.
        self.EVENT_ON_SUCCESS = 'onsuccess'
        self.EVENT_ON_FAILED = 'onfailed'
        self.EVENT_ON_DATA = 'ondata'
        # Client-side onchange handler: upload every selected file to this widget.
        self.attributes[self.EVENT_ONCHANGE] = ("var files = this.files;for(var i=0; i<files.length; i++){remi.uploadFile('%(id)s','%(evt_success)s','%(evt_failed)s','%(evt_data)s',files[i]);}" % {'id': self.identifier, 'evt_success': self.EVENT_ON_SUCCESS, 'evt_failed': self.EVENT_ON_FAILED, 'evt_data': self.EVENT_ON_DATA})
    _set_on_listener('(self, emitter, filename)')
    _event
    def onsuccess(self, filename):
        # Fired when a file upload completes successfully.
        return (filename,)
    _set_on_listener('(self, emitter, filename)')
    _event
    def onfailed(self, filename):
        # Fired when a file upload fails.
        return (filename,)
    _set_on_listener('(self, emitter, filedata, filename)')
    _event
    def ondata(self, filedata, filename):
        # Fired with the raw file bytes; persist them under the configured savepath.
        with open(os.path.join(self._savepath, filename), 'wb') as f:
            f.write(filedata)
        return (filedata, filename)
def _construct_prop_item(key: str, value: ast.expr) -> tuple[(str, ast.expr)]:
    """Normalize a single prop (key, value) pair.

    Non-"style" keys are converted via `conv_attr_name`.  A "style" dict/call
    value is rewritten recursively: nested entries go back through this
    function, while nested "style" entries are kept untouched.
    """
    if key != 'style' or not isinstance(value, (ast.Dict, ast.Call)):
        # Plain prop (or a 'style' whose value is not a dict/call): just rename.
        return (conv_attr_name(key), value)

    def _inner(k, v):
        # Nested 'style' entries pass through unchanged; everything else recurses.
        return (k, v) if k == 'style' else _construct_prop_item(k, v)

    candidate = copy(value)
    # Only adopt the rewritten copy if something actually changed.
    if _rewrite_props(candidate, _inner):
        value = candidate
    return (key, value)
def add_attribute_to_class(api: SemanticAnalyzerPluginInterface, cls: ClassDef, name: str, typ: Type, final: bool=False, no_serialize: bool=False, override_allow_incompatible: bool=False, fullname: (str | None)=None, is_classvar: bool=False, overwrite_existing: bool=False) -> Var:
    """Attach a plugin-generated attribute `name` of type `typ` to class `cls`.

    An existing symbol with the same name is preserved under a unique
    redefinition name unless `overwrite_existing` is set.  Returns the Var
    node placed into the class symbol table.
    """
    info = cls.info
    # Keep any pre-existing symbol reachable under a mangled redefinition name.
    if name in info.names and not overwrite_existing:
        preserved = get_unique_redefinition_name(name, info.names)
        info.names[preserved] = info.names[name]
    node = Var(name, typ)
    node.info = info
    node.is_final = final
    node.is_classvar = is_classvar
    # Names on the allow-list may always override incompatibly.
    node.allow_incompatible_override = (
        True if name in ALLOW_INCOMPATIBLE_OVERRIDE else override_allow_incompatible
    )
    node._fullname = fullname if fullname else (info.fullname + '.' + name)
    info.names[name] = SymbolTableNode(MDEF, node, plugin_generated=True, no_serialize=no_serialize)
    return node
def create_dataloader(dataset_classname, dataset_config, batch_size=1, collate_fn=None, shuffle=False, num_workers=0, drop_last=False) -> DataLoader:
    """Instantiate the named dataset and wrap it in a torch DataLoader.

    `dataset_classname` must be a key of the module-level `dataset_class_dict`;
    `dataset_config` is expanded as keyword arguments to its constructor.
    """
    dataset_cls = dataset_class_dict[dataset_classname]
    return DataLoader(
        dataset_cls(**dataset_config),
        batch_size=batch_size,
        collate_fn=collate_fn,
        shuffle=shuffle,
        num_workers=num_workers,
        drop_last=drop_last,
    )
def configure_converter(converter: BaseConverter):
    """Register (un)structure hooks on `converter` for bytes, mappings and dates.

    - bytes are transported as base85 text,
    - mapping keys typed as bytes or str-based Enum get dedicated key handlers,
    - datetimes pass through unstructuring untouched but are validated when
      structuring; dates round-trip through ISO format,
    - the listed scalar types are passed through unions unchanged.
    """
    converter.register_structure_hook(bytes, (lambda v, _: b85decode(v)))
    # Empty byte strings serialize to '' rather than the b85 encoding of b''.
    converter.register_unstructure_hook(bytes, (lambda v: (b85encode(v) if v else b'').decode('utf8')))
    def gen_unstructure_mapping(cl: Any, unstructure_to=None):
        # Choose how mapping keys are rendered based on the declared key type.
        key_handler = str
        args = getattr(cl, '__args__', None)
        if args:
            if issubclass(args[0], str):
                # str-based Enum keys unstructure via their value; plain str
                # keys need no conversion at all (None disables the handler).
                key_handler = (_enum_value_getter if issubclass(args[0], Enum) else None)
            elif issubclass(args[0], bytes):
                def key_handler(k: bytes):
                    # bytes keys become base85 text, mirroring the bytes hook above.
                    return b85encode(k).decode('utf8')
        return converter.gen_unstructure_mapping(cl, unstructure_to=unstructure_to, key_handler=key_handler)
    converter._unstructure_func.register_func_list([(is_mapping, gen_unstructure_mapping, True)])
    # datetimes are emitted as-is when unstructuring; `validate_datetime`
    # checks them on the way back in.
    converter.register_unstructure_hook(datetime, (lambda v: v))
    converter.register_structure_hook(datetime, validate_datetime)
    converter.register_unstructure_hook(date, (lambda v: v.isoformat()))
    converter.register_structure_hook(date, (lambda v, _: date.fromisoformat(v)))
    configure_union_passthrough(Union[(str, String, bool, int, Integer, float, Float)], converter)
def test_slice_penumbra():
    """Penumbra slices sit on opposite sides of the axis, below the center value."""
    profile = Profile().from_tuples(PROFILER).resample_x(0.1)
    (left, right) = profile.slice_penumbra()
    # The left penumbra lies entirely at negative x, the right at positive x.
    assert np.all(left.x < 0)
    assert np.all(right.x > 0)
    # Both penumbra regions stay below the central-axis value.
    center = profile.get_y(0)
    assert np.all(left.y < center)
    assert np.all(right.y < center)
class XLMTokenizer(PreTrainedTokenizer):
    """BPE tokenizer for XLM models.

    Pipeline (see `_tokenize`): Moses punctuation normalization and
    tokenization for most languages, custom tokenizers for Thai (pythainlp),
    Chinese (jieba) and Japanese (KyTea), optional lowercasing/accent
    removal, then byte-pair encoding against the loaded merges.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, merges_file, unk_token='<unk>', bos_token='<s>', sep_token='</s>', pad_token='<pad>', cls_token='</s>', mask_token='<special1>', additional_special_tokens=['<special0>', '<special1>', '<special2>', '<special3>', '<special4>', '<special5>', '<special6>', '<special7>', '<special8>', '<special9>'], lang2id=None, id2lang=None, do_lowercase_and_remove_accent=True, **kwargs):
        """Load the JSON vocabulary and BPE merges from disk.

        Args:
            vocab_file: path to the JSON token->id vocabulary.
            merges_file: path to the BPE merges file (one merge per line).
            lang2id / id2lang: optional language<->id maps; when both are
                given they must have equal lengths.
            do_lowercase_and_remove_accent: normalize case/accents before BPE.

        NOTE(review): the mutable default for `additional_special_tokens` is
        shared across calls — harmless only while it is never mutated.
        """
        super(XLMTokenizer, self).__init__(unk_token=unk_token, bos_token=bos_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)
        # Per-language caches for Moses punctuation normalizers / tokenizers.
        self.cache_moses_punct_normalizer = dict()
        self.cache_moses_tokenizer = dict()
        # Languages that bypass Moses tokenization for a dedicated tokenizer.
        self.lang_with_custom_tokenizer = set(['zh', 'th', 'ja'])
        self.do_lowercase_and_remove_accent = do_lowercase_and_remove_accent
        self.lang2id = lang2id
        self.id2lang = id2lang
        if ((lang2id is not None) and (id2lang is not None)):
            assert (len(lang2id) == len(id2lang))
        # Lazily created custom tokenizers (see ja_tokenize / _tokenize).
        self.ja_word_tokenizer = None
        self.zh_word_tokenizer = None
        # NOTE(review): these open() handles are never explicitly closed.
        self.encoder = json.load(open(vocab_file, encoding='utf-8'))
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        # Drop the trailing empty line produced by split('\n').
        merges = open(merges_file, encoding='utf-8').read().split('\n')[:(- 1)]
        merges = [tuple(merge.split()[:2]) for merge in merges]
        # Merge rank = line order in the merges file (lower = applied first).
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # BPE result cache keyed by raw token string.
        self.cache = {}

    def moses_punct_norm(self, text, lang):
        """Normalize punctuation with a cached per-language Moses normalizer."""
        if (lang not in self.cache_moses_punct_normalizer):
            punct_normalizer = sm.MosesPunctNormalizer(lang=lang)
            self.cache_moses_punct_normalizer[lang] = punct_normalizer
        else:
            punct_normalizer = self.cache_moses_punct_normalizer[lang]
        return punct_normalizer.normalize(text)

    def moses_tokenize(self, text, lang):
        """Tokenize with a cached per-language Moses tokenizer (list of tokens)."""
        if (lang not in self.cache_moses_tokenizer):
            moses_tokenizer = sm.MosesTokenizer(lang=lang)
            self.cache_moses_tokenizer[lang] = moses_tokenizer
        else:
            moses_tokenizer = self.cache_moses_tokenizer[lang]
        return moses_tokenizer.tokenize(text, return_str=False, escape=False)

    def moses_pipeline(self, text, lang):
        """Clean text: unicode punctuation, Moses normalization, strip non-printing chars."""
        text = replace_unicode_punct(text)
        text = self.moses_punct_norm(text, lang)
        text = remove_non_printing_char(text)
        return text

    def ja_tokenize(self, text):
        """Tokenize Japanese text with KyTea, instantiating the tokenizer lazily.

        Raises the original import/attribute error (with install instructions
        logged) if Mykytea is unavailable.
        """
        if (self.ja_word_tokenizer is None):
            try:
                import Mykytea
                # Expects a KyTea model installed under ~/local/share/kytea/.
                self.ja_word_tokenizer = Mykytea.Mykytea(('-model %s/local/share/kytea/model.bin' % os.path.expanduser('~')))
            except (AttributeError, ImportError) as e:
                logger.error("Make sure you install KyTea ( and it's python wrapper ( with the following steps")
                logger.error('1. git clone :neubig/kytea.git && cd kytea')
                logger.error('2. autoreconf -i')
                logger.error('3. ./configure --prefix=$HOME/local')
                logger.error('4. make && make install')
                logger.error('5. pip install kytea')
                raise e
        return list(self.ja_word_tokenizer.getWS(text))

    # NOTE(review): reads like a property (computed, no arguments) — likely
    # missing an @property decorator; confirm against the base class API.
    def vocab_size(self):
        return len(self.encoder)

    def bpe(self, token):
        """Apply byte-pair encoding to one token; returns space-joined subwords.

        Results are memoized in self.cache.  The word is represented as a
        tuple of symbols with '</w>' appended to the last character.
        """
        word = (tuple(token[:(- 1)]) + ((token[(- 1)] + '</w>'),))
        if (token in self.cache):
            return self.cache[token]
        pairs = get_pairs(word)
        if (not pairs):
            return (token + '</w>')
        while True:
            # Greedily merge the lowest-ranked (earliest-learned) pair.
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                # NOTE(review): bare except — presumably meant to catch the
                # ValueError from .index(); consider narrowing.
                except:
                    new_word.extend(word[i:])
                    break
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        # Special-case: avoid emitting a bare newline symbol with a space.
        if (word == '\n </w>'):
            word = '\n</w>'
        self.cache[token] = word
        return word

    def _tokenize(self, text, lang='en', bypass_tokenizer=False):
        """Tokenize `text` for language `lang`, then apply BPE.

        `bypass_tokenizer=True` skips language-specific tokenization (and the
        lowercase/accent normalization) and simply whitespace-splits.
        """
        if (lang and self.lang2id and (lang not in self.lang2id)):
            logger.error('Supplied language code not found in lang2id mapping. Please check that your language is supported by the loaded pretrained model.')
        if bypass_tokenizer:
            text = text.split()
        elif (lang not in self.lang_with_custom_tokenizer):
            # Default path: Moses cleanup + tokenization (Romanian gets an
            # extra preprocessing pass).
            text = self.moses_pipeline(text, lang=lang)
            if (lang == 'ro'):
                text = romanian_preprocessing(text)
            text = self.moses_tokenize(text, lang=lang)
        elif (lang == 'th'):
            text = self.moses_pipeline(text, lang=lang)
            try:
                if ('pythainlp' not in sys.modules):
                    from pythainlp.tokenize import word_tokenize as th_word_tokenize
                else:
                    th_word_tokenize = sys.modules['pythainlp'].word_tokenize
            except (AttributeError, ImportError) as e:
                logger.error('Make sure you install PyThaiNLP ( with the following steps')
                logger.error('1. pip install pythainlp')
                raise e
            text = th_word_tokenize(text)
        elif (lang == 'zh'):
            try:
                if ('jieba' not in sys.modules):
                    import jieba
                else:
                    jieba = sys.modules['jieba']
            except (AttributeError, ImportError) as e:
                logger.error('Make sure you install Jieba ( with the following steps')
                logger.error('1. pip install jieba')
                raise e
            # Segment with jieba, clean the space-joined result, re-split.
            text = ' '.join(jieba.cut(text))
            text = self.moses_pipeline(text, lang=lang)
            text = text.split()
        elif (lang == 'ja'):
            text = self.moses_pipeline(text, lang=lang)
            text = self.ja_tokenize(text)
        else:
            raise ValueError('It should not reach here')
        if (self.do_lowercase_and_remove_accent and (not bypass_tokenizer)):
            text = lowercase_and_remove_accent(text)
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend([t for t in self.bpe(token).split(' ')])
        return split_tokens

    def _convert_token_to_id(self, token):
        """Map a token string to its vocabulary id (unk id when absent)."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map a vocabulary id back to its token string (unk token when absent)."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join BPE tokens, turning end-of-word markers back into spaces."""
        out_string = ''.join(tokens).replace('</w>', ' ').strip()
        return out_string

    def add_special_tokens_single_sentence(self, token_ids):
        """Format: <cls> tokens <sep>."""
        return (([self.cls_token_id] + token_ids) + [self.sep_token_id])

    def add_special_tokens_sentences_pair(self, token_ids_0, token_ids_1):
        """Format: <cls> A <sep> B <sep>."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)

    def save_vocabulary(self, save_directory):
        """Write the vocabulary JSON and merges file into `save_directory`.

        Returns (vocab_file, merge_file) paths, or None if the directory is
        invalid.  Warns if the stored BPE ranks are not consecutive.
        """
        if (not os.path.isdir(save_directory)):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(save_directory, VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, ensure_ascii=False))
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            # Emit merges in rank order; line number must equal stored rank.
            for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=(lambda kv: kv[1])):
                if (index != token_index):
                    logger.warning('Saving vocabulary to {}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!'.format(merge_file))
                    index = token_index
                writer.write((' '.join(bpe_tokens) + u'\n'))
                index += 1
        return (vocab_file, merge_file)
def addScriptCode(hf, testruns):
    """Emit the timeline's interactive JavaScript <script> block into the html file.

    Args:
        hf: writable handle of the html report being assembled.
        testruns: test-run data objects; their start/end stamps (seconds) set
            the zoom bounds and deviceTopology() fills the per-run devtable.
    """
    # Global timeline bounds in milliseconds.
    t0 = (testruns[0].start * 1000)
    tMax = (testruns[(- 1)].end * 1000)
    # Per-test device topology strings plus the bounds, emitted as JS globals.
    detail = '\tvar devtable = [];\n'
    for data in testruns:
        topo = data.deviceTopology()
        detail += ('\tdevtable[%d] = "%s";\n' % (data.testnumber, topo))
    detail += ('\tvar bounds = [%f,%f];\n' % (t0, tMax))
    # Static JS payload: timescale redraw/zoom, device hover + detail panes,
    # callgraph selection, log popup windows and drag-to-scroll handling.
    script_code = (('<script type="text/javascript">\n' + detail) + '\tvar resolution = -1;\n\tvar dragval = [0, 0];\n\tfunction redrawTimescale(t0, tMax, tS) {\n\t\tvar rline = \'<div class="t" style="left:0;border-left:1px solid black;border-right:0;">\';\n\t\tvar tTotal = tMax - t0;\n\t\tvar list = document.getElementsByClassName("tblock");\n\t\tfor (var i = 0; i < list.length; i++) {\n\t\t\tvar timescale = list[i].getElementsByClassName("timescale")[0];\n\t\t\tvar m0 = t0 + (tTotal*parseFloat(list[i].style.left)/100);\n\t\t\tvar mTotal = tTotal*parseFloat(list[i].style.width)/100;\n\t\t\tvar mMax = m0 + mTotal;\n\t\t\tvar html = "";\n\t\t\tvar divTotal = Math.floor(mTotal/tS) + 1;\n\t\t\tif(divTotal > 1000) continue;\n\t\t\tvar divEdge = (mTotal - tS*(divTotal-1))*100/mTotal;\n\t\t\tvar pos = 0.0, val = 0.0;\n\t\t\tfor (var j = 0; j < divTotal; j++) {\n\t\t\t\tvar htmlline = "";\n\t\t\t\tvar mode = list[i].id[5];\n\t\t\t\tif(mode == "s") {\n\t\t\t\t\tpos = 100 - (((j)*tS*100)/mTotal) - divEdge;\n\t\t\t\t\tval = (j-divTotal+1)*tS;\n\t\t\t\t\tif(j == divTotal - 1)\n\t\t\t\t\t\thtmlline = \'<div class="t" style="right:\'+pos+\'%"><cS>S→</cS></div>\';\n\t\t\t\t\telse\n\t\t\t\t\t\thtmlline = \'<div class="t" style="right:\'+pos+\'%">\'+val+\'ms</div>\';\n\t\t\t\t} else {\n\t\t\t\t\tpos = 100 - (((j)*tS*100)/mTotal);\n\t\t\t\t\tval = (j)*tS;\n\t\t\t\t\thtmlline = \'<div class="t" style="right:\'+pos+\'%">\'+val+\'ms</div>\';\n\t\t\t\t\tif(j == 0)\n\t\t\t\t\t\tif(mode == "r")\n\t\t\t\t\t\t\thtmlline = rline+"<cS>←R</cS></div>";\n\t\t\t\t\t\telse\n\t\t\t\t\t\t\thtmlline = rline+"<cS>0ms</div>";\n\t\t\t\t}\n\t\t\t\thtml += htmlline;\n\t\t\t}\n\t\t\ttimescale.innerHTML = html;\n\t\t}\n\t}\n\tfunction zoomTimeline() {\n\t\tvar dmesg = document.getElementById("dmesg");\n\t\tvar zoombox = document.getElementById("dmesgzoombox");\n\t\tvar left = zoombox.scrollLeft;\n\t\tvar val = parseFloat(dmesg.style.width);\n\t\tvar newval = 100;\n\t\tvar sh = window.outerWidth / 2;\n\t\tif(this.id == "zoomin") {\n\t\t\tnewval = val * 1.2;\n\t\t\tif(newval > 910034) newval = 910034;\n\t\t\tdmesg.style.width = newval+"%";\n\t\t\tzoombox.scrollLeft = ((left + sh) * newval / val) - sh;\n\t\t} else if (this.id == "zoomout") {\n\t\t\tnewval = val / 1.2;\n\t\t\tif(newval < 100) newval = 100;\n\t\t\tdmesg.style.width = newval+"%";\n\t\t\tzoombox.scrollLeft = ((left + sh) * newval / val) - sh;\n\t\t} else {\n\t\t\tzoombox.scrollLeft = 0;\n\t\t\tdmesg.style.width = "100%";\n\t\t}\n\t\tvar tS = [10000, 5000, 2000, 1000, 500, 200, 100, 50, 20, 10, 5, 2, 1];\n\t\tvar t0 = bounds[0];\n\t\tvar tMax = bounds[1];\n\t\tvar tTotal = tMax - t0;\n\t\tvar wTotal = tTotal * 100.0 / newval;\n\t\tvar idx = 7*window.innerWidth/1100;\n\t\tfor(var i = 0; (i < tS.length)&&((wTotal / tS[i]) < idx); i++);\n\t\tif(i >= tS.length) i = tS.length - 1;\n\t\tif(tS[i] == resolution) return;\n\t\tresolution = tS[i];\n\t\tredrawTimescale(t0, tMax, tS[i]);\n\t}\n\tfunction deviceName(title) {\n\t\tvar name = title.slice(0, title.indexOf(" ("));\n\t\treturn name;\n\t}\n\tfunction deviceHover() {\n\t\tvar name = deviceName(this.title);\n\t\tvar dmesg = document.getElementById("dmesg");\n\t\tvar dev = dmesg.getElementsByClassName("thread");\n\t\tvar cpu = -1;\n\t\tif(name.match("CPU_ON\\[[0-9]*\\]"))\n\t\t\tcpu = parseInt(name.slice(7));\n\t\telse if(name.match("CPU_OFF\\[[0-9]*\\]"))\n\t\t\tcpu = parseInt(name.slice(8));\n\t\tfor (var i = 0; i < dev.length; i++) {\n\t\t\tdname = deviceName(dev[i].title);\n\t\t\tvar cname = dev[i].className.slice(dev[i].className.indexOf("thread"));\n\t\t\tif((cpu >= 0 && dname.match("CPU_O[NF]*\\\\[*"+cpu+"\\\\]")) ||\n\t\t\t\t(name == dname))\n\t\t\t{\n\t\t\t\tdev[i].className = "hover "+cname;\n\t\t\t} else {\n\t\t\t\tdev[i].className = cname;\n\t\t\t}\n\t\t}\n\t}\n\tfunction deviceUnhover() {\n\t\tvar dmesg = document.getElementById("dmesg");\n\t\tvar dev = dmesg.getElementsByClassName("thread");\n\t\tfor (var i = 0; i < dev.length; i++) {\n\t\t\tdev[i].className = dev[i].className.slice(dev[i].className.indexOf("thread"));\n\t\t}\n\t}\n\tfunction deviceTitle(title, total, cpu) {\n\t\tvar prefix = "Total";\n\t\tif(total.length > 3) {\n\t\t\tprefix = "Average";\n\t\t\ttotal[1] = (total[1]+total[3])/2;\n\t\t\ttotal[2] = (total[2]+total[4])/2;\n\t\t}\n\t\tvar devtitle = document.getElementById("devicedetailtitle");\n\t\tvar name = deviceName(title);\n\t\tif(cpu >= 0) name = "CPU"+cpu;\n\t\tvar driver = "";\n\t\tvar tS = "<t2>(</t2>";\n\t\tvar tR = "<t2>)</t2>";\n\t\tif(total[1] > 0)\n\t\t\ttS = "<t2>("+prefix+" Suspend:</t2><t0> "+total[1].toFixed(3)+" ms</t0> ";\n\t\tif(total[2] > 0)\n\t\t\ttR = " <t2>"+prefix+" Resume:</t2><t0> "+total[2].toFixed(3)+" ms<t2>)</t2></t0>";\n\t\tvar s = title.indexOf("{");\n\t\tvar e = title.indexOf("}");\n\t\tif((s >= 0) && (e >= 0))\n\t\t\tdriver = title.slice(s+1, e) + " <t1></t1> ";\n\t\tif(total[1] > 0 && total[2] > 0)\n\t\t\tdevtitle.innerHTML = "<t0>"+driver+name+"</t0> "+tS+tR;\n\t\telse\n\t\t\tdevtitle.innerHTML = "<t0>"+title+"</t0>";\n\t\treturn name;\n\t}\n\tfunction deviceDetail() {\n\t\tvar devinfo = document.getElementById("devicedetail");\n\t\tdevinfo.style.display = "block";\n\t\tvar name = deviceName(this.title);\n\t\tvar cpu = -1;\n\t\tif(name.match("CPU_ON\\[[0-9]*\\]"))\n\t\t\tcpu = parseInt(name.slice(7));\n\t\telse if(name.match("CPU_OFF\\[[0-9]*\\]"))\n\t\t\tcpu = parseInt(name.slice(8));\n\t\tvar dmesg = document.getElementById("dmesg");\n\t\tvar dev = dmesg.getElementsByClassName("thread");\n\t\tvar idlist = [];\n\t\tvar pdata = [[]];\n\t\tif(document.getElementById("devicedetail1"))\n\t\t\tpdata = [[], []];\n\t\tvar pd = pdata[0];\n\t\tvar total = [0.0, 0.0, 0.0];\n\t\tfor (var i = 0; i < dev.length; i++) {\n\t\t\tdname = deviceName(dev[i].title);\n\t\t\tif((cpu >= 0 && dname.match("CPU_O[NF]*\\\\[*"+cpu+"\\\\]")) ||\n\t\t\t\t(name == dname))\n\t\t\t{\n\t\t\t\tidlist[idlist.length] = dev[i].id;\n\t\t\t\tvar tidx = 1;\n\t\t\t\tif(dev[i].id[0] == "a") {\n\t\t\t\t\tpd = pdata[0];\n\t\t\t\t} else {\n\t\t\t\t\tif(pdata.length == 1) pdata[1] = [];\n\t\t\t\t\tif(total.length == 3) total[3]=total[4]=0.0;\n\t\t\t\t\tpd = pdata[1];\n\t\t\t\t\ttidx = 3;\n\t\t\t\t}\n\t\t\t\tvar info = dev[i].title.split(" ");\n\t\t\t\tvar pname = info[info.length-1];\n\t\t\t\tpd[pname] = parseFloat(info[info.length-3].slice(1));\n\t\t\t\ttotal[0] += pd[pname];\n\t\t\t\tif(pname.indexOf("suspend") >= 0)\n\t\t\t\t\ttotal[tidx] += pd[pname];\n\t\t\t\telse\n\t\t\t\t\ttotal[tidx+1] += pd[pname];\n\t\t\t}\n\t\t}\n\t\tvar devname = deviceTitle(this.title, total, cpu);\n\t\tvar left = 0.0;\n\t\tfor (var t = 0; t < pdata.length; t++) {\n\t\t\tpd = pdata[t];\n\t\t\tdevinfo = document.getElementById("devicedetail"+t);\n\t\t\tvar phases = devinfo.getElementsByClassName("phaselet");\n\t\t\tfor (var i = 0; i < phases.length; i++) {\n\t\t\t\tif(phases[i].id in pd) {\n\t\t\t\t\tvar w = 100.0*pd[phases[i].id]/total[0];\n\t\t\t\t\tvar fs = 32;\n\t\t\t\t\tif(w < 8) fs = 4*w | 0;\n\t\t\t\t\tvar fs2 = fs*3/4;\n\t\t\t\t\tphases[i].style.width = w+"%";\n\t\t\t\t\tphases[i].style.left = left+"%";\n\t\t\t\t\tphases[i].title = phases[i].id+" "+pd[phases[i].id]+" ms";\n\t\t\t\t\tleft += w;\n\t\t\t\t\tvar time = "<t4 style=\\"font-size:"+fs+"px\\">"+pd[phases[i].id]+" ms<br></t4>";\n\t\t\t\t\tvar pname = "<t3 style=\\"font-size:"+fs2+"px\\">"+phases[i].id.replace(new RegExp("_", "g"), " ")+"</t3>";\n\t\t\t\t\tphases[i].innerHTML = time+pname;\n\t\t\t\t} else {\n\t\t\t\t\tphases[i].style.width = "0%";\n\t\t\t\t\tphases[i].style.left = left+"%";\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif(typeof devstats !== \'undefined\')\n\t\t\tcallDetail(this.id, this.title);\n\t\tvar cglist = document.getElementById("callgraphs");\n\t\tif(!cglist) return;\n\t\tvar cg = cglist.getElementsByClassName("atop");\n\t\tif(cg.length < 10) return;\n\t\tfor (var i = 0; i < cg.length; i++) {\n\t\t\tcgid = cg[i].id.split("x")[0]\n\t\t\tif(idlist.indexOf(cgid) >= 0) {\n\t\t\t\tcg[i].style.display = "block";\n\t\t\t} else {\n\t\t\t\tcg[i].style.display = "none";\n\t\t\t}\n\t\t}\n\t}\n\tfunction callDetail(devid, devtitle) {\n\t\tif(!(devid in devstats) || devstats[devid].length < 1)\n\t\t\treturn;\n\t\tvar list = devstats[devid];\n\t\tvar tmp = devtitle.split(" ");\n\t\tvar name = tmp[0], phase = tmp[tmp.length-1];\n\t\tvar dd = document.getElementById(phase);\n\t\tvar total = parseFloat(tmp[1].slice(1));\n\t\tvar mlist = [];\n\t\tvar maxlen = 0;\n\t\tvar info = []\n\t\tfor(var i in list) {\n\t\t\tif(list[i][0] == "") {\n\t\t\t\tinfo = list[i].split("|");\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t\tvar tmp = list[i].split("|");\n\t\t\tvar t = parseFloat(tmp[0]), f = tmp[1], c = parseInt(tmp[2]);\n\t\t\tvar p = (t*100.0/total).toFixed(2);\n\t\t\tmlist[mlist.length] = [f, c, t.toFixed(2), p+"%"];\n\t\t\tif(f.length > maxlen)\n\t\t\t\tmaxlen = f.length;\n\t\t}\n\t\tvar pad = 5;\n\t\tif(mlist.length == 0) pad = 30;\n\t\tvar html = \'<div style="padding-top:\'+pad+\'px"><t3> <b>\'+name+\':</b>\';\n\t\tif(info.length > 2)\n\t\t\thtml += " start=<b>"+info[1]+"</b>, end=<b>"+info[2]+"</b>";\n\t\tif(info.length > 3)\n\t\t\thtml += ", length<i>(w/o overhead)</i>=<b>"+info[3]+" ms</b>";\n\t\tif(info.length > 4)\n\t\t\thtml += ", return=<b>"+info[4]+"</b>";\n\t\thtml += "</t3></div>";\n\t\tif(mlist.length > 0) {\n\t\t\thtml += \'<table class=fstat style="padding-top:\'+(maxlen*5)+\'px;"><tr><th>Function</th>\';\n\t\t\tfor(var i in mlist)\n\t\t\t\thtml += "<td class=vt>"+mlist[i][0]+"</td>";\n\t\t\thtml += "</tr><tr><th>Calls</th>";\n\t\t\tfor(var i in mlist)\n\t\t\t\thtml += "<td>"+mlist[i][1]+"</td>";\n\t\t\thtml += "</tr><tr><th>Time(ms)</th>";\n\t\t\tfor(var i in mlist)\n\t\t\t\thtml += "<td>"+mlist[i][2]+"</td>";\n\t\t\thtml += "</tr><tr><th>Percent</th>";\n\t\t\tfor(var i in mlist)\n\t\t\t\thtml += "<td>"+mlist[i][3]+"</td>";\n\t\t\thtml += "</tr></table>";\n\t\t}\n\t\tdd.innerHTML = html;\n\t\tvar height = (maxlen*5)+100;\n\t\tdd.style.height = height+"px";\n\t\tdocument.getElementById("devicedetail").style.height = height+"px";\n\t}\n\tfunction callSelect() {\n\t\tvar cglist = document.getElementById("callgraphs");\n\t\tif(!cglist) return;\n\t\tvar cg = cglist.getElementsByClassName("atop");\n\t\tfor (var i = 0; i < cg.length; i++) {\n\t\t\tif(this.id == cg[i].id) {\n\t\t\t\tcg[i].style.display = "block";\n\t\t\t} else {\n\t\t\t\tcg[i].style.display = "none";\n\t\t\t}\n\t\t}\n\t}\n\tfunction devListWindow(e) {\n\t\tvar win = window.open();\n\t\tvar html = "<title>"+e.target.innerHTML+"</title>"+\n\t\t\t"<style type=\\"text/css\\">"+\n\t\t\t" ul {list-style-type:circle;padding-left:10px;margin-left:10px;}"+\n\t\t\t"</style>"\n\t\tvar dt = devtable[0];\n\t\tif(e.target.id != "devlist1")\n\t\t\tdt = devtable[1];\n\t\twin.document.write(html+dt);\n\t}\n\tfunction errWindow() {\n\t\tvar range = this.id.split("_");\n\t\tvar idx1 = parseInt(range[0]);\n\t\tvar idx2 = parseInt(range[1]);\n\t\tvar win = window.open();\n\t\tvar log = document.getElementById("dmesglog");\n\t\tvar title = "<title>dmesg log</title>";\n\t\tvar text = log.innerHTML.split("\\n");\n\t\tvar html = "";\n\t\tfor(var i = 0; i < text.length; i++) {\n\t\t\tif(i == idx1) {\n\t\t\t\thtml += "<e id=target>"+text[i]+"</e>\\n";\n\t\t\t} else if(i > idx1 && i <= idx2) {\n\t\t\t\thtml += "<e>"+text[i]+"</e>\\n";\n\t\t\t} else {\n\t\t\t\thtml += text[i]+"\\n";\n\t\t\t}\n\t\t}\n\t\twin.document.write("<style>e{color:red}</style>"+title+"<pre>"+html+"</pre>");\n\t\twin.location.hash = "#target";\n\t\twin.document.close();\n\t}\n\tfunction logWindow(e) {\n\t\tvar name = e.target.id.slice(4);\n\t\tvar win = window.open();\n\t\tvar log = document.getElementById(name+"log");\n\t\tvar title = "<title>"+document.title.split(" ")[0]+" "+name+" log</title>";\n\t\twin.document.write(title+"<pre>"+log.innerHTML+"</pre>");\n\t\twin.document.close();\n\t}\n\tfunction onMouseDown(e) {\n\t\tdragval[0] = e.clientX;\n\t\tdragval[1] = document.getElementById("dmesgzoombox").scrollLeft;\n\t\tdocument.onmousemove = onMouseMove;\n\t}\n\tfunction onMouseMove(e) {\n\t\tvar zoombox = document.getElementById("dmesgzoombox");\n\t\tzoombox.scrollLeft = dragval[1] + dragval[0] - e.clientX;\n\t}\n\tfunction onMouseUp(e) {\n\t\tdocument.onmousemove = null;\n\t}\n\tfunction onKeyPress(e) {\n\t\tvar c = e.charCode;\n\t\tif(c != 42 && c != 43 && c != 45) return;\n\t\tvar click = document.createEvent("Events");\n\t\tclick.initEvent("click", true, false);\n\t\tif(c == 43) \n\t\t\tdocument.getElementById("zoomin").dispatchEvent(click);\n\t\telse if(c == 45)\n\t\t\tdocument.getElementById("zoomout").dispatchEvent(click);\n\t\telse if(c == 42)\n\t\t\tdocument.getElementById("zoomdef").dispatchEvent(click);\n\t}\n\twindow.addEventListener("resize", function () {zoomTimeline();});\n\twindow.addEventListener("load", function () {\n\t\tvar dmesg = document.getElementById("dmesg");\n\t\tdmesg.style.width = "100%"\n\t\tdmesg.onmousedown = onMouseDown;\n\t\tdocument.onmouseup = onMouseUp;\n\t\tdocument.onkeypress = onKeyPress;\n\t\tdocument.getElementById("zoomin").onclick = zoomTimeline;\n\t\tdocument.getElementById("zoomout").onclick = zoomTimeline;\n\t\tdocument.getElementById("zoomdef").onclick = zoomTimeline;\n\t\tvar list = document.getElementsByClassName("err");\n\t\tfor (var i = 0; i < list.length; i++)\n\t\t\tlist[i].onclick = errWindow;\n\t\tvar list = document.getElementsByClassName("logbtn");\n\t\tfor (var i = 0; i < list.length; i++)\n\t\t\tlist[i].onclick = logWindow;\n\t\tlist = document.getElementsByClassName("devlist");\n\t\tfor (var i = 0; i < list.length; i++)\n\t\t\tlist[i].onclick = devListWindow;\n\t\tvar dev = dmesg.getElementsByClassName("thread");\n\t\tfor (var i = 0; i < dev.length; i++) {\n\t\t\tdev[i].onclick = deviceDetail;\n\t\t\tdev[i].onmouseover = deviceHover;\n\t\t\tdev[i].onmouseout = deviceUnhover;\n\t\t}\n\t\tvar dev = dmesg.getElementsByClassName("srccall");\n\t\tfor (var i = 0; i < dev.length; i++)\n\t\t\tdev[i].onclick = callSelect;\n\t\tzoomTimeline();\n\t});\n</script>\n')
    hf.write(script_code)
class PrepDetails(MWSDataType):
    """A PrepDetails element for inbound shipment item preparation.

    Pairs a prep instruction with the party responsible for performing it
    (Amazon or the seller).
    """
    AMAZON = 'AMAZON'
    SELLER = 'SELLER'

    def __init__(self, prep_instruction: Union[(PrepInstruction, str)], prep_owner: str=SELLER):
        # Accepts either a PrepInstruction member or its raw string value.
        self.prep_instruction = prep_instruction
        self.prep_owner = prep_owner

    def params_dict(self) -> dict:
        """Render this object as MWS request parameters."""
        return {
            'PrepInstruction': self.prep_instruction,
            'PrepOwner': self.prep_owner,
        }
class Effect5871(BaseEffect):
    """Passive hull effect: boosts shield-booster amount via shipBonusMI2."""

    type = 'passive'

    def handler(fit, ship, context, projectionRange, **kwargs):
        """Apply the ship's shipBonusMI2 bonus to qualifying modules' shieldBonus."""
        def needs_shield_operation(mod):
            # Only modules requiring the Shield Operation skill are affected.
            return mod.item.requiresSkill('Shield Operation')
        fit.modules.filteredItemBoost(needs_shield_operation, 'shieldBonus', ship.getModifiedItemAttr('shipBonusMI2'), skill='Minmatar Hauler', **kwargs)
class VIIRSSurfaceReflectanceWithVIHandler(VIIRSJRRFileHandler):
    """File handler that optionally masks vegetation indexes using quality flags."""

    def __init__(self, *args, filter_veg: bool=True, **kwargs) -> None:
        # `filter_veg` enables QF-based filtering of NDVI/EVI pixels.
        super().__init__(*args, **kwargs)
        self._filter_veg = filter_veg

    def _mask_invalid(self, data_arr: xr.DataArray, ds_info: dict) -> xr.DataArray:
        """Apply the base-class masking, then drop low-quality NDVI/EVI pixels."""
        new_data_arr = super()._mask_invalid(data_arr, ds_info)
        if ((ds_info['file_key'] in ('NDVI', 'EVI')) and self._filter_veg):
            good_mask = self._get_veg_index_good_mask()
            new_data_arr = new_data_arr.where(good_mask)
        return new_data_arr

    def _get_veg_index_good_mask(self) -> xr.DataArray:
        """Build a boolean mask of usable pixels from the QF bit fields.

        Bit meanings below follow the variable names; confirm against the
        product's quality-flag specification.
        """
        qf1 = self.nc['QF1 Surface Reflectance']
        has_sun_glint = ((qf1 & 192) > 0)  # bits 6-7 (0b11000000)
        is_cloudy = ((qf1 & 12) > 0)  # bits 2-3 (0b00001100)
        cloud_quality = ((qf1 & 3) < 2)  # bits 0-1 below 2 => poor quality
        qf2 = self.nc['QF2 Surface Reflectance']
        has_snow_or_ice = ((qf2 & 32) > 0)  # bit 5
        has_cloud_shadow = ((qf2 & 8) > 0)  # bit 3
        water_mask = (qf2 & 7)  # bits 0-2: land/water classification
        has_water = ((water_mask <= 2) | (water_mask == 5))
        qf7 = self.nc['QF7 Surface Reflectance']
        has_aerosols = ((qf7 & 12) > 8)  # bits 2-3 both set => high aerosol
        adjacent_to_cloud = ((qf7 & 2) > 0)  # bit 1
        # A pixel is bad when any of the above conditions holds.
        bad_mask = (((((((has_sun_glint | is_cloudy) | cloud_quality) | has_snow_or_ice) | has_cloud_shadow) | has_water) | has_aerosols) | adjacent_to_cloud)
        # QF data is at half resolution; repeat each pixel 2x2 to match the
        # finer NDVI/EVI grid before inverting into a "good" mask.
        bad_mask_iband_dask = bad_mask.data.repeat(2, axis=1).repeat(2, axis=0)
        good_mask_iband = xr.DataArray((~ bad_mask_iband_dask), dims=qf1.dims)
        return good_mask_iband
class SAFEMSIMDXML(SAFEMSIXMLMetadata):
    """Mission-data XML metadata reader for SAFE MSI products.

    Provides radiometric calibration helpers (reflectance/radiance) driven by
    values parsed from the product metadata XML held in ``self.root``.

    Fix: the accessors below were preceded by bare ``_property`` expression
    statements (decorators stripped of their '@'); without ``@property`` the
    attribute reads ``self.no_data``, ``self.saturated``,
    ``self.band_offsets.get(...)`` and ``self.physical_gains[...]`` would
    operate on bound methods and fail at runtime.  The decorators are
    restored here.
    """

    def calibrate_to_reflectances(self, data, band_name):
        """Convert digital numbers to reflectances (percent) for `band_name`."""
        quantification = int(self.root.find('.//QUANTIFICATION_VALUE').text)
        data = self._sanitize_data(data)
        return (((data + self.band_offset(band_name)) / quantification) * 100)

    def _sanitize_data(self, data):
        # Mask fill values; optionally flag saturated pixels as +inf.
        data = data.where((data != self.no_data))
        if self.mask_saturated:
            data = data.where((data != self.saturated), np.inf)
        return data

    def band_offset(self, band):
        """Radiometric offset for `band`, defaulting to 0 when absent."""
        band_index = self._band_index(band)
        return self.band_offsets.get(band_index, 0)

    def _band_index(self, band):
        # Translate dataset band names (B01, ...) to XML physical band names
        # (B1, ...) and look up the numeric band id.
        band_indices = self.band_indices
        band_conversions = {'B01': 'B1', 'B02': 'B2', 'B03': 'B3', 'B04': 'B4', 'B05': 'B5', 'B06': 'B6', 'B07': 'B7', 'B08': 'B8', 'B8A': 'B8A', 'B09': 'B9', 'B10': 'B10', 'B11': 'B11', 'B12': 'B12'}
        band_index = band_indices[band_conversions[band]]
        return band_index

    @property
    def band_indices(self):
        """Map physical band name -> integer band id from Spectral_Information."""
        spectral_info = self.root.findall('.//Spectral_Information')
        band_indices = {spec.attrib['physicalBand']: int(spec.attrib['bandId']) for spec in spectral_info}
        return band_indices

    @property
    def band_offsets(self):
        """Map band id -> radiometric offset (empty when the list is absent)."""
        offsets = self.root.find('.//Radiometric_Offset_List')
        if (offsets is not None):
            band_offsets = {int(off.attrib['band_id']): float(off.text) for off in offsets}
        else:
            band_offsets = {}
        return band_offsets

    @property
    def special_values(self):
        """Map special-value name (e.g. NODATA, SATURATED) -> numeric value."""
        special_values = self.root.findall('.//Special_Values')
        special_values_dict = {value[0].text: float(value[1].text) for value in special_values}
        return special_values_dict

    @property
    def no_data(self):
        """The fill value marking missing pixels."""
        return self.special_values['NODATA']

    @property
    def saturated(self):
        """The value marking saturated pixels."""
        return self.special_values['SATURATED']

    def calibrate_to_radiances(self, data, band_name):
        """Convert digital numbers to radiances for `band_name`."""
        physical_gain = self.physical_gain(band_name)
        data = self._sanitize_data(data)
        return ((data + self.band_offset(band_name)) / physical_gain)

    def physical_gain(self, band_name):
        """Physical gain for `band_name` from the PHYSICAL_GAINS elements."""
        band_index = self._band_index(band_name)
        return self.physical_gains[band_index]

    @property
    def physical_gains(self):
        """Map band id -> physical gain parsed from the metadata XML."""
        physical_gains = {int(elt.attrib['bandId']): float(elt.text) for elt in self.root.findall('.//PHYSICAL_GAINS')}
        return physical_gains
def _get_expr(s: str) -> Tuple[(str, str)]:
level: int = 0
for (i, c) in enumerate(s):
if (c in ['(', '{']):
level += 1
elif ((level > 0) and (c in [')', '}'])):
level -= 1
elif ((level == 0) and (c in [')', '}', ','])):
break
return (s[0:i], s[i:]) |
@_destruct_output_when_exp('content')  # NOTE(review): '@' restored — source had a bare call (a no-op); confirm decorator name against upstream
def put_scope(name: str, content: Union[Output, List[Output]] = None, scope: str = None, position: int = OutputPosition.BOTTOM) -> Output:
    """Create a new scope `name` and output `content` into it.

    :param name: scope name; validated and converted to a DOM id.
    :param content: a single Output or a list of Outputs placed inside the
        new scope (default: empty).  A bare Output is wrapped in a list.
    :param scope: parent scope the new scope is placed into.
    :param position: insert position within the parent scope.
    :return: the Output spec describing the new scope.
    """
    # Avoid the original mutable-default-argument ([]): normalize here instead.
    if content is None:
        content = []
    elif not isinstance(content, list):
        content = [content]
    check_dom_name_value(name, 'scope name')
    dom_id = scope2dom(name, no_css_selector=True)
    spec = _get_output_spec('scope', dom_id=dom_id, contents=content, scope=scope, position=position)
    return Output(spec)
class CostRegNet(nn.Module):
    """3D cost-volume regularization network (U-Net style).

    The encoder halves the spatial resolution three times while widening
    channels (32 -> 8 -> 16 -> 32 -> 64); the decoder upsamples back with
    transposed convolutions, adding the matching encoder activation at
    each scale, and a final 3D conv projects to a single channel of
    (unnormalized) scores.
    """

    def __init__(self):
        super().__init__()
        # Encoder path.
        self.conv0 = ConvBnReLU3D(32, 8)
        self.conv1 = ConvBnReLU3D(8, 16, stride=2)
        self.conv2 = ConvBnReLU3D(16, 16)
        self.conv3 = ConvBnReLU3D(16, 32, stride=2)
        self.conv4 = ConvBnReLU3D(32, 32)
        self.conv5 = ConvBnReLU3D(32, 64, stride=2)
        self.conv6 = ConvBnReLU3D(64, 64)
        # Decoder path: each stage doubles the spatial resolution.
        self.conv7 = nn.Sequential(
            nn.ConvTranspose3d(64, 32, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False),
            nn.BatchNorm3d(32),
            nn.ReLU(inplace=True),
        )
        self.conv9 = nn.Sequential(
            nn.ConvTranspose3d(32, 16, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False),
            nn.BatchNorm3d(16),
            nn.ReLU(inplace=True),
        )
        self.conv11 = nn.Sequential(
            nn.ConvTranspose3d(16, 8, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False),
            nn.BatchNorm3d(8),
            nn.ReLU(inplace=True),
        )
        # Final projection to a single-channel volume.
        self.prob = nn.Conv3d(8, 1, 3, stride=1, padding=1)

    def forward(self, x):
        """Regularize a 32-channel cost volume into a 1-channel volume."""
        skip0 = self.conv0(x)
        skip2 = self.conv2(self.conv1(skip0))
        skip4 = self.conv4(self.conv3(skip2))
        out = self.conv6(self.conv5(skip4))
        # Decoder with additive skip connections.
        out = skip4 + self.conv7(out)
        out = skip2 + self.conv9(out)
        out = skip0 + self.conv11(out)
        return self.prob(out)
class Player():
    """Object-oriented wrapper around a SA-MP player id.

    Every instance stores only the integer player id; each method is a
    thin delegate to the corresponding module-level function, passing
    ``self.id`` as the first argument.  Two Player objects constructed
    with the same id therefore refer to the same in-game player.
    """

    def __init__(self, playerid: int):
        # The raw server-side player id this wrapper delegates to.
        self.id: int = playerid

    def set_spawn_info(self, team: int, skin: int, x: float, y: float, z: float, rotation: float, weapon1: int, weapon1_ammo: int, weapon2: int, weapon2_ammo: int, weapon3: int, weapon3_ammo: int) -> bool:
        return set_spawn_info(self.id, team, skin, x, y, z, rotation, weapon1, weapon1_ammo, weapon2, weapon2_ammo, weapon3, weapon3_ammo)

    def get_id(self) -> int:
        return self.id

    def spawn(self) -> bool:
        return spawn_player(self.id)

    def set_pos_find_z(self, x: float, y: float, z: float) -> bool:
        return set_player_pos_find_z(self.id, x, y, z)

    def get_pos(self) -> Tuple[(float, float, float)]:
        return get_player_pos(self.id)

    def set_pos(self, x: float, y: float, z: float) -> bool:
        return set_player_pos(self.id, x, y, z)

    def get_facing_angle(self) -> float:
        return get_player_facing_angle(self.id)

    def set_facing_angle(self, angle: float) -> bool:
        return set_player_facing_angle(self.id, angle)

    def is_in_range_of_point(self, range: float, x: float, y: float, z: float) -> bool:
        return is_player_in_range_of_point(self.id, range, x, y, z)

    def distance_from_point(self, x: float, y: float, z: float) -> float:
        return get_player_distance_from_point(self.id, x, y, z)

    def is_streamed_in(self, player: 'Player') -> bool:
        return is_player_streamed_in(self.id, player.id)

    def get_interior(self) -> int:
        return get_player_interior(self.id)

    def set_interior(self, interior_id: int) -> bool:
        return set_player_interior(self.id, interior_id)

    def get_health(self) -> float:
        return get_player_health(self.id)

    def set_health(self, health: float) -> bool:
        return set_player_health(self.id, health)

    def get_armour(self) -> float:
        return get_player_armour(self.id)

    def set_armour(self, armour: float) -> bool:
        return set_player_armour(self.id, armour)

    def get_ammo(self) -> int:
        return get_player_ammo(self.id)

    def set_ammo(self, weaponid: int, ammo: int) -> bool:
        return set_player_ammo(self.id, weaponid, ammo)

    def get_weapon_state(self) -> int:
        return get_player_weapon_state(self.id)

    def get_target_player(self) -> int:
        return get_player_target_player(self.id)

    def get_target_actor(self) -> int:
        return get_player_target_actor(self.id)

    def get_team(self) -> int:
        return get_player_team(self.id)

    def set_team(self, teamid: int) -> bool:
        return set_player_team(self.id, teamid)

    def get_score(self) -> int:
        return get_player_score(self.id)

    def set_score(self, score: int) -> bool:
        return set_player_score(self.id, score)

    def get_drunk_level(self) -> int:
        return get_player_drunk_level(self.id)

    def set_drunk_level(self, level: int) -> bool:
        return set_player_drunk_level(self.id, level)

    def get_color(self) -> int:
        return get_player_color(self.id)

    def set_color(self, color: int) -> bool:
        return set_player_color(self.id, color)

    def get_skin(self) -> int:
        return get_player_skin(self.id)

    def set_skin(self, skinid: int) -> bool:
        return set_player_skin(self.id, skinid)

    def give_weapon(self, weapon_id: int, ammo: int) -> bool:
        return give_player_weapon(self.id, weapon_id, ammo)

    def reset_weapons(self) -> bool:
        return reset_player_weapons(self.id)

    def set_armed_weapon(self, weapon_id: int) -> bool:
        return set_player_armed_weapon(self.id, weapon_id)

    def get_weapon_data(self, slot: int) -> Tuple[(int, int)]:
        return get_player_weapon_data(self.id, slot)

    def give_money(self, money: int) -> bool:
        return give_player_money(self.id, money)

    def reset_money(self) -> bool:
        return reset_player_money(self.id)

    def get_name(self) -> str:
        return get_player_name(self.id)

    def set_name(self, name: str) -> int:
        return set_player_name(self.id, name)

    def get_money(self) -> int:
        return get_player_money(self.id)

    def set_money(self, money: int) -> bool:
        # There is no native "set money": emulate it by resetting to zero,
        # then granting the target amount.
        reset_player_money(self.id)
        return give_player_money(self.id, money)

    def get_state(self) -> int:
        return get_player_state(self.id)

    def get_ip(self) -> str:
        return get_player_ip(self.id)

    def get_ping(self) -> int:
        return get_player_ping(self.id)

    def weapon(self) -> int:
        return get_player_weapon(self.id)

    def get_keys(self) -> Tuple[(int, int, int)]:
        return get_player_keys(self.id)

    def get_time(self) -> Tuple[(int, int)]:
        return get_player_time(self.id)

    def set_time(self, hour: int, minute: int) -> bool:
        return set_player_time(self.id, hour, minute)

    def toggle_clock(self, toggle: bool) -> bool:
        return toggle_player_clock(self.id, toggle)

    def set_weather(self, weather: int) -> bool:
        return set_player_weather(self.id, weather)

    def force_class_selection(self) -> bool:
        # Calls the module-level function of the same name (class scope does
        # not shadow it inside the method body).
        return force_class_selection(self.id)

    def get_wanted_level(self) -> int:
        return get_player_wanted_level(self.id)

    def set_wanted_level(self, level: int) -> bool:
        return set_player_wanted_level(self.id, level)

    def get_fighting_style(self) -> int:
        return get_player_fighting_style(self.id)

    def set_fighting_style(self, style: int) -> bool:
        return set_player_fighting_style(self.id, style)

    def get_velocity(self) -> Tuple[(float, float, float)]:
        return get_player_velocity(self.id)

    def set_velocity(self, x: float, y: float, z: float) -> bool:
        return set_player_velocity(self.id, x, y, z)

    def play_crime_report(self, suspect: 'Player', crime: int) -> bool:
        return play_crime_report_for_player(self.id, suspect.id, crime)

    def play_audio_stream(self, url: str, position_x: float=0.0, position_y: float=0.0, position_z: float=0.0, distance: float=50.0, usepos: bool=False) -> bool:
        return play_audio_stream_for_player(self.id, url, position_x, position_y, position_z, distance, usepos)

    def stop_audio_stream(self) -> bool:
        return stop_audio_stream_for_player(self.id)

    def set_shop_name(self, shop_name: str) -> bool:
        return set_player_shop_name(self.id, shop_name)

    def set_skill_level(self, weapon_skill: int, level: int) -> bool:
        return set_player_skill_level(self.id, weapon_skill, level)

    def get_surfing_vehicle(self) -> Optional['Vehicle']:
        # Wrap the raw id; None when the player is not surfing a vehicle.
        veh_id = get_player_surfing_vehicle_id(self.id)
        if (veh_id == INVALID_VEHICLE_ID):
            return None
        return Vehicle(veh_id)

    def get_surfing_object(self) -> Optional['Object']:
        obj_id = get_player_surfing_object_id(self.id)
        if (obj_id == INVALID_OBJECT_ID):
            return None
        return Object(obj_id)

    def remove_building(self, model_id: int, x: float, y: float, z: float, radius: float) -> bool:
        return remove_building_for_player(self.id, model_id, x, y, z, radius)

    def get_last_shot_vectors(self) -> Tuple[(float, float, float, float, float, float)]:
        return get_player_last_shot_vectors(self.id)

    def set_attached_object(self, index: int, model_id: int, bone: int, offset_x: float=0.0, offset_y: float=0.0, offset_z: float=0.0, rotation_x: float=0.0, rotation_y: float=0.0, rotation_z: float=0.0, scale_x: float=1.0, scale_y: float=1.0, scale_z: float=1.0, material_color_1: int=0, material_color_2: int=0) -> bool:
        return set_player_attached_object(self.id, index, model_id, bone, offset_x, offset_y, offset_z, rotation_x, rotation_y, rotation_z, scale_x, scale_y, scale_z, material_color_1, material_color_2)

    def remove_attached_object(self, index: int) -> bool:
        return remove_player_attached_object(self.id, index)

    def is_attached_object_slot_used(self, index: int) -> bool:
        return is_player_attached_object_slot_used(self.id, index)

    def edit_attached_object(self, index: int) -> bool:
        return edit_attached_object(self.id, index)

    def cancel_edit(self):
        return cancel_edit(self.id)

    # Per-player variable (PVar) accessors.
    def get_pvar_int(self, var_name: str) -> int:
        return get_pvar_int(self.id, var_name)

    def set_pvar_int(self, var_name: str, value: int) -> bool:
        return set_pvar_int(self.id, var_name, value)

    def get_pvar_string(self, var_name: str) -> str:
        return get_pvar_string(self.id, var_name)

    def set_pvar_string(self, var_name: str, value: str) -> bool:
        return set_pvar_string(self.id, var_name, value)

    def get_pvar_float(self, var_name: str) -> float:
        return get_pvar_float(self.id, var_name)

    def set_pvar_float(self, var_name: str, value: float) -> bool:
        return set_pvar_float(self.id, var_name, value)

    def delete_pvar(self, var_name: str) -> bool:
        return delete_pvar(self.id, var_name)

    def get_pvars_upper_index(self) -> int:
        return get_pvars_upper_index(self.id)

    def get_pvar_name_at_index(self, index: int) -> str:
        return get_pvar_name_at_index(self.id, index)

    def get_pvar_type(self, var_name: str) -> int:
        return get_pvar_type(self.id, var_name)

    def set_chat_bubble(self, text: str, color: int, draw_distance: float, expiretime: int) -> bool:
        return set_player_chat_bubble(self.id, text, color, draw_distance, expiretime)

    def put_in_vehicle(self, vehicle_id: int, seat_id: int) -> bool:
        return put_player_in_vehicle(self.id, vehicle_id, seat_id)

    def get_vehicle_id(self) -> int:
        return get_player_vehicle_id(self.id)

    def get_vehicle_seat(self) -> int:
        return get_player_vehicle_seat(self.id)

    def remove_from_vehicle(self) -> bool:
        return remove_player_from_vehicle(self.id)

    def toggle_controllable(self, toggle: bool) -> bool:
        return toggle_player_controllable(self.id, toggle)

    def play_sound(self, soundid: int, x: float, y: float, z: float) -> bool:
        return player_play_sound(self.id, soundid, x, y, z)

    def apply_animation(self, animation_library: str, animation_name: str, delta: float, loop: bool, lock_x: bool, lock_y: bool, freeze: bool, time: int, force_sync: bool=False) -> bool:
        return apply_animation(self.id, animation_library, animation_name, delta, loop, lock_x, lock_y, freeze, time, force_sync)

    def clear_animations(self, forcesync: bool=False) -> bool:
        return clear_animations(self.id, forcesync)

    def animation_index(self) -> int:
        return get_player_animation_index(self.id)

    def get_special_action(self) -> int:
        return get_player_special_action(self.id)

    def set_special_action(self, action_id: int) -> bool:
        return set_player_special_action(self.id, action_id)

    def disable_remote_vehicle_collisions(self, disable: bool) -> bool:
        return disable_remote_vehicle_collisions(self.id, disable)

    def set_checkpoint(self, x: float, y: float, z: float, size: float) -> bool:
        return set_player_checkpoint(self.id, x, y, z, size)

    def disable_checkpoint(self) -> bool:
        return disable_player_checkpoint(self.id)

    def set_race_checkpoint(self, type: int, x: float, y: float, z: float, next_x: float, next_y: float, next_z: float, size: float) -> bool:
        return set_player_race_checkpoint(self.id, type, x, y, z, next_x, next_y, next_z, size)

    def disable_race_checkpoint(self) -> bool:
        return disable_player_race_checkpoint(self.id)

    def set_world_bounds(self, x_max: float, x_min: float, y_max: float, y_min: float) -> bool:
        return set_player_world_bounds(self.id, x_max, x_min, y_max, y_min)

    def set_marker(self, showplayer: 'Player', color: int) -> bool:
        return set_player_marker_for_player(self.id, showplayer.id, color)

    def show_name_tag(self, showplayer: 'Player', show: bool) -> bool:
        return show_player_name_tag_for_player(self.id, showplayer.id, show)

    def set_map_icon(self, icon_id: int, x: float, y: float, z: float, marker_type: int, color: int, style: int=MAPICON_LOCAL) -> bool:
        return set_player_map_icon(self.id, icon_id, x, y, z, marker_type, color, style)

    def remove_map_icon(self, icon_id: int) -> bool:
        return remove_player_map_icon(self.id, icon_id)

    def allow_teleport(self, allow: bool) -> bool:
        return allow_player_teleport(self.id, allow)

    # Camera control.
    def set_camera_look_at(self, x: float, y: float, z: float, cut: int=CAMERA_CUT) -> bool:
        return set_player_camera_look_at(self.id, x, y, z, cut)

    def set_camera_behind(self) -> bool:
        return set_camera_behind_player(self.id)

    def get_camera_position(self) -> Tuple[(float, float, float)]:
        return get_player_camera_pos(self.id)

    def set_camera_position(self, x: float, y: float, z: float) -> bool:
        return set_player_camera_pos(self.id, x, y, z)

    def get_camera_front_vector(self) -> Tuple[(float, float, float)]:
        return get_player_camera_front_vector(self.id)

    def get_camera_mode(self) -> int:
        return get_player_camera_mode(self.id)

    def enable_camera_target(self, enable: bool) -> bool:
        return enable_player_camera_target(self.id, enable)

    def get_camera_target_object(self) -> Optional['Object']:
        object_id = get_player_camera_target_object(self.id)
        if (object_id == INVALID_OBJECT_ID):
            return None
        return Object(object_id)

    def get_camera_target_vehicle(self) -> Optional['Vehicle']:
        vehicle_id = get_player_camera_target_vehicle(self.id)
        if (vehicle_id == INVALID_VEHICLE_ID):
            return None
        return Vehicle(vehicle_id)

    def get_camera_target_player(self) -> Optional['Player']:
        player_id = get_player_camera_target_player(self.id)
        if (player_id == INVALID_PLAYER_ID):
            return None
        return Player(player_id)

    def camera_target_actor(self) -> Optional['Actor']:
        actor_id = get_player_camera_target_actor(self.id)
        if (actor_id == INVALID_ACTOR_ID):
            return None
        return Actor(actor_id)

    def get_camera_aspect_ratio(self) -> float:
        return get_player_camera_aspect_ratio(self.id)

    def get_camera_zoom(self) -> float:
        return get_player_camera_zoom(self.id)

    def interpolate_camera_position(self, from_x: float, from_y: float, from_z: float, to_x: float, to_y: float, to_z: float, time: int, cut: int=CAMERA_CUT) -> bool:
        return interpolate_camera_pos(self.id, from_x, from_y, from_z, to_x, to_y, to_z, time, cut)

    def interpolate_camera_look_at(self, from_x: float, from_y: float, from_z: float, to_x: float, to_y: float, to_z: float, time: int, cut: int=CAMERA_CUT) -> bool:
        return interpolate_camera_look_at(self.id, from_x, from_y, from_z, to_x, to_y, to_z, time, cut)

    def is_connected(self) -> bool:
        return is_player_connected(self.id)

    def is_in_vehicle(self, vehicle_id: int) -> bool:
        return is_player_in_vehicle(self.id, vehicle_id)

    def is_in_any_vehicle(self) -> bool:
        return is_player_in_any_vehicle(self.id)

    def is_in_checkpoint(self) -> bool:
        return is_player_in_checkpoint(self.id)

    def is_in_race_checkpoint(self) -> bool:
        return is_player_in_race_checkpoint(self.id)

    def get_virtual_world(self) -> int:
        return get_player_virtual_world(self.id)

    def set_virtual_world(self, world_id: int) -> bool:
        return set_player_virtual_world(self.id, world_id)

    def enable_stunt_bonus(self, enable: bool) -> bool:
        return enable_stunt_bonus_for_player(self.id, enable)

    def toggle_spectating(self, toggle: bool) -> bool:
        return toggle_player_spectating(self.id, toggle)

    def spectate_player(self, target_player: 'Player', mode: int=SPECTATE_MODE_NORMAL) -> bool:
        return player_spectate_player(self.id, target_player.id, mode)

    def spectate_vehicle(self, target_vehicle: 'Vehicle', mode: int=SPECTATE_MODE_NORMAL) -> bool:
        return player_spectate_vehicle(self.id, target_vehicle.id, mode)

    def start_recording_data(self, recordtype: int, recordname: str) -> bool:
        return start_recording_player_data(self.id, recordtype, recordname)

    def stop_recording_data(self) -> bool:
        return stop_recording_player_data(self.id)

    def create_explosion(self, x: float, y: float, z: float, type: int, radius: float) -> bool:
        return create_explosion_for_player(self.id, x, y, z, type, radius)

    def send_client_message(self, color: int, message: str) -> bool:
        return send_client_message(self.id, color, message)

    def send_message_to_player(self, sender: 'Player', message: str) -> bool:
        return send_player_message_to_player(self.id, sender.id, message)

    def send_death_message(self, killer: 'Player', killee: 'Player', weapon: int) -> bool:
        return send_death_message_to_player(self.id, killer.id, killee.id, weapon)

    def game_text(self, text: str, time: int, style: int) -> bool:
        return game_text_for_player(self.id, text, time, style)

    def is_npc(self) -> bool:
        return is_player_npc(self.id)

    def is_admin(self) -> bool:
        return is_player_admin(self.id)

    def kick(self) -> bool:
        return kick(self.id)

    def ban(self) -> bool:
        return ban(self.id)

    def ban_ex(self, reason: str) -> bool:
        return ban_ex(self.id, reason)

    def gpci(self) -> str:
        return gpci(self.id)

    def attach_camera_to_player_object(self, player_object: 'PlayerObject') -> bool:
        return attach_camera_to_player_object(self.id, player_object.id)

    # ------------------------------------------------------------------
    # Callback adapters: translate raw callback arguments into wrapper
    # objects for user handlers.
    #
    # NOTE(review): each adapter below is preceded by a bare string
    # expression like ('OnPlayerConnect') — a no-op as written.  These look
    # like decorators whose '@' and name were stripped during extraction
    # (presumably a callback-registration decorator, plus @classmethod,
    # since every adapter takes `cls`).  Restore against upstream before
    # relying on them.
    # ------------------------------------------------------------------
    ('OnEnterExitModShop')
    def on_enter_exit_mod_shop(cls, playerid: int, enterexit: int, interiorid: int):
        return (cls(playerid), enterexit, interiorid)
    ('OnPlayerConnect')
    def on_connect(cls, playerid: int):
        return (cls(playerid),)
    ('OnPlayerDisconnect')
    def on_disconnect(cls, playerid: int, reason: int):
        return (cls(playerid), reason)
    ('OnPlayerSpawn')
    def on_spawn(cls, playerid: int):
        return (cls(playerid),)
    ('OnPlayerDeath')
    def on_death(cls, playerid: int, killerid: int, reason: int):
        # An invalid killer id is passed through as the raw int, not wrapped.
        return (cls(playerid), (killerid if (killerid == INVALID_PLAYER_ID) else cls(killerid)), reason)
    ('OnPlayerText')
    def on_text(cls, playerid: int, text: str):
        return (cls(playerid), text)
    ('OnPlayerCommandText')
    def on_command_text(cls, playerid: int, command_text: str):
        return (cls(playerid), command_text)
    ('OnPlayerRequestClass')
    def on_request_class(cls, playerid: int, classid: int):
        return (cls(playerid), classid)
    ('OnPlayerEnterVehicle')
    def on_enter_vehicle(cls, playerid: int, vehicleid: int, is_passenger: bool):
        return (cls(playerid), Vehicle(vehicleid), is_passenger)
    ('OnPlayerExitVehicle')
    def on_exit_vehicle(cls, playerid: int, vehicleid: int):
        return (cls(playerid), Vehicle(vehicleid))
    ('OnPlayerStateChange')
    def on_state_change(cls, playerid, newstate: int, oldstate: int):
        return (cls(playerid), newstate, oldstate)
    ('OnPlayerEnterCheckpoint')
    def on_enter_checkpoint(cls, playerid: int):
        return (cls(playerid),)
    ('OnPlayerLeaveCheckpoint')
    def on_leave_checkpoint(cls, playerid: int):
        return (cls(playerid),)
    ('OnPlayerEnterRaceCheckpoint')
    def on_enter_race_checkpoint(cls, playerid: int):
        return (cls(playerid),)
    ('OnPlayerLeaveRaceCheckpoint')
    def on_leave_race_checkpoint(cls, playerid: int):
        return (cls(playerid),)
    ('OnPlayerRequestSpawn')
    def on_request_spawn(cls, playerid: int):
        return (cls(playerid),)
    ('OnPlayerPickUpPickup')
    def on_pick_up_pickup(cls, playerid, pickupid: int):
        return (cls(playerid), Pickup(pickupid))
    ('OnPlayerSelectedMenuRow')
    def on_selected_menu_row(cls, playerid: int, row: int):
        return (cls(playerid), row)
    ('OnPlayerExitedMenu')
    def on_exited_menu(cls, playerid: int):
        return (cls(playerid),)
    ('OnPlayerInteriorChange')
    def on_interior_change(cls, playerid: int, newinteriorid: int, oldinteriorid: int):
        return (cls(playerid), newinteriorid, oldinteriorid)
    ('OnPlayerKeyStateChange')
    def on_key_state_change(cls, playerid: int, newkeys: int, oldkeys: int):
        return (cls(playerid), newkeys, oldkeys)
    ('OnPlayerUpdate')
    def on_update(cls, playerid: int):
        return (cls(playerid),)
    ('OnPlayerStreamIn')
    def on_stream_in(cls, playerid: int, forplayerid: int):
        return (cls(playerid), cls(forplayerid))
    ('OnPlayerStreamOut')
    def on_stream_out(cls, playerid: int, forplayerid: int):
        return (cls(playerid), cls(forplayerid))
    ('OnPlayerTakeDamage')
    def on_take_damage(cls, playerid: int, issuerid: int, amount: float, weaponid: int, bodypart: int):
        # An invalid issuer id is passed through unwrapped, like on_death.
        return (cls(playerid), (issuerid if (issuerid == INVALID_PLAYER_ID) else cls(issuerid)), amount, weaponid, bodypart)
    ('OnPlayerGiveDamage')
    def on_give_damage(cls, playerid: int, damagedid: int, amount: float, weaponid: int, bodypart: int):
        return (cls(playerid), (damagedid if (damagedid == INVALID_PLAYER_ID) else cls(damagedid)), amount, weaponid, bodypart)
    ('OnPlayerGiveDamageActor')
    def on_give_damage_actor(cls, playerid: int, damaged_actorid: int, amount: float, weaponid: int, bodypart: int):
        return (cls(playerid), Actor(damaged_actorid), amount, weaponid, bodypart)
    ('OnPlayerClickMap')
    def on_click_map(cls, playerid: int, x: float, y: float, z: float):
        return (cls(playerid), x, y, z)
    ('OnPlayerClickTextDraw')
    def on_click_textdraw(cls, playerid: int, clickedid: int):
        return (cls(playerid), TextDraw(clickedid))
    ('OnPlayerClickPlayerTextDraw')
    def on_click_playertextdraw(cls, playerid: int, playertextid: int):
        return (cls(playerid), PlayerTextDraw(playertextid))
    ('OnPlayerClickPlayer')
    def on_click_player(cls, playerid: int, clickedplayerid: int, source: int):
        return (cls(playerid), cls(clickedplayerid), source)
    ('OnPlayerEditObject')
    def on_edit_object(cls, playerid: int, is_playerobject: bool, objectid: int, response: int, x: float, y: float, z: float, rot_x: float, rot_y: float, rot_z: float):
        # Wrap the id as a PlayerObject or global Object depending on the flag.
        return (cls(playerid), (PlayerObject(objectid) if is_playerobject else Object(objectid)), response, x, y, z, rot_x, rot_y, rot_z)
    ('OnPlayerEditAttachedObject')
    def on_edit_attached_object(cls, playerid: int, response: int, index: int, modelid: int, boneid: int, offset_x: float, offset_y: float, offset_z: float, rot_x: float, rot_y: float, rot_z: float, scale_x: float, scale_y: float, scale_z: float):
        return (cls(playerid), response, index, modelid, boneid, offset_x, offset_y, offset_z, rot_x, rot_y, rot_z, scale_x, scale_y, scale_z)
    ('OnPlayerSelectObject')
    def on_select_object(cls, playerid: int, type: int, objectid: int, modelid: int, x: float, y: float, z: float):
        # Dispatch table: select the wrapper class by object-selection type.
        object_cls = {SELECT_OBJECT_GLOBAL_OBJECT: Object, SELECT_OBJECT_PLAYER_OBJECT: PlayerObject}
        return (cls(playerid), object_cls[type](objectid), modelid, x, y, z)
    ('OnPlayerWeaponShot')
    def on_weapon_shot(cls, playerid: int, weaponid: int, hittype: int, hitid: int, x: float, y: float, z: float):
        # Dispatch table mapping hit type -> wrapper constructor; a miss
        # (BULLET_HIT_TYPE_NONE) maps to None via the discard lambda.
        hit_cls = {BULLET_HIT_TYPE_NONE: (lambda _: None), BULLET_HIT_TYPE_PLAYER: cls, BULLET_HIT_TYPE_VEHICLE: Vehicle, BULLET_HIT_TYPE_OBJECT: Object, BULLET_HIT_TYPE_PLAYER_OBJECT: PlayerObject}
        return (cls(playerid), weaponid, hit_cls[hittype](hitid), x, y, z)

    # NOTE(review): `command` takes `cls` but shows no @classmethod, and the
    # bare `(function)` statement inside is a no-op — likely a stripped
    # '@functools.wraps(function)'.  Confirm against upstream.
    def command(cls, function=_NO_FUNCTION, **kwargs):
        # Called without a function: act as a decorator factory carrying kwargs.
        if (function is _NO_FUNCTION):
            return functools.partial(cls.command, **kwargs)
        (function)
        def handler(playerid, *args):
            # Wrap the raw player id before invoking the user's command handler.
            return function(cls(playerid), *args)
        return cmd(handler, **kwargs)
def muti_loss_fusion_kl(preds, target, dfs, fs, mode='MSE'):
    """Multi-scale BCE supervision plus a feature-alignment term.

    preds: predictions at possibly different spatial sizes; the target is
        bilinearly resized to match each one before the BCE term.
    dfs, fs: paired feature maps compared with the distance selected by
        `mode` ('MSE', 'KL', 'MAE' or 'SmoothL1'; anything else adds no
        feature term).

    Returns (loss0, loss): the BCE loss of the first prediction alone,
    and the accumulated total loss.
    """
    loss0 = 0.0
    total = 0.0
    for idx, pred in enumerate(preds):
        gt = target
        if pred.shape[2] != target.shape[2] or pred.shape[3] != target.shape[3]:
            # Spatial mismatch: resize the target to this prediction's size.
            gt = F.interpolate(target, size=pred.size()[2:], mode='bilinear', align_corners=True)
        total = total + bce_loss(pred, gt)
        if idx == 0:
            # Keep the first prediction's loss separately for reporting.
            loss0 = total
    for student, teacher in zip(dfs, fs):
        if mode == 'MSE':
            total = total + fea_loss(student, teacher)
        elif mode == 'KL':
            total = total + kl_loss(F.log_softmax(student, dim=1), F.softmax(teacher, dim=1))
        elif mode == 'MAE':
            total = total + l1_loss(student, teacher)
        elif mode == 'SmoothL1':
            total = total + smooth_l1_loss(student, teacher)
    return (loss0, total)
class Scenario(ScenarioGenerator):
    """OpenSCENARIO example: 20 target vehicles looping on a straight road.

    Each target is teleported back to the road start whenever it reaches
    the end of the road, producing indefinite traffic; the simulation is
    stopped after 100 simulated seconds.
    """

    def __init__(self):
        super().__init__()
        # OpenSCENARIO minor version to emit (i.e. OpenSCENARIO 1.2).
        self.open_scenario_version = 2

    def scenario(self, **kwargs):
        """Assemble and return the xosc.Scenario object."""
        catalog = xosc.Catalog()
        catalog.add_catalog('VehicleCatalog', '../xosc/Catalogs/Vehicles')
        road = xosc.RoadNetwork(roadfile='../xodr/straight_500m.xodr', scenegraph='../models/straight_500m.osgb')
        paramdec = xosc.ParameterDeclarations()
        # NOTE(review): egoname is unused — no ego vehicle is added to the scenario.
        egoname = 'Ego'
        targetname = 'Target'
        entities = xosc.Entities()
        init = xosc.Init()
        act = xosc.Act('indef traffic')
        for i in range(20):
            # Spawn targets 20 m apart along lane -1 of road 1, starting at s=100,
            # each immediately set to speed 60 via a step speed profile.
            entities.add_scenario_object((targetname + str(i)), xosc.CatalogReference('VehicleCatalog', 'car_yellow'))
            init.add_init_action((targetname + str(i)), xosc.TeleportAction(xosc.LanePosition((100 + (i * 20)), 0, (- 1), 1)))
            init.add_init_action((targetname + str(i)), xosc.AbsoluteSpeedAction(60, xosc.TransitionDynamics(xosc.DynamicsShapes.step, xosc.DynamicsDimension.time, 1)))
            # When this target reaches the end of the road, teleport it back to s=0.
            event = xosc.Event('speedchange', xosc.Priority.overwrite, maxexecution=10)
            event.add_action('restart', xosc.TeleportAction(xosc.LanePosition(0, 0, (- 1), 1)))
            trig_cond = xosc.EndOfRoadCondition(0)
            event.add_trigger(xosc.EntityTrigger('trigger', 0, xosc.ConditionEdge.rising, trig_cond, (targetname + str(i))))
            man = xosc.Maneuver('mymaneuver')
            man.add_event(event)
            mangr = xosc.ManeuverGroup('mangr', maxexecution=3)
            mangr.add_maneuver(man)
            mangr.add_actor((targetname + str(i)))
            act.add_maneuver_group(mangr)
        # Stop the whole simulation once simulation time exceeds 100 s.
        sb = xosc.StoryBoard(init, xosc.ValueTrigger('stop_simulation', 0, xosc.ConditionEdge.rising, xosc.SimulationTimeCondition(100, xosc.Rule.greaterThan), 'stop'))
        sb.add_act(act)
        sce = xosc.Scenario('adaptspeed_example', 'User', paramdec, entities=entities, storyboard=sb, roadnetwork=road, catalog=catalog, osc_minor_version=self.open_scenario_version)
        return sce
class ResourceDatabaseItemModel(ResourceDatabaseGenericModel):
    """Table model exposing the ITEM resources of a ResourceDatabase."""

    def __init__(self, db: ResourceDatabase):
        super().__init__(db, ResourceType.ITEM)

    def all_columns(self):
        """Item rows use the item-specific column layout."""
        return ITEM_FIELDS

    def _create_item(self, short_name) -> ItemResourceInfo:
        """Build a fresh item resource for `short_name`.

        The new item takes the next free resource index, reuses the short
        name as its long name, and has a maximum capacity of 1.
        """
        next_index = self.db.first_unused_resource_index()
        return ItemResourceInfo(next_index, short_name, short_name, 1)
def _synchronize_async_fixture(fixturedef: FixtureDef, event_loop_fixture_id: str) -> None:
    """Wrap an async fixture function so pytest can run it synchronously.

    Async-generator fixtures and coroutine fixtures need different
    wrappers; a plain synchronous fixture is left untouched.
    """
    fixture_func = fixturedef.func
    if inspect.isasyncgenfunction(fixture_func):
        _wrap_asyncgen_fixture(fixturedef, event_loop_fixture_id)
        return
    if inspect.iscoroutinefunction(fixture_func):
        _wrap_async_fixture(fixturedef, event_loop_fixture_id)
# NOTE(review): the next three bare names are no-op expressions — almost
# certainly decorators whose '@' (and possibly name prefixes, e.g.
# login_required / csrf_exempt) were stripped during extraction.  Restore
# them against upstream before use; as written they raise NameError at import.
_required
_exempt
_
def unmark_comment_as_spam(request, conference_slug, proposal_slug, proposal_comment_id):
    """AJAX view: let the user who flagged a proposal comment as spam undo it.

    Only the user recorded in ``marked_as_spam_by`` may unmark; every other
    case (non-AJAX request, inactive user, wrong user, comment not flagged)
    gets a 403.  On success the commenter is re-evaluated against
    USER_SPAM_THRESHOLD (presumably updating their spam standing — confirm
    in user_action_for_spam).
    """
    # Reject non-AJAX requests and inactive users outright.
    if ((not request.is_ajax()) or (request.user.is_active is False)):
        return HttpResponseForbidden()
    # 404 if any of conference / proposal / comment is missing or mismatched.
    conference = get_object_or_404(Conference, slug=conference_slug)
    proposal = get_object_or_404(Proposal, slug=proposal_slug, conference=conference)
    proposal_comment = get_object_or_404(ProposalComment, proposal=proposal, id=proposal_comment_id)
    # Only the user who originally marked the comment may unmark it.
    if (proposal_comment.is_spam and (proposal_comment.marked_as_spam_by == request.user)):
        proposal_comment.is_spam = False
        proposal_comment.marked_as_spam_by = None
        proposal_comment.save()
        user_action_for_spam(proposal_comment.commenter, getattr(settings, 'USER_SPAM_THRESHOLD', 2))
        return HttpResponse('Unmarked as spam')
    return HttpResponseForbidden()
def test_chunk_boundaries() -> None:
    """Server-side chunked decoding: Data events must carry correct
    chunk_start/chunk_end flags regardless of how the chunked body is
    split across receive_data() calls."""
    conn = Connection(our_role=SERVER)
    request = b'POST / HTTP/1.1\r\nHost: example.com\r\nTransfer-Encoding: chunked\r\n\r\n'
    conn.receive_data(request)
    assert (conn.next_event() == Request(method='POST', target='/', headers=[('Host', 'example.com'), ('Transfer-Encoding', 'chunked')]))
    assert (conn.next_event() is NEED_DATA)
    # A complete 5-byte chunk delivered whole: starts and ends the chunk.
    conn.receive_data(b'5\r\nhello\r\n')
    assert (conn.next_event() == Data(data=b'hello', chunk_start=True, chunk_end=True))
    # The same chunk dribbled in three pieces: flags mark only the real edges.
    conn.receive_data(b'5\r\nhel')
    assert (conn.next_event() == Data(data=b'hel', chunk_start=True, chunk_end=False))
    conn.receive_data(b'l')
    assert (conn.next_event() == Data(data=b'l', chunk_start=False, chunk_end=False))
    conn.receive_data(b'o\r\n')
    assert (conn.next_event() == Data(data=b'o', chunk_start=False, chunk_end=True))
    # Chunk data arriving without its trailing CRLF still ends the chunk
    # once all 5 declared bytes are in; the CRLF alone then yields no event.
    conn.receive_data(b'5\r\nhello')
    assert (conn.next_event() == Data(data=b'hello', chunk_start=True, chunk_end=True))
    conn.receive_data(b'\r\n')
    assert (conn.next_event() == NEED_DATA)
    # The zero-length chunk terminates the message body.
    conn.receive_data(b'0\r\n\r\n')
    assert (conn.next_event() == EndOfMessage())
def to_custom_tensor(original: Union[List, Tuple], torch_tensors: List[torch.Tensor]) -> List:
    """Map plain torch tensors back onto the container types of `original`.

    Pairs each element of `original` with the corresponding tensor in
    `torch_tensors`.  Where the original element was a
    spconv.SparseConvTensor, the new tensor is re-attached as its feature
    payload (preserving the sparse structure); otherwise the plain tensor
    is passed through unchanged.
    """
    def _restore(template, plain):
        # Sparse tensors keep their indices/shape; only features are swapped.
        if isinstance(template, spconv.SparseConvTensor):
            return template.replace_feature(plain)
        return plain

    return [_restore(template, plain) for template, plain in zip(original, torch_tensors)]
def main():
    """Build and run a demo scene: a textured, rotating cube."""
    scene = SceneManager.AddScene('Scene')
    # Pull the camera back so the cube at the origin is in view.
    scene.mainCamera.transform.localPosition = Vector3(0, 0, -10)
    cube = GameObject('Cube')
    logo_texture = Texture2D(resolver.getPath('examples/example8/logo.png'))
    mesh_renderer = cube.AddComponent(MeshRenderer)
    mesh_renderer.mesh = Mesh.cube(2)
    # White tint so the texture shows unmodified.
    mesh_renderer.mat = Material(RGB(255, 255, 255), logo_texture)
    cube.AddComponent(Rotator)
    scene.Add(cube)
    SceneManager.LoadScene(scene)
def resume_from_checkpoint(fdir, model, optimizer=None, scheduler=None):
    """Resume training state from the latest checkpoint listed in fdir/checkpoint.

    The 'checkpoint' index file holds one saved-model filename per line,
    most recent last.  Loads the model weights (required) and, when present
    in the saved state, the optimizer and scheduler states.

    Returns the epoch recorded in the checkpoint, or 0 when there is
    nothing to resume (missing or empty index file).  A missing index file
    is created empty so later training can append to it.

    Fixes over the original: the `checkpoint` name was reused for both the
    open file handle and the loaded state dict (shadowing), and membership
    tests used the non-idiomatic `in d.keys()`.
    """
    start_epoch = 0
    index_file = osp.join(fdir, 'checkpoint')
    if not osp.exists(index_file):
        # Touch the index file so training code can append to it later.
        open(index_file, 'w').close()
        return start_epoch
    with open(index_file, 'r') as f:
        model_names = f.readlines()
    if not model_names:
        return start_epoch
    model_name = model_names[-1].strip('\n')
    fpath = osp.join(fdir, model_name)
    print(f'Loading checkpoint from "{fpath}"')
    state = load_checkpoint(fpath)
    model.load_state_dict(state['state_dict'])
    print('Loaded model weights')
    if optimizer is not None and 'optimizer' in state:
        optimizer.load_state_dict(state['optimizer'])
        print('Loaded optimizer')
    if scheduler is not None and 'scheduler' in state:
        scheduler.load_state_dict(state['scheduler'])
        print('Loaded scheduler')
    start_epoch = state['epoch']
    print(f'Previous epoch: {start_epoch}')
    return start_epoch
class RVsAssignmentStepsTester():
    """Checks that a step method reports exactly the value variables of the
    random variables it was assigned."""

    def continuous_steps(self, step, step_kwargs):
        # `step` is a step-method class; `step_kwargs` its constructor kwargs.
        with pm.Model() as m:
            c1 = pm.HalfNormal('c1')
            c2 = pm.HalfNormal('c2')
            # Compile under the fast-but-unstable mode: only `.vars` is
            # inspected here, no actual sampling is performed.
            with pytensor.config.change_flags(mode=fast_unstable_sampling_mode):
                assert ([m.rvs_to_values[c1]] == step([c1], **step_kwargs).vars)
                assert ({m.rvs_to_values[c1], m.rvs_to_values[c2]} == set(step([c1, c2], **step_kwargs).vars))
class DictAction(Action):
    """argparse action that collects ``KEY=VAL`` arguments into a dict.

    Each VAL is split on ',' and every piece is coerced to int, float,
    bool or None where possible; a single-element list collapses to a
    scalar.  Example: ``--opt a=1 b=2.5,true`` -> ``{'a': 1, 'b': [2.5, True]}``.

    Fix over the original: ``_parse_int_float_bool`` was defined without
    ``@staticmethod`` (the decorator was evidently stripped), so calling it
    via ``self.`` passed ``self`` as ``val`` and raised TypeError.
    """

    @staticmethod
    def _parse_int_float_bool(val):
        """Best-effort coercion of a string to int/float/bool/None."""
        try:
            return int(val)
        except ValueError:
            pass
        try:
            return float(val)
        except ValueError:
            pass
        lowered = val.lower()
        if lowered in ('true', 'false'):
            return lowered == 'true'
        if lowered in ('none', 'null'):
            return None
        return val  # leave unrecognized values as plain strings

    def __call__(self, parser, namespace, values, option_string=None):
        """Parse every KEY=VAL in `values` and store the dict on `namespace`."""
        options = {}
        for kv in values:
            # Split only on the first '=' so values may themselves contain '='.
            key, val = kv.split('=', maxsplit=1)
            parsed = [self._parse_int_float_bool(v) for v in val.split(',')]
            # A lone value is stored as a scalar rather than a 1-element list.
            options[key] = parsed[0] if len(parsed) == 1 else parsed
        setattr(namespace, self.dest, options)
class SponsorshipBenefitAdminForm(forms.ModelForm):
    """Admin form for SponsorshipBenefit.

    Uses the sponsorship-year select widget and enforces that a benefit
    flagged as standalone is not simultaneously attached to packages.
    """

    class Meta:
        model = SponsorshipBenefit
        widgets = {'year': SPONSORSHIP_YEAR_SELECT}
        fields = '__all__'

    def clean(self):
        """Reject standalone benefits that also belong to packages."""
        cleaned_data = super().clean()
        is_standalone = cleaned_data.get('standalone')
        selected_packages = cleaned_data.get('packages')
        if is_standalone and selected_packages:
            raise forms.ValidationError('Standalone benefits must not belong to any package.')
        return cleaned_data
class MultiDatasetSampler(Sampler):
    """Infinite weighted sampler over several concatenated datasets.

    Per-image weights combine a dataset-level ratio (MULTI_DATASET.DATA_RATIO,
    rescaled so each dataset is upsampled towards the largest one) with an
    optional class-aware sampling (CAS) factor that favors images containing
    rare categories.

    Fixes vs. previous revision (behavior unchanged): removed a duplicate
    dead ``st = 0``, dropped two ``enumerate`` indices that were never used,
    and corrected the assert-message typo ("number if" -> "number of").
    """

    def __init__(self, cfg, dataset_dicts, sizes, seed: Optional[int] = None):
        """
        Args:
            cfg: config node; reads MULTI_DATASET.* and SOLVER.IMS_PER_BATCH.
            dataset_dicts: concatenated per-image dicts; each must carry
                'dataset_source' and 'annotations'.
            sizes: number of images in each source dataset, in order.
            seed: shared RNG seed; defaults to a seed agreed across workers.
        """
        self.sizes = sizes
        self.sample_epoch_size = cfg.MULTI_DATASET.SAMPLE_EPOCH_SIZE
        # Epoch size must divide into whole batches.
        assert self.sample_epoch_size % cfg.SOLVER.IMS_PER_BATCH == 0
        print('self.epoch_size', self.sample_epoch_size)
        if seed is None:
            seed = comm.shared_random_seed()
        self._seed = int(seed)
        self._rank = comm.get_rank()
        self._world_size = comm.get_world_size()
        self._batch_size = cfg.SOLVER.IMS_PER_BATCH
        self._ims_per_gpu = self._batch_size // self._world_size
        self.dataset_ids = torch.tensor(
            [d['dataset_source'] for d in dataset_dicts], dtype=torch.long)
        dataset_ratio = cfg.MULTI_DATASET.DATA_RATIO
        assert len(dataset_ratio) == len(sizes), \
            'length of dataset ratio {} should be equal to number of dataset {}'.format(
                len(dataset_ratio), len(sizes))
        # Weight per image: upsample each dataset towards the largest one,
        # then scale by its configured ratio (normalized over all ratios).
        dataset_weight = [
            torch.ones(s) * max(sizes) / s * r / sum(dataset_ratio)
            for r, s in zip(dataset_ratio, sizes)]
        st = 0
        cas_factors = []
        for i, s in enumerate(sizes):
            if cfg.MULTI_DATASET.USE_CAS[i]:
                cas_factor = self._get_class_balance_factor_per_dataset(
                    dataset_dicts[st:st + s], l=cfg.MULTI_DATASET.CAS_LAMBDA)
                # Renormalize so CAS preserves the dataset's total weight.
                cas_factor = cas_factor * (s / cas_factor.sum())
            else:
                cas_factor = torch.ones(s)
            cas_factors.append(cas_factor)
            st = st + s
        cas_factors = torch.cat(cas_factors)
        dataset_weight = torch.cat(dataset_weight)
        self.weights = dataset_weight * cas_factors

    def __iter__(self):
        # Shard the infinite stream: each worker takes every world_size-th
        # index, offset by its rank.
        start = self._rank
        yield from itertools.islice(
            self._infinite_indices(), start, None, self._world_size)

    def _infinite_indices(self):
        # Deterministic (per seed) endless stream of weighted draws.
        g = torch.Generator()
        g.manual_seed(self._seed)
        while True:
            ids = torch.multinomial(
                self.weights, self.sample_epoch_size,
                generator=g, replacement=True)
            yield from ids

    def _get_class_balance_factor_per_dataset(self, dataset_dicts, l=1.0):
        """Return a per-image weight favoring images with rare categories.

        An image's weight is the sum over its distinct categories of
        1 / freq**l, where freq is the number of images in this dataset
        containing that category.
        """
        category_freq = defaultdict(int)
        for dataset_dict in dataset_dicts:
            cat_ids = {ann['category_id'] for ann in dataset_dict['annotations']}
            for cat_id in cat_ids:
                category_freq[cat_id] += 1
        ret = []
        for dataset_dict in dataset_dicts:
            cat_ids = {ann['category_id'] for ann in dataset_dict['annotations']}
            ret.append(sum(1.0 / category_freq[cat_id] ** l for cat_id in cat_ids))
        return torch.tensor(ret).float()
# NOTE(review): the lines below are dataset-viewer page residue, not Python;
# commented out so the file stays syntactically valid.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.