class VanMlpLayer(nn.Sequential):
def __init__(self, in_channels: int, hidden_size: int, out_channels: int, hidden_act: str='gelu', dropout_rate: float=0.5):
super().__init__()
self.in_dense = nn.Conv2d(in_channels, hidden_size, kernel_size=1)
self.depth_wise = nn.Conv2d(hidden_size, hidden_size, kernel_size=3, padding=1, groups=hidden_size)
self.activation = ACT2FN[hidden_act]
self.dropout1 = nn.Dropout(dropout_rate)
self.out_dense = nn.Conv2d(hidden_size, out_channels, kernel_size=1)
self.dropout2 = nn.Dropout(dropout_rate) |
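A minimal usage sketch for the block above (assuming the surrounding imports, including torch.nn and transformers' ACT2FN, are in scope): it runs the MLP over a dummy NCHW feature map; the 1x1 convs and the padded depth-wise 3x3 keep the spatial size unchanged.
import torch

mlp = VanMlpLayer(in_channels=32, hidden_size=128, out_channels=32)
features = torch.randn(2, 32, 14, 14)  # (batch, channels, height, width)
out = mlp(features)  # nn.Sequential applies the registered submodules in order
print(out.shape)  # torch.Size([2, 32, 14, 14])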
class ClassyModelWrapper():
def __init__(self, classy_model):
self.classy_model = classy_model
def __getattr__(self, name):
if ((name != 'classy_model') and hasattr(self, 'classy_model')):
attr = getattr(self.classy_model, name)
if isinstance(attr, types.MethodType):
attr = _ClassyModelMethod(self, attr)
return attr
else:
return super().__getattr__(name)
def __setattr__(self, name, value):
if ((name not in ['classy_model', 'forward']) and hasattr(self, 'classy_model')):
setattr(self.classy_model, name, value)
else:
super().__setattr__(name, value)
def forward(self, *args, **kwargs):
return self.classy_model(*args, **kwargs)
def __call__(self, *args, **kwargs):
return self.forward(*args, **kwargs)
def __repr__(self):
return f'''Classy {type(self.classy_model)}:
{self.classy_model.__repr__()}'''
def __class__(self):
return self.classy_model.__class__ |
def get_imagenet(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), root='./data', base_folder='imagenet'):
transform_train = transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(mean, std)])
transform_test = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean, std)])
train_dir = os.path.join(root, base_folder, 'train')
test_dir = os.path.join(root, base_folder, 'val')
dataset_train = datasets.ImageFolder(train_dir, transform_train)
dataset_test = datasets.ImageFolder(test_dir, transform_test)
return (dataset_train, dataset_test) |
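A hedged usage sketch for get_imagenet, assuming the torchvision imports above and an ImageFolder-style ./data/imagenet/{train,val} tree on disk:
from torch.utils.data import DataLoader

train_set, test_set = get_imagenet(root='./data', base_folder='imagenet')
train_loader = DataLoader(train_set, batch_size=256, shuffle=True, num_workers=8, pin_memory=True)
test_loader = DataLoader(test_set, batch_size=256, shuffle=False, num_workers=8, pin_memory=True)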
def cec_dc_snl_ac_arrays(cec_module_cs5p_220m, cec_inverter_parameters, sapm_temperature_cs5p_220m):
module_parameters = cec_module_cs5p_220m.copy()
module_parameters['b'] = 0.05
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = (- 0.0002677)
temp_model_params = sapm_temperature_cs5p_220m.copy()
array_one = pvsystem.Array(mount=pvsystem.FixedMount(surface_tilt=32.2, surface_azimuth=180), module=module_parameters['Name'], module_parameters=module_parameters.copy(), temperature_model_parameters=temp_model_params.copy())
array_two = pvsystem.Array(mount=pvsystem.FixedMount(surface_tilt=42.2, surface_azimuth=220), module=module_parameters['Name'], module_parameters=module_parameters.copy(), temperature_model_parameters=temp_model_params.copy())
system = PVSystem(arrays=[array_one, array_two], inverter_parameters=cec_inverter_parameters)
return system |
class FSDPOptimizerAdapter():
def __init__(self, module: FSDP, optimizer: torch.optim.Optimizer) -> None:
self.module = module
self.optimizer = optimizer
def state_dict(self) -> Dict[(str, Any)]:
optim_state_dict = FSDP.optim_state_dict(self.module, self.optimizer)
return optim_state_dict
def load_state_dict(self, state_dict: Dict[(str, Any)]) -> None:
optim_state_dict = FSDP.optim_state_dict_to_load(self.module, self.optimizer, state_dict)
self.optimizer.load_state_dict(optim_state_dict) |
class Effect5381(BaseEffect):
type = 'passive'
def handler(fit, ship, context, projectionRange, **kwargs):
fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Medium Energy Turret')), 'trackingSpeed', ship.getModifiedItemAttr('shipBonusABC1'), skill='Amarr Battlecruiser', **kwargs) |
def main():
parser = ArgumentParser(description=CMDLINE_HELP)
parser.add_argument('--noserver', action='store_true', dest='noserver', default=False, help='Do not start Server process')
parser.add_argument('--noportal', action='store_true', dest='noportal', default=False, help='Do not start Portal process')
parser.add_argument('--logserver', action='store_true', dest='logserver', default=False, help='Log Server output to logfile')
parser.add_argument('--iserver', action='store_true', dest='iserver', default=False, help='Server in interactive mode')
parser.add_argument('--iportal', action='store_true', dest='iportal', default=False, help='Portal in interactive mode')
parser.add_argument('--pserver', action='store_true', dest='pserver', default=False, help='Profile Server')
parser.add_argument('--pportal', action='store_true', dest='pportal', default=False, help='Profile Portal')
parser.add_argument('--nologcycle', action='store_false', dest='nologcycle', default=True, help='Do not cycle log files')
parser.add_argument('--doexit', action='store_true', dest='doexit', default=False, help='Immediately exit after processes have started.')
parser.add_argument('gamedir', help='path to game dir')
parser.add_argument('twistdbinary', help='path to twistd binary')
parser.add_argument('slogfile', help='path to server log file')
parser.add_argument('plogfile', help='path to portal log file')
parser.add_argument('hlogfile', help='path to http log file')
args = parser.parse_args()
global GAMEDIR
global SERVER_LOGFILE, PORTAL_LOGFILE, HTTP_LOGFILE
global SERVER_PIDFILE, PORTAL_PIDFILE
global SERVER_RESTART, PORTAL_RESTART
global SPROFILER_LOGFILE, PPROFILER_LOGFILE
GAMEDIR = args.gamedir
sys.path.insert(1, os.path.join(GAMEDIR, SERVERDIR))
SERVER_PIDFILE = os.path.join(GAMEDIR, SERVERDIR, 'server.pid')
PORTAL_PIDFILE = os.path.join(GAMEDIR, SERVERDIR, 'portal.pid')
SERVER_RESTART = os.path.join(GAMEDIR, SERVERDIR, 'server.restart')
PORTAL_RESTART = os.path.join(GAMEDIR, SERVERDIR, 'portal.restart')
SERVER_LOGFILE = args.slogfile
PORTAL_LOGFILE = args.plogfile
HTTP_LOGFILE = args.hlogfile
TWISTED_BINARY = args.twistdbinary
SPROFILER_LOGFILE = os.path.join(GAMEDIR, SERVERDIR, 'logs', 'server.prof')
PPROFILER_LOGFILE = os.path.join(GAMEDIR, SERVERDIR, 'logs', 'portal.prof')
server_argv = [TWISTED_BINARY, '--nodaemon', ('--logfile=%s' % SERVER_LOGFILE), ('--pidfile=%s' % SERVER_PIDFILE), ('--python=%s' % SERVER_PY_FILE)]
portal_argv = [TWISTED_BINARY, ('--logfile=%s' % PORTAL_LOGFILE), ('--pidfile=%s' % PORTAL_PIDFILE), ('--python=%s' % PORTAL_PY_FILE)]
pserver_argv = ['--savestats', '--profiler=cprofile', ('--profile=%s' % SPROFILER_LOGFILE)]
pportal_argv = ['--savestats', '--profiler=cprofile', ('--profile=%s' % PPROFILER_LOGFILE)]
pid = get_pid(SERVER_PIDFILE)
if (pid and (not args.noserver)):
print(('\nEvennia Server is already running as process %(pid)s. Not restarted.' % {'pid': pid}))
args.noserver = True
if args.noserver:
server_argv = None
else:
set_restart_mode(SERVER_RESTART, 'shutdown')
if (not args.logserver):
del server_argv[2]
print('\nStarting Evennia Server (output to stdout).')
else:
if (not args.nologcycle):
cycle_logfile(SERVER_LOGFILE)
print('\nStarting Evennia Server (output to server logfile).')
if args.pserver:
server_argv.extend(pserver_argv)
print('\nRunning Evennia Server under cProfile.')
pid = get_pid(PORTAL_PIDFILE)
if (pid and (not args.noportal)):
print(('\nEvennia Portal is already running as process %(pid)s. Not restarted.' % {'pid': pid}))
args.noportal = True
if args.noportal:
portal_argv = None
else:
if args.iportal:
portal_argv[1] = '--nodaemon'
set_restart_mode(PORTAL_RESTART, True)
print('\nStarting Evennia Portal in non-Daemon mode (output to stdout).')
else:
if (not args.nologcycle):
cycle_logfile(PORTAL_LOGFILE)
cycle_logfile(HTTP_LOGFILE)
set_restart_mode(PORTAL_RESTART, False)
print('\nStarting Evennia Portal in Daemon mode (output to portal logfile).')
if args.pportal:
portal_argv.extend(pportal_argv)
print('\nRunning Evennia Portal under cProfile.')
if args.doexit:
print(PROCESS_DOEXIT)
if (os.name == 'nt'):
if server_argv:
del server_argv[(- 2)]
if portal_argv:
del portal_argv[(- 2)]
start_services(server_argv, portal_argv, doexit=args.doexit) |
class PBSProJob(cpi.Job):
def __init__(self, api, adaptor):
_cpi_base = super(PBSProJob, self)
_cpi_base.__init__(api, adaptor)
def _get_impl(self):
return self
@SYNC_CALL  # assumed: the decorator was garbled to "_CALL" in extraction; SYNC_CALL is radical.saga's cpi decorator
def init_instance(self, job_info):
self.jd = job_info['job_description']
self.js = job_info['job_service']
if (job_info['reconnect'] is True):
self._id = job_info['reconnect_jobid']
self._name = self.jd.get(api.NAME)
self._started = True
else:
self._id = None
self._name = self.jd.get(api.NAME)
self._started = False
return self.get_api()
@SYNC_CALL
def get_state(self):
if (self._started is False):
return api.NEW
return self.js._job_get_state(job_id=self._id)
@SYNC_CALL
def wait(self, timeout):
if (self._started is False):
raise rse.IncorrectState("Can't wait for job that hasn't been started")
else:
self.js._job_wait(job_id=self._id, timeout=timeout)
@SYNC_CALL
def cancel(self, timeout):
if (self._started is False):
raise rse.IncorrectState("Can't wait for job that hasn't been started")
else:
self.js._job_cancel(self._id)
@SYNC_CALL
def run(self):
self._id = self.js._job_run(self._api())
self._started = True
@SYNC_CALL
def get_service_url(self):
return self.js.rm
@SYNC_CALL
def get_id(self):
return self._id
@SYNC_CALL
def get_name(self):
return self._name
@SYNC_CALL
def get_exit_code(self):
if (self._started is False):
return None
else:
return self.js._job_get_exit_code(self._id)
@SYNC_CALL
def get_created(self):
if (self._started is False):
return None
else:
return self.js._job_get_create_time(self._id)
@SYNC_CALL
def get_started(self):
if (self._started is False):
return None
else:
return self.js._job_get_start_time(self._id)
@SYNC_CALL
def get_finished(self):
if (self._started is False):
return None
else:
return self.js._job_get_end_time(self._id)
@SYNC_CALL
def get_execution_hosts(self):
if (self._started is False):
return None
else:
return self.js._job_get_execution_hosts(self._id)
@SYNC_CALL
def get_description(self):
return self.jd |
def test_special_characters():
s = '\n[\n]\n^\n\\\n(\n)\n(?:\n-\n|\n\\w\n'
assert (list(MyLexer().get_tokens(s)) == [(Token.Name, '['), (Token.Text, '\n'), (Token.Name, ']'), (Token.Text, '\n'), (Token.Name, '^'), (Token.Text, '\n'), (Token.Name, '\\'), (Token.Text, '\n'), (Token.Name, '('), (Token.Text, '\n'), (Token.Name, ')'), (Token.Text, '\n'), (Token.Name, '(?:'), (Token.Text, '\n'), (Token.Name, '-'), (Token.Text, '\n'), (Token.Name, '|'), (Token.Text, '\n'), (Token.Name, '\\w'), (Token.Text, '\n')]) |
class MigratableDb():
def __init__(self, ddbb):
self.ddbb = ddbb
def is_empty(self):
metadata = MetaData()
metadata.bind = self.ddbb.engine
metadata.reflect()
tables = metadata.tables
return (not tables)
def is_versioned(self):
try:
self.get_version()
except OperationalError:
return False
except NoSuchTableError:
return False
return True
def get_version(self):
latest = self.ddbb.session.query(func.max(MigrateVersion.version)).one()
return latest[0]
def get_upgrade_version(self):
return 15
def version(self, initial_version):
self.ddbb.session.add(MigrateVersion(repository_id='pytrainer', repository_path='/usr/lib/python3/site-packages/pytrainer/upgrade', version=15))
self.ddbb.session.commit()
def upgrade(self):
from pytrainer.gui.dialogs import warning_dialog
current_version = self.get_version()
warning_dialog(title='Database migration not supported', text=f'Migrating from database version {current_version} not supported. Please use pytrainer version 2.1.0 to migrate the database.')
exit(1) |
class AutoSamSeg(nn.Module):
def __init__(self, image_encoder, seg_decoder, img_size=1024):
super().__init__()
self.img_size = img_size
self.image_encoder = image_encoder
self.mask_decoder = seg_decoder
self.pe_layer = PositionEmbeddingRandom(128)
def forward(self, x):
original_size = x.shape[(- 1)]
x = F.interpolate(x, (self.image_encoder.img_size, self.image_encoder.img_size), mode='bilinear', align_corners=False)
image_embedding = self.image_encoder(x)
img_pe = self.pe_layer([64, 64]).unsqueeze(0)
(mask, iou_pred) = self.mask_decoder(image_embeddings=image_embedding.unsqueeze(1), image_pe=img_pe)
if (mask.shape[(- 1)] != original_size):
mask = F.interpolate(mask, (original_size, original_size), mode='bilinear', align_corners=False)
return (mask, iou_pred)
def get_embedding(self, x):
original_size = x.shape[(- 1)]
x = F.interpolate(x, (self.image_encoder.img_size, self.image_encoder.img_size), mode='bilinear', align_corners=False)
image_embedding = self.image_encoder(x)
out = nn.functional.adaptive_avg_pool2d(image_embedding, 1).squeeze()
return out |
class TestRiskDifference():
def test_risk_difference_equal_to_0(self, counts_1):
rd = risk_difference(counts_1[0], counts_1[1], counts_1[2], counts_1[3])
assert (rd.point_estimate == 0)
def test_risk_difference_equal_to_half(self):
rd = risk_difference(50, 50, 25, 75)
npt.assert_allclose(rd.point_estimate, 0.25)
def test_value_error_for_negative_counts(self):
with pytest.raises(ValueError):
risk_difference((- 5), 1, 1, 1)
def test_match_sas_ci(self, counts_1):
sas_ci = ((- 0.), 0.)
rd = risk_difference(counts_1[0], counts_1[1], counts_1[2], counts_1[3])
npt.assert_allclose(rd[1:3], sas_ci)
def test_match_sas_se(self, counts_1):
sas_se = 0.1
rd = risk_difference(counts_1[0], counts_1[1], counts_1[2], counts_1[3])
npt.assert_allclose(rd.standard_error, sas_se)
def test_raises_warning_if_small_cells(self):
with pytest.warns(UserWarning, match='confidence interval approximation is invalid'):
rd = risk_difference(1, 10, 10, 10) |
class Kernel(abc.ABC):
def __call__(self, x: torch.Tensor, y: Union[(None, torch.Tensor)]=None) -> torch.Tensor:
if (y is None):
y = x
return self._call_impl(x, y)
# bodies restored as abstract stubs (assumption: these were @abc.abstractmethod declarations; their bodies were lost in extraction)
@abc.abstractmethod
def _call_impl(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
...
@abc.abstractmethod
def string_id(self):
...
@abc.abstractmethod
def effective_dim(self, x) -> float:
... |
class BaseOCR(BasePlugin):
__name__ = 'BaseOCR'
__type__ = 'base'
__version__ = '0.28'
__status__ = 'stable'
__description__ = 'OCR base plugin'
__license__ = 'GPLv3'
__authors__ = [('pyLoad team', '')]
def __init__(self, pyfile):
self._init(pyfile.m.pyload)
self.pyfile = pyfile
self.init()
def _log(self, level, plugintype, pluginname, args, kwargs):
args = ((self.__name__,) + args)
return self.pyfile.plugin._log(level, plugintype, self.pyfile.plugin.__name__, args, kwargs)
def load_image(self, image):
self.img = Image.open(image)
self.pixels = self.img.load()
self.result_captcha = ''
def deactivate(self):
pass
def threshold(self, value):
self.img = self.img.point((lambda a: ((a * value) + 10)))
def call_cmd(self, command, *args, **kwargs):
call = ([command] + list(args))  # args is a tuple; convert to list before concatenating
self.log_debug(('EXECUTE ' + ' '.join(call)))
popen = subprocess.Popen(call, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)  # pipes and text mode so the reads below work
popen.wait()
output = ((popen.stdout.read() + ' | ') + popen.stderr.read())
popen.stdout.close()
popen.stderr.close()
self.log_debug(f'Tesseract ReturnCode {popen.returncode}', f'Output: {output}')
def run_tesser(self, subset=False, digits=True, lowercase=True, uppercase=True, pagesegmode=None):
try:
tmp_tif = open(os.path.join(self.pyload.tempdir, f'tmp_tif_{self.classname}.tif'), mode='wb')
tmp_tif.close()
tmp_txt = open(os.path.join(self.pyload.tempdir, f'tmp_txt_{self.classname}.txt'), mode='wb')
tmp_txt.close()
except IOError as exc:
self.log_error(exc)
return
self.log_debug('Saving tiff...')
self.img.save(tmp_tif.name, 'TIFF')
if (os.name == 'nt'):
command = os.path.join(PKGDIR, 'lib', 'tesseract', 'tesseract.exe')
else:
command = 'tesseract'
args = [os.path.realpath(tmp_tif.name), os.path.realpath(tmp_txt.name).replace('.txt', '')]
if pagesegmode:
args.extend(['-psm', str(pagesegmode)])
if (subset and (digits or lowercase or uppercase)):
with open(os.path.join(self.pyload.tempdir, 'tmp_sub_{}.subset'.format(self.classname)), 'w') as tmp_sub:
tmp_sub.write('tessedit_char_whitelist ')
if digits:
tmp_sub.write('0123456789')
if lowercase:
tmp_sub.write('abcdefghijklmnopqrstuvwxyz')
if uppercase:
tmp_sub.write('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
tmp_sub.write('\n')
args.append('nobatch')
args.append(os.path.realpath(tmp_sub.name))
self.log_debug('Running tesseract...')
self.call_cmd(command, *args)
self.log_debug('Reading txt...')
try:
with open(tmp_txt.name) as fp:
self.result_captcha = fp.read().replace('\n', '')
except Exception:
self.result_captcha = ''
self.log_info((self._('OCR result: ') + self.result_captcha))
self.remove(tmp_tif.name, try_trash=False)
self.remove(tmp_txt.name, try_trash=False)
if (subset and (digits or lowercase or uppercase)):
self.remove(tmp_sub.name, try_trash=False)
def recognize(self, image):
raise NotImplementedError
def to_greyscale(self):
if (self.img.mode != 'L'):
self.img = self.img.convert('L')
self.pixels = self.img.load()
def eval_black_white(self, limit):
self.pixels = self.img.load()
(w, h) = self.img.size
for x in range(w):
for y in range(h):
if (self.pixels[(x, y)] > limit):
self.pixels[(x, y)] = 255
else:
self.pixels[(x, y)] = 0
def clean(self, allowed):
pixels = self.pixels
(w, h) = self.img.size
for x in range(w):
for y in range(h):
if (pixels[(x, y)] == 255):
continue
count = 0
try:
if (pixels[((x - 1), (y - 1))] != 255):
count += 1
if (pixels[((x - 1), y)] != 255):
count += 1
if (pixels[((x - 1), (y + 1))] != 255):
count += 1
if (pixels[(x, (y + 1))] != 255):
count += 1
if (pixels[((x + 1), (y + 1))] != 255):
count += 1
if (pixels[((x + 1), y)] != 255):
count += 1
if (pixels[((x + 1), (y - 1))] != 255):
count += 1
if (pixels[(x, (y - 1))] != 255):
count += 1
except Exception:
pass
if (count < allowed):
pixels[(x, y)] = 1
for x in range(w):
for y in range(h):
if (pixels[(x, y)] == 1):
pixels[(x, y)] = 255
self.pixels = pixels
def derotate_by_average(self):
(w, h) = self.img.size
pixels = self.pixels
for x in range(w):
for y in range(h):
if (pixels[(x, y)] == 0):
pixels[(x, y)] = 155
highest = {}
counts = {}
for angle in range((- 45), 45):
tmpimage = self.img.rotate(angle)
pixels = tmpimage.load()
(w, h) = self.img.size
for x in range(w):
for y in range(h):
if (pixels[(x, y)] == 0):
pixels[(x, y)] = 255
count = {}
for x in range(w):
count[x] = 0
for y in range(h):
if (pixels[(x, y)] == 155):
count[x] += 1
sum = 0
cnt = 0
for x in count.values():
if (x != 0):
sum += x
cnt += 1
avg = (sum // cnt)
counts[angle] = cnt
highest[angle] = 0
for x in count.values():
if (x > highest[angle]):
highest[angle] = x
highest[angle] = (highest[angle] - avg)
hkey = 0
hvalue = 0
for (key, value) in highest.items():
if (value > hvalue):
hkey = key
hvalue = value
self.img = self.img.rotate(hkey)
pixels = self.img.load()
for x in range(w):
for y in range(h):
if (pixels[(x, y)] == 0):
pixels[(x, y)] = 255
if (pixels[(x, y)] == 155):
pixels[(x, y)] = 0
self.pixels = pixels
def split_captcha_letters(self):
captcha = self.img
started = False
letters = []
(width, height) = captcha.size
(bottomY, topY) = (0, height)
pixels = captcha.load()
for x in range(width):
black_pixel_in_col = False
for y in range(height):
if (pixels[(x, y)] != 255):
if (not started):
started = True
firstX = x
lastX = x
if (y > bottomY):
bottomY = y
if (y < topY):
topY = y
if (x > lastX):
lastX = x
black_pixel_in_col = True
if ((black_pixel_in_col is False) and (started is True)):
rect = (firstX, topY, lastX, bottomY)
new_captcha = captcha.crop(rect)
(w, h) = new_captcha.size
if ((w > 5) and (h > 5)):
letters.append(new_captcha)
started = False
(bottomY, topY) = (0, height)
return letters
def correct(self, values, var=None):
if var:
result = var
else:
result = self.result_captcha
for (key, item) in values.items():
if (key.__class__ is str):
result = result.replace(key, item)
else:
for expr in key:
result = result.replace(expr, item)
if var:
return result
else:
self.result_captcha = result |
class UnitsSystem(SourceManagedClass):
def __init__(self, sources: Optional[Callable]=None):
super().__init__({k: sources(k) for k in sources()})
self.separate_value_and_unit_RE = re.compile(u'([-+]?[0-9]*\\.?[0-9]+(?:[eE][-+]?[0-9]+)?)(?:[ \t]*(.*))?')
self.split_units_RE = re.compile(u'(?:([^ \\+\\-\\^\\.0-9]+)[\\^]?([\\-\\+]?[^ \\-\\+]*)?)')
self.siConversions = {}
self.dimensions = defaultdict(dict)
self.read()
def read(self, source=None, value=None):
if (source is not None):
super().read(source, value)
for dimension in self.database.sections():
units = self.database.options(dimension)
for unit in units:
if ('META_GenerateConversions' in unit):
expression = self.database.get(dimension, unit)
si_base_unit = expression.split()[0]
centi = ('centi' in expression)
deci = ('deci' in expression)
non_base_si_factor = (self.safe_eval(expression.split()[1]) if ('altbase' in expression) else 1)
dimension_conversions = generateConversionDictForSISuffix(si_base_unit, centi=centi, deci=deci, non_base_si_factor=non_base_si_factor)
self.siConversions.update(dimension_conversions)
self.dimensions[dimension].update(dimension_conversions)
continue
string_expression = self.database.get(dimension, unit)
self.siConversions[unit] = self.safe_eval(string_expression)
self.dimensions[dimension][unit] = self.siConversions[unit]
def safe_eval(self, string_expression):
return eval(string_expression, {'__builtins__': {}}, {'constants': solcore.constants})
def siUnits(self, value, unit):
if ((unit is None) or (value is None)):
return value
units_list = self.split_units_RE.findall(unit)
for (unit, power) in units_list:
power = (float(power) if (power != '') else 1)
value = (value * np.power(self.siConversions[unit], power))
return value
def asUnit(self, value, unit):
if ((unit is None) or (value is None)):
return value
units_list = self.split_units_RE.findall(unit)
for (unit, power) in units_list:
power = (float(power) if (power != '') else 1)
value = (value / (self.siConversions[unit] ** power))
return value
def si(self, *args):
if (type(args[0]) == str):
return self.siUnitFromString(*args)
return self.siUnits(*args)
def siUnitFromString(self, string):
matchObj = self.separate_value_and_unit_RE.match(string)
(value, unit) = matchObj.groups()
value = float(value)
units_list = self.split_units_RE.findall(unit)
for (unit, power) in units_list:
power = (float(power) if (power != '') else 1)
value *= (self.siConversions[unit] ** power)
return value
def convert(self, value, from_unit, to_unit):
return self.asUnit(self.siUnits(value, from_unit), to_unit)
def eVnm(self, value):
factor = (self.asUnit(h, 'eV') * self.asUnit(c, 'nm'))
return (factor / value)
def nmJ(self, value):
factor = (h * c)
return (factor / self.siUnits(value, 'nm'))
def mJ(self, value):
factor = (h * c)
return (factor / value)
def nmHz(self, value):
factor = self.asUnit(c, 'nm s-1')
return (factor / value)
def spectral_conversion_nm_ev(self, x, y):
x_prime = self.eVnm(x)
conversion_constant = (self.asUnit(h, 'eV s') * self.asUnit(c, 'nm s-1'))
y_prime = ((y * conversion_constant) / (x_prime ** 2))
y_prime = reverse(y_prime)
x_prime = reverse(x_prime)
return (x_prime, y_prime)
def spectral_conversion_nm_hz(self, x, y):
x_prime = self.nmHz(x)
conversion_constant = self.asUnit(c, 'nm s-1')
y_prime = ((y * conversion_constant) / (x_prime ** 2))
y_prime = reverse(y_prime)
x_prime = reverse(x_prime)
return (x_prime, y_prime)
def sensibleUnits(self, value, dimension, precision=2):
negative = ''
if (value < 0):
value *= (- 1)
negative = '-'
formatting = ('%s%%.%if %%s' % (negative, precision))
d = self.dimensions[dimension]
possibleUnits = list(d.keys())  # list() so it can be indexed below
if (value == 0):
return (formatting % (0, ''))
allValues = [abs(np.log10(self.asUnit(value, unit))) for unit in possibleUnits]
bestUnit = possibleUnits[allValues.index(min(allValues))]
return (formatting % (self.asUnit(value, bestUnit), bestUnit))
def eV(self, e):
return ('%.3f eV' % self.asUnit(e, 'eV'))
def guess_dimension(self, unit):
possibilities = [key for key in self.dimensions.keys() if (unit in self.dimensions[key])]
assert (len(possibilities) != 0), ("Guessing dimension of '%s': No candidates found" % unit)
assert (len(possibilities) == 1), ("Guessing dimension of '%s': Multiple candidates found, please convert manually. (%s)" % (unit, ', '.join(possibilities)))
return possibilities[0]
def list_dimensions(self):
for dim in self.dimensions.keys():
print(('%s: %s' % (dim, ', '.join([k for k in self.dimensions[dim].keys() if ((k is not None) and (k != ''))])))) |
def _setup_server(webio_handler, port=0, host='', static_dir=None, max_buffer_size=((2 ** 20) * 200), **tornado_app_settings):
if (port == 0):
port = get_free_port()
handlers = [('/', webio_handler)]
if (static_dir is not None):
handlers.append(('/static/(.*)', tornado.web.StaticFileHandler, {'path': static_dir}))
handlers.append(('/(.*)', tornado.web.StaticFileHandler, {'path': STATIC_PATH, 'default_filename': 'index.html'}))
app = tornado.web.Application(handlers=handlers, **tornado_app_settings)
server = app.listen(port, address=host, max_buffer_size=max_buffer_size)
return (server, port) |
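A usage sketch for _setup_server; the handler class name below is a placeholder, and since the helper only binds the listening socket, the caller still starts the Tornado IOLoop.
import tornado.ioloop

server, port = _setup_server(webio_handler=SomeRequestHandler, host='127.0.0.1')  # SomeRequestHandler is hypothetical
print('serving on http://127.0.0.1:%d' % port)
tornado.ioloop.IOLoop.current().start()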
class MultiOutputModel(torch.nn.Module):
def __init__(self):
super(MultiOutputModel, self).__init__()
self.layer = TupleOutputModel()
self.conv1 = torch.nn.Conv2d(2, 4, kernel_size=3, padding=1)
self.conv2 = torch.nn.Conv2d(4, 4, kernel_size=3, padding=1)
self.conv3 = torch.nn.Conv2d(6, 4, kernel_size=3, padding=1)
def forward(self, *inputs):
(x, y, z) = self.layer(inputs[0])
x1 = self.conv1(x)
x2 = self.conv2(y)
x3 = self.conv3(z)
return torch.cat([x1, x2, x3], 1) |
# (decorator elided; original name not recoverable from the extraction)
def create_pak_backups(game_root: Path, backup_files_path: Path, progress_update: ProgressUpdateCallable):
pak_folder = backup_files_path.joinpath('paks')
pak_folder.mkdir(parents=True, exist_ok=True)
files_folder = game_root.joinpath('files')
for (i, pak) in enumerate(_ECHOES_PAKS):
progress_update(f'Backing up {pak}', (i / len(_ECHOES_PAKS)))
shutil.copy(files_folder.joinpath(pak), pak_folder.joinpath(pak)) |
def get_feature_dimensions(parameters: dict) -> Tuple[(int, int, int, int)]:
n_atom_types = len(parameters['atom_types'])
n_formal_charge = len(parameters['formal_charge'])
n_numh = (int(((not parameters['use_explicit_H']) and (not parameters['ignore_H']))) * len(parameters['imp_H']))
n_chirality = (int(parameters['use_chirality']) * len(parameters['chirality']))
return (n_atom_types, n_formal_charge, n_numh, n_chirality) |
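A worked example for get_feature_dimensions with a hypothetical parameters dict (key names follow the function above): with explicit hydrogens off, n_numh counts the allowed implicit-H values and n_chirality counts the chirality tags.
params = {
    'atom_types': ['C', 'N', 'O', 'S'],
    'formal_charge': [-1, 0, 1],
    'imp_H': [0, 1, 2, 3],
    'chirality': ['None', 'R', 'S'],
    'use_explicit_H': False,
    'ignore_H': False,
    'use_chirality': True,
}
print(get_feature_dimensions(params))  # (4, 3, 4, 3)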
class GlibTranslations(gettext.GNUTranslations):
def __init__(self, fp=None):
self.path = ((fp and fp.name) or '')
self._catalog = {}
self.plural = (lambda n: (n != 1))
gettext.GNUTranslations.__init__(self, fp)
self._debug_text = None
def ugettext(self, message):
message = str(message)
return str(gettext.GNUTranslations.gettext(self, message))
def ungettext(self, msgid1, msgid2, n):
msgid1 = str(msgid1)
msgid2 = str(msgid2)
return str(gettext.GNUTranslations.ngettext(self, msgid1, msgid2, n))
def unpgettext(self, context, msgid, msgidplural, n):
context = str(context)
msgid = str(msgid)
msgidplural = str(msgidplural)
real_msgid = f'{context}{msgid}'
real_msgidplural = f'{context}{msgidplural}'
result = self.ngettext(real_msgid, real_msgidplural, n)
if (result == real_msgid):
return msgid
elif (result == real_msgidplural):
return msgidplural
return result
def upgettext(self, context, msgid):
context = str(context)
msgid = str(msgid)
real_msgid = f'{context}{msgid}'
result = self.ugettext(real_msgid)
if (result == real_msgid):
return msgid
return result
def set_debug_text(self, debug_text):
self._debug_text = debug_text
def wrap_text(self, value):
if (self._debug_text is None):
return value
else:
return ((self._debug_text + value) + self._debug_text)
def install(self, *args, **kwargs):
raise NotImplementedError('We no longer do builtins') |
def val(model, dataloader, metrics_manager):
model.eval()
if opt.multiclass:
criterion = CrossEntropyLoss()
else:
criterion = BCEWithLogitsLoss()
metrics_manager.reset()
for (_, data) in enumerate(dataloader):
(inputs, labels) = data
(loss, y_pred, y_true) = forward_step(model, inputs, labels, criterion=criterion)
metrics_manager.update(loss, y_pred, y_true)
results = metrics_manager.compute(train=False)
model.train()
return results |
def myUpSample2X(layer_input, skip_input, filters, f_size=3, dropout_rate=0):
u = UpSampling2D(size=2)(layer_input)
u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)
if dropout_rate:
u = Dropout(dropout_rate)(u)
u = BatchNormalization(momentum=0.8)(u)
u = Concatenate()([u, skip_input])
return u |
class keep_wl():
def __init__(self, labels):
self.loss = torch.zeros(labels.shape[0], dtype=torch.float).cuda(non_blocking=True)
self.weight = torch.zeros(labels.shape[0], dtype=torch.float).cuda(non_blocking=True)
def __call__(self, epoch_loss, epoch_weight, index):
self.loss[index] = epoch_loss.detach().data
self.weight[index] = epoch_weight.detach().data |
class CheckNanLossHook(ClassyHook):
on_start = ClassyHook._noop
on_phase_start = ClassyHook._noop
on_forward = ClassyHook._noop
on_backward = ClassyHook._noop
on_phase_end = ClassyHook._noop
on_end = ClassyHook._noop
on_step = ClassyHook._noop
on_update = ClassyHook._noop
def on_loss_and_meter(self, task: 'tasks.ClassyTask') -> None:
loss_val = task.last_batch.loss.data.cpu()
if (not torch.isfinite(loss_val).all()):
raise FloatingPointError(f'Infinite Loss or NaN at iteration={task.iteration}. Loss value: {loss_val}') |
class TestFileFileYAMLReaderMultipleFileTypes(unittest.TestCase):
def setUp(self):
patterns1 = ['a.nc']
patterns2 = ['b.nc']
patterns3 = ['geo.nc']
res_dict = {'reader': {'name': 'fake', 'sensors': ['canon']}, 'file_types': {'ftype1': {'name': 'ft1', 'file_patterns': patterns1}, 'ftype2': {'name': 'ft2', 'file_patterns': patterns2}, 'ftype3': {'name': 'ft3', 'file_patterns': patterns3}}, 'datasets': {'ch1': {'name': 'ch01', 'wavelength': [0.5, 0.6, 0.7], 'calibration': 'reflectance', 'file_type': ['ftype1', 'ftype2'], 'coordinates': ['lons', 'lats']}, 'ch2': {'name': 'ch02', 'wavelength': [0.7, 0.75, 0.8], 'calibration': 'counts', 'file_type': ['ftype1', 'ftype2'], 'coordinates': ['lons', 'lats']}, 'ch3': {'name': 'ch03', 'wavelength': [0.8, 0.85, 0.9], 'calibration': 'counts', 'file_type': 'ftype1', 'coordinates': ['lons', 'lats']}, 'lons': {'name': 'lons', 'file_type': ['ftype1', 'ftype3']}, 'lats': {'name': 'lats', 'file_type': ['ftype1', 'ftype3']}}}
self.config = res_dict
self.reader = yr.FileYAMLReader(self.config)
def test_update_ds_ids_from_file_handlers(self):
from functools import partial
orig_ids = self.reader.all_ids
for (ftype, resol) in zip(('ftype1', 'ftype2'), (1, 2)):
_orig_ids = {key: val.copy() for (key, val) in orig_ids.items()}
with patch.dict(self.reader.all_ids, _orig_ids, clear=True), patch.dict(self.reader.available_ids, {}, clear=True):
fh = MagicMock(filetype_info={'file_type': ftype}, resolution=resol)
fh.available_datasets = partial(available_datasets, fh)
fh.file_type_matches = partial(file_type_matches, fh)
self.reader.file_handlers = {ftype: [fh]}
self.reader.update_ds_ids_from_file_handlers()
for (ds_id, ds_info) in self.reader.all_ids.items():
file_types = ds_info['file_type']
if (not isinstance(file_types, list)):
file_types = [file_types]
if (ftype in file_types):
assert (resol == ds_id['resolution']) |
def interrogate_collection_type(t):
expr = _norm_input(t)
style = None
members = None
view = None
base = None
if (expr.name in _VARIADIC):
(view, base) = _VARIADIC[expr.name]
(field,) = expr.fields
if isinstance(field, UnionExp):
style = 'composite'
members = list(field.members)
else:
style = 'simple'
members = field
elif isinstance(expr, UnionExp):
if (expr.members[0].name in _VARIADIC):
members = []
for member in expr.members:
(field,) = member.fields
if isinstance(field, UnionExp):
style = 'complex'
members.append(list(field.members))
else:
members.append([field])
if (style != 'complex'):
style = 'monomorphic'
(view, base) = _VARIADIC[member.name]
if (style == 'monomorphic'):
members = [m[0] for m in members]
return CollectionStyle(style=style, members=members, view=view, expr=expr, base=base) |
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = MgpstrTokenizer
test_rust_tokenizer = False
from_pretrained_kwargs = {}
test_seq2seq = False
def setUp(self):
super().setUp()
vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
vocab_tokens = dict(zip(vocab, range(len(vocab))))
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file, 'w', encoding='utf-8') as fp:
fp.write((json.dumps(vocab_tokens) + '\n'))
def get_tokenizer(self, **kwargs):
return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self, tokenizer):
input_text = 'tester'
output_text = 'tester'
return (input_text, output_text)
@unittest.skip('MGP-STR always lower cases letters.')
def test_added_tokens_do_lower_case(self):
pass
def test_add_special_tokens(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
special_token = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token})
encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
self.assertEqual(len(encoded_special_token), 1)
decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
self.assertTrue((special_token not in decoded))
def test_internal_consistency(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
(input_text, output_text) = self.get_input_output_texts(tokenizer)
tokens = tokenizer.tokenize(input_text)
ids = tokenizer.convert_tokens_to_ids(tokens)
ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
self.assertListEqual(ids, ids_2)
tokens_2 = tokenizer.convert_ids_to_tokens(ids)
self.assertNotEqual(len(tokens_2), 0)
text_2 = tokenizer.decode(ids)
self.assertIsInstance(text_2, str)
self.assertEqual(text_2.replace(' ', ''), output_text)
@unittest.skip('MGP-STR tokenizer only handles one sequence.')
def test_maximum_encoding_length_pair_input(self):
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer')
def test_pretokenized_inputs(self):
pass |
class FAP2(Stage):
_format = [E(1, 4, x_fixed(b'FAP2'), dummy=True), E(6, 7, 'i2'), E(9, 9, 'a1'), E(11, 14, 'i4'), E(16, 23, 'f8.3'), E(25, 27, 'i3'), E(29, 53, 'a25')]
output_units = Units.T(help='output units code (V=volts, A=amps, C=counts)')
decimation = Int.T(optional=True, help='decimation')
correction = Float.T(help='group correction applied [s]')
ntrip = Int.T(help='number of frequency, amplitude, phase triplets')
description = String.T(default='', optional=True, help='description')
frequencies = List.T(Float.T(), help='frequency [Hz]')
amplitudes = List.T(Float.T(), help='amplitude [input units/output units]')
phases = List.T(Float.T(), help='phase delay [degrees]')
comments = List.T(String.T(optional=True))
def append_dataline(self, line, version_dialect):
d = FAP2Data.deserialize(line, version_dialect)
self.frequencies.append(d.frequency)
self.amplitudes.append(d.amplitude)
self.phases.append(d.phase)
def write_datalines(self, writer):
for (frequency, amplitude, phase) in zip(self.frequencies, self.amplitudes, self.phases):
FAP2Data(frequency=frequency, amplitude=amplitude, phase=phase).write(writer) |
class BackendMock(_BackendBase):
_version = None
def bugzilla_version(self):
return {'version': self._version}
def __helper(self, args):
prevfuncname = inspect.stack()[1][3]
func_args = getattr(self, ('_%s_args' % prevfuncname))
func_return = getattr(self, ('_%s_return' % prevfuncname))
if isinstance(func_return, BaseException):
raise func_return
filename = None
expect_out = func_args
if isinstance(func_args, str):
filename = func_args
expect_out = None
if ('content-disposition' in str(args)):
largs = list(args)
largs[1] = 'STRIPPED-BY-TESTSUITE'
args = tuple(largs)
if (filename or expect_out):
tests.utils.diff_compare(args, filename, expect_out)
if isinstance(func_return, dict):
return func_return
returnstr = open(tests.utils.tests_path(func_return)).read()
return eval(returnstr)
def bug_attachment_create(self, *args):
return self.__helper(args)
def bug_attachment_get(self, *args):
return self.__helper(args)
def bug_attachment_get_all(self, *args):
return self.__helper(args)
def bug_attachment_update(self, *args):
return self.__helper(args)
def bug_comments(self, *args):
return self.__helper(args)
def bug_create(self, *args):
return self.__helper(args)
def bug_history(self, *args):
return self.__helper(args)
def bug_get(self, *args):
return self.__helper(args)
def bug_fields(self, *args):
return self.__helper(args)
def bug_search(self, *args):
return self.__helper(args)
def bug_update(self, *args):
return self.__helper(args)
def bug_update_tags(self, *args):
return self.__helper(args)
def component_create(self, *args):
return self.__helper(args)
def component_get(self, *args):
return self.__helper(args)
def component_update(self, *args):
return self.__helper(args)
def group_get(self, *args):
return self.__helper(args)
def externalbugs_add(self, *args):
return self.__helper(args)
def externalbugs_update(self, *args):
return self.__helper(args)
def externalbugs_remove(self, *args):
return self.__helper(args)
def product_get(self, *args):
return self.__helper(args)
def product_get_accessible(self, *args):
return self.__helper(args)
def product_get_enterable(self, *args):
return self.__helper(args)
def product_get_selectable(self, *args):
return self.__helper(args)
def user_create(self, *args):
return self.__helper(args)
def user_get(self, *args):
return self.__helper(args)
def user_login(self, *args):
return self.__helper(args)
def user_logout(self, *args):
return self.__helper(args)
def user_update(self, *args):
return self.__helper(args) |
def test_two_debits(sdd):
payment1 = {'name': 'Test & Co.', 'IBAN': 'NL50BANK', 'BIC': 'BANKNL2A', 'amount': 1012, 'type': 'FRST', 'collection_date': datetime.date.today(), 'mandate_id': '1234', 'mandate_date': datetime.date.today(), 'description': 'Test transaction1', 'endtoend_id': 'ebd75e7e649375d91b33dc11ae44c0e1'}
payment2 = {'name': 'Test du Test', 'IBAN': 'NL50BANK', 'BIC': 'BANKNL2A', 'amount': 5000, 'type': 'RCUR', 'collection_date': datetime.date.today(), 'mandate_id': '1234', 'mandate_date': datetime.date.today(), 'description': u'Testgrue <html>', 'endtoend_id': 'af755a40cb692551ed9f9d55f7179525'}
sdd.add_payment(payment1)
sdd.add_payment(payment2)
xmlout = sdd.export()
xmlpretty = validate_xml(xmlout, 'pain.008.001.02')
assert (clean_ids(xmlpretty.strip()) == clean_ids(SAMPLE_RESULT.strip())) |
def test_filerewriter_files_in_to_out_no_out(temp_file_creator):
rewriter = ArbRewriter('formatter')
file1 = temp_file_creator()
with patch_logger('pypyr.utils.filesystem', logging.INFO) as mock_logger_info:
rewriter.files_in_to_out(file1)
assert (mock_logger_info.mock_calls == [call(f'edited & wrote 1 file(s) at {file1}')])
rewriter.assert_in_to_out_call_count(1)
rewriter.assert_in_to_out_call_path(Path(file1), None) |
class FixFuncattrs(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "\n power< any+ trailer< '.' attr=('func_closure' | 'func_doc' | 'func_globals'\n | 'func_name' | 'func_defaults' | 'func_code'\n | 'func_dict') > any* >\n "
def transform(self, node, results):
attr = results['attr'][0]
attr.replace(Name(('__%s__' % attr.value[5:]), prefix=attr.prefix)) |
class ConditionalDetrOnnxConfig(OnnxConfig):
torch_onnx_minimum_version = version.parse('1.11')
@property
def inputs(self) -> Mapping[(str, Mapping[(int, str)])]:
return OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ('pixel_mask', {0: 'batch'})])
@property
def atol_for_validation(self) -> float:
return 1e-05
@property
def default_onnx_opset(self) -> int:
return 12 |
class NotInSetConstraint(Constraint):
def __init__(self, set):
self._set = set
def __call__(self, variables, domains, assignments, forwardcheck=False):
raise RuntimeError("Can't happen")
def preProcess(self, variables: Sequence, domains: dict, constraints: List[tuple], vconstraints: dict):
set = self._set
for variable in variables:
domain = domains[variable]
for value in domain[:]:
if (value in set):
domain.remove(value)
vconstraints[variable].remove((self, variables))
constraints.remove((self, variables)) |
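A usage sketch assuming the Constraint base class above is python-constraint's: preProcess prunes the forbidden values from the variable's domain and then removes the constraint, so __call__ is never reached for a unary constraint.
from constraint import Problem

problem = Problem()
problem.addVariable('x', list(range(5)))
problem.addConstraint(NotInSetConstraint({1, 3}), ['x'])
print(sorted(s['x'] for s in problem.getSolutions()))  # [0, 2, 4]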
class Tlibrary_utils(TestCase):
def test_basic(self):
if is_windows():
res = split_scan_dirs(':Z:\\foo:C:/windows:')
self.assertEqual(res, ['Z:\\foo', 'C:/windows'])
else:
res = split_scan_dirs(f':{STANDARD_PATH}:{OTHER_PATH}:')
self.assertEqual(res, [STANDARD_PATH, OTHER_PATH])
def test_colon_paths(self):
if (not is_windows()):
res = split_scan_dirs(f':{STANDARD_PATH}:{GVFS_PATH_ESCAPED}')
self.assertEqual(res, [STANDARD_PATH, GVFS_PATH])
def test_get_exclude_dirs(self):
some_path = os.path.join(get_home_dir(), 'foo')
if (os.name != 'nt'):
some_path = unexpand(some_path)
config.set('library', 'exclude', some_path)
assert (os.path.expanduser(some_path) in get_exclude_dirs())
assert all((isinstance(p, fsnative) for p in get_exclude_dirs()))
def test_get_scan_dirs(self):
some_path = os.path.join(get_home_dir(), 'foo')
if (os.name != 'nt'):
some_path = unexpand(some_path)
config.set('settings', 'scan', some_path)
assert (os.path.expanduser(some_path) in get_scan_dirs())
assert all((isinstance(p, fsnative) for p in get_scan_dirs())) |
def find_vcs_root(path, markers=('.git',)):
if osp.isfile(path):
path = osp.dirname(path)
(prev, cur) = (None, osp.abspath(osp.expanduser(path)))
while (cur != prev):
if any((osp.exists(osp.join(cur, marker)) for marker in markers)):
return cur
(prev, cur) = (cur, osp.split(cur)[0])
return None |
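A usage sketch for find_vcs_root: it walks upward from the given path until it finds a directory containing one of the markers, and returns None if it reaches the filesystem root without a match.
repo_root = find_vcs_root(__file__)  # nearest ancestor containing .git
project_root = find_vcs_root('.', markers=('.git', '.hg', 'pyproject.toml'))
print(repo_root, project_root)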
def get_cosine_schedule_with_warmup(optimizer, num_training_steps, num_warmup_steps=0, num_cycles=(7.0 / 16.0), last_epoch=(- 1)):
def _lr_lambda(current_step):
if (current_step < num_warmup_steps):
return (float(current_step) / float(max(1, num_warmup_steps)))
no_progress = (float((current_step - num_warmup_steps)) / float(max(1, (num_training_steps - num_warmup_steps))))
return max(0.0, math.cos(((math.pi * num_cycles) * no_progress)))
return optim.lr_scheduler.LambdaLR(optimizer, _lr_lambda, last_epoch) |
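A usage sketch for the scheduler factory above (assuming math and torch.optim are imported as optim, as the function requires): linear warmup for 500 steps, then the truncated-cosine decay with cycle fraction 7/16.
import torch
from torch import optim

model = torch.nn.Linear(10, 2)
optimizer = optim.SGD(model.parameters(), lr=0.03, momentum=0.9, nesterov=True)
scheduler = get_cosine_schedule_with_warmup(optimizer, num_training_steps=2 ** 20, num_warmup_steps=500)
for step in range(1000):
    optimizer.step()  # stand-in for a real training step
    scheduler.step()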
# (decorator elided; original name not recoverable from the extraction)
def constant_init(module, val, bias=0):
if (hasattr(module, 'weight') and (module.weight is not None)):
nn.init.constant_(module.weight, val)
elif hasattr(module, 'kernel'):
nn.init.constant_(module.kernel, val)
if (hasattr(module, 'bias') and (module.bias is not None)):
nn.init.constant_(module.bias, bias) |
class _CookieDBManager():
_db = None
_cache_dir = _os.path.join(_ad.user_cache_dir(), 'py-yfinance')
@classmethod  # assumed: these methods take cls, so the classmethod decorators were stripped in extraction
def get_database(cls):
if (cls._db is None):
cls._initialise()
return cls._db
@classmethod
def close_db(cls):
if (cls._db is not None):
try:
cls._db.close()
except Exception:
pass
@classmethod
def _initialise(cls, cache_dir=None):
if (cache_dir is not None):
cls._cache_dir = cache_dir
if (not _os.path.isdir(cls._cache_dir)):
try:
_os.makedirs(cls._cache_dir)
except OSError as err:
raise _CookieCacheException(f"Error creating CookieCache folder: '{cls._cache_dir}' reason: {err}")
elif (not (_os.access(cls._cache_dir, _os.R_OK) and _os.access(cls._cache_dir, _os.W_OK))):
raise _CookieCacheException(f"Cannot read and write in CookieCache folder: '{cls._cache_dir}'")
cls._db = _peewee.SqliteDatabase(_os.path.join(cls._cache_dir, 'cookies.db'), pragmas={'journal_mode': 'wal', 'cache_size': (- 64)})
@classmethod
def set_location(cls, new_cache_dir):
if (cls._db is not None):
cls._db.close()
cls._db = None
cls._cache_dir = new_cache_dir
@classmethod
def get_location(cls):
return cls._cache_dir |
class ConfigSetting():
_name = None
def __init__(self, default=ExplicitSettingRequired, description='No description supplied', dangerous=False, automatic=False):
self.description = description
self.default = default
self.dangerous = dangerous
self.automatic = automatic
def __str__(self):
return 'for humans'
@property  # assumed: accessed without a call below (self.defaulted)
def defaulted(self):
return (self.default is not ExplicitSettingRequired)
def default_value_for_configuration(self, config):
if isinstance(self.default, DeferredDefault):
return self.default(config)
else:
return self.default
def __get__(self, obj, objtype):
setting_name = self.name(type(obj))
if self.is_set(obj):
return obj.__dict__[setting_name]
if self.defaulted:
return self.default_value_for_configuration(obj)
raise ConfigurationException(('%s was not set' % setting_name))
def __set__(self, obj, value):
name = self.name(type(obj))
obj.__dict__[name] = value
def name(self, objtype):
if self._name:
return self._name
for cls in objtype.mro():
for (name, value) in cls.__dict__.items():
if (value is self):
self._name = name
return name
raise AttributeError(('Could not deduce name for descriptor %s (%s) %s' % (self, self.description, self.default)))
def is_set(self, obj):
name = self.name(type(obj))
return (name in obj.__dict__)
def is_valid(self, obj):
return (self.automatic or self.defaulted or self.is_set(obj))
def is_dangerous(self, obj):
return (self.dangerous and (not self.is_set(obj))) |
def _helper_runningmeanstd():
comm = MPI.COMM_WORLD
np.random.seed(0)
for (triple, axis) in [((np.random.randn(3), np.random.randn(4), np.random.randn(5)), 0), ((np.random.randn(3, 2), np.random.randn(4, 2), np.random.randn(5, 2)), 0), ((np.random.randn(2, 3), np.random.randn(2, 4), np.random.randn(2, 4)), 1)]:
x = np.concatenate(triple, axis=axis)
ms1 = [x.mean(axis=axis), x.std(axis=axis), x.shape[axis]]
ms2 = mpi_moments(triple[comm.Get_rank()], axis=axis)
for (a1, a2) in zipsame(ms1, ms2):
print(a1, a2)
assert np.allclose(a1, a2)
print('ok!') |
class CharBiLSTM(nn.Module):
def __init__(self, char2idx, chars, char_emb_size, charlstm_hidden_dim, dropout=0.5):
super(CharBiLSTM, self).__init__()
print('[Info] Building character-level LSTM')
self.char_emb_size = char_emb_size
self.char2idx = char2idx
self.chars = chars
self.char_size = len(self.chars)
self.device = NetworkConfig.DEVICE
self.hidden = charlstm_hidden_dim
self.char_embeddings = nn.Embedding(self.char_size, self.char_emb_size)
self.char_embeddings = self.char_embeddings.to(self.device)
self.char_lstm = nn.LSTM(self.char_emb_size, (self.hidden // 2), num_layers=1, batch_first=True, bidirectional=True).to(self.device)
def get_last_hiddens(self, char_seq_tensor, char_seq_len):
batch_size = char_seq_tensor.size(0)
sent_len = char_seq_tensor.size(1)
char_seq_tensor = char_seq_tensor.view((batch_size * sent_len), (- 1))
char_seq_len = char_seq_len.view((batch_size * sent_len))
(sorted_seq_len, permIdx) = char_seq_len.sort(0, descending=True)
(_, recover_idx) = permIdx.sort(0, descending=False)
sorted_seq_tensor = char_seq_tensor[permIdx]
char_embeds = self.char_embeddings(sorted_seq_tensor)
pack_input = pack_padded_sequence(char_embeds, sorted_seq_len, batch_first=True)
(char_rnn_out, char_hidden) = self.char_lstm(pack_input, None)
hidden = char_hidden[0].transpose(1, 0).contiguous().view((batch_size * sent_len), 1, (- 1))
output = hidden[recover_idx].view(batch_size, sent_len, (- 1))
return output
def forward(self, char_input, seq_lengths):
return self.get_last_hiddens(char_input, seq_lengths) |
def on_key_press(symbol, modifiers):
if (symbol == pyglet.window.key.SPACE):
if timer.running:
timer.running = False
elif (timer.time > 0):
timer.reset()
else:
timer.running = True
elif (symbol == pyglet.window.key.ESCAPE):
window.close() |
def test_scalar_creator_helper():
default = scalar()
assert (default.type.dtype == config.floatX)
assert (default.type.ndim == 0)
assert (default.type.shape == ())
assert (default.name is None)
custom = scalar(name='custom', dtype='int64')
assert (custom.dtype == 'int64')
assert (custom.type.ndim == 0)
assert (custom.type.shape == ()) |
# (decorator elided; original name not recoverable from the extraction)
def test_max_size_hint_no_flag(xmanager, conn):
w = None
def size_hints():
nonlocal w
w = conn.create_window(0, 0, 100, 100)
hints = ([0] * 18)
hints[7] = hints[8] = 100
w.set_property('WM_NORMAL_HINTS', hints, type='WM_SIZE_HINTS', format=32)
w.map()
conn.conn.flush()
try:
xmanager.create_window(size_hints)
xmanager.c.window.enable_floating()
assert (xmanager.c.window.info()['width'] == 100)
assert (xmanager.c.window.info()['height'] == 100)
xmanager.c.window.set_size_floating(50, 50)
assert (xmanager.c.window.info()['width'] == 50)
assert (xmanager.c.window.info()['height'] == 50)
xmanager.c.window.set_size_floating(200, 200)
assert (xmanager.c.window.info()['width'] == 200)
assert (xmanager.c.window.info()['height'] == 200)
finally:
w.kill_client() |
@pytest.mark.end_to_end()
def test_error_when_hook_module_is_no_iterable(tmp_path):
tmp_path.joinpath('pyproject.toml').write_text("[tool.pytask.ini_options]\nhook_module = 'hooks'")
result = subprocess.run(('pytask', 'build', '--help'), cwd=tmp_path, capture_output=True)
assert (result.returncode == ExitCode.CONFIGURATION_FAILED)
assert (b"Error: Invalid value for '--hook-module':" in result.stderr) |
def main_worker(gpu, args):
args.gpu = gpu
args.rank = gpu
print(f'Process Launching at GPU {gpu}')
if args.distributed:
torch.cuda.set_device(args.gpu)
dist.init_process_group(backend='nccl')
print(f'Building train loader at GPU {gpu}')
train_loader = get_loader(args, split=args.train, mode='train', batch_size=args.batch_size, distributed=args.distributed, gpu=args.gpu, workers=args.num_workers, topk=args.train_topk)
if (gpu == 0):
if (args.valid_batch_size is not None):
valid_batch_size = args.valid_batch_size
else:
valid_batch_size = args.batch_size
print(f'Building val loader at GPU {gpu}')
val_loader = get_loader(args, split=args.valid, mode='val', batch_size=valid_batch_size, distributed=False, gpu=args.gpu, workers=4, topk=args.valid_topk)
print('# len val loader:', len(val_loader))
print(f'Building test loader at GPU {gpu}')
test_loader = get_loader(args, split=args.test, mode='val', batch_size=valid_batch_size, distributed=False, gpu=args.gpu, workers=4, topk=args.valid_topk)
else:
val_loader = None
test_loader = None
trainer = Trainer(args, train_loader, val_loader, test_loader, train=True)
trainer.train() |
class VoxelGenerator():
def __init__(self, voxel_size, point_cloud_range, max_num_points, max_voxels=20000):
point_cloud_range = np.array(point_cloud_range, dtype=np.float32)
voxel_size = np.array(voxel_size, dtype=np.float32)
grid_size = ((point_cloud_range[3:] - point_cloud_range[:3]) / voxel_size)
grid_size = np.round(grid_size).astype(np.int64)
voxelmap_shape = tuple(np.round(grid_size).astype(np.int32).tolist())
voxelmap_shape = voxelmap_shape[::(- 1)]
self._coor_to_voxelidx = np.full(voxelmap_shape, (- 1), dtype=np.int32)
self._voxel_size = voxel_size
self._point_cloud_range = point_cloud_range
self._max_num_points = max_num_points
self._max_voxels = max_voxels
self._grid_size = grid_size
def generate(self, points, max_voxels=None):
res = points_to_voxel(points, self._voxel_size, self._point_cloud_range, self._coor_to_voxelidx, self._max_num_points, (max_voxels or self._max_voxels))
return res
@property  # assumed: accessor decorators stripped in extraction
def voxel_size(self):
return self._voxel_size
@property
def max_num_points_per_voxel(self):
return self._max_num_points
@property
def point_cloud_range(self):
return self._point_cloud_range
@property
def grid_size(self):
return self._grid_size |
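A usage sketch for VoxelGenerator (assuming the points_to_voxel helper it calls is importable; the voxel settings below are illustrative KITTI-style values, not values from the source).
import numpy as np

generator = VoxelGenerator(voxel_size=[0.2, 0.2, 4.0], point_cloud_range=[0.0, -40.0, -3.0, 70.4, 40.0, 1.0], max_num_points=35, max_voxels=20000)
points = np.random.uniform(low=[0.0, -40.0, -3.0, 0.0], high=[70.4, 40.0, 1.0, 1.0], size=(10000, 4)).astype(np.float32)
res = generator.generate(points)  # layout of the result depends on points_to_voxel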
class Append(COp):
__props__ = ('inplace',)
def __init__(self, inplace=False):
self.inplace = inplace
if self.inplace:
self.destroy_map = {0: [0]}
else:
self.view_map = {0: [0]}
def make_node(self, x, toAppend):
assert isinstance(x.type, TypedListType)
assert (x.ttype == toAppend.type), (x.ttype, toAppend.type)
return Apply(self, [x, toAppend], [x.type()])
def perform(self, node, inputs, outputs):
(x, toAppend) = inputs
(out,) = outputs
if (not self.inplace):
out[0] = list(x)
else:
out[0] = x
toAppend = _lessbroken_deepcopy(toAppend)
out[0].append(toAppend)
def __str__(self):
return self.__class__.__name__
def c_code(self, node, name, inp, out, sub):
raise NotImplementedError('DISABLED AS WE NEED TO UPDATE IT TO COPY toAppend()')
(x_name, toAppend) = (inp[0], inp[1])
output_name = out[0]
fail = sub['fail']
if (not self.inplace):
init = ('\n %(output_name)s = (PyListObject*) PyList_GetSlice((PyObject*) %(x_name)s, 0, PyList_GET_SIZE((PyObject*) %(x_name)s)) ;\n ' % locals())
else:
init = f'''
{output_name} = {x_name};
'''
return (init + ('\n if(%(output_name)s==NULL){\n %(fail)s\n };\n if(PyList_Append( (PyObject*) %(output_name)s,(PyObject*) %(toAppend)s)){\n %(fail)s\n };\n Py_INCREF(%(output_name)s);\n ' % locals()))
def c_code_cache_version(self):
return (1,) |
@keras_test  # assumed: Keras test decorator garbled to "_test" in extraction
def test_clone_functional_model():
val_a = np.random.random((10, 4))
val_b = np.random.random((10, 4))
val_out = np.random.random((10, 4))
input_a = keras.Input(shape=(4,))
input_b = keras.Input(shape=(4,))
dense_1 = keras.layers.Dense(4)
dense_2 = keras.layers.Dense(4)
x_a = dense_1(input_a)
x_a = keras.layers.Dropout(0.5)(x_a)
x_b = dense_1(input_b)
x_a = dense_2(x_a)
outputs = keras.layers.add([x_a, x_b])
model = keras.models.Model([input_a, input_b], outputs)
if (K.backend() == 'tensorflow'):
K.clear_session()
new_model = keras.models.clone_model(model)
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch([val_a, val_b], val_out)
input_a = keras.Input(shape=(4,), name='a')
input_b = keras.Input(shape=(4,), name='b')
new_model = keras.models.clone_model(model, input_tensors=[input_a, input_b])
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch([val_a, val_b], val_out)
input_a = keras.backend.variable(val_a)
input_b = keras.backend.variable(val_b)
new_model = keras.models.clone_model(model, input_tensors=[input_a, input_b])
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch(None, val_out) |
@pytest.fixture  # assumed: fixture decorator stripped in extraction
def fixture_path(test_path):
test_path.makepyfile(test_classes='\n import pytest\n\n class Test1:\n @pytest.mark.order("last")\n def test_two(self):\n assert True\n\n @pytest.mark.order("first")\n def test_one(self):\n assert True\n\n class Test2:\n @pytest.mark.order("last")\n def test_two(self):\n assert True\n\n @pytest.mark.order("first")\n def test_one(self):\n assert True\n\n @pytest.mark.order("last")\n def test_two():\n assert True\n\n @pytest.mark.order("first")\n def test_one():\n assert True\n ', test_functions1='\n import pytest\n\n @pytest.mark.order("last")\n def test1_two():\n assert True\n\n @pytest.mark.order("first")\n def test1_one():\n assert True\n ', test_functions2='\n import pytest\n\n @pytest.mark.order("last")\n def test2_two():\n assert True\n\n @pytest.mark.order("first")\n def test2_one():\n assert True\n ')
(yield test_path) |
def _get_attributes(element):
properties = {}
for (attrib_name, val) in element.attrib.items():
if attrib_name.endswith('_LONG'):
val = six.integer_types[(- 1)](val)
attrib_name = attrib_name[:(- 5)]
else:
val = _un_escape_specials(val)
_extract_properties(properties, attrib_name, val)
return properties |
def _get_package_bin_dir_app_paths(venv: Venv, package_info: PackageInfo, venv_bin_path: Path, local_bin_dir: Path) -> Set[Path]:
suffix = package_info.suffix
apps = []
if package_info.include_apps:
apps += package_info.apps
if package_info.include_dependencies:
apps += package_info.apps_of_dependencies
return get_exposed_paths_for_package(venv_bin_path, local_bin_dir, [add_suffix(app, suffix) for app in apps]) |
def test_bn_reestimation():
tf.keras.backend.clear_session()
np.random.seed(0)
input_data = np.random.randn(1024, 32, 32, 3).astype(np.float32)
batch_size = 4
dataset = tf.data.Dataset.from_tensor_slices(input_data)
dataset = dataset.batch(batch_size=batch_size)
dummy_inputs = np.random.randn(2, 32, 32, 3).astype(np.float32)
model = tf.keras.applications.mobilenet_v2.MobileNetV2(weights=None, input_shape=(32, 32, 3))
sub_model = tf.keras.Sequential()
for layer in model.layers[0:12]:
sub_model.add(layer)
sub_model.build((32, 32, 3))
qsim = _qsim_setup_for_fold_scale(sub_model, dummy_inputs)
qsim.model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), loss=tf.keras.losses.MeanSquaredError())
_reestimate_and_compare_results(qsim.model, dataset)
_fold_all_batch_norms_to_scale_and_compare_results(qsim, dummy_inputs, 0.005) |
class RegressionModelConfig(PretrainedConfig):
def __init__(self, a=0, b=0, double_output=False, random_torch=True, **kwargs):
super().__init__(**kwargs)
self.a = a
self.b = b
self.double_output = double_output
self.random_torch = random_torch
self.hidden_size = 1 |
@register_model('model_parallel_transformer_lm')
class ModelParallelTransformerLanguageModel(TransformerLanguageModel):
@classmethod
def build_model(cls, args, task):
if (not has_megatron_submodule):
raise ImportError('\n\nPlease install the megatron submodule:\n\n git submodule update --init fairseq/model_parallel/megatron')
base_lm_architecture(args)
task.source_dictionary.pad_to_multiple_((args.model_parallel_size * 8))
task.target_dictionary.pad_to_multiple_((args.model_parallel_size * 8))
if args.decoder_layers_to_keep:
args.decoder_layers = len(args.decoder_layers_to_keep.split(','))
if (getattr(args, 'max_target_positions', None) is None):
args.max_target_positions = getattr(args, 'tokens_per_sample', DEFAULT_MAX_TARGET_POSITIONS)
if args.character_embeddings:
raise NotImplementedError('Character embeddings is not supported for model parallel')
elif args.adaptive_input:
raise NotImplementedError('Adaptive input is not supported for model parallel')
else:
embed_tokens = cls.build_embedding(args, task.source_dictionary, args.decoder_input_dim)
decoder = ModelParallelTransformerDecoder(args, task.target_dictionary, embed_tokens, no_encoder_attn=True)
return cls(decoder)
@classmethod
def build_embedding(cls, args, dictionary, embed_dim, path=None):
def _vocab_init(tensor, **kwargs):
nn.init.normal_(tensor, mean=0, std=(embed_dim ** (- 0.5)))
nn.init.constant_(tensor[1], 0)
embed_tokens = VocabParallelEmbedding(len(dictionary), embed_dim, dictionary.pad(), init_method=_vocab_init)
return embed_tokens |
class TextInputAdapter(ObjectBlockView):
def __init__(self, obj, container, x=10, y=10, *args, **kwargs):
ObjectBlockView.__init__(self, obj, container, *args, x=x, y=y, **kwargs)
txt = gui.TextInput()
ofbv = ObjectFunctionBlockView(self.reference_object, txt.get_value, 'get_value', 'get_value', self)
ofbv.add_io_widget(OutputView('Value'))
self.add_fb_view(ofbv)
ofbv = ObjectFunctionBlockView(self.reference_object, txt.set_value, 'set_value', 'set_value', self)
self.add_fb_view(ofbv)
ie = InputEvent('onclicked', self.callback_test)
self.add_io_widget(ie)
oe = OutputEvent('onclick', self.onclick)
self.add_io_widget(oe)
def callback_test(self, emitter):
self.outline.set_stroke(2, 'red') |
def tensor_to_PIL(image_tensor, pixel_min=(- 1), pixel_max=1):
image_tensor = image_tensor.cpu()
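# rescale pixel values from [pixel_min, pixel_max] to [0, 1] before converting to PIL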
if ((pixel_min != 0) or (pixel_max != 1)):
image_tensor = ((image_tensor - pixel_min) / (pixel_max - pixel_min))
image_tensor.clamp_(min=0, max=1)
to_pil = torchvision.transforms.functional.to_pil_image
if (image_tensor.dim() == 4):
return [to_pil(img) for img in image_tensor]
return to_pil(image_tensor) |
class CloudGuruLectureLectureAssets(object):
def __init__(self, parent):
self._extension = None
self._mediatype = None
self._url = None
self._parent = parent
self._title = None
self._filename = None
self._fsize = None
self._active = False
def __repr__(self):
out = ('%s:%s' % (self.mediatype, self.extension))
return out
def _generate_filename(self):
ok = re.compile('[^\\\\/:*?"<>|]')
filename = ''.join(((x if ok.match(x) else '_') for x in self.title))
filename += '.{}'.format(self.extension)
return filename
def _write_external_links(self, filepath):
retVal = {}
filename = filepath
if (os.name == 'nt'):
if (len(os.path.abspath(filename)) > 255):
filename = (u'\\\\?\\%s' % os.path.abspath(filename))
if (pyver == 3):
with open('{}'.format(filename), 'a', encoding='utf-8') as f:
try:
f.write('{}\n'.format(self.url))
except Exception as e:
retVal = {'status': 'False', 'msg': 'Python3 Exception : {}'.format(e)}
else:
retVal = {'status': 'True', 'msg': 'download'}
f.close()
else:
with open('{}'.format(filename), 'a') as f:
try:
f.write('{}\n'.format(self.url))
except Exception as e:
retVal = {'status': 'False', 'msg': 'Python2 Exception : {}'.format(e)}
else:
retVal = {'status': 'True', 'msg': 'download'}
f.close()
return retVal
@property
def id(self):
return self._parent.id
@property
def url(self):
return self._url
@property
def extension(self):
return self._extension
@property
def title(self):
return self._title
@property
def filename(self):
if (not self._filename):
self._filename = self._generate_filename()
return self._filename
@property
def mediatype(self):
return self._mediatype
def get_filesize(self):
if (not self._fsize):
try:
cl = 'content-length'
self._fsize = int(requests.get(self.url, stream=True, headers={'User-Agent': HEADERS.get('User-Agent')}).headers[cl])
except (conn_error, KeyError) as e:
self._fsize = 0
return self._fsize
def download(self, filepath='', quiet=False, callback=(lambda *x: None)):
savedir = filename = ''
retVal = {}
if (filepath and os.path.isdir(filepath)):
(savedir, filename) = (filepath, self.filename)
elif filepath:
(savedir, filename) = os.path.split(filepath)
else:
filename = self.filename
filepath = os.path.join(savedir, filename)
if ((os.name == 'nt') and (len(filepath) > 250)):
filepath = '\\\\?\\{}'.format(filepath)
if (self.mediatype == 'external_link'):
return self._write_external_links(filepath)
if os.path.isfile(filepath):
retVal = {'status': 'True', 'msg': 'already downloaded'}
return retVal
temp_filepath = (filepath + '.part')
status_string = ' {:,} Bytes [{:.2%}] received. Rate: [{:4.0f} KB/s]. ETA: [{:.0f} secs]'
if early_py_version:
status_string = ' {0:} Bytes [{1:.2%}] received. Rate: [{2:4.0f} KB/s]. ETA: [{3:.0f} secs]'
try:
req = compat_request(self.url, headers={'User-Agent': HEADERS.get('User-Agent')})
response = compat_urlopen(req)
except compat_urlerr as e:
retVal = {'status': 'False', 'msg': 'URLError : either your internet connection is not working or server aborted the request'}
return retVal
except compat_httperr as e:
if (e.code == 401):
retVal = {'status': 'False', 'msg': 'CloudGuru Says (HTTP Error 401 : Unauthorized)'}
else:
retVal = {'status': 'False', 'msg': 'HTTPError-{} : direct download link is expired run the CloudGuru-dl again...'.format(e.code)}
return retVal
except Exception as e:
retVal = {'status': 'False', 'msg': ('Exception : %s' % e)}
return retVal
else:
try:
total = int(response.info()['Content-Length'].strip())
except Exception as e:
retVal = {'status': 'False', 'msg': ('Exception : %s' % e)}
return retVal
(chunksize, bytesdone, t0) = (16384, 0, time.time())
(fmode, offset) = ('wb', 0)
if os.path.exists(temp_filepath):
if (os.stat(temp_filepath).st_size < total):
offset = os.stat(temp_filepath).st_size
fmode = 'ab'
try:
outfh = open(temp_filepath, fmode)
except Exception as e:
if (os.name == 'nt'):
file_length = len(temp_filepath)
if (file_length > 255):
retVal = {'status': 'False', 'msg': "file length is too long to create. try downloading to other drive (e.g :- -o 'E:\\')"}
return retVal
retVal = {'status': 'False', 'msg': 'Reason : {}'.format(e)}
return retVal
if offset:
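# resume an interrupted download by requesting only the remaining byte range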
resume_opener = compat_opener()
resume_opener.addheaders = [('User-Agent', HEADERS.get('User-Agent')), ('Range', ('bytes=%s-' % offset))]
try:
response = resume_opener.open(self.url)
except compat_urlerr as e:
retVal = {'status': 'False', 'msg': 'URLError : either your internet connection is not working or server aborted the request'}
return retVal
except compat_httperr as e:
if (e.code == 401):
retVal = {'status': 'False', 'msg': 'CloudGuru Says (HTTP Error 401 : Unauthorized)'}
else:
retVal = {'status': 'False', 'msg': "HTTPError-{} : direct download link is expired run the CloudGuru-dl with '--skip-sub' option ...".format(e.code)}
return retVal
except Exception as e:
retVal = {'status': 'False', 'msg': ('Exception : %s' % e)}
return retVal
else:
bytesdone = offset
self._active = True
while self._active:
chunk = response.read(chunksize)
outfh.write(chunk)
elapsed = (time.time() - t0)
bytesdone += len(chunk)
if elapsed:
try:
rate = (((float(bytesdone) - float(offset)) / 1024.0) / elapsed)
eta = ((total - bytesdone) / (rate * 1024.0))
except ZeroDivisionError as e:
outfh.close()
try:
os.unlink(temp_filepath)
except Exception as e:
pass
retVal = {'status': 'False', 'msg': 'ZeroDivisionError : it seems, lecture has malfunction or is zero byte(s) ..'}
return retVal
else:
rate = 0
eta = 0
progress_stats = (bytesdone, ((bytesdone * 1.0) / total), rate, eta)
if (not chunk):
outfh.close()
break
if (not quiet):
status = status_string.format(*progress_stats)
sys.stdout.write(((('\r' + status) + (' ' * 4)) + '\r'))
sys.stdout.flush()
if callback:
callback(total, *progress_stats)
if self._active:
os.rename(temp_filepath, filepath)
retVal = {'status': 'True', 'msg': 'download'}
else:
outfh.close()
retVal = {'status': 'True', 'msg': 'download'}
return retVal |
def test__torque_driven_ocp__maximize_predicted_height_CoM():
from bioptim.examples.torque_driven_ocp import maximize_predicted_height_CoM as ocp_module
bioptim_folder = os.path.dirname(ocp_module.__file__)
ocp_module.prepare_ocp(biorbd_model_path=(bioptim_folder + '/models/2segments_4dof_2contacts.bioMod'), phase_time=0.5, n_shooting=20, use_actuators=False, objective_name='MINIMIZE_COM_VELOCITY', com_constraints=True, phase_dynamics=PhaseDynamics.SHARED_DURING_THE_PHASE, expand_dynamics=False) |
@pytest.mark.parametrize('old_version, new_version, changed', [(None, '5.12.1', False), ('5.12.1', '5.12.1', False), ('5.12.2', '5.12.1', True), ('5.12.1', '5.12.2', True), ('5.13.0', '5.12.2', True), ('5.12.2', '5.13.0', True)])
def test_qt_version_changed(state_writer, monkeypatch, old_version, new_version, changed):
monkeypatch.setattr(configfiles, 'qVersion', (lambda : new_version))
if (old_version is not None):
state_writer('qt_version', old_version)
state = configfiles.StateConfig()
assert (state.qt_version_changed == changed) |
def get_future_names(packages: List[Package], underlined: bool, job_set: taskhandle.BaseJobSet) -> Generator[(Future, None, None)]:
with ProcessPoolExecutor() as executor:
for package in packages:
for module in get_files(package, underlined):
job_set.started_job(module.modname)
job_set.increment()
(yield executor.submit(get_names, module, package)) |
@pytest.mark.parametrize('solver', [pytest.param(_make_se, id='SESolver'), pytest.param(_make_me, id='MESolver'), pytest.param(_make_br, id='BRSolver')])
def testPropSolver(solver):
a = destroy(5)
H = (a.dag() * a)
U = Propagator(solver(H, a))
c_ops = []
if (solver is not _make_se):
c_ops = [a]
assert ((U(1) - propagator(H, 1, c_ops)).norm('max') < 0.0001)
assert ((U(0.5) - propagator(H, 0.5, c_ops)).norm('max') < 0.0001)
assert ((U(1.5, 0.5) - propagator(H, 1, c_ops)).norm('max') < 0.0001) |
@scheduler('multistep-stage')
def multistep_stage(stage, spec):
log.info('scheduling multistep stage with spec:\n%s', spec)
log.debug('selecting parameters')
parameters = {k: select_parameter(stage.view, v) for (k, v) in get_parameters(spec['parameters']).items()}
log.info('scattering')
singlesteppars = scatter(parameters, spec['scatter'], spec.get('batchsize'), spec.get('partitionsize'))
log.info('scattering')
for (i, pars) in enumerate(singlesteppars):
singlename = '{}_{}'.format(stage.name, i)
(finalized, inputs) = finalize_input(pars, stage.view)
finalized = stage.datamodel.create(finalized, getattr(stage.state_provider, 'datamodel', None))
addStepOrWorkflow(singlename, stage, finalized, inputs, spec)
registerExpressions(stage, spec.get('register_values')) |
def get_solcast_forecast(latitude, longitude, api_key, map_variables=True, **kwargs):
params = dict(latitude=latitude, longitude=longitude, format='json', **kwargs)
data = _get_solcast(endpoint='forecast/radiation_and_weather', params=params, api_key=api_key, map_variables=map_variables)
return (data, {'latitude': latitude, 'longitude': longitude}) |
def _ssim(img1, img2, window, window_size, channel, size_average=True, mask=None):
mu1 = F.conv2d(img1, window, padding=(window_size // 2), groups=channel)
mu2 = F.conv2d(img2, window, padding=(window_size // 2), groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = (mu1 * mu2)
sigma1_sq = (F.conv2d((img1 * img1), window, padding=(window_size // 2), groups=channel) - mu1_sq)
sigma2_sq = (F.conv2d((img2 * img2), window, padding=(window_size // 2), groups=channel) - mu2_sq)
sigma12 = (F.conv2d((img1 * img2), window, padding=(window_size // 2), groups=channel) - mu1_mu2)
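# SSIM stabilising constants (K1=0.01, K2=0.03, dynamic range L=1)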
C1 = (0.01 ** 2)
C2 = (0.03 ** 2)
ssim_map = ((((2 * mu1_mu2) + C1) * ((2 * sigma12) + C2)) / (((mu1_sq + mu2_sq) + C1) * ((sigma1_sq + sigma2_sq) + C2)))
if size_average:
return ssim_map.mean()
elif (mask is None):
return ssim_map.mean(1).mean(1).mean(1)
else:
return ssim_map.mean(1)[mask].mean() |
class TestCals(unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self._qubits = [0, 2]
self._controls = [1, 3]
self._maxrep = 10
self._circs = []
def run_sim(self, noise=None):
backend = qiskit.Aer.get_backend('qasm_simulator')
shots = 500
backend_result = qiskit.execute(self._circs, backend, seed_simulator=SEED, shots=shots, noise_model=noise).result()
return backend_result
def test_ampcal1Q(self):
(self._circs, xdata) = ampcal_1Q_circuits(self._maxrep, self._qubits)
err_unitary = np.zeros([2, 2], dtype=complex)
angle_err = 0.1
for i in range(2):
err_unitary[(i, i)] = np.cos(angle_err)
err_unitary[(i, ((i + 1) % 2))] = np.sin(angle_err)
err_unitary[(0, 1)] *= (- 1.0)
error = coherent_unitary_error(err_unitary)
noise_model = NoiseModel()
noise_model.add_all_qubit_quantum_error(error, 'u2')
initial_theta = 0.18
initial_c = 0.5
fit = AmpCalFitter(self.run_sim(noise_model), xdata, self._qubits, fit_p0=[initial_theta, initial_c], fit_bounds=([(- np.pi), (- 1)], [np.pi, 1]))
self.assertAlmostEqual(fit.angle_err(0), 0.1, 2)
def test_anglecal1Q(self):
(self._circs, xdata) = anglecal_1Q_circuits(self._maxrep, self._qubits, angleerr=0.1)
initial_theta = 0.18
initial_c = 0.5
fit = AngleCalFitter(self.run_sim(), xdata, self._qubits, fit_p0=[initial_theta, initial_c], fit_bounds=([(- np.pi), (- 1)], [np.pi, 1]))
self.assertAlmostEqual(fit.angle_err(0), 0.1, 2)
def test_ampcalCX(self):
(self._circs, xdata) = ampcal_cx_circuits(self._maxrep, self._qubits, self._controls)
err_unitary = np.eye(4, dtype=complex)
angle_err = 0.15
for i in range(2):
err_unitary[((2 + i), (2 + i))] = np.cos(angle_err)
err_unitary[((2 + i), (2 + ((i + 1) % 2)))] = ((- 1j) * np.sin(angle_err))
error = coherent_unitary_error(err_unitary)
noise_model = NoiseModel()
noise_model.add_nonlocal_quantum_error(error, 'cx', [1, 0], [0, 1])
initial_theta = 0.18
initial_c = 0.5
fit = AmpCalCXFitter(self.run_sim(noise_model), xdata, self._qubits, fit_p0=[initial_theta, initial_c], fit_bounds=([(- np.pi), (- 1)], [np.pi, 1]))
self.assertAlmostEqual(fit.angle_err(0), 0.15, 2)
self.assertAlmostEqual(fit.angle_err(1), 0.0, 2)
def test_anglecalCX(self):
(self._circs, xdata) = anglecal_cx_circuits(self._maxrep, self._qubits, self._controls, angleerr=0.1)
initial_theta = 0.18
initial_c = 0.5
fit = AngleCalCXFitter(self.run_sim(), xdata, self._qubits, fit_p0=[initial_theta, initial_c], fit_bounds=([(- np.pi), (- 1)], [np.pi, 1]))
self.assertAlmostEqual(fit.angle_err(0), 0.1, 2)
self.assertAlmostEqual(fit.angle_err(1), 0.1, 2) |
@lru_cache
def read_json_then_binary(game: RandovaniaGame) -> tuple[(Path, dict)]:
dir_path = game.data_path.joinpath('logic_database')
if dir_path.exists():
return (dir_path, data_reader.read_split_file(dir_path))
json_path = dir_path.joinpath(f'{game.value}.json')
if json_path.exists():
with json_path.open('r') as open_file:
return (json_path, data_reader.read_json_file(open_file))
binary_path = get_data_path().joinpath('binary_data', f'{game.value}.bin')
return (binary_path, binary_data.decode_file_path(binary_path)) |
class PdfFormEnv(pdfium_i.AutoCloseable):
def __init__(self, raw, config, pdf):
(self.raw, self.config, self.pdf) = (raw, config, pdf)
super().__init__(PdfFormEnv._close_impl, self.config, self.pdf)
@property
def parent(self):
return self.pdf
@staticmethod
def _close_impl(raw, config, pdf):
pdfium_c.FPDFDOC_ExitFormFillEnvironment(raw)
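# no-op reference that keeps the config object alive until the form fill environment has been exited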
id(config)
pdf.formenv = None |
class ConvNoDepthwiseLayerSelector(LayerSelector):
def select(self, layer_db: LayerDatabase, modules_to_ignore: List[tf.keras.layers.Layer]):
selected_layers = []
for layer in layer_db:
if (layer.module in modules_to_ignore):
continue
if (isinstance(layer.module, tf.keras.layers.Conv2D) and (not isinstance(layer.module, tf.keras.layers.DepthwiseConv2D))):
selected_layers.append(layer)
layer_db.mark_picked_layers(selected_layers) |
def _add_realm_args(parser):
group = parser.add_argument_group(title='realm')
group.add_argument('--ict-head-size', type=int, default=None, help='Size of block embeddings to be used in ICT and REALM (paper default: 128)')
group.add_argument('--ict-load', type=str, default=None, help='Directory containing an ICTBertModel checkpoint')
group.add_argument('--bert-load', type=str, default=None, help='Directory containing an BertModel checkpoint (needed to start ICT and REALM)')
group.add_argument('--titles-data-path', type=str, default=None, help='Path to titles dataset used for ICT')
group.add_argument('--query-in-block-prob', type=float, default=0.1, help='Probability of keeping query in block for ICT dataset')
group.add_argument('--use-one-sent-docs', action='store_true', help='Whether to use one sentence documents in ICT')
group.add_argument('--report-topk-accuracies', nargs='+', default=[], help="Which top-k accuracies to report (e.g. '1 5 20')")
group.add_argument('--faiss-use-gpu', action='store_true', help='Whether create the FaissMIPSIndex on GPU')
group.add_argument('--block-data-path', type=str, default=None, help='Where to save/load BlockData to/from')
group.add_argument('--indexer-batch-size', type=int, default=128, help='How large of batches to use when doing indexing jobs')
group.add_argument('--indexer-log-interval', type=int, default=1000, help='After how many batches should the indexer report progress')
return parser |
class WMS_NASA_GIBS(WMSBase):
layer_prefix = 'NASA_GIBS_'
name = 'NASA_GIBS'
def __init__(self, m=None):
self.m = m
if (self.m.get_crs(3857) == m.crs_plot):
self.usewms = self.m.add_wms.NASA_GIBS.EPSG_3857
elif (self.m.get_crs(3031) == m.crs_plot):
self.usewms = self.m.add_wms.NASA_GIBS.EPSG_3031
elif (self.m.get_crs(3413) == m.crs_plot):
self.usewms = self.m.add_wms.NASA_GIBS.EPSG_3413
elif (self.m.get_crs(4326) == m.crs_plot):
self.usewms = self.m.add_wms.NASA_GIBS.EPSG_4326
else:
self.usewms = self.m.add_wms.NASA_GIBS.EPSG_3857
try:
self.wmslayers = [key for key in self.usewms.add_layer.__dict__.keys() if (not ((key in ['m']) or key.startswith('_')))]
except Exception:
self.wmslayers = []
_log_problem(self.name)
def do_add_layer(self, wmslayer, layer):
wms = getattr(self.usewms.add_layer, wmslayer)
wms(layer=layer, transparent=True)
self.ask_for_legend(wms, wmslayer) |
class CookieJar():
def __init__(self, pluginname, account=None):
self.cookies = {}
self.plugin = pluginname
self.account = account
def add_cookies(self, clist):
for c in clist:
name = c.split('\t')[5]
self.cookies[name] = c
def get_cookies(self):
return list(self.cookies.values())
def parse_cookie(self, name):
if (name in self.cookies):
return self.cookies[name].split('\t')[6]
else:
return None
def get_cookie(self, name):
return self.parse_cookie(name)
def set_cookie(self, domain, name, value, path='/', exp=(time.time() + timedelta(days=31).total_seconds())):
self.cookies[name] = f'.{domain} TRUE {path} FALSE {exp} {name} {value}'
def clear(self):
self.cookies = {} |
class DebertaTokenizer(PreTrainedTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['input_ids', 'attention_mask', 'token_type_ids']
def __init__(self, vocab_file, merges_file, errors='replace', bos_token='[CLS]', eos_token='[SEP]', sep_token='[SEP]', cls_token='[CLS]', unk_token='[UNK]', pad_token='[PAD]', mask_token='[MASK]', add_prefix_space=False, add_bos_token=False, **kwargs):
bos_token = (AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token)
eos_token = (AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token)
sep_token = (AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token)
cls_token = (AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token)
unk_token = (AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token)
pad_token = (AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token)
mask_token = (AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token)
super().__init__(errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, add_bos_token=add_bos_token, **kwargs)
self.add_bos_token = add_bos_token
with open(vocab_file, encoding='utf-8') as vocab_handle:
self.encoder = json.load(vocab_handle)
self.decoder = {v: k for (k, v) in self.encoder.items()}
self.errors = errors
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
with open(merges_file, encoding='utf-8') as merges_handle:
bpe_merges = merges_handle.read().split('\n')[1:(- 1)]
bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
self.add_prefix_space = add_prefix_space
self.pat = re.compile("'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+")
@property
def vocab_size(self):
return len(self.encoder)
def get_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
def bpe(self, token):
if (token in self.cache):
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if (not pairs):
return token
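# repeatedly merge the lowest-ranked adjacent symbol pair until no known BPE merge applies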
while True:
bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
if (bigram not in self.bpe_ranks):
break
(first, second) = bigram
new_word = []
i = 0
while (i < len(word)):
try:
j = word.index(first, i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
new_word.append((first + second))
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if (len(word) == 1):
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
if (token_ids_1 is None):
return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if (token_ids_1 is None):
return (([1] + ([0] * len(token_ids_0))) + [1])
return (((([1] + ([0] * len(token_ids_0))) + [1]) + ([0] * len(token_ids_1))) + [1])
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if (token_ids_1 is None):
return (len(((cls + token_ids_0) + sep)) * [0])
return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))
def _tokenize(self, text):
bpe_tokens = []
for token in re.findall(self.pat, text):
token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
bpe_tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' ')))
return bpe_tokens
def _convert_token_to_id(self, token):
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
return self.decoder.get(index)
def convert_tokens_to_string(self, tokens):
text = ''.join(tokens)
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
return text
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
if (not os.path.isdir(save_directory)):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
merge_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']))
with open(vocab_file, 'w', encoding='utf-8') as f:
f.write((json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n'))
index = 0
with open(merge_file, 'w', encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=(lambda kv: kv[1])):
if (index != token_index):
logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!')
index = token_index
writer.write((' '.join(bpe_tokens) + '\n'))
index += 1
return (vocab_file, merge_file)
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
if ((is_split_into_words or add_prefix_space) and ((len(text) > 0) and (not text[0].isspace()))):
text = (' ' + text)
return (text, kwargs)
def _build_conversation_input_ids(self, conversation: 'Conversation') -> List[int]:
input_ids = []
for (is_user, text) in conversation.iter_texts():
input_ids.extend((self.encode(text, add_special_tokens=False) + [self.eos_token_id]))
if (len(input_ids) > self.model_max_length):
input_ids = input_ids[(- self.model_max_length):]
return input_ids |
class TestSimpleSearcher():
__test__ = False
def __init__(self):
self.query_text = np.random.random(text_vector_size).tolist()
self.query_image = np.random.random(image_vector_size).tolist()
self.query_code = np.random.random(code_vector_size).tolist()
def simple_search_text(self, client: QdrantBase) -> List[models.ScoredPoint]:
return client.search(collection_name=COLLECTION_NAME, query_vector=('text', self.query_text), with_payload=True, limit=10)
def simple_search_image(self, client: QdrantBase) -> List[models.ScoredPoint]:
return client.search(collection_name=COLLECTION_NAME, query_vector=('image', self.query_image), with_payload=True, limit=10)
def simple_search_code(self, client: QdrantBase) -> List[models.ScoredPoint]:
return client.search(collection_name=COLLECTION_NAME, query_vector=('code', self.query_code), with_payload=True, limit=10)
def simple_search_text_offset(self, client: QdrantBase) -> List[models.ScoredPoint]:
return client.search(collection_name=COLLECTION_NAME, query_vector=('text', self.query_text), with_payload=True, limit=10, offset=10)
def simple_search_text_with_vector(self, client: QdrantBase) -> List[models.ScoredPoint]:
return client.search(collection_name=COLLECTION_NAME, query_vector=('text', self.query_text), with_payload=True, with_vectors=True, limit=10, offset=10)
def search_score_threshold(self, client: QdrantBase) -> List[models.ScoredPoint]:
res1 = client.search(collection_name=COLLECTION_NAME, query_vector=('text', self.query_text), with_payload=True, limit=10, score_threshold=0.9)
res2 = client.search(collection_name=COLLECTION_NAME, query_vector=('text', self.query_text), with_payload=True, limit=10, score_threshold=0.95)
res3 = client.search(collection_name=COLLECTION_NAME, query_vector=('text', self.query_text), with_payload=True, limit=10, score_threshold=0.1)
return ((res1 + res2) + res3)
def simple_search_text_select_payload(self, client: QdrantBase) -> List[models.ScoredPoint]:
return client.search(collection_name=COLLECTION_NAME, query_vector=('text', self.query_text), with_payload=['text_array', 'nested.id'], limit=10)
def search_payload_exclude(self, client: QdrantBase) -> List[models.ScoredPoint]:
return client.search(collection_name=COLLECTION_NAME, query_vector=('text', self.query_text), with_payload=models.PayloadSelectorExclude(exclude=['text_array', 'nested.id']), limit=10)
def simple_search_image_select_vector(self, client: QdrantBase) -> List[models.ScoredPoint]:
return client.search(collection_name=COLLECTION_NAME, query_vector=('image', self.query_image), with_payload=False, with_vectors=['image', 'code'], limit=10)
def filter_search_text(self, client: QdrantBase, query_filter: models.Filter) -> List[models.ScoredPoint]:
return client.search(collection_name=COLLECTION_NAME, query_vector=('text', self.query_text), query_filter=query_filter, with_payload=True, limit=10)
def filter_search_text_single(self, client: QdrantBase, query_filter: models.Filter) -> List[models.ScoredPoint]:
return client.search(collection_name=COLLECTION_NAME, query_vector=self.query_text, query_filter=query_filter, with_payload=True, with_vectors=True, limit=10) |
class DenseConv(nn.Module):
def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, act_type='relu'):
super(DenseConv, self).__init__()
self.conv = nn.Conv2d(inplanes, planes, kernel_size=kernel_size, stride=stride, padding=get_same_padding(kernel_size, dilation), dilation=dilation)
self.act = (get_act(act_type=act_type) if act_type else None)
def forward(self, x):
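# dense connectivity: append the new feature maps to the input along the channel dimension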
output = self.act(self.conv(x))
return torch.cat((x, output), 1) |
def test_title_normalization():
title = 'abcd'
body = '1234'
assert (util.normalize_title(title, body) == title)
title = '[2.7] bpo-29243: Fix Makefile with respect to --enable-optimizations ...'
body = '...(GH-1478)\r\n\r\nstuff'
expected = '[2.7] bpo-29243: Fix Makefile with respect to --enable-optimizations (GH-1478)'
assert (util.normalize_title(title, body) == expected)
title = '[2.7] bpo-29243: Fix Makefile with respect to --enable-optimizations ...'
body = '...(GH-1478)'
assert (util.normalize_title(title, body) == expected)
title = '[2.7] bpo-29243: Fix Makefile with respect to --enable-optimizations (GH-14...'
body = '...78)'
assert (util.normalize_title(title, body) == expected) |
def model_based_prob_help(A_k, trans_list, img_db_path, d, J, sigma_sq, n, i, k, alpha, xi):
X = get_image_db(img_db_path)
X_i = X[d['v']]
w_i = X[d['m']]
result = [None for _ in trans_list]
for (j, phi) in enumerate(trans_list):
ln_result = (ln_p_xo(A_k=A_k, phi=phi, X_i=X_i, w_i=w_i, J=J, sigma_sq=sigma_sq, n=n, k=k, i=i) + ln_p_k_phi(phi=phi, alpha=alpha, xi=xi))
result[j] = ln_result
return {'ln_prob': result, 'i': i, 'k': k} |
@node_rewriter([IncSubtensor], inplace=True)
def local_inplace_setsubtensor(fgraph, node):
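# rewrite a non-inplace IncSubtensor into its inplace variant, preserving tags and stack traces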
if (isinstance(node.op, IncSubtensor) and (not node.op.inplace)):
dta = node.op.destroyhandler_tolerate_aliased
new_op = node.op.__class__(node.op.idx_list, inplace=True, set_instead_of_inc=node.op.set_instead_of_inc, destroyhandler_tolerate_aliased=dta)
new_node = new_op(*node.inputs)
val = getattr(node.outputs[0].tag, 'nan_guard_mode_check', True)
new_node.tag.nan_guard_mode_check = val
copy_stack_trace(node.outputs, new_node)
return [new_node]
return False |
def test_env_site_select_first(tmp_path: Path) -> None:
fallback = (tmp_path / 'fallback')
fallback.mkdir(parents=True)
site_packages = SitePackages(tmp_path, fallbacks=[fallback])
candidates = site_packages.make_candidates(Path('hello.txt'), writable_only=True)
assert (len(candidates) == 2)
assert (len(site_packages.find(Path('hello.txt'))) == 0)
content = str(uuid.uuid4())
site_packages.write_text(Path('hello.txt'), content, encoding='utf-8')
assert (site_packages.path / 'hello.txt').exists()
assert (not (fallback / 'hello.txt').exists())
assert (len(site_packages.find(Path('hello.txt'))) == 1) |
class TestNamedTuple(TestNameCheckVisitorBase):
@assert_passes()
def test_args(self):
from typing import NamedTuple
class NT(NamedTuple):
field: int
class CustomNew():
def __new__(self, a: int) -> 'CustomNew':
return super().__new__(self)
def make_nt() -> NT:
return NT(field=3)
def capybara():
NT(filed=3)
nt2 = make_nt()
assert_is_value(nt2, TypedValue(NT))
assert_is_value(nt2.field, TypedValue(int))
CustomNew('x')
cn = CustomNew(a=3)
assert_is_value(cn, TypedValue(CustomNew)) |
class NetworkBlock(nn.Module):
def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
layers = []
for i in range(nb_layers):
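# the first block maps in_planes to out_planes with the given stride; later blocks keep out_planes and stride 1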
layers.append(block((((i == 0) and in_planes) or out_planes), out_planes, (((i == 0) and stride) or 1), dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x) |
class RouterBackendTest(CreateDataMixin, TestCase):
def setUp(self):
self.router = BlockingRouter(apps=[], backends={})
def test_valid_backend_path(self):
backend = self.router.add_backend('backend', 'rapidsms.backends.base.BackendBase')
self.assertEqual(1, len(self.router.backends.keys()))
self.assertEqual(backend, self.router.backends['backend'])
def test_router_downcases_backend_configs(self):
test_backend = 'rapidsms.backends.base.BackendBase'
test_conf = {'a': 1, 'B': 2, 'Cc': 3}
backend = self.router.add_backend('backend', test_backend, test_conf)
self.assertEqual(len(backend._config), 3)
self.assertIn('a', backend._config)
self.assertIn('b', backend._config)
self.assertIn('cc', backend._config)
self.assertNotIn('B', backend._config)
self.assertNotIn('Cc', backend._config)
def test_add_backend_class(self):
self.router.add_backend('backend', BackendBase)
self.assertEqual(1, len(self.router.backends.keys()))
self.assertIn('backend', self.router.backends.keys())
self.assertEqual('backend', self.router.backends['backend'].name)
def test_router_not_configured_with_backend(self):
args = ('missing-backend', '1234', 'hello', [''], {})
self.assertRaises(MessageSendingError, self.router.send_to_backend, *args)
def test_backend_send_raises_error(self):
backend = self.router.add_backend('backend', RaisesBackend)
args = (backend.model.name, '1234', 'hello', [''], {})
self.assertRaises(MessageSendingError, self.router.send_to_backend, *args)
@patch('rapidsms.router.blocking.router.logger')
def test_send_captures_exception(self, mock_logger):
backend = self.router.add_backend('backend', RaisesBackend)
msg = self.create_outgoing_message(backend=backend.model)
self.router.send_outgoing(msg)
mock_logger.exception.assert_called_once_with('backend encountered an error while sending.') |
class DatasetCatalog():
DATA_DIR = 'datasets'
DATASETS = {'kitti_train': {'root': 'kitti/training/'}, 'kitti_test': {'root': 'kitti/testing/'}}
@staticmethod
def get(name):
if ('kitti' in name):
data_dir = DatasetCatalog.DATA_DIR
attrs = DatasetCatalog.DATASETS[name]
args = dict(root=os.path.join(data_dir, attrs['root']))
return dict(factory='KITTIDataset', args=args)
raise RuntimeError('Dataset not available: {}'.format(name)) |
@pytest.mark.parametrize('url, valid, has_err_string', [('http://example.com/', True, False), ('', False, False), ('://', False, True)])  # first URL is a placeholder; the original value was truncated in the source
def test_raise_cmdexc_if_invalid(url, valid, has_err_string):
qurl = QUrl(url)
assert (qurl.isValid() == valid)
if valid:
urlutils.raise_cmdexc_if_invalid(qurl)
else:
assert (bool(qurl.errorString()) == has_err_string)
if has_err_string:
expected_text = ('Invalid URL - ' + qurl.errorString())
else:
expected_text = 'Invalid URL'
with pytest.raises(cmdutils.CommandError, match=expected_text):
urlutils.raise_cmdexc_if_invalid(qurl) |
class CallbackList(Callback):
def __init__(self, *args, with_header=True):
super(CallbackList, self).__init__(with_header=with_header)
assert all([issubclass(type(x), Callback) for x in args]), 'Callback inputs illegal: {}'.format(args)
self.callbacks = [callback for callback in args]
def __call__(self, epoch=None, batch=None, silent=False, **kwargs):
str_out = self.header(epoch, batch)
for callback in self.callbacks:
str_out += (callback(**kwargs, silent=True) + ' ')
if (not silent):
logging.info(str_out)
return str_out |
def test_consecutive_spacer(manager_nospawn):
config = GeomConf
config.screens = [libqtile.config.Screen(bottom=libqtile.bar.Bar([ExampleWidget(), libqtile.widget.Spacer(libqtile.bar.STRETCH), libqtile.widget.Spacer(libqtile.bar.STRETCH), ExampleWidget(), ExampleWidget(), libqtile.widget.Spacer(libqtile.bar.STRETCH), ExampleWidget()], 10))]
manager_nospawn.start(config)
i = manager_nospawn.c.bar['bottom'].info()
assert (i['widgets'][0]['offset'] == 0)
assert (i['widgets'][0]['width'] == 10)
assert (i['widgets'][1]['offset'] == 10)
assert (i['widgets'][1]['width'] == 190)
assert (i['widgets'][2]['offset'] == 200)
assert (i['widgets'][2]['width'] == 190)
assert (i['widgets'][3]['offset'] == 390)
assert (i['widgets'][3]['width'] == 10)
assert (i['widgets'][4]['offset'] == 400)
assert (i['widgets'][4]['width'] == 10)
assert (i['widgets'][5]['offset'] == 410)
assert (i['widgets'][5]['width'] == 380)
assert (i['widgets'][6]['offset'] == 790)
assert (i['widgets'][6]['width'] == 10)
libqtile.hook.clear() |
class SnapshotsStub(object):
def __init__(self, channel):
self.Create = channel.unary_unary('/qdrant.Snapshots/Create', request_serializer=snapshots__service__pb2.CreateSnapshotRequest.SerializeToString, response_deserializer=snapshots__service__pb2.CreateSnapshotResponse.FromString)
self.List = channel.unary_unary('/qdrant.Snapshots/List', request_serializer=snapshots__service__pb2.ListSnapshotsRequest.SerializeToString, response_deserializer=snapshots__service__pb2.ListSnapshotsResponse.FromString)
self.Delete = channel.unary_unary('/qdrant.Snapshots/Delete', request_serializer=snapshots__service__pb2.DeleteSnapshotRequest.SerializeToString, response_deserializer=snapshots__service__pb2.DeleteSnapshotResponse.FromString)
self.CreateFull = channel.unary_unary('/qdrant.Snapshots/CreateFull', request_serializer=snapshots__service__pb2.CreateFullSnapshotRequest.SerializeToString, response_deserializer=snapshots__service__pb2.CreateSnapshotResponse.FromString)
self.ListFull = channel.unary_unary('/qdrant.Snapshots/ListFull', request_serializer=snapshots__service__pb2.ListFullSnapshotsRequest.SerializeToString, response_deserializer=snapshots__service__pb2.ListSnapshotsResponse.FromString)
self.DeleteFull = channel.unary_unary('/qdrant.Snapshots/DeleteFull', request_serializer=snapshots__service__pb2.DeleteFullSnapshotRequest.SerializeToString, response_deserializer=snapshots__service__pb2.DeleteSnapshotResponse.FromString) |
class GaussLayer(nn.Module):
def __init__(self, in_features, out_features, bias=True, sigma=10):
super().__init__()
self.sigma = sigma
self.linear = nn.Linear(in_features, out_features, bias=bias)
def forward(self, input):
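# Gaussian activation: exp(-(sigma * linear(input))^2)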
return torch.exp((- ((self.sigma * self.linear(input)) ** 2))) |
@pytest.mark.usefixtures('patched_df')
def test_df_always_visible(fake_qtile, fake_window):
df2 = df.DF(visible_on_warn=False)
fakebar = FakeBar([df2], window=fake_window)
df2._configure(fake_qtile, fakebar)
text = df2.poll()
assert (text == '/ (38G|83%)')
df2.draw()
assert (df2.layout.colour == df2.foreground) |
class TMP4UpdateParents64Bit(TestCase):
original = os.path.join(DATA_DIR, '64bit.mp4')
def setUp(self):
self.filename = get_temp_copy(self.original)
def test_update_parents(self):
with open(self.filename, 'rb') as fileobj:
atoms = Atoms(fileobj)
self.assertEqual(77, atoms.atoms[0].length)
self.assertEqual(61, atoms.atoms[0].children[0].length)
tags = MP4Tags(atoms, fileobj)
tags['pgap'] = True
tags.save(self.filename, padding=(lambda x: 0))
with open(self.filename, 'rb') as fileobj:
atoms = Atoms(fileobj)
self.assertEqual(((77 + 25) + 8), atoms.atoms[0].length)
self.assertEqual(((61 + 25) + 8), atoms.atoms[0].children[0].length)
def tearDown(self):
os.unlink(self.filename) |
def density_fit(mf, auxbasis=None, mesh=None, with_df=None):
from pyscf.pbc.df import rsdf
if (with_df is None):
if (getattr(mf, 'kpts', None) is not None):
kpts = mf.kpts
else:
kpts = numpy.reshape(mf.kpt, (1, 3))
kpts = getattr(kpts, 'kpts', kpts)
with_df = rsdf.RSDF(mf.cell, kpts)
with_df.max_memory = mf.max_memory
with_df.stdout = mf.stdout
with_df.verbose = mf.verbose
with_df.auxbasis = auxbasis
if (mesh is not None):
with_df.mesh = mesh
mf = mf.copy()
mf.with_df = with_df
mf._eri = None
return mf |
class OrderedFactory(factory.Factory):
class Meta():
model = Ordered
@factory.post_generation
def zzz(obj: Ordered, create: bool, val: Any, **kwargs: Any) -> None:
obj.value = 'zzz'
@factory.post_generation
def aaa(obj: Ordered, create: bool, val: Any, **kwargs: Any) -> None:
obj.value = 'aaa' |
def test_requirement_lists_without_satisfied_resources(echoes_game_description, default_echoes_preset, echoes_game_patches):
def item(name):
return search.find_resource_info_with_long_name(echoes_game_description.resource_database.item, name)
state = echoes_game_description.game.generator.bootstrap.calculate_starting_state(echoes_game_description, echoes_game_patches, default_echoes_preset.configuration)
state.resources.add_resource_gain([(item('Seeker Launcher'), 1), (item('Space Jump Boots'), 1)])
uncollected_resources: set[ResourceInfo] = set()
possible_sets = [RequirementSet([RequirementList([ResourceRequirement.simple(item('Dark Visor')), ResourceRequirement.create(item('Missile'), 5, False), ResourceRequirement.simple(item('Seeker Launcher'))]), RequirementList([ResourceRequirement.simple(item('Screw Attack')), ResourceRequirement.simple(item('Space Jump Boots'))]), RequirementList([ResourceRequirement.simple(item('Power Bomb')), ResourceRequirement.simple(item('Boost Ball'))])]), RequirementSet([RequirementList([ResourceRequirement.simple(item('Power Bomb')), ResourceRequirement.simple(item('Boost Ball'))]), RequirementList([ResourceRequirement.simple(item('Spider Ball')), ResourceRequirement.simple(item('Boost Ball'))])])]
result = pickup_list._requirement_lists_without_satisfied_resources(state, possible_sets, uncollected_resources)
assert (result == {RequirementList([ResourceRequirement.simple(item('Dark Visor')), ResourceRequirement.create(item('Missile'), 5, False)]), RequirementList([ResourceRequirement.simple(item('Screw Attack'))]), RequirementList([ResourceRequirement.simple(item('Power Bomb')), ResourceRequirement.simple(item('Boost Ball'))]), RequirementList([ResourceRequirement.simple(item('Spider Ball')), ResourceRequirement.simple(item('Boost Ball'))])}) |
class DetailedItinerariesComputer(BaseTravelTimeMatrixComputer):
COLUMNS = (['from_id', 'to_id', 'option'] + Trip.COLUMNS)
def __init__(self, transport_network, origins=None, destinations=None, snap_to_network=False, force_all_to_all=False, **kwargs):
super().__init__(transport_network, origins, destinations, snap_to_network, **kwargs)
if (destinations is None):
self.all_to_all = True
if self.verbose:
warnings.warn('No destinations specified, computing an all-to-all matrix', RuntimeWarning)
elif (len(origins) != len(destinations)):
self.all_to_all = True
if self.verbose:
warnings.warn('Origins and destinations are of different length, computing an all-to-all matrix', RuntimeWarning)
elif origins.equals(destinations):
self.all_to_all = True
if self.verbose:
warnings.warn('Origins and destinations are identical, computing an all-to-all matrix', RuntimeWarning)
else:
self.all_to_all = force_all_to_all
def compute_travel_details(self):
self._prepare_origins_destinations()
if ([mode for mode in self.request.transport_modes if mode.is_transit_mode] and (not ACCURATE_GEOMETRIES)):
warnings.warn('R5 has been compiled with `TransitLayer.SAVE_SHAPES = false` (the default). The geometries of public transport routes are inaccurate (straight lines between stops), and distances can not be computed.', RuntimeWarning)
with joblib.Parallel(prefer='threads', verbose=(10 * self.verbose), n_jobs=self.NUM_THREADS) as parallel:
matrices = parallel((joblib.delayed(self._travel_details_per_od_pair)(from_id, to_id) for (_, (from_id, to_id)) in self.od_pairs.iterrows()))
od_matrix = pandas.concat([matrix.astype(matrices[0].dtypes) for matrix in matrices], ignore_index=True)
od_matrix = geopandas.GeoDataFrame(od_matrix, crs=self._origins_crs)
return od_matrix
def _prepare_origins_destinations(self):
super()._prepare_origins_destinations()
if self.all_to_all:
self.od_pairs = self.origins[['id']].join(self.destinations[['id']], how='cross', lsuffix='_origin', rsuffix='_destination')
else:
self.od_pairs = pandas.DataFrame({'id_origin': self.origins.id.values, 'id_destination': self.destinations.id.values})
def _travel_details_per_od_pair(self, from_id, to_id):
origin = self.origins[(self.origins.id == from_id)]
destination = self.destinations[(self.destinations.id == to_id)]
request = copy.copy(self.request)
request._regional_task.fromLat = origin.geometry.item().y
request._regional_task.fromLon = origin.geometry.item().x
request._regional_task.toLat = destination.geometry.item().y
request._regional_task.toLon = destination.geometry.item().x
trip_planner = TripPlanner(self.transport_network, request)
trips = trip_planner.trips
trips = [([from_id, to_id, option] + segment) for (option, trip) in enumerate(trips) for segment in trip.as_table()]
return pandas.DataFrame(trips, columns=self.COLUMNS) |
class MemoryFLACFileStream(UnclosedFLACFileStream):
def __init__(self, path, file):
self.file = file
self.file_size = 0
if (getattr(self.file, 'seek', None) and getattr(self.file, 'tell', None)):
self.seekable = True
self.file.seek(0, 2)
self.file_size = self.file.tell()
self.file.seek(0)
else:
warnings.warn(f'Warning: {file} file object is not seekable.')
self.seekable = False
self.decoder = pyogg.flac.FLAC__stream_decoder_new()
self.client_data = c_void_p()
self.channels = None
self.frequency = None
self.total_samples = None
self.buffer = None
self.bytes_written = None
self.write_callback_ = pyogg.flac.FLAC__StreamDecoderWriteCallback(self.write_callback)
self.metadata_callback_ = pyogg.flac.FLAC__StreamDecoderMetadataCallback(self.metadata_callback)
self.error_callback_ = pyogg.flac.FLAC__StreamDecoderErrorCallback(self.error_callback)
self.read_callback_ = pyogg.flac.FLAC__StreamDecoderReadCallback(self.read_callback)
if self.seekable:
self.seek_callback_ = pyogg.flac.FLAC__StreamDecoderSeekCallback(self.seek_callback)
self.tell_callback_ = pyogg.flac.FLAC__StreamDecoderTellCallback(self.tell_callback)
self.length_callback_ = pyogg.flac.FLAC__StreamDecoderLengthCallback(self.length_callback)
self.eof_callback_ = pyogg.flac.FLAC__StreamDecoderEofCallback(self.eof_callback)
else:
self.seek_callback_ = None
self.tell_callback_ = None
self.length_callback_ = None
self.eof_callback_ = None
init_status = pyogg.flac.libflac.FLAC__stream_decoder_init_stream(self.decoder, self.read_callback_, self.seek_callback_, self.tell_callback_, self.length_callback_, self.eof_callback_, self.write_callback_, self.metadata_callback_, self.error_callback_, self.client_data)
if init_status:
raise DecodeException("An error occurred when trying to open '{}': {}".format(path, pyogg.flac.FLAC__StreamDecoderInitStatusEnum[init_status]))
metadata_status = pyogg.flac.FLAC__stream_decoder_process_until_end_of_metadata(self.decoder)
if (not metadata_status):
raise DecodeException('An error occurred when trying to decode the metadata of {}'.format(path))
def read_callback(self, decoder, buffer, size, data):
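# FLAC read callback: return 0 to continue, 1 at end of stream, 2 to abort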
chunk = size.contents.value
data = self.file.read(chunk)
read_size = len(data)
memmove(buffer, data, read_size)
size.contents.value = read_size
if (read_size > 0):
return 0
elif (read_size == 0):
return 1
else:
return 2
def seek_callback(self, decoder, offset, data):
pos = self.file.seek(offset, 0)
if (pos < 0):
return 1
else:
return 0
def tell_callback(self, decoder, offset, data):
pos = self.file.tell()
if (pos < 0):
return 1
else:
offset.contents.value = pos
return 0
def length_callback(self, decoder, length, data):
if (self.file_size == 0):
return 1
else:
length.contents.value = self.file_size
return 0
def eof_callback(self, decoder, data):
return (self.file.tell() >= self.file_size) |
def create_vector(site_folder):
(site, num) = site_folder
file_dir = ((fold + '/') + site)
file = list(os.walk(file_dir))[0][(- 1)][:]
label = []
data = []
id = []
rows = {}
with open((fold + '/abide_preprocessed.csv'), newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=' ', quotechar='|')
for i in reader:
if (list(i[0].split(','))[5] in ['UM_1', 'NYU', 'USM', 'UCLA_1']):
(name, lab) = list(i[0].split(','))[6:8]
lab = (int(lab) % 2)
rows[name] = lab
for filename in file:
tmp = dd.io.load((fold + '//{}//{}'.format(site, filename)))
tri = np.triu(tmp, 1).reshape((- 1))
tri = tri[(tri != 0)]
tri[(tri < 0)] = 0
data.append(tri)
label.append((int(rows[filename[:num]]) % 2))
id.append(filename)
data = np.array(data)
label = np.array(label, dtype=np.int32)
id = np.array(id)
dataset = {'data': data, 'label': label, 'id': id}
dd.io.save((fold + '/{}.h5'.format(site)), dataset) |
def test_dict_keyed_param_not_dotted():
param = 'ShipmentRequestDetails.PackageDimensions'
dict_from = {'Length': 5, 'Width': 5, 'Height': 5, 'Unit': 'inches'}
result = dict_keyed_param(param, dict_from)
expected = {'ShipmentRequestDetails.PackageDimensions.Length': 5, 'ShipmentRequestDetails.PackageDimensions.Width': 5, 'ShipmentRequestDetails.PackageDimensions.Height': 5, 'ShipmentRequestDetails.PackageDimensions.Unit': 'inches'}
assert (result == expected) |
def test_output_argument_full_path(runner, mocker):
mocker.patch('products.vmware_cb_response.CbResponse._authenticate')
with runner.isolated_filesystem() as temp_dir:
full_output_path = os.path.join(temp_dir, 'full_output.csv')
runner.invoke(cli, ['--output', full_output_path])
assert os.path.exists(full_output_path) |
def test_backward(n_times=1000):
device = torch.device('cuda')
input3d = torch.rand((1, 32, 32, 32, 32), requires_grad=True).to(device)
label = torch.rand((1, 32, 32, 32, 32), requires_grad=True).to(device)
deform_conv_pack = DeformConvPack(32, 32, 3, 1, 1).to(device)
optimizer = torch.optim.SGD(deform_conv_pack.parameters(), lr=0.01, momentum=0.9)
loss_func = nn.BCELoss()
time_deform_conv = measure_backward_time(deform_conv_pack, input3d, label, optimizer, loss_func, n_times)
print(f"{'Time backward deform conv 3d:':<30} {time_deform_conv:>6.2f} ms") |