code stringlengths 281 23.7M |
|---|
class _PrimitiveTemplateBase(TypeTemplate):
    """Common base for primitive type templates.

    Primitive templates carry no fields; two templates compare equal
    exactly when they are instances of the same concrete class.
    """
    public_proxy = ('encode', 'decode')

    def __eq__(self, other):
        # Interchangeable iff the concrete template class matches.
        return type(self) is type(other)

    def __hash__(self):
        return hash(type(self))

    def get_name(self):
        # Concrete classes are named e.g. "_Int"; strip the leading underscore.
        return type(self).__name__[1:]

    def get_kind(self):
        return 'primitive'

    def get_field_names(self):
        # Primitives expose no fields.
        return []

    def validate_field(self, name, field):
        raise NotImplementedError

    def validate_predicate_expr(self, self_expr, predicate_expr):
        """Check that *predicate_expr* is allowed for this primitive and
        that each of its boundary values is an element of the type."""
        template = predicate_expr.template
        if type(template) not in self._valid_predicates:
            raise TypeError(str(predicate_expr))
        for boundary in template.iter_boundaries():
            if not self.is_element_expr(self_expr, boundary):
                raise TypeError(boundary)

    def validate_predicate(self, predicate):
        raise NotImplementedError
def iload_fh(f):
    """Parse RESP response information from file handle *f*, yielding one
    ChannelResponse per channel epoch found.

    Epochs for which no response stages could be parsed are reported with a
    warning and yielded with ``response=None`` so callers still see them.
    Raises RespError on invalid dates or unknown block types.
    """
    # parse3 yields one (station-block, channel-block, response-blocks)
    # triple per channel epoch in the RESP file.
    for (sc, cc, rcs) in parse3(f):
        # NSLC codes: network (field 16) and station (field 03) from the
        # station block; location (field 03, may be absent -> default b'')
        # and channel (field 04) from the channel block.
        nslc = (pcode(get1(sc, b'16')), pcode(get1(sc, b'03')), ploc(get1(cc, b'03', b'')), pcode(get1(cc, b'04')))
        try:
            # Epoch start (field 22) and end (field 23) times.
            tmin = pdate(get1(cc, b'22'))
            tmax = pdate(get1(cc, b'23'))
        except util.TimeStrError as e:
            raise RespError(('Invalid date in RESP information (%s).' % str(e)))
        # stage_elements maps stage number -> list of parsed elements.
        stage_elements = {}
        # -1 means "no stage seen yet"; blocks that do not declare a stage
        # number stick to the most recently seen one.
        istage = (- 1)
        for (block, content) in rcs:
            if (block not in bdefs):
                raise RespError(('Unknown block type found: %s' % block))
            # Each block parser returns (stage_number_or_-1, element_or_None).
            (istage_temp, x) = bdefs[block]['parse'](content)
            if (istage_temp != (- 1)):
                istage = istage_temp
            if (x is None):
                continue
            x.validate()
            if (istage not in stage_elements):
                stage_elements[istage] = []
            stage_elements[istage].append(x)
        istages = sorted(stage_elements.keys())
        stages = []
        totalgain = None
        for istage in istages:
            elements = stage_elements[istage]
            if (istage == 0):
                # Stage 0 carries the overall sensitivity (total gain),
                # not a real response stage.
                totalgain = gett1(elements, sxml.Gain)
            else:
                stage = sxml.ResponseStage(number=istage, poles_zeros_list=gett(elements, sxml.PolesZeros), coefficients_list=gett(elements, sxml.Coefficients), fir=gett1o(elements, sxml.FIR), decimation=gett1o(elements, sxml.Decimation), stage_gain=gett1o(elements, sxml.Gain))
                stages.append(stage)
        if stages:
            resp = sxml.Response(stage_list=stages)
            if totalgain:
                totalgain_value = totalgain.value
                totalgain_frequency = totalgain.frequency
            else:
                # No stage-0 gain reported: reconstruct the total gain as
                # the product of the per-stage gains. This is only well
                # defined when all stage gains share the same frequency.
                totalgain_value = 1.0
                gain_frequencies = []
                for stage in stages:
                    totalgain_value *= stage.stage_gain.value
                    gain_frequencies.append(stage.stage_gain.frequency)
                totalgain_frequency = gain_frequencies[0]
                if (not all(((f == totalgain_frequency) for f in gain_frequencies))):
                    # Inconsistent frequencies: leave sensitivity unset
                    # rather than report a misleading value.
                    logger.warning(('No total gain reported and inconsistent gain frequency values found in resp file for %s.%s.%s.%s: omitting total gain and frequency from created instrument sensitivity object.' % nslc))
                    totalgain_value = None
                    totalgain_frequency = None
            resp.instrument_sensitivity = sxml.Sensitivity(value=totalgain_value, frequency=totalgain_frequency, input_units=stages[0].input_units, output_units=stages[(- 1)].output_units)
            (yield ChannelResponse(codes=nslc, start_date=tmin, end_date=tmax, response=resp))
        else:
            # No usable stages: yield the epoch with an empty response.
            logger.warning('Incomplete response information for %s (%s - %s).', '.'.join(nslc), util.time_to_str(tmin), util.time_to_str(tmax))
            (yield ChannelResponse(codes=nslc, start_date=tmin, end_date=tmax, response=None))
class SimCLR(object):
    """SimCLR contrastive pre-training driver for the GlobalConUnet encoder.

    Builds the data loaders, encoder + projection head, NT-Xent criterion,
    Adam optimizer and cosine LR schedule from *config*, and provides the
    ``train`` loop with TensorBoard logging, periodic validation and
    best-model checkpointing.
    """

    def __init__(self, config):
        """Construct all training components from *config* (dict-like with
        paths, batch sizes, loss settings, epochs, etc.)."""
        self.config = config
        self.device = self._get_device()
        self.writer = SummaryWriter(os.path.join(self.config['save_dir'], 'tensorboard'))
        split_dir = os.path.join(self.config['base_dir'], 'splits.pkl')
        data_dir = os.path.join(self.config['base_dir'], 'preprocessed')
        print(data_dir)
        with open(split_dir, 'rb') as f:
            splits = pickle.load(f)
        # Self-supervised pre-training needs no labels, so all keys
        # (train + val + test) are used for training.
        tr_keys = ((splits[0]['train'] + splits[0]['val']) + splits[0]['test'])
        val_keys = splits[0]['val']
        self.train_loader = NumpyDataSet(data_dir, target_size=self.config['img_size'], batch_size=self.config['batch_size'], keys=tr_keys, do_reshuffle=True, mode='simclr')
        self.val_loader = NumpyDataSet(data_dir, target_size=self.config['img_size'], batch_size=self.config['val_batch_size'], keys=val_keys, do_reshuffle=True, mode='simclr')
        print(len(self.train_loader))
        self.model = GlobalConUnet()
        self.head = MLP(num_class=256)
        # Fix: the criterion was previously constructed twice; once is enough.
        self.nt_xent_criterion = NTXentLoss(self.device, **config['loss'])
        if (torch.cuda.device_count() > 1):
            print(("Let's use %d GPUs" % torch.cuda.device_count()))
            self.model = nn.DataParallel(self.model)
            self.head = nn.DataParallel(self.head)
        self.model.to(self.device)
        self.head.to(self.device)
        self.model = self._load_pre_trained_weights(self.model)
        # NOTE(review): weight_decay is eval()'d because the config stores it
        # as a string (e.g. '1e-6'); float() would be safer — confirm the
        # config format before changing, and never eval untrusted configs.
        self.optimizer = torch.optim.Adam(self.model.parameters(), 0.0003, weight_decay=eval(self.config['weight_decay']))

    def _get_device(self):
        """Return 'cuda' when a GPU is available, else 'cpu'."""
        device = ('cuda' if torch.cuda.is_available() else 'cpu')
        print('Running on:', device)
        return device

    def _step(self, model, head, xis, xjs, n_iter):
        """Compute the NT-Xent loss for one pair of augmented views.

        *n_iter* is accepted for interface compatibility but unused.
        """
        zis = head(model(xis))
        zjs = head(model(xjs))
        # Project onto the unit sphere before the contrastive loss.
        zis = F.normalize(zis, dim=1)
        zjs = F.normalize(zjs, dim=1)
        return self.nt_xent_criterion(zis, zjs)

    def train(self):
        """Run the full training loop, validating every
        config['eval_every_n_epochs'] epochs and checkpointing on the best
        validation loss."""
        model_checkpoints_folder = os.path.join(self.writer.log_dir, 'checkpoints')
        _save_config_file(model_checkpoints_folder)
        n_iter = 0
        valid_n_iter = 0
        best_valid_loss = np.inf
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer, T_max=len(self.train_loader), eta_min=0, last_epoch=(- 1))
        for epoch_counter in range(self.config['epochs']):
            print(('=====Training Epoch: %d =====' % epoch_counter))
            # Fix: _validate() switches the networks to eval mode and they
            # were never switched back, so training after the first
            # validation ran in eval mode (wrong dropout/batch-norm).
            self.model.train()
            self.head.train()
            for (i, (xis, xjs)) in enumerate(self.train_loader):
                self.optimizer.zero_grad()
                xis = xis['data'][0].float().to(self.device)
                xjs = xjs['data'][0].float().to(self.device)
                loss = self._step(self.model, self.head, xis, xjs, n_iter)
                if ((n_iter % self.config['log_every_n_steps']) == 0):
                    self.writer.add_scalar('train_loss', loss, global_step=n_iter)
                    print('Train:[{0}][{1}][{2}] loss: {loss:.4f}'.format(epoch_counter, i, len(self.train_loader), loss=loss.item()))
                loss.backward()
                self.optimizer.step()
                n_iter += 1
            print('===== Validation =====')
            if ((epoch_counter % self.config['eval_every_n_epochs']) == 0):
                valid_loss = self._validate(self.val_loader)
                print('Val:[{0}] loss: {loss:.4f}'.format(epoch_counter, loss=valid_loss))
                if (valid_loss < best_valid_loss):
                    best_valid_loss = valid_loss
                    torch.save(self.model.state_dict(), os.path.join(self.config['save_dir'], 'b_{}_model.pth'.format(self.config['batch_size'])))
                self.writer.add_scalar('validation_loss', valid_loss, global_step=valid_n_iter)
                valid_n_iter += 1
            # Warm-up: only start decaying the LR after 10 epochs.
            if (epoch_counter >= 10):
                scheduler.step()
                # Fix: get_lr() is deprecated and returns misleading values
                # outside of step(); get_last_lr() is the supported API.
                self.writer.add_scalar('cosine_lr_decay', scheduler.get_last_lr()[0], global_step=n_iter)

    def _load_pre_trained_weights(self, model):
        """Load weights from runs/<fine_tune_from>/checkpoints/model.pth
        if present; otherwise train from scratch."""
        try:
            checkpoints_folder = os.path.join('./runs', self.config['fine_tune_from'], 'checkpoints')
            state_dict = torch.load(os.path.join(checkpoints_folder, 'model.pth'))
            model.load_state_dict(state_dict)
            print('Loaded pre-trained model with success.')
        except FileNotFoundError:
            print('Pre-trained weights not found. Training from scratch.')
        return model

    def _validate(self, valid_loader):
        """Return the mean NT-Xent loss over *valid_loader* (no gradients)."""
        with torch.no_grad():
            self.model.eval()
            # Fix: the projection head was previously left in train mode
            # during validation, making the metric non-deterministic.
            self.head.eval()
            valid_loss = 0.0
            counter = 0
            for (xis, xjs) in valid_loader:
                xis = xis['data'][0].float().to(self.device)
                xjs = xjs['data'][0].float().to(self.device)
                loss = self._step(self.model, self.head, xis, xjs, counter)
                valid_loss += loss.item()
                counter += 1
            # Fix: guard against an empty loader (ZeroDivisionError).
            if counter:
                valid_loss /= counter
        return valid_loss
def open_frag_dbs(databases_options):
    """Open the fragment database(s) named in *databases_options*.

    Returns a SingleDatabase for exactly one path, otherwise a
    MultipleDatabases wrapper; raises click.BadArgumentUsage when none
    were specified.
    """
    dbs = databases_options.databases
    if not dbs:
        raise click.BadArgumentUsage('must specify at least one fragment database')
    return SingleDatabase(dbs[0]) if len(dbs) == 1 else MultipleDatabases(dbs)
def test_permutation_generator():
    """generate_permutations() yields the expected permutation lists,
    both truncated (default) and untruncated."""
    expected_by_size = {
        3: [[0, 1, 2], [1, 2, 0]],
        4: [[0, 1, 2, 3], [1, 3, 0, 2]],
        6: [[0, 1, 2, 3, 4, 5], [1, 3, 0, 5, 2, 4], [3, 5, 1, 4, 0, 2]],
    }
    for size, expected in expected_by_size.items():
        perms = generate_permutations(size)
        assert len(perms) == len(expected)
        for got, want in zip(perms, expected):
            assert got == want
    # Without truncation all five permutations are produced; spot-check two.
    perms = generate_permutations(4, no_truncation=True)
    assert len(perms) == 5
    assert perms[1] == [1, 0, 3, 2]
    assert perms[3] == [3, 1, 2, 0]
class QiskitNatureLogging():
    """Helpers for configuring logging levels and handlers per logger name."""

    LOG_FORMAT = '%(asctime)s:%(name)s:%(levelname)s: %(message)s'

    def get_levels_for_names(self, names: List[str]) -> Dict[str, int]:
        """Return the effective logging level for each named logger."""
        return {name: python_logging.getLogger(name).getEffectiveLevel() for name in names}

    def set_levels_for_names(self, name_levels: Dict[str, int], add_default_handler: bool = True) -> None:
        """Set the level for each named logger, disable propagation, and
        optionally attach the default stream handler if missing."""
        for logger_name, logger_level in name_levels.items():
            logger = python_logging.getLogger(logger_name)
            logger.setLevel(logger_level)
            logger.propagate = False
            has_default = any(isinstance(existing, _DefaultStreamHandler) for existing in logger.handlers)
            if add_default_handler and not has_default:
                self.add_handler(logger_name, _DefaultStreamHandler())

    def add_handler(self, name: str, handler: python_logging.Handler, formatter: Optional[python_logging.Formatter] = None) -> None:
        """Attach *handler* (optionally with *formatter*) to the named logger."""
        if formatter is not None:
            handler.setFormatter(formatter)
        python_logging.getLogger(name).addHandler(handler)

    def remove_handler(self, name: str, handler: python_logging.Handler) -> None:
        """Close *handler* and detach it from the named logger."""
        handler.close()
        python_logging.getLogger(name).removeHandler(handler)

    def remove_all_handlers(self, names: List[str]) -> None:
        """Remove every handler from each named logger."""
        for name in names:
            # Copy: remove_handler mutates the handler list while we iterate.
            for handler in list(python_logging.getLogger(name).handlers):
                self.remove_handler(name, handler=handler)

    def remove_default_handler(self, names: List[str]) -> None:
        """Remove only the default stream handlers from each named logger."""
        for name in names:
            for handler in list(python_logging.getLogger(name).handlers):
                if isinstance(handler, _DefaultStreamHandler):
                    self.remove_handler(name, handler=handler)

    def log_to_file(self, names: List[str], path: str, mode: str = 'a') -> python_logging.FileHandler:
        """Attach one shared FileHandler at *path* to every named logger
        and return it so the caller can remove it later."""
        handler = python_logging.FileHandler(os.path.expanduser(path), mode=mode)
        formatter = python_logging.Formatter(fmt=QiskitNatureLogging.LOG_FORMAT)
        for name in names:
            self.add_handler(name, handler, formatter)
        return handler
def run_tests():
    """Discover test_*.py modules under the module-global *testfolder*,
    run them in one suite, then stop coverage and emit reports."""
    excludes = []
    suite = unittest.TestSuite()
    sys.path.append(testfolder)
    for root, dirs, files in os.walk(testfolder):
        for filename in files:
            if not (filename.startswith('test_') and filename.endswith('.py')):
                continue
            module_name = filename.replace('.py', '')
            if module_name.lower() in excludes:
                continue
            imported_mod = __import__(module_name, globals(), locals())
            suite.addTests(unittest.defaultTestLoader.loadTestsFromModule(imported_mod))
    unittest.TextTestRunner(verbosity=1).run(suite)
    cov.stop()
    print(cov.report())
    cov.html_report(directory=os.path.join(package_root, 'Coverage_report'), omit=[os.path.join(package_root, 'pywinauto', '*tests', '*.py'), os.path.join(package_root, 'pywinauto', 'six.py')])
def backward_G(args, G_target, D_target, gen_in):
    """Compute and backpropagate the generator loss for one step.

    Returns the scalar adversarial loss value (regularization excluded).
    NOTE(review): relies on module-level globals ``miner``, ``step``,
    ``alpha``, ``G_source`` and ``l2_reg`` — confirm they are defined by
    the enclosing training script.
    """
    # Optionally transform the latent input through the miner network.
    if args.miner:
        gen_in = miner(gen_in)
    fake_image_tgt = G_target(gen_in, step=step, alpha=alpha)
    predict = D_target(fake_image_tgt, step=step, alpha=alpha)
    # Non-saturating GAN generator loss: softplus(-D(G(z))).
    gen_loss = F.softplus((- predict)).mean()
    # Optional L2 penalty pulling G_target's weights towards G_source's.
    if (args.lambda_l2_G > 0):
        l2_G_loss = (l2_reg(G_source, G_target) * args.lambda_l2_G)
    else:
        l2_G_loss = 0
    (gen_loss + l2_G_loss).backward()
    # Report only the adversarial component.
    G_loss_val = gen_loss.item()
    return G_loss_val
# NOTE(review): the line below looks like a decorator that lost its leading
# '@with_fixtures' name — as written it calls `_fixtures(...)` at import
# time; confirm against the original reahl test module.
_fixtures(WebFixture, FormLayoutFixture)
def test_adding_basic_input(web_fixture, form_layout_fixture):
    """A TextInput added via FormLayout.add_input renders inside a form
    group with a correctly linked <label> and the expected input name."""
    fixture = form_layout_fixture
    class FormWithInputAddedUsingDefaults(Form):
        # Minimal form using a default FormLayout with one TextInput.
        def __init__(self, view):
            super().__init__(view, 'aform')
            self.use_layout(FormLayout())
            self.layout.add_input(TextInput(self, fixture.domain_object.fields.an_attribute))
    browser = Browser(web_fixture.new_wsgi_app(child_factory=FormWithInputAddedUsingDefaults.factory()))
    browser.open('/')
    # Label and input live together inside a form group.
    assert fixture.form_contains_form_group(browser)
    [label, input_widget] = fixture.get_form_group_children(browser)
    # The label precedes the input and is tied to it via for=/id=.
    assert (label.tag == 'label')
    assert (label.attrib['for'] == input_widget.attrib['id'])
    assert (label.text == 'Some input')
    assert (input_widget.tag == 'input')
    # Input names are namespaced by the form name.
    assert (input_widget.attrib['name'] == 'aform-an_attribute')
def events_for_expired_withdraws(channel_state: NettingChannelState, block_number: BlockNumber, pseudo_random_generator: random.Random) -> List[SendWithdrawExpired]:
    """Expire our pending withdraws whose sender-side expiration threshold
    has passed, returning the SendWithdrawExpired events to emit.

    Mutates ``channel_state.our_state``: bumps the nonce once per expired
    withdraw, moves each expired entry from ``withdraws_pending`` to
    ``withdraws_expired``, and may clear an initiated coop-settle that
    matches the expired withdraw.
    """
    events: List[SendWithdrawExpired] = []
    # Iterate over a copy: entries are deleted from the dict in the loop.
    for withdraw_state in list(channel_state.our_state.withdraws_pending.values()):
        withdraw_expired = is_withdraw_expired(block_number=block_number, expiration_threshold=get_sender_expiration_threshold(withdraw_state.expiration))
        # NOTE(review): the early break assumes pending withdraws are
        # ordered by ascending expiration, so the first non-expired entry
        # implies the rest are non-expired too — confirm that invariant.
        if (not withdraw_expired):
            break
        # Each expiration message consumes one nonce.
        nonce = get_next_nonce(channel_state.our_state)
        channel_state.our_state.nonce = nonce
        coop_settle = channel_state.our_state.initiated_coop_settle
        if (coop_settle is not None):
            # The expired withdraw was part of our initiated cooperative
            # settlement: abandon the coop-settle attempt.
            if ((coop_settle.total_withdraw_initiator == withdraw_state.total_withdraw) and (coop_settle.expiration == withdraw_state.expiration)):
                channel_state.our_state.initiated_coop_settle = None
        channel_state.our_state.withdraws_expired.append(ExpiredWithdrawState(withdraw_state.total_withdraw, withdraw_state.expiration, withdraw_state.nonce, withdraw_state.recipient_metadata))
        del channel_state.our_state.withdraws_pending[withdraw_state.total_withdraw]
        events.append(SendWithdrawExpired(recipient=channel_state.partner_state.address, recipient_metadata=withdraw_state.recipient_metadata, canonical_identifier=channel_state.canonical_identifier, message_identifier=message_identifier_from_prng(pseudo_random_generator), total_withdraw=withdraw_state.total_withdraw, participant=channel_state.our_state.address, expiration=withdraw_state.expiration, nonce=nonce))
    return events
def _get_format_filter(format_name: str, checker: jsonschema.FormatChecker, strategy: st.SearchStrategy[str]) -> st.SearchStrategy[str]:
    """Wrap *strategy* so that every drawn value is validated against the
    jsonschema *format_name* checker, failing loudly on invalid values."""

    def check_valid(string: str) -> str:
        # Validate the drawn value; a non-string is reported through the
        # same FormatError channel as a failed format check.
        try:
            if isinstance(string, str):
                checker.check(string, format=format_name)
            else:
                raise jsonschema.exceptions.FormatError(f'{string!r} is not a string')
        except jsonschema.exceptions.FormatError as err:
            raise InvalidArgument(f'Got string={string!r} from strategy {strategy!r}, but this is not a valid value for the {format_name!r} checker.') from err
        return string

    return strategy.map(check_valid)
class TestBasicsXarray(TestBasics):
    """Re-run the TestBasics suite against the xarray-backed test instrument."""

    def setup_method(self):
        """Create a clean xarray test Instrument and reference date/doy."""
        reload(pysat.instruments.pysat_testing_xarray)
        # Reference date of the test data set and its day of year.
        self.ref_time = pysat.instruments.pysat_testing_xarray._test_dates['']['']
        self.ref_doy = int(self.ref_time.strftime('%j'))
        self.testInst = pysat.Instrument(platform='pysat',
                                         name='testing_xarray',
                                         num_samples=10,
                                         clean_level='clean',
                                         update_files=True,
                                         use_header=True,
                                         **self.testing_kwargs)
        self.out = None
        return

    def teardown_method(self):
        """Drop the per-test attributes created by setup_method."""
        del self.testInst
        del self.out
        del self.ref_time
        del self.ref_doy
        return
def reduce_process(opts, output_queue, spool_length, out_file=None, file_size=0, file_compress=True):
    """Reducer process: pull (page_num, text) pairs off *output_queue* and
    write the texts out in strict page order.

    Pages may arrive out of order from the extractor workers, so they are
    spooled by page number and flushed sequentially.

    :param opts: options object, stored into the module-global ``options``
        so helpers in this module share the configuration.
    :param output_queue: queue of (page_num, text) pairs; a falsy sentinel
        terminates the loop.
    :param spool_length: shared counter used to report the backlog size.
    :param out_file: output directory; falls back to stdout when unset.
    :param file_size: maximum size of each output split file.
    :param file_compress: whether to compress output files.
    """
    global options
    options = opts
    createLogger(options.quiet, options.debug, options.log_file)
    if out_file:
        nextFile = NextFile(out_file)
        output = OutputSplitter(nextFile, file_size, file_compress)
    else:
        # Use binary stdout on Python 3 so UTF-8 bytes can be written.
        output = (sys.stdout if PY2 else sys.stdout.buffer)
        if file_compress:
            # Fix: logging.warn is a deprecated alias of logging.warning.
            logging.warning('writing to stdout, so no output compression (use an external tool)')
    interval_start = default_timer()
    # spool holds pages that arrived ahead of the next sequential number.
    spool = {}
    next_page = 0
    while True:
        if (next_page in spool):
            # The next page in order is available: flush it.
            output.write(spool.pop(next_page).encode('utf-8'))
            next_page += 1
            spool_length.value = len(spool)
            # Periodic throughput report.
            if ((next_page % report_period) == 0):
                interval_rate = (report_period / (default_timer() - interval_start))
                logging.info('Extracted %d articles (%.1f art/s)', next_page, interval_rate)
                interval_start = default_timer()
        else:
            pair = output_queue.get()
            if (not pair):
                # Sentinel received: all producers are done.
                break
            (page_num, text) = pair
            spool[page_num] = text
            spool_length.value = len(spool)
            if (len(spool) > 200):
                logging.debug('Collected %d, waiting: %d, %d', len(spool), next_page, (next_page == page_num))
    # NOTE(review): on Python 3 stdout output is sys.stdout.buffer, which
    # compares unequal to sys.stdout and is therefore closed here —
    # confirm that no further stdout writes happen after this returns.
    if (output != sys.stdout):
        output.close()
class BaseNodeVisitorTester(object):
    """Test harness base class for AST node visitors.

    Subclasses set ``visitor_cls`` to the visitor under test and use the
    ``assert_*`` helpers to check that code snippets pass, fail with a
    specific error code, or get rewritten to an expected form.
    """
    # Subclasses must override with the visitor class under test.
    visitor_cls = None
    def _run_str(self, code_str, expect_failure=False, apply_changes=False, **kwargs):
        """Parse *code_str* and run the visitor over the resulting tree.

        Returns the visitor result, or the raised VisitorError when
        *expect_failure* is true; asserts when the failure expectation
        does not match what actually happened.
        """
        # Stop at the first error unless fixes are being applied.
        kwargs.setdefault('fail_after_first', (not apply_changes))
        if isinstance(code_str, bytes):
            code_str = code_str.decode('utf-8')
        code_str = textwrap.dedent(code_str)
        tree = ast.parse(code_str, '<test input>')
        try:
            result = self._run_tree(code_str, tree, apply_changes=apply_changes, **kwargs)
        except VisitorError as e:
            if expect_failure:
                return e
            else:
                raise
        else:
            if expect_failure:
                assert False, ('Expected check of "%s" to fail' % code_str)
        return result
    def assert_passes(self, code_str, **kwargs):
        """Assert the visitor reports exactly the errors annotated inside
        *code_str* with ``# E: error_code`` comments."""
        code_str = textwrap.dedent(code_str)
        errors = self._run_str(code_str, expect_failure=False, fail_after_first=False, **kwargs)
        # expected_errors[lineno][error_code] -> expected occurrence count.
        expected_errors = defaultdict((lambda : defaultdict(int)))
        for (i, line) in enumerate(code_str.splitlines(), start=1):
            # A comment-only "# E: code" line refers to the NEXT line,
            # hence the i + 1 below.
            whole_line_match = re.match('^ *#\\s*E:\\s*([a-z_]+)$', line)
            if whole_line_match:
                expected_errors[(i + 1)][whole_line_match.group(1)] += 1
                continue
            # Trailing "# E: code" markers refer to their own line.
            for separate_match in re.finditer('#\\s*E:\\s*([a-z_]+)', line):
                expected_errors[i][separate_match.group(1)] += 1
        mismatches = []
        # Consume expected counts for each reported error; anything
        # unmatched in either direction becomes a mismatch message.
        for error in errors:
            lineno = error['lineno']
            actual_code = error['code'].name
            if ((actual_code in expected_errors[lineno]) and (expected_errors[lineno][actual_code] > 0)):
                expected_errors[lineno][actual_code] -= 1
            else:
                mismatches.append(f'Did not expect error {actual_code} on line {lineno}')
        for (lineno, errors_by_line) in expected_errors.items():
            for (error_code, count) in errors_by_line.items():
                if (count > 0):
                    mismatches.append(f'Expected {error_code} on line {lineno}')
        assert (not mismatches), (''.join(((line + '\n') for line in mismatches)) + ''.join((error['message'] for error in errors)))
    def assert_fails(self, expected_error_code, code_str, **kwargs):
        """Assert that checking *code_str* fails with *expected_error_code*."""
        exc = self._run_str(code_str, expect_failure=True, **kwargs)
        assert (expected_error_code == exc.error_code), f'{exc} does not have code {expected_error_code}'
    def assert_is_changed(self, code_str, expected_code_str, repeat=False, **kwargs):
        """Assert that applying fixes turns *code_str* into
        *expected_code_str*; with *repeat*, re-apply until stable."""
        code_str = textwrap.dedent(code_str)
        expected_code_str = textwrap.dedent(expected_code_str)
        if repeat:
            while True:
                output = self._run_str(code_str, apply_changes=True, **kwargs)[1]
                if (output == code_str):
                    break
                else:
                    code_str = output
        else:
            output = self._run_str(code_str, apply_changes=True, **kwargs)[1]
        assert_code_equal(expected_code_str, output)
    def _run_tree(self, code_str, tree, apply_changes=False, **kwargs):
        """Instantiate the visitor on *tree* and run its test entry point."""
        return self.visitor_cls('<test input>', code_str, tree, **kwargs).check_for_test(apply_changes=apply_changes)
class IsolatedEnv(BaseIsolatedEnv):
    """python-build isolated-environment adapter backed by Poetry.

    Resolves and installs build requirements into *env* using Poetry's own
    installer against the given repository *pool*.
    NOTE(review): in the ``build`` API ``python_executable`` is normally a
    property — a stripped ``@property`` decorator is plausible here;
    confirm against the original source.
    """
    def __init__(self, env: Env, pool: RepositoryPool) -> None:
        self._env = env
        self._pool = pool
    def python_executable(self) -> str:
        """Path to the environment's Python interpreter."""
        return str(self._env.python)
    def make_extra_environ(self) -> dict[(str, str)]:
        """Environment overrides: prepend the env's scripts dir to PATH."""
        path = os.environ.get('PATH')
        scripts_dir = str(self._env._bin_dir)
        return {'PATH': (os.pathsep.join([scripts_dir, path]) if (path is not None) else scripts_dir)}
    def install(self, requirements: Collection[str]) -> None:
        """Resolve and install the PEP 508 *requirements* into the env.

        Raises ChefInstallError with the captured output/error streams when
        the Poetry installer reports a non-zero status.
        """
        # Imports are local so Poetry internals load only when installing.
        from cleo.io.buffered_io import BufferedIO
        from poetry.core.packages.dependency import Dependency
        from poetry.core.packages.project_package import ProjectPackage
        from poetry.config.config import Config
        from poetry.installation.installer import Installer
        from poetry.packages.locker import Locker
        from poetry.repositories.installed_repository import InstalledRepository
        # Synthesize a throwaway root package that depends on the
        # requested build requirements.
        package = ProjectPackage('__root__', '0.0.0')
        package.python_versions = '.'.join((str(v) for v in self._env.version_info[:3]))
        for requirement in requirements:
            dependency = Dependency.create_from_pep_508(requirement)
            package.add_dependency(dependency)
        io = BufferedIO()
        installer = Installer(io, self._env, package, Locker(self._env.path.joinpath('poetry.lock'), {}), self._pool, Config.create(), InstalledRepository.load(self._env))
        installer.update(True)
        if (installer.run() != 0):
            raise ChefInstallError(requirements, io.fetch_output(), io.fetch_error())
# NOTE(review): the bare string below is almost certainly a behave step
# decorator that lost its '@when' prefix (i.e. "@when('I assign ...')") —
# as written it is a no-op expression statement; confirm against the
# original feature-steps module.
('I assign {value_str} to table.alignment')
def when_I_assign_value_to_table_alignment(context, value_str):
    """Step: map *value_str* to the matching WD_TABLE_ALIGNMENT member
    (or None) and assign it to the table under test."""
    value = {'None': None, 'WD_TABLE_ALIGNMENT.LEFT': WD_TABLE_ALIGNMENT.LEFT, 'WD_TABLE_ALIGNMENT.RIGHT': WD_TABLE_ALIGNMENT.RIGHT, 'WD_TABLE_ALIGNMENT.CENTER': WD_TABLE_ALIGNMENT.CENTER}[value_str]
    table = context.table_
    table.alignment = value
class Address(Base):
    """Declarative model for one address-book entry, exposing reahl
    fields and events for the datatable bootstrap example UI."""
    __tablename__ = 'datatablebootstrap_address'
    id = Column(Integer, primary_key=True)
    email_address = Column(UnicodeText)
    name = Column(UnicodeText)
    zip_code = Column(Integer)
    # Input fields exposed to the web framework; each factory receives the
    # instance (i) and returns a configured Field.
    fields = ExposedNames()
    fields.name = (lambda i: Field(label='Name', required=True))
    fields.email_address = (lambda i: EmailField(label='Email', required=True))
    fields.zip_code = (lambda i: IntegerField(label='Zipcode', required=True))
    # User-triggerable events; 'save' persists the instance.
    events = ExposedNames()
    events.save = (lambda i: Event(label='Save', action=Action(i.save)))
    # NOTE(review): 'update' has no action wired here — presumably the form
    # submission alone suffices; confirm against the surrounding example.
    events.update = (lambda i: Event(label='Update'))
    def save(self):
        """Add this instance to the current SQLAlchemy session."""
        Session.add(self)
class QdrantUploader(BaseUploader):
    """Benchmark uploader for a Qdrant collection.

    NOTE(review): the methods below take ``cls`` but carry no
    ``@classmethod`` decorators — these were presumably stripped when the
    file was extracted (the uploader API uses class-level state); confirm
    before calling on instances.
    """
    # Shared client / parameters, populated by init_client().
    client = None
    upload_params = {}
    def init_client(cls, host, distance, connection_params, upload_params):
        """Create the gRPC Qdrant client and remember the upload params."""
        # Uploads run in forked worker processes; make gRPC fork-safe.
        os.environ['GRPC_ENABLE_FORK_SUPPORT'] = 'true'
        os.environ['GRPC_POLL_STRATEGY'] = 'epoll,poll'
        cls.client = QdrantClient(host=host, prefer_grpc=True, **connection_params)
        cls.upload_params = upload_params
    def upload_batch(cls, ids: List[int], vectors: List[list], metadata: Optional[List[dict]]):
        """Upsert one batch of points without waiting for indexing."""
        cls.client.upsert(collection_name=QDRANT_COLLECTION_NAME, points=Batch.construct(ids=ids, vectors=vectors, payloads=[(payload or {}) for payload in metadata]), wait=False)
    def post_upload(cls, _distance):
        """Throttle the optimizer to a single thread and block until the
        collection settles into GREEN status."""
        cls.client.update_collection(collection_name=QDRANT_COLLECTION_NAME, optimizer_config=OptimizersConfigDiff(max_optimization_threads=1))
        cls.wait_collection_green()
        return {}
    def wait_collection_green(cls):
        """Poll until the collection reports GREEN twice in a row (5s
        apart) — a single GREEN reading can be transient while the
        optimizer is still scheduling work. Returns seconds waited."""
        wait_time = 5.0
        total = 0
        while True:
            time.sleep(wait_time)
            total += wait_time
            collection_info = cls.client.get_collection(QDRANT_COLLECTION_NAME)
            if (collection_info.status != CollectionStatus.GREEN):
                continue
            # Confirm the GREEN status persists after one more interval.
            time.sleep(wait_time)
            collection_info = cls.client.get_collection(QDRANT_COLLECTION_NAME)
            if (collection_info.status == CollectionStatus.GREEN):
                break
        return total
    def delete_client(cls):
        """Drop the shared client reference (closes it via GC)."""
        if (cls.client is not None):
            del cls.client
class PinnaclePlan():
    """In-memory view of one Pinnacle treatment plan directory.

    Lazily reads machine, trial, point and patient-setup files from
    *path* and derives the DICOM instance UIDs used for RTPLAN / RTDOSE /
    RTSTRUCT export.

    NOTE(review): many members here (``logger``, ``pinnacle``, ``path``,
    ``trials``, ``plan_info``, ...) are written as plain methods but are
    used like attributes below (e.g. ``self.logger.warning`` in __init__,
    ``self.plan_info[...]``), and the bare ``_trial.setter`` /
    ``_center.setter`` expressions look like mangled
    ``@active_trial.setter`` / ``@iso_center.setter`` lines — the
    ``@property`` decorators were presumably stripped during extraction;
    confirm against the original source.
    """
    def __init__(self, pinnacle, path, plan):
        """Store references and locate the plan's primary CT image set."""
        self._pinnacle = pinnacle
        self._path = path
        self._plan_info = plan
        # Lazily-populated caches, read from disk on first access.
        self._machine_info = None
        self._trials = None
        self._trial_info = None
        self._points = None
        self._patient_setup = None
        self._primary_image = None
        self._uid_prefix = UID_PREFIX
        self._roi_count = 0
        self._iso_center = []
        self._ct_center = []
        self._dose_ref_pt = []
        # DICOM instance UIDs, generated on demand by generate_uids().
        self._plan_inst_uid = None
        self._dose_inst_uid = None
        self._struct_inst_uid = None
        # Find the plan's primary CT image set among the patient's images.
        for image in pinnacle.images:
            if (image.image['ImageSetID'] == self.plan_info['PrimaryCTImageSetID']):
                self._primary_image = image
        if (not self._primary_image):
            self.logger.warning('Primary Image Not Available')
    def logger(self):
        """Logger shared with the parent Pinnacle object."""
        return self._pinnacle.logger
    def pinnacle(self):
        """Parent Pinnacle (patient-level) object."""
        return self._pinnacle
    def path(self):
        """Filesystem path of this plan's directory."""
        return self._path
    def primary_image(self):
        """The plan's primary CT image set (may be None)."""
        return self._primary_image
    def machine_info(self):
        """Machine data from plan.Pinnacle.Machines, read once and cached."""
        if (not self._machine_info):
            path_machine = os.path.join(self._path, 'plan.Pinnacle.Machines')
            self.logger.debug('Reading machine data from: %s', path_machine)
            self._machine_info = pinn_to_dict(path_machine)
        return self._machine_info
    def trials(self):
        """List of trials from plan.Trial, read once and cached.

        Also initializes the active trial to the first one read.
        """
        if (not self._trials):
            path_trial = os.path.join(self._path, 'plan.Trial')
            self.logger.debug('Reading trial data from: %s', path_trial)
            self._trials = pinn_to_dict(path_trial)
            # A single trial parses to a dict; normalize to a list.
            if isinstance(self._trials, dict):
                self._trials = [self._trials['Trial']]
            if (not self._trial_info):
                self._trial_info = self._trials[0]
            self.logger.debug('Number of trials read: %s', len(self._trials))
            self.logger.debug('Active Trial: %s', self._trial_info['Name'])
        return self._trials
    def active_trial(self):
        """The currently selected trial's data."""
        return self._trial_info
    # NOTE(review): bare expression — presumably a mangled
    # '@active_trial.setter' decorator; as written this raises NameError.
    _trial.setter
    def active_trial(self, trial_name):
        """Select the active trial by name; raises KeyError if not found."""
        if isinstance(trial_name, str):
            for trial in self.trials:
                if (trial['Name'] == trial_name):
                    self._trial_info = trial
                    self.logger.info('Active Trial set: %s', trial_name)
                    return
        raise KeyError
    def plan_info(self):
        """Raw plan info dict this plan was constructed with."""
        return self._plan_info
    def trial_info(self):
        """Active trial data, triggering the trial file read if needed."""
        if (not self._trial_info):
            self.trials
        return self._trial_info
    def points(self):
        """List of POIs from plan.Points, read once and cached."""
        if (not self._points):
            path_points = os.path.join(self._path, 'plan.Points')
            self.logger.debug('Reading points data from: %s', path_points)
            self._points = pinn_to_dict(path_points)
            # A single point parses to a dict; normalize to a list.
            if isinstance(self._points, dict):
                self._points = [self._points['Poi']]
            if (self._points is None):
                self._points = []
        return self._points
    def patient_position(self):
        """Derive the DICOM patient position code (e.g. 'HFS', 'FFDL')
        from the plan.PatientSetup orientation/position strings."""
        if (not self._patient_setup):
            self._patient_setup = pinn_to_dict(os.path.join(self._path, 'plan.PatientSetup'))
        pat_pos = ''
        if ('Head First' in self._patient_setup['Orientation']):
            pat_pos = 'HF'
        elif ('Feet First' in self._patient_setup['Orientation']):
            pat_pos = 'FF'
        if ('supine' in self._patient_setup['Position']):
            pat_pos = f'{pat_pos}S'
        elif ('prone' in self._patient_setup['Position']):
            pat_pos = f'{pat_pos}P'
        # Both spellings are checked below — 'Decuibitus' presumably
        # matches a misspelling seen in real Pinnacle files; confirm.
        elif (('decubitus right' in self._patient_setup['Position']) or ('Decuibitus Right' in self._patient_setup['Position'])):
            pat_pos = f'{pat_pos}DR'
        elif (('decubitus left' in self._patient_setup['Position']) or ('Decuibitus Left' in self._patient_setup['Position'])):
            pat_pos = f'{pat_pos}DL'
        return pat_pos
    def iso_center(self):
        """Isocenter coordinates, computed lazily by find_iso_center()."""
        if (len(self._iso_center) == 0):
            find_iso_center(self)
        return self._iso_center
    # NOTE(review): bare expression — presumably a mangled
    # '@iso_center.setter' decorator; as written this raises NameError.
    _center.setter
    def iso_center(self, iso_center):
        self._iso_center = iso_center
    # NOTE(review): no 'self' parameter — presumably a stripped
    # '@staticmethod'; confirm.
    def is_prefix_valid(prefix):
        """True when *prefix* is a valid DICOM UID prefix."""
        if re.match(pydicom.uid.RE_VALID_UID_PREFIX, prefix):
            return True
        return False
    def generate_uids(self, uid_type='HASH'):
        """Generate the RTPLAN/RTDOSE/RTSTRUCT instance UIDs.

        With uid_type='HASH' the UIDs are derived deterministically from
        the patient MRN, plan name, trial name and trial write timestamp;
        otherwise (entropy_srcs=None) pydicom generates random UIDs.
        """
        entropy_srcs = None
        if (uid_type == 'HASH'):
            entropy_srcs = []
            entropy_srcs.append(self._pinnacle.patient_info['MedicalRecordNumber'])
            entropy_srcs.append(self.plan_info['PlanName'])
            entropy_srcs.append(self.trial_info['Name'])
            entropy_srcs.append(self.trial_info['ObjectVersion']['WriteTimeStamp'])
        # Sub-prefixes 1/2/3 distinguish the three modalities.
        RTPLAN_prefix = f'{self._uid_prefix}1.'
        self._plan_inst_uid = pydicom.uid.generate_uid(prefix=RTPLAN_prefix, entropy_srcs=entropy_srcs)
        RTDOSE_prefix = f'{self._uid_prefix}2.'
        self._dose_inst_uid = pydicom.uid.generate_uid(prefix=RTDOSE_prefix, entropy_srcs=entropy_srcs)
        RTSTRUCT_prefix = f'{self._uid_prefix}3.'
        self._struct_inst_uid = pydicom.uid.generate_uid(prefix=RTSTRUCT_prefix, entropy_srcs=entropy_srcs)
        self.logger.debug(f'Plan Instance UID: {self._plan_inst_uid}')
        self.logger.debug(f'Dose Instance UID: {self._dose_inst_uid}')
        self.logger.debug(f'Struct Instance UID: {self._struct_inst_uid}')
    def plan_inst_uid(self):
        """RTPLAN instance UID, generating all UIDs on first access."""
        if (not self._plan_inst_uid):
            self.generate_uids()
        return self._plan_inst_uid
    def dose_inst_uid(self):
        """RTDOSE instance UID, generating all UIDs on first access."""
        if (not self._dose_inst_uid):
            self.generate_uids()
        return self._dose_inst_uid
    def struct_inst_uid(self):
        """RTSTRUCT instance UID, generating all UIDs on first access."""
        if (not self._struct_inst_uid):
            self.generate_uids()
        return self._struct_inst_uid
    def convert_point(self, point):
        """Convert a Pinnacle point dict into a DICOM patient-coordinate
        [x, y, z] triple (also stored back on the point as 'refpoint')."""
        image_header = self.primary_image.image_header
        # Scale by 10 — presumably Pinnacle stores cm and DICOM expects
        # mm; confirm the units.
        refpoint = [(point['XCoord'] * 10), (point['YCoord'] * 10), (point['ZCoord'] * 10)]
        # Axis sign flips depend on the patient position, mapping the
        # Pinnacle coordinate frame onto the DICOM one.
        if ((image_header['patient_position'] == 'HFP') or (image_header['patient_position'] == 'FFS')):
            refpoint[0] = (- refpoint[0])
        if ((image_header['patient_position'] == 'HFS') or (image_header['patient_position'] == 'FFS')):
            refpoint[1] = (- refpoint[1])
        if ((image_header['patient_position'] == 'HFS') or (image_header['patient_position'] == 'HFP')):
            refpoint[2] = (- refpoint[2])
        point['refpoint'] = refpoint
        refpoint[0] = round(refpoint[0], 5)
        refpoint[1] = round(refpoint[1], 5)
        refpoint[2] = round(refpoint[2], 5)
        return refpoint
def assert_envelope_values(nonce: int, channel_identifier: ChannelID, transferred_amount: TokenAmount, locked_amount: LockedAmount, locksroot: Locksroot) -> None:
    """Validate the fields shared by all envelope messages.

    Raises ValueError with a descriptive message on the first field that
    violates its bounds; returns None when everything is valid.
    """
    def _require(condition, message):
        # Raise on the first violated bound, preserving check order.
        if not condition:
            raise ValueError(message)

    _require(nonce > 0, 'nonce cannot be zero or negative')
    _require(nonce <= UINT64_MAX, 'nonce is too large')
    _require(channel_identifier > 0, 'channel id cannot be zero or negative')
    _require(channel_identifier <= UINT256_MAX, 'channel id is too large')
    _require(transferred_amount >= 0, 'transferred_amount cannot be negative')
    _require(transferred_amount <= UINT256_MAX, 'transferred_amount is too large')
    _require(locked_amount >= 0, 'locked_amount cannot be negative')
    _require(locked_amount <= UINT256_MAX, 'locked_amount is too large')
    _require(len(locksroot) == 32, 'locksroot must have length 32')
class TestDundersFullOrdering():
    """The attrs-generated comparison dunders carry the expected names
    and docstrings on a fully-ordered class."""

    cls = FullOrderCSameType

    def _assert_method(self, name, doc):
        # Shared check: the dunder exists with the right name and docstring.
        method = getattr(self.cls, name)
        assert method.__doc__.strip() == doc
        assert method.__name__ == name

    def test_class(self):
        assert self.cls.__name__ == 'FullOrderCSameType'
        assert self.cls.__qualname__ == 'FullOrderCSameType'

    def test_eq(self):
        self._assert_method('__eq__', 'Return a == b. Computed by attrs.')

    def test_ne(self):
        self._assert_method('__ne__', 'Check equality and either forward a NotImplemented or\n return the result negated.')

    def test_lt(self):
        self._assert_method('__lt__', 'Return a < b. Computed by attrs.')

    def test_le(self):
        self._assert_method('__le__', 'Return a <= b. Computed by attrs.')

    def test_gt(self):
        self._assert_method('__gt__', 'Return a > b. Computed by attrs.')

    def test_ge(self):
        self._assert_method('__ge__', 'Return a >= b. Computed by attrs.')
def create_argparser():
    """Build the sampling argument parser: model/diffusion/classifier
    defaults plus the script-specific dataset and guidance flags."""
    defaults = {
        'clip_denoised': True,
        'num_samples': 10000,
        'batch_size': 16,
        'use_ddim': False,
        'model_path': '',
        'classifier_path': '',
        'classifier_scale': 1.0,
        'img_size': 224,
    }
    defaults.update(model_and_diffusion_defaults())
    defaults.update(classifier_defaults())
    parser = argparse.ArgumentParser()
    # Script-specific typed options.
    for flag, default, value_type, help_text in (
        ('--dataset', 'brats', str, 'choose dataset'),
        ('--save_dir', './run/results/diff_tumor', str, 'where to store results and logger'),
        ('--class_index', None, int, 'choose which class to generate'),
    ):
        parser.add_argument(flag, default=default, type=value_type, help=help_text)
    parser.add_argument('--guided', action='store_true', help='choose whether to use classifier guidance')
    add_dict_to_argparser(parser, defaults)
    return parser
def evaluate(model, dataset, device, filename):
    """Qualitative evaluation: run inpainting on the first 8 samples and
    save a comparison grid (input / mask / output / composite / ground
    truth) to *filename*.

    NOTE(review): leaves *model* in eval mode; callers that continue
    training should switch it back. Assumes dataset[i] yields an
    (image, mask, gt) tensor triple — TODO confirm.
    """
    print('Start the evaluation')
    model.eval()
    (image, mask, gt) = zip(*[dataset[i] for i in range(8)])
    # Batch the 8 samples along dim 0.
    image = torch.stack(image)
    mask = torch.stack(mask)
    gt = torch.stack(gt)
    with torch.no_grad():
        (output, _) = model(image.to(device), mask.to(device))
    output = output.to(torch.device('cpu'))
    # Composite: keep known pixels from the input, fill holes with output.
    output_comp = ((mask * image) + ((1 - mask) * output))
    grid = make_grid(torch.cat([image, mask, output, output_comp, gt], dim=0))
    save_image(grid, filename)
class TestTrackingDict():
    """Unit tests for TrackingDict: a dict that records which keys were
    accessed/written so callers can pop the access log afterwards.

    Relies on fixtures `td` (a TrackingDict pre-populated with {1: 1}) and
    `data` (the equivalent plain dict) — assumed from the test setup; the
    fixtures themselves are defined elsewhere in this module.
    """
    def test_slot_behaviour(self, td):
        """Every declared slot exists and no slot is duplicated across the MRO."""
        for attr in td.__slots__:
            assert (getattr(td, attr, 'err') != 'err'), f"got extra slot '{attr}'"
        assert (len(mro_slots(td)) == len(set(mro_slots(td)))), 'duplicate slot'
    def test_representations(self, td, data):
        """repr/str mirror the underlying plain dict."""
        assert (repr(td) == repr(data))
        assert (str(td) == str(data))
    def test_len(self, td, data):
        assert (len(td) == len(data))
    def test_boolean(self, td, data):
        """Truthiness matches dict semantics (empty is falsy)."""
        assert (bool(td) == bool(data))
        assert (bool(TrackingDict()) == bool({}))
    def test_equality(self, td, data):
        """Equality is symmetric against dicts, other TrackingDicts and non-mappings."""
        assert (td == data)
        assert (data == td)
        assert (td != TrackingDict())
        assert (TrackingDict() != td)
        td_2 = TrackingDict()
        td_2['foo'] = 7
        assert (td != td_2)
        assert (td_2 != td)
        # NOTE(review): the four asserts below are pairwise duplicates — the
        # originals may have checked both `==`/`!=` directions; confirm upstream.
        assert (td != 1)
        assert (td != 1)
        assert (td != 5)
        assert (td != 5)
    def test_getitem(self, td):
        """Plain reads are not recorded as writes or accesses."""
        assert (td[1] == 1)
        assert (not td.pop_accessed_write_items())
        assert (not td.pop_accessed_keys())
    def test_setitem(self, td):
        """Writes are recorded as (key, value) pairs and as accessed keys."""
        td[5] = 5
        assert (td[5] == 5)
        assert (td.pop_accessed_write_items() == [(5, 5)])
        td[5] = 7
        assert (td[5] == 7)
        assert (td.pop_accessed_keys() == {5})
    def test_delitem(self, td):
        """Deletions are tracked; a deleted key reports the DELETED sentinel."""
        assert (not td.pop_accessed_keys())
        td[5] = 7
        del td[1]
        assert (1 not in td)
        assert (td.pop_accessed_keys() == {1, 5})
        td[1] = 7
        td[5] = 7
        assert (td.pop_accessed_keys() == {1, 5})
        del td[5]
        assert (5 not in td)
        assert (td.pop_accessed_write_items() == [(5, TrackingDict.DELETED)])
    def test_update_no_track(self, td):
        """update_no_track mutates the dict without recording any access."""
        assert (not td.pop_accessed_keys())
        td.update_no_track({2: 2, 3: 3})
        assert (td == {1: 1, 2: 2, 3: 3})
        assert (not td.pop_accessed_keys())
    def test_pop(self, td):
        """pop is tracked; pop with a default on a missing key is not."""
        td.pop(1)
        assert (1 not in td)
        assert (td.pop_accessed_keys() == {1})
        td[1] = 7
        td[5] = 8
        assert (1 in td)
        assert (5 in td)
        assert (td.pop_accessed_keys() == {1, 5})
        td.pop(5)
        assert (5 not in td)
        assert (td.pop_accessed_write_items() == [(5, TrackingDict.DELETED)])
        with pytest.raises(KeyError):
            td.pop(5)
        assert (td.pop(5, 8) == 8)
        assert (5 not in td)
        assert (not td.pop_accessed_keys())
        assert (td.pop(5, 8) == 8)
        assert (5 not in td)
        assert (not td.pop_accessed_write_items())
    def test_popitem(self, td):
        """popitem removes in insertion order, is tracked, and raises when empty."""
        td.update_no_track({2: 2})
        assert (td.popitem() == (1, 1))
        assert (1 not in td)
        assert (td.pop_accessed_keys() == {1})
        assert (td.popitem() == (2, 2))
        assert (2 not in td)
        assert (not td)
        assert (td.pop_accessed_write_items() == [(2, TrackingDict.DELETED)])
        with pytest.raises(KeyError):
            td.popitem()
    def test_clear(self, td):
        """clear is tracked for every key present at the time."""
        td.clear()
        assert (td == {})
        assert (td.pop_accessed_keys() == {1})
        td[5] = 7
        assert (5 in td)
        assert (td.pop_accessed_keys() == {5})
        td.clear()
        assert (td == {})
        assert (td.pop_accessed_write_items() == [(5, TrackingDict.DELETED)])
    def test_set_default(self, td):
        """setdefault only tracks when it actually inserts."""
        assert (td.setdefault(1, 2) == 1)
        assert (td[1] == 1)
        assert (not td.pop_accessed_keys())
        assert (not td.pop_accessed_write_items())
        assert (td.setdefault(2, 3) == 3)
        assert (td[2] == 3)
        assert (td.pop_accessed_keys() == {2})
        assert (td.setdefault(3, 4) == 4)
        assert (td[3] == 4)
        assert (td.pop_accessed_write_items() == [(3, 4)])
    def test_iter(self, td, data):
        """Iteration order matches the plain dict and is not tracked."""
        data.update({2: 2, 3: 3, 4: 4})
        td.update_no_track({2: 2, 3: 3, 4: 4})
        assert (not td.pop_accessed_keys())
        assert (list(iter(td)) == list(iter(data)))
    def test_mark_as_accessed(self, td):
        """mark_as_accessed records a key without any actual read/write."""
        td[1] = 2
        assert (td.pop_accessed_keys() == {1})
        assert (td.pop_accessed_keys() == set())
        td.mark_as_accessed(1)
        assert (td.pop_accessed_keys() == {1})
@_handler('bookmarks')
def qute_bookmarks(_url: QUrl) -> _HandlerRet:
    """Handler for qute://bookmarks: render the bookmarks/quickmarks page.

    Note: the original line lacked the `@` — as a bare call the handler was
    never registered; restored the decorator.
    """
    # Bookmarks sort by title (the mapping value), quickmarks by name (the key).
    bookmarks = sorted(objreg.get('bookmark-manager').marks.items(), key=(lambda x: x[1]))
    quickmarks = sorted(objreg.get('quickmark-manager').marks.items(), key=(lambda x: x[0]))
    src = jinja.render('bookmarks.html', title='Bookmarks', bookmarks=bookmarks, quickmarks=quickmarks)
    return ('text/html', src)
def test_complete_set_value_invalid_settable(cmd2_app, capsys):
    """Tab-completing a value for an unknown settable yields no match and
    prints an error message."""
    text = ''
    line = f'set fake {text}'
    endidx = len(line)
    begidx = endidx - len(text)
    assert complete_tester(text, line, begidx, endidx, cmd2_app) is None
    out, err = capsys.readouterr()
    assert 'fake is not a settable parameter' in out
def main(args):
    """Convert a Kaldi-style segments/text listing into a Praat TextGrid.

    Reads args.in_path line by line, groups utterances per speaker, then
    writes one IntervalTier per speaker to args.out_path.
    """
    text = codecs.open(Path(args.in_path), 'r', 'utf-8')
    spk2textgrid = {}
    xmin = 0
    xmax = 0
    for line in text:
        uttlist = line.split()
        utt_id = uttlist[0]
        # NOTE(review): both comparisons are against '' — almost certainly
        # non-ASCII marker strings were lost in transit; confirm against the
        # original data format before relying on this filter.
        if ((utt_id == '') or (utt_id == '')):
            continue
        utt_text = uttlist[1]
        utt_use = uttlist[2]
        # Times come as the last field, formatted like "[start][end]".
        (utt_time_s, utt_time_e) = uttlist[(- 1)].strip('[').strip(']').split('][')
        if (float(utt_time_s) < 0):
            raise ValueError(float(utt_time_s))
        if (float(utt_time_e) < 0):
            raise ValueError(float(utt_time_e))
        # NOTE(review): comparison against '' — likely another lost marker.
        if (utt_use == ''):
            utt_speaker = uttlist[3]
            if ((args.speaker_limit == True) and ((utt_speaker != 'c1') and (utt_speaker != 'S1') and (utt_speaker != 'S2') and (utt_speaker != 'S3'))):
                raise ValueError(((str(utt_id) + ' ') + str(utt_speaker)))
            if (utt_speaker not in spk2textgrid):
                spk2textgrid[utt_speaker] = []
            xmax = max(xmax, float(utt_time_e))
            spk2textgrid[utt_speaker].append(Segment(utt_id, utt_speaker, float(utt_time_s), float(utt_time_e), utt_text.strip()))
    text.close()
    # Pad the global end time slightly past the last segment.
    xmax = (xmax + 0.01)
    # Emit the TextGrid header followed by one IntervalTier per speaker.
    textgrid = codecs.open(Path(args.out_path), 'w', 'utf-8')
    textgrid.write('File type = "ooTextFile"\n')
    textgrid.write('Object class = "TextGrid"\n\n')
    textgrid.write(('xmin = %s\n' % xmin))
    textgrid.write(('xmax = %s\n' % xmax))
    textgrid.write('tiers? <exists>\n')
    textgrid.write(('size = %s\n' % len(spk2textgrid)))
    textgrid.write('item []:\n')
    num_spk = 1
    for segments in spk2textgrid.keys():
        textgrid.write(('\titem [%s]:\n' % num_spk))
        num_spk = (num_spk + 1)
        textgrid.write('\t\tclass = "IntervalTier"\n')
        textgrid.write(('\t\tname = "%s"\n' % spk2textgrid[segments][0].spkr))
        textgrid.write(('\t\txmin = %s\n' % xmin))
        textgrid.write(('\t\txmax = %s\n' % xmax))
        textgrid.write(('\t\tintervals: size = %s\n' % len(spk2textgrid[segments])))
        for i in range(len(spk2textgrid[segments])):
            textgrid.write(('\t\tintervals [%s]:\n' % (i + 1)))
            textgrid.write(('\t\t\txmin = %s\n' % spk2textgrid[segments][i].stime))
            textgrid.write(('\t\t\txmax = %s\n' % spk2textgrid[segments][i].etime))
            textgrid.write(('\t\t\ttext = "%s"\n' % spk2textgrid[segments][i].text))
    textgrid.close()
def unify_generic_callable(type: NormalizedCallableType, target: NormalizedCallableType, ignore_return: bool, return_constraint_direction: (int | None)=None, *, no_unify_none: bool=False) -> (NormalizedCallableType | None):
    """Try to unify the generic callable `type` against `target`.

    Infers constraints from argument (and optionally return) types, solves
    for `type`'s type variables, and returns `type` with those variables
    substituted — or None if inference or application fails.
    Note: the parameter name `type` shadows the builtin (upstream signature).
    """
    import mypy.solve
    if (return_constraint_direction is None):
        return_constraint_direction = mypy.constraints.SUBTYPE_OF
    constraints: list[mypy.constraints.Constraint] = []
    # Compare argument types only: blank out the return types so they don't
    # contribute constraints here.
    cs = mypy.constraints.infer_constraints(type.copy_modified(ret_type=UninhabitedType()), target.copy_modified(ret_type=UninhabitedType()), mypy.constraints.SUBTYPE_OF, skip_neg_op=True)
    constraints.extend(cs)
    if (not ignore_return):
        c = mypy.constraints.infer_constraints(type.ret_type, target.ret_type, return_constraint_direction)
        constraints.extend(c)
    if no_unify_none:
        # Drop constraints that would bind a variable to None.
        constraints = [c for c in constraints if (not isinstance(get_proper_type(c.target), NoneType))]
    (inferred_vars, _) = mypy.solve.solve_constraints(type.variables, constraints)
    if (None in inferred_vars):
        # At least one variable could not be solved for.
        return None
    non_none_inferred_vars = cast(List[Type], inferred_vars)
    had_errors = False
    def report(*args: Any) -> None:
        # Error callback for apply_generic_arguments; just record failure.
        nonlocal had_errors
        had_errors = True
    applied = mypy.applytype.apply_generic_arguments(type, non_none_inferred_vars, report, context=target)
    if had_errors:
        return None
    return cast(NormalizedCallableType, applied)
def set_freeze_by_names(model, layer_alias=None, layer_names=None, freeze=True):
    """Freeze (or unfreeze) the parameters of named sub-modules of *model*.

    Args:
        model: a torch.nn.Module whose named sub-modules are inspected.
        layer_alias: optional alias resolved to concrete names via
            get_actual_layer_names when layer_names is not given.
        layer_names: iterable of module names (as yielded by
            model.named_modules()) to act on.
        freeze: True sets requires_grad=False on matching parameters;
            False re-enables gradients.

    Raises:
        NotImplementedError: if neither layer_names nor layer_alias is given.
    """
    if layer_names is None:
        if layer_alias is None:
            logging.info('set_freeze_by_names: neither layer_names nor layer_alias provided')
            raise NotImplementedError
        (layer_names, _name_alias_map) = get_actual_layer_names(model, layer_alias)
    logging.info(f'layer_names: {layer_names}')
    for (name, module) in model.named_modules():
        if (name not in layer_names):
            continue
        logging.info(f"module: {module} is {('freezed' if freeze else 'NOT freezed')}")
        for param in module.parameters():
            param.requires_grad = (not freeze)
class ParserElement(object):
    """Abstract base class for all pyparsing expressions.

    Holds the state shared by every expression (whitespace handling, parse
    actions, debug hooks, packrat cache) and implements the core parse loop
    that subclasses customize via parseImpl/postParse.
    """
    # Characters skipped before matching, unless overridden per-expression.
    DEFAULT_WHITE_CHARS = ' \n\t\r'
    # When True, parse errors propagate with their full traceback.
    verbose_stacktrace = False
    # NOTE(review): upstream defines this (and inlineLiteralsUsing below) as
    # @staticmethod — the decorator may have been lost in transit.
    def setDefaultWhitespaceChars(chars):
        """Set default whitespace chars for new and builtin expressions."""
        ParserElement.DEFAULT_WHITE_CHARS = chars
        for expr in _builtin_exprs:
            if expr.copyDefaultWhiteChars:
                expr.whiteChars = chars
    def inlineLiteralsUsing(cls):
        """Set the class used when a plain string is auto-wrapped as a literal."""
        ParserElement._literalStringClass = cls
    def __init__(self, savelist=False):
        """Initialize shared expression state.

        *savelist* controls whether results are grouped as a list
        (stored on self.saveAsList).
        """
        self.parseAction = list()
        self.failAction = None
        self.strRepr = None
        self.resultsName = None
        self.saveAsList = savelist
        self.skipWhitespace = True
        self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS)
        self.copyDefaultWhiteChars = True
        self.mayReturnEmpty = False
        self.keepTabs = False
        self.ignoreExprs = list()
        self.debug = False
        self.streamlined = False
        self.mayIndexError = True
        self.errmsg = ''
        self.modalResults = True
        self.debugActions = (None, None, None)
        self.re = None
        self.callPreparse = True
        self.callDuringTry = False
    def copy(self):
        """Return a shallow copy with independent parseAction/ignoreExprs lists."""
        cpy = copy.copy(self)
        cpy.parseAction = self.parseAction[:]
        cpy.ignoreExprs = self.ignoreExprs[:]
        if self.copyDefaultWhiteChars:
            cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
        return cpy
    def setName(self, name):
        """Give the expression a user-visible name (used in error messages)."""
        self.name = name
        self.errmsg = ('Expected ' + self.name)
        if __diag__.enable_debug_on_named_expressions:
            self.setDebug()
        return self
    def setResultsName(self, name, listAllMatches=False):
        """Return a copy of this expression with the given results name."""
        return self._setResultsName(name, listAllMatches)
    def _setResultsName(self, name, listAllMatches=False):
        # A trailing '*' on the name means "list all matches".
        newself = self.copy()
        if name.endswith('*'):
            name = name[:(- 1)]
            listAllMatches = True
        newself.resultsName = name
        newself.modalResults = (not listAllMatches)
        return newself
    def setBreak(self, breakFlag=True):
        """Install (or remove) a pdb breakpoint wrapper around _parse."""
        if breakFlag:
            _parseMethod = self._parse
            def breaker(instring, loc, doActions=True, callPreParse=True):
                import pdb
                pdb.set_trace()
                return _parseMethod(instring, loc, doActions, callPreParse)
            # Keep a handle to the original so setBreak(False) can restore it.
            breaker._originalParseMethod = _parseMethod
            self._parse = breaker
        elif hasattr(self._parse, '_originalParseMethod'):
            self._parse = self._parse._originalParseMethod
        return self
    def setParseAction(self, *fns, **kwargs):
        """Replace the expression's parse actions; pass None to clear them."""
        if (list(fns) == [None]):
            self.parseAction = []
        else:
            if (not all((callable(fn) for fn in fns))):
                raise TypeError('parse actions must be callable')
            self.parseAction = list(map(_trim_arity, list(fns)))
            self.callDuringTry = kwargs.get('callDuringTry', False)
        return self
    def addParseAction(self, *fns, **kwargs):
        """Append additional parse actions to the existing ones."""
        self.parseAction += list(map(_trim_arity, list(fns)))
        self.callDuringTry = (self.callDuringTry or kwargs.get('callDuringTry', False))
        return self
    def addCondition(self, *fns, **kwargs):
        """Add boolean predicate(s); a False result makes the match fail."""
        for fn in fns:
            self.parseAction.append(conditionAsParseAction(fn, message=kwargs.get('message'), fatal=kwargs.get('fatal', False)))
        self.callDuringTry = (self.callDuringTry or kwargs.get('callDuringTry', False))
        return self
    def setFailAction(self, fn):
        """Set a callback invoked when this expression fails to match."""
        self.failAction = fn
        return self
    def _skipIgnorables(self, instring, loc):
        """Advance loc past any text matched by the registered ignore expressions."""
        exprsFound = True
        while exprsFound:
            exprsFound = False
            for e in self.ignoreExprs:
                try:
                    while 1:
                        (loc, dummy) = e._parse(instring, loc)
                        exprsFound = True
                except ParseException:
                    pass
        return loc
    def preParse(self, instring, loc):
        """Skip ignorables and leading whitespace before matching."""
        if self.ignoreExprs:
            loc = self._skipIgnorables(instring, loc)
        if self.skipWhitespace:
            wt = self.whiteChars
            instrlen = len(instring)
            while ((loc < instrlen) and (instring[loc] in wt)):
                loc += 1
        return loc
    def parseImpl(self, instring, loc, doActions=True):
        """Subclass hook: attempt the actual match; base matches nothing."""
        return (loc, [])
    def postParse(self, instring, loc, tokenlist):
        """Subclass hook: transform the raw token list after a match."""
        return tokenlist
    def _parseNoCache(self, instring, loc, doActions=True, callPreParse=True):
        """Core (uncached) parse loop: preparse, match, run parse actions.

        The debug and non-debug paths duplicate the preparse/match sequence
        so the common (non-debug) path avoids the try/except overhead.
        """
        (TRY, MATCH, FAIL) = (0, 1, 2)
        debugging = self.debug
        if (debugging or self.failAction):
            if self.debugActions[TRY]:
                self.debugActions[TRY](instring, loc, self)
            try:
                if (callPreParse and self.callPreparse):
                    preloc = self.preParse(instring, loc)
                else:
                    preloc = loc
                tokensStart = preloc
                if (self.mayIndexError or (preloc >= len(instring))):
                    try:
                        (loc, tokens) = self.parseImpl(instring, preloc, doActions)
                    except IndexError:
                        # Normalize stray IndexErrors into a ParseException at EOS.
                        raise ParseException(instring, len(instring), self.errmsg, self)
                else:
                    (loc, tokens) = self.parseImpl(instring, preloc, doActions)
            except Exception as err:
                if self.debugActions[FAIL]:
                    self.debugActions[FAIL](instring, tokensStart, self, err)
                if self.failAction:
                    self.failAction(instring, tokensStart, self, err)
                raise
        else:
            if (callPreParse and self.callPreparse):
                preloc = self.preParse(instring, loc)
            else:
                preloc = loc
            tokensStart = preloc
            if (self.mayIndexError or (preloc >= len(instring))):
                try:
                    (loc, tokens) = self.parseImpl(instring, preloc, doActions)
                except IndexError:
                    raise ParseException(instring, len(instring), self.errmsg, self)
            else:
                (loc, tokens) = self.parseImpl(instring, preloc, doActions)
        tokens = self.postParse(instring, loc, tokens)
        retTokens = ParseResults(tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults)
        if (self.parseAction and (doActions or self.callDuringTry)):
            if debugging:
                try:
                    for fn in self.parseAction:
                        try:
                            tokens = fn(instring, tokensStart, retTokens)
                        except IndexError as parse_action_exc:
                            # Chain so the user's IndexError isn't mistaken
                            # for an internal parsing IndexError.
                            exc = ParseException('exception raised in parse action')
                            exc.__cause__ = parse_action_exc
                            raise exc
                        if ((tokens is not None) and (tokens is not retTokens)):
                            retTokens = ParseResults(tokens, self.resultsName, asList=(self.saveAsList and isinstance(tokens, (ParseResults, list))), modal=self.modalResults)
                except Exception as err:
                    if self.debugActions[FAIL]:
                        self.debugActions[FAIL](instring, tokensStart, self, err)
                    raise
            else:
                for fn in self.parseAction:
                    try:
                        tokens = fn(instring, tokensStart, retTokens)
                    except IndexError as parse_action_exc:
                        exc = ParseException('exception raised in parse action')
                        exc.__cause__ = parse_action_exc
                        raise exc
                    if ((tokens is not None) and (tokens is not retTokens)):
                        retTokens = ParseResults(tokens, self.resultsName, asList=(self.saveAsList and isinstance(tokens, (ParseResults, list))), modal=self.modalResults)
        if debugging:
            if self.debugActions[MATCH]:
                self.debugActions[MATCH](instring, tokensStart, loc, self, retTokens)
        return (loc, retTokens)
    def tryParse(self, instring, loc, raise_fatal=False):
        """Attempt a match without running parse actions; return the end loc."""
        try:
            return self._parse(instring, loc, doActions=False)[0]
        except ParseFatalException:
            if raise_fatal:
                raise
            raise ParseException(instring, loc, self.errmsg, self)
    def canParseNext(self, instring, loc):
        """Return True if this expression matches at loc (no side effects)."""
        try:
            self.tryParse(instring, loc)
        except (ParseException, IndexError):
            return False
        else:
            return True
    class _UnboundedCache(object):
        """Packrat cache with no size limit; methods close over a local dict."""
        def __init__(self):
            cache = {}
            # Sentinel distinguishing "missing" from a cached None.
            self.not_in_cache = not_in_cache = object()
            def get(self, key):
                return cache.get(key, not_in_cache)
            def set(self, key, value):
                cache[key] = value
            def clear(self):
                cache.clear()
            def cache_len(self):
                return len(cache)
            # Bind the closures as instance methods.
            self.get = types.MethodType(get, self)
            self.set = types.MethodType(set, self)
            self.clear = types.MethodType(clear, self)
            self.__len__ = types.MethodType(cache_len, self)
    class _FifoCache(object):
        """Bounded packrat cache: evicts oldest entries beyond *size*."""
        def __init__(self, size):
            self.not_in_cache = not_in_cache = object()
            cache = collections.OrderedDict()
            def get(self, key):
                return cache.get(key, not_in_cache)
            def set(self, key, value):
                cache[key] = value
                # Evict in FIFO order once over capacity.
                while (len(cache) > size):
                    try:
                        cache.popitem(False)
                    except KeyError:
                        pass
            def clear(self):
                cache.clear()
            def cache_len(self):
                return len(cache)
            self.get = types.MethodType(get, self)
            self.set = types.MethodType(set, self)
            self.clear = types.MethodType(clear, self)
            self.__len__ = types.MethodType(cache_len, self)
    # Shared packrat state: cache, its lock, and [hits, misses] counters.
    packrat_cache = {}
    packrat_cache_lock = RLock()
    packrat_cache_stats = [0, 0]
    def _parseCache(self, instring, loc, doActions=True, callPreParse=True):
        """Memoizing wrapper around _parseNoCache (packrat parsing).

        Caches both successful results (copied on the way out) and
        exceptions (re-raised on cache hits).
        """
        (HIT, MISS) = (0, 1)
        lookup = (self, instring, loc, callPreParse, doActions)
        with ParserElement.packrat_cache_lock:
            cache = ParserElement.packrat_cache
            value = cache.get(lookup)
            if (value is cache.not_in_cache):
                ParserElement.packrat_cache_stats[MISS] += 1
                try:
                    value = self._parseNoCache(instring, loc, doActions, callPreParse)
                except ParseBaseException as pe:
                    # Cache a fresh copy of the exception for later hits.
                    cache.set(lookup, pe.__class__(*pe.args))
                    raise
                else:
                    cache.set(lookup, (value[0], value[1].copy()))
                    return value
            else:
                ParserElement.packrat_cache_stats[HIT] += 1
                if isinstance(value, Exception):
                    raise value
                return (value[0], value[1].copy())
    # Default dispatch; enablePackrat() swaps this to _parseCache.
    _parse = _parseNoCache
    # NOTE(review): upstream defines resetCache/enablePackrat as @staticmethod.
    def resetCache():
        """Clear the packrat cache and reset its hit/miss counters."""
        ParserElement.packrat_cache.clear()
        ParserElement.packrat_cache_stats[:] = ([0] * len(ParserElement.packrat_cache_stats))
    _packratEnabled = False
    def enablePackrat(cache_size_limit=128):
        """Enable packrat memoization; None means an unbounded cache."""
        if (not ParserElement._packratEnabled):
            ParserElement._packratEnabled = True
            if (cache_size_limit is None):
                ParserElement.packrat_cache = ParserElement._UnboundedCache()
            else:
                ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
            ParserElement._parse = ParserElement._parseCache
    def parseString(self, instring, parseAll=False):
        """Parse *instring* from the start; with parseAll, require full consumption."""
        ParserElement.resetCache()
        if (not self.streamlined):
            self.streamline()
        for e in self.ignoreExprs:
            e.streamline()
        if (not self.keepTabs):
            instring = instring.expandtabs()
        try:
            (loc, tokens) = self._parse(instring, 0)
            if parseAll:
                loc = self.preParse(instring, loc)
                # Require that only whitespace remains after the match.
                se = (Empty() + StringEnd())
                se._parse(instring, loc)
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                # Re-raise with the internal traceback trimmed.
                raise exc
        else:
            return tokens
    def scanString(self, instring, maxMatches=_MAX_INT, overlap=False):
        """Generate (tokens, start, end) for each match found in *instring*."""
        if (not self.streamlined):
            self.streamline()
        for e in self.ignoreExprs:
            e.streamline()
        if (not self.keepTabs):
            instring = str(instring).expandtabs()
        instrlen = len(instring)
        loc = 0
        preparseFn = self.preParse
        parseFn = self._parse
        ParserElement.resetCache()
        matches = 0
        try:
            while ((loc <= instrlen) and (matches < maxMatches)):
                try:
                    preloc = preparseFn(instring, loc)
                    (nextLoc, tokens) = parseFn(instring, preloc, callPreParse=False)
                except ParseException:
                    loc = (preloc + 1)
                else:
                    if (nextLoc > loc):
                        matches += 1
                        (yield (tokens, preloc, nextLoc))
                        if overlap:
                            # NOTE(review): 'nextloc' (lowercase) vs 'nextLoc'
                            # mirrors upstream pyparsing — verify intent there.
                            nextloc = preparseFn(instring, loc)
                            if (nextloc > loc):
                                loc = nextLoc
                            else:
                                loc += 1
                        else:
                            loc = nextLoc
                    else:
                        # Zero-width match: advance one char to avoid looping.
                        loc = (preloc + 1)
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                raise exc
    def transformString(self, instring):
        """Return *instring* with every match replaced by its parse-action output."""
        out = []
        lastE = 0
        # Keep tabs so reported offsets line up with the original string.
        self.keepTabs = True
        try:
            for (t, s, e) in self.scanString(instring):
                out.append(instring[lastE:s])
                if t:
                    if isinstance(t, ParseResults):
                        out += t.asList()
                    elif isinstance(t, list):
                        out += t
                    else:
                        out.append(t)
                lastE = e
            out.append(instring[lastE:])
            out = [o for o in out if o]
            return ''.join(map(str, _flatten(out)))
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                raise exc
    def searchString(self, instring, maxMatches=_MAX_INT):
        """Return a ParseResults of all (non-overlapping) matches in *instring*."""
        try:
            return ParseResults([t for (t, s, e) in self.scanString(instring, maxMatches)])
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                raise exc
    def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
        """Split *instring* on matches of this expression (generator)."""
        splits = 0
        last = 0
        for (t, s, e) in self.scanString(instring, maxMatches=maxsplit):
            (yield instring[last:s])
            if includeSeparators:
                (yield t[0])
            last = e
        (yield instring[last:])
    def __add__(self, other):
        """self + other -> And([self, other]); '...' yields a pending skip."""
        if (other is Ellipsis):
            return _PendingSkip(self)
        if isinstance(other, str_type):
            other = self._literalStringClass(other)
        if (not isinstance(other, ParserElement)):
            warnings.warn(('Cannot combine element of type %s with ParserElement' % type(other)), SyntaxWarning, stacklevel=2)
            return None
        return And([self, other])
    def __radd__(self, other):
        """other + self; '... + self' becomes SkipTo(self) + self."""
        if (other is Ellipsis):
            return (SkipTo(self)('_skipped*') + self)
        if isinstance(other, str_type):
            other = self._literalStringClass(other)
        if (not isinstance(other, ParserElement)):
            warnings.warn(('Cannot combine element of type %s with ParserElement' % type(other)), SyntaxWarning, stacklevel=2)
            return None
        return (other + self)
    def __sub__(self, other):
        """self - other: like +, but errors after self are fatal (no backtrack)."""
        if isinstance(other, str_type):
            other = self._literalStringClass(other)
        if (not isinstance(other, ParserElement)):
            warnings.warn(('Cannot combine element of type %s with ParserElement' % type(other)), SyntaxWarning, stacklevel=2)
            return None
        return ((self + And._ErrorStop()) + other)
    def __rsub__(self, other):
        """other - self."""
        if isinstance(other, str_type):
            other = self._literalStringClass(other)
        if (not isinstance(other, ParserElement)):
            warnings.warn(('Cannot combine element of type %s with ParserElement' % type(other)), SyntaxWarning, stacklevel=2)
            return None
        return (other - self)
    def __mul__(self, other):
        """self * n or self * (min, max): repetition.

        Accepts an int, a (min, max) tuple, or Ellipsis-based open ranges;
        expands to And/ZeroOrMore/OneOrMore/Optional combinations.
        """
        if (other is Ellipsis):
            other = (0, None)
        elif (isinstance(other, tuple) and (other[:1] == (Ellipsis,))):
            other = (((0,) + other[1:]) + (None,))[:2]
        if isinstance(other, int):
            (minElements, optElements) = (other, 0)
        elif isinstance(other, tuple):
            # Normalize Ellipsis to None and pad the tuple to length 2.
            other = tuple(((o if (o is not Ellipsis) else None) for o in other))
            other = (other + (None, None))[:2]
            if (other[0] is None):
                other = (0, other[1])
            if (isinstance(other[0], int) and (other[1] is None)):
                if (other[0] == 0):
                    return ZeroOrMore(self)
                if (other[0] == 1):
                    return OneOrMore(self)
                else:
                    return ((self * other[0]) + ZeroOrMore(self))
            elif (isinstance(other[0], int) and isinstance(other[1], int)):
                (minElements, optElements) = other
                optElements -= minElements
            else:
                raise TypeError("cannot multiply 'ParserElement' and ('%s', '%s') objects", type(other[0]), type(other[1]))
        else:
            raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
        if (minElements < 0):
            raise ValueError('cannot multiply ParserElement by negative value')
        if (optElements < 0):
            raise ValueError('second tuple value must be greater or equal to first tuple value')
        if (minElements == optElements == 0):
            raise ValueError('cannot multiply ParserElement by 0 or (0, 0)')
        if optElements:
            def makeOptionalList(n):
                # Nest Optionals so up to n extra occurrences may match.
                if (n > 1):
                    return Optional((self + makeOptionalList((n - 1))))
                else:
                    return Optional(self)
            if minElements:
                if (minElements == 1):
                    ret = (self + makeOptionalList(optElements))
                else:
                    ret = (And(([self] * minElements)) + makeOptionalList(optElements))
            else:
                ret = makeOptionalList(optElements)
        elif (minElements == 1):
            ret = self
        else:
            ret = And(([self] * minElements))
        return ret
    def __rmul__(self, other):
        return self.__mul__(other)
    def __or__(self, other):
        """self | other -> MatchFirst; '| ...' marks a must-skip pending skip."""
        if (other is Ellipsis):
            return _PendingSkip(self, must_skip=True)
        if isinstance(other, str_type):
            other = self._literalStringClass(other)
        if (not isinstance(other, ParserElement)):
            warnings.warn(('Cannot combine element of type %s with ParserElement' % type(other)), SyntaxWarning, stacklevel=2)
            return None
        return MatchFirst([self, other])
    def __ror__(self, other):
        if isinstance(other, str_type):
            other = self._literalStringClass(other)
        if (not isinstance(other, ParserElement)):
            warnings.warn(('Cannot combine element of type %s with ParserElement' % type(other)), SyntaxWarning, stacklevel=2)
            return None
        return (other | self)
    def __xor__(self, other):
        """self ^ other -> Or (longest match wins)."""
        if isinstance(other, str_type):
            other = self._literalStringClass(other)
        if (not isinstance(other, ParserElement)):
            warnings.warn(('Cannot combine element of type %s with ParserElement' % type(other)), SyntaxWarning, stacklevel=2)
            return None
        return Or([self, other])
    def __rxor__(self, other):
        if isinstance(other, str_type):
            other = self._literalStringClass(other)
        if (not isinstance(other, ParserElement)):
            warnings.warn(('Cannot combine element of type %s with ParserElement' % type(other)), SyntaxWarning, stacklevel=2)
            return None
        return (other ^ self)
    def __and__(self, other):
        """self & other -> Each (all must match, any order)."""
        if isinstance(other, str_type):
            other = self._literalStringClass(other)
        if (not isinstance(other, ParserElement)):
            warnings.warn(('Cannot combine element of type %s with ParserElement' % type(other)), SyntaxWarning, stacklevel=2)
            return None
        return Each([self, other])
    def __rand__(self, other):
        if isinstance(other, str_type):
            other = self._literalStringClass(other)
        if (not isinstance(other, ParserElement)):
            warnings.warn(('Cannot combine element of type %s with ParserElement' % type(other)), SyntaxWarning, stacklevel=2)
            return None
        return (other & self)
    def __invert__(self):
        """~self -> NotAny (negative lookahead)."""
        return NotAny(self)
    def __iter__(self):
        # Expressions are not iterable; fail loudly rather than looping forever.
        raise TypeError(('%r object is not iterable' % self.__class__.__name__))
    def __getitem__(self, key):
        """expr[n] / expr[min, max] -> repetition, delegating to __mul__."""
        try:
            if isinstance(key, str_type):
                key = (key,)
            iter(key)
        except TypeError:
            key = (key, key)
        if (len(key) > 2):
            warnings.warn('only 1 or 2 index arguments supported ({}{})'.format(key[:5], ('... [{}]'.format(len(key)) if (len(key) > 5) else '')))
        ret = (self * tuple(key[:2]))
        return ret
    def __call__(self, name=None):
        """expr('name') -> copy with results name; expr() -> plain copy."""
        if (name is not None):
            return self._setResultsName(name)
        else:
            return self.copy()
    def suppress(self):
        """Return a Suppress wrapper that drops this expression's tokens."""
        return Suppress(self)
    def leaveWhitespace(self):
        """Disable automatic leading-whitespace skipping."""
        self.skipWhitespace = False
        return self
    def setWhitespaceChars(self, chars, copy_defaults=False):
        """Override the set of characters treated as skippable whitespace."""
        self.skipWhitespace = True
        self.whiteChars = chars
        self.copyDefaultWhiteChars = copy_defaults
        return self
    def parseWithTabs(self):
        """Keep tab characters (suppress expandtabs) during parsing."""
        self.keepTabs = True
        return self
    def ignore(self, other):
        """Register an expression (e.g. comments) to skip wherever it occurs."""
        if isinstance(other, str_type):
            other = Suppress(other)
        if isinstance(other, Suppress):
            if (other not in self.ignoreExprs):
                self.ignoreExprs.append(other)
        else:
            self.ignoreExprs.append(Suppress(other.copy()))
        return self
    def setDebugActions(self, startAction, successAction, exceptionAction):
        """Install custom try/match/fail debug callbacks (None -> default)."""
        self.debugActions = ((startAction or _defaultStartDebugAction), (successAction or _defaultSuccessDebugAction), (exceptionAction or _defaultExceptionDebugAction))
        self.debug = True
        return self
    def setDebug(self, flag=True):
        """Toggle default debug tracing for this expression."""
        if flag:
            self.setDebugActions(_defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction)
        else:
            self.debug = False
        return self
    def __str__(self):
        return self.name
    def __repr__(self):
        return str(self)
    def streamline(self):
        """Mark as streamlined and invalidate the cached string representation."""
        self.streamlined = True
        self.strRepr = None
        return self
    def checkRecursion(self, parseElementList):
        # Subclass hook; leaf expressions cannot recurse.
        pass
    def validate(self, validateTrace=None):
        """Check the grammar for infinite left recursion."""
        self.checkRecursion([])
    def parseFile(self, file_or_filename, parseAll=False):
        """Parse the contents of a file object or a path-like filename."""
        try:
            file_contents = file_or_filename.read()
        except AttributeError:
            with open(file_or_filename, 'r') as f:
                file_contents = f.read()
        try:
            return self.parseString(file_contents, parseAll)
        except ParseBaseException as exc:
            if ParserElement.verbose_stacktrace:
                raise
            else:
                raise exc
    def __eq__(self, other):
        """Equal to another expression by attributes, or to a string it matches."""
        if (self is other):
            return True
        elif isinstance(other, str_type):
            return self.matches(other)
        elif isinstance(other, ParserElement):
            return (vars(self) == vars(other))
        return False
    def __hash__(self):
        # Identity hash: __eq__ above is too loose to hash by value.
        return id(self)
    def __req__(self, other):
        return (self == other)
    def __rne__(self, other):
        return (not (self == other))
    def matches(self, testString, parseAll=True):
        """Return True if this expression matches *testString* (no exception)."""
        try:
            self.parseString(str(testString), parseAll=parseAll)
            return True
        except ParseBaseException:
            return False
    def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False, postParse=None, file=None):
        """Run this expression against a list (or newline-separated string) of
        test inputs, printing each result; returns (all_passed, results).

        With failureTests=True a test "passes" when parsing fails.
        """
        if isinstance(tests, str_type):
            tests = list(map(type(tests).strip, tests.rstrip().splitlines()))
        if isinstance(comment, str_type):
            comment = Literal(comment)
        if (file is None):
            file = sys.stdout
        print_ = file.write
        allResults = []
        comments = []
        success = True
        # Interpret literal '\n' sequences in test strings as real newlines.
        NL = Literal('\\n').addParseAction(replaceWith('\n')).ignore(quotedString)
        BOM = '\ufeff'
        for t in tests:
            if (((comment is not None) and comment.matches(t, False)) or (comments and (not t))):
                comments.append(t)
                continue
            if (not t):
                continue
            out = ['\n'.join(comments), t]
            comments = []
            try:
                t = NL.transformString(t.lstrip(BOM))
                result = self.parseString(t, parseAll=parseAll)
            except ParseBaseException as pe:
                fatal = ('(FATAL)' if isinstance(pe, ParseFatalException) else '')
                if ('\n' in t):
                    out.append(line(pe.loc, t))
                    out.append((((' ' * (col(pe.loc, t) - 1)) + '^') + fatal))
                else:
                    out.append((((' ' * pe.loc) + '^') + fatal))
                out.append(('FAIL: ' + str(pe)))
                success = (success and failureTests)
                result = pe
            except Exception as exc:
                out.append(('FAIL-EXCEPTION: ' + str(exc)))
                success = (success and failureTests)
                result = exc
            else:
                success = (success and (not failureTests))
                if (postParse is not None):
                    try:
                        pp_value = postParse(t, result)
                        if (pp_value is not None):
                            if isinstance(pp_value, ParseResults):
                                out.append(pp_value.dump())
                            else:
                                out.append(str(pp_value))
                        else:
                            out.append(result.dump())
                    except Exception as e:
                        out.append(result.dump(full=fullDump))
                        out.append('{} failed: {}: {}'.format(postParse.__name__, type(e).__name__, e))
                else:
                    out.append(result.dump(full=fullDump))
            out.append('')
            if printResults:
                print_('\n'.join(out))
            allResults.append((t, result))
        return (success, allResults)
def logging_set(output_dir):
    """Configure root logging to write to a timestamped file under
    *output_dir* and echo records to the console; return the root logger
    set to INFO level."""
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    log_path = os.path.join(output_dir, 'train_{}.log'.format(timestamp))
    logging.basicConfig(filename=log_path, format='%(message)s')
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
    stream_handler = logging.StreamHandler()
    logging.getLogger('').addHandler(stream_handler)
    return root_logger
@pytest.fixture(scope='function')
def tiffs(tmpdir):
    """Create three derived copies of tests/data/RGB.byte.tif in *tmpdir*:
    'shadowed.tif' (4 bands, band 4 duplicating the last), 'no-nodata.tif'
    (nodata tag removed), and 'sidecar-masked.tif' (all-zero mask sidecar).

    Note: the original first line was a bare `(scope='function')` — the
    `@pytest.fixture` decorator had been lost; restored.
    """
    with rasterio.open('tests/data/RGB.byte.tif') as src:
        profile = src.profile
        shadowed_profile = profile.copy()
        shadowed_profile['count'] = 4
        with rasterio.open(str(tmpdir.join('shadowed.tif')), 'w', **shadowed_profile) as dst:
            for (i, band) in enumerate(src.read(masked=False), 1):
                dst.write(band, i)
            # Band 4 "shadows" the last band read in the loop.
            dst.write(band, 4)
        del profile['nodata']
        with rasterio.open(str(tmpdir.join('no-nodata.tif')), 'w', **profile) as dst:
            dst.write(src.read(masked=False))
        with rasterio.open(str(tmpdir.join('sidecar-masked.tif')), 'w', **profile) as dst:
            dst.write(src.read(masked=False))
            # All-zero mask: every pixel is marked invalid in the sidecar.
            mask = np.zeros(src.shape, dtype='uint8')
            dst.write_mask(mask)
    return tmpdir
def print_client_info(disp, client):
    """Print X-Resource diagnostics for *client* on display *disp*:
    resource counts by type, pixmap byte usage, the client's pid, and
    total bytes consumed by its resources."""
    print('client: {}'.format(client))
    resources = disp.res_query_client_resources(client)
    type_counts = [r.count for r in resources.types]
    # Fixed typo in the printed label: 'resouces' -> 'resources'.
    print('\tresources: {} resources of {} types'.format(sum(type_counts), len(type_counts)))
    pb = disp.res_query_client_pixmap_bytes(client)
    print('\tpixmaps: {} bytes {} overflow'.format(pb.bytes, pb.bytes_overflow))
    pid = query_client_id(disp, client)
    print('\tpid: {}'.format(pid))
    # resource=0/type=0 acts as a wildcard spec: query all resources.
    rb = disp.res_query_resource_bytes(client, [{'resource': 0, 'type': 0}])
    sizes = [s.size.bytes for s in rb.sizes]
    print('\t{} resources consume {} bytes'.format(len(sizes), sum(sizes)))
class AnyStage(nn.Sequential):
    """One RegNet stage: *depth* blocks chained sequentially.

    Only the first block applies the stage stride and the in->out width
    change; subsequent blocks keep width_out and stride 1. The summed
    per-block depth is exposed as self.stage_depth.
    """
    def __init__(self, width_in: int, width_out: int, stride: int, depth: int, block_constructor: nn.Module, activation: nn.Module, bot_mul: float, group_width: int, params: 'RegNetParams', stage_index: int=0):
        super().__init__()
        self.stage_depth = 0
        for block_idx in range(depth):
            is_first = (block_idx == 0)
            block = block_constructor(
                width_in if is_first else width_out,
                width_out,
                stride if is_first else 1,
                params.bn_epsilon,
                params.bn_momentum,
                activation,
                bot_mul,
                group_width,
                params.se_ratio,
            )
            self.stage_depth += block.depth
            self.add_module(f'block{stage_index}-{block_idx}', block)
def patch_messages():
    """Monkey-patch PyLinter.add_message so that, after each message is
    recorded, reporters exposing a `handle_node` hook also receive the
    message definition and the offending AST node."""
    original_add_message = PyLinter.add_message
    def patched_add_message(self, msg_id, line=None, node=None, args=None, confidence=UNDEFINED, col_offset=None, end_lineno=None, end_col_offset=None):
        # Preserve the stock behavior first.
        original_add_message(self, msg_id, line, node, args, confidence, col_offset, end_lineno, end_col_offset)
        definition = self.msgs_store.get_message_definitions(msg_id)[0]
        if hasattr(self.reporter, 'handle_node'):
            self.reporter.handle_node(definition, node)
    PyLinter.add_message = patched_add_message
class MIPSLexer(RegexLexer):
    """Pygments lexer for MIPS assembly."""
    name = 'MIPS'
    aliases = ['mips']
    version_added = ''
    filenames = ['*.mips', '*.MIPS']
    # Fixed: the original line was an unterminated string literal (`url = '`),
    # a syntax error. Restored to the upstream Pygments value — verify.
    url = 'https://mips.com'
    keywords = ['add', 'sub', 'subu', 'addi', 'subi', 'addu', 'addiu', 'mul', 'mult', 'multu', 'mulu', 'madd', 'maddu', 'msub', 'msubu', 'div', 'divu', 'and', 'or', 'nor', 'xor', 'andi', 'ori', 'xori', 'clo', 'clz', 'sll', 'srl', 'sllv', 'srlv', 'sra', 'srav', 'slt', 'sltu', 'slti', 'sltiu', 'mfhi', 'mthi', 'mflo', 'mtlo', 'movn', 'movz', 'movf', 'movt', 'j', 'jal', 'jalr', 'jr', 'bc1f', 'bc1t', 'beq', 'bgez', 'bgezal', 'bgtz', 'blez', 'bltzal', 'bltz', 'bne', 'lui', 'lb', 'lbu', 'lh', 'lhu', 'lw', 'lwcl', 'lwl', 'lwr', 'sb', 'sh', 'sw', 'swl', 'swr', 'll', 'sc', 'teq', 'teqi', 'tne', 'tneqi', 'tge', 'tgeu', 'tgei', 'tgeiu', 'tlt', 'tltu', 'tlti', 'tltiu', 'eret', 'break', 'bop', 'syscall', 'add.s', 'add.d', 'sub.s', 'sub.d', 'mul.s', 'mul.d', 'div.s', 'div.d', 'neg.d', 'neg.s', 'c.e.d', 'c.e.s', 'c.le.d', 'c.le.s', 'c.lt.s', 'c.lt.d', 'madd.s', 'madd.d', 'msub.s', 'msub.d', 'mov.d', 'move.s', 'movf.d', 'movf.s', 'movt.d', 'movt.s', 'movn.d', 'movn.s', 'movnzd', 'movz.s', 'movz.d', 'cvt.d.s', 'cvt.d.w', 'cvt.s.d', 'cvt.s.w', 'cvt.w.d', 'cvt.w.s', 'trunc.w.d', 'trunc.w.s', 'abs.s', 'abs.d', 'sqrt.s', 'sqrt.d', 'ceil.w.d', 'ceil.w.s', 'floor.w.d', 'floor.w.s', 'round.w.d', 'round.w.s']
    pseudoinstructions = ['rem', 'remu', 'mulo', 'mulou', 'abs', 'neg', 'negu', 'not', 'rol', 'ror', 'b', 'beqz', 'bge', 'bgeu', 'bgt', 'bgtu', 'ble', 'bleu', 'blt', 'bltu', 'bnez', 'la', 'li', 'ld', 'ulh', 'ulhu', 'ulw', 'sd', 'ush', 'usw', 'move', 'sgt', 'sgtu', 'sge', 'sgeu', 'sle', 'sleu', 'sne', 'seq', 'l.d', 'l.s', 's.d', 's.s']
    directives = ['.align', '.ascii', '.asciiz', '.byte', '.data', '.double', '.extern', '.float', '.globl', '.half', '.kdata', '.ktext', '.space', '.text', '.word']
    deprecated = ['beql', 'bnel', 'bgtzl', 'bgezl', 'bltzl', 'blezl', 'bltzall', 'bgezall']
    # NOTE(review): the Name.Builtin pattern contains an empty alternative
    # ('=>||\$') — upstream likely had more operators between the pipes
    # (e.g. shift operators); confirm against the Pygments source.
    tokens = {'root': [('\\s+', Whitespace), ('#.*', Comment), ('"', String, 'string'), ('-?[0-9]+?', Keyword.Constant), ('\\w*:', Name.Function), (words(deprecated, suffix='\\b'), Keyword.Pseudo), (words(pseudoinstructions, suffix='\\b'), Name.Variable), (words(keywords, suffix='\\b'), Keyword), ('[slm][ftwd]c[0-9]([.]d)?', Keyword), ('\\$(f?[0-2][0-9]|f?3[01]|[ft]?[0-9]|[vk][01]|a[0-3]|s[0-7]|[gsf]p|ra|at|zero)', Keyword.Type), (words(directives, suffix='\\b'), Name.Entity), (':|,|;|\\{|\\}|=>||\\$|=', Name.Builtin), ('\\w+', Text), ('.', Text)], 'string': [('\\\\.', String.Escape), ('"', String, '#pop'), ('[^\\\\"]+', String)]}
class DatasetImage(Dataset):
    """Folder-of-images dataset yielding (tensor, path) pairs.

    Scans `data_dir` (one subdirectory level plus the top level) for
    jpg/jpeg files.  In multi mode, `self.transform` is treated as an
    iterable of transforms and a list of tensors is returned (TTA-style).
    """

    def __init__(self, data_dir, transform=transform_train, is_test=False, is_multi=False, tensor_norm=False):
        self.data_dir = data_dir
        # Collect *.jpg/*.jpeg one level deep, plus top-level *.jpg.
        self.paths = glob.glob(f'{data_dir}/**/*.jpg')
        self.paths += glob.glob(f'{data_dir}/**/*.jpeg')
        self.paths += glob.glob(f'{data_dir}/*.jpg')
        # NOTE(review): the `transform` argument is ignored — the module-level
        # transform_tests/transform_train are always chosen by `is_test`.
        # Kept as-is to preserve caller-visible behavior; confirm intent.
        if is_test:
            self.transform = transform_tests
        else:
            self.transform = transform_train
        self.is_multi = is_multi
        self.is_test = is_test
        self.tensor_norm = tensor_norm

    def __getitem__(self, index):
        """Return (tensor, path), or (list-of-tensors, path) in multi mode."""
        path = self.paths[index]
        image = cv2.imread(path)
        if (not self.is_multi):
            if (self.transform is not None):
                # Fixed: apply this dataset's own transform.  The original
                # called the unrelated module global `transform_test`, which
                # silently ignored the is_test/transform configuration.
                image = self.transform(image=image)['image']
            image = self.to_tensor(image)
            return (image, path)
        else:
            images = []
            for transform in self.transform:
                image_cur = transform(image=image)['image']
                images.append(self.to_tensor(image_cur))
            return (images, path)

    def to_tensor(self, x):
        """Convert an HWC numpy image to a CHW float torch tensor.

        Applies `normalize` when tensor_norm is set; otherwise scales
        uint8 inputs into [0, 1].
        """
        if self.tensor_norm:
            x = normalize(x)
        elif (x.dtype == np.uint8):
            x = (x / 255)
        x = x.transpose(2, 0, 1)
        return torch.from_numpy(x).float()

    def __len__(self):
        return len(self.paths)
class PTBInput(object):
    """Bundle batching parameters with a queued PTB data producer.

    `epoch_size` is the number of `num_steps`-wide windows that fit in one
    pass over `data` when it is split into `batch_size` parallel streams.
    """

    def __init__(self, batch_size, num_steps, data, name=None):
        self.batch_size = batch_size
        self.num_steps = num_steps
        # Each stream holds len(data) // batch_size tokens; the final token of
        # every window is the target for the next, hence the "- 1".
        tokens_per_stream = len(data) // batch_size
        self.epoch_size = (tokens_per_stream - 1) // num_steps
        self.data_queue = reader.ptb_producer(data, batch_size, num_steps, name=name)
class FlaubertOnnxConfig(OnnxConfig):
    """ONNX export configuration for Flaubert models."""

    def inputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        """Map each model input name to its dynamic axes.

        Multiple-choice tasks carry an extra "choice" axis between the
        batch and sequence axes; all other tasks use (batch, sequence).
        """
        if (self.task == 'multiple-choice'):
            axes = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            axes = {0: 'batch', 1: 'sequence'}
        return OrderedDict((name, axes) for name in ('input_ids', 'attention_mask'))
# NOTE(review): the decorator on the next line lost its callable name during
# extraction (presumably Qiling's "@winsdkapi"); as written it is a syntax
# error — restore the decorator name against the original source.
(cc=STDCALL, params={'uFormat': UINT})
def hook_GetClipboardData(ql: Qiling, address: int, params):
    """Emulated Win32 GetClipboardData.

    Copies the emulated clipboard contents for the requested format into a
    freshly allocated heap buffer and returns its address, or 0 (NULL) when
    no data is available for that format.
    """
    uFormat = params['uFormat']
    data = ql.os.clipboard.get_data(uFormat)
    if data:
        # Hand the caller a heap buffer it can read, mirroring the real API's
        # handle-to-memory contract.
        addr = ql.os.heap.alloc(len(data))
        ql.mem.write(addr, data)
    else:
        ql.log.debug(f'Failed to get clipboard data (format = {uFormat})')
        addr = 0
    return addr
def get_max_combination(list_of_extracted_meta, expected_meta_form):
    """Find the best-scoring union of tags from 1- or 2-rule combinations.

    Every combination of one or two rules' extracted tag sets is merged and
    scored (f1_score) against `expected_meta_form`.  The best combination at
    or above MATCH_THRESHOLD wins.

    Returns a tuple of (per-rule 0/1 label list marking the winning rules
    that actually extracted something, winning merged tag set).
    """
    n_rules = len(list_of_extracted_meta)
    best_labels = [0] * n_rules
    best_extracted = set()
    best_score = 0
    for combo_size in range(1, 3):
        for combo in itertools.combinations(range(n_rules), combo_size):
            merged_tags = set()
            for rule_idx in combo:
                merged_tags.update(list_of_extracted_meta[rule_idx])
            (y_true, y_pred) = get_y_pred_and_y_true_label(expected_meta_form, merged_tags)
            score = f1_score(y_true, y_pred)
            if score >= MATCH_THRESHOLD and score > best_score:
                # New best: reset the labels before marking this combination.
                best_labels = [0] * n_rules
                best_score = score
                best_extracted = merged_tags
                for rule_idx in combo:
                    # Only credit rules that contributed at least one tag.
                    if len(list_of_extracted_meta[rule_idx]) > 0:
                        best_labels[rule_idx] = 1
    return (best_labels, best_extracted)
class AFileManager(AObject):
    """AObject that persists itself to JSON and manages named directories.

    Maintains a name -> path mapping in `self.directories` (created on
    demand).  On load, if the JSON file has moved since it was written,
    registered directory paths under the old location are rewritten to the
    new one.  Standard directories 'data', 'backup' and 'temp' are always
    (re)registered; 'temp' can be emptied at init via `clear_temp`.
    """

    @staticmethod
    def AOBJECT_TYPE():
        # @staticmethod added: the original bare def took no arguments but is
        # invoked as self.AOBJECT_TYPE() below, which would raise TypeError.
        # Class-level calls (AFileManager.AOBJECT_TYPE()) keep working.
        return 'AFileManager'

    def getJSONPath(self):
        """Path of this manager's JSON serialization (its own path)."""
        return self.getPath()

    def __init__(self, path=None, clear_temp=None):
        AObject.__init__(self, path=path)
        self.initWithPath(path=path, clear_temp=clear_temp)

    def initializeBlank(self):
        AObject.initializeBlank(self)
        # name -> directory path (paths carry a trailing separator).
        self.directories = {}

    def getJSONName(self):
        """Default JSON file name, e.g. 'AFileManager.json'."""
        return (self.AOBJECT_TYPE() + '.json')

    def initWithPath(self, path=None, clear_temp=None):
        """Load or create JSON state at `path` and set up directories.

        `path` may be an existing JSON file or a directory that will hold
        one.  Raises AssertionError when it is neither.  After loading,
        detects relocation and rewrites registered directory paths.
        """
        oldpath = None
        newpath = path
        if path:
            if os.path.isfile(path):
                self.loadFromJSON(self.getJSONPath())
                oldpath = self.getPath()
            elif os.path.isdir(path):
                json_file_path = ((path + os.sep) + self.getJSONName())
                self.setPath(json_file_path)
                if os.path.isfile(self.getJSONPath()):
                    self.loadFromJSON(json_file_path)
                    oldpath = self.getPath()
                    newpath = json_file_path
                else:
                    # No existing state: write a fresh JSON file in place.
                    newpath = self.getJSONPath()
                    self.writeToJSON(json_path=newpath)
            else:
                assert False, 'Given AFileManager path is neither an existing directory or file! path: {} (AFileManager.py)'.format(path)
        self.setPath(file_path=newpath)
        if oldpath:
            oldir = get_dir_from_path(pathstring(oldpath))
            newdir = get_dir_from_path(pathstring(newpath))
            if (oldir != newdir):
                # The JSON file moved: rebase every registered directory that
                # lived under the old location.
                AWARN('FILEMANAGER FOUND FILE MOVED FROM:\n{}\nTO:\n{}\nUPDATING DIRECTORIES...'.format(oldir, newdir))
                for d in self.directories:
                    dpth = self.directories[d]
                    if dpth.startswith(oldir):
                        # Fixed: was dpth.lstrip(oldir) — lstrip strips a
                        # *character set*, which can also eat leading
                        # characters of the relative part.  Slice off the
                        # exact prefix instead.
                        dpthst = dpth[len(oldir):]
                        self.directories[d] = os.path.join(newdir, dpthst)
                        AWARN('{} updated to {}'.format(dpth, self.directories[d]))
        # Standard directories, always re-registered relative to our location.
        self.setDir('data', pathstring((((self.getDirectoryPath() + os.sep) + 'Data') + os.sep)))
        self.setDir('backup', pathstring(((self.getDir('data') + 'Backups') + os.sep)))
        self.setDir('temp', pathstring(((self.getDir('data') + 'TEMP') + os.sep)))
        temp_dir = self.getDir('temp')
        if (os.path.isdir(temp_dir) and clear_temp):
            # Best-effort cleanup: a failed delete is reported, not fatal.
            for the_file in os.listdir(temp_dir):
                file_path = os.path.join(temp_dir, the_file)
                try:
                    if os.path.isfile(file_path):
                        os.remove(file_path)
                except OSError as e:
                    print(e)
        make_sure_path_exists(temp_dir)

    def setDir(self, name, path):
        """Register `name` -> `path`, creating the directory if needed."""
        self.directories[name] = path
        make_sure_path_exists(path)
        return path

    def addDir(self, name):
        """Register (and create) a new subdirectory `name` under our root."""
        # Fixed: the assertion message had a '{}' placeholder but no .format().
        assert (name not in self.directories), 'tried to add {} dir to AFileManager, but this dir is already set'.format(name)
        return self.setDir(name, pathstring((((self.getDirectoryPath() + os.sep) + name) + os.sep)))

    def getDir(self, name):
        """Return the registered path for `name`, or None if unknown."""
        return self.directories.get(name)

    def emptyDir(self, name):
        """Delete and recreate the registered directory `name`."""
        dpth = self.getDir(name)
        if ((dpth is not None) and os.path.isdir(dpth)):
            shutil.rmtree(dpth)
        make_sure_path_exists(dpth)

    def deleteDir(self, name):
        """Delete the directory `name` from disk and unregister it."""
        dpth = self.getDir(name)
        if ((dpth is not None) and os.path.isdir(dpth)):
            shutil.rmtree(dpth)
        # Rebuild the dict so the removal is atomic w.r.t. other references.
        d = dict(self.directories)
        del d[name]
        self.directories = d

    def toDictionary(self):
        """Serialize: AObject state plus the directory registry."""
        d = AObject.toDictionary(self)
        d['directories'] = self.directories
        return d

    def copyPathToDir(self, path_to_copy, dest_dir):
        """Copy a file or directory tree into the registered dir `dest_dir`."""
        dest_path = self.getDir(dest_dir)
        if dest_path:
            if os.path.isdir(path_to_copy):
                copy_tree(src=path_to_copy, dst=dest_path)
            elif os.path.isfile(path_to_copy):
                shutil.copy2(path_to_copy, dest_path)
        return

    def copyDirToPath(self, dir_to_copy, dest_path):
        """Copy the registered dir `dir_to_copy` into an existing `dest_path`."""
        src_path = self.getDir(dir_to_copy)
        if src_path:
            if os.path.isdir(dest_path):
                copy_tree(src=src_path, dst=dest_path)
        return

    @staticmethod
    def copyRandomFractionOfFilesInSourceDir(source_dir, dest_dir, fraction=1.0, ext=None):
        """Recursively copy a deterministic random fraction of files.

        Files in `source_dir` (optionally filtered by extension `ext`) are
        shuffled with a fixed seed and the first `fraction` of them copied
        into `dest_dir`; subdirectories are recursed with the same settings.
        @staticmethod added: the original bare def relied on class-attribute
        access; the decorator also makes instance access safe.
        """
        directories = []
        subdirnames = []
        filepaths = []
        for filename in os.listdir(source_dir):
            path = os.path.join(source_dir, filename)
            if os.path.isdir(path):
                directories.append(path)
                subdirnames.append(filename)
            elif ((ext is None) or filename.lower().endswith(ext)):
                filepaths.append(path)
        n_to_copy = int((len(filepaths) * fraction))
        # Fixed seed so repeated runs select the same subset.
        random_seed = 0
        random.seed(random_seed)
        random.shuffle(filepaths)
        for src in filepaths[:n_to_copy]:
            shutil.copy2(src, dest_dir)
        for (subdir, subname) in zip(directories, subdirnames):
            subdest = pathstring((os.path.join(dest_dir, subname) + os.sep))
            make_sure_dir_exists(subdest)
            AFileManager.copyRandomFractionOfFilesInSourceDir(source_dir=subdir, dest_dir=subdest, fraction=fraction, ext=ext)

    def initFromDictionary(self, d):
        """Deserialize: AObject state plus the directory registry."""
        AObject.initFromDictionary(self, d)
        self.directories = d['directories']

    def save(self):
        """Write state to JSON, moving any previous file into 'backup'."""
        if os.path.isfile(self.getJSONPath()):
            # getDir('backup') already ends with os.sep; the extra separator
            # is redundant but harmless and kept for path-string stability.
            os.rename(self.getJSONPath(), (((self.getDir('backup') + os.sep) + self.AOBJECT_TYPE()) + '.json'))
        self.writeToJSON(self.getJSONPath())
class MoonshotSlippageTestCase(unittest.TestCase):
    """Tests for slippage handling in Moonshot backtests.

    Each test defines an in-line strategy, patches the data-loading
    functions with deterministic fixtures, runs a backtest, and asserts the
    exact contents of the Signal/Weight/NetExposure/Turnover/Slippage/Return
    fields of the results DataFrame.
    """

    def tearDown(self):
        # Remove any pickle caches the backtests left behind.
        for file in glob.glob('{0}/moonshot*.pkl'.format(TMP_DIR)):
            os.remove(file)

    def test_no_slippage(self):
        """With no slippage configured, Slippage is zero and returns are unadjusted."""
        class BuyBelow10ShortAbove10(Moonshot):
            def prices_to_signals(self, prices):
                long_signals = (prices.loc['Close'] <= 10)
                short_signals = (prices.loc['Close'] > 10)
                signals = long_signals.astype(int).where(long_signals, (- short_signals.astype(int)))
                return signals
        def mock_get_prices(*args, **kwargs):
            # 4 daily bars x (Close, Volume) for two sids.
            dt_idx = pd.DatetimeIndex(['2018-05-01', '2018-05-02', '2018-05-03', '2018-05-04'])
            fields = ['Close', 'Volume']
            idx = pd.MultiIndex.from_product([fields, dt_idx], names=['Field', 'Date'])
            prices = pd.DataFrame({'FI12345': [9, 11, 10.5, 9.99, 5000, 16000, 8800, 9900], 'FI23456': [9.89, 11, 8.5, 10.5, 15000, 14000, 28800, 17000]}, index=idx)
            return prices
        def mock_download_master_file(f, *args, **kwargs):
            # Minimal securities master written as CSV into the file object.
            master_fields = ['Timezone', 'Symbol', 'SecType', 'Currency', 'PriceMagnifier', 'Multiplier']
            securities = pd.DataFrame({'FI12345': ['America/New_York', 'ABC', 'STK', 'USD', None, None], 'FI23456': ['America/New_York', 'DEF', 'STK', 'USD', None, None]}, index=master_fields)
            securities.columns.name = 'Sid'
            securities.T.to_csv(f, index=True, header=True)
            f.seek(0)
        with patch('moonshot.strategies.base.get_prices', new=mock_get_prices):
            with patch('moonshot.strategies.base.download_master_file', new=mock_download_master_file):
                results = BuyBelow10ShortAbove10().backtest()
        self.assertSetEqual(set(results.index.get_level_values('Field')), {'Commission', 'AbsExposure', 'Signal', 'Return', 'Slippage', 'NetExposure', 'TotalHoldings', 'Turnover', 'AbsWeight', 'Weight'})
        # Round and stringify NaNs so dict comparisons are exact.
        results = results.round(7)
        results = results.where(results.notnull(), 'nan')
        signals = results.loc['Signal'].reset_index()
        signals['Date'] = signals.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        self.assertDictEqual(signals.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-03T00:00:00', '2018-05-04T00:00:00'], 'FI12345': [1.0, (- 1.0), (- 1.0), 1.0], 'FI23456': [1.0, (- 1.0), 1.0, (- 1.0)]})
        weights = results.loc['Weight'].reset_index()
        weights['Date'] = weights.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        self.assertDictEqual(weights.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-03T00:00:00', '2018-05-04T00:00:00'], 'FI12345': [0.5, (- 0.5), (- 0.5), 0.5], 'FI23456': [0.5, (- 0.5), 0.5, (- 0.5)]})
        net_positions = results.loc['NetExposure'].reset_index()
        net_positions['Date'] = net_positions.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        self.assertDictEqual(net_positions.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-03T00:00:00', '2018-05-04T00:00:00'], 'FI12345': ['nan', 0.5, (- 0.5), (- 0.5)], 'FI23456': ['nan', 0.5, (- 0.5), 0.5]})
        turnover = results.loc['Turnover'].reset_index()
        turnover['Date'] = turnover.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        self.assertDictEqual(turnover.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-03T00:00:00', '2018-05-04T00:00:00'], 'FI12345': ['nan', 0.5, 1.0, 0.0], 'FI23456': ['nan', 0.5, 1.0, 1.0]})
        slippage = results.loc['Slippage'].reset_index()
        slippage['Date'] = slippage.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        self.assertDictEqual(slippage.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-03T00:00:00', '2018-05-04T00:00:00'], 'FI12345': [0.0, 0.0, 0.0, 0.0], 'FI23456': [0.0, 0.0, 0.0, 0.0]})
        returns = results.loc['Return']
        returns = returns.reset_index()
        returns['Date'] = returns.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        self.assertDictEqual(returns.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-03T00:00:00', '2018-05-04T00:00:00'], 'FI12345': [0.0, 0.0, (- 0.0227273), 0.0242857], 'FI23456': [0.0, 0.0, (- 0.1136364), (- 0.1176471)]})

    def test_apply_slippage(self):
        """A FixedSlippage class deducts ONE_WAY_SLIPPAGE * turnover from returns."""
        class TestSlippage(FixedSlippage):
            ONE_WAY_SLIPPAGE = 0.001
        class BuyBelow10ShortAbove10(Moonshot):
            SLIPPAGE_CLASSES = TestSlippage
            def prices_to_signals(self, prices):
                long_signals = (prices.loc['Close'] <= 10)
                short_signals = (prices.loc['Close'] > 10)
                signals = long_signals.astype(int).where(long_signals, (- short_signals.astype(int)))
                return signals
        def mock_get_prices(*args, **kwargs):
            dt_idx = pd.DatetimeIndex(['2018-05-01', '2018-05-02', '2018-05-03', '2018-05-04'])
            fields = ['Close', 'Volume']
            idx = pd.MultiIndex.from_product([fields, dt_idx], names=['Field', 'Date'])
            prices = pd.DataFrame({'FI12345': [9, 11, 10.5, 9.99, 5000, 16000, 8800, 9900], 'FI23456': [9.89, 11, 8.5, 10.5, 15000, 14000, 28800, 17000]}, index=idx)
            return prices
        def mock_download_master_file(f, *args, **kwargs):
            master_fields = ['Timezone', 'Symbol', 'SecType', 'Currency', 'PriceMagnifier', 'Multiplier']
            securities = pd.DataFrame({'FI12345': ['America/New_York', 'ABC', 'STK', 'USD', None, None], 'FI23456': ['America/New_York', 'DEF', 'STK', 'USD', None, None]}, index=master_fields)
            securities.columns.name = 'Sid'
            securities.T.to_csv(f, index=True, header=True)
            f.seek(0)
        with patch('moonshot.strategies.base.get_prices', new=mock_get_prices):
            with patch('moonshot.strategies.base.download_master_file', new=mock_download_master_file):
                results = BuyBelow10ShortAbove10().backtest()
        self.assertSetEqual(set(results.index.get_level_values('Field')), {'Commission', 'AbsExposure', 'Signal', 'Return', 'Slippage', 'NetExposure', 'TotalHoldings', 'Turnover', 'AbsWeight', 'Weight'})
        results = results.round(7)
        results = results.where(results.notnull(), 'nan')
        signals = results.loc['Signal'].reset_index()
        signals['Date'] = signals.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        self.assertDictEqual(signals.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-03T00:00:00', '2018-05-04T00:00:00'], 'FI12345': [1.0, (- 1.0), (- 1.0), 1.0], 'FI23456': [1.0, (- 1.0), 1.0, (- 1.0)]})
        weights = results.loc['Weight'].reset_index()
        weights['Date'] = weights.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        self.assertDictEqual(weights.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-03T00:00:00', '2018-05-04T00:00:00'], 'FI12345': [0.5, (- 0.5), (- 0.5), 0.5], 'FI23456': [0.5, (- 0.5), 0.5, (- 0.5)]})
        net_positions = results.loc['NetExposure'].reset_index()
        net_positions['Date'] = net_positions.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        self.assertDictEqual(net_positions.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-03T00:00:00', '2018-05-04T00:00:00'], 'FI12345': ['nan', 0.5, (- 0.5), (- 0.5)], 'FI23456': ['nan', 0.5, (- 0.5), 0.5]})
        turnover = results.loc['Turnover'].reset_index()
        turnover['Date'] = turnover.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        self.assertDictEqual(turnover.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-03T00:00:00', '2018-05-04T00:00:00'], 'FI12345': ['nan', 0.5, 1.0, 0.0], 'FI23456': ['nan', 0.5, 1.0, 1.0]})
        slippage = results.loc['Slippage'].reset_index()
        slippage['Date'] = slippage.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        # Slippage = ONE_WAY_SLIPPAGE * turnover (e.g. 0.001 * 0.5 = 0.0005).
        self.assertDictEqual(slippage.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-03T00:00:00', '2018-05-04T00:00:00'], 'FI12345': [0.0, 0.0005, 0.001, 0.0], 'FI23456': [0.0, 0.0005, 0.001, 0.001]})
        returns = results.loc['Return']
        returns = returns.reset_index()
        returns['Date'] = returns.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        self.assertDictEqual(returns.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-03T00:00:00', '2018-05-04T00:00:00'], 'FI12345': [0.0, (- 0.0005), (- 0.0237273), 0.0242857], 'FI23456': [0.0, (- 0.0005), (- 0.1146364), (- 0.1186471)]})

    def test_apply_slippage_continuous_intraday(self):
        """Slippage is also applied per-bar on continuous intraday prices."""
        class TestSlippage(FixedSlippage):
            ONE_WAY_SLIPPAGE = 0.001
        class BuyBelow10ShortAbove10ContIntraday(Moonshot):
            SLIPPAGE_CLASSES = TestSlippage
            def prices_to_signals(self, prices):
                long_signals = (prices.loc['Close'] <= 10)
                short_signals = (prices.loc['Close'] > 10)
                signals = long_signals.astype(int).where(long_signals, (- short_signals.astype(int)))
                return signals
        def mock_get_prices(*args, **kwargs):
            # 2 days x 3 intraday bars (Field, Date, Time index).
            dt_idx = pd.DatetimeIndex(['2018-05-01', '2018-05-02'])
            fields = ['Close']
            times = ['10:00:00', '11:00:00', '12:00:00']
            idx = pd.MultiIndex.from_product([fields, dt_idx, times], names=['Field', 'Date', 'Time'])
            prices = pd.DataFrame({'FI12345': [9.6, 10.45, 10.12, 15.45, 8.67, 12.3], 'FI23456': [10.56, 12.01, 10.5, 9.8, 13.4, 7.5]}, index=idx)
            return prices
        def mock_download_master_file(f, *args, **kwargs):
            master_fields = ['Timezone', 'Symbol', 'SecType', 'Currency', 'PriceMagnifier', 'Multiplier']
            securities = pd.DataFrame({'FI12345': ['America/New_York', 'ABC', 'STK', 'USD', None, None], 'FI23456': ['America/New_York', 'DEF', 'STK', 'USD', None, None]}, index=master_fields)
            securities.columns.name = 'Sid'
            securities.T.to_csv(f, index=True, header=True)
            f.seek(0)
        with patch('moonshot.strategies.base.get_prices', new=mock_get_prices):
            with patch('moonshot.strategies.base.download_master_file', new=mock_download_master_file):
                results = BuyBelow10ShortAbove10ContIntraday().backtest()
        self.assertSetEqual(set(results.index.get_level_values('Field')), {'Commission', 'AbsExposure', 'Signal', 'Return', 'Slippage', 'NetExposure', 'TotalHoldings', 'Turnover', 'AbsWeight', 'Weight'})
        results = results.round(7)
        results = results.where(results.notnull(), 'nan')
        signals = results.loc['Signal'].reset_index()
        signals['Date'] = signals.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        self.assertDictEqual(signals.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-01T00:00:00', '2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-02T00:00:00', '2018-05-02T00:00:00'], 'Time': ['10:00:00', '11:00:00', '12:00:00', '10:00:00', '11:00:00', '12:00:00'], 'FI12345': [1.0, (- 1.0), (- 1.0), (- 1.0), 1.0, (- 1.0)], 'FI23456': [(- 1.0), (- 1.0), (- 1.0), 1.0, (- 1.0), 1.0]})
        weights = results.loc['Weight'].reset_index()
        weights['Date'] = weights.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        self.assertDictEqual(weights.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-01T00:00:00', '2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-02T00:00:00', '2018-05-02T00:00:00'], 'Time': ['10:00:00', '11:00:00', '12:00:00', '10:00:00', '11:00:00', '12:00:00'], 'FI12345': [0.5, (- 0.5), (- 0.5), (- 0.5), 0.5, (- 0.5)], 'FI23456': [(- 0.5), (- 0.5), (- 0.5), 0.5, (- 0.5), 0.5]})
        net_positions = results.loc['NetExposure'].reset_index()
        net_positions['Date'] = net_positions.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        self.assertDictEqual(net_positions.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-01T00:00:00', '2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-02T00:00:00', '2018-05-02T00:00:00'], 'Time': ['10:00:00', '11:00:00', '12:00:00', '10:00:00', '11:00:00', '12:00:00'], 'FI12345': ['nan', 0.5, (- 0.5), (- 0.5), (- 0.5), 0.5], 'FI23456': ['nan', (- 0.5), (- 0.5), (- 0.5), 0.5, (- 0.5)]})
        turnover = results.loc['Turnover'].reset_index()
        turnover['Date'] = turnover.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        self.assertDictEqual(turnover.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-01T00:00:00', '2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-02T00:00:00', '2018-05-02T00:00:00'], 'Time': ['10:00:00', '11:00:00', '12:00:00', '10:00:00', '11:00:00', '12:00:00'], 'FI12345': ['nan', 0.5, 1.0, 0.0, 0.0, 1.0], 'FI23456': ['nan', 0.5, 0.0, 0.0, 1.0, 1.0]})
        slippage = results.loc['Slippage'].reset_index()
        slippage['Date'] = slippage.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        self.assertDictEqual(slippage.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-01T00:00:00', '2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-02T00:00:00', '2018-05-02T00:00:00'], 'Time': ['10:00:00', '11:00:00', '12:00:00', '10:00:00', '11:00:00', '12:00:00'], 'FI12345': [0.0, 0.0005, 0.001, 0.0, 0.0, 0.001], 'FI23456': [0.0, 0.0005, 0.0, 0.0, 0.001, 0.001]})
        returns = results.loc['Return'].reset_index()
        returns['Date'] = returns.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        self.assertDictEqual(returns.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-01T00:00:00', '2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-02T00:00:00', '2018-05-02T00:00:00'], 'Time': ['10:00:00', '11:00:00', '12:00:00', '10:00:00', '11:00:00', '12:00:00'], 'FI12345': [0.0, (- 0.0005), (- 0.0167895), (- 0.2633399), 0.2194175, (- 0.2103426)], 'FI23456': [0.0, (- 0.0005), 0.0628643, 0.0333333, (- 0.1846735), (- 0.2211493)]})

    def test_apply_SLIPPAGE_BPS(self):
        """SLIPPAGE_BPS applies a flat basis-point cost (20 bps = 0.002/unit turnover)."""
        class BuyBelow10ShortAbove10(Moonshot):
            SLIPPAGE_BPS = 20
            def prices_to_signals(self, prices):
                long_signals = (prices.loc['Close'] <= 10)
                short_signals = (prices.loc['Close'] > 10)
                signals = long_signals.astype(int).where(long_signals, (- short_signals.astype(int)))
                return signals
        def mock_get_prices(*args, **kwargs):
            dt_idx = pd.DatetimeIndex(['2018-05-01', '2018-05-02', '2018-05-03', '2018-05-04'])
            fields = ['Close', 'Volume']
            idx = pd.MultiIndex.from_product([fields, dt_idx], names=['Field', 'Date'])
            prices = pd.DataFrame({'FI12345': [9, 11, 10.5, 9.99, 5000, 16000, 8800, 9900], 'FI23456': [9.89, 11, 8.5, 10.5, 15000, 14000, 28800, 17000]}, index=idx)
            return prices
        def mock_download_master_file(f, *args, **kwargs):
            master_fields = ['Timezone', 'Symbol', 'SecType', 'Currency', 'PriceMagnifier', 'Multiplier']
            securities = pd.DataFrame({'FI12345': ['America/New_York', 'ABC', 'STK', 'USD', None, None], 'FI23456': ['America/New_York', 'DEF', 'STK', 'USD', None, None]}, index=master_fields)
            securities.columns.name = 'Sid'
            securities.T.to_csv(f, index=True, header=True)
            f.seek(0)
        with patch('moonshot.strategies.base.get_prices', new=mock_get_prices):
            with patch('moonshot.strategies.base.download_master_file', new=mock_download_master_file):
                results = BuyBelow10ShortAbove10().backtest()
        self.assertSetEqual(set(results.index.get_level_values('Field')), {'Commission', 'AbsExposure', 'Signal', 'Return', 'Slippage', 'NetExposure', 'TotalHoldings', 'Turnover', 'AbsWeight', 'Weight'})
        results = results.round(7)
        results = results.where(results.notnull(), 'nan')
        signals = results.loc['Signal'].reset_index()
        signals['Date'] = signals.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        self.assertDictEqual(signals.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-03T00:00:00', '2018-05-04T00:00:00'], 'FI12345': [1.0, (- 1.0), (- 1.0), 1.0], 'FI23456': [1.0, (- 1.0), 1.0, (- 1.0)]})
        weights = results.loc['Weight'].reset_index()
        weights['Date'] = weights.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        self.assertDictEqual(weights.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-03T00:00:00', '2018-05-04T00:00:00'], 'FI12345': [0.5, (- 0.5), (- 0.5), 0.5], 'FI23456': [0.5, (- 0.5), 0.5, (- 0.5)]})
        net_positions = results.loc['NetExposure'].reset_index()
        net_positions['Date'] = net_positions.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        self.assertDictEqual(net_positions.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-03T00:00:00', '2018-05-04T00:00:00'], 'FI12345': ['nan', 0.5, (- 0.5), (- 0.5)], 'FI23456': ['nan', 0.5, (- 0.5), 0.5]})
        turnover = results.loc['Turnover'].reset_index()
        turnover['Date'] = turnover.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        self.assertDictEqual(turnover.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-03T00:00:00', '2018-05-04T00:00:00'], 'FI12345': ['nan', 0.5, 1.0, 0.0], 'FI23456': ['nan', 0.5, 1.0, 1.0]})
        slippage = results.loc['Slippage'].reset_index()
        slippage['Date'] = slippage.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        self.assertDictEqual(slippage.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-03T00:00:00', '2018-05-04T00:00:00'], 'FI12345': [0.0, 0.001, 0.002, 0.0], 'FI23456': [0.0, 0.001, 0.002, 0.002]})
        returns = results.loc['Return']
        returns = returns.reset_index()
        returns['Date'] = returns.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        self.assertDictEqual(returns.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-03T00:00:00', '2018-05-04T00:00:00'], 'FI12345': [0.0, (- 0.001), (- 0.0247273), 0.0242857], 'FI23456': [0.0, (- 0.001), (- 0.1156364), (- 0.1196471)]})

    def test_apply_multiple_slippages(self):
        """Multiple slippage classes plus SLIPPAGE_BPS are additive (0.003+0.002+0.005)."""
        class TestSlippage1(FixedSlippage):
            ONE_WAY_SLIPPAGE = 0.003
        class TestSlippage2(FixedSlippage):
            ONE_WAY_SLIPPAGE = 0.002
        class BuyBelow10ShortAbove10(Moonshot):
            SLIPPAGE_CLASSES = (TestSlippage1, TestSlippage2)
            SLIPPAGE_BPS = 50
            def prices_to_signals(self, prices):
                long_signals = (prices.loc['Close'] <= 10)
                short_signals = (prices.loc['Close'] > 10)
                signals = long_signals.astype(int).where(long_signals, (- short_signals.astype(int)))
                return signals
        def mock_get_prices(*args, **kwargs):
            dt_idx = pd.DatetimeIndex(['2018-05-01', '2018-05-02', '2018-05-03', '2018-05-04'])
            fields = ['Close', 'Volume']
            idx = pd.MultiIndex.from_product([fields, dt_idx], names=['Field', 'Date'])
            prices = pd.DataFrame({'FI12345': [9, 11, 10.5, 9.99, 5000, 16000, 8800, 9900], 'FI23456': [9.89, 11, 8.5, 10.5, 15000, 14000, 28800, 17000]}, index=idx)
            return prices
        def mock_download_master_file(f, *args, **kwargs):
            master_fields = ['Timezone', 'Symbol', 'SecType', 'Currency', 'PriceMagnifier', 'Multiplier']
            securities = pd.DataFrame({'FI12345': ['America/New_York', 'ABC', 'STK', 'USD', None, None], 'FI23456': ['America/New_York', 'DEF', 'STK', 'USD', None, None]}, index=master_fields)
            securities.columns.name = 'Sid'
            securities.T.to_csv(f, index=True, header=True)
            f.seek(0)
        with patch('moonshot.strategies.base.get_prices', new=mock_get_prices):
            with patch('moonshot.strategies.base.download_master_file', new=mock_download_master_file):
                results = BuyBelow10ShortAbove10().backtest()
        self.assertSetEqual(set(results.index.get_level_values('Field')), {'Commission', 'AbsExposure', 'Signal', 'Return', 'Slippage', 'NetExposure', 'TotalHoldings', 'Turnover', 'AbsWeight', 'Weight'})
        results = results.round(7)
        results = results.where(results.notnull(), 'nan')
        signals = results.loc['Signal'].reset_index()
        signals['Date'] = signals.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        self.assertDictEqual(signals.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-03T00:00:00', '2018-05-04T00:00:00'], 'FI12345': [1.0, (- 1.0), (- 1.0), 1.0], 'FI23456': [1.0, (- 1.0), 1.0, (- 1.0)]})
        weights = results.loc['Weight'].reset_index()
        weights['Date'] = weights.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        self.assertDictEqual(weights.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-03T00:00:00', '2018-05-04T00:00:00'], 'FI12345': [0.5, (- 0.5), (- 0.5), 0.5], 'FI23456': [0.5, (- 0.5), 0.5, (- 0.5)]})
        net_positions = results.loc['NetExposure'].reset_index()
        net_positions['Date'] = net_positions.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        self.assertDictEqual(net_positions.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-03T00:00:00', '2018-05-04T00:00:00'], 'FI12345': ['nan', 0.5, (- 0.5), (- 0.5)], 'FI23456': ['nan', 0.5, (- 0.5), 0.5]})
        turnover = results.loc['Turnover'].reset_index()
        turnover['Date'] = turnover.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        self.assertDictEqual(turnover.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-03T00:00:00', '2018-05-04T00:00:00'], 'FI12345': ['nan', 0.5, 1.0, 0.0], 'FI23456': ['nan', 0.5, 1.0, 1.0]})
        slippage = results.loc['Slippage'].reset_index()
        slippage['Date'] = slippage.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        # Combined cost per unit turnover: 0.003 + 0.002 + 50bps = 0.01.
        self.assertDictEqual(slippage.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-03T00:00:00', '2018-05-04T00:00:00'], 'FI12345': [0.0, 0.005, 0.01, 0.0], 'FI23456': [0.0, 0.005, 0.01, 0.01]})
        returns = results.loc['Return']
        returns = returns.reset_index()
        returns['Date'] = returns.Date.dt.strftime('%Y-%m-%dT%H:%M:%S%z')
        self.assertDictEqual(returns.to_dict(orient='list'), {'Date': ['2018-05-01T00:00:00', '2018-05-02T00:00:00', '2018-05-03T00:00:00', '2018-05-04T00:00:00'], 'FI12345': [0.0, (- 0.005), (- 0.0327273), 0.0242857], 'FI23456': [0.0, (- 0.005), (- 0.1236364), (- 0.1276471)]})
def test_sqliteio_write_inserts_new_pixmap_item_without_filename(tmpfile, view, item):
    """A pixmap item with no filename gets a generated sqlar name on write."""
    view.scene.addItem(item)
    # Local renamed from `io` to avoid shadowing the stdlib module name.
    sqlite_io = SQLiteIO(tmpfile, view.scene, create_new=True)
    sqlite_io.write()
    assert item.save_id == 1
    query = ('SELECT items.data, sqlar.name FROM items '
             'INNER JOIN sqlar on sqlar.item_id = items.id')
    row = sqlite_io.fetchone(query)
    assert json.loads(row[0])['filename'] is None
    assert row[1] == '0001.png'
# NOTE(review): the decorator on the next line lost its leading
# "@pytest.mark" during extraction — restore it against the original source;
# as written the line is a syntax error.
.parametrize('retriever, documents, k', [pytest.param(retriever, documents(), k, id=f'retriever: {retriever.__class__.__name__}, k: {k}') for k in [None, 2, 4] for retriever in cherche_retrievers(on='article')])
def test_retriever(retriever, documents: list, k: int):
    """Exercise a retriever over the 'article' field with several k values.

    Checks result counts for a matching query, a stop-word-like query with
    no hits, and a query matching every document, plus the presence of the
    expected keys in each returned sample.
    """
    # NOTE(review): documents are both composed in (`retriever + documents`)
    # and added via retriever.add() — presumably intentional for this API's
    # test; confirm against the retriever contract.
    retriever = (retriever + documents)
    retriever.add(documents)
    answers = retriever(q='town', k=k)
    if ((k is None) or (k >= 1)):
        assert (len(answers) >= 1)
    else:
        assert (len(answers) == 0)
    for sample in answers:
        for key in ['title', 'article', 'author']:
            assert (key in sample)
    # 'un' is expected to match nothing.
    answers = retriever(q='un', k=k)
    assert (len(answers) == 0)
    # This query matches every document; k caps the result count.
    answers = retriever(q='Montreal Eiffel France', k=k)
    if ((k is None) or (k >= len(documents))):
        assert (len(answers) == len(documents))
    else:
        assert (len(answers) == k)
# NOTE(review): the decorator on the next line lost its callable name during
# extraction (presumably "@unittest.skipIf") — restore it against the
# original source; as written the line is a bare tuple expression.
((torch.__version__ < '1.6.0'), JIT_MSG)
class TestJitSequenceGenerator(TestJitSequenceGeneratorBase):
    """TorchScript export tests for the transformer sequence generator."""

    def test_export_transformer(self):
        """The bare transformer model must be scriptable."""
        model = self.transformer_model
        torch.jit.script(model)

    def test_ensemble_sequence_generator(self):
        """A SequenceGenerator wrapping the model must script, save and reload."""
        model = self.transformer_model
        generator = SequenceGenerator([model], self.task.tgt_dict, beam_size=2, no_repeat_ngram_size=2, max_len_b=10)
        scripted_model = torch.jit.script(generator)
        self._test_save_and_load(scripted_model)

    def test_export_ensemble_model(self):
        """An EnsembleModel of one transformer must be scriptable."""
        model = self.transformer_model
        ensemble_models = EnsembleModel([model])
        torch.jit.script(ensemble_models)
def test_poly_union():
    """poly_union returns union areas; with return_poly=True also the geometry."""
    # Non-polygon inputs are rejected with an assertion.
    with pytest.raises(AssertionError):
        utils.poly_union(0, 1)
    unit_square = utils.points2polygon([0, 0, 0, 1, 1, 1, 1, 0])
    far_square = utils.points2polygon([2, 2, 2, 3, 3, 3, 3, 2])
    degenerate = utils.points2polygon([0, 0, 0, 0, 0, 0, 0, 0])
    bowtie = utils.points2polygon([0, 0, 0, 1, 1, 0, 1, 1])
    triangle = utils.points2polygon([0.5, 0.5, 1, 0, 1, 1, 0.5, 0.5])
    # Disjoint squares: areas add; identical polygons: area unchanged.
    assert utils.poly_union(unit_square, far_square) == 2
    assert utils.poly_union(unit_square, unit_square) == 1
    assert utils.poly_union(degenerate, degenerate) == 0
    # Self-intersecting (invalid) polygons honor invalid_ret.
    assert utils.poly_union(bowtie, bowtie, invalid_ret=1) == 1
    assert utils.poly_union(bowtie, bowtie, invalid_ret=None) == 0.25
    assert utils.poly_union(degenerate, bowtie) == 0.25
    assert utils.poly_union(bowtie, triangle) == 0.5
    # return_poly=True also yields the union geometry (or None when invalid).
    (_, union_geom) = utils.poly_union(unit_square, far_square, return_poly=True)
    assert isinstance(union_geom, MultiPolygon)
    (_, union_geom) = utils.poly_union(bowtie, bowtie, return_poly=True)
    assert isinstance(union_geom, Polygon)
    (_, union_geom) = utils.poly_union(degenerate, bowtie, invalid_ret=0, return_poly=True)
    assert (union_geom is None)
def main():
    """Entry point: build the config from CLI args and dispatch on the task."""
    cfg = parse_cmdline_args_to_config(Config())
    # Respect an externally set GPU mask; otherwise apply the configured one.
    os.environ.setdefault('CUDA_VISIBLE_DEVICES', cfg.available_gpus)
    log_config(cfg)
    task_handlers = {'video': generate_video, 'video_interp': interpolate_video}
    # Any unrecognized task falls through to diverse-sample generation.
    handler = task_handlers.get(cfg.task, generate_diverse_samples)
    handler(cfg)
class Vox256_cross(Dataset):
    """Cross-identity Vox256 test set: one source frame + a driving clip.

    Pairs come from pairs_annotations/vox256.csv; frames are read from the
    per-video directories under ./datasets/vox/test/.
    """

    def __init__(self, transform=None):
        self.ds_path = './datasets/vox/test/'
        self.videos = os.listdir(self.ds_path)
        self.anno = pd.read_csv('pairs_annotations/vox256.csv')
        self.transform = transform

    def __getitem__(self, idx):
        """Return (source image, driving frames, source name, driving name)."""
        src_name = self.anno['source'][idx]
        drv_name = self.anno['driving'][idx]
        src_dir = os.path.join(self.ds_path, src_name)
        drv_dir = os.path.join(self.ds_path, drv_name)
        # First frame of the source clip; first 100 frames of the driving clip.
        src_frame_path = sorted(glob.glob((src_dir + '/*.png')))[0]
        drv_frame_paths = sorted(glob.glob((drv_dir + '/*.png')))[:100]
        src_img = self.transform(Image.open(src_frame_path).convert('RGB'))
        drv_vid = [self.transform(Image.open(path).convert('RGB')) for path in drv_frame_paths]
        return (src_img, drv_vid, src_name, drv_name)

    def __len__(self):
        return len(self.videos)
# NOTE(review): the line below is a truncated decorator (presumably
# "@keras_test") — restore it against the original source.
_test
def test_conv1d_legacy_interface():
    """Legacy Convolution1D kwargs must map to the equivalent Conv1D config."""
    # input_dim/input_length vs input_shape spelling.
    old_layer = keras.layers.Convolution1D(5, filter_length=3, input_dim=3, input_length=4, name='conv')
    new_layer = keras.layers.Conv1D(5, 3, name='conv', input_shape=(4, 3))
    assert (json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config()))
    # init/subsample/border_mode/W_*/b_* legacy names vs their modern forms.
    old_layer = keras.layers.Convolution1D(5, 3, init='normal', subsample_length=2, border_mode='valid', W_regularizer='l1', b_regularizer='l2', W_constraint='maxnorm', b_constraint='unitnorm', name='conv')
    new_layer = keras.layers.Conv1D(5, 3, kernel_initializer='normal', strides=2, padding='valid', kernel_regularizer='l1', bias_regularizer='l2', kernel_constraint='max_norm', bias_constraint='unit_norm', name='conv')
    assert (json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config()))
# NOTE(review): the two bare-name statements below look like decorators
# mangled by extraction (plausibly "@login_required" plus a second one such
# as "@require_http_methods"); restore them from upstream -- as written they
# are NameError-producing no-ops.
_required
_
def proposal_upload_content(request, conference_slug, slug):
    """Email a proposal's content for review.

    Access is restricted to section reviewers of this conference/proposal
    and superusers; everyone else gets PermissionDenied.  Returns a
    plain-text HttpResponse describing the outcome.
    """
    conference = get_object_or_404(Conference, slug=conference_slug)
    proposal = get_object_or_404(Proposal, slug=slug, conference=conference)
    if (not (permissions.is_proposal_section_reviewer(request.user, conference, proposal) or request.user.is_superuser)):
        raise PermissionDenied
    # Absolute base URL for links embedded in the email body.
    host = '{}://{}'.format(settings.SITE_PROTOCOL, request.META['HTTP_HOST'])
    if settings.USE_ASYNC_FOR_EMAIL:
        # Fire-and-forget through the task queue; success is assumed.
        send_mail_for_proposal_content.delay(conference.id, proposal.id, host)
        message = 'Email sent successfully.'
    else:
        # Synchronous send returns 1 on success.
        response = send_mail_for_proposal_content(conference.id, proposal.id, host)
        if (response == 1):
            message = 'Email sent successfully.'
        else:
            message = 'There is problem in sending mail. Please contact conference chair.'
    return HttpResponse(message)
class XML(XHTML):
    """XHTML variant that emits generic XML: no tag names default to
    newline mode, and childless elements are serialized self-closed
    (``<tag />``)."""

    # Unlike XHTML, no tags automatically get newline separators.
    newline_default_on = set()

    def _stringify(self, str_type):
        """Serialize this element and its children using *str_type*."""
        sep = '\n' if self._newlines else ''
        # A nameless node renders as just its children.
        if self._name is None:
            return sep.join(map(str_type, self._content))
        attrs = ['%s="%s"' % item for item in self._attrs.items()]
        tag = ' '.join([self._name] + attrs)
        if not self._content:
            # Empty element: self-closing form.
            return '<%s />%s' % (tag, sep)
        rendered = sep.join(map(str_type, self._content))
        return ('<%s>%s' % (tag, sep)) + rendered + sep + ('</%s>' % self._name)
def _to_list(results):
for img_id in results:
for t in range(len(results[img_id])):
for k in results[img_id][t]:
if isinstance(results[img_id][t][k], (np.ndarray, np.float32)):
results[img_id][t][k] = results[img_id][t][k].tolist()
return results |
class Migration(migrations.Migration):
    """Relax Submission.topic to be optional (null/blank allowed) while
    PROTECT-ing referenced Topic rows from deletion."""

    dependencies = [('conferences', '0028_store_incoming_webhooks_in_conference_model'), ('submissions', '0019_allow_adding_a_short_summy_for_socials')]
    # AlterField keeps the FK but makes it nullable and optional in forms.
    operations = [migrations.AlterField(model_name='submission', name='topic', field=models.ForeignKey(null=True, blank=True, on_delete=django.db.models.deletion.PROTECT, to='conferences.topic', verbose_name='topic'))]
class SamplerFactory():
    """Factory for imbalanced-data samplers keyed by dataset name."""

    def __init__(self):
        pass

    # Fix: the original defined get_by_name without `self` or @staticmethod,
    # so calling it on an *instance* bound the instance as dataset_name.
    # @staticmethod keeps class-level calls working and repairs instance calls.
    @staticmethod
    def get_by_name(dataset_name, dataset):
        """Return the sampler matching *dataset_name* wrapped around *dataset*.

        Raises:
            ValueError: if *dataset_name* is not one of the known datasets.
        """
        # Imports are deferred so only the requested sampler module loads.
        if dataset_name == 'Mixed_EXPR':
            from .imbalanced_SLML import ImbalancedDatasetSampler_SLML
            sampler = ImbalancedDatasetSampler_SLML(dataset)
        elif dataset_name == 'Mixed_AU':
            from .imbalanced_ML import ImbalancedDatasetSampler_ML
            sampler = ImbalancedDatasetSampler_ML(dataset)
        elif dataset_name == 'Mixed_VA':
            from .imbalanced_VA import ImbalancedDatasetSampler_VA
            sampler = ImbalancedDatasetSampler_VA(dataset)
        else:
            raise ValueError('Dataset [%s] not recognized.' % dataset_name)
        return sampler
class LocalPath():
    """Object-oriented filesystem path wrapping an absolute path string.

    Stores the path as ``self.strpath`` and layers traversal, querying,
    reading/writing, hashing, temp-dir management and module-import helpers
    on top of ``os``/``os.path``.

    NOTE(review): this looks like a vendored copy of py.path.local /
    _pytest._py.path.LocalPath with decorators stripped during extraction:
    basename/dirname/purebasename/ext are *used* attribute-style elsewhere
    in this class (e.g. ``self.basename`` in copy() and pypkgpath()), so
    they were plainly ``@property``; the two stat() stubs were ``@overload``
    signatures; as_cwd() is a generator and was surely ``@contextmanager``;
    sysfind/_gethomedir/get_temproot/mkdtemp/make_numbered_dir take ``cls``
    and were ``@classmethod``.  Confirm against upstream before use.
    """

    class ImportMismatchError(ImportError):
        """Raised by pyimport() when the module already in sys.modules was
        loaded from a different file than this path."""

    # Platform directory separator, used throughout via self.sep.
    sep = os.sep

    def __init__(self, path=None, expanduser=False):
        """Create a path from *path* (PathLike/str); None means the cwd."""
        if (path is None):
            self.strpath = error.checked_call(os.getcwd)
        else:
            try:
                path = os.fspath(path)
            except TypeError:
                raise ValueError('can only pass None, Path instances or non-empty strings to LocalPath')
            if expanduser:
                path = os.path.expanduser(path)
            # Always store absolute + normalized.
            self.strpath = abspath(path)

    # Link/ownership operations exist only on POSIX platforms.
    if (sys.platform != 'win32'):

        def chown(self, user, group, rec=0):
            """Change ownership to user/group; rec=1 recurses, skipping symlinks."""
            uid = getuserid(user)
            gid = getgroupid(group)
            if rec:
                for x in self.visit(rec=(lambda x: x.check(link=0))):
                    if x.check(link=0):
                        error.checked_call(os.chown, str(x), uid, gid)
            error.checked_call(os.chown, str(self), uid, gid)

        def readlink(self) -> str:
            """Return the target this symbolic link points to."""
            return error.checked_call(os.readlink, self.strpath)

        def mklinkto(self, oldname):
            """Create a hard link at this path pointing to *oldname*."""
            error.checked_call(os.link, str(oldname), str(self))

        def mksymlinkto(self, value, absolute=1):
            """Create a symlink here to *value*; relative target if absolute=0."""
            if absolute:
                error.checked_call(os.symlink, str(value), self.strpath)
            else:
                # Build a ../..-style target through the common ancestor.
                base = self.common(value)
                relsource = self.__class__(value).relto(base)
                reldest = self.relto(base)
                n = reldest.count(self.sep)
                target = self.sep.join(((('..',) * n) + (relsource,)))
                error.checked_call(os.symlink, target, self.strpath)

    def __div__(self, other):
        """Path joining via the division operator."""
        return self.join(os.fspath(other))
    __truediv__ = __div__

    def basename(self):
        """Final path component.  NOTE(review): was a @property upstream."""
        return self._getbyspec('basename')[0]

    def dirname(self):
        """Directory part of the path.  NOTE(review): was a @property upstream."""
        return self._getbyspec('dirname')[0]

    def purebasename(self):
        """Basename without extension.  NOTE(review): was a @property upstream."""
        return self._getbyspec('purebasename')[0]

    def ext(self):
        """Extension including the dot.  NOTE(review): was a @property upstream."""
        return self._getbyspec('ext')[0]

    def read_binary(self):
        """Read and return the file's contents as bytes."""
        with self.open('rb') as f:
            return f.read()

    def read_text(self, encoding):
        """Read and return the file's contents decoded with *encoding*."""
        with self.open('r', encoding=encoding) as f:
            return f.read()

    def read(self, mode='r'):
        """Read and return the file's contents using open-mode *mode*."""
        with self.open(mode) as f:
            return f.read()

    def readlines(self, cr=1):
        """Return the file's lines; cr=0 strips newlines via split('\\n')."""
        mode = 'r'
        if (not cr):
            content = self.read(mode)
            return content.split('\n')
        else:
            f = self.open(mode)
            try:
                return f.readlines()
            finally:
                f.close()

    def load(self):
        """Unpickle and return this file's contents.
        NOTE(review): pickle on untrusted files is unsafe."""
        f = self.open('rb')
        try:
            import pickle
            return error.checked_call(pickle.load, f)
        finally:
            f.close()

    def move(self, target):
        """Move this path to *target*; falls back to copy+remove across devices."""
        if target.relto(self):
            raise error.EINVAL(target, 'cannot move path into a subdirectory of itself')
        try:
            self.rename(target)
        except error.EXDEV:
            # rename() cannot cross filesystems -- copy then delete.
            self.copy(target)
            self.remove()

    def fnmatch(self, pattern):
        """Return True if the path matches the fnmatch-style *pattern*."""
        return FNMatcher(pattern)(self)

    def relto(self, relpath):
        """Return this path relative to *relpath*, or '' if not under it."""
        if (not isinstance(relpath, (str, LocalPath))):
            raise TypeError(f'{relpath!r}: not a string or path object')
        strrelpath = str(relpath)
        if (strrelpath and (strrelpath[(- 1)] != self.sep)):
            strrelpath += self.sep
        strself = self.strpath
        if ((sys.platform == 'win32') or (getattr(os, '_name', None) == 'nt')):
            # Case-insensitive comparison on Windows (and Jython-on-NT).
            if os.path.normcase(strself).startswith(os.path.normcase(strrelpath)):
                return strself[len(strrelpath):]
        elif strself.startswith(strrelpath):
            return strself[len(strrelpath):]
        return ''

    def ensure_dir(self, *args):
        """Ensure this path joined with *args exists as a directory."""
        return self.ensure(*args, **{'dir': True})

    def bestrelpath(self, dest):
        """Return a relative path from self to *dest*, or str(dest) if
        no common ancestor can be found."""
        try:
            if (self == dest):
                return os.curdir
            base = self.common(dest)
            if (not base):
                return str(dest)
            self2base = self.relto(base)
            reldest = dest.relto(base)
            if self2base:
                n = (self2base.count(self.sep) + 1)
            else:
                n = 0
            lst = ([os.pardir] * n)
            if reldest:
                lst.append(reldest)
            target = dest.sep.join(lst)
            return target
        except AttributeError:
            return str(dest)

    def exists(self):
        """Return True if the path exists."""
        return self.check()

    def isdir(self):
        """Return True if the path is a directory."""
        return self.check(dir=1)

    def isfile(self):
        """Return True if the path is a regular file."""
        return self.check(file=1)

    def parts(self, reverse=False):
        """Return all ancestor paths including self, root-first by default."""
        current = self
        lst = [self]
        while 1:
            last = current
            current = current.dirpath()
            if (last == current):
                # dirpath() of the root returns itself.
                break
            lst.append(current)
        if (not reverse):
            lst.reverse()
        return lst

    def common(self, other):
        """Return the deepest common ancestor of self and *other* (or None)."""
        last = None
        for (x, y) in zip(self.parts(), other.parts()):
            if (x != y):
                return last
            last = x
        return last

    def __add__(self, other):
        """Return a new path with str(other) appended to the basename."""
        return self.new(basename=(self.basename + str(other)))

    def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False):
        """Yield paths below self, filtered by *fil*, recursing per *rec*."""
        (yield from Visitor(fil, rec, ignore, bf, sort).gen(self))

    def _sortlist(self, res, sort):
        # sort may be True (plain sort) or, deprecated, a cmp-style callable.
        if sort:
            if hasattr(sort, '__call__'):
                warnings.warn(DeprecationWarning('listdir(sort=callable) is deprecated and breaks on python3'), stacklevel=3)
                res.sort(sort)
            else:
                res.sort()

    def __fspath__(self):
        return self.strpath

    def __hash__(self):
        s = self.strpath
        if iswin32:
            # Match case-insensitive equality on Windows.
            s = s.lower()
        return hash(s)

    def __eq__(self, other):
        s1 = os.fspath(self)
        try:
            s2 = os.fspath(other)
        except TypeError:
            return False
        if iswin32:
            s1 = s1.lower()
            try:
                s2 = s2.lower()
            except AttributeError:
                return False
        return (s1 == s2)

    def __ne__(self, other):
        return (not (self == other))

    def __lt__(self, other):
        return (os.fspath(self) < os.fspath(other))

    def __gt__(self, other):
        return (os.fspath(self) > os.fspath(other))

    def samefile(self, other):
        """Return True if self and *other* refer to the same file."""
        other = os.fspath(other)
        if (not isabs(other)):
            other = abspath(other)
        if (self == other):
            return True
        if (not hasattr(os.path, 'samefile')):
            return False
        return error.checked_call(os.path.samefile, self.strpath, other)

    def remove(self, rec=1, ignore_errors=False):
        """Remove the file or directory (recursively if rec, the default)."""
        if self.check(dir=1, link=0):
            if rec:
                if iswin32:
                    # 448 == 0o700: make sure we may delete read-only entries.
                    self.chmod(448, rec=1)
                import shutil
                error.checked_call(shutil.rmtree, self.strpath, ignore_errors=ignore_errors)
            else:
                error.checked_call(os.rmdir, self.strpath)
        else:
            if iswin32:
                self.chmod(448)
            error.checked_call(os.remove, self.strpath)

    def computehash(self, hashtype='md5', chunksize=524288):
        """Return the hex digest of the file, read in *chunksize* chunks."""
        try:
            try:
                import hashlib as mod
            except ImportError:
                # Ancient-Python fallback to per-algorithm modules.
                if (hashtype == 'sha1'):
                    hashtype = 'sha'
                mod = __import__(hashtype)
            hash = getattr(mod, hashtype)()
        except (AttributeError, ImportError):
            raise ValueError(f"Don't know how to compute {hashtype!r} hash")
        f = self.open('rb')
        try:
            while 1:
                buf = f.read(chunksize)
                if (not buf):
                    return hash.hexdigest()
                hash.update(buf)
        finally:
            f.close()

    def new(self, **kw):
        """Return a new path with parts (drive/dirname/basename/purebasename/
        ext) replaced per keyword arguments; the original is unchanged."""
        obj = object.__new__(self.__class__)
        if (not kw):
            obj.strpath = self.strpath
            return obj
        (drive, dirname, basename, purebasename, ext) = self._getbyspec('drive,dirname,basename,purebasename,ext')
        if ('basename' in kw):
            if (('purebasename' in kw) or ('ext' in kw)):
                raise ValueError(('invalid specification %r' % kw))
        else:
            pb = kw.setdefault('purebasename', purebasename)
            try:
                ext = kw['ext']
            except KeyError:
                pass
            else:
                if (ext and (not ext.startswith('.'))):
                    ext = ('.' + ext)
            kw['basename'] = (pb + ext)
        if (('dirname' in kw) and (not kw['dirname'])):
            kw['dirname'] = drive
        else:
            kw.setdefault('dirname', dirname)
        kw.setdefault('sep', self.sep)
        obj.strpath = normpath(('%(dirname)s%(sep)s%(basename)s' % kw))
        return obj

    def _getbyspec(self, spec: str) -> list[str]:
        """Return the path parts named in comma-separated *spec*:
        drive, dirname, basename, purebasename, ext."""
        res = []
        parts = self.strpath.split(self.sep)
        args = filter(None, spec.split(','))
        for name in args:
            if (name == 'drive'):
                res.append(parts[0])
            elif (name == 'dirname'):
                res.append(self.sep.join(parts[:(- 1)]))
            else:
                basename = parts[(- 1)]
                if (name == 'basename'):
                    res.append(basename)
                else:
                    i = basename.rfind('.')
                    if (i == (- 1)):
                        (purebasename, ext) = (basename, '')
                    else:
                        (purebasename, ext) = (basename[:i], basename[i:])
                    if (name == 'purebasename'):
                        res.append(purebasename)
                    elif (name == 'ext'):
                        res.append(ext)
                    else:
                        raise ValueError(('invalid part specification %r' % name))
        return res

    def dirpath(self, *args, **kwargs):
        """Return the directory path joined with any *args."""
        if (not kwargs):
            # Fast path avoiding new()'s part-splitting machinery.
            path = object.__new__(self.__class__)
            path.strpath = dirname(self.strpath)
            if args:
                path = path.join(*args)
            return path
        return self.new(basename='').join(*args, **kwargs)

    def join(self, *args: os.PathLike[str], abs: bool=False) -> LocalPath:
        """Return self joined with *args*; with abs=True a later absolute
        argument restarts the result from that argument."""
        sep = self.sep
        strargs = [os.fspath(arg) for arg in args]
        strpath = self.strpath
        if abs:
            newargs: list[str] = []
            for arg in reversed(strargs):
                if isabs(arg):
                    strpath = arg
                    strargs = newargs
                    break
                newargs.insert(0, arg)
        actual_sep = ('' if strpath.endswith(sep) else sep)
        for arg in strargs:
            arg = arg.strip(sep)
            if iswin32:
                # Allow forward slashes in arguments on Windows.
                arg = arg.strip('/')
                arg = arg.replace('/', sep)
            strpath = ((strpath + actual_sep) + arg)
            actual_sep = sep
        obj = object.__new__(self.__class__)
        obj.strpath = normpath(strpath)
        return obj

    def open(self, mode='r', ensure=False, encoding=None):
        """Open the file; ensure=True creates missing parent directories."""
        if ensure:
            self.dirpath().ensure(dir=1)
        if encoding:
            return error.checked_call(io.open, self.strpath, mode, encoding=encoding)
        return error.checked_call(open, self.strpath, mode)

    def _fastjoin(self, name):
        # Cheap single-component join with no normalization.
        child = object.__new__(self.__class__)
        child.strpath = ((self.strpath + self.sep) + name)
        return child

    def islink(self):
        return islink(self.strpath)

    def check(self, **kw):
        """Check path properties, e.g. check(file=1), check(dir=0)."""
        if (not kw):
            return exists(self.strpath)
        if (len(kw) == 1):
            if ('dir' in kw):
                # XOR handles both dir=1 ("is a dir") and dir=0 ("is not").
                return (not (kw['dir'] ^ isdir(self.strpath)))
            if ('file' in kw):
                return (not (kw['file'] ^ isfile(self.strpath)))
        if (not kw):
            kw = {'exists': 1}
        return Checkers(self)._evaluate(kw)

    # Characters signalling that a listdir() filter string is a glob.
    _patternchars = set(('*?[' + os.sep))

    def listdir(self, fil=None, sort=None):
        """List directory entries, optionally filtered by a callable or
        fnmatch pattern *fil* and sorted per *sort*."""
        if ((fil is None) and (sort is None)):
            names = error.checked_call(os.listdir, self.strpath)
            return map_as_list(self._fastjoin, names)
        if isinstance(fil, str):
            if (not self._patternchars.intersection(fil)):
                # Plain name, no glob chars: direct existence check.
                child = self._fastjoin(fil)
                if exists(child.strpath):
                    return [child]
                return []
            fil = FNMatcher(fil)
        names = error.checked_call(os.listdir, self.strpath)
        res = []
        for name in names:
            child = self._fastjoin(name)
            if ((fil is None) or fil(child)):
                res.append(child)
        self._sortlist(res, sort)
        return res

    def size(self) -> int:
        """Return the file size in bytes."""
        return self.stat().size

    def mtime(self) -> float:
        """Return the last-modification time."""
        return self.stat().mtime

    def copy(self, target, mode=False, stat=False):
        """Copy file or tree to *target*; mode/stat also copy permissions
        and stat info respectively."""
        if self.check(file=1):
            if target.check(dir=1):
                target = target.join(self.basename)
            assert (self != target)
            copychunked(self, target)
            if mode:
                copymode(self.strpath, target.strpath)
            if stat:
                copystat(self, target)
        else:
            def rec(p):
                # Do not descend into symlinked directories.
                return p.check(link=0)
            for x in self.visit(rec=rec):
                relpath = x.relto(self)
                newx = target.join(relpath)
                newx.dirpath().ensure(dir=1)
                if x.check(link=1):
                    newx.mksymlinkto(x.readlink())
                    continue
                elif x.check(file=1):
                    copychunked(x, newx)
                elif x.check(dir=1):
                    newx.ensure(dir=1)
                if mode:
                    copymode(x.strpath, newx.strpath)
                if stat:
                    copystat(x, newx)

    def rename(self, target):
        """Rename this path to *target* via os.rename."""
        target = os.fspath(target)
        return error.checked_call(os.rename, self.strpath, target)

    def dump(self, obj, bin=1):
        """Pickle *obj* into this file (bin is the pickle protocol)."""
        f = self.open('wb')
        import pickle
        try:
            error.checked_call(pickle.dump, obj, f, bin)
        finally:
            f.close()

    def mkdir(self, *args):
        """Create (and return) a directory at self joined with *args."""
        p = self.join(*args)
        error.checked_call(os.mkdir, os.fspath(p))
        return p

    def write_binary(self, data, ensure=False):
        """Write bytes; ensure=True creates missing parent directories."""
        if ensure:
            self.dirpath().ensure(dir=1)
        with self.open('wb') as f:
            f.write(data)

    def write_text(self, data, encoding, ensure=False):
        """Write text with *encoding*; ensure=True creates parent dirs."""
        if ensure:
            self.dirpath().ensure(dir=1)
        with self.open('w', encoding=encoding) as f:
            f.write(data)

    def write(self, data, mode='w', ensure=False):
        """Write *data*, coercing str/bytes to match *mode*."""
        if ensure:
            self.dirpath().ensure(dir=1)
        if ('b' in mode):
            if (not isinstance(data, bytes)):
                raise ValueError('can only process bytes')
        elif (not isinstance(data, str)):
            if (not isinstance(data, bytes)):
                data = str(data)
            else:
                data = data.decode(sys.getdefaultencoding())
        f = self.open(mode)
        try:
            f.write(data)
        finally:
            f.close()

    def _ensuredirs(self):
        # Recursively create missing parents, tolerating a concurrent mkdir.
        parent = self.dirpath()
        if (parent == self):
            return self
        if parent.check(dir=0):
            parent._ensuredirs()
        if self.check(dir=0):
            try:
                self.mkdir()
            except error.EEXIST:
                # Race: only re-raise if the dir still does not exist.
                if self.check(dir=0):
                    raise
        return self

    def ensure(self, *args, **kwargs):
        """Ensure self joined with *args exists -- as a directory when
        dir=True, otherwise as an (empty) file."""
        p = self.join(*args)
        if kwargs.get('dir', 0):
            return p._ensuredirs()
        else:
            p.dirpath()._ensuredirs()
            if (not p.check(file=1)):
                p.open('wb').close()
            return p

    # NOTE(review): the next two stubs were plainly @overload signatures;
    # without the decorator the final def simply replaces them, which keeps
    # runtime behavior but loses the typing overloads.
    def stat(self, raising: Literal[True]=...) -> Stat:
        ...

    def stat(self, raising: Literal[False]) -> (Stat | None):
        ...

    def stat(self, raising: bool=True) -> (Stat | None):
        """Return a Stat for this path; with raising=False return None
        instead of raising on failure."""
        if raising:
            return Stat(self, error.checked_call(os.stat, self.strpath))
        try:
            return Stat(self, os.stat(self.strpath))
        except KeyboardInterrupt:
            raise
        except Exception:
            return None

    def lstat(self) -> Stat:
        """Like stat() but does not follow symbolic links."""
        return Stat(self, error.checked_call(os.lstat, self.strpath))

    def setmtime(self, mtime=None):
        """Set the modification time (current time if mtime is None)."""
        if (mtime is None):
            return error.checked_call(os.utime, self.strpath, mtime)
        try:
            return error.checked_call(os.utime, self.strpath, ((- 1), mtime))
        except error.EINVAL:
            # Some platforms reject -1 for atime; pass the real atime.
            return error.checked_call(os.utime, self.strpath, (self.atime(), mtime))

    def chdir(self):
        """chdir() into this path; return the previous cwd (or None)."""
        try:
            old = self.__class__()
        except error.ENOENT:
            old = None
        error.checked_call(os.chdir, self.strpath)
        return old

    def as_cwd(self):
        """Generator used as a context manager: chdir in, restore on exit.
        NOTE(review): was surely decorated with @contextmanager upstream."""
        old = self.chdir()
        try:
            (yield old)
        finally:
            if (old is not None):
                old.chdir()

    def realpath(self):
        """Return a new path with symlinks resolved."""
        return self.__class__(os.path.realpath(self.strpath))

    def atime(self):
        """Return the last-access time."""
        return self.stat().atime

    def __repr__(self):
        return ('local(%r)' % self.strpath)

    def __str__(self):
        return self.strpath

    def chmod(self, mode, rec=0):
        """Change permission bits; rec=1 recurses through children."""
        if (not isinstance(mode, int)):
            raise TypeError(f'mode {mode!r} must be an integer')
        if rec:
            for x in self.visit(rec=rec):
                error.checked_call(os.chmod, str(x), mode)
        error.checked_call(os.chmod, self.strpath, mode)

    def pypkgpath(self):
        """Return the topmost enclosing package directory (every level must
        contain __init__.py and have an importable name), or None."""
        pkgpath = None
        for parent in self.parts(reverse=True):
            if parent.isdir():
                if (not parent.join('__init__.py').exists()):
                    break
                if (not isimportable(parent.basename)):
                    break
                pkgpath = parent
        return pkgpath

    def _ensuresyspath(self, ensuremode, path):
        # Insert *path* into sys.path ('append' or prepend) if requested.
        if ensuremode:
            s = str(path)
            if (ensuremode == 'append'):
                if (s not in sys.path):
                    sys.path.append(s)
            elif (s != sys.path[0]):
                sys.path.insert(0, s)

    def pyimport(self, modname=None, ensuresyspath=True):
        """Import this path as a Python module and return it.

        ensuresyspath may be True/'append' (adjust sys.path) or 'importlib'
        (spec-based import without touching sys.path).  Raises
        ImportMismatchError when an already-imported module of the same
        name originates from a different file (unless
        PY_IGNORE_IMPORTMISMATCH=1).
        """
        if (not self.check()):
            raise error.ENOENT(self)
        if (ensuresyspath == 'importlib'):
            if (modname is None):
                modname = self.purebasename
            spec = importlib.util.spec_from_file_location(modname, str(self))
            if ((spec is None) or (spec.loader is None)):
                raise ImportError(f"Can't find module {modname} at location {str(self)}")
            mod = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(mod)
            return mod
        pkgpath = None
        if (modname is None):
            # Derive the dotted module name from the enclosing package.
            pkgpath = self.pypkgpath()
            if (pkgpath is not None):
                pkgroot = pkgpath.dirpath()
                names = self.new(ext='').relto(pkgroot).split(self.sep)
                if (names[(- 1)] == '__init__'):
                    names.pop()
                modname = '.'.join(names)
            else:
                pkgroot = self.dirpath()
                modname = self.purebasename
            self._ensuresyspath(ensuresyspath, pkgroot)
            __import__(modname)
            mod = sys.modules[modname]
            if (self.basename == '__init__.py'):
                # Unrolled __file__ comparison is not reliable for packages.
                return mod
            modfile = mod.__file__
            assert (modfile is not None)
            if (modfile[(- 4):] in ('.pyc', '.pyo')):
                modfile = modfile[:(- 1)]
            elif modfile.endswith('$py.class'):
                # Jython compiled-class suffix.
                modfile = (modfile[:(- 9)] + '.py')
            if modfile.endswith((os.sep + '__init__.py')):
                if (self.basename != '__init__.py'):
                    modfile = modfile[:(- 12)]
            try:
                issame = self.samefile(modfile)
            except error.ENOENT:
                issame = False
            if (not issame):
                ignore = os.getenv('PY_IGNORE_IMPORTMISMATCH')
                if (ignore != '1'):
                    raise self.ImportMismatchError(modname, modfile, self)
            return mod
        else:
            try:
                return sys.modules[modname]
            except KeyError:
                # Explicit modname: exec the file into a fresh module object.
                import types
                mod = types.ModuleType(modname)
                mod.__file__ = str(self)
                sys.modules[modname] = mod
                try:
                    with open(str(self), 'rb') as f:
                        exec(f.read(), mod.__dict__)
                except BaseException:
                    del sys.modules[modname]
                    raise
                return mod

    def sysexec(self, *argv: os.PathLike[str], **popen_opts: Any) -> str:
        """Execute this path as a program with *argv*; return its stdout.

        Raises RuntimeError(ret, ret, cmd, stdout, stderr) on a nonzero
        exit status.  NOTE(review): the duplicated ret mirrors the upstream
        cmdexec.Error signature -- presumably intentional.
        """
        from subprocess import Popen, PIPE
        # stdout/stderr are always captured; drop caller overrides.
        popen_opts.pop('stdout', None)
        popen_opts.pop('stderr', None)
        proc = Popen(([str(self)] + [str(arg) for arg in argv]), **popen_opts, stdout=PIPE, stderr=PIPE)
        stdout: (str | bytes)
        (stdout, stderr) = proc.communicate()
        ret = proc.wait()
        if isinstance(stdout, bytes):
            stdout = stdout.decode(sys.getdefaultencoding())
        if (ret != 0):
            if isinstance(stderr, bytes):
                stderr = stderr.decode(sys.getdefaultencoding())
            raise RuntimeError(ret, ret, str(self), stdout, stderr)
        return stdout

    def sysfind(cls, name, checker=None, paths=None):
        """Search *paths* (default: $PATH) for an executable called *name*.
        NOTE(review): takes cls -- was surely a @classmethod upstream."""
        if isabs(name):
            p = local(name)
            if p.check(file=1):
                return p
        else:
            if (paths is None):
                if iswin32:
                    paths = os.environ['Path'].split(';')
                    if (('' not in paths) and ('.' not in paths)):
                        paths.append('.')
                    try:
                        systemroot = os.environ['SYSTEMROOT']
                    except KeyError:
                        pass
                    else:
                        paths = [path.replace('%SystemRoot%', systemroot) for path in paths]
                else:
                    paths = os.environ['PATH'].split(':')
            tryadd = []
            if iswin32:
                # On Windows also try the executable extensions (PATHEXT).
                tryadd += os.environ['PATHEXT'].split(os.pathsep)
            tryadd.append('')
            for x in paths:
                for addext in tryadd:
                    p = (local(x).join(name, abs=True) + addext)
                    try:
                        if p.check(file=1):
                            if checker:
                                if (not checker(p)):
                                    continue
                            return p
                    except error.EACCES:
                        pass
        return None

    def _gethomedir(cls):
        """Return the user's home directory path, or None.
        NOTE(review): takes cls -- was surely a @classmethod upstream."""
        try:
            x = os.environ['HOME']
        except KeyError:
            try:
                x = (os.environ['HOMEDRIVE'] + os.environ['HOMEPATH'])
            except KeyError:
                return None
        return cls(x)

    def get_temproot(cls):
        """Return the system temp directory as a path.
        NOTE(review): takes cls -- was surely a @classmethod upstream."""
        import tempfile
        return local(tempfile.gettempdir())

    def mkdtemp(cls, rootdir=None):
        """Create and return a new unique temporary directory.
        NOTE(review): takes cls -- was surely a @classmethod upstream."""
        import tempfile
        if (rootdir is None):
            rootdir = cls.get_temproot()
        path = error.checked_call(tempfile.mkdtemp, dir=str(rootdir))
        return cls(path)

    def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3, lock_timeout=172800):
        """Create a numbered directory prefix+N (N = current max + 1),
        lock it, garbage-collect stale siblings beyond *keep*, and point a
        prefix+username symlink at the new directory.
        NOTE(review): takes cls -- was surely a @classmethod upstream."""
        if (rootdir is None):
            rootdir = cls.get_temproot()
        nprefix = prefix.lower()

        def parse_num(path):
            # Return the trailing number of a prefix+N sibling, else None.
            nbasename = path.basename.lower()
            if nbasename.startswith(nprefix):
                try:
                    return int(nbasename[len(nprefix):])
                except ValueError:
                    pass

        def create_lockfile(path):
            # A .lock entry holding our pid; symlink where available,
            # otherwise an exclusively-created file.
            mypid = os.getpid()
            lockfile = path.join('.lock')
            if hasattr(lockfile, 'mksymlinkto'):
                lockfile.mksymlinkto(str(mypid))
            else:
                fd = error.checked_call(os.open, str(lockfile), ((os.O_WRONLY | os.O_CREAT) | os.O_EXCL), 420)
                with os.fdopen(fd, 'w') as f:
                    f.write(str(mypid))
            return lockfile

        def atexit_remove_lockfile(lockfile):
            # Remove the lock at interpreter exit, but only in this process.
            mypid = os.getpid()

            def try_remove_lockfile():
                if (os.getpid() != mypid):
                    return
                try:
                    lockfile.remove()
                except error.Error:
                    pass
            atexit.register(try_remove_lockfile)

        # Compete with concurrent processes for the next free number.
        lastmax = None
        while True:
            maxnum = (- 1)
            for path in rootdir.listdir():
                num = parse_num(path)
                if (num is not None):
                    maxnum = max(maxnum, num)
            try:
                udir = rootdir.mkdir((prefix + str((maxnum + 1))))
                if lock_timeout:
                    lockfile = create_lockfile(udir)
                    atexit_remove_lockfile(lockfile)
            except (error.EEXIST, error.ENOENT, error.EBUSY):
                # Someone else grabbed the number; retry unless stuck.
                if (lastmax == maxnum):
                    raise
                lastmax = maxnum
                continue
            break

        def get_mtime(path):
            try:
                return path.lstat().mtime
            except error.Error:
                pass
        garbage_prefix = (prefix + 'garbage-')

        def is_garbage(path):
            bn = path.basename
            return bn.startswith(garbage_prefix)

        # Garbage-collect siblings older than the *keep* most recent ones.
        udir_time = get_mtime(udir)
        if (keep and udir_time):
            for path in rootdir.listdir():
                num = parse_num(path)
                if ((num is not None) and (num <= (maxnum - keep))):
                    try:
                        if lock_timeout:
                            create_lockfile(path)
                    except (error.EEXIST, error.ENOENT, error.EBUSY):
                        path_time = get_mtime(path)
                        if (not path_time):
                            # Directory apparently vanished meanwhile.
                            continue
                        if (abs((udir_time - path_time)) < lock_timeout):
                            # Still locked recently enough -- keep it.
                            continue
                    # Rename first so removal failures leave a marked dir.
                    garbage_path = rootdir.join((garbage_prefix + str(uuid.uuid4())))
                    try:
                        path.rename(garbage_path)
                        garbage_path.remove(rec=1)
                    except KeyboardInterrupt:
                        raise
                    except Exception:
                        pass
                if is_garbage(path):
                    try:
                        path.remove(rec=1)
                    except KeyboardInterrupt:
                        raise
                    except Exception:
                        pass

        # Best-effort convenience symlink: prefix + current username.
        try:
            username = os.environ['USER']
        except KeyError:
            try:
                username = os.environ['USERNAME']
            except KeyError:
                username = 'current'
        src = str(udir)
        dest = ((src[:src.rfind('-')] + '-') + username)
        try:
            os.unlink(dest)
        except OSError:
            pass
        try:
            os.symlink(src, dest)
        except (OSError, AttributeError, NotImplementedError):
            pass
        return udir
_error_logging()
class CurrentPositionsSheet(AbstractDocument):
    """PDF report listing the currently open positions of a portfolio."""

    def __init__(self, settings: Settings, pdf_exporter: PDFExporter, portfolio: Portfolio, title: str='Current Positions'):
        super().__init__(settings, pdf_exporter, title)
        self._portfolio = portfolio

    def build_document(self):
        """Assemble the header, a section heading and the positions table."""
        self._add_header()
        self.document.add_element(ParagraphElement('\n'))
        self.document.add_element(HeadingElement(level=2, text='Open Positions in the Portfolio'))
        self._add_open_positions_table()

    def _add_open_positions_table(self):
        """Append a table with one row per open position."""
        positions = self._portfolio.open_positions_dict
        tickers = list(positions.keys())
        rows = [positions[t] for t in tickers]
        data = {
            'Tickers name': [t.name for t in tickers],
            'Specific ticker': tickers,
            # direction() == 1 marks a long position, anything else short.
            'Direction': ['LONG' if p.direction() == 1 else 'SHORT' for p in rows],
            'Total Exposure': ['{:,.2f}'.format(p.total_exposure()) for p in rows],
            'PnL': ['{:,.2f}'.format(p.unrealised_pnl) for p in rows],
            'Position Creation': [p.start_time.date() for p in rows],
        }
        table = DFTable(QFDataFrame.from_dict(data), css_classes=['table', 'left-align'])
        self.document.add_element(table)

    def save(self, report_dir: str=''):
        """Export the document as a timestamped PDF under *report_dir*."""
        plt.style.use(['tearsheet'])
        filename = datetime.now().strftime('%Y_%m_%d-%H%M Current Positions.pdf')
        return self.pdf_exporter.generate([self.document], report_dir, filename)
class STM32F1xxRcc(QlPeripheral):
    """Reset and Clock Control (RCC) peripheral model for STM32F1xx.

    Backs the register file with a ctypes structure; on each step() it
    mirrors every *ON* enable bit into its paired *RDY* ready bit so guest
    clock-startup polling loops terminate.

    NOTE(review): the original read()/write() methods were preceded by bare
    "()" lines -- evidently decorators (plausibly "@QlPeripheral.monitor()")
    whose names were lost in extraction; restore from upstream if register
    access tracing is wanted.
    """

    class Type(ctypes.Structure):
        # Register layout in memory-map order; all registers are 32-bit.
        _fields_ = [
            ('CR', ctypes.c_uint32),
            ('CFGR', ctypes.c_uint32),
            ('CIR', ctypes.c_uint32),
            ('APB2RSTR', ctypes.c_uint32),
            ('APB1RSTR', ctypes.c_uint32),
            ('AHBENR', ctypes.c_uint32),
            ('APB2ENR', ctypes.c_uint32),
            ('APB1ENR', ctypes.c_uint32),
            ('BDCR', ctypes.c_uint32),
            ('CSR', ctypes.c_uint32),
        ]

    def __init__(self, ql, label, intn=None):
        super().__init__(ql, label)
        # Reset values: CR=0x83 (HSION|HSIRDY plus default HSITRIM),
        # AHBENR=0x14.  The CSR value was lost in extraction ("CSR=)");
        # 0x0C000000 is the STM32F1 RCC_CSR reset value (PIN/POR reset
        # flags set) -- TODO confirm against upstream.
        self.instance = self.struct(CR=0x83, AHBENR=0x14, CSR=0x0C000000)
        # For each register: (ready-bit, enable-bit) pairs kept in sync by step().
        self.rdyon = {
            'CR': [
                (RCC_CR.HSIRDY, RCC_CR.HSION),
                (RCC_CR.HSERDY, RCC_CR.HSEON),
                (RCC_CR.PLLRDY, RCC_CR.PLLON),
                (RCC_CR.PLLI2SRDY, RCC_CR.PLLI2SON),
            ],
            'CFGR': [
                (RCC_CFGR.SWS_0, RCC_CFGR.SW_0),
                (RCC_CFGR.SWS_1, RCC_CFGR.SW_1),
            ],
            'CSR': [
                (RCC_CSR.LSIRDY, RCC_CSR.LSION),
            ],
        }
        self.intn = intn

    def read(self, offset: int, size: int) -> int:
        """Read *size* bytes at *offset* from the register file (LE)."""
        buf = ctypes.create_string_buffer(size)
        ctypes.memmove(buf, (ctypes.addressof(self.instance) + offset), size)
        return int.from_bytes(buf.raw, byteorder='little')

    def write(self, offset: int, size: int, value: int):
        """Write *size* bytes of *value* at *offset* into the register file."""
        data = value.to_bytes(size, 'little')
        ctypes.memmove((ctypes.addressof(self.instance) + offset), data, size)

    def step(self):
        """Propagate enable bits to their ready bits (instant clock lock)."""
        for (reg, rdyon) in self.rdyon.items():
            value = getattr(self.instance, reg)
            for (rdy, on) in rdyon:
                if (value & on):
                    value |= rdy
                else:
                    value &= (~ rdy)
            setattr(self.instance, reg, value)
class TestDrudeLorentzBath():
    """Tests for DrudeLorentzBath exponent construction and terminator."""

    def test_create(self):
        """Exponents should combine matching real/imag terms into a single
        'RI' exponent unless combine=False is passed."""
        Q = sigmaz()
        ck_real = [0., 0.]
        vk_real = [0.05, 6.]
        ck_imag = [(- 0.)]
        vk_imag = [0.05]
        bath = DrudeLorentzBath(Q=Q, lam=0.025, T=(1 / 0.95), Nk=1, gamma=0.05, tag='bath1')
        [exp1, exp2] = bath.exponents
        check_exponent(exp1, 'RI', dim=None, Q=Q, ck=ck_real[0], vk=vk_real[0], ck2=ck_imag[0], tag='bath1')
        check_exponent(exp2, 'R', dim=None, Q=Q, ck=ck_real[1], vk=vk_real[1], tag='bath1')
        # Without combining, real and imaginary exponents stay separate.
        bath = DrudeLorentzBath(Q=Q, lam=0.025, T=(1 / 0.95), Nk=1, gamma=0.05, combine=False)
        [exp1, exp2, exp3] = bath.exponents
        check_exponent(exp1, 'R', dim=None, Q=Q, ck=ck_real[0], vk=vk_real[0])
        check_exponent(exp2, 'R', dim=None, Q=Q, ck=ck_real[1], vk=vk_real[1])
        check_exponent(exp3, 'I', dim=None, Q=Q, ck=ck_imag[0], vk=vk_imag[0])

    # Fix: the decorator below had lost its "@pytest.mark." prefix in the
    # original (bare ".parametrize(...)", a syntax error); restored here --
    # the line's use of pytest.param makes the intended marker unambiguous.
    @pytest.mark.parametrize(['combine'], [pytest.param(True, id='combine'), pytest.param(False, id='no-combine')])
    def test_terminator(self, combine):
        """The Matsubara terminator should match the analytic Liouvillian."""
        Q = sigmaz()
        op = (((((- 2) * spre(Q)) * spost(Q.dag())) + spre((Q.dag() * Q))) + spost((Q.dag() * Q)))
        bath = DrudeLorentzBath(Q=Q, lam=0.025, T=(1 / 0.95), Nk=1, gamma=0.05, combine=combine)
        (delta, terminator) = bath.terminator()
        assert (np.abs((delta - (0. / 4.0))) < 1e-08)
        assert isequal(terminator, ((- (0. / 4.0)) * op), tol=1e-08)
def commands_normalize_resnet(resnet_features_dir: str=RESNET_FEATURES_DIR, resnet_normalized_features_dir: str=RESNET_NORMALIZED_FEATURES_DIR, models_dir: str=MODELS_DIR, splits_dir: str=SPLITS_DIR) -> List[Command]:
    """Build the two-step pipeline for ResNet feature normalization:
    first learn a max-abs normalizer on the training splits, then apply
    it to every feature file, writing the normalized copies out."""
    normalizer_path = os.path.join(models_dir, RESNET_NORMALIZER_PKL)
    create_args = {
        f'--{CreateNormalizerArgs.FEATURES_DIR}': resnet_features_dir,
        f'--{CreateNormalizerArgs.SPLITS_DIR}': splits_dir,
        f'--{CreateNormalizerArgs.NORMALIZER}': NORMALIZER_MAX_ABS,
        f'--{CreateNormalizerArgs.FEATURE_NAME}': SOCCERNET_FEATURE_NAME_RESNET_TF2,
        f'--{CreateNormalizerArgs.OUT_PATH}': normalizer_path,
    }
    transform_args = {
        f'--{TransformArgs.INPUT_DIRS}': resnet_features_dir,
        f'--{TransformArgs.INPUT_FEATURE_NAMES}': SOCCERNET_FEATURE_NAME_RESNET_TF2,
        f'--{TransformArgs.OUTPUT_DIR}': resnet_normalized_features_dir,
        f'--{TransformArgs.OUTPUT_FEATURE_NAME}': RESNET_NORMALIZED_FEATURE_NAME,
        f'--{TransformArgs.NORMALIZERS}': normalizer_path,
        f'--{TransformArgs.FACTORS}': '1.0',
        f'--{TransformArgs.RESAMPLING}': 'interpolate',
    }
    return [
        Command('Create a normalizer for the ResNet features', SCRIPT_CREATE_NORMALIZER, create_args),
        Command('Normalize the ResNet features with the learned normalizer', SCRIPT_TRANSFORM, transform_args),
    ]
def subfiles(folder, join=True, prefix=None, suffix=None, sort=True):
    """List the plain files directly inside *folder*.

    join: return full paths instead of bare file names.
    prefix / suffix: when given, keep only names starting/ending with them.
    sort: return the result sorted (otherwise in os.listdir order).
    """
    def _wanted(name):
        if not os.path.isfile(os.path.join(folder, name)):
            return False
        if prefix is not None and not name.startswith(prefix):
            return False
        return suffix is None or name.endswith(suffix)

    entries = [name for name in os.listdir(folder) if _wanted(name)]
    if join:
        entries = [os.path.join(folder, name) for name in entries]
    return sorted(entries) if sort else entries
class Tenum(TestCase):
    """Tests for an enum-like class helper.

    NOTE(review): the assertions below (isinstance of int members, repr of
    the form '<Foo.FOO: 1>', value lookup via Foo(3), passthrough of unknown
    values like Foo(42)) can only hold if ``Foo`` is wrapped by an enum
    decorator or metaclass; that decorator appears to have been lost in
    extraction -- restore it from upstream before running this test.
    """

    def test_enum(self):
        class Foo(object):
            FOO = 1
            BAR = 3
        self.assertEqual(Foo.FOO, 1)
        # Members behave as ints *and* as Foo instances.
        self.assertTrue(isinstance(Foo.FOO, Foo))
        self.assertEqual(repr(Foo.FOO), '<Foo.FOO: 1>')
        # Construction from a known value resolves to the named member.
        self.assertEqual(repr(Foo(3)), '<Foo.BAR: 3>')
        # Unknown values pass through and print as plain numbers.
        self.assertEqual(repr(Foo(42)), '42')
        self.assertEqual(str(Foo(42)), '42')
        self.assertEqual(int(Foo(42)), 42)
        self.assertEqual(str(Foo(1)), 'Foo.FOO')
        self.assertEqual(int(Foo(1)), 1)
        self.assertTrue(isinstance(str(Foo.FOO), str))
        self.assertTrue(isinstance(repr(Foo.FOO), str))
class SupervisedGraphsage(models.SampleAndAggregate):
def __init__(self, placeholders, features, layer_infos, batch_size=32, concat=True, aggregator_type='mean', model_size='small', name=None, **kwargs):
models.GeneralizedModel.__init__(self, **kwargs)
if (aggregator_type == 'mean'):
self.aggregator_cls = MeanAggregator
elif (aggregator_type == 'seq'):
self.aggregator_cls = SeqAggregator
elif (aggregator_type == 'meanpool'):
self.aggregator_cls = MeanPoolingAggregator
elif (aggregator_type == 'maxpool'):
self.aggregator_cls = MaxPoolingAggregator
elif (aggregator_type == 'gcn'):
self.aggregator_cls = GCNAggregator
else:
raise Exception('Unknown aggregator: ', self.aggregator_cls)
self.graph_aggregator_cls = MaxPoolingGraphAggregator
self.model_size = model_size
self.features = tf.Variable(tf.constant(features, dtype=tf.float32), trainable=False)
self.concat = concat
print(self.features.shape)
self.dims = [int(self.features.shape[2])]
self.dims.extend([layer_infos[i].output_dim for i in range(len(layer_infos))])
print(self.dims)
self.placeholders = placeholders
self.layer_infos = layer_infos
self.env_batch_size = batch_size
self.build_aggregators()
self.build()
def build_aggregators(self):
self.aggregators = []
self.num_layers = len(self.layer_infos)
for layer in range(self.num_layers):
dim_mult = (2 if (self.concat and (layer != 0)) else 1)
if (layer == (self.num_layers - 1)):
aggregator = self.aggregator_cls((dim_mult * self.dims[layer]), self.dims[(layer + 1)], act=(lambda x: x), dropout=self.placeholders['dropout'], concat=self.concat, model_size=self.model_size)
self.output_dim = (self.dims[(layer + 1)] * dim_mult)
else:
aggregator = self.aggregator_cls((dim_mult * self.dims[layer]), self.dims[(layer + 1)], dropout=self.placeholders['dropout'], concat=self.concat, model_size=self.model_size)
self.aggregators.append(aggregator)
self.graph_aggregator = self.graph_aggregator_cls((self.output_dim * 2), (self.output_dim * 2), act=(lambda x: x), dropout=self.placeholders['dropout'], model_size=self.model_size)
def build(self):
num_samples = [layer_info.num_samples for layer_info in self.layer_infos]
g_id = self.placeholders['graph_idx']
(samples1, support_sizes1) = self.sample(g_id, self.placeholders['batch'], self.placeholders['batch_size'], True)
features = tf.slice(self.features, [g_id, 0, 0], [1, (- 1), (- 1)])
features = tf.reshape(features, [self.features.shape[1], self.features.shape[2]])
outputs1 = self.aggregate(samples1, [features], num_samples, support_sizes1, self.placeholders['batch_size'], concat=self.concat, model_size=self.model_size)
outputs1 = tf.nn.l2_normalize(outputs1, 1)
(samples2, support_sizes2) = self.sample(g_id, self.placeholders['batch'], self.placeholders['batch_size'], False)
features = tf.slice(self.features, [g_id, 0, 0], [1, (- 1), (- 1)])
features = tf.reshape(features, [self.features.shape[1], self.features.shape[2]])
outputs2 = self.aggregate(samples2, [features], num_samples, support_sizes2, self.placeholders['batch_size'], concat=self.concat, model_size=self.model_size)
outputs2 = tf.nn.l2_normalize(outputs2, 1)
self.node_preds = tf.concat([outputs1, outputs2], axis=1)
self.graph_preds = self.graph_aggregator(self.node_preds)
def get_node_preds(self):
return self.node_preds
def get_graph_preds(self):
return self.graph_preds
def sample(self, g_id, inputs, batch_size, ins):
samples = [inputs]
support_size = 1
support_sizes = [support_size]
for k in range(len(self.layer_infos)):
t = ((len(self.layer_infos) - k) - 1)
support_size *= self.layer_infos[t].num_samples
sampler = self.layer_infos[t].neigh_sampler
node = sampler((g_id, samples[k], self.layer_infos[t].num_samples, ins))
samples.append(tf.reshape(node, [(support_size * batch_size)]))
support_sizes.append(support_size)
return (samples, support_sizes)
    def aggregate(self, samples, input_features, num_samples, support_sizes, batch_size, name=None, concat=False, model_size='small'):
        """Aggregate sampled neighborhoods bottom-up into one embedding per batch node.

        At each layer every remaining hop is aggregated with its neighbors,
        shrinking the ``hidden`` list by one entry per layer until only the
        batch nodes' embeddings remain.  ``name`` is accepted for interface
        compatibility but unused here.
        """
        # hidden[k]: embeddings of the nodes sampled at depth k.
        hidden = [tf.nn.embedding_lookup(input_features, node_samples) for node_samples in samples]
        for layer in range(len(num_samples)):
            aggregator = self.aggregators[layer]
            next_hidden = []
            # One fewer hop survives each layer.
            for hop in range((len(num_samples) - layer)):
                # Concatenating aggregators double the hidden dim after layer 0.
                dim_mult = (2 if (concat and (layer != 0)) else 1)
                # Reshape the next-depth embeddings into
                # (nodes_at_this_hop, fan_out, feature_dim) for the aggregator.
                neigh_dims = [(batch_size * support_sizes[hop]), num_samples[((len(num_samples) - hop) - 1)], (dim_mult * self.dims[layer])]
                h = aggregator((hidden[hop], tf.reshape(hidden[(hop + 1)], neigh_dims)))
                next_hidden.append(h)
            hidden = next_hidden
        # Only the batch nodes' embeddings remain.
        return hidden[0]
class PartialParquetParameters(PartialFileDownloadParams):
    """Dict-backed parameters describing a partial download of a Parquet file.

    NOTE(review): the decorators below were missing in the checked-in text —
    the bare ``_metadata.setter`` statement raised NameError at class-creation
    time and the second ``def pq_metadata`` silently shadowed the getter.
    Restored ``@staticmethod`` / ``@property`` / ``@pq_metadata.setter``.
    """

    @staticmethod
    def of(row_groups_to_download: Optional[List[int]]=None, num_row_groups: Optional[int]=None, num_rows: Optional[int]=None, in_memory_size_bytes: Optional[float]=None, pq_metadata: Optional[FileMetaData]=None) -> PartialParquetParameters:
        """Build parameters either from explicit values or from file metadata.

        If any of the explicit values is missing, everything is derived from
        ``pq_metadata`` (which is then required).
        """
        if ((row_groups_to_download is None) or (num_row_groups is None) or (num_rows is None) or (in_memory_size_bytes is None)):
            assert (pq_metadata is not None), 'Parquet file metadata must be passed explicitly'
            num_row_groups = pq_metadata.num_row_groups
            row_groups_to_download = [rg for rg in range(num_row_groups)]
            in_memory_size_bytes = 0.0
            num_rows = pq_metadata.num_rows
            # Sum the uncompressed sizes of every row group to download.
            for rg in row_groups_to_download:
                row_group_meta = pq_metadata.row_group(rg)
                in_memory_size_bytes += row_group_meta.total_byte_size
        result = PartialParquetParameters({'row_groups_to_download': row_groups_to_download, 'num_row_groups': num_row_groups, 'num_rows': num_rows, 'in_memory_size_bytes': in_memory_size_bytes})
        if pq_metadata:
            result['pq_metadata'] = pq_metadata
        return result

    @property
    def row_groups_to_download(self) -> List[int]:
        """Indices of the row groups to fetch."""
        return self['row_groups_to_download']

    @property
    def num_row_groups(self) -> int:
        # Annotation corrected: this is a single count, not a list.
        return self['num_row_groups']

    @property
    def num_rows(self) -> int:
        """Total number of rows covered by the download."""
        return self['num_rows']

    @property
    def in_memory_size_bytes(self) -> float:
        """Estimated uncompressed size of the downloaded row groups."""
        return self['in_memory_size_bytes']

    @property
    def pq_metadata(self) -> Optional[FileMetaData]:
        """The pyarrow FileMetaData, if it was provided."""
        return self.get('pq_metadata')

    @pq_metadata.setter
    def pq_metadata(self, metadata: FileMetaData) -> None:
        self['pq_metadata'] = metadata
class JciHitachiPowerConsumptionSensorEntity(JciHitachiEntity, SensorEntity):
    """Power-consumption sensor for a JCI Hitachi appliance.

    NOTE(review): Home Assistant reads ``name``, ``native_value`` etc. as
    entity *properties*; the ``@property`` decorators were missing in the
    checked-in text (bound methods would be returned instead of values).
    Restored here.
    """

    def __init__(self, thing, coordinator):
        super().__init__(thing, coordinator)

    @property
    def name(self):
        """Friendly entity name derived from the appliance name."""
        return f'{self._thing.name} Power Consumption'

    @property
    def native_value(self):
        """Latest kWh reading, or None when unavailable/unsupported."""
        status = self.hass.data[DOMAIN][UPDATED_DATA].get(self._thing.name, None)
        if status:
            return (None if (status.power_kwh == 'unsupported') else status.power_kwh)
        return None

    @property
    def device_class(self):
        return SensorDeviceClass.ENERGY

    @property
    def native_unit_of_measurement(self):
        return UnitOfEnergy.KILO_WATT_HOUR

    @property
    def unique_id(self):
        return f'{self._thing.gateway_mac_address}_power_consumption_sensor'

    @property
    def state_class(self):
        # Energy meters only ever count upward.
        return STATE_CLASS_TOTAL_INCREASING
# NOTE(review): this line was a bare call whose result was discarded — almost
# certainly a decorator whose '@' was lost; restored so the platform gate
# actually applies to the test case.
@_platform(Platform.WINDOWS)
class WindowsMulticoreClockTestCase(unittest.TestCase):
    """Verify clock() is monotonic on Windows even across CPU cores."""

    def test_multicore(self):
        """Sample clock() for ~3 seconds and assert it never goes backwards."""
        failures = 0
        old_time = clock()
        end_time = (time.time() + 3)
        while (time.time() < end_time):
            t = clock()
            if (t < old_time):
                failures += 1
            old_time = t
            time.sleep(0.001)
        self.assertTrue((failures == 0))
class Adaround:
    """Adaptive-rounding (AdaRound) of weights for a TensorFlow session.

    NOTE(review): every method here takes ``cls`` or no instance argument, so
    they were clearly ``@classmethod``/``@staticmethod`` originally; without
    the decorators, ``Adaround.apply_adaround(session, ...)`` would bind the
    session as ``cls``. Decorators restored.
    """

    @classmethod
    def apply_adaround(cls, session: tf.compat.v1.Session, starting_op_names: List[str], output_op_names: List[str], params: AdaroundParameters, path: str, filename_prefix: str, default_param_bw: int=4, default_quant_scheme: QuantScheme=QuantScheme.post_training_tf_enhanced, default_config_file: str=None) -> tf.compat.v1.Session:
        """Run AdaRound, export the encodings JSON and return the soft-rounded session."""
        if (not os.path.exists(WORKING_DIR)):
            os.makedirs(WORKING_DIR)
        (param_encodings, session_soft_rounded_weight) = cls._apply_adaround_helper(session, starting_op_names, output_op_names, params, default_param_bw, default_quant_scheme, default_config_file)
        # Persist per-tensor encodings next to the model artifacts.
        cls.export_encoding_to_json(path, filename_prefix, param_encodings)
        if os.path.exists(WORKING_DIR):
            logger.info('Deleting temporary working directory %s', WORKING_DIR)
            shutil.rmtree(WORKING_DIR)
        logger.info('Completed Adarounding Model')
        return session_soft_rounded_weight

    @classmethod
    def _apply_adaround_helper(cls, session: tf.compat.v1.Session, starting_op_names: List[str], output_op_names: List[str], params: AdaroundParameters, param_bw: int, quant_scheme: QuantScheme, config_file: str) -> Tuple[(Dict, tf.compat.v1.Session)]:
        """Optimize weight rounding op by op; return (encodings, soft-rounded session)."""
        # Two working copies: hard rounding feeds activation sampling, soft
        # rounding is what gets returned to the caller.
        session_hard_rounded_weight = graph_saver.save_and_load_graph(WORKING_DIR, session)
        session_soft_rounded_weight = graph_saver.save_and_load_graph(WORKING_DIR, session)
        (configs, strict_symmetric, unsigned_symmetric, enable_per_channel) = Adaround.get_config_dict_keys(config_file)
        opt_params = AdaroundHyperParameters(params.num_iterations, params.reg_param, params.beta_range, params.warm_start)
        act_sampler = ActivationSampler(params.data_set)
        ordered_ops = cls._get_ordered_list_of_ops(session.graph, starting_op_names, output_op_names)
        param_encodings = {}
        for op in tqdm(ordered_ops):
            logger.info('Started Optimizing weight rounding of op: %s', op.name)
            hard_rounded_op = session_hard_rounded_weight.graph.get_operation_by_name(op.name)
            soft_rounded_op = session_soft_rounded_weight.graph.get_operation_by_name(op.name)
            (all_inp_data, all_out_data) = act_sampler.sample_activation(op, hard_rounded_op, session, session_hard_rounded_weight, starting_op_names, params.num_batches)
            is_symmetric = cls.get_is_symmetric_flag_for_op_param(configs, op.type, param_name='weight', framework_to_onnx_type_dict=tf_op_type_to_onnx_type_dict)
            act_func = cls._get_act_func(op)
            # Build the wrapper in a private graph so it does not pollute the
            # sessions being rounded.
            graph = tf.Graph()
            with graph.as_default():
                (output_height, output_width, output_channels) = (None, None, None)
                if (op.type == 'Conv2DBackpropInput'):
                    (output_height, output_width, output_channels) = cls.get_conv2d_transpose_output_tensor_shape(op.get_attr('data_format').decode('utf-8'), all_out_data)
                wrapper = AdaroundWrapper(session, op, param_bw, quant_scheme, is_symmetric, strict_symmetric, unsigned_symmetric, enable_per_channel, output_height, output_width, output_channels)
                (hard_rounded_weight, soft_rounded_weight) = AdaroundOptimizer().adaround_wrapper(wrapper, act_func, all_inp_data, all_out_data, opt_params)
            cls._update_param_encodings_dict(param_encodings, op, wrapper.encoding, is_symmetric)
            WeightTensorUtils.update_tensor_for_op(session_hard_rounded_weight, hard_rounded_op, hard_rounded_weight)
            WeightTensorUtils.update_tensor_for_op(session_soft_rounded_weight, soft_rounded_op, soft_rounded_weight)
        session_hard_rounded_weight.close()
        return (param_encodings, session_soft_rounded_weight)

    @staticmethod
    def get_config_dict_keys(config_file: str) -> Tuple[(ConfigDictType, bool, bool, bool)]:
        """Load the quantsim config file and pull out the symmetry/per-channel defaults."""
        configs = JsonConfigImporter.import_json_config_file(config_file)
        strict_symmetric = configs[ConfigDictKeys.DEFAULTS].get(ConfigDictKeys.STRICT_SYMMETRIC, False)
        unsigned_symmetric = configs[ConfigDictKeys.DEFAULTS].get(ConfigDictKeys.UNSIGNED_SYMMETRIC, False)
        per_channel_enabled = configs[ConfigDictKeys.DEFAULTS].get(ConfigDictKeys.PER_CHANNEL_QUANTIZATION, False)
        return (configs, strict_symmetric, unsigned_symmetric, per_channel_enabled)

    @staticmethod
    def _get_ordered_list_of_ops(graph: tf.Graph, input_op_names: List[str], output_op_names: List[str]) -> List[tf.Operation]:
        """Return the topologically ordered ops whose type AdaRound supports."""
        list_of_ordered_ops = get_ordered_ops(graph, input_op_names, output_op_names)
        ordered_ops = []
        for op in list_of_ordered_ops:
            if (op.type in AdaroundSupportedOps):
                ordered_ops.append(op)
        return ordered_ops

    @staticmethod
    def _get_act_func(op: tf.Operation) -> Union[(Callable, None)]:
        """Find the activation function following *op*, looking through a bias add."""
        act_func = None
        consumer_ops = op.outputs[0].consumers()
        if (not consumer_ops):
            return act_func
        if (consumer_ops[0].type in ActFuncMap):
            act_func = ActFuncMap[consumer_ops[0].type]
        elif (consumer_ops[0].type in ['Add', 'BiasAdd']):
            # Activation may sit behind the bias-add rather than the op itself.
            if (consumer_ops[0].outputs[0].consumers() and (consumer_ops[0].outputs[0].consumers()[0].type in ActFuncMap)):
                act_func = ActFuncMap[consumer_ops[0].outputs[0].consumers()[0].type]
        logger.info("op: %s 's next following act func: %s", op.name, act_func)
        return act_func

    @classmethod
    def export_encoding_to_json(cls, path: str, filename_prefix: str, param_encodings: Dict):
        """Write *param_encodings* to ``<path>/<filename_prefix>.encodings`` as JSON."""
        os.makedirs(os.path.abspath(path), exist_ok=True)
        encoding_file_path = os.path.join(path, (filename_prefix + '.encodings'))
        with open(encoding_file_path, 'w') as encoding_fp:
            json.dump(param_encodings, encoding_fp, sort_keys=True, indent=4)

    @staticmethod
    def _update_param_encodings_dict(encoding_dict: Dict, op: tf.Operation, encoding: Union[(libpymo.TfEncoding, List[libpymo.TfEncoding])], is_symmetric: bool):
        """Record the (per-channel or per-tensor) encodings of *op*'s weight tensor."""
        tensor_name = op.inputs[1].name
        encoding = (encoding if isinstance(encoding, list) else [encoding])
        encoding_dict[tensor_name] = [{'min': enc.min, 'max': enc.max, 'scale': enc.delta, 'offset': int(enc.offset), 'bitwidth': enc.bw, 'is_symmetric': str(is_symmetric)} for enc in encoding]

    @staticmethod
    def get_is_symmetric_flag_for_op_param(configs: ConfigDictType, tf_op_type: str, param_name: str, framework_to_onnx_type_dict: dict) -> bool:
        """Resolve the is_symmetric flag: op-type config, then params, then defaults, then False."""
        assert (param_name in MAP_TF_PARAM_NAME_TO_QUANTSIM_NAME.keys()), 'param name is invalid.'
        try:
            onnx_type = framework_to_onnx_type_dict[tf_op_type]
            return configs[ConfigDictKeys.OP_TYPE][onnx_type][ConfigDictKeys.PARAMS][param_name][ConfigDictKeys.IS_SYMMETRIC]
        except KeyError:
            pass
        try:
            return configs[ConfigDictKeys.PARAMS][param_name][ConfigDictKeys.IS_SYMMETRIC]
        except KeyError:
            pass
        try:
            return configs[ConfigDictKeys.DEFAULTS][ConfigDictKeys.PARAMS][ConfigDictKeys.IS_SYMMETRIC]
        except KeyError:
            pass
        return False

    @staticmethod
    def get_conv2d_transpose_output_tensor_shape(data_format: str, output_data: np.ndarray):
        """Return (height, width, channels) of a transposed-conv output for NHWC or NCHW."""
        if (data_format == 'NHWC'):
            output_height = output_data.shape[1]
            output_width = output_data.shape[2]
            output_channels = output_data.shape[3]
        else:
            output_height = output_data.shape[2]
            output_width = output_data.shape[3]
            output_channels = output_data.shape[1]
        return (output_height, output_width, output_channels)
class GemmRelated(COp):
    """Shared machinery for Ops whose C implementation calls BLAS {s,d}gemm.

    Subclasses supply ``setup_z_Nz_Sz*``, ``broadcast_xy`` and the
    ``case_*_ab_constants`` snippets; ``build_gemm_call`` stitches the class
    attributes below into one C code template (``%(...)s`` placeholders are
    filled in by the caller).
    """
    __props__: tuple[(str, ...)] = ()
    def c_support_code(self, **kwargs):
        """Return helper C code: the MOD macro, a timer and a stride helper, plus the BLAS header."""
        mod_str = '\n    #ifndef MOD\n    #define MOD %\n    #endif\n    static double time_time() // a time function like time.perf_counter()\n    {\n        struct timeval tv;\n        gettimeofday(&tv, 0);\n        return (double) tv.tv_sec + (double) tv.tv_usec / 1000000.0;\n    }\n\n    void compute_strides(npy_intp *shape, int N_shape, int type_size, npy_intp *res) {\n    int s;\n    res[N_shape - 1] = type_size;\n    for (int i = N_shape - 1; i > 0; i--) {\n        s = shape[i];\n        res[i - 1] = res[i] * (s > 0 ? s : 1);\n    }\n    }\n    '
        return (blas_header_text() + mod_str)
    def c_headers(self, **kwargs):
        """C headers needed by the generated code."""
        return ['<iostream>', '<time.h>', '<sys/time.h>']
    def c_libraries(self, **kwargs):
        """Libraries to link against (the configured BLAS)."""
        return ldflags()
    def c_compile_args(self, **kwargs):
        return ldflags(libs=False, flags=True)
    def c_lib_dirs(self, **kwargs):
        return ldflags(libs=False, libs_dir=True)
    def c_header_dirs(self, **kwargs):
        return ldflags(libs=False, include_dir=True)
    # Declare dims (Nx/Ny/Nz), strides (Sx/Sy/Sz) and the stride-encoding 'unit'.
    declare_NS = '\n        int unit = 0;\n\n        int type_num = PyArray_DESCR(%(_x)s)->type_num;\n        int type_size = PyArray_DESCR(%(_x)s)->elsize; // in bytes\n\n        npy_intp* Nx = PyArray_DIMS(%(_x)s);\n        npy_intp* Ny = PyArray_DIMS(%(_y)s);\n        npy_intp* Nz = 0; //PyArray_DIMS(%(_zout)s);\n\n        npy_intp* Sx = PyArray_STRIDES(%(_x)s);\n        npy_intp* Sy = PyArray_STRIDES(%(_y)s);\n        npy_intp* Sz = 0; //PyArray_STRIDES(%(_zout)s);\n\n        //strides for x, y, z in dimensions 0, 1\n        int sx_0, sx_1, sy_0, sy_1, sz_0, sz_1;\n        '
    # Reject inputs that are not rank-2.
    check_xyz_rank2 = '\n        if (PyArray_NDIM(%(_x)s) != 2) {\n            PyErr_Format(PyExc_NotImplementedError,\n                         "rank(x) != 2. rank(x) is %%d.",\n                         PyArray_NDIM(%(_x)s));\n            %(fail)s;\n        }\n        if (PyArray_NDIM(%(_y)s) != 2) {\n            PyErr_Format(PyExc_NotImplementedError,\n                         "rank(y) != 2. rank(y) is %%d.", PyArray_NDIM(%(_y)s));\n            %(fail)s;\n        }\n        if (%(_zout)s && PyArray_NDIM(%(_zout)s) != 2) {\n            PyErr_Format(PyExc_NotImplementedError,\n                         "rank(z) != 2. rank(z) is %%d.", PyArray_NDIM(%(_zout)s));\n            %(fail)s;\n        }\n        '
    # Only float32/float64 are supported, and all three arrays must match.
    check_xyz_double_or_float = '\n        if ((PyArray_DESCR(%(_x)s)->type_num != NPY_DOUBLE)\n            && (PyArray_DESCR(%(_x)s)->type_num != NPY_FLOAT))\n        {PyErr_SetString(PyExc_NotImplementedError, "type(x) is not double or float"); %(fail)s;}\n\n        if ((PyArray_DESCR(%(_y)s)->type_num != NPY_DOUBLE)\n            && (PyArray_DESCR(%(_y)s)->type_num != NPY_FLOAT))\n        {PyErr_SetString(PyExc_NotImplementedError, "type(y) is not double or float"); %(fail)s;}\n\n        if ((PyArray_DESCR(%(_zout)s)->type_num != NPY_DOUBLE)\n            && (PyArray_DESCR(%(_zout)s)->type_num != NPY_FLOAT))\n        {PyErr_SetString(PyExc_NotImplementedError, "type(z) is not double or float"); %(fail)s;}\n\n        if ((PyArray_DESCR(%(_x)s)->type_num != PyArray_DESCR(%(_y)s)->type_num)\n            ||(PyArray_DESCR(%(_x)s)->type_num != PyArray_DESCR(%(_zout)s)->type_num))\n        { PyErr_SetString(PyExc_NotImplementedError, "type(x), type(y), type(z) are not all the same"); %(fail)s; }\n        '
    # Same dtype check for the scalar coefficients a and b.
    check_ab_double_or_float = '\n        if ((PyArray_DESCR(%(_a)s)->type_num != NPY_DOUBLE)\n            && (PyArray_DESCR(%(_a)s)->type_num != NPY_FLOAT))\n        {PyErr_SetString(PyExc_NotImplementedError, "type(a) is not double or float"); %(fail)s;}\n\n        if ((PyArray_DESCR(%(_b)s)->type_num != NPY_DOUBLE)\n            && (PyArray_DESCR(%(_b)s)->type_num != NPY_FLOAT))\n        {PyErr_SetString(PyExc_NotImplementedError, "type(b) is not double or float"); %(fail)s;}\n        '
    # Shape-compatibility checks for x @ y -> z (broadcast-friendly).
    check_dims = '\n        if (Nx[0] !=1 && Nz[0] != 1 && Nx[0] != Nz[0])\n        {\n            PyErr_Format(PyExc_ValueError,\n                "Shape mismatch: x has %%ld rows but z has %%ld rows",\n                (long int)Nx[0], (long int)Nz[0]);\n            %(fail)s;\n        }\n        if (Nx[1] != Ny[0])\n        {\n            PyErr_Format(PyExc_ValueError,\n                "Shape mismatch: x has %%ld cols (and %%ld rows) but y has %%ld rows (and %%ld cols)",\n                (long int)Nx[1], (long int)Nx[0], (long int)Ny[0], (long int)Ny[1]);\n            %(fail)s;\n        }\n        if (Ny[1] != 1 && Nz[1]!= 1 && Ny[1] != Nz[1])\n        {\n            PyErr_Format(PyExc_ValueError,\n                "Shape mismatch: y has %%ld cols but z has %%ld cols",\n                (long int)Ny[1], (long int)Nz[1]);\n            %(fail)s;\n        }\n\n        // We must not raise an error when Nx[1] == 0. This would disable cases\n        // that numpy.dot accept.\n        '
    # Copy any array whose strides BLAS cannot consume directly.
    check_strides = '\n    /*\n    If some matrices are not contiguous on either dimensions,\n    or have invalid strides, copy their content into a contiguous one\n    */\n    if ((Sx[0] < 1) || (Sx[1] < 1) || (Sx[0] MOD type_size) || (Sx[1] MOD type_size)\n        || ((Sx[0] != type_size) && (Sx[1] != type_size)))\n    {\n        PyArrayObject * _x_copy = (PyArrayObject *) PyArray_Copy(%(_x)s);\n        if (!_x_copy)\n            %(fail)s\n        Py_XDECREF(%(_x)s);\n        %(_x)s = _x_copy;\n        Sx = PyArray_STRIDES(%(_x)s);\n        if ((Sx[0] < 1) || (Sx[1] < 1)) {\n            compute_strides(Nx, 2, type_size, Sx);\n        }\n    }\n\n    if ((Sy[0] < 1) || (Sy[1] < 1) || (Sy[0] MOD type_size) || (Sy[1] MOD type_size)\n        || ((Sy[0] != type_size) && (Sy[1] != type_size)))\n    {\n        PyArrayObject * _y_copy = (PyArrayObject *) PyArray_Copy(%(_y)s);\n        if (!_y_copy)\n            %(fail)s\n        Py_XDECREF(%(_y)s);\n        %(_y)s = _y_copy;\n        Sy = PyArray_STRIDES(%(_y)s);\n        if ((Sy[0] < 1) || (Sy[1] < 1)) {\n            compute_strides(Ny, 2, type_size, Sy);\n        }\n    }\n\n    if ((Sz[0] < 1) || (Sz[1] < 1) || (Sz[0] MOD type_size) || (Sz[1] MOD type_size)\n        || ((Sz[0] != type_size) && (Sz[1] != type_size)))\n    {\n        PyArrayObject * _z_copy = (PyArrayObject *) PyArray_Copy(%(_zout)s);\n        if (!_z_copy)\n            %(fail)s\n        Py_XDECREF(%(_zout)s);\n        %(_zout)s = _z_copy;\n        Sz = PyArray_STRIDES(%(_zout)s);\n        if ((Sz[0] < 1) || (Sz[1] < 1)) {\n            compute_strides(Nz, 2, type_size, Sz);\n        }\n    }\n    '
    # Pack the three arrays' layout (C vs. Fortran order) into one int.
    encode_strides_in_unit = '\n        /*\n        encode the stride structure of _x,_y,_zout into a single integer\n        */\n        unit |= ((Sx[1] == type_size || Nx[1]==1) ? 0x0 : (Sx[0] == type_size || Nx[0]==1) ? 0x1 : 0x2) << 8;\n        unit |= ((Sy[1] == type_size || Ny[1]==1) ? 0x0 : (Sy[0] == type_size || Ny[0]==1) ? 0x1 : 0x2) << 4;\n        unit |= ((Sz[1] == type_size || Nz[1]==1) ? 0x0 : (Sz[0] == type_size || Nz[0]==1) ? 0x1 : 0x2) << 0;\n        '
    # Derive element-count strides BLAS will accept, even for degenerate shapes.
    compute_strides = '\n        /* create appropriate strides for malformed matrices that are row or column\n         * vectors, or empty matrices.\n         * In that case, the value of the stride does not really matter, but\n         * some versions of BLAS insist that:\n         *  - they are not smaller than the number of elements in the array,\n         *  - they are not 0.\n         */\n        sx_0 = (Nx[0] > 1) ? Sx[0]/type_size : (Nx[1] + 1);\n        sx_1 = (Nx[1] > 1) ? Sx[1]/type_size : (Nx[0] + 1);\n        sy_0 = (Ny[0] > 1) ? Sy[0]/type_size : (Ny[1] + 1);\n        sy_1 = (Ny[1] > 1) ? Sy[1]/type_size : (Ny[0] + 1);\n        sz_0 = (Nz[0] > 1) ? Sz[0]/type_size : (Nz[1] + 1);\n        sz_1 = (Nz[1] > 1) ? Sz[1]/type_size : (Nz[0] + 1);\n        '
    begin_switch_typenum = '\n        switch (type_num)\n        {\n        '
    case_float = '\n            case NPY_FLOAT:\n            {\n        '
    # float32 path: dispatch to sgemm_ based on the encoded stride layout.
    case_float_gemm = '\n            float* x = (float*)PyArray_DATA(%(_x)s);\n            float* y = (float*)PyArray_DATA(%(_y)s);\n            float* z = (float*)PyArray_DATA(%(_zout)s);\n            char N = \'N\';\n            char T = \'T\';\n            int Nz0 = Nz[0], Nz1 = Nz[1], Nx1 = Nx[1];\n            //std::cerr << (unit/256) MOD 16 << (unit / 16) MOD 16 << unit MOD 16<< \'\\n\';\n            //double t0 = time_time();\n            switch(unit)\n            {\n                case 0x000: sgemm_(&N, &N, &Nz1, &Nz0, &Nx1, &a, y, &sy_0, x, &sx_0, &b, z, &sz_0); break;\n                case 0x100: sgemm_(&N, &T, &Nz1, &Nz0, &Nx1, &a, y, &sy_0, x, &sx_1, &b, z, &sz_0); break;\n                case 0x010: sgemm_(&T, &N, &Nz1, &Nz0, &Nx1, &a, y, &sy_1, x, &sx_0, &b, z, &sz_0); break;\n                case 0x110: sgemm_(&T, &T, &Nz1, &Nz0, &Nx1, &a, y, &sy_1, x, &sx_1, &b, z, &sz_0); break;\n                case 0x001: sgemm_(&T, &T, &Nz0, &Nz1, &Nx1, &a, x, &sx_0, y, &sy_0, &b, z, &sz_1); break;\n                case 0x101: sgemm_(&N, &T, &Nz0, &Nz1, &Nx1, &a, x, &sx_1, y, &sy_0, &b, z, &sz_1); break;\n                case 0x011: sgemm_(&T, &N, &Nz0, &Nz1, &Nx1, &a, x, &sx_0, y, &sy_1, &b, z, &sz_1); break;\n                case 0x111: sgemm_(&N, &N, &Nz0, &Nz1, &Nx1, &a, x, &sx_1, y, &sy_1, &b, z, &sz_1); break;\n                default: PyErr_SetString(PyExc_ValueError, "some matrix has no unit stride"); %(fail)s;\n            };\n            //fprintf(stderr, "Calling sgemm %%i %%i %%i %%i took %%f\\n", unit, Nz1, Nz0, Nx1, time_time() - t0);\n        '
    case_double = '\n            }\n            break;\n            case NPY_DOUBLE:\n            {\n        '
    # float64 path: same dispatch, calling dgemm_.
    case_double_gemm = '\n            double* x = (double*)PyArray_DATA(%(_x)s);\n            double* y = (double*)PyArray_DATA(%(_y)s);\n            double* z = (double*)PyArray_DATA(%(_zout)s);\n            char N = \'N\';\n            char T = \'T\';\n            int Nz0 = Nz[0], Nz1 = Nz[1], Nx1 = Nx[1];\n            //std::cerr << (unit/256) MOD 16 << (unit / 16) MOD 16 << unit MOD 16<< \'\\n\';\n            //double t0 = time_time();\n            //fprintf(stderr, "unit=%%x N= %%i %%i %%i S = %%i %%i %%i %%i %%i %%i\\n", unit,\n            //Nz1, Nz0, Nx1,\n            //sy_0, sy_1,\n            //sx_0, sx_1,\n            //sz_0, sz_1\n            //);\n            switch(unit)\n            {\n                case 0x000: dgemm_(&N, &N, &Nz1, &Nz0, &Nx1, &a, y,\n                                   &sy_0, x, &sx_0, &b, z, &sz_0); break;\n                case 0x100: dgemm_(&N, &T, &Nz1, &Nz0, &Nx1, &a, y,\n                                   &sy_0, x, &sx_1, &b, z, &sz_0); break;\n                case 0x010: dgemm_(&T, &N, &Nz1, &Nz0, &Nx1, &a, y,\n                                   &sy_1, x, &sx_0, &b, z, &sz_0); break;\n                case 0x110: dgemm_(&T, &T, &Nz1, &Nz0, &Nx1, &a, y,\n                                   &sy_1, x, &sx_1, &b, z, &sz_0); break;\n                case 0x001: dgemm_(&T, &T, &Nz0, &Nz1, &Nx1, &a, x,\n                                   &sx_0, y, &sy_0, &b, z, &sz_1); break;\n                case 0x101: dgemm_(&N, &T, &Nz0, &Nz1, &Nx1, &a, x,\n                                   &sx_1, y, &sy_0, &b, z, &sz_1); break;\n                case 0x011: dgemm_(&T, &N, &Nz0, &Nz1, &Nx1, &a, x,\n                                   &sx_0, y, &sy_1, &b, z, &sz_1); break;\n                case 0x111: dgemm_(&N, &N, &Nz0, &Nz1, &Nx1, &a, x,\n                                   &sx_1, y, &sy_1, &b, z, &sz_1); break;\n                default: PyErr_SetString(PyExc_ValueError,\n                                         "some matrix has no unit stride");\n                         %(fail)s;\n            };\n            //fprintf(stderr, "Calling dgemm %%i %%i %%i %%i took %%f\\n",\n            //        unit, Nz1, Nz0, Nx1, time_time()- t0);\n        '
    end_switch_typenum = '\n        }\n        break;\n        }\n        '
    def build_gemm_call(self):
        """Concatenate the template fragments into the full C implementation.

        Uses the inplace/out-of-place variant of setup_z_Nz_Sz when the
        subclass defines an ``inplace`` attribute.
        """
        if hasattr(self, 'inplace'):
            setup_z_Nz_Sz = 'if(%(params)s->inplace){{{}}}else{{{}}}'.format(self.setup_z_Nz_Sz_inplace, self.setup_z_Nz_Sz_outplace)
        else:
            setup_z_Nz_Sz = self.setup_z_Nz_Sz
        return ''.join((self.declare_NS, self.check_xyz_rank2, setup_z_Nz_Sz, self.check_xyz_double_or_float, self.check_ab_double_or_float, self.broadcast_xy, self.check_dims, self.check_strides, self.encode_strides_in_unit, self.compute_strides, self.begin_switch_typenum, self.case_float, self.case_float_ab_constants, self.case_float_gemm, self.case_double, self.case_double_ab_constants, self.case_double_gemm, self.end_switch_typenum))
    def build_gemm_version(self):
        """Version tuple for the C code cache; bump when the template changes."""
        return (13, blas_header_version())
class TagTreeJoinUtilTest(TestCase):
    """Exercise tag_utils.join_tree_name: joining with '/' and doubling
    embedded separators as the escape."""

    def test_join_tree_none(self):
        self.assertEqual(tag_utils.join_tree_name([]), '')

    def test_join_tree_one(self):
        self.assertEqual(tag_utils.join_tree_name(['one']), 'one')

    def test_join_tree_three(self):
        self.assertEqual(tag_utils.join_tree_name(['one', 'two', 'three']), 'one/two/three')

    def test_join_tree_escape(self):
        # A '/' inside a component is escaped by doubling it.
        self.assertEqual(tag_utils.join_tree_name(['one', 'two/dos', 'three']), 'one/two//dos/three')

    def test_join_tree_escape_odd(self):
        self.assertEqual(tag_utils.join_tree_name(['one/', 'two']), 'one///two')

    def test_join_tree_escape_even(self):
        self.assertEqual(tag_utils.join_tree_name(['one', 'two//dos', 'three']), 'one/two////dos/three')

    def test_join_tree_escape_leading(self):
        self.assertEqual(tag_utils.join_tree_name(['/one', 'two']), '//one/two')

    def test_join_tree_escape_trailing(self):
        self.assertEqual(tag_utils.join_tree_name(['one', 'two/']), 'one/two//')
def _container_getitem(instance, elts, index, context: (InferenceContext | None)=None):
    """Resolve ``container[index]`` over *elts* for a Slice or Const index,
    translating low-level errors into the matching Astroid exceptions."""
    try:
        if isinstance(index, Slice):
            # Slicing yields a fresh container of the same node class.
            bounds = _infer_slice(index, context=context)
            sliced = instance.__class__()
            sliced.elts = elts[bounds]
            sliced.parent = instance.parent
            return sliced
        if isinstance(index, Const):
            return elts[index.value]
    except ValueError as exc:
        raise AstroidValueError(message='Slice {index!r} cannot index container', node=instance, index=index, context=context) from exc
    except IndexError as exc:
        raise AstroidIndexError(message='Index {index!s} out of range', node=instance, index=index, context=context) from exc
    except TypeError as exc:
        raise AstroidTypeError(message='Type error {error!r}', node=instance, index=index, context=context) from exc
    # Neither a Slice nor a Const: not a usable subscript.
    raise AstroidTypeError(f'Could not use {index} as subscript index')
class ChannelListView(ChannelMixin, ListView):
    """Paginated channel index, plus a sidebar of the most-subscribed channels."""

    paginate_by = 100
    template_name = 'website/channel_list.html'
    page_title = 'Channel Index'
    max_popular = 10  # how many channels to show as "most popular"

    def get_context_data(self, **kwargs):
        """Add the top ``max_popular`` channels by subscription count to the context."""
        context = super().get_context_data(**kwargs)
        # sorted() accepts any iterable, so the extra list() was redundant;
        # subscriptions.count() issues a SQL COUNT instead of fetching and
        # materializing every subscription row just to take len() of it.
        context['most_popular'] = sorted(
            self.get_queryset(),
            key=lambda channel: channel.subscriptions.count(),
            reverse=True,
        )[:self.max_popular]
        return context
class CSHintDetailsTab(GameDetailsTab):
    """Details tab listing every hint, its target pickup and its in-game text."""

    def __init__(self, parent: QtWidgets.QWidget, game: RandovaniaGame):
        super().__init__(parent, game)
        self.tree_widget = QtWidgets.QTreeWidget(parent)

    def widget(self) -> QtWidgets.QWidget:
        return self.tree_widget

    def tab_title(self) -> str:
        return 'Hints'

    def update_content(self, configuration: BaseConfiguration, all_patches: dict[(int, GamePatches)], players: PlayersConfiguration):
        """Rebuild the tree: one top-level item per region, one row per hint."""
        self.tree_widget.clear()
        self.tree_widget.setColumnCount(3)
        self.tree_widget.setHeaderLabels(['Hint', 'Pickup', 'In-Game Text'])
        game = filtered_database.game_description_for_layout(configuration)
        region_list = game.region_list
        patches = all_patches[players.player_index]
        per_region: dict[(str, dict[(str, tuple[(str, str)])])] = collections.defaultdict(dict)
        hints = get_hints(all_patches, players, Random())
        for (identifier, hint) in patches.hints.items():
            node = region_list.node_by_identifier(identifier)
            source_region = region_list.nodes_to_region(node)
            source_name = region_list.node_name(node)
            hint_text = hints[identifier]
            # (The original re-fetched patches.hints[identifier] here, but the
            # loop variable already holds exactly that hint.)
            if (hint.target is None):
                hinted_pickup = 'No target for hint'
            else:
                target = patches.pickup_assignment.get(hint.target)
                if (target is None):
                    hinted_pickup = 'Nothing'
                else:
                    hinted_pickup = target.pickup.name
                    if players.is_multiworld:
                        hinted_pickup = f"{players.player_names[target.player]}'s {hinted_pickup}"
            per_region[source_region.name][source_name] = (hint_text, hinted_pickup)
        for (region_name, region_contents) in iterate_key_sorted(per_region):
            region_item = QtWidgets.QTreeWidgetItem(self.tree_widget)
            region_item.setText(0, region_name)
            region_item.setExpanded(True)
            for (source_name, content) in iterate_key_sorted(region_contents):
                area_item = QtWidgets.QTreeWidgetItem(region_item)
                area_item.setText(0, source_name)
                area_item.setText(1, content[1])
                area_item.setText(2, content[0])
        self.tree_widget.resizeColumnToContents(0)
        self.tree_widget.resizeColumnToContents(1)
        # Fixed: the widget has 3 columns (indices 0-2); the original resized
        # nonexistent column 3, leaving the last column never resized.
        self.tree_widget.resizeColumnToContents(2)
class FakeFilesystem():
    def __init__(self, path_separator: str=os.path.sep, total_size: Optional[int]=None, patcher: Any=None, create_temp_dir: bool=False) -> None:
        """Initialize the fake filesystem.

        Args:
            path_separator: separator used by the fake OS (defaults to host's).
            total_size: optional size limit of the root mount point, in bytes.
            patcher: the Patcher that owns this filesystem, if any.
            create_temp_dir: if True, create a fake temp directory on reset.
        """
        self.path_separator: str = path_separator
        self.alternative_path_separator: Optional[str] = os.path.altsep
        self.patcher = patcher
        self.create_temp_dir = create_temp_dir
        # A custom separator disables the host's alternative separator.
        if (path_separator != os.sep):
            self.alternative_path_separator = None
        self._is_windows_fs = (sys.platform == 'win32')
        self._is_macos = (sys.platform == 'darwin')
        # Windows and macOS filesystems are case-insensitive by default.
        self.is_case_sensitive: bool = (not (self.is_windows_fs or self._is_macos))
        self.root: FakeDirectory
        self._cwd = ''
        # Read the real umask (0o022 == 18 is just the probe value), then
        # immediately restore it so the process umask is unchanged.
        self.umask: int = os.umask(18)
        os.umask(self.umask)
        # Open file descriptors; freed slots are recycled via the fd heap.
        self.open_files: List[Optional[List[AnyFileWrapper]]] = []
        self._free_fd_heap: List[int] = []
        self.last_ino: int = 0
        self.last_dev: int = 0
        self.mount_points: Dict[(AnyString, Dict)] = OrderedDict()
        self.dev_null: Any = None
        # Build root dir, mounts and standard streams; pathlib is wired up
        # later to avoid a circular import during construction.
        self.reset(total_size=total_size, init_pathlib=False)
        self.patch_open_code = PatchMode.OFF
        self.shuffle_listdir_results = False
def is_linux(self) -> bool:
return ((not self.is_windows_fs) and (not self.is_macos))
def is_windows_fs(self) -> bool:
return self._is_windows_fs
_windows_fs.setter
def is_windows_fs(self, value: bool) -> None:
if (self._is_windows_fs != value):
self._is_windows_fs = value
self.reset()
FakePathModule.reset(self)
def is_macos(self) -> bool:
return self._is_macos
_macos.setter
def is_macos(self, value: bool) -> None:
if (self._is_macos != value):
self._is_macos = value
self.reset()
FakePathModule.reset(self)
def cwd(self) -> str:
return self._cwd
def cwd(self, value: str) -> None:
self._cwd = value
self._auto_mount_drive_if_needed(value)
def root_dir(self) -> FakeDirectory:
if self.is_windows_fs:
return self._mount_point_dir_for_cwd()
return self.root
def root_dir_name(self) -> str:
root_dir = to_string(self.root_dir.name)
if (not root_dir.endswith(self.path_separator)):
return (root_dir + self.path_separator)
return root_dir
def os(self) -> OSType:
return (OSType.WINDOWS if self.is_windows_fs else (OSType.MACOS if self.is_macos else OSType.LINUX))
def os(self, value: OSType) -> None:
self._is_windows_fs = (value == OSType.WINDOWS)
self._is_macos = (value == OSType.MACOS)
self.is_case_sensitive = (value == OSType.LINUX)
self.path_separator = ('\\' if (value == OSType.WINDOWS) else '/')
self.alternative_path_separator = ('/' if (value == OSType.WINDOWS) else None)
self.reset()
FakePathModule.reset(self)
    def reset(self, total_size: Optional[int]=None, init_pathlib: bool=True):
        """Recreate the filesystem from scratch: root dir, fds, mounts, streams.

        Args:
            total_size: optional size limit for the root mount point.
            init_pathlib: if True, re-initialize the fake pathlib module for
                this filesystem (skipped while __init__ is still running).
        """
        self.root = FakeDirectory(self.path_separator, filesystem=self)
        self.dev_null = FakeNullFile(self)
        self.open_files.clear()
        self._free_fd_heap.clear()
        self.last_ino = 0
        self.last_dev = 0
        self.mount_points.clear()
        self._add_root_mount_point(total_size)
        self._add_standard_streams()
        if self.create_temp_dir:
            self._create_temp_dir()
        if init_pathlib:
            # Imported here to avoid a circular import at module load time.
            from pyfakefs import fake_pathlib
            fake_pathlib.init_module(self)
    def _add_root_mount_point(self, total_size):
        """Mount the root ('C:' on Windows, the separator otherwise) and cd into it."""
        mount_point = ('C:' if self.is_windows_fs else self.path_separator)
        self._cwd = mount_point
        # Ensure the cwd is a directory path ending with the separator.
        if (not self.cwd.endswith(self.path_separator)):
            self._cwd += self.path_separator
        self.add_mount_point(mount_point, total_size)
def pause(self) -> None:
if (self.patcher is None):
raise RuntimeError('pause() can only be called from a fake file system object created by a Patcher object')
self.patcher.pause()
def resume(self) -> None:
if (self.patcher is None):
raise RuntimeError('resume() can only be called from a fake file system object created by a Patcher object')
self.patcher.resume()
def clear_cache(self) -> None:
if self.patcher:
self.patcher.clear_cache()
def line_separator(self) -> str:
return ('\r\n' if self.is_windows_fs else '\n')
def raise_os_error(self, err_no: int, filename: Optional[AnyString]=None, winerror: Optional[int]=None) -> NoReturn:
message = (os.strerror(err_no) + ' in the fake filesystem')
if ((winerror is not None) and (sys.platform == 'win32') and self.is_windows_fs):
raise OSError(err_no, message, filename, winerror)
raise OSError(err_no, message, filename)
def get_path_separator(self, path: AnyStr) -> AnyStr:
return matching_string(path, self.path_separator)
def _alternative_path_separator(self, path: AnyStr) -> Optional[AnyStr]:
return matching_string(path, self.alternative_path_separator)
def starts_with_sep(self, path: AnyStr) -> bool:
sep = self.get_path_separator(path)
altsep = self._alternative_path_separator(path)
return (path.startswith(sep) or ((altsep is not None) and path.startswith(altsep)))
    def add_mount_point(self, path: AnyStr, total_size: Optional[int]=None, can_exist: bool=False) -> Dict:
        """Register a new mount point and create its directory.

        Args:
            path: mount point path (normalized before use).
            total_size: optional size limit of the mount point, in bytes.
            can_exist: if True, re-adding an existing mount point returns it
                instead of raising EEXIST.

        Returns:
            The mount point descriptor dict (idev/total_size/used_size).

        Raises:
            OSError: EEXIST if the mount point exists and can_exist is False.
        """
        path = self.normpath(self.normcase(path))
        # Duplicate check honoring the filesystem's case sensitivity.
        for mount_point in self.mount_points:
            if ((self.is_case_sensitive and (path == matching_string(path, mount_point))) or ((not self.is_case_sensitive) and (path.lower() == matching_string(path, mount_point.lower())))):
                if can_exist:
                    return self.mount_points[mount_point]
                self.raise_os_error(errno.EEXIST, path)
        # Each mount point gets its own device id.
        self.last_dev += 1
        self.mount_points[path] = {'idev': self.last_dev, 'total_size': total_size, 'used_size': 0}
        if (path == matching_string(path, self.root.name)):
            # The root directory already exists; just assign an inode.
            root_dir = self.root
            self.last_ino += 1
            root_dir.st_ino = self.last_ino
        else:
            root_dir = self._create_mount_point_dir(path)
        root_dir.st_dev = self.last_dev
        return self.mount_points[path]
    def _create_mount_point_dir(self, directory_path: AnyPath) -> FakeDirectory:
        """Create (or find) the directory chain for a mount point.

        Walks the path components from the root, creating any missing
        directories, and returns the deepest directory.
        """
        dir_path = self.make_string_path(directory_path)
        path_components = self._path_components(dir_path)
        current_dir = self.root
        new_dirs = []
        for component in [to_string(p) for p in path_components]:
            directory = self._directory_content(current_dir, to_string(component))[1]
            if (not directory):
                new_dir = FakeDirectory(component, filesystem=self)
                new_dirs.append(new_dir)
                current_dir.add_entry(new_dir)
                current_dir = new_dir
            else:
                current_dir = cast(FakeDirectory, directory)
        # Give freshly created directories default directory permissions.
        for new_dir in new_dirs:
            new_dir.st_mode = (S_IFDIR | helpers.PERM_DEF)
        return current_dir
def _auto_mount_drive_if_needed(self, path: AnyStr) -> Optional[Dict]:
if self.is_windows_fs:
drive = self.splitdrive(path)[0]
if drive:
return self.add_mount_point(path=drive, can_exist=True)
return None
    def _mount_point_for_path(self, path: AnyStr) -> Dict:
        """Return the mount point descriptor that contains *path*.

        Prefers an exact match, then the longest mount-point prefix (on the
        same drive), and finally auto-mounts the drive on Windows.
        """
        path = self.absnormpath(self._original_path(path))
        # Exact match first.
        for mount_path in self.mount_points:
            if (path == matching_string(path, mount_path)):
                return self.mount_points[mount_path]
        # Otherwise pick the longest mount point that prefixes the path.
        mount_path = matching_string(path, '')
        drive = self.splitdrive(path)[0]
        for root_path in self.mount_points:
            root_path = matching_string(path, root_path)
            if (drive and (not root_path.startswith(drive))):
                continue
            if (path.startswith(root_path) and (len(root_path) > len(mount_path))):
                mount_path = root_path
        if mount_path:
            return self.mount_points[to_string(mount_path)]
        # No match: on Windows this mounts the drive on demand.
        mount_point = self._auto_mount_drive_if_needed(path)
        assert mount_point
        return mount_point
    def _mount_point_dir_for_cwd(self) -> FakeDirectory:
        """Return the directory object of the mount point containing the cwd."""

        def object_from_path(file_path) -> FakeDirectory:
            # Walk the components down from the root to the directory object.
            path_components = self._path_components(file_path)
            target = self.root
            for component in path_components:
                target = cast(FakeDirectory, target.get_entry(component))
            return target

        path = to_string(self.cwd)
        # Exact match first.
        for mount_path in self.mount_points:
            if (path == to_string(mount_path)):
                return object_from_path(mount_path)
        # Otherwise use the longest mount-point prefix on the same drive.
        mount_path = ''
        drive = to_string(self.splitdrive(path)[0])
        for root_path in self.mount_points:
            str_root_path = to_string(root_path)
            if (drive and (not str_root_path.startswith(drive))):
                continue
            if (path.startswith(str_root_path) and (len(str_root_path) > len(mount_path))):
                mount_path = root_path
        return object_from_path(mount_path)
def _mount_point_for_device(self, idev: int) -> Optional[Dict]:
for mount_point in self.mount_points.values():
if (mount_point['idev'] == idev):
return mount_point
return None
def get_disk_usage(self, path: Optional[AnyStr]=None) -> Tuple[(int, int, int)]:
    """Return a `(total, used, free)` named tuple for the mount point of
    `path`, or for the first registered mount point when `path` is None.

    If no size was configured for the mount point, a 1 TiB default is
    reported with zero usage.
    """
    DiskUsage = namedtuple('DiskUsage', 'total, used, free')
    if path is None:
        mount_point = next(iter(self.mount_points.values()))
    else:
        mount_point = self._mount_point_for_path(make_string_path(path))
    total = mount_point['total_size'] if mount_point else None
    if total is not None:
        used = mount_point['used_size']
        return DiskUsage(total, used, total - used)
    default_size = 1024 ** 4  # 1 TiB fallback
    return DiskUsage(default_size, 0, default_size)
def set_disk_usage(self, total_size: int, path: Optional[AnyStr]=None) -> None:
    """Set the total size of the mount point containing `path` (the root
    directory when `path` is None).

    Raises OSError(ENOSPC) if the new size is smaller than the space
    already in use.
    """
    target: AnyStr = self.root_dir_name if path is None else path
    mount_point = self._mount_point_for_path(target)
    used = mount_point['used_size']
    if mount_point['total_size'] is not None and used > total_size:
        self.raise_os_error(errno.ENOSPC, path)
    mount_point['total_size'] = total_size
def change_disk_usage(self, usage_change: int, file_path: AnyStr, st_dev: int) -> None:
    """Adjust the used size of the mount point for device `st_dev` by
    `usage_change` bytes.

    Raises OSError(ENOSPC) when the change would exceed the configured
    total size; does nothing if the device has no mount point or no
    size limit is configured.
    """
    mount_point = self._mount_point_for_device(st_dev)
    if not mount_point:
        return
    total_size = mount_point['total_size']
    if total_size is None:
        return
    if total_size - mount_point['used_size'] < usage_change:
        self.raise_os_error(errno.ENOSPC, file_path)
    mount_point['used_size'] += usage_change
def stat(self, entry_path: AnyStr, follow_symlinks: bool=True):
    """Return a copy of the stat result for `entry_path`, like `os.stat`.

    Raises OSError if the path cannot be resolved or, for non-root
    users, if the parent directory is not accessible.
    """
    try:
        file_object = self.resolve(entry_path, follow_symlinks, allow_fd=True, check_read_perm=False)
    except TypeError:
        # NOTE(review): retried with default arguments - presumably to
        # surface the standard error for unsupported path types; confirm
        file_object = self.resolve(entry_path)
    if (not is_root()):
        # resolving the parent raises EACCES if permissions are missing
        parent_dir = file_object.parent_dir
        if parent_dir:
            self.get_object(parent_dir.path)
    self.raise_for_filepath_ending_with_separator(entry_path, file_object, follow_symlinks)
    return file_object.stat_result.copy()
def raise_for_filepath_ending_with_separator(self, entry_path: AnyStr, file_object: FakeFile, follow_symlinks: bool=True, macos_handling: bool=False) -> None:
    """Raise OSError if `entry_path` ends with a separator but does not
    refer to a directory, emulating the OS-specific error codes
    (EINVAL on Windows, ENOTDIR elsewhere)."""
    if self.ends_with_path_separator(entry_path):
        if S_ISLNK(file_object.st_mode):
            try:
                link_object = self.resolve(entry_path)
            except OSError as exc:
                # macOS tolerates resolution errors other than ENOENT here
                if (self.is_macos and (exc.errno != errno.ENOENT)):
                    return
                if self.is_windows_fs:
                    self.raise_os_error(errno.EINVAL, entry_path)
                raise
            if ((not follow_symlinks) or self.is_windows_fs or self.is_macos):
                file_object = link_object
        # what counts as an error differs per emulated OS
        if self.is_windows_fs:
            is_error = S_ISREG(file_object.st_mode)
        elif (self.is_macos and macos_handling):
            is_error = (not S_ISLNK(file_object.st_mode))
        else:
            is_error = (not S_ISDIR(file_object.st_mode))
        if is_error:
            error_nr = (errno.EINVAL if self.is_windows_fs else errno.ENOTDIR)
            self.raise_os_error(error_nr, entry_path)
def chmod(self, path: AnyStr, mode: int, follow_symlinks: bool=True, force_unix_mode: bool=False) -> None:
    """Change the mode bits of `path`.

    Under Windows emulation (unless `force_unix_mode` is set) only the
    write bits are toggled; otherwise the full permission bits are
    replaced while the file type bits are preserved.
    """
    file_object = self.resolve(path, follow_symlinks, allow_fd=True, check_owner=True)
    if (self.is_windows_fs and (not force_unix_mode)):
        if (mode & helpers.PERM_WRITE):
            # set all write bits (146 == 0o222)
            file_object.st_mode = (file_object.st_mode | 146)
        else:
            # clear all write bits (261997 == 0o777555)
            file_object.st_mode = (file_object.st_mode & 261997)
    else:
        # replace permission bits only, keep the file type bits
        file_object.st_mode = ((file_object.st_mode & (~ helpers.PERM_ALL)) | (mode & helpers.PERM_ALL))
    file_object.st_ctime = helpers.now()
def utime(self, path: AnyStr, times: Optional[Tuple[(Union[(int, float)], Union[(int, float)])]]=None, *, ns: Optional[Tuple[(int, int)]]=None, follow_symlinks: bool=True) -> None:
    """Set access and modification times of `path` like `os.utime`.

    `times` is `(atime, mtime)` in seconds, `ns` the same in
    nanoseconds; at most one may be given. With neither, the current
    time is used for both.
    """
    self._handle_utime_arg_errors(ns, times)
    file_object = self.resolve(path, follow_symlinks, allow_fd=True)
    if (times is not None):
        for file_time in times:
            if (not isinstance(file_time, (int, float))):
                raise TypeError('atime and mtime must be numbers')
        file_object.st_atime = times[0]
        file_object.st_mtime = times[1]
    elif (ns is not None):
        for file_time in ns:
            if (not isinstance(file_time, int)):
                raise TypeError('atime and mtime must be ints')
        # nanosecond-resolution setters
        file_object.st_atime_ns = ns[0]
        file_object.st_mtime_ns = ns[1]
    else:
        current_time = helpers.now()
        file_object.st_atime = current_time
        file_object.st_mtime = current_time
def _handle_utime_arg_errors(ns: Optional[Tuple[(int, int)]], times: Optional[Tuple[(Union[(int, float)], Union[(int, float)])]]):
if ((times is not None) and (ns is not None)):
raise ValueError("utime: you may specify either 'times' or 'ns' but not both")
if ((times is not None) and (len(times) != 2)):
raise TypeError("utime: 'times' must be either a tuple of two ints or None")
if ((ns is not None) and (len(ns) != 2)):
raise TypeError("utime: 'ns' must be a tuple of two ints")
def _add_open_file(self, file_obj: AnyFileWrapper) -> int:
    """Register `file_obj` in the open-file table and return its file
    descriptor, reusing the lowest released descriptor when available."""
    if self._free_fd_heap:
        fd = heapq.heappop(self._free_fd_heap)
        self.open_files[fd] = [file_obj]
        return fd
    self.open_files.append([file_obj])
    return len(self.open_files) - 1
def _close_open_file(self, file_des: int) -> None:
self.open_files[file_des] = None
heapq.heappush(self._free_fd_heap, file_des)
def get_open_file(self, file_des: int) -> AnyFileWrapper:
    """Return the file wrapper registered under descriptor `file_des`.

    Raises TypeError for non-integer descriptors and OSError(EBADF)
    for unknown or already closed descriptors.
    """
    if not is_int_type(file_des):
        raise TypeError('an integer is required')
    if file_des < len(self.open_files):
        wrappers = self.open_files[file_des]
        if wrappers is not None:
            return wrappers[0]
    self.raise_os_error(errno.EBADF, str(file_des))
def has_open_file(self, file_object: FakeFile) -> bool:
    """Return True if `file_object` is currently open in this filesystem."""
    return any(
        file_object == wrappers[0].get_object()
        for wrappers in self.open_files
        if wrappers
    )
def _normalize_path_sep(self, path: AnyStr) -> AnyStr:
alt_sep = self._alternative_path_separator(path)
if (alt_sep is not None):
return path.replace(alt_sep, self.get_path_separator(path))
return path
def normcase(self, path: AnyStr) -> AnyStr:
    """Normalize the separators of `path` to the primary separator
    (no case folding is performed here)."""
    return self._normalize_path_sep(make_string_path(path))
def normpath(self, path: AnyStr) -> AnyStr:
    """Collapse `.`/`..` components and redundant separators, like
    `os.path.normpath`, using the fake path separator."""
    path_str = self.normcase(path)
    (drive, path_str) = self.splitdrive(path_str)
    sep = self.get_path_separator(path_str)
    is_absolute_path = path_str.startswith(sep)
    path_components: List[AnyStr] = path_str.split(sep)
    collapsed_path_components: List[AnyStr] = []
    dot = matching_string(path_str, '.')
    dotdot = matching_string(path_str, '..')
    for component in path_components:
        # drop empty components (doubled separators) and '.'
        if ((not component) or (component == dot)):
            continue
        if (component == dotdot):
            if (collapsed_path_components and (collapsed_path_components[(- 1)] != dotdot)):
                # '..' cancels the previous real component
                collapsed_path_components.pop()
                continue
            elif is_absolute_path:
                # '..' at the root is a no-op
                continue
        collapsed_path_components.append(component)
    collapsed_path = sep.join(collapsed_path_components)
    if is_absolute_path:
        collapsed_path = (sep + collapsed_path)
    # an empty result means the current directory
    return ((drive + collapsed_path) or dot)
def _original_path(self, path: AnyStr) -> AnyStr:
    """Return `path` with the casing it was created with, for
    case-preserving (case-insensitive) filesystems; unchanged for
    case-sensitive ones."""
    def components_to_path():
        # keep any trailing components that could not be matched
        if (len(path_components) > len(normalized_components)):
            normalized_components.extend((to_string(p) for p in path_components[len(normalized_components):]))
        sep = self.path_separator
        normalized_path = sep.join(normalized_components)
        if (self.starts_with_sep(path) and (not self.starts_with_sep(normalized_path))):
            normalized_path = (sep + normalized_path)
        # a bare drive ('x:') gets a trailing separator
        if ((len(normalized_path) == 2) and self.starts_with_drive_letter(normalized_path)):
            normalized_path += sep
        return normalized_path
    if (self.is_case_sensitive or (not path)):
        return path
    path = self.replace_windows_root(path)
    path_components = self._path_components(path)
    normalized_components = []
    current_dir = self.root
    for component in path_components:
        if (not isinstance(current_dir, FakeDirectory)):
            return components_to_path()
        (dir_name, directory) = self._directory_content(current_dir, to_string(component))
        if ((directory is None) or (isinstance(directory, FakeDirectory) and (directory._byte_contents is None) and (directory.st_size == 0))):
            return components_to_path()
        current_dir = cast(FakeDirectory, directory)
        normalized_components.append(dir_name)
    return components_to_path()
def absnormpath(self, path: AnyStr) -> AnyStr:
    """Return the absolute, normalized form of `path`, resolving
    relative paths against the current working directory."""
    path = self.normcase(path)
    cwd = matching_string(path, self.cwd)
    if (not path):
        path = self.get_path_separator(path)
    if (path == matching_string(path, '.')):
        path = cwd
    elif (not self._starts_with_root_path(path)):
        # relative path: anchor it at the cwd
        root_name = matching_string(path, self.root.name)
        empty = matching_string(path, '')
        path = self.get_path_separator(path).join(((((cwd != root_name) and cwd) or empty), path))
    else:
        path = self.replace_windows_root(path)
    return self.normpath(path)
def splitpath(self, path: AnyStr) -> Tuple[(AnyStr, AnyStr)]:
    """Split `path` into `(head, tail)` like `os.path.split`, keeping
    the drive with the head and stripping trailing separators from it."""
    path = make_string_path(path)
    sep = self.get_path_separator(path)
    alt_sep = self._alternative_path_separator(path)
    seps = (sep if (alt_sep is None) else (sep + alt_sep))
    (drive, path) = self.splitdrive(path)
    # find the position after the last separator
    i = len(path)
    while (i and (path[(i - 1)] not in seps)):
        i -= 1
    (head, tail) = (path[:i], path[i:])
    # strip trailing separators unless head consists only of separators
    head = (head.rstrip(seps) or head)
    return ((drive + head), tail)
def splitdrive(self, path: AnyStr) -> Tuple[(AnyStr, AnyStr)]:
    """Split `path` into `(drive, rest)` like `os.path.splitdrive`;
    the drive is always empty on non-Windows filesystems."""
    path_str = make_string_path(path)
    if self.is_windows_fs:
        if (len(path_str) >= 2):
            norm_str = self.normcase(path_str)
            sep = self.get_path_separator(path_str)
            # UNC path: '\\server\share' (but not '\\\...')
            if ((norm_str[0:2] == (sep * 2)) and (norm_str[2:3] != sep)):
                sep_index = norm_str.find(sep, 2)
                if (sep_index == (- 1)):
                    return (path_str[:0], path_str)
                sep_index2 = norm_str.find(sep, (sep_index + 1))
                if (sep_index2 == (sep_index + 1)):
                    # '\\server\\' with empty share is not a drive
                    return (path_str[:0], path_str)
                if (sep_index2 == (- 1)):
                    sep_index2 = len(path_str)
                return (path_str[:sep_index2], path_str[sep_index2:])
            # drive letter, e.g. 'c:'
            if (path_str[1:2] == matching_string(path_str, ':')):
                return (path_str[:2], path_str[2:])
    return (path_str[:0], path_str)
def splitroot(self, path: AnyStr):
p = os.fspath(path)
if isinstance(p, bytes):
sep = self.path_separator.encode()
altsep = None
if self.alternative_path_separator:
altsep = self.alternative_path_separator.encode()
colon = b':'
unc_prefix = b'\\\\?\\UNC\\'
empty = b''
else:
sep = self.path_separator
altsep = self.alternative_path_separator
colon = ':'
unc_prefix = '\\\\?\\UNC\\'
empty = ''
if self.is_windows_fs:
normp = (p.replace(altsep, sep) if altsep else p)
if (normp[:1] == sep):
if (normp[1:2] == sep):
start = (8 if (normp[:8].upper() == unc_prefix) else 2)
index = normp.find(sep, start)
if (index == (- 1)):
return (p, empty, empty)
index2 = normp.find(sep, (index + 1))
if (index2 == (- 1)):
return (p, empty, empty)
return (p[:index2], p[index2:(index2 + 1)], p[(index2 + 1):])
else:
return (empty, p[:1], p[1:])
elif (normp[1:2] == colon):
if (normp[2:3] == sep):
return (p[:2], p[2:3], p[3:])
else:
return (p[:2], empty, p[2:])
else:
return (empty, empty, p)
elif (p[:1] != sep):
return (empty, empty, p)
elif ((p[1:2] != sep) or (p[2:3] == sep)):
return (empty, sep, p[1:])
else:
return (empty, p[:2], p[2:])
def _join_paths_with_drive_support(self, *all_paths: AnyStr) -> AnyStr:
    """Windows variant of `joinpaths`: join path parts handling drive
    letters the way `ntpath.join` does."""
    base_path = all_paths[0]
    paths_to_add = all_paths[1:]
    sep = self.get_path_separator(base_path)
    seps = [sep, self._alternative_path_separator(base_path)]
    (result_drive, result_path) = self.splitdrive(base_path)
    for path in paths_to_add:
        (drive_part, path_part) = self.splitdrive(path)
        if (path_part and (path_part[:1] in seps)):
            # absolute part: replaces the result (keeps drive if absent)
            if (drive_part or (not result_drive)):
                result_drive = drive_part
            result_path = path_part
            continue
        elif (drive_part and (drive_part != result_drive)):
            if (self.is_case_sensitive or (drive_part.lower() != result_drive.lower())):
                # different drive: restart from this part
                result_drive = drive_part
                result_path = path_part
                continue
            # same drive, different case: keep the later spelling
            result_drive = drive_part
        if (result_path and (result_path[(- 1):] not in seps)):
            result_path = (result_path + sep)
        result_path = (result_path + path_part)
    # add a separator between a drive ('x:') and a relative tail
    colon = matching_string(base_path, ':')
    if (result_path and (result_path[:1] not in seps) and result_drive and (result_drive[(- 1):] != colon)):
        return ((result_drive + sep) + result_path)
    return (result_drive + result_path)
def joinpaths(self, *paths: AnyStr) -> AnyStr:
    """Join path segments like `os.path.join`, using the fake
    separator; an absolute segment resets the accumulated result."""
    file_paths = [os.fspath(path) for path in paths]
    if (len(file_paths) == 1):
        # NOTE(review): returns the original argument, not its fspath()
        # conversion - verify this is intended for PathLike inputs
        return paths[0]
    if self.is_windows_fs:
        return self._join_paths_with_drive_support(*file_paths)
    joined_path_segments = []
    sep = self.get_path_separator(file_paths[0])
    for path_segment in file_paths:
        if self._starts_with_root_path(path_segment):
            # absolute segment discards everything accumulated so far
            joined_path_segments = [path_segment]
        else:
            if (joined_path_segments and (not joined_path_segments[(- 1)].endswith(sep))):
                joined_path_segments.append(sep)
            if path_segment:
                joined_path_segments.append(path_segment)
    return matching_string(file_paths[0], '').join(joined_path_segments)
# NOTE(review): the two stub definitions below look like typing overloads
# whose @overload decorators were lost; as written, only the final
# definition takes effect - confirm against the original source.
def _path_components(self, path: str) -> List[str]:
    ...
def _path_components(self, path: bytes) -> List[bytes]:
    ...
def _path_components(self, path: AnyStr) -> List[AnyStr]:
    """Split `path` into its components, with the drive (if any) as the
    first component; the root path yields an empty list."""
    if ((not path) or (path == self.get_path_separator(path))):
        return []
    (drive, path) = self.splitdrive(path)
    path_components = path.split(self.get_path_separator(path))
    assert (drive or path_components)
    if (not path_components[0]):
        # the path started with a separator
        if ((len(path_components) > 1) and (not path_components[1])):
            path_components = []
        else:
            path_components = path_components[1:]
    if drive:
        path_components.insert(0, drive)
    return path_components
def starts_with_drive_letter(self, file_path: AnyStr) -> bool:
    """Return True if `file_path` starts with a drive letter ('x:').

    Under Windows emulation the check is purely syntactic; on a real
    Windows host emulating another OS, the drive path must also exist
    in the fake filesystem.
    """
    colon = matching_string(file_path, ':')
    if ((len(file_path) >= 2) and file_path[0:1].isalpha() and (file_path[1:2] == colon)):
        if self.is_windows_fs:
            return True
        if (os.name == 'nt'):
            # real Windows host: accept only if the path resolves
            try:
                self.get_object_from_normpath(file_path)
                return True
            except OSError:
                return False
    return False
def _starts_with_root_path(self, file_path: AnyStr) -> bool:
    """Return True if `file_path` is absolute: it starts with the root
    name (case-insensitively on case-preserving systems) or with a
    drive letter."""
    root_name = matching_string(file_path, self.root.name)
    file_path = self._normalize_path_sep(file_path)
    if file_path.startswith(root_name):
        return True
    if not self.is_case_sensitive and file_path.lower().startswith(root_name.lower()):
        return True
    return self.starts_with_drive_letter(file_path)
def replace_windows_root(self, path: AnyStr) -> AnyStr:
    """Under Windows emulation, replace a single leading separator
    (e.g. '\\foo') with the root drive path, unless the path already
    starts with a registered mount point."""
    if (path and self.is_windows_fs and self.root_dir):
        sep = self.get_path_separator(path)
        # exactly one leading separator (two would be a UNC path)
        if ((path[0:1] == sep) and ((len(path) == 1) or (path[1:2] != sep))):
            for root_path in self.mount_points:
                root_path = matching_string(path, root_path)
                if path.startswith(root_path):
                    return path
            mount_point = matching_string(path, self.root_dir_name)
            path = (mount_point + path[1:])
    return path
def _is_root_path(self, file_path: AnyStr) -> bool:
    """Return True if `file_path` denotes the root or any mount point."""
    if file_path == matching_string(file_path, self.root.name):
        return True
    return self.is_mount_point(file_path)
def is_mount_point(self, file_path: AnyStr) -> bool:
    """Return True if `file_path` matches a registered mount point,
    including 'x:<sep>' matching a bare 'x:' drive mount on Windows."""
    for mount_point in self.mount_points:
        mount_point = matching_string(file_path, mount_point)
        if ((file_path == mount_point) or ((not self.is_case_sensitive) and (file_path.lower() == mount_point.lower()))):
            return True
        # 'x:\' matches the drive mount 'x:'
        if (self.is_windows_fs and (len(file_path) == 3) and (len(mount_point) == 2) and self.starts_with_drive_letter(file_path) and (file_path[:2].lower() == mount_point.lower())):
            return True
    return False
def ends_with_path_separator(self, path: Union[(int, AnyPath)]) -> bool:
    """Return True if `path` ends with a primary or alternative
    separator; file descriptors, empty paths, and the bare separator
    itself yield False."""
    if isinstance(path, int):
        return False
    file_path = make_string_path(path)
    if not file_path:
        return False
    sep = self.get_path_separator(file_path)
    altsep = self._alternative_path_separator(file_path)
    if file_path in (sep, altsep):
        return False
    if file_path.endswith(sep):
        return True
    return altsep is not None and file_path.endswith(altsep)
def is_filepath_ending_with_separator(self, path: AnyStr) -> bool:
    """Return True if `path` carries a trailing separator but actually
    points to a regular file (an error case for most operations)."""
    return (
        self.ends_with_path_separator(path)
        and self.isfile(self._path_without_trailing_separators(path))
    )
def _directory_content(self, directory: FakeDirectory, component: str) -> Tuple[(Optional[str], Optional[AnyFile])]:
    """Look up `component` among the entries of `directory`,
    case-insensitively if the filesystem is case-preserving.

    Returns `(stored_name, entry)`, or `(None, None)` if `directory`
    is not a directory or has no matching entry.
    """
    if not isinstance(directory, FakeDirectory):
        return (None, None)
    entries = directory.entries
    if component in entries:
        return (component, entries[component])
    if not self.is_case_sensitive:
        lowered = component.lower()
        for name, entry in entries.items():
            if name.lower() == lowered:
                return (name, entry)
    return (None, None)
def exists(self, file_path: AnyPath, check_link: bool=False) -> bool:
    """Return True if `file_path` points to an existing object.

    With `check_link`, a (possibly broken) symlink also counts as
    existing.
    """
    if (check_link and self.islink(file_path)):
        return True
    path = to_string(self.make_string_path(file_path))
    if (path is None):
        raise TypeError
    if (not path):
        return False
    if (path == self.dev_null.name):
        # the null device exists except under Windows before Python 3.8
        return ((not self.is_windows_fs) or (sys.version_info >= (3, 8)))
    try:
        if self.is_filepath_ending_with_separator(path):
            return False
        path = self.resolve_path(path)
    except OSError:
        return False
    if self._is_root_path(path):
        return True
    # walk the tree component by component
    path_components: List[str] = self._path_components(path)
    current_dir = self.root
    for component in path_components:
        directory = self._directory_content(current_dir, to_string(component))[1]
        if (directory is None):
            return False
        current_dir = cast(FakeDirectory, directory)
    return True
def resolve_path(self, file_path: AnyStr, allow_fd: bool=False) -> AnyStr:
    """Follow all symlinks in `file_path` and return the resulting
    absolute, normalized path.

    Raises OSError(ENOENT) for empty or invalid relative paths and
    TypeError when `file_path` is None.
    """
    if (allow_fd and isinstance(file_path, int)):
        # a descriptor stands for the path of its open file object
        return self.get_open_file(file_path).get_object().path
    path = make_string_path(file_path)
    if (path is None):
        raise TypeError('Expected file system path string, received None')
    if ((sys.platform == 'win32') and (self.os != OSType.WINDOWS)):
        # translate real OS separators when emulating another OS
        path = path.replace(matching_string(path, os.sep), matching_string(path, self.path_separator))
    if ((not path) or (not self._valid_relative_path(path))):
        self.raise_os_error(errno.ENOENT, path)
    path = self.absnormpath(self._original_path(path))
    path = self.replace_windows_root(path)
    if self._is_root_path(path):
        return path
    if (path == matching_string(path, self.dev_null.name)):
        return path
    path_components = self._path_components(path)
    resolved_components = self._resolve_components(path_components)
    path = self._components_to_path(resolved_components)
    return self.replace_windows_root(path)
def _components_to_path(self, component_folders):
sep = (self.get_path_separator(component_folders[0]) if component_folders else self.path_separator)
path = sep.join(component_folders)
if (not self._starts_with_root_path(path)):
path = (sep + path)
return path
def _resolve_components(self, components: List[AnyStr]) -> List[str]:
    """Resolve symlinks in the component list, restarting from the root
    whenever a link is expanded; raises OSError(ELOOP) if the link
    chain exceeds the maximum depth."""
    current_dir = self.root
    link_depth = 0
    path_components = [to_string(comp) for comp in components]
    resolved_components: List[str] = []
    while path_components:
        component = path_components.pop(0)
        resolved_components.append(component)
        directory = self._directory_content(current_dir, component)[1]
        if (directory is None):
            # component not found: keep the remaining parts unresolved
            resolved_components.extend(path_components)
            break
        elif S_ISLNK(directory.st_mode):
            if (link_depth > _MAX_LINK_DEPTH):
                self.raise_os_error(errno.ELOOP, self._components_to_path(resolved_components))
            link_path = self._follow_link(resolved_components, directory)
            # restart resolution from the root with the link target
            # prepended to the remaining components
            target_components = self._path_components(link_path)
            path_components = (target_components + path_components)
            resolved_components = []
            current_dir = self.root
            link_depth += 1
        else:
            current_dir = cast(FakeDirectory, directory)
    return resolved_components
def _valid_relative_path(self, file_path: AnyStr) -> bool:
    """Check that every '..' step in `file_path` still resolves to an
    existing location (POSIX semantics; Windows skips this check)."""
    if self.is_windows_fs:
        return True
    slash_dotdot = matching_string(file_path, self.path_separator + '..')
    while file_path and slash_dotdot in file_path:
        file_path = file_path[:file_path.rfind(slash_dotdot)]
        if not self.exists(self.absnormpath(file_path)):
            return False
    return True
def _follow_link(self, link_path_components: List[str], link: AnyFile) -> str:
    """Return the normalized target path of `link`; relative targets
    are resolved against the link's parent directory.

    Raises ValueError if the link has no contents.
    """
    link_path = link.contents
    if (link_path is not None):
        if (self.is_windows_fs and link_path.startswith('\\\\?\\')):
            # strip the extended-length path prefix
            link_path = link_path[4:]
        sep = self.get_path_separator(link_path)
        if (not self._starts_with_root_path(link_path)):
            # relative link: anchor at the link's parent directory
            components = link_path_components[:(- 1)]
            components.append(link_path)
            link_path = sep.join(components)
        return self.normpath(link_path)
    raise ValueError('Invalid link')
def get_object_from_normpath(self, file_path: AnyPath, check_read_perm: bool=True, check_owner: bool=False) -> AnyFile:
    """Return the object at the already-normalized `file_path`, walking
    the tree entry by entry.

    Raises OSError with ENOENT/ENOTDIR for missing paths and EACCES
    when read permission checks fail (skipped for root).
    """
    path = make_string_path(file_path)
    if (path == matching_string(path, self.root.name)):
        return self.root
    if (path == matching_string(path, self.dev_null.name)):
        return self.dev_null
    path = self._original_path(path)
    path_components = self._path_components(path)
    target = self.root
    try:
        for component in path_components:
            if S_ISLNK(target.st_mode):
                # follow intermediate symlinked directories
                if target.contents:
                    target = cast(FakeDirectory, self.resolve(target.contents))
            if (not S_ISDIR(target.st_mode)):
                if (not self.is_windows_fs):
                    self.raise_os_error(errno.ENOTDIR, path)
                self.raise_os_error(errno.ENOENT, path)
            target = target.get_entry(component)
            if ((not is_root()) and check_read_perm and target and (not self._can_read(target, check_owner))):
                self.raise_os_error(errno.EACCES, target.path)
    except KeyError:
        # get_entry raises KeyError for a missing entry
        self.raise_os_error(errno.ENOENT, path)
    return target
def _can_read(self, target, owner_can_read):
    """Check read permission on `target` for the current user.

    The owner may read if `owner_can_read` is set or the owner-read bit
    (0o400) is on; group members need the group-read bit (0o040);
    everyone else the other-read bit (0o004).

    Bug fix: the function was defined without `self` (it looks like a
    stripped `@staticmethod`), so the call
    `self._can_read(target, check_owner)` raised
    `TypeError: takes 2 positional arguments but 3 were given`.
    """
    if target.st_uid == helpers.get_uid():
        if owner_can_read or target.st_mode & 0o400:
            return True
    if target.st_gid == get_gid():
        if target.st_mode & 0o040:
            return True
    return target.st_mode & 0o004
def get_object(self, file_path: AnyPath, check_read_perm: bool=True) -> FakeFile:
    """Return the file object at `file_path`, normalizing the path and
    restoring its stored casing first."""
    normalized = self.absnormpath(self._original_path(make_string_path(file_path)))
    return self.get_object_from_normpath(normalized, check_read_perm)
def resolve(self, file_path: AnyStr, follow_symlinks: bool=True, allow_fd: bool=False, check_read_perm: bool=True, check_owner: bool=False) -> FakeFile:
    """Return the file object for `file_path`, following symlinks
    unless `follow_symlinks` is False; an integer descriptor is
    accepted when `allow_fd` is set."""
    if isinstance(file_path, int):
        if not allow_fd:
            raise TypeError('path should be string, bytes or os.PathLike, not int')
        return self.get_open_file(file_path).get_object()
    if not follow_symlinks:
        return self.lresolve(file_path)
    return self.get_object_from_normpath(self.resolve_path(file_path, allow_fd), check_read_perm, check_owner)
def lresolve(self, path: AnyPath) -> FakeFile:
    """Return the object at `path` without following a terminal symlink
    (like `os.lstat`); raises OSError if it does not exist."""
    path_str = make_string_path(path)
    if (not path_str):
        raise OSError(errno.ENOENT, path_str)
    if (path_str == matching_string(path_str, self.root.name)):
        return self.root
    path_str = self._path_without_trailing_separators(path_str)
    if (path_str == matching_string(path_str, '.')):
        path_str = matching_string(path_str, self.cwd)
    path_str = self._original_path(path_str)
    (parent_directory, child_name) = self.splitpath(path_str)
    if (not parent_directory):
        parent_directory = matching_string(path_str, self.cwd)
    try:
        # resolve the parent (following links), then look up the child
        parent_obj = self.resolve(parent_directory)
        assert parent_obj
        if (not isinstance(parent_obj, FakeDirectory)):
            if ((not self.is_windows_fs) and isinstance(parent_obj, FakeFile)):
                self.raise_os_error(errno.ENOTDIR, path_str)
            self.raise_os_error(errno.ENOENT, path_str)
        if (not (parent_obj.st_mode & helpers.PERM_READ)):
            self.raise_os_error(errno.EACCES, parent_directory)
        return (parent_obj.get_entry(to_string(child_name)) if child_name else parent_obj)
    except KeyError:
        # missing child entry falls through to ENOENT below
        pass
    raise OSError(errno.ENOENT, path_str)
def add_object(self, file_path: AnyStr, file_object: AnyFile) -> None:
    """Add `file_object` to the directory at `file_path` (to the root
    directory if `file_path` is empty).

    Raises OSError if `file_path` does not denote a directory.
    """
    if not file_path:
        target_directory = self.root_dir
    else:
        target_directory = cast(FakeDirectory, self.resolve(file_path))
        if not S_ISDIR(target_directory.st_mode):
            error = errno.ENOENT if self.is_windows_fs else errno.ENOTDIR
            self.raise_os_error(error, file_path)
    target_directory.add_entry(file_object)
def rename(self, old_file_path: AnyPath, new_file_path: AnyPath, force_replace: bool=False) -> None:
    """Rename `old_file_path` to `new_file_path`, emulating `os.rename`
    / `os.replace` semantics.

    With `force_replace`, an existing target file is overwritten even
    under Windows emulation. Raises OSError for the many OS-specific
    error cases (missing source, cross-device move, directory
    conflicts, renaming a directory into itself, ...).
    """
    old_path = make_string_path(old_file_path)
    new_path = make_string_path(new_file_path)
    ends_with_sep = self.ends_with_path_separator(old_path)
    old_path = self.absnormpath(old_path)
    new_path = self.absnormpath(new_path)
    if (not self.exists(old_path, check_link=True)):
        self.raise_os_error(errno.ENOENT, old_path, 2)
    if ends_with_sep:
        self._handle_broken_link_with_trailing_sep(old_path)
    old_object = self.lresolve(old_path)
    if (not self.is_windows_fs):
        self._handle_posix_dir_link_errors(new_path, old_path, ends_with_sep)
    if self.exists(new_path, check_link=True):
        # may raise, may return a new target path, or None when the
        # rename turns out to be a no-op
        renamed_path = self._rename_to_existing_path(force_replace, new_path, old_path, old_object, ends_with_sep)
        if (renamed_path is None):
            return
        else:
            new_path = renamed_path
    (old_dir, old_name) = self.splitpath(old_path)
    (new_dir, new_name) = self.splitpath(new_path)
    if (not self.exists(new_dir)):
        self.raise_os_error(errno.ENOENT, new_dir)
    old_dir_object = self.resolve(old_dir)
    new_dir_object = self.resolve(new_dir)
    if (old_dir_object.st_dev != new_dir_object.st_dev):
        # no moves across devices/mount points
        self.raise_os_error(errno.EXDEV, old_path)
    if (not S_ISDIR(new_dir_object.st_mode)):
        self.raise_os_error((errno.EACCES if self.is_windows_fs else errno.ENOTDIR), new_path)
    if new_dir_object.has_parent_object(old_object):
        # cannot move a directory into itself
        self.raise_os_error(errno.EINVAL, new_path)
    self._do_rename(old_dir_object, old_name, new_dir_object, new_name)
def _do_rename(self, old_dir_object, old_name, new_dir_object, new_name):
    """Move the entry `old_name` from one directory object to another,
    restoring the previous state if adding to the target fails."""
    object_to_rename = old_dir_object.get_entry(old_name)
    old_dir_object.remove_entry(old_name, recursive=False)
    object_to_rename.name = new_name
    new_name = new_dir_object._normalized_entryname(new_name)
    old_entry = (new_dir_object.get_entry(new_name) if (new_name in new_dir_object.entries) else None)
    try:
        if old_entry:
            # an existing target entry is replaced
            new_dir_object.remove_entry(new_name)
        new_dir_object.add_entry(object_to_rename)
    except OSError:
        # roll back: restore the replaced entry and the source entry
        if (old_entry and (new_name not in new_dir_object.entries)):
            new_dir_object.add_entry(old_entry)
        object_to_rename.name = old_name
        old_dir_object.add_entry(object_to_rename)
        raise
def _handle_broken_link_with_trailing_sep(self, path: AnyStr) -> None:
if self.islink(path):
if (not self.exists(path)):
error = (errno.ENOENT if self.is_macos else (errno.EINVAL if self.is_windows_fs else errno.ENOTDIR))
self.raise_os_error(error, path)
def _handle_posix_dir_link_errors(self, new_file_path: AnyStr, old_file_path: AnyStr, ends_with_sep: bool) -> None:
    """Raise the POSIX-specific errors for renames mixing directories
    and symlinks (not called under Windows emulation)."""
    # renaming a directory over a symlink
    if (self.isdir(old_file_path, follow_symlinks=False) and self.islink(new_file_path)):
        self.raise_os_error(errno.ENOTDIR, new_file_path)
    # renaming a symlink over a directory
    if (self.isdir(new_file_path, follow_symlinks=False) and self.islink(old_file_path)):
        if (ends_with_sep and self.is_macos):
            return
        error = (errno.ENOTDIR if ends_with_sep else errno.EISDIR)
        self.raise_os_error(error, new_file_path)
    # renaming a symlink with trailing separator onto itself
    if (ends_with_sep and self.islink(old_file_path) and (old_file_path == new_file_path) and (not self.is_windows_fs)):
        self.raise_os_error(errno.ENOTDIR, new_file_path)
def _rename_to_existing_path(self, force_replace: bool, new_file_path: AnyStr, old_file_path: AnyStr, old_object: FakeFile, ends_with_sep: bool) -> Optional[AnyStr]:
    """Handle a rename whose target already exists.

    Returns the (possibly adjusted) target path to continue with, or
    None when the rename is a no-op; raises OSError for conflicting
    file/directory/link combinations.
    """
    new_object = self.get_object(new_file_path)
    if (old_file_path == new_file_path):
        # renaming a path onto itself
        if ((not S_ISLNK(new_object.st_mode)) and ends_with_sep):
            error = (errno.EINVAL if self.is_windows_fs else errno.ENOTDIR)
            self.raise_os_error(error, old_file_path)
        return None
    if (old_object == new_object):
        # distinct paths resolving to the same object (e.g. case change)
        return self._rename_same_object(new_file_path, old_file_path)
    if (S_ISDIR(new_object.st_mode) or S_ISLNK(new_object.st_mode)):
        self._handle_rename_error_for_dir_or_link(force_replace, new_file_path, new_object, old_object, ends_with_sep)
    elif S_ISDIR(old_object.st_mode):
        # directory over an existing file
        error = (errno.EEXIST if self.is_windows_fs else errno.ENOTDIR)
        self.raise_os_error(error, new_file_path)
    elif (self.is_windows_fs and (not force_replace)):
        self.raise_os_error(errno.EEXIST, new_file_path)
    else:
        # file over file: the target is removed first
        self.remove_object(new_file_path)
    return new_file_path
def _handle_rename_error_for_dir_or_link(self, force_replace: bool, new_file_path: AnyStr, new_object: FakeFile, old_object: FakeFile, ends_with_sep: bool) -> None:
    """Raise the OS-specific error for renaming over an existing
    directory or symlink, where such a rename is not allowed."""
    if self.is_windows_fs:
        if force_replace:
            self.raise_os_error(errno.EACCES, new_file_path)
        else:
            self.raise_os_error(errno.EEXIST, new_file_path)
    if (not S_ISLNK(new_object.st_mode)):
        # target is a directory
        if new_object.entries:
            # only an empty directory may be replaced
            if ((not S_ISLNK(old_object.st_mode)) or (not ends_with_sep) or (not self.is_macos)):
                self.raise_os_error(errno.ENOTEMPTY, new_file_path)
        if S_ISREG(old_object.st_mode):
            self.raise_os_error(errno.EISDIR, new_file_path)
def _rename_same_object(self, new_file_path: AnyStr, old_file_path: AnyStr) -> Optional[AnyStr]:
    """Handle a rename where source and target resolve to the same
    object (typically a case-only rename on a case-preserving system).

    Returns the target path to rename to, or None when nothing is to
    be done.
    """
    do_rename = (old_file_path.lower() == new_file_path.lower())
    if (not do_rename):
        try:
            real_old_path = self.resolve_path(old_file_path)
            original_old_path = self._original_path(real_old_path)
            real_new_path = self.resolve_path(new_file_path)
            if ((real_new_path == original_old_path) and ((new_file_path == real_old_path) == (new_file_path.lower() == real_old_path.lower()))):
                real_object = self.resolve(old_file_path, follow_symlinks=False)
                do_rename = ((os.path.basename(old_file_path) == real_object.name) or (not self.is_macos))
            else:
                do_rename = (real_new_path.lower() == real_old_path.lower())
            if do_rename:
                # use the stored casing for the parent directory
                (parent, file_name) = self.splitpath(new_file_path)
                new_file_path = self.joinpaths(self._original_path(parent), file_name)
        except OSError:
            # one of the paths could not be resolved - no rename
            pass
    if (not do_rename):
        return None
    return new_file_path
def remove_object(self, file_path: AnyStr) -> None:
    """Remove the object at `file_path` from its parent directory.

    Raises OSError(EBUSY) for the root or a mount point, ENOENT for a
    missing path and ENOTDIR when a path component is not a directory.
    """
    file_path = self.absnormpath(self._original_path(file_path))
    if self._is_root_path(file_path):
        self.raise_os_error(errno.EBUSY, file_path)
    dirname, basename = self.splitpath(file_path)
    try:
        parent = self.resolve(dirname, check_read_perm=False)
        parent.remove_entry(basename)
    except KeyError:
        self.raise_os_error(errno.ENOENT, file_path)
    except AttributeError:
        self.raise_os_error(errno.ENOTDIR, file_path)
def make_string_path(self, path: AnyPath) -> AnyStr:
    """Convert `path` to a string path and translate the real OS
    separator into the fake filesystem's separator."""
    path_str = make_string_path(path)
    real_sep = matching_string(path_str, os.sep)
    fake_sep = self.get_path_separator(path_str)
    return path_str.replace(real_sep, fake_sep)
def create_dir(self, directory_path: AnyPath, perm_bits: int=helpers.PERM_DEF) -> FakeDirectory:
    """Create the directory at `directory_path`, including any missing
    parent directories, and return the innermost directory object.

    Raises OSError(EEXIST) if the path already exists (mount points
    excepted) and ENOTDIR if a component is not a directory.
    """
    dir_path = self.make_string_path(directory_path)
    dir_path = self.absnormpath(dir_path)
    self._auto_mount_drive_if_needed(dir_path)
    if (self.exists(dir_path, check_link=True) and (dir_path not in self.mount_points)):
        self.raise_os_error(errno.EEXIST, dir_path)
    path_components = self._path_components(dir_path)
    current_dir = self.root
    new_dirs = []
    for component in [to_string(p) for p in path_components]:
        directory = self._directory_content(current_dir, to_string(component))[1]
        if (not directory):
            new_dir = FakeDirectory(component, filesystem=self)
            new_dirs.append(new_dir)
            if (self.is_windows_fs and (current_dir == self.root)):
                # on Windows, new top-level entries live under the root drive
                current_dir = self.root_dir
            current_dir.add_entry(new_dir)
            current_dir = new_dir
        else:
            if S_ISLNK(directory.st_mode):
                # follow symlinked intermediate directories
                assert directory.contents
                directory = self.resolve(directory.contents)
                assert directory
            current_dir = cast(FakeDirectory, directory)
            if ((directory.st_mode & S_IFDIR) != S_IFDIR):
                self.raise_os_error(errno.ENOTDIR, current_dir.path)
    # apply the requested permissions only to newly created directories
    for new_dir in new_dirs:
        new_dir.st_mode = (S_IFDIR | perm_bits)
    return current_dir
def create_file(self, file_path: AnyPath, st_mode: int=(S_IFREG | helpers.PERM_DEF_FILE), contents: AnyString='', st_size: Optional[int]=None, create_missing_dirs: bool=True, apply_umask: bool=False, encoding: Optional[str]=None, errors: Optional[str]=None, side_effect: Optional[Callable]=None) -> FakeFile:
    """Create a fake file at `file_path` with the given mode, contents
    or size, creating missing parent directories by default.

    Thin public wrapper around `create_file_internally`.
    """
    return self.create_file_internally(file_path, st_mode, contents, st_size, create_missing_dirs, apply_umask, encoding, errors, side_effect=side_effect)
def add_real_file(self, source_path: AnyPath, read_only: bool=True, target_path: Optional[AnyPath]=None) -> FakeFile:
    """Map an existing real file into the fake filesystem, copying its
    stat data; contents are read from the real file on demand.

    With `read_only` (the default), write/execute permission bits are
    cleared so the real file cannot be modified through the fake.
    """
    target_path = (target_path or source_path)
    source_path_str = make_string_path(source_path)
    real_stat = os.stat(source_path_str)
    fake_file = self.create_file_internally(target_path, read_from_real_fs=True)
    # use the real file's stat data (size, times, mode, ...)
    fake_file.stat_result.set_from_stat_result(real_stat)
    if read_only:
        # keep only the read permission bits (261924 == 0o777444)
        fake_file.st_mode &= 261924
    fake_file.file_path = source_path_str
    self.change_disk_usage(fake_file.size, fake_file.name, fake_file.st_dev)
    return fake_file
def add_real_symlink(self, source_path: AnyPath, target_path: Optional[AnyPath]=None) -> FakeFile:
    """Mirror an existing real symlink into the fake filesystem."""
    link_location = self._path_without_trailing_separators(make_string_path(source_path))
    # the real link itself must exist; islink also covers dangling links
    if not os.path.exists(link_location) and not os.path.islink(link_location):
        self.raise_os_error(errno.ENOENT, link_location)
    points_to = os.readlink(link_location)
    destination = target_path if target_path else link_location
    return self.create_symlink(destination, points_to)
def add_real_directory(self, source_path: AnyPath, read_only: bool=True, lazy_read: bool=True, target_path: Optional[AnyPath]=None) -> FakeDirectory:
    """Map a real directory tree into the fake filesystem.

    With ``lazy_read`` the tree is scanned on demand; otherwise the whole
    tree is converted to fake objects immediately.
    """
    real_dir = self._path_without_trailing_separators(make_string_path(source_path))
    if not os.path.exists(real_dir):
        self.raise_os_error(errno.ENOENT, real_dir)
    fake_dir_path = make_string_path(target_path or real_dir)
    if os.altsep is not None:
        fake_dir_path = os.path.normpath(fake_dir_path)
    if os.sep != self.path_separator:
        # translate real OS separators into the configured fake separator
        fake_dir_path = fake_dir_path.replace(os.sep, self.path_separator)
    self._auto_mount_drive_if_needed(fake_dir_path)
    creator = (self._create_fake_from_real_dir_lazily if lazy_read
               else self._create_fake_from_real_dir)
    creator(real_dir, fake_dir_path, read_only)
    return cast(FakeDirectory, self.get_object(fake_dir_path))
def _create_fake_from_real_dir(self, source_path_str, target_path_str, read_only):
    """Eagerly mirror a real directory tree as fake files and symlinks."""
    if not self.exists(target_path_str):
        self.create_dir(target_path_str)
    for base, _, files in os.walk(source_path_str):
        mirrored_base = os.path.join(target_path_str, os.path.relpath(base, source_path_str))
        # symlinks are not guaranteed to be in `files`, so scan the directory
        for entry in os.listdir(base):
            entry_path = os.path.join(base, entry)
            if os.path.islink(entry_path):
                self.add_real_symlink(entry_path, os.path.join(mirrored_base, entry))
        for entry in files:
            entry_path = os.path.join(base, entry)
            if not os.path.islink(entry_path):
                self.add_real_file(entry_path, read_only, os.path.join(mirrored_base, entry))
def _create_fake_from_real_dir_lazily(self, source_path_str, target_path_str, read_only):
    """Mirror a real directory lazily, reading entries only when accessed."""
    if self.exists(target_path_str):
        if not self.isdir(target_path_str):
            raise OSError(errno.ENOTDIR, 'Mapping target is not a directory')
        # target already exists: recurse into each real entry explicitly
        for name in os.listdir(source_path_str):
            real_entry = os.path.join(source_path_str, name)
            fake_entry = os.path.join(target_path_str, name)
            if os.path.isdir(real_entry):
                self.add_real_directory(real_entry, read_only, True, fake_entry)
            elif os.path.islink(real_entry):
                self.add_real_symlink(real_entry, fake_entry)
            elif os.path.isfile(real_entry):
                self.add_real_file(real_entry, read_only, fake_entry)
        return self.get_object(target_path_str)
    parent_path = os.path.split(target_path_str)[0]
    if self.exists(parent_path):
        parent_dir = self.get_object(parent_path)
    else:
        parent_dir = self.create_dir(parent_path)
    # a placeholder directory that populates itself from the real FS on access
    lazy_dir = FakeDirectoryFromRealDirectory(source_path_str, self, read_only, target_path_str)
    parent_dir.add_entry(lazy_dir)
    return lazy_dir
def add_real_paths(self, path_list: List[AnyStr], read_only: bool=True, lazy_dir_read: bool=True) -> None:
    """Map several real files or directories into the fake filesystem."""
    for real_path in path_list:
        if os.path.isdir(real_path):
            self.add_real_directory(real_path, read_only, lazy_dir_read)
        else:
            self.add_real_file(real_path, read_only)
def create_file_internally(self, file_path: AnyPath, st_mode: int=(S_IFREG | helpers.PERM_DEF_FILE), contents: AnyString='', st_size: Optional[int]=None, create_missing_dirs: bool=True, apply_umask: bool=False, encoding: Optional[str]=None, errors: Optional[str]=None, read_from_real_fs: bool=False, side_effect: Optional[Callable]=None) -> FakeFile:
    """Create and register a fake file object; backend for create_file().

    Args:
        file_path: path of the new file.
        st_mode: file type and permission bits.
        contents: initial contents (ignored if st_size is given).
        st_size: if set, create a sparse "large file" of this size.
        create_missing_dirs: create missing parent directories if True.
        apply_umask: mask st_mode with the filesystem umask if True.
        encoding/errors: used to encode str contents to bytes.
        read_from_real_fs: if True, create a FakeFileFromRealFile proxy.
        side_effect: callable invoked on file modification.

    Raises:
        OSError: EEXIST if the path exists, ENOENT if a parent is missing
            and create_missing_dirs is False.
    """
    path = self.make_string_path(file_path)
    path = self.absnormpath(path)
    if (not is_int_type(st_mode)):
        raise TypeError('st_mode must be of int type - did you mean to set contents?')
    # also an existing (possibly broken) symlink blocks creation
    if self.exists(path, check_link=True):
        self.raise_os_error(errno.EEXIST, path)
    (parent_directory, new_file) = self.splitpath(path)
    if (not parent_directory):
        # relative name only - create in the current working directory
        parent_directory = matching_string(path, self.cwd)
    self._auto_mount_drive_if_needed(parent_directory)
    if (not self.exists(parent_directory)):
        if (not create_missing_dirs):
            self.raise_os_error(errno.ENOENT, parent_directory)
        parent_directory = matching_string(path, self.create_dir(parent_directory).path)
    else:
        # keep the casing of an already existing path
        parent_directory = self._original_path(parent_directory)
    if apply_umask:
        st_mode &= (~ self.umask)
    file_object: FakeFile
    if read_from_real_fs:
        file_object = FakeFileFromRealFile(to_string(path), filesystem=self, side_effect=side_effect)
    else:
        file_object = FakeFile(new_file, st_mode, filesystem=self, encoding=encoding, errors=errors, side_effect=side_effect)
    self.add_object(parent_directory, file_object)
    if ((st_size is None) and (contents is None)):
        contents = ''
    # real-FS-backed files get their contents lazily from the real file
    if ((not read_from_real_fs) and ((contents is not None) or (st_size is not None))):
        try:
            if (st_size is not None):
                file_object.set_large_file_size(st_size)
            else:
                file_object.set_initial_contents(contents)
        except OSError:
            # e.g. not enough fake disk space - undo the registration
            self.remove_object(path)
            raise
    return file_object
def create_symlink(self, file_path: AnyPath, link_target: AnyPath, create_missing_dirs: bool=True) -> FakeFile:
    """Create a fake symlink at `file_path` pointing to `link_target`.

    The target is stored as the link's contents and is not resolved,
    except for the OS-specific trailing-separator error cases below.

    Raises:
        OSError: with an OS-dependent errno if the link path exists or
            ends with a path separator in a way the emulated OS rejects.
    """
    link_path = self.make_string_path(file_path)
    link_target_path = self.make_string_path(link_target)
    link_path = self.normcase(link_path)
    # a trailing path separator triggers OS-specific behavior
    if self.ends_with_path_separator(link_path):
        if self.exists(link_path):
            self.raise_os_error(errno.EEXIST, link_path)
        if self.exists(link_target_path):
            if (not self.is_windows_fs):
                self.raise_os_error(errno.ENOENT, link_path)
        else:
            if self.is_windows_fs:
                self.raise_os_error(errno.EINVAL, link_target_path)
            if (not self.exists(self._path_without_trailing_separators(link_path), check_link=True)):
                self.raise_os_error(errno.ENOENT, link_target_path)
            if self.is_macos:
                # macOS allows overwriting an existing broken link this way
                if self.exists(link_path, check_link=True):
                    self.remove_object(link_path)
            else:
                self.raise_os_error(errno.EEXIST, link_target_path)
    # resolve the link path only if it is not itself a symlink
    if (not self.islink(link_path)):
        link_path = self.resolve_path(link_path)
    # the link object stores its target string as file contents
    return self.create_file_internally(link_path, st_mode=(S_IFLNK | helpers.PERM_DEF), contents=link_target_path, create_missing_dirs=create_missing_dirs)
def create_link(self, old_path: AnyPath, new_path: AnyPath, follow_symlinks: bool=True, create_missing_dirs: bool=True) -> FakeFile:
    """Create a hard link: register an existing file object under a new path.

    Args:
        old_path: path of an existing non-directory file.
        new_path: additional path the object shall be reachable under.
        follow_symlinks: if True, a symlink at old_path is resolved first.
        create_missing_dirs: create missing parents of new_path if True.

    Raises:
        OSError: EEXIST for an existing target, ENOENT for a missing
            source or parent, and OS-dependent errors for directories
            and trailing separators.
    """
    old_path_str = make_string_path(old_path)
    new_path_str = make_string_path(new_path)
    new_path_normalized = self.absnormpath(new_path_str)
    if self.exists(new_path_normalized, check_link=True):
        self.raise_os_error(errno.EEXIST, new_path_str)
    (new_parent_directory, new_basename) = self.splitpath(new_path_normalized)
    if (not new_parent_directory):
        new_parent_directory = matching_string(new_path_str, self.cwd)
    if (not self.exists(new_parent_directory)):
        if create_missing_dirs:
            self.create_dir(new_parent_directory)
        else:
            self.raise_os_error(errno.ENOENT, new_parent_directory)
    if self.ends_with_path_separator(old_path_str):
        # errno differs between Windows and POSIX for this case
        error = (errno.EINVAL if self.is_windows_fs else errno.ENOTDIR)
        self.raise_os_error(error, old_path_str)
    if ((not self.is_windows_fs) and self.ends_with_path_separator(new_path)):
        self.raise_os_error(errno.ENOENT, old_path_str)
    try:
        old_file = self.resolve(old_path_str, follow_symlinks=follow_symlinks)
    except OSError:
        self.raise_os_error(errno.ENOENT, old_path_str)
    # hard links to directories are not allowed
    if (old_file.st_mode & S_IFDIR):
        self.raise_os_error((errno.EACCES if self.is_windows_fs else errno.EPERM), old_path_str)
    # registering the same object under a second parent creates the hard link
    old_file.name = new_basename
    self.add_object(new_parent_directory, old_file)
    return old_file
def link(self, old_path: AnyPath, new_path: AnyPath, follow_symlinks: bool=True) -> FakeFile:
    """Create a hard link like ``os.link``; parent dirs must already exist."""
    return self.create_link(
        old_path,
        new_path,
        follow_symlinks,
        create_missing_dirs=False,
    )
def _is_circular_link(self, link_obj: FakeFile) -> bool:
    """Return True if resolving the symlink would loop forever (ELOOP)."""
    try:
        assert link_obj.contents
        self.resolve_path(link_obj.contents)
    except OSError as exc:
        return exc.errno == errno.ELOOP
    else:
        return False
def readlink(self, path: AnyPath) -> str:
    """Return the target of a fake symlink, mimicking os.readlink.

    Raises:
        TypeError: if path is None.
        OSError: EINVAL if path is not a symlink (and for some
            trailing-separator cases), ELOOP for circular links on
            POSIX, ENOENT otherwise.
    """
    if (path is None):
        raise TypeError
    link_path = make_string_path(path)
    link_obj = self.lresolve(link_path)
    if (S_IFMT(link_obj.st_mode) != S_IFLNK):
        self.raise_os_error(errno.EINVAL, link_path)
    # a trailing separator needs OS-specific handling
    if self.ends_with_path_separator(link_path):
        if ((not self.is_windows_fs) and self.exists(link_path)):
            self.raise_os_error(errno.EINVAL, link_path)
        if (not self.exists(link_obj.path)):
            if self.is_windows_fs:
                error = errno.EINVAL
            elif self._is_circular_link(link_obj):
                if self.is_macos:
                    # macOS resolves a circular link with trailing sep to itself
                    return link_obj.path
                error = errno.ELOOP
            else:
                error = errno.ENOENT
            self.raise_os_error(error, link_obj.path)
    # the link target is stored as the link object's contents
    assert link_obj.contents
    return link_obj.contents
def makedir(self, dir_path: AnyPath, mode: int=helpers.PERM_DEF) -> None:
    """Create a single fake directory, mimicking os.mkdir.

    Args:
        dir_path: path of the directory to create; the parent must exist.
        mode: permission bits, masked with the filesystem umask.

    Raises:
        OSError: ENOENT for an empty path or missing parent, EEXIST if
            the path exists (EACCES for the Windows root).
    """
    dir_name = make_string_path(dir_path)
    ends_with_sep = self.ends_with_path_separator(dir_name)
    dir_name = self._path_without_trailing_separators(dir_name)
    if (not dir_name):
        self.raise_os_error(errno.ENOENT, '')
    if self.is_windows_fs:
        dir_name = self.absnormpath(dir_name)
    (parent_dir, rest) = self.splitpath(dir_name)
    if parent_dir:
        base_dir = self.normpath(parent_dir)
        # a parent ending in '/..' refers to the grandparent on POSIX
        ellipsis = matching_string(parent_dir, (self.path_separator + '..'))
        if (parent_dir.endswith(ellipsis) and (not self.is_windows_fs)):
            (base_dir, dummy_dotdot, _) = parent_dir.partition(ellipsis)
        if (self.is_windows_fs and (not rest) and (not self.exists(base_dir))):
            # auto-create the drive for drive-only paths on Windows
            self._auto_mount_drive_if_needed(parent_dir)
        if (not self.exists(base_dir)):
            self.raise_os_error(errno.ENOENT, base_dir)
    dir_name = self.absnormpath(dir_name)
    if self.exists(dir_name, check_link=True):
        if (self.is_windows_fs and (dir_name == self.root_dir_name)):
            error_nr = errno.EACCES
        else:
            error_nr = errno.EEXIST
        if (ends_with_sep and self.is_macos and (not self.exists(dir_name))):
            # macOS: a broken link with trailing separator is replaced
            self.remove_object(dir_name)
        else:
            self.raise_os_error(error_nr, dir_name)
    (head, tail) = self.splitpath(dir_name)
    self.add_object(to_string(head), FakeDirectory(to_string(tail), (mode & (~ self.umask)), filesystem=self))
def _path_without_trailing_separators(self, path: AnyStr) -> AnyStr:
    """Return *path* with all trailing path separators stripped."""
    stripped = path
    while self.ends_with_path_separator(stripped):
        stripped = stripped[:-1]
    return stripped
def makedirs(self, dir_name: AnyStr, mode: int=helpers.PERM_DEF, exist_ok: bool=False) -> None:
    """Create a directory and all missing parents, mimicking os.makedirs.

    Args:
        dir_name: the directory path to create.
        mode: permission bits, masked with the filesystem umask.
        exist_ok: if True, an already existing directory is not an error.

    Raises:
        OSError: ENOENT for an empty name, EACCES for permission
            problems, or the error from the underlying create_dir.
    """
    if (not dir_name):
        self.raise_os_error(errno.ENOENT, '')
    ends_with_sep = self.ends_with_path_separator(dir_name)
    dir_name = self.absnormpath(dir_name)
    if (ends_with_sep and self.is_macos and self.exists(dir_name, check_link=True) and (not self.exists(dir_name))):
        # macOS: replace a broken link given with a trailing separator
        self.remove_object(dir_name)
    dir_name_str = to_string(dir_name)
    path_components = self._path_components(dir_name_str)
    # walk down the already existing prefix of the path
    current_dir = self.root_dir
    for component in path_components:
        if ((not hasattr(current_dir, 'entries')) or (component not in current_dir.entries)):
            break
        else:
            current_dir = cast(FakeDirectory, current_dir.entries[component])
    try:
        self.create_dir(dir_name, (mode & (~ self.umask)))
    except OSError as e:
        if (e.errno == errno.EACCES):
            # permission denied is always propagated, even with exist_ok
            raise
        if ((not exist_ok) or (not isinstance(self.resolve(dir_name), FakeDirectory))):
            if (self.is_windows_fs and (e.errno == errno.ENOTDIR)):
                # Windows reports ENOENT where POSIX would report ENOTDIR
                e.errno = errno.ENOENT
            self.raise_os_error(e.errno, e.filename)
def _is_of_type(self, path: AnyPath, st_flag: int, follow_symlinks: bool=True, check_read_perm: bool=True) -> bool:
    """Return True if the object at `path` has the file type `st_flag`.

    Args:
        path: the path to check; None raises TypeError like the real os.
        st_flag: stat file-type flag (e.g. S_IFDIR, S_IFREG, S_IFLNK).
        follow_symlinks: if True, resolve symlinks before the check.
        check_read_perm: if True, unreadable parent directories make the
            lookup fail (return False).

    Raises:
        TypeError: if `path` is None.
    """
    if path is None:
        raise TypeError
    file_path = make_string_path(path)
    try:
        obj = self.resolve(file_path, follow_symlinks, check_read_perm=check_read_perm)
        if obj:
            self.raise_for_filepath_ending_with_separator(file_path, obj, macos_handling=not follow_symlinks)
        return S_IFMT(obj.st_mode) == st_flag
    except OSError:
        # nonexistent or inaccessible paths are simply "not of this type"
        return False
    # note: the original had an unreachable trailing `return False` here;
    # both the try and except branches always return, so it was removed
def isdir(self, path: AnyPath, follow_symlinks: bool=True) -> bool:
    """Return True if `path` refers to an existing fake directory."""
    return self._is_of_type(path, S_IFDIR, follow_symlinks=follow_symlinks)
def isfile(self, path: AnyPath, follow_symlinks: bool=True) -> bool:
    """Return True if `path` refers to an existing regular fake file."""
    return self._is_of_type(path, S_IFREG, follow_symlinks=follow_symlinks, check_read_perm=False)
def islink(self, path: AnyPath) -> bool:
    """Return True if `path` refers to a fake symbolic link (not followed)."""
    return self._is_of_type(path, S_IFLNK, follow_symlinks=False)
if (sys.version_info >= (3, 12)):
    # os.path.isjunction was added in Python 3.12; NTFS junctions are not
    # emulated in the fake filesystem, so the answer is always False.
    def isjunction(self, path: AnyPath) -> bool:
        return False
def confirmdir(self, target_directory: AnyStr, check_owner: bool=False) -> FakeDirectory:
    """Resolve a path and return it as a directory, or raise ENOTDIR."""
    resolved = cast(FakeDirectory, self.resolve(target_directory, check_owner=check_owner))
    if not resolved.st_mode & S_IFDIR:
        self.raise_os_error(errno.ENOTDIR, target_directory, 267)
    return resolved
def remove(self, path: AnyStr) -> None:
    """Remove a fake file, mimicking os.remove / os.unlink.

    Directories cannot be removed this way; the errno raised for that
    case is OS-dependent (EACCES on Windows, EPERM on macOS, EISDIR
    on Linux).
    """
    norm_path = make_string_path(path)
    norm_path = self.absnormpath(norm_path)
    if self.ends_with_path_separator(path):
        self._handle_broken_link_with_trailing_sep(norm_path)
    if self.exists(norm_path):
        obj = self.resolve(norm_path, check_read_perm=False)
        if (S_IFMT(obj.st_mode) == S_IFDIR):
            # only a symlink to a directory may be removed here
            link_obj = self.lresolve(norm_path)
            if (S_IFMT(link_obj.st_mode) != S_IFLNK):
                if self.is_windows_fs:
                    error = errno.EACCES
                elif self.is_macos:
                    error = errno.EPERM
                else:
                    error = errno.EISDIR
                self.raise_os_error(error, norm_path)
            if path.endswith(self.get_path_separator(path)):
                # trailing separator on a dir symlink is also OS-dependent
                if self.is_windows_fs:
                    error = errno.EACCES
                elif self.is_macos:
                    error = errno.EPERM
                else:
                    error = errno.ENOTDIR
                self.raise_os_error(error, norm_path)
        else:
            self.raise_for_filepath_ending_with_separator(path, obj)
    self.remove_object(norm_path)
def rmdir(self, target_directory: AnyStr, allow_symlink: bool=False) -> None:
    """Remove an empty fake directory, mimicking os.rmdir.

    Raises:
        OSError: EINVAL/EACCES for '.', ENOTDIR for symlinks on POSIX,
            ENOTEMPTY for non-empty directories.
    """
    if (target_directory == matching_string(target_directory, '.')):
        # removing '.' is rejected, with an OS-dependent errno
        error_nr = (errno.EACCES if self.is_windows_fs else errno.EINVAL)
        self.raise_os_error(error_nr, target_directory)
    ends_with_sep = self.ends_with_path_separator(target_directory)
    target_directory = self.absnormpath(target_directory)
    if self.confirmdir(target_directory, check_owner=True):
        if ((not self.is_windows_fs) and self.islink(target_directory)):
            if allow_symlink:
                return
            # macOS accepts a symlink only with a trailing separator
            if ((not ends_with_sep) or (not self.is_macos)):
                self.raise_os_error(errno.ENOTDIR, target_directory)
        dir_object = self.resolve(target_directory, check_owner=True)
        if dir_object.entries:
            self.raise_os_error(errno.ENOTEMPTY, target_directory)
        self.remove_object(target_directory)
def listdir(self, target_directory: AnyStr) -> List[AnyStr]:
    """Return the names of the entries in the given fake directory."""
    resolved = self.resolve_path(target_directory, allow_fd=True)
    entries = list(self.confirmdir(resolved).entries.keys())
    if self.shuffle_listdir_results:
        # emulate the unspecified ordering of the real os.listdir
        random.shuffle(entries)
    return entries
def __str__(self) -> str:
    """Return a string representation of the fake filesystem tree."""
    return str(self.root_dir)
def _add_standard_streams(self) -> None:
    """Register stdin/stdout/stderr so they occupy file descriptors 0-2.

    The registration order matters: it assigns the conventional fd
    numbers 0, 1 and 2 in sequence.
    """
    self._add_open_file(StandardStreamWrapper(sys.stdin))
    self._add_open_file(StandardStreamWrapper(sys.stdout))
    self._add_open_file(StandardStreamWrapper(sys.stderr))
def _create_temp_dir(self):
    """Create the platform temp directory inside the fake filesystem."""
    # use the real temp dir path so tempfile works inside the fake fs
    temp_dir = tempfile.gettempdir()
    if (not self.exists(temp_dir)):
        self.create_dir(temp_dir)
    if ((sys.platform != 'win32') and (not self.exists('/tmp'))):
        # POSIX convention: make /tmp point at the temp location
        self.create_symlink('/tmp', temp_dir)
    # the temp dir itself shall not count against the fake disk usage
    next(iter(self.mount_points.values()))['used_size'] = 0
class ApiException(HTTPException):
    """HTTP error that carries a structured, JSON-serializable payload."""

    def __init__(self, error_type, status_code, error_description, payload=None):
        Exception.__init__(self)
        self.error_description = error_description
        self.code = status_code
        self.payload = payload
        self.error_type = error_type
        # pre-compute the serializable representation of this error
        self.data = self.to_dict()
        super(ApiException, self).__init__(error_description, None)

    def to_dict(self):
        """Return the error as a dict suitable for a problem-details response."""
        result = dict(self.payload or ())
        if self.error_description is not None:
            result['detail'] = self.error_description
            result['error_message'] = self.error_description
        result['error_type'] = self.error_type.value
        result['title'] = self.error_type.value
        result['type'] = url_for('api.error', error_type=self.error_type.value, _external=True)
        result['status'] = self.code
        return result
class RCC_APB1ENR(IntEnum):
    """Bit masks for the STM32 RCC APB1 peripheral clock enable register.

    Each member is the enable bit for one APB1-connected peripheral;
    bit positions follow the RCC_APB1ENR register layout.
    """
    TIM2EN = (1 << 0)  # TIM2 timer clock enable
    TIM3EN = (1 << 1)  # TIM3 timer clock enable
    TIM4EN = (1 << 2)  # TIM4 timer clock enable
    TIM5EN = (1 << 3)  # TIM5 timer clock enable
    WWDGEN = (1 << 11)  # window watchdog clock enable
    SPI2EN = (1 << 14)  # SPI2 clock enable
    SPI3EN = (1 << 15)  # SPI3 clock enable
    USART2EN = (1 << 17)  # USART2 clock enable
    I2C1EN = (1 << 21)  # I2C1 clock enable
    I2C2EN = (1 << 22)  # I2C2 clock enable
    I2C3EN = (1 << 23)  # I2C3 clock enable
    PWREN = (1 << 28)  # power interface clock enable
def match_script_against_template(script, template) -> bool:
    """Return whether a bitcoin script matches the given opcode template."""
    if script is None:
        return False
    if isinstance(script, (bytes, bytearray)):
        # decode raw bytes into opcode tuples first
        try:
            script = list(script_GetOp(script))
        except MalformedBitcoinScript:
            return False
    if len(script) != len(template):
        return False
    for template_item, script_item in zip(template, script):
        # generic push-data template entries match any push of a valid length
        if OPPushDataGeneric.is_instance(template_item) and template_item.check_data_len(script_item[0]):
            continue
        if template_item != script_item[0]:
            return False
    return True
# NOTE(review): the next line looks like a decorator whose `@pytest.mark`
# prefix was lost during extraction (it should probably read
# `@pytest.mark.parametrize(...)`) - confirm against the original source.
.parametrize('exc_cls', (BaseException, Exception, GeneratorExit, KeyboardInterrupt, RuntimeError, SystemExit))
def test_instance_method_spy_exception(exc_cls: Type[BaseException], mocker: MockerFixture) -> None:
    # Spying must not swallow exceptions: the spied method still raises,
    # while the spy records both the calls and the raised exception.
    class Foo():
        def bar(self, arg):
            raise exc_cls(f'Error with {arg}')
    foo = Foo()
    spy = mocker.spy(foo, 'bar')
    expected_calls = []
    for (i, v) in enumerate([10, 20]):
        with pytest.raises(exc_cls, match=f'Error with {v}'):
            foo.bar(arg=v)
        expected_calls.append(mocker.call(arg=v))
    assert (foo.bar.call_args_list == expected_calls)
    # spy_exception holds the most recently raised exception
    assert (str(spy.spy_exception) == f'Error with {v}')
def notify_new_submission(submission_id: int, title: str, elevator_pitch: str, submission_type: str, admin_url: str, duration: int, topic: str, speaker_id: int, conference_id: int, tags: str):
    """Publish a NewCFPSubmission event, deduplicated by submission id."""
    payload = {
        'title': title,
        'elevator_pitch': elevator_pitch,
        'submission_type': submission_type,
        'admin_url': admin_url,
        'topic': topic,
        'duration': str(duration),
        'speaker_id': speaker_id,
        'conference_id': conference_id,
        'tags': tags,
    }
    publish_message('NewCFPSubmission', payload, deduplication_id=str(submission_id))
def calculate_max_pss_salt_length(key: (rsa.RSAPrivateKey | rsa.RSAPublicKey), hash_algorithm: hashes.HashAlgorithm) -> int:
    """Return the maximum PSS salt length for the given RSA key and hash."""
    if not isinstance(key, (rsa.RSAPrivateKey, rsa.RSAPublicKey)):
        raise TypeError('key must be an RSA public or private key')
    # emLen = ceil((modBits - 1) / 8), as in RFC 8017 (PKCS#1 v2.2) EMSA-PSS
    encoded_length = (key.key_size + 6) // 8
    max_salt = encoded_length - hash_algorithm.digest_size - 2
    assert max_salt >= 0
    return max_salt
def local_to_utc(df):
    """Shift a DataFrame's datetime index from local time to UTC.

    Returns a copy of *df*; the input frame is not modified.

    NOTE(review): the offset computation below is dubious - `.seconds` of a
    negative timedelta and the trailing `// 10` adjustment look fragile, and
    `datetime.utcnow()` is deprecated since Python 3.12.  Behavior is kept
    as-is pending confirmation against callers.
    """
    try:
        # negated wall-clock difference between local time and UTC, in seconds
        offset_hour = -(datetime.now() - datetime.utcnow()).seconds
    except Exception:
        # was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
        # are no longer swallowed
        offset_hour = time.altzone if time.daylight else time.timezone
    offset_hour = offset_hour // 3600
    # presumably clamps unexpected two-digit offsets - TODO confirm intent
    offset_hour = offset_hour if offset_hour < 10 else offset_hour // 10
    df = df.copy()
    df.index = pd_to_datetime(df.index, utc=True) + timedelta(hours=offset_hour)
    return df
def _multi_create_fileset(base_dir, name, structure, recurse):
    """Create one fileset, or several when the structure specifies a 'range'."""
    counts = structure.get('range')
    if not counts:
        _create_fileset(os.path.join(base_dir, name), structure, recurse)
        return
    if isinstance(counts, int):
        counts = [counts]
    # `name` is a format template, e.g. 'dir{}' -> dir0, dir1, ...
    for index in range(*counts):
        _create_fileset(os.path.join(base_dir, name.format(index)), structure, recurse)
def CollateIterators(*rorp_iters):
    """Collate several index-sorted rorp iterators into IndexedTuples.

    Yields one IndexedTuple per distinct index, holding the rorp from each
    input iterator at that index, or None where an iterator has no entry.
    Assumes each input iterator yields rorps in ascending index order.
    """
    iter_num = len(rorp_iters)
    if (iter_num == 2):
        # the two-iterator case has a dedicated helper
        return Collate2Iters(rorp_iters[0], rorp_iters[1])
    # overflow[i] is set to 1 once iterator i is exhausted
    overflow = ([None] * iter_num)
    # rorps[i] buffers the current, not yet consumed element of iterator i
    rorps = overflow[:]
    def setrorps(overflow, rorps):
        # refill every consumed buffer slot from its iterator
        for i in range(iter_num):
            if ((not overflow[i]) and (rorps[i] is None)):
                try:
                    rorps[i] = next(rorp_iters[i])
                except StopIteration:
                    overflow[i] = 1
                    rorps[i] = None
    def getleastindex(rorps):
        # smallest index among the currently buffered rorps
        return min([rorp.index for rorp in [x for x in rorps if x]])
    def yield_tuples(iter_num, overflow, rorps):
        while 1:
            setrorps(overflow, rorps)
            if (None not in overflow):
                # every iterator is exhausted
                break
            index = getleastindex(rorps)
            yieldval = []
            for i in range(iter_num):
                if (rorps[i] and (rorps[i].index == index)):
                    yieldval.append(rorps[i])
                    # mark the slot consumed so setrorps refills it
                    rorps[i] = None
                else:
                    yieldval.append(None)
            (yield IndexedTuple(index, yieldval))
    return yield_tuples(iter_num, overflow, rorps)
# NOTE(review): the next line looks like a truncated decorator (likely a
# pytest skip/xfail marker guarding PyPy) - confirm against the original.
_on_pypy
def test_dynamic_attributes():
    # DynamicClass instances must support arbitrary Python attributes
    # and a replaceable __dict__, without leaking instances.
    instance = m.DynamicClass()
    assert (not hasattr(instance, 'foo'))
    assert ('foo' not in dir(instance))
    instance.foo = 42
    assert hasattr(instance, 'foo')
    assert (instance.foo == 42)
    assert ('foo' in dir(instance))
    assert ('foo' in instance.__dict__)
    # __dict__ may be replaced wholesale, but only with a dict
    instance.__dict__ = {'bar': True}
    assert (not hasattr(instance, 'foo'))
    assert hasattr(instance, 'bar')
    with pytest.raises(TypeError) as excinfo:
        instance.__dict__ = []
    assert (str(excinfo.value) == "__dict__ must be set to a dictionary, not a 'list'")
    # the instance must be garbage collected once released
    cstats = ConstructorStats.get(m.DynamicClass)
    assert (cstats.alive() == 1)
    del instance
    assert (cstats.alive() == 0)
    class PythonDerivedDynamicClass(m.DynamicClass):
        pass
    # both C++- and Python-derived subclasses keep the dynamic behavior
    for cls in (m.CppDerivedDynamicClass, PythonDerivedDynamicClass):
        derived = cls()
        derived.foobar = 100
        assert (derived.foobar == 100)
        assert (cstats.alive() == 1)
        del derived
        assert (cstats.alive() == 0)
def parse_args():
    """Parse the known command line arguments for the training script."""
    parser = argparse.ArgumentParser(description='Simple example of a training script.')
    parser.add_argument('--filepath', type=str, help='path of the model')
    parser.add_argument('--max_train_steps', type=int, default=1000, help='Total number of training steps to perform. If provided, overrides num_train_epochs.')
    parser.add_argument('--checkpointing_steps', type=int, default=200, help='Save a checkpoint of the training state every X updates. These checkpoints can be used both as final checkpoints in case they are better than the last checkpoint, and are also suitable for resuming training using `--resume_from_checkpoint`.')
    # ignore unrecognized arguments instead of erroring out
    args, _ = parser.parse_known_args()
    return args
def format_list(lst: Sequence[str], style: Literal[('standard', 'standard-short', 'or', 'or-short', 'unit', 'unit-short', 'unit-narrow')]='standard', locale: ((Locale | str) | None)=DEFAULT_LOCALE) -> str:
    """Join a sequence of strings into a locale-aware, human readable list."""
    locale = Locale.parse(locale)
    if not lst:
        return ''
    if len(lst) == 1:
        return lst[0]
    if style not in locale.list_patterns:
        raise ValueError(f'Locale {locale} does not support list formatting style {style!r} (supported are {sorted(locale.list_patterns)})')
    patterns = locale.list_patterns[style]
    if len(lst) == 2:
        # two-element lists use a dedicated pattern
        return patterns['2'].format(*lst)
    # fold the middle elements left to right, then apply start/end patterns
    joined = patterns['start'].format(lst[0], lst[1])
    for item in lst[2:-1]:
        joined = patterns['middle'].format(joined, item)
    return patterns['end'].format(joined, lst[-1])
class CodeBlock(Block):
    """CommonMark block strategy for indented and fenced code blocks.

    NOTE(review): these methods take no `self` and are presumably invoked
    as static strategy callbacks by the parser - confirm upstream usage.
    """
    # code blocks accept raw content lines
    accepts_lines = True
    def continue_(parser=None, container=None):
        # Returns 0 to keep the block open, 1 to close it, and 2 when a
        # closing fence consumed the whole line.
        ln = parser.current_line
        indent = parser.indent
        if container.is_fenced:
            match = ((indent <= 3) and (len(ln) >= (parser.next_nonspace + 1)) and (ln[parser.next_nonspace] == container.fence_char) and re.search(reClosingCodeFence, ln[parser.next_nonspace:]))
            if (match and (len(match.group()) >= container.fence_length)):
                # closing fence found - finalize the block
                parser.finalize(container, parser.line_number)
                return 2
            else:
                # skip up to fence_offset leading spaces of the content line
                i = container.fence_offset
                while ((i > 0) and is_space_or_tab(peek(ln, parser.offset))):
                    parser.advance_offset(1, True)
                    i -= 1
        elif (indent >= CODE_INDENT):
            # indented code: consume exactly the code indent
            parser.advance_offset(CODE_INDENT, True)
        elif parser.blank:
            parser.advance_next_nonspace()
        else:
            return 1
        return 0
    def finalize(parser=None, block=None):
        if block.is_fenced:
            # the first line is the info string; the rest is the literal code
            content = block.string_content
            newline_pos = content.index('\n')
            first_line = content[0:newline_pos]
            rest = content[(newline_pos + 1):]
            block.info = unescape_string(first_line.strip())
            block.literal = rest
        else:
            # indented code: collapse trailing blank lines to one newline
            block.literal = re.sub('(\\n *)+$', '\n', block.string_content)
        block.string_content = None
    def can_contain(t):
        # code blocks are leaves: they contain no child blocks
        return False
class ControlStateTests(unittest.TestCase):
    """GUI tests for control state checks against the MFC CmnCtrl1 sample."""
    def setUp(self):
        # start the sample application and pick the edit box on tab 4
        self.app = Application()
        self.app.start(os.path.join(mfc_samples_folder, u'CmnCtrl1.exe'))
        self.dlg = self.app.Common_Controls_Sample
        self.dlg.TabControl.select(4)
        self.ctrl = self.dlg.EditBox.find()
    def tearDown(self):
        # close the sample application again
        self.app.kill()
    def test_VerifyEnabled(self):
        # the selected edit box is disabled, so verify_enabled must raise
        self.assertRaises(ElementNotEnabled, self.ctrl.verify_enabled)
    def test_VerifyVisible(self):
        # switching tabs hides the control, so verify_visible must raise
        self.dlg.TabControl.select(3)
        self.assertRaises(ElementNotVisible, self.ctrl.verify_visible)
def load_sem_seg(gt_root, image_root, gt_ext='png', image_ext='jpg'):
    """Load semantic-segmentation dataset dicts by pairing images with GT maps.

    Images under `image_root` (ending in `image_ext`) are matched to
    ground-truth maps under `gt_root` (ending in `gt_ext`) by their path
    relative to the respective root, without extension.

    Returns:
        list[dict]: one record per pair with "file_name" and
        "sem_seg_file_name" keys.
    """
    def file2id(folder_path, file_path):
        # pairing id = path relative to the root, without the extension
        image_id = os.path.normpath(os.path.relpath(file_path, start=folder_path))
        image_id = os.path.splitext(image_id)[0]
        return image_id
    input_files = sorted((os.path.join(image_root, f) for f in PathManager.ls(image_root) if f.endswith(image_ext)), key=(lambda file_path: file2id(image_root, file_path)))
    gt_files = sorted((os.path.join(gt_root, f) for f in PathManager.ls(gt_root) if f.endswith(gt_ext)), key=(lambda file_path: file2id(gt_root, file_path)))
    assert (len(gt_files) > 0), 'No annotations found in {}.'.format(gt_root)
    if (len(input_files) != len(gt_files)):
        # Fall back to the basename intersection ONLY when the directories
        # disagree. Previously this block ran unconditionally, rebuilding
        # both lists and logging a spurious warning even for fully matching
        # datasets. Also: Logger.warn is deprecated in favor of warning().
        logger.warning('Directory {} and {} has {} and {} files, respectively.'.format(image_root, gt_root, len(input_files), len(gt_files)))
        input_basenames = [os.path.basename(f)[:(- len(image_ext))] for f in input_files]
        gt_basenames = [os.path.basename(f)[:(- len(gt_ext))] for f in gt_files]
        intersect = sorted(set(input_basenames) & set(gt_basenames))
        logger.warning('Will use their intersection of {} files.'.format(len(intersect)))
        input_files = [os.path.join(image_root, (f + image_ext)) for f in intersect]
        gt_files = [os.path.join(gt_root, (f + gt_ext)) for f in intersect]
    logger.info('Loaded {} images with semantic segmentation from {}'.format(len(input_files), image_root))
    dataset_dicts = []
    for (img_path, gt_path) in zip(input_files, gt_files):
        record = {}
        record['file_name'] = img_path
        record['sem_seg_file_name'] = gt_path
        dataset_dicts.append(record)
    return dataset_dicts
class _PlainFormatter():
    """Formats validation CLI output as plain text messages."""

    _error_format = field()

    def filenotfound_error(self, path, exc_info):
        return '{!r} does not exist.\n'.format(path)

    def parsing_error(self, path, exc_info):
        shown_path = '<stdin>' if (path == '<stdin>') else repr(path)
        return 'Failed to parse {}: {}\n'.format(shown_path, exc_info[1])

    def validation_error(self, instance_path, error):
        return self._error_format.format(file_name=instance_path, error=error)

    def validation_success(self, instance_path):
        # plain output stays silent on success
        return ''
class HugefilesNet(XFSDownloader):
    """Downloader plugin for hugefiles.net, based on the generic XFS downloader."""
    __name__ = 'HugefilesNet'
    __type__ = 'downloader'
    __version__ = '0.12'
    __status__ = 'testing'
    # NOTE(review): the pattern literal below is truncated/unterminated in
    # this copy of the file - restore the URL regex from the original source.
    __pattern__ = '
    __config__ = [('enabled', 'bool', 'Activated', True), ('use_premium', 'bool', 'Use premium account if available', True), ('fallback', 'bool', 'Fallback to free download if premium fails', True), ('chk_filesize', 'bool', 'Check file size', True), ('max_wait', 'int', 'Reconnect if waiting time is greater than minutes', 10)]
    __description__ = 'Hugefiles.net downloader plugin'
    __license__ = 'GPLv3'
    __authors__ = [('stickell', 'l.'), ('GammaC0de', 'nitzo2001[AT]yahoo[DOT]com')]
    PLUGIN_DOMAIN = 'hugefiles.net'
    # regex extracting the file size (S) and unit (U) from the page
    SIZE_PATTERN = '<font style="color:#2574b6;"> \\((?P<S>[\\d.,]+) (?P<U>[\\w^_]+)\\)'
class QtPluginBase(object):
    """Qt GUI glue shared by all hardware wallet plugins."""
    def load_wallet(self: Union[('QtPluginBase', HW_PluginBase)], wallet: 'Abstract_Wallet', window: ElectrumWindow):
        """Wire up status bar buttons, handlers and worker threads for each HW keystore."""
        relevant_keystores = [keystore for keystore in wallet.get_keystores() if isinstance(keystore, self.keystore_class)]
        if (not relevant_keystores):
            return
        for keystore in relevant_keystores:
            if (not self.libraries_available):
                # missing device libraries: inform the user and give up
                message = keystore.plugin.get_library_not_available_message()
                window.show_error(message)
                return
            tooltip = ((self.device + '\n') + (keystore.label or 'unnamed'))
            cb = partial(self._on_status_bar_button_click, window=window, keystore=keystore)
            button = StatusBarButton(read_QIcon(self.icon_unpaired), tooltip, cb)
            button.icon_paired = self.icon_paired
            button.icon_unpaired = self.icon_unpaired
            window.statusBar().addPermanentWidget(button)
            handler = self.create_handler(window)
            handler.button = button
            keystore.handler = handler
            # each keystore gets its own worker thread for device access
            keystore.thread = TaskThread(window, on_error=partial(self.on_task_thread_error, window, keystore))
            self.add_show_address_on_hw_device_button_for_receive_addr(wallet, keystore, window)
        def trigger_pairings():
            # pair each device: first silently, then with user interaction
            devmgr = self.device_manager()
            devices = devmgr.scan_devices()
            for keystore in relevant_keystores:
                try:
                    self.get_client(keystore=keystore, force_pair=True, allow_user_interaction=False, devices=devices)
                except UserCancelled:
                    pass
            for keystore in relevant_keystores:
                try:
                    self.get_client(keystore=keystore, force_pair=True, allow_user_interaction=True, devices=devices)
                except UserCancelled:
                    pass
        # run the pairing off the GUI thread, on any keystore's thread
        some_keystore = relevant_keystores[0]
        some_keystore.thread.add(trigger_pairings)
    def _on_status_bar_button_click(self, *, window: ElectrumWindow, keystore: 'Hardware_KeyStore'):
        """Open the settings dialog; route expected errors to the error handler."""
        try:
            self.show_settings_dialog(window=window, keystore=keystore)
        except (UserFacingException, UserCancelled) as e:
            exc_info = (type(e), e, e.__traceback__)
            self.on_task_thread_error(window=window, keystore=keystore, exc_info=exc_info)
    def on_task_thread_error(self: Union[('QtPluginBase', HW_PluginBase)], window: ElectrumWindow, keystore: 'Hardware_KeyStore', exc_info):
        """Show worker thread errors; offer to ignore outdated firmware and re-pair."""
        e = exc_info[1]
        if isinstance(e, OutdatedHwFirmwareException):
            if window.question(e.text_ignore_old_fw_and_continue(), title=_('Outdated device firmware')):
                self.set_ignore_outdated_fw()
                # the device needs to be re-paired after the setting change
                devmgr = self.device_manager()
                def re_pair_device():
                    device_id = self.choose_device(window, keystore)
                    devmgr.unpair_id(device_id)
                    self.get_client(keystore)
                keystore.thread.add(re_pair_device)
            return
        else:
            window.on_error(exc_info)
    def choose_device(self: Union[('QtPluginBase', HW_PluginBase)], window: ElectrumWindow, keystore: 'Hardware_KeyStore') -> Optional[str]:
        """Return the paired device id for the keystore, asking the user if needed.

        Returns None if the user cancels the selection.
        """
        # device selection may block, so it must not run on the GUI thread
        assert (window.gui_thread != threading.current_thread()), 'must not be called from GUI thread'
        device_id = self.device_manager().id_by_pairing_code(keystore.pairing_code())
        if (not device_id):
            try:
                info = self.device_manager().select_device(self, keystore.handler, keystore)
            except UserCancelled:
                return
            device_id = info.device.id_
        return device_id
    def show_settings_dialog(self, window: ElectrumWindow, keystore: 'Hardware_KeyStore') -> None:
        """Default settings dialog: just trigger a device connection."""
        def connect():
            device_id = self.choose_device(window, keystore)
        keystore.thread.add(connect)
    def add_show_address_on_hw_device_button_for_receive_addr(self, wallet: 'Abstract_Wallet', keystore: 'Hardware_KeyStore', main_window: ElectrumWindow):
        """Add an 'eye' button that shows the receive address on the device."""
        plugin = keystore.plugin
        receive_address_e = main_window.receive_address_e
        def show_address():
            addr = str(receive_address_e.text())
            # device access happens on the keystore thread, not the GUI thread
            keystore.thread.add(partial(plugin.show_address, wallet, addr, keystore))
        dev_name = f'{plugin.device} ({keystore.label})'
        receive_address_e.addButton('eye1.png', show_address, _('Show on {}').format(dev_name))
    def create_handler(self, window: Union[(ElectrumWindow, InstallWizard)]) -> 'QtHandlerBase':
        """Create the Qt user-interaction handler; implemented by subclasses."""
        raise NotImplementedError()
def receptive_field(net):
    """Compute the receptive field size of a sequential-style CNN.

    Walks all Conv2d/AvgPool2d/MaxPool2d modules of `net` (in module
    order) and back-propagates the receptive field of a single output
    pixel to the input. Assumes square kernels and that the modules are
    applied in the order reported by ``net.modules()``; padding does not
    affect receptive field size and is ignored.

    Returns:
        int: receptive field side length in input pixels.
    """
    def _input_size(output_size, ksize, stride, dilation):
        # inverse of the conv/pool output-size formula for one dimension
        return (output_size - 1) * stride + ksize * dilation - dilation + 1
    stats = []
    for m in net.modules():
        if isinstance(m, (nn.Conv2d, nn.AvgPool2d, nn.MaxPool2d)):
            # nn.AvgPool2d has no `dilation` attribute: accessing it raised
            # AttributeError before; it behaves as dilation 1
            stats.append((m.kernel_size, m.stride, getattr(m, 'dilation', 1)))
    rsize = 1
    for (ksize, stride, dilation) in reversed(stats):
        # normalize (k, k) tuples to a single int (square kernels assumed)
        if isinstance(ksize, tuple):
            ksize = ksize[0]
        if isinstance(stride, tuple):
            stride = stride[0]
        if isinstance(dilation, tuple):
            dilation = dilation[0]
        rsize = _input_size(rsize, ksize, stride, dilation)
    return rsize
class SarlLexer(RegexLexer):
    """Pygments lexer for the SARL agent-oriented programming language."""
    name = 'SARL'
    # NOTE(review): the url literal below is truncated/unterminated in this
    # copy of the file - restore the project URL from the original source.
    url = '
    aliases = ['sarl']
    filenames = ['*.sarl']
    mimetypes = ['text/x-sarl']
    version_added = '2.4'
    flags = (re.MULTILINE | re.DOTALL)
    # token rules: root state plus helper states for class and import names
    tokens = {'root': [('^(\\s*(?:[a-zA-Z_][\\w.\\[\\]]*\\s+)+?)([a-zA-Z_$][\\w$]*)(\\s*)(\\()', bygroups(using(this), Name.Function, Whitespace, Operator)), ('[^\\S\\n]+', Whitespace), ('(//.*?)(\\n)', bygroups(Comment.Single, Whitespace)), ('/\\*.*?\\*/', Comment.Multiline), ('[a-zA-Z_][\\w.]*', Name.Decorator), ('(as|break|case|catch|default|do|else|extends|extension|finally|fires|for|if|implements|instanceof|new|on|requires|return|super|switch|throw|throws|try|typeof|uses|while|with)\\b', Keyword), ('(abstract|def|dispatch|final|native|override|private|protected|public|static|strictfp|synchronized|transient|val|var|volatile)\\b', Keyword.Declaration), ('(boolean|byte|char|double|float|int|long|short|void)\\b', Keyword.Type), ('(package)(\\s+)', bygroups(Keyword.Namespace, Whitespace)), ('(false|it|null|occurrence|this|true|void)\\b', Keyword.Constant), ('(agent|annotation|artifact|behavior|capacity|class|enum|event|interface|skill|space)(\\s+)', bygroups(Keyword.Declaration, Whitespace), 'class'), ('(import)(\\s+)', bygroups(Keyword.Namespace, Whitespace), 'import'), ('"(|\\\\[^\\\\]|[^"\\\\])*"', String.Double), ("'(|\\\\[^\\\\]|[^'\\\\])*'", String.Single), ('[a-zA-Z_]\\w*:', Name.Label), ('[a-zA-Z_$]\\w*', Name), ('[~^*!%&\\[\\](){}<>\\|+=:;,./?-]', Operator), ('[0-9][0-9]*\\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float), ('0x[0-9a-fA-F]+', Number.Hex), ('[0-9]+L?', Number.Integer), ('\\n', Whitespace)], 'class': [('[a-zA-Z_]\\w*', Name.Class, '#pop')], 'import': [('[\\w.]+\\*?', Name.Namespace, '#pop')]}
def test_make_setup_py_reqs():
    """The generated setup.py carries requirements and extras from pyproject.toml."""
    pyproject = samples_dir / 'extras' / 'pyproject.toml'
    builder = sdist.SdistBuilder.from_ini_path(pyproject)
    assigns = get_setup_assigns(builder.make_setup_py())
    assert assigns['install_requires'] == ['toml']
    assert assigns['extras_require'] == {'test': ['pytest'], 'custom': ['requests']}
class GetChatMember():
    async def get_chat_member(self: 'pyrogram.Client', chat_id: Union[(int, str)], user_id: Union[(int, str)]) -> 'types.ChatMember':
        """Get information about one member of a chat.

        Resolves both peers, then dispatches on the chat's raw peer type:
        basic groups are scanned client-side, channels/supergroups use a
        direct server-side participant lookup.

        Parameters:
            chat_id: Identifier (int) or username (str) of the target chat.
            user_id: Identifier (int) or username (str) of the target user.

        Returns:
            types.ChatMember: the parsed member entry on success.

        Raises:
            UserNotParticipant: if the user is not in a basic group's
                participant list.
            ValueError: if ``chat_id`` resolves to a plain user, not a chat.
        """
        chat = (await self.resolve_peer(chat_id))
        user = (await self.resolve_peer(user_id))
        if isinstance(chat, raw.types.InputPeerChat):
            # Basic group: no per-user API, so fetch the full chat and scan
            # its participant list manually.
            r = (await self.invoke(raw.functions.messages.GetFullChat(chat_id=chat.chat_id)))
            members = getattr(r.full_chat.participants, 'participants', [])
            users = {i.id: i for i in r.users}
            for member in members:
                member = types.ChatMember._parse(self, member, users, {})
                if isinstance(user, raw.types.InputPeerSelf):
                    # Looking for the current session's own membership.
                    if member.user.is_self:
                        return member
                elif (member.user.id == user.user_id):
                    return member
            else:
                # for-else: the loop finished without returning a match.
                raise UserNotParticipant
        elif isinstance(chat, raw.types.InputPeerChannel):
            # Channel/supergroup: the server can look the participant up directly.
            r = (await self.invoke(raw.functions.channels.GetParticipant(channel=chat, participant=user)))
            users = {i.id: i for i in r.users}
            chats = {i.id: i for i in r.chats}
            return types.ChatMember._parse(self, r.participant, users, chats)
        else:
            raise ValueError(f'The chat_id "{chat_id}" belongs to a user')
# NOTE(review): the bare `.parametrize(...)` here was a syntax error — the
# `@pytest.mark` prefix was lost; restored below.
@pytest.mark.parametrize(
    'voucher_tag,voucher_code,expected_roles',
    (
        ('speakers', 'code', [Role.SPEAKER, Role.ATTENDEE]),
        ('', 'keynoter-123', [Role.KEYNOTER, Role.ATTENDEE]),
        ('staff', 'code', [Role.STAFF, Role.ATTENDEE]),
        ('', 'staff-5667', [Role.STAFF, Role.ATTENDEE]),
        ('sponsor,pizzacorp', 'pizza', [Role.SPONSOR, Role.ATTENDEE]),
        ('community,sushi', 'code', [Role.ATTENDEE]),
    ),
)
def test_get_roles(conference_factory, requests_mock, voucher_tag, voucher_code, expected_roles):
    """A Pretix voucher's tag/code maps the ticket holder to the expected roles."""
    conference = conference_factory()
    # Stub the Pretix vouchers endpoint with a single-page, single-voucher response.
    requests_mock.get(
        f'{settings.PRETIX_API}organizers/base-pretix-organizer-id/events/base-pretix-event-id/vouchers',
        status_code=200,
        json={'next': None, 'results': [{'id': 1, 'code': voucher_code, 'tag': voucher_tag}]},
    )
    roles = _get_roles(conference=conference, user_id=1, ticket={'id': 1, 'voucher': 1})
    assert roles == expected_roles
def bin_encode_attr(attr: Dict[str, Any]) -> None:
    """Base64-encode every binary payload inside a typed attribute, in place.

    A ``BINARY`` value (or each member of a ``BINARY_SET``) is run through
    ``_b64encode``; ``MAP`` and ``LIST`` containers are walked recursively so
    nested binary values are encoded too. Exactly one branch applies per call.
    """
    if BINARY in attr:
        attr[BINARY] = _b64encode(attr[BINARY])
        return
    if BINARY_SET in attr:
        attr[BINARY_SET] = [_b64encode(member) for member in attr[BINARY_SET]]
        return
    if MAP in attr:
        children = attr[MAP].values()
    elif LIST in attr:
        children = attr[LIST]
    else:
        return
    for child in children:
        bin_encode_attr(child)
class testMiscIters(unittest.TestCase):
    """Round-trip rpaths and mixed objects through MiscIterToFile/FileToMiscIter.

    Fixes over the previous copy:
      * the stray tuples ``((os.name == 'nt'), 'FIXME ...')`` before
        ``testMix`` and ``testFlushRepeat`` were no-op expressions — they
        were evidently stripped ``@unittest.skipIf`` decorators; restored.
      * ``print_MiscIterFile`` was broken on Python 3: ``sys.stdout.write``
        rejects bytes, and ``buf[0] == b'z'`` compares an int to bytes
        (always False, so the loop never terminated).
    """

    def setUp(self):
        # Fresh output dir with three regular files; each rpath gets its
        # open file object attached so it can be streamed by the iterator.
        Myrm(abs_output_dir)
        self.outputrp = rpath.RPath(Globals.local_connection, abs_output_dir)
        self.regfile1 = self.outputrp.append('reg1')
        self.regfile2 = self.outputrp.append('reg2')
        self.regfile3 = self.outputrp.append('reg3')
        self.outputrp.mkdir()
        with self.regfile1.open('wb') as fp:
            fp.write(b'hello')
        self.regfile1.setfile(self.regfile1.open('rb'))
        self.regfile2.touch()
        self.regfile2.setfile(self.regfile2.open('rb'))
        with self.regfile3.open('wb') as fp:
            fp.write(b'goodbye')
        self.regfile3.setfile(self.regfile3.open('rb'))
        self.regfile1.setdata()
        self.regfile2.setdata()
        self.regfile3.setdata()

    def print_MiscIterFile(self, rpiter_file):
        """Debug helper: dump a MiscIter file to stdout until the b'z' end marker."""
        while 1:
            buf = rpiter_file.read()
            # buf is bytes, so write to the binary buffer; buf[:1] (a bytes
            # slice, unlike buf[0] which is an int) is the record-type marker.
            sys.stdout.buffer.write(buf)
            if buf[:1] == b'z':
                break

    def testBasic(self):
        """Directory and regular files survive a file round trip with contents intact."""
        rplist = [self.outputrp, self.regfile1, self.regfile2, self.regfile3]
        i_out = FileToMiscIter(MiscIterToFile(iter(rplist)))
        out1 = next(i_out)
        self.assertEqual(out1, self.outputrp)
        out2 = next(i_out)
        self.assertEqual(out2, self.regfile1)
        fp = out2.open('rb')
        self.assertEqual(fp.read(), b'hello')
        self.assertFalse(fp.close())
        out3 = next(i_out)
        self.assertEqual(out3, self.regfile2)
        fp = out3.open('rb')
        self.assertEqual(fp.read(), b'')
        self.assertFalse(fp.close())
        next(i_out)
        self.assertRaises(StopIteration, i_out.__next__)

    @unittest.skipIf(os.name == 'nt', 'FIXME fails under Windows')
    def testMix(self):
        """Non-rpath objects (ints, strings) interleave with rpaths correctly."""
        filelist = [5, self.regfile3, 'hello']
        s = MiscIterToFile(iter(filelist)).read()
        i_out = FileToMiscIter(io.BytesIO(s))
        out1 = next(i_out)
        self.assertEqual(out1, 5)
        out2 = next(i_out)
        self.assertEqual(out2, self.regfile3)
        fp = out2.open('rb')
        self.assertEqual(fp.read(), b'goodbye')
        self.assertFalse(fp.close())
        out3 = next(i_out)
        self.assertEqual(out3, 'hello')
        self.assertRaises(StopIteration, i_out.__next__)

    def testFlush(self):
        """A MiscIterFlush marker truncates the stream at that point."""
        rplist = [self.outputrp, MiscIterFlush, self.outputrp]
        filelike = MiscIterToFile(iter(rplist))
        new_filelike = io.BytesIO(((filelike.read() + b'z') + filelike._i2b(0, 7)))
        i_out = FileToMiscIter(new_filelike)
        self.assertEqual(next(i_out), self.outputrp)
        self.assertRaises(StopIteration, i_out.__next__)
        i_out2 = FileToMiscIter(filelike)
        self.assertEqual(next(i_out2), self.outputrp)
        self.assertRaises(StopIteration, i_out2.__next__)

    @unittest.skipIf(os.name == 'nt', 'FIXME fails under Windows')
    def testFlushRepeat(self):
        """MiscIterFlushRepeat flushes but is itself passed through the stream."""
        rplist = [self.outputrp, MiscIterFlushRepeat, self.outputrp]
        filelike = MiscIterToFile(iter(rplist))
        new_filelike = io.BytesIO(((filelike.read() + b'z') + filelike._i2b(0, 7)))
        i_out = FileToMiscIter(new_filelike)
        self.assertEqual(next(i_out), self.outputrp)
        self.assertIs(next(i_out), MiscIterFlushRepeat)
        self.assertRaises(StopIteration, i_out.__next__)
        i_out2 = FileToMiscIter(filelike)
        self.assertEqual(next(i_out2), self.outputrp)
        self.assertRaises(StopIteration, i_out2.__next__)
def test_merge_into_using_subquery():
    """Column lineage traces through a subquery used as a MERGE source."""
    sql = (
        'MERGE INTO target USING (select k, max(v) as v_max from src group by k) AS b ON target.k = b.k\n'
        'WHEN MATCHED THEN UPDATE SET target.v = b.v_max\n'
        'WHEN NOT MATCHED THEN INSERT (k, v) VALUES (b.k, b.v_max)'
    )
    expected = [
        (ColumnQualifierTuple('v', 'src'), ColumnQualifierTuple('v', 'target')),
        (ColumnQualifierTuple('k', 'src'), ColumnQualifierTuple('k', 'target')),
    ]
    assert_column_lineage_equal(sql, expected)
class GELUActivation(nn.Module):
    """Gaussian Error Linear Unit activation (exact, erf-based form).

    Delegates to ``nn.functional.gelu`` when available; falls back to the
    Python erf formula for torch releases before 1.4, or always when
    ``use_gelu_python`` is requested.
    """

    def __init__(self, use_gelu_python: bool=False):
        super().__init__()
        torch_release = version.parse(version.parse(torch.__version__).base_version)
        if use_gelu_python or torch_release < version.parse('1.4'):
            self.act = self._gelu_python
        else:
            self.act = nn.functional.gelu

    def _gelu_python(self, input: Tensor) -> Tensor:
        # 0.5 * x * (1 + erf(x / sqrt(2))) — the exact (non-tanh) GELU.
        return 0.5 * input * (1.0 + torch.erf(input / math.sqrt(2.0)))

    def forward(self, input: Tensor) -> Tensor:
        return self.act(input)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.