code stringlengths 281 23.7M |
|---|
def blksize(path):
    """Return the filesystem allocation-block size, in bytes, for *path*.

    On POSIX this is ``statvfs``'s preferred I/O block size; on Windows it
    is sectors-per-cluster times bytes-per-sector for the drive holding
    *path*, obtained via ``GetDiskFreeSpaceW``.
    """
    if os.name == 'nt':
        import ctypes
        drive_root = os.path.splitdrive(os.path.abspath(path))[0] + '\\'
        sectors_per_cluster = ctypes.c_longlong(0)
        bytes_per_sector = ctypes.c_longlong(0)
        # Last two out-params (free/total clusters) are not needed.
        ctypes.windll.kernel32.GetDiskFreeSpaceW(
            ctypes.c_wchar_p(drive_root),
            ctypes.pointer(sectors_per_cluster),
            ctypes.pointer(bytes_per_sector),
            None,
            None,
        )
        return int(sectors_per_cluster.value * bytes_per_sector.value)
    return os.statvfs(path).f_bsize
class TestReporter(Dataset):
    """Collects per-dataset predictions and flushes them to EvalAI-style
    JSON report files, one file per dataset.

    Wraps a multi-task instance and iterates its datasets via
    ``next_dataset``; accumulated results are written by ``flush_report``
    (main process only).
    """

    def __init__(self, multi_task_instance):
        self.test_task = multi_task_instance
        self.task_type = multi_task_instance.dataset_type
        self.config = registry.get('config')
        self.writer = registry.get('writer')
        self.report = []
        self.timer = Timer()
        self.training_parameters = self.config['training_parameters']
        self.num_workers = self.training_parameters['num_workers']
        self.batch_size = self.training_parameters['batch_size']
        self.report_folder_arg = self.config.get('report_folder', None)
        self.experiment_name = self.training_parameters.get('experiment_name', '')
        self.datasets = list(self.test_task.get_datasets())
        # -1 so the first next_dataset() call advances to index 0.
        self.current_dataset_idx = -1
        self.current_dataset = self.datasets[self.current_dataset_idx]
        self.save_dir = self.config.training_parameters.save_dir
        self.report_folder = ckpt_name_from_core_args(self.config)
        self.report_folder += foldername_from_config_override(self.config)
        self.report_folder = os.path.join(self.save_dir, self.report_folder)
        self.report_folder = os.path.join(self.report_folder, 'reports')
        # An explicit config override wins over the derived folder name.
        if self.report_folder_arg is not None:
            self.report_folder = self.report_folder_arg
        if not os.path.exists(self.report_folder):
            os.makedirs(self.report_folder)

    def next_dataset(self):
        """Advance to the next dataset, flushing any pending report first.

        Returns False when all datasets have been consumed.
        """
        if self.current_dataset_idx >= 0:
            self.flush_report()
        self.current_dataset_idx += 1
        if self.current_dataset_idx == len(self.datasets):
            return False
        self.current_dataset = self.datasets[self.current_dataset_idx]
        self.writer.write('Predicting for ' + self.current_dataset._name)
        return True

    def flush_report(self):
        """Write accumulated predictions to a timestamped JSON file.

        No-op on non-main processes; clears the in-memory report afterwards.
        """
        if not is_main_process():
            return
        name = self.current_dataset._name
        time_format = '%Y-%m-%dT%H:%M:%S'
        time = self.timer.get_time_hhmmss(None, format=time_format)
        filename = name + '_'
        if len(self.experiment_name) > 0:
            filename += self.experiment_name + '_'
        filename += self.task_type + '_'
        filename += time + '.json'
        filepath = os.path.join(self.report_folder, filename)
        with open(filepath, 'w') as f:
            json.dump(self.report, f)
        self.writer.write('Wrote evalai predictions for %s to %s' % (name, os.path.abspath(filepath)))
        self.report = []

    def get_dataloader(self):
        """Build a DataLoader over the current dataset."""
        other_args = self._add_extra_args_for_dataloader()
        return DataLoader(dataset=self.current_dataset, collate_fn=BatchCollator(), num_workers=self.num_workers, pin_memory=self.config.training_parameters.pin_memory, **other_args)

    def _add_extra_args_for_dataloader(self, other_args=None):
        """Fill sampler/shuffle and per-GPU batch size kwargs for DataLoader.

        Raises RuntimeError when the configured batch size is not divisible
        by the world size.
        """
        # FIX: the previous mutable `{}` default was shared across calls.
        if other_args is None:
            other_args = {}
        training_parameters = self.config.training_parameters
        if training_parameters.local_rank is not None and training_parameters.distributed:
            other_args['sampler'] = DistributedSampler(self.current_dataset)
        else:
            other_args['shuffle'] = True
        batch_size = training_parameters.batch_size
        world_size = get_world_size()
        if batch_size % world_size != 0:
            raise RuntimeError('Batch size {} must be divisible by number of GPUs {} used.'.format(batch_size, world_size))
        other_args['batch_size'] = batch_size // world_size
        return other_args

    def prepare_batch(self, batch):
        return self.current_dataset.prepare_batch(batch)

    def __len__(self):
        return len(self.current_dataset)

    def __getitem__(self, idx):
        return self.current_dataset[idx]

    def add_to_report(self, report):
        """Gather distributed tensors and append formatted results.

        Only the main process accumulates; other ranks return after the
        gather so collective ops stay in sync.
        """
        if self.current_dataset._name == 'coco':
            report.captions = gather_tensor(report.captions)
            if isinstance(report.image_id, torch.Tensor):
                report.image_id = gather_tensor(report.image_id).view(-1)
        else:
            report.scores = gather_tensor(report.scores).view(-1, report.scores.size(-1))
            report.question_id = gather_tensor(report.question_id).view(-1)
        if not is_main_process():
            return
        results = self.current_dataset.format_for_evalai(report)
        self.report = self.report + results
def rewrite_record(bdist_dir: str) -> None:
    """Regenerate the wheel's RECORD file from the files under *bdist_dir*.

    Any existing RECORD.jws signature is removed first, since it would no
    longer match the rewritten RECORD. Each file is listed with its
    urlsafe-base64 sha256 digest and size; the RECORD entry for RECORD
    itself carries empty hash and size fields, per the wheel spec.
    """
    info_dir = _dist_info_dir(bdist_dir)
    record_path = pjoin(info_dir, 'RECORD')
    record_relpath = relpath(record_path, bdist_dir)
    sig_path = pjoin(info_dir, 'RECORD.jws')
    if exists(sig_path):
        os.unlink(sig_path)

    def iter_files() -> Generator[(str, None, None)]:
        for root, _subdirs, filenames in os.walk(bdist_dir):
            for filename in filenames:
                yield pjoin(root, filename)

    with open(record_path, 'w+', newline='', encoding='utf-8') as record_file:
        writer = csv.writer(record_file)
        for abs_path in iter_files():
            rel = relpath(abs_path, bdist_dir)
            if rel == record_relpath:
                # RECORD cannot contain its own hash; leave fields blank.
                hash_, size = '', ''
            else:
                with open(abs_path, 'rb') as f:
                    data = f.read()
                digest = hashlib.sha256(data).digest()
                encoded = urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
                hash_ = f'sha256={encoded}'
                size = f'{len(data)}'
            # RECORD paths always use forward slashes, regardless of OS.
            writer.writerow((rel.replace(psep, '/'), hash_, size))
class FMaxListener(Listener):
    """Appends the maximum F-measure to a timestamped log file each time an
    ``evaluation_finished`` signal fires."""

    def __init__(self, name, beta=1):
        self.beta = beta
        self.fname = _timestamped_filename('%s-Fmax' % name)
        # Subscribe to the evaluation-finished smokesignal event.
        smokesignal.on('evaluation_finished', self.on_evaluation_finished)
        super().__init__()

    def on_evaluation_finished(self, evaluation, dataset, predictor):
        """Record the best F-beta score for this (dataset, predictor) pair."""
        best_fmax = evaluation.f_score(self.beta).max()
        status = f'{dataset} {predictor} {best_fmax:.4f}\n'
        with open(self.fname, 'a') as out:
            out.write(status)
        log.info('Evaluation finished: %s', status)
# NOTE(review): the bare tuple below looks like the argument list of a
# mangled mock decorator -- presumably ``@patch.object(StepsRunner,
# 'run_step_group')`` -- whose ``@patch.object`` prefix was lost. As
# written it is a no-op expression and ``mock_run_step_group`` will not
# be injected. Confirm against the original test module.
(StepsRunner, 'run_step_group')
def test_run_step_groups_sequence(mock_run_step_group):
    # Groups must run in the order given, then the success group; the
    # failure group is never invoked on the happy path.
    StepsRunner(get_valid_test_pipeline(), Context()).run_step_groups(groups=['sg3', 'sg1', 'sg2', 'sg4'], success_group='arb success', failure_group='arb fail')
    assert (mock_run_step_group.mock_calls == [call('sg3'), call('sg1'), call('sg2'), call('sg4'), call('arb success')])
class Base(object):
    """Abstract base for parse-tree nodes (lib2to3 pytree style).

    Maintains parent/child links and a dirty flag (``was_changed``) that
    propagates to ancestors whenever a subtree is modified. Subclasses
    (node/leaf types) must implement ``_eq``, ``clone``, ``post_order``
    and ``pre_order``.
    """
    # Per-node defaults; overridden by subclasses or per instance.
    type = None          # token/symbol type code -- TODO confirm semantics
    parent = None        # enclosing node, or None for the root
    children = ()        # child nodes; leaves keep the empty tuple
    was_changed = False  # True once this subtree has been modified
    was_checked = False
    def __new__(cls, *args, **kwds):
        """Refuse direct instantiation of the abstract Base class."""
        assert (cls is not Base), 'Cannot instantiate Base'
        return object.__new__(cls)
    def __eq__(self, other):
        """Equality defers to the subclass ``_eq``; cross-class compares
        return NotImplemented rather than False."""
        if (self.__class__ is not other.__class__):
            return NotImplemented
        return self._eq(other)
    # __eq__ is defined, so explicitly mark instances unhashable.
    __hash__ = None
    def _eq(self, other):
        """Subclass hook: compare two same-class nodes for equality."""
        raise NotImplementedError
    def clone(self):
        """Subclass hook: return a deep copy of this subtree."""
        raise NotImplementedError
    def post_order(self):
        """Subclass hook: iterate nodes in post-order."""
        raise NotImplementedError
    def pre_order(self):
        """Subclass hook: iterate nodes in pre-order."""
        raise NotImplementedError
    def replace(self, new):
        """Replace this node in its parent's children with *new* (a node
        or a list of nodes), re-parenting the replacements and detaching
        this node."""
        assert (self.parent is not None), str(self)
        assert (new is not None)
        if (not isinstance(new, list)):
            new = [new]
        l_children = []
        found = False
        for ch in self.parent.children:
            if (ch is self):
                # A node may appear at most once among its parent's children.
                assert (not found), (self.parent.children, self, new)
                if (new is not None):
                    l_children.extend(new)
                found = True
            else:
                l_children.append(ch)
        assert found, (self.children, self, new)
        self.parent.changed()
        self.parent.children = l_children
        for x in new:
            x.parent = self.parent
        self.parent = None
    def get_lineno(self):
        """Return the line number of the leftmost leaf under this node,
        or None if the subtree has no leaves."""
        node = self
        while (not isinstance(node, Leaf)):
            if (not node.children):
                return
            node = node.children[0]
        return node.lineno
    def changed(self):
        """Mark this node dirty and propagate the flag to all ancestors."""
        if self.parent:
            self.parent.changed()
        self.was_changed = True
    def remove(self):
        """Detach this node from its parent and return its former index;
        returns None when the node has no parent."""
        if self.parent:
            for (i, node) in enumerate(self.parent.children):
                if (node is self):
                    self.parent.changed()
                    del self.parent.children[i]
                    self.parent = None
                    return i
    def next_sibling(self):
        """Return the sibling immediately after this node, or None."""
        if (self.parent is None):
            return None
        for (i, child) in enumerate(self.parent.children):
            if (child is self):
                try:
                    return self.parent.children[(i + 1)]
                except IndexError:
                    return None
    def prev_sibling(self):
        """Return the sibling immediately before this node, or None."""
        if (self.parent is None):
            return None
        for (i, child) in enumerate(self.parent.children):
            if (child is self):
                if (i == 0):
                    return None
                return self.parent.children[(i - 1)]
    def leaves(self):
        """Yield every leaf node beneath this node (leaves override this)."""
        for child in self.children:
            (yield from child.leaves())
    def depth(self):
        """Return the number of ancestors between this node and the root."""
        if (self.parent is None):
            return 0
        return (1 + self.parent.depth())
    def get_suffix(self):
        """Return the prefix of the following sibling, or '' if none.

        NOTE(review): ``next_sibling`` is a plain method here, so
        ``self.next_sibling`` yields a bound method that is never None;
        upstream lib2to3 defines it as a property -- confirm which is
        intended before relying on this.
        """
        next_sib = self.next_sibling
        if (next_sib is None):
            return ''
        return next_sib.prefix
    if (sys.version_info < (3, 0)):
        def __str__(self):
            # NOTE(review): ``str(self)`` re-enters __str__ on Python 2
            # (infinite recursion) unless a subclass overrides it; the
            # upstream version used ``unicode(self)`` -- confirm.
            return str(self).encode('ascii')
def get_valid_stats(trainer):
    """Collect validation statistics from *trainer*'s meters.

    Always reports 'valid_loss' and 'valid_ppl'; 'valid_nll_loss' is
    included only when its meter has recorded at least one value, in
    which case perplexity is computed from it instead of the raw loss.
    """
    stats = OrderedDict()
    valid_loss = trainer.get_meter('valid_loss').avg
    stats['valid_loss'] = valid_loss
    nll_meter = trainer.get_meter('valid_nll_loss')
    if nll_meter.count > 0:
        nll_loss = nll_meter.avg
        stats['valid_nll_loss'] = nll_loss
    else:
        # No NLL samples recorded; fall back to the plain validation loss.
        nll_loss = valid_loss
    stats['valid_ppl'] = get_perplexity(nll_loss)
    return stats
def build_optimizer(args, model):
    """Build an AdamW optimizer over *model*'s trainable parameters.

    Args:
        args: namespace with ``only_prompt_loss``, ``filter_biases_wd``,
            ``weight_decay`` and ``base_lr`` attributes.
        model: a module exposing ``named_parameters()``.

    Returns:
        torch.optim.AdamW with either one parameter group, or two groups
        (no-decay / decay) when ``args.filter_biases_wd`` is set.
    """
    params_with_decay = []
    params_without_decay = []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        # When only the prompt loss is trained, CLIP weights are skipped
        # entirely (they receive no gradient updates).
        if args.only_prompt_loss and 'clip_model' in name:
            continue
        # Biases and 1-D parameters (e.g. norm scales) conventionally get
        # no weight decay.
        if args.filter_biases_wd and (len(param.shape) == 1 or name.endswith('bias')):
            params_without_decay.append(param)
        else:
            params_with_decay.append(param)
    if args.filter_biases_wd:
        param_groups = [
            {'params': params_without_decay, 'weight_decay': 0.0},
            {'params': params_with_decay, 'weight_decay': args.weight_decay},
        ]
    else:
        param_groups = [{'params': params_with_decay, 'weight_decay': args.weight_decay}]
    return torch.optim.AdamW(param_groups, lr=args.base_lr)
class Effect6561(BaseEffect):
    """Passive effect: boosts max velocity of fighters that require the
    'Light Fighters' skill, scaled by the source's skill level."""

    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        skill_level = src.level
        bonus = src.getModifiedItemAttr('maxVelocityBonus') * skill_level
        fit.fighters.filteredItemBoost(
            lambda mod: mod.item.requiresSkill('Light Fighters'),
            'maxVelocity',
            bonus,
            **kwargs)
def GetLineWidth(line):
    """Return the display width of *line* in column positions.

    Wide/fullwidth East Asian characters count as two columns, combining
    characters as zero, and all other characters as one.

    NOTE(review): relies on the Python 2 ``unicode`` builtin -- under
    Python 3 the isinstance check raises NameError. Non-unicode input
    falls back to ``len`` (byte count).
    """
    if isinstance(line, unicode):
        width = 0
        # Normalize to NFC so precomposed and combining sequences are
        # counted consistently.
        for uc in unicodedata.normalize('NFC', line):
            if (unicodedata.east_asian_width(uc) in ('W', 'F')):
                width += 2
            elif (not unicodedata.combining(uc)):
                width += 1
        return width
    else:
        return len(line)
class Agilent34410A(Instrument):
    """Represents the HP/Agilent/Keysight 34410A digital multimeter.

    Each measurement attribute issues the corresponding SCPI
    ``MEAS:...? DEF,DEF`` query (default range and resolution) and
    returns the reading.
    """
    voltage_dc = Instrument.measurement('MEAS:VOLT:DC? DEF,DEF', 'DC voltage, in Volts')
    voltage_ac = Instrument.measurement('MEAS:VOLT:AC? DEF,DEF', 'AC voltage, in Volts')
    current_dc = Instrument.measurement('MEAS:CURR:DC? DEF,DEF', 'DC current, in Amps')
    current_ac = Instrument.measurement('MEAS:CURR:AC? DEF,DEF', 'AC current, in Amps')
    resistance = Instrument.measurement('MEAS:RES? DEF,DEF', 'Resistance, in Ohms')
    resistance_4w = Instrument.measurement('MEAS:FRES? DEF,DEF', 'Four-wires (remote sensing) resistance, in Ohms')
    def __init__(self, adapter, name='HP/Agilent/Keysight 34410A Multimeter', **kwargs):
        """Pass the adapter and instrument name through to Instrument."""
        super().__init__(adapter, name, **kwargs)
class MouseHandler():
    """Handles OpenCV mouse events for manually editing face bounding
    boxes, re-running the detector/aligner pipeline to refresh landmarks
    as the box is moved or resized."""

    def __init__(self, interface, loglevel):
        logger.debug('Initializing %s: (interface: %s, loglevel: %s)', self.__class__.__name__, interface, loglevel)
        self.interface = interface
        self.alignments = interface.alignments
        self.frames = interface.frames
        self.extractor = dict()
        self.init_extractor(loglevel)
        self.mouse_state = None
        self.last_move = None
        self.center = None
        self.dims = None
        self.media = {'frame_id': None, 'image': None, 'bounding_box': list(), 'bounding_last': list(), 'bounding_box_orig': list()}
        logger.debug('Initialized %s', self.__class__.__name__)

    def init_extractor(self, loglevel):
        """Spawn the detector and aligner subprocesses, falling back from
        FAN to CV2-DNN if the FAN aligner fails to initialize."""
        logger.debug('Initialize Extractor')
        out_queue = queue_manager.get_queue('out')
        d_kwargs = {'in_queue': queue_manager.get_queue('in'), 'out_queue': queue_manager.get_queue('align')}
        a_kwargs = {'in_queue': queue_manager.get_queue('align'), 'out_queue': out_queue}
        detector = PluginLoader.get_detector('manual')(loglevel=loglevel)
        detect_process = SpawnProcess(detector.run, **d_kwargs)
        d_event = detect_process.event
        detect_process.start()
        plugins = (['fan_amd'] if (get_backend() == 'amd') else ['fan'])
        plugins.append('cv2_dnn')
        for plugin in plugins:
            aligner = PluginLoader.get_aligner(plugin)(loglevel=loglevel, normalize_method='hist')
            align_process = SpawnProcess(aligner.run, **a_kwargs)
            a_event = align_process.event
            align_process.start()
            # Give the aligner up to 5 minutes to signal readiness.
            a_event.wait(300)
            if (not a_event.is_set()):
                if plugin.startswith('fan'):
                    align_process.join()
                    logger.error('Error initializing FAN. Trying CV2-DNN')
                    continue
                else:
                    raise ValueError('Error inititalizing Aligner')
            if (plugin == 'cv2_dnn'):
                break
            # FAN signalled ready; check whether it pushed an error.
            try:
                err = None
                err = out_queue.get(True, 1)
            except QueueEmpty:
                pass
            if (not err):
                break
            align_process.join()
            logger.error('Error initializing FAN. Trying CV2-DNN')
        d_event.wait(10)
        if (not d_event.is_set()):
            raise ValueError('Error inititalizing Detector')
        self.extractor['detect'] = detector
        self.extractor['align'] = aligner
        logger.debug('Initialized Extractor')

    def on_event(self, event, x, y, flags, param):
        """Dispatch a cv2 mouse callback: left-drag moves the box,
        middle-drag resizes it; only active in 'Edit' mode."""
        if (self.interface.get_edit_mode() != 'Edit'):
            return
        logger.trace('Mouse event: (event: %s, x: %s, y: %s, flags: %s, param: %s', event, x, y, flags, param)
        if ((not self.mouse_state) and (event not in (cv2.EVENT_LBUTTONDOWN, cv2.EVENT_MBUTTONDOWN))):
            return
        self.initialize()
        if (event in (cv2.EVENT_LBUTTONUP, cv2.EVENT_MBUTTONUP)):
            self.mouse_state = None
            self.last_move = None
        elif (event == cv2.EVENT_LBUTTONDOWN):
            self.mouse_state = 'left'
            self.set_bounding_box(x, y)
        elif (event == cv2.EVENT_MBUTTONDOWN):
            self.mouse_state = 'middle'
            self.set_bounding_box(x, y)
        elif (event == cv2.EVENT_MOUSEMOVE):
            if (self.mouse_state == 'left'):
                self.move_bounding_box(x, y)
            elif (self.mouse_state == 'middle'):
                self.resize_bounding_box(x, y)

    def initialize(self):
        """Reset per-frame editing state when the displayed frame changes."""
        frame = self.interface.get_frame_name()
        if (frame == self.media['frame_id']):
            return
        logger.debug("Initialize frame: '%s'", frame)
        self.media['frame_id'] = frame
        self.media['image'] = self.frames.load_image(frame)
        self.dims = None
        self.center = None
        self.last_move = None
        self.mouse_state = None
        self.media['bounding_box'] = list()
        self.media['bounding_box_orig'] = list()

    def set_bounding_box(self, pt_x, pt_y):
        """Begin a drag: size the box from the selected face's alignment
        if one is selected/clicked, otherwise from the image size."""
        if (self.interface.get_selected_face_id() is None):
            self.check_click_location(pt_x, pt_y)
        if (self.interface.get_selected_face_id() is not None):
            self.dims_from_alignment()
        else:
            self.dims_from_image()
        self.move_bounding_box(pt_x, pt_y)

    def check_click_location(self, pt_x, pt_y):
        """Select the face whose stored alignment box contains the click,
        converting display coordinates back to frame coordinates."""
        frame = self.media['frame_id']
        alignments = self.alignments.get_faces_in_frame(frame)
        scale = self.interface.get_frame_scaling()
        pt_x = int((pt_x / scale))
        pt_y = int((pt_y / scale))
        for (idx, alignment) in enumerate(alignments):
            left = alignment['x']
            right = (alignment['x'] + alignment['w'])
            top = alignment['y']
            bottom = (alignment['y'] + alignment['h'])
            if ((left <= pt_x <= right) and (top <= pt_y <= bottom)):
                self.interface.set_state_value('edit', 'selected', idx)
                break

    def dims_from_alignment(self):
        """Take the box dimensions from the selected face's alignment."""
        frame = self.media['frame_id']
        face_id = self.interface.get_selected_face_id()
        alignment = self.alignments.get_faces_in_frame(frame)[face_id]
        self.dims = (alignment['w'], alignment['h'])

    def dims_from_image(self):
        """Default to a square box one tenth of the frame's longer side."""
        size = max(self.media['image'].shape[:2])
        dim = int((size / 10.0))
        self.dims = (dim, dim)

    def bounding_from_center(self):
        """Recompute the frame-space [left, top, right, bottom] box from
        the display-space center point and current dimensions."""
        (pt_x, pt_y) = self.center
        (width, height) = self.dims
        scale = self.interface.get_frame_scaling()
        self.media['bounding_box'] = [int(((pt_x / scale) - (width / 2))), int(((pt_y / scale) - (height / 2))), int(((pt_x / scale) + (width / 2))), int(((pt_y / scale) + (height / 2)))]

    def move_bounding_box(self, pt_x, pt_y):
        """Recenter the box on the cursor and refresh landmarks."""
        self.center = (pt_x, pt_y)
        self.bounding_from_center()
        self.update_landmarks()

    def resize_bounding_box(self, pt_x, pt_y):
        """Grow/shrink the box by the drag delta, clamped to a minimum
        size around the center, then refresh landmarks."""
        scale = self.interface.get_frame_scaling()
        if (not self.last_move):
            self.last_move = (pt_x, pt_y)
            self.media['bounding_box_orig'] = self.media['bounding_box']
        move_x = int((pt_x - self.last_move[0]))
        move_y = int((self.last_move[1] - pt_y))
        original = self.media['bounding_box_orig']
        updated = self.media['bounding_box']
        minsize = int((10 / scale))
        center = (int((self.center[0] / scale)), int((self.center[1] / scale)))
        updated[0] = min((center[0] - minsize), (original[0] - move_x))
        updated[1] = min((center[1] - minsize), (original[1] - move_y))
        updated[2] = max((center[0] + minsize), (original[2] + move_x))
        updated[3] = max((center[1] + minsize), (original[3] + move_y))
        self.update_landmarks()
        self.last_move = (pt_x, pt_y)

    def update_landmarks(self):
        """Feed the current box through the aligner and store the result
        as a new or updated alignment for the frame."""
        queue_manager.get_queue('in').put({'image': self.media['image'], 'filename': self.media['frame_id'], 'face': self.media['bounding_box']})
        landmarks = queue_manager.get_queue('out').get()
        if (isinstance(landmarks, dict) and landmarks.get('exception')):
            cv2.destroyAllWindows()
            pid = landmarks['exception'][0]
            t_back = landmarks['exception'][1].getvalue()
            err = 'Error in child process {}. {}'.format(pid, t_back)
            raise Exception(err)
        if (landmarks == 'EOF'):
            exit(0)
        alignment = self.extracted_to_alignment((landmarks['detected_faces'][0], landmarks['landmarks'][0]))
        frame = self.media['frame_id']
        if (self.interface.get_selected_face_id() is None):
            idx = self.alignments.add_face(frame, alignment)
            self.interface.set_state_value('edit', 'selected', idx)
        else:
            self.alignments.update_face(frame, self.interface.get_selected_face_id(), alignment)
            self.interface.set_redraw(True)
        self.interface.state['edit']['updated'] = True
        self.interface.state['edit']['update_faces'] = True

    # FIX: this was a plain method without ``self``, so the call above
    # (``self.extracted_to_alignment(...)``) raised TypeError (the bound
    # instance consumed the only positional slot). Made it a staticmethod.
    @staticmethod
    def extracted_to_alignment(extract_data):
        """Convert an (bbox-dict, landmarks) pair to an alignment dict
        with x/y/w/h and landmarksXY keys."""
        alignment = dict()
        (bbox, landmarks) = extract_data
        alignment['x'] = bbox['left']
        alignment['w'] = (bbox['right'] - bbox['left'])
        alignment['y'] = bbox['top']
        alignment['h'] = (bbox['bottom'] - bbox['top'])
        alignment['landmarksXY'] = landmarks
        return alignment
# NOTE(review): the two lines below look like the tail ends of mangled
# pytest decorators -- presumably ``@raise_on_failure`` (or similar) and
# ``@pytest.mark.parametrize('number_of_nodes', [2])`` -- whose prefixes
# were lost. As written they are no-op expressions / a syntax fragment.
# Confirm against the original test module.
_on_failure
.parametrize('number_of_nodes', [2])
def test_settle_is_automatically_called(raiden_network: List[RaidenService], token_addresses: List[TokenAddress]) -> None:
    # Close the channel from app1's side and verify that app0 observes
    # both the close and the automatically-triggered settle, including
    # the corresponding state changes in the write-ahead log.
    (app0, app1) = raiden_network
    registry_address = app0.default_registry.address
    token_address = token_addresses[0]
    token_network_address = views.get_token_network_address_by_token_address(views.state_from_raiden(app0), app0.default_registry.address, token_address)
    assert token_network_address
    token_network = views.get_token_network_by_address(views.state_from_raiden(app0), token_network_address)
    assert token_network
    channel_identifier = get_channelstate(app0, app1, token_network_address).identifier
    assert (channel_identifier in token_network.partneraddresses_to_channelidentifiers[app1.address])
    # Non-cooperative close forces the on-chain settle path.
    RaidenAPI(app1).channel_close(registry_address, token_address, app0.address, coop_settle=False)
    waiting.wait_for_close(app0, registry_address, token_address, [channel_identifier], app0.alarm.sleep_time)
    channel_state = views.get_channelstate_for(views.state_from_raiden(app0), registry_address, token_address, app1.address)
    assert channel_state
    assert channel_state.close_transaction
    assert channel_state.close_transaction.finished_block_number
    waiting.wait_for_settle(app0, registry_address, token_address, [channel_identifier], app0.alarm.sleep_time)
    token_network = views.get_token_network_by_address(views.state_from_raiden(app0), token_network_address)
    assert token_network
    # After settling, the channel must be removed from the partner mapping.
    assert (channel_identifier not in token_network.partneraddresses_to_channelidentifiers[app1.address])
    assert app0.wal, MSG_BLOCKCHAIN_EVENTS
    assert app0.alarm, MSG_BLOCKCHAIN_EVENTS
    state_changes = app0.wal.storage.get_statechanges_by_range(RANGE_ALL_STATE_CHANGES)
    assert search_for_item(state_changes, ContractReceiveChannelClosed, {'token_network_address': token_network_address, 'channel_identifier': channel_identifier, 'transaction_from': app1.address, 'block_number': channel_state.close_transaction.finished_block_number})
    assert search_for_item(state_changes, ContractReceiveChannelSettled, {'token_network_address': token_network_address, 'channel_identifier': channel_identifier})
# NOTE(review): the line below is the tail of a mangled decorator --
# presumably ``@pytest.mark.parametrize('delayed', [True, False])`` --
# whose prefix was lost; as written it is a syntax fragment. Confirm
# against the original test module.
.parametrize('delayed', [True, False])
def test_zero_timeout(qtbot, timer, delayed, signaller):
    # With a zero timeout, only a signal emitted synchronously inside the
    # context manager can be caught; a delayed emission must be missed.
    with qtbot.waitSignal(signaller.signal, raising=False, timeout=0) as blocker:
        if delayed:
            timer.single_shot(signaller.signal, 0)
        else:
            signaller.signal.emit()
    assert (blocker.signal_triggered != delayed)
class GeneralTab(QWidget):
    """Property-dialog tab showing a file's name, path, size and access
    timestamps, laid out vertically."""

    def __init__(self, fileInfo, parent=None):
        super(GeneralTab, self).__init__(parent)

        def framed_value(text):
            # Value labels share the same sunken-panel frame style.
            label = QLabel(text)
            label.setFrameStyle(QFrame.Panel | QFrame.Sunken)
            return label

        fileNameLabel = QLabel('File Name:')
        fileNameEdit = QLineEdit(fileInfo.fileName())
        pathLabel = QLabel('Path:')
        pathValueLabel = framed_value(fileInfo.absoluteFilePath())
        sizeLabel = QLabel('Size:')
        sizeValueLabel = framed_value('%d K' % (fileInfo.size() // 1024))
        lastReadLabel = QLabel('Last Read:')
        lastReadValueLabel = framed_value(fileInfo.lastRead().toString())
        lastModLabel = QLabel('Last Modified:')
        lastModValueLabel = framed_value(fileInfo.lastModified().toString())

        mainLayout = QVBoxLayout()
        for widget in (fileNameLabel, fileNameEdit,
                       pathLabel, pathValueLabel,
                       sizeLabel, sizeValueLabel,
                       lastReadLabel, lastReadValueLabel,
                       lastModLabel, lastModValueLabel):
            mainLayout.addWidget(widget)
        mainLayout.addStretch(1)
        self.setLayout(mainLayout)
def is_no_type_check_decorator(expr: ast3.expr) -> bool:
    """Return True if *expr* is the ``no_type_check`` decorator, written
    either as a bare name or as ``typing.no_type_check``."""
    if isinstance(expr, Name):
        return expr.id == 'no_type_check'
    if isinstance(expr, Attribute) and isinstance(expr.value, Name):
        return expr.value.id == 'typing' and expr.attr == 'no_type_check'
    return False
def validate(mw_model, model, val_loader):
    """Run IQA validation over *val_loader*.

    Puts both models in eval mode, predicts a quality score per batch and
    compares against ground-truth DMOS values.

    Returns:
        (srocc, krocc, plcc, rmse, mae) tuple from ``metricIQA``.
    """
    mw_model.eval()
    model.eval()
    scores = []
    gt_scores = []
    for num, val_batch in enumerate(val_loader):
        im_mw, imp_iwt, gt_iwt, im_dmos = val_batch
        print(im_mw.size())
        pre_iwt = mw_model(im_mw)
        # Normalize each predicted map on CPU, then restack on GPU.
        normalized = [LocalNormalization(pre_iwt[idx][0].detach().cpu().numpy())
                      for idx in range(pre_iwt.size(0))]
        pre_iwt = torch.stack(normalized).cuda()
        pre_score = model(imp_iwt, pre_iwt - imp_iwt)
        scores.append(pre_score.squeeze(0).detach().cpu().numpy())
        gt_scores.append(im_dmos.squeeze(0).cpu().numpy())
    scores = np.array(scores)
    gt_scores = np.array(gt_scores)
    print(scores, gt_scores)
    srocc, krocc, plcc, rmse, mae = metricIQA(scores, gt_scores)
    return (srocc, krocc, plcc, rmse, mae)
def rank_vote(key: str, match: typing.Dict[(int, typing.List[typing.List[typing.Dict[(str, str)]]])], scores: typing.List[typing.Dict[(str, float)]]) -> typing.List[typing.List[typing.Dict[(str, str)]]]:
    """Re-rank each query's retrieved documents by their voted scores.

    For every query, documents are looked up by their *key* field and
    re-ordered by descending score; each returned document carries a
    fresh 'similarity' entry with the new score (the old one is dropped).
    """
    queries_rank = []
    for documents_query, query_scores in zip(match.values(), scores):
        by_key = {doc[key]: doc for doc in documents_query}
        ranked = []
        # Highest-scoring key values first.
        for key_value in sorted(query_scores, key=query_scores.get, reverse=True):
            doc = by_key[key_value]
            doc.pop('similarity')
            ranked.append({**doc, 'similarity': query_scores[key_value]})
        queries_rank.append(ranked)
    return queries_rank
def conv_init(m):
    """Weight initializer for ``model.apply``: Xavier-uniform weights with
    ReLU gain for Conv layers, unit scale / zero shift for BatchNorm.

    Biases are zeroed only when present (Conv layers created with
    ``bias=False`` previously crashed here).
    """
    classname = m.__class__.__name__
    if 'Conv' in classname:
        # Underscore (in-place) variants replace the deprecated
        # ``xavier_uniform`` / ``constant`` aliases.
        init.xavier_uniform_(m.weight, gain=math.sqrt(2))
        if m.bias is not None:
            init.constant_(m.bias, 0)
    elif 'BatchNorm' in classname:
        init.constant_(m.weight, 1)
        init.constant_(m.bias, 0)
class TestCreateNotify(EndianTest):
    """Round-trip pack/unpack test for the X11 CreateNotify event."""
    def setUp(self):
        # NOTE(review): the values for 'parent' and 'window' are missing
        # below (``'parent': ,`` / ``'window': ,``) -- this dict is
        # syntactically invalid as written. The values were most likely
        # X window resource ids that were lost in extraction; recover
        # them from the original test source (they must match the ids
        # encoded in ``evt_bin_0``).
        self.evt_args_0 = {'border_width': 56468, 'height': 7111, 'override': 0, 'parent': , 'sequence_number': 31058, 'type': 151, 'width': 44173, 'window': , 'x': (- 21847), 'y': (- 22248)}
        self.evt_bin_0 = b'\x97\x00Ry\xe9\xfc\x8a,\x1f\xc0E4\xa9\xaa\x18\xa9\x8d\xac\xc7\x1b\x94\xdc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    def testPack0(self):
        """Packing the args must reproduce the reference binary."""
        bin = event.CreateNotify._fields.to_binary(*(), **self.evt_args_0)
        self.assertBinaryEqual(bin, self.evt_bin_0)
    def testUnpack0(self):
        """Parsing the reference binary must reproduce the args exactly."""
        (args, remain) = event.CreateNotify._fields.parse_binary(self.evt_bin_0, dummy_display, 1)
        self.assertBinaryEmpty(remain)
        self.assertEqual(args, self.evt_args_0)
def parse_opt():
    """Build the argument parser for captioning training, parse CLI args,
    optionally overlay a YAML config (``--cfg`` / ``--set_cfgs``),
    validate the result and derive dependent paths/flags.

    Returns:
        argparse.Namespace with all training options resolved.
    """
    parser = argparse.ArgumentParser()
    # --- Data / checkpoint locations ---
    parser.add_argument('--data_folder', type=str, help='folder with data files saved by create_input_files.py.', default='')
    parser.add_argument('--model_path', type=str, help='path for pretrained ResNet.', default='')
    parser.add_argument('--word_map_file', type=str, help='path for word map.', default='')
    parser.add_argument('--model_folder', type=str, help='base folder to save models.', default='')
    parser.add_argument('--checkpoint', type=str, help='Resume training using checkpoint.', default='')
    parser.add_argument('--finetune_start_layer', type=int, help='finetune resnet from this layer apart.', default=5)
    parser.add_argument('--start_epoch', type=int, help='start epoch for training.', default=0)
    parser.add_argument('--epochs', type=int, help='number of epochs to train for (if early stopping is not triggered).', default=200)
    parser.add_argument('--epochs_since_improvement', type=int, help='keeps track of number of epochs since theirs been an improvement in validation BLEU.', default=0)
    parser.add_argument('--batch_size', type=int, help='batch size for training.', default=145)
    parser.add_argument('--workers', type=int, help='for data-loading; right now, only 1 works with h5py.', default=1)
    parser.add_argument('--print_freq', type=int, help='print training/validation stats every __ batches.', default=50)
    # --- Model dimensions ---
    parser.add_argument('--emb_dim', type=int, help='dimension of word embeddings.', default=512)
    parser.add_argument('--attention_dim', type=int, help='dimension of attention linear layers.', default=512)
    parser.add_argument('--decoder_dim', type=int, help='dimension of decoder RNN.', default=512)
    parser.add_argument('--encoded_image_size', type=int, help='encoded image size of encoder output.', default=14)
    parser.add_argument('--encoder_lr', type=float, help='learning rate for encoder if fine-tuning.', default=0.0001)
    parser.add_argument('--lr', type=float, help='learning rate for decoder.', default=0.0001)
    parser.add_argument('--alpha_c', type=float, help='regularization parameter for doubly stochastic attention, as in the paper.', default=1)
    parser.add_argument('--dropout', type=float, help='dropout rate.', default=0.5)
    parser.add_argument('--grad_clip', type=float, help='clip gradients at an absolute value of.', default=5)
    parser.add_argument('--best_cider', type=float, help='best cider saved.', default=0.0)
    parser.add_argument('--fine_tune_encoder', help='fine tune encoder or not.', action='store_true')
    parser.add_argument('--start_from', type=str, default=None, help="continue training from saved model at this path. Path must contain files saved by \n previous training process: \n 'infos.pkl' : configuration;\n 'checkpoint' : paths to model file(s) (created by tf).\n Note: this file contains absolute paths, be careful when moving files around;\n 'model.ckpt-*' : file(s) with model definition (created by tf)\n ")
    parser.add_argument('--cached_tokens', type=str, default='coco-train-idxs', help='Cached token file for calculating cider score during self critical training.')
    parser.add_argument('-m', '--nthreads', type=int, default=4, help='pytorch data loader threads')
    parser.add_argument('-ft', '--finetune_after', type=int, default=8, help='epochs after which vgg16 is fine-tuned')
    parser.add_argument('-st', '--lr_step_size', type=int, default=15, help='epochs to decay learning rate after')
    parser.add_argument('-sc', '--score_select', type=str, default='CIDEr', help='metric to pick best model')
    parser.add_argument('--attention', dest='attention', action='store_true', help='Use this for convcap with attention (by default set)')
    parser.add_argument('--no-attention', dest='attention', action='store_false', help='Use this for convcap without attention')
    parser.set_defaults(attention=True)
    # --- Caption model architecture ---
    parser.add_argument('--caption_model', type=str, default='show_tell', help='show_tell, show_attend_tell, all_img, fc, att2in, att2in2, adaatt, adaattmo, updown, transformer')
    parser.add_argument('--rnn_size', type=int, default=512, help='size of the rnn in number of hidden nodes in each layer')
    parser.add_argument('--num_layers', type=int, default=1, help='number of layers in the RNN')
    parser.add_argument('--rnn_type', type=str, default='lstm', help='rnn, gru, or lstm')
    parser.add_argument('--input_encoding_size', type=int, default=512, help='the encoding size of each token in the vocabulary, and the image.')
    parser.add_argument('--att_hid_size', type=int, default=512, help='the hidden size of the attention MLP; only useful in show_attend_tell; 0 if not using hidden layer')
    parser.add_argument('--fc_feat_size', type=int, default=2048, help='2048 for resnet, 4096 for vgg')
    parser.add_argument('--att_feat_size', type=int, default=2048, help='2048 for resnet, 512 for vgg')
    parser.add_argument('--logit_layers', type=int, default=1, help='number of layers in the RNN')
    parser.add_argument('--use_bn', type=int, default=0, help='If 1, then do batch_normalization first in att_embed, if 2 then do bn both in the beginning and the end of att_embed')
    parser.add_argument('--train_num', type=int, default=883212, help='number of training samples')
    parser.add_argument('--val_num', type=int, default=10000, help='number of validation samples')
    parser.add_argument('--test_num', type=int, default=993193, help='number of test samples')
    parser.add_argument('--norm_att_feat', type=int, default=0, help='If normalize attention features')
    parser.add_argument('--use_box', type=int, default=0, help='If use box features')
    parser.add_argument('--norm_box_feat', type=int, default=0, help='If use box, do we normalize box feature')
    parser.add_argument('--max_epochs', type=int, default=(- 1), help='number of epochs')
    parser.add_argument('--grad_clip_mode', type=str, default='value', help='value or norm')
    parser.add_argument('--grad_clip_value', type=float, default=0.1, help='clip gradients at this value/max_norm, 0 means no clipping')
    parser.add_argument('--drop_prob_lm', type=float, default=0.5, help='strength of dropout in the Language Model RNN')
    parser.add_argument('--self_critical_after', type=int, default=(- 1), help='After what epoch do we start finetuning the CNN? (-1 = disable; never finetune, 0 = finetune from start)')
    parser.add_argument('--seq_per_img', type=int, default=1, help='number of captions to sample for each image during training. Done for efficiency since CNN forward pass is expensive. E.g. coco has 5 sents/image, 1 for FACAD')
    add_eval_sample_opts(parser)
    # --- Optimization ---
    parser.add_argument('--optim', type=str, default='adam', help='what update to use? rmsprop|sgd|sgdmom|adagrad|adam')
    parser.add_argument('--learning_rate', type=float, default=0.0004, help='learning rate')
    parser.add_argument('--learning_rate_decay_start', type=int, default=(- 1), help='at what iteration to start decaying learning rate? (-1 = dont) (in epoch)')
    parser.add_argument('--learning_rate_decay_every', type=int, default=3, help='every how many iterations thereafter to drop LR?(in epoch)')
    parser.add_argument('--learning_rate_decay_rate', type=float, default=0.8, help='every how many iterations thereafter to drop LR?(in epoch)')
    parser.add_argument('--optim_alpha', type=float, default=0.9, help='alpha for adam')
    parser.add_argument('--optim_beta', type=float, default=0.999, help='beta used for adam')
    parser.add_argument('--optim_epsilon', type=float, default=1e-08, help='epsilon that goes into denominator for smoothing')
    parser.add_argument('--weight_decay', type=float, default=0, help='weight_decay')
    parser.add_argument('--label_smoothing', type=float, default=0, help='')
    parser.add_argument('--noamopt', action='store_true', help='')
    parser.add_argument('--noamopt_warmup', type=int, default=2000, help='')
    parser.add_argument('--noamopt_factor', type=float, default=1, help='')
    parser.add_argument('--reduce_on_plateau', action='store_true', help='')
    parser.add_argument('--scheduled_sampling_start', type=int, default=(- 1), help='at what iteration to start decay gt probability')
    parser.add_argument('--scheduled_sampling_increase_every', type=int, default=5, help='every how many iterations thereafter to gt probability')
    parser.add_argument('--scheduled_sampling_increase_prob', type=float, default=0.05, help='How much to update the prob')
    parser.add_argument('--scheduled_sampling_max_prob', type=float, default=0.25, help='Maximum scheduled sampling prob.')
    # --- Evaluation / checkpointing ---
    parser.add_argument('--val_images_use', type=int, default=3200, help='how many images to use when periodically evaluating the validation loss? (-1 = all)')
    parser.add_argument('--save_checkpoint_every', type=int, default=2500, help='how often to save a model checkpoint (in iterations)?')
    parser.add_argument('--save_every_epoch', action='store_true', help='Save checkpoint every epoch, will overwrite save_checkpoint_every')
    parser.add_argument('--save_history_ckpt', type=int, default=0, help='If save checkpoints at every save point')
    parser.add_argument('--checkpoint_path', type=str, default=None, help='directory to store checkpointed models')
    parser.add_argument('--language_eval', type=int, default=0, help='Evaluate language as well (1 = yes, 0 = no)? BLEU/CIDEr/METEOR/ROUGE_L? requires coco-caption code from Github.')
    parser.add_argument('--losses_log_every', type=int, default=25, help='How often do we snapshot losses, for inclusion in the progress dump? (0 = disable)')
    parser.add_argument('--load_best_score', type=int, default=1, help='Do we load previous best score when resuming training.')
    parser.add_argument('--id', type=str, default='', help='an id identifying this run/job. used in cross-val and appended when writing progress files')
    parser.add_argument('--train_only', type=int, default=0, help='if true then use 80k, else use 110k')
    # --- Self-critical / structure losses ---
    parser.add_argument('--cider_reward_weight', type=float, default=1, help='The reward weight from cider')
    parser.add_argument('--bleu_reward_weight', type=float, default=0, help='The reward weight from bleu4')
    parser.add_argument('--structure_loss_weight', type=float, default=1, help='')
    parser.add_argument('--structure_after', type=int, default=(- 1), help='T')
    parser.add_argument('--structure_loss_type', type=str, default='seqnll', help='')
    parser.add_argument('--struc_use_logsoftmax', action='store_true', help='')
    parser.add_argument('--entropy_reward_weight', type=float, default=0, help='Entropy reward, seems very interesting')
    parser.add_argument('--self_cider_reward_weight', type=float, default=0, help='self cider reward')
    parser.add_argument('--train_sample_n', type=int, default=16, help='The reward weight from cider')
    parser.add_argument('--train_sample_method', type=str, default='sample', help='')
    parser.add_argument('--train_beam_size', type=int, default=1, help='')
    parser.add_argument('--sc_sample_method', type=str, default='greedy', help='')
    parser.add_argument('--sc_beam_size', type=int, default=1, help='')
    add_diversity_opts(parser)
    parser.add_argument('--cfg', type=str, default=None, help='configuration; similar to what is used in detectron')
    parser.add_argument('--set_cfgs', dest='set_cfgs', help='Set config keys. Key value sequence seperate by whitespace.e.g. [key] [value] [key] [value]\n This has higher prioritythan cfg file but lower than other args. (You can only overwritearguments that have alerady been defined in config file.)', default=[], nargs='+')
    args = parser.parse_args()
    # Overlay YAML config values; explicit CLI args re-applied last so
    # they keep priority over the config file.
    # NOTE: set_cfgs defaults to [] and is never None, so this condition
    # effectively tests args.cfg only; kept as-is for compatibility.
    if ((args.cfg is not None) or (args.set_cfgs is not None)):
        from misc.config import CfgNode
        if (args.cfg is not None):
            cn = CfgNode(CfgNode.load_yaml_with_base(args.cfg))
        else:
            cn = CfgNode()
        if (args.set_cfgs is not None):
            cn.merge_from_list(args.set_cfgs)
        for (k, v) in cn.items():
            if (not hasattr(args, k)):
                print(('Warning: key %s not in args' % k))
            setattr(args, k, v)
        args = parser.parse_args(namespace=args)
    # --- Sanity checks ---
    assert (args.rnn_size > 0), 'rnn_size should be greater than 0'
    assert (args.num_layers > 0), 'num_layers should be greater than 0'
    assert (args.input_encoding_size > 0), 'input_encoding_size should be greater than 0'
    assert (args.batch_size > 0), 'batch_size should be greater than 0'
    assert (0 <= args.drop_prob_lm < 1), 'drop_prob_lm should be between 0 and 1'
    assert (args.seq_per_img > 0), 'seq_per_img should be greater than 0'
    assert (args.beam_size > 0), 'beam_size should be greater than 0'
    assert (args.save_checkpoint_every > 0), 'save_checkpoint_every should be greater than 0'
    assert (args.losses_log_every > 0), 'losses_log_every should be greater than 0'
    assert ((args.language_eval == 0) or (args.language_eval == 1)), 'language_eval should be 0 or 1'
    # FIX: these two messages previously (incorrectly) said
    # 'language_eval should be 0 or 1'.
    assert ((args.load_best_score == 0) or (args.load_best_score == 1)), 'load_best_score should be 0 or 1'
    assert ((args.train_only == 0) or (args.train_only == 1)), 'train_only should be 0 or 1'
    # --- Derived values ---
    args.checkpoint_path = (args.checkpoint_path or ('./log_%s' % args.id))
    args.start_from = (args.start_from or args.checkpoint_path)
    (args.use_fc, args.use_att) = utils.if_use_feat(args.caption_model)
    if args.use_box:
        # Box features append 5 geometry values per region.
        args.att_feat_size = (args.att_feat_size + 5)
    return args
class GroupNormalization(Layer):
    """Group Normalization layer (Wu & He, 2018).

    Splits the channel axis into `group` groups and normalizes each group to
    zero mean / unit variance, then applies a learned per-channel scale
    (``gamma``) and shift (``beta``). Adapted from shaoanlu's GAN
    implementation. Supports rank-4 (image) and rank-2 (dense) inputs.
    """

    def __init__(self, axis=(- 1), gamma_init='one', beta_init='zero', gamma_regularizer=None, beta_regularizer=None, epsilon=1e-06, group=32, data_format=None, **kwargs):
        """
        Args:
            axis: axis (or axes) that should be normalized; stored as a list.
            gamma_init / beta_init: initializers for scale / shift weights.
            gamma_regularizer / beta_regularizer: optional weight regularizers.
            epsilon: small constant added to the variance for stability.
            group: number of channel groups; channels must be >= group.
            data_format: 'channels_first' or 'channels_last' (None = backend default).
        """
        self.beta = None
        self.gamma = None
        super(GroupNormalization, self).__init__(**kwargs)
        self.axis = to_list(axis)
        self.gamma_init = initializers.get(gamma_init)
        self.beta_init = initializers.get(beta_init)
        self.gamma_regularizer = regularizers.get(gamma_regularizer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.epsilon = epsilon
        self.group = group
        self.data_format = K.normalize_data_format(data_format)
        self.supports_masking = True

    def build(self, input_shape):
        """Create gamma/beta with shape 1 everywhere except the channel axis,
        so they broadcast over batch and spatial dimensions."""
        self.input_spec = [InputSpec(shape=input_shape)]
        shape = [1 for _ in input_shape]
        if (self.data_format == 'channels_last'):
            channel_axis = (- 1)
            shape[channel_axis] = input_shape[channel_axis]
        elif (self.data_format == 'channels_first'):
            channel_axis = 1
            shape[channel_axis] = input_shape[channel_axis]
        self.gamma = self.add_weight(shape=shape, initializer=self.gamma_init, regularizer=self.gamma_regularizer, name='gamma')
        self.beta = self.add_weight(shape=shape, initializer=self.beta_init, regularizer=self.beta_regularizer, name='beta')
        self.built = True

    def call(self, inputs, mask=None):
        """Normalize inputs per channel group and apply gamma/beta.

        Raises:
            ValueError: if the input is not rank 4 or rank 2, or if the
                channel count is smaller than the group size.
        """
        input_shape = K.int_shape(inputs)
        if ((len(input_shape) != 4) and (len(input_shape) != 2)):
            raise ValueError(('Inputs should have rank 4 or 2; Received input shape:'), str(input_shape))
        if (len(input_shape) == 4):
            if (self.data_format == 'channels_last'):
                (batch_size, height, width, channels) = input_shape
                if (batch_size is None):
                    # Unknown batch dim: let reshape infer it with -1.
                    batch_size = (- 1)
                if (channels < self.group):
                    raise ValueError(f'Input channels should be larger than group size; Received input channels: {channels}; Group size: {self.group}')
                # Split channels into groups, normalize over (H, W, channels-per-group).
                var_x = K.reshape(inputs, (batch_size, height, width, self.group, (channels // self.group)))
                mean = K.mean(var_x, axis=[1, 2, 4], keepdims=True)
                std = K.sqrt((K.var(var_x, axis=[1, 2, 4], keepdims=True) + self.epsilon))
                var_x = ((var_x - mean) / std)
                var_x = K.reshape(var_x, (batch_size, height, width, channels))
                retval = ((self.gamma * var_x) + self.beta)
            elif (self.data_format == 'channels_first'):
                (batch_size, channels, height, width) = input_shape
                if (batch_size is None):
                    batch_size = (- 1)
                if (channels < self.group):
                    raise ValueError(f'Input channels should be larger than group size; Received input channels: {channels}; Group size: {self.group}')
                # Same grouping with channels in axis 1: normalize over
                # (channels-per-group, H, W).
                var_x = K.reshape(inputs, (batch_size, self.group, (channels // self.group), height, width))
                mean = K.mean(var_x, axis=[2, 3, 4], keepdims=True)
                std = K.sqrt((K.var(var_x, axis=[2, 3, 4], keepdims=True) + self.epsilon))
                var_x = ((var_x - mean) / std)
                var_x = K.reshape(var_x, (batch_size, channels, height, width))
                retval = ((self.gamma * var_x) + self.beta)
        elif (len(input_shape) == 2):
            # Rank-2 inputs: as written, mean/var reduce over ALL axes
            # (including batch) since no axis is given — preserved as-is.
            mean = K.mean(inputs, keepdims=True)
            std = K.sqrt((K.var(inputs, keepdims=True) + self.epsilon))
            var_x = ((inputs - mean) / std)
            retval = ((self.gamma * var_x) + self.beta)
        return retval

    def get_config(self):
        """Return the layer configuration for serialization."""
        # BUGFIX: 'beta_regularizer' previously serialized gamma_regularizer.
        config = {'epsilon': self.epsilon, 'axis': self.axis, 'gamma_init': initializers.serialize(self.gamma_init), 'beta_init': initializers.serialize(self.beta_init), 'gamma_regularizer': regularizers.serialize(self.gamma_regularizer), 'beta_regularizer': regularizers.serialize(self.beta_regularizer), 'group': self.group}
        base_config = super(GroupNormalization, self).get_config()
        return dict((list(base_config.items()) + list(config.items())))
class GeneralPriceProvider(DataProvider):
    """Composite data provider that dispatches each request to the backend
    (Bloomberg / Quandl / Haver) registered for the ticker's concrete type.
    """

    def __init__(self, bloomberg: BloombergDataProvider=None, quandl: QuandlDataProvider=None, haver: HaverDataProvider=None):
        super().__init__()
        # Maps a concrete Ticker subclass -> the provider that serves it.
        self._ticker_type_to_data_provider_dict = {}
        for provider in [bloomberg, quandl, haver]:
            if (provider is not None):
                self._register_data_provider(provider)

    def get_price(self, tickers: Union[(Ticker, Sequence[Ticker])], fields: Union[(PriceField, Sequence[PriceField])], start_date: datetime, end_date: datetime=None, frequency: Frequency=Frequency.DAILY, **kwargs) -> Union[(None, PricesSeries, PricesDataFrame, QFDataArray)]:
        """Fetch prices for the given tickers/fields, routing each ticker
        type to its registered provider and merging the results."""
        use_prices_types = True
        normalized_result = self._get_data_for_multiple_tickers(tickers, fields, start_date, end_date, frequency, use_prices_types)
        return normalized_result

    def get_history(self, tickers: Union[(Ticker, Sequence[Ticker])], fields: Union[(str, Sequence[str])], start_date: datetime, end_date: datetime=None, frequency: Frequency=Frequency.DAILY, **kwargs) -> Union[(QFSeries, QFDataFrame, QFDataArray)]:
        """Same routing as get_price but for arbitrary (non-price) fields."""
        use_prices_types = False
        normalized_result = self._get_data_for_multiple_tickers(tickers, fields, start_date, end_date, frequency, use_prices_types)
        return normalized_result

    def get_futures_chain_tickers(self, tickers: Union[(FutureTicker, Sequence[FutureTicker])], expiration_date_fields: Union[(ExpirationDateField, Sequence[ExpirationDateField])]) -> Dict[(FutureTicker, Union[(QFSeries, QFDataFrame)])]:
        """Collect futures-chain tickers per FutureTicker from each backend.

        NOTE(review): `expiration_date_fields` is accepted but the call below
        always passes ExpirationDateField.all_dates() — confirm intended.
        """
        (tickers, got_single_ticker) = convert_to_list(tickers, Ticker)
        results = {}

        def get_data_func(data_prov: DataProvider, tickers_for_single_data_provider) -> Dict[(FutureTicker, QFSeries)]:
            # Delegate to the single backend for its slice of the tickers.
            return data_prov.get_futures_chain_tickers(tickers_for_single_data_provider, ExpirationDateField.all_dates())

        # NOTE(review): groupby only merges ADJACENT equal keys — assumes
        # tickers of the same type arrive contiguously; confirm upstream.
        for (ticker_class, ticker_group) in groupby(tickers, (lambda t: type(t))):
            data_provider = self._identify_data_provider(ticker_class)
            partial_result = get_data_func(data_provider, list(ticker_group))
            if (partial_result is not None):
                results.update(partial_result)
        return results

    def supported_ticker_types(self):
        # All ticker classes any registered backend can serve.
        return self._ticker_type_to_data_provider_dict.keys()

    def _get_data_for_multiple_tickers(self, tickers, fields, start_date, end_date, frequency, use_prices_types):
        """Split tickers by type, query each backend, and concatenate the
        partial results into one normalized structure."""
        if use_prices_types:
            type_of_field = PriceField

            def get_data_func(data_prov: DataProvider, tickers_for_single_data_provider):
                prices = data_prov.get_price(tickers_for_single_data_provider, fields, start_date, end_date, frequency)
                return prices
        else:
            type_of_field = str

            def get_data_func(data_prov: DataProvider, tickers_for_single_data_provider):
                prices = data_prov.get_history(tickers_for_single_data_provider, fields, start_date, end_date, frequency)
                return prices
        (tickers, got_single_ticker) = convert_to_list(tickers, Ticker)
        (fields, got_single_field) = convert_to_list(fields, type_of_field)
        got_single_date = self._got_single_date(start_date, end_date, frequency)
        partial_results = []
        # NOTE(review): same adjacency assumption as above for groupby.
        for (ticker_class, ticker_group) in groupby(tickers, (lambda t: type(t))):
            data_provider = self._identify_data_provider(ticker_class)
            partial_result = get_data_func(data_provider, list(ticker_group))
            if (partial_result is not None):
                partial_results.append(partial_result)
        if (not all((isinstance(partial_result, type(partial_results[0])) for partial_result in partial_results))):
            raise ValueError('Not all partial result are the same type')
        # NOTE(review): partial_results[0] raises IndexError when every
        # backend returned None — confirm callers guarantee data exists.
        if isinstance(partial_results[0], QFDataArray):
            result = QFDataArray.concat(partial_results, dim=TICKERS)
            result = normalize_data_array(result, tickers, fields, got_single_date, got_single_ticker, got_single_field, use_prices_types)
        else:
            result = pd.concat(partial_results).squeeze(axis=1)
        return result

    def _register_data_provider(self, price_provider: DataProvider):
        # Later registrations overwrite earlier ones for the same ticker type.
        for ticker_class in price_provider.supported_ticker_types():
            self._ticker_type_to_data_provider_dict[ticker_class] = price_provider

    def _identify_data_provider(self, ticker_class: Type[Ticker]) -> DataProvider:
        """Return the backend registered for `ticker_class`.

        Raises:
            LookupError: if no provider was registered for this ticker type.
        """
        data_provider = self._ticker_type_to_data_provider_dict.get(ticker_class, None)
        if (data_provider is None):
            raise LookupError('Unknown ticker type: {}. No appropriate data provider found'.format(str(ticker_class)))
        return data_provider
def _concatkdf_derive(key_material: bytes, length: int, auxfn: typing.Callable[[], hashes.HashContext], otherinfo: bytes) -> bytes:
    """Concatenation KDF core: hash (counter || key_material || otherinfo)
    with an incrementing 32-bit big-endian counter until at least `length`
    bytes are produced, then truncate to exactly `length`."""
    utils._check_byteslike('key_material', key_material)
    chunks = [b'']  # seed so join() works even when length <= 0
    produced = 0
    round_no = 1
    while produced < length:
        ctx = auxfn()
        ctx.update(_int_to_u32be(round_no))
        ctx.update(key_material)
        ctx.update(otherinfo)
        block = ctx.finalize()
        chunks.append(block)
        produced += len(block)
        round_no += 1
    return b''.join(chunks)[:length]
def info_verbose(log_info, e_epoch=None, path=None):
    """Plot baseline and training winning-rate curves over epochs.

    Each series is drawn twice: the raw curve (translucent) and a
    Gaussian-smoothed version (opaque, labelled).

    Args:
        log_info: dict with 'epoch' plus per-role dicts ('lord',
            'farmer_up', 'farmer_down') holding 'baseline_wr' /
            'training_wr' sequences.
        e_epoch: optional cutoff epoch; defaults to the last logged epoch.
        path: optional file path — if given, the figure is also saved there.
    """
    # scipy.ndimage.filters is deprecated; gaussian_filter1d lives in
    # scipy.ndimage directly.
    from scipy.ndimage import gaussian_filter1d
    epochs = log_info['epoch']
    end_epoch = (epochs[(- 1)] if (not e_epoch) else e_epoch)
    (f, (ax1, ax2)) = plt.subplots(2, sharex=True, sharey=False)
    trans = 0.3

    def _plot_wr(ax, role, key, color, label):
        """Draw one role's raw + smoothed winning-rate curve on `ax`."""
        series = log_info[role][key][:end_epoch]
        ax.plot(epochs[:end_epoch], series, alpha=trans, color=color)
        ax.plot(epochs[:end_epoch], gaussian_filter1d(series, sigma=10), label=label, color=color)

    _plot_wr(ax1, 'lord', 'baseline_wr', 'r', 'lord')
    _plot_wr(ax1, 'farmer_up', 'baseline_wr', 'g', 'farmer_up')
    _plot_wr(ax1, 'farmer_down', 'baseline_wr', 'b', 'farmer_down')
    ax1.legend(loc=4)
    ax1.set_ylim([0, 0.8])
    ax1.set_title('Baseline')
    ax1.set_ylabel('Winning Rate', rotation='horizontal')
    ax1.yaxis.set_label_coords((- 0.025), 1.05)
    _plot_wr(ax2, 'lord', 'training_wr', 'c', 'lord')
    _plot_wr(ax2, 'farmer_up', 'training_wr', 'm', 'farmer')
    ax2.legend()
    ax2.set_ylim([0, 1])
    ax2.set_title('Training')
    ax2.set_xlabel('Epoch')
    ax2.xaxis.set_label_coords(1.05, (- 0.025))
    plt.show()
    if path:
        f.savefig(path)
class SpatialGate(nn.Module):
    """Spatial attention gate: pool the channel dimension, run a conv to get
    a single-channel attention map, and scale the input by its sigmoid."""

    def __init__(self):
        super(SpatialGate, self).__init__()
        # ChannelPool yields 2 channels (e.g. max/mean over channels); a 7x7
        # conv with padding 3 = (7 - 1) // 2 keeps the spatial size.
        self.compress = ChannelPool()
        self.spatial = BasicConv(2, 1, 7, stride=1, padding=3)

    def forward(self, x):
        attention = torch.sigmoid(self.spatial(self.compress(x)))
        return x * attention
def process_all_speaker_f0(speaker_directory, fs, window, hop, voiced_prob_cutoff=0.2):
    """Compute F0 statistics for every speaker sub-directory.

    Args:
        speaker_directory: directory whose entries are per-speaker folders.
        fs, window, hop: audio analysis parameters passed straight through.
        voiced_prob_cutoff: threshold passed to the per-speaker analysis.

    Returns:
        dict mapping speaker id -> {'median': ..., 'std': ...}.
    """
    f0_stats = {}
    for speaker_id in sorted(os.listdir(speaker_directory)):
        print(f'Processing speaker {speaker_id}...')
        utterance_dir = os.path.join(speaker_directory, speaker_id)
        median, std = find_speaker_f0_median_std(utterance_dir, fs, window, hop, voiced_prob_cutoff)
        f0_stats[speaker_id] = {'median': median, 'std': std}
    return f0_stats
class SortDataset(BaseWrapperDataset):
    """Dataset wrapper that orders indices by one or more sort keys."""

    def __init__(self, dataset, sort_order):
        super().__init__(dataset)
        # Accept a single key array or a list/tuple of them.
        self.sort_order = sort_order if isinstance(sort_order, (list, tuple)) else [sort_order]
        assert all(len(key) == len(dataset) for key in self.sort_order)

    def ordered_indices(self):
        # np.lexsort sorts by the LAST key as primary, earlier keys as ties.
        return np.lexsort(self.sort_order)
def try_finally_resolve_control(builder: IRBuilder, cleanup_block: BasicBlock, finally_control: FinallyNonlocalControl, old_exc: Value, ret_reg: Register | AssignmentTarget | None) -> BasicBlock:
    """Emit the control-flow resolution after a try/finally body.

    Three exits are wired up: re-raise a pending exception (old_exc set),
    return a pending value (ret_reg set), or fall through to a fresh
    out-block. Also emits the error-path cleanup block.
    Returns the block that normal control flow continues in.
    """
    (reraise, rest) = (BasicBlock(), BasicBlock())
    # old_exc holds the saved exception; IS_ERROR means "no exception".
    builder.add(Branch(old_exc, rest, reraise, Branch.IS_ERROR))
    builder.activate_block(reraise)
    builder.call_c(reraise_exception_op, [], NO_TRACEBACK_LINE_NO)
    builder.add(Unreachable())
    builder.builder.pop_error_handler()
    if ret_reg:
        # A return was pending when we entered the finally body: if the
        # return register is set, perform the (possibly nonlocal) return.
        builder.activate_block(rest)
        (return_block, rest) = (BasicBlock(), BasicBlock())
        builder.add(Branch(builder.read(ret_reg), rest, return_block, Branch.IS_ERROR))
        builder.activate_block(return_block)
        builder.nonlocal_control[(- 1)].gen_return(builder, builder.read(ret_reg), (- 1))
        builder.activate_block(rest)
    out_block = BasicBlock()
    builder.goto(out_block)
    # Error path: run the finally cleanup, then keep propagating the error.
    builder.activate_block(cleanup_block)
    finally_control.gen_cleanup(builder, (- 1))
    builder.call_c(keep_propagating_op, [], NO_TRACEBACK_LINE_NO)
    builder.add(Unreachable())
    return out_block
class TestSvdTrainingExtensions(unittest.TestCase):
    """Tests for the TF SVD compression workflow (layer selection, cost
    computation, layer picking, rank selection).

    NOTE(review): this file appears to have been mangled in extraction:
    several tuple literals are truncated (e.g. `(3273504, )`, `(51200, )`
    look like they lost their second element) and one expression below is a
    hard syntax error. Flagged inline — the original constants cannot be
    reconstructed from this view alone.
    """

    def test_svd_layer_selection_without_mo(self):
        """Layer selection path with the pymo Svd backend mocked out."""
        tf.compat.v1.reset_default_graph()
        svd = s.Svd(None, None, s.CostMetric.memory)
        svd._svd = create_autospec(pymo.Svd, instance=True)
        x = tf.compat.v1.placeholder(tf.float32, [None, 784], 'data')
        y_hat = model(x)
        sess = tf.compat.v1.Session()
        sess.run(tf.compat.v1.global_variables_initializer())
        svd._svd.GetCompressionType.return_value = pymo.SVD_COMPRESS_TYPE.TYPE_SINGLE
        svd._store_net_stats(sess)
        self.assertTrue(svd._svd.GetCompressionType.called)

    def test_svd_layer_selection_with_mo(self):
        """Layer selection against the real (unmocked) Svd backend."""
        tf.compat.v1.reset_default_graph()
        svd = s.Svd(None, None, s.CostMetric.memory)
        x = tf.compat.v1.placeholder(tf.float32, [None, 784], 'data')
        y_hat = model(x)
        sess = tf.compat.v1.Session()
        sess.run(tf.compat.v1.global_variables_initializer())
        svd._store_net_stats(sess)
        layers = svd._svd.GetLayerNames()
        print('Layers added: ', layers)

    def test_compute_layer_cost(self):
        """Memory/MAC cost for a conv layer and a fully-connected layer."""
        tf.compat.v1.reset_default_graph()
        with tf.compat.v1.Session() as sess:
            x = tf.compat.v1.placeholder(tf.float32, [None, 784], 'data')
            y_hat = model(x)
            sess.run(tf.compat.v1.global_variables_initializer())
            ops = tf.compat.v1.get_default_graph().get_operations()
            for op in ops:
                if (op.name == 'Conv2D'):
                    conv1 = op
                elif (op.name == 'MatMul'):
                    fc1 = op
            (mem_cost, mac_cost) = s.Svd._compute_layer_cost(conv1.inputs[1].shape, conv1.outputs[0].shape, conv1.type)
            self.assertEqual(mem_cost, 800)
            self.assertEqual(mac_cost, 627200)
            # For a fully-connected layer memory and MAC cost coincide.
            (mem_cost, mac_cost) = s.Svd._compute_layer_cost(fc1.inputs[1].shape, fc1.outputs[0].shape, fc1.type)
            self.assertEqual(mem_cost, 3211264)
            self.assertEqual(mac_cost, 3211264)

    def test_create_layer_attributes_list(self):
        """Layer attribute extraction for all Conv2D/MatMul ops."""
        tf.compat.v1.reset_default_graph()
        with tf.compat.v1.Session() as sess:
            x = tf.compat.v1.placeholder(tf.float32, [None, 784], 'data')
            y_hat = model(x)
            sess.run(tf.compat.v1.global_variables_initializer())
            ops = tf.compat.v1.get_default_graph().get_operations()
            ops_to_use_for_cost = []
            for op in ops:
                if (op.type in ['Conv2D', 'MatMul']):
                    ops_to_use_for_cost.append(op)
            layer_attributes_list = s.Svd._create_layer_attributes_list(ops_to_use_for_cost, sess)
            print(layer_attributes_list)
            self.assertEqual((800, 627200), layer_attributes_list[0].cost)
            # NOTE(review): one-element tuple — cost elsewhere is a
            # (memory, mac) pair; the second element looks lost in transit.
            self.assertEqual((51200, ), layer_attributes_list[1].cost)
            self.assertEqual(len(layer_attributes_list), 4)

    def test_compute_network_cost(self):
        """Aggregate network cost is the sum of per-layer costs."""
        tf.compat.v1.reset_default_graph()
        with tf.compat.v1.Session() as sess:
            x = tf.compat.v1.placeholder(tf.float32, [None, 784], 'data')
            y_hat = model(x)
            sess.run(tf.compat.v1.global_variables_initializer())
            ops = tf.compat.v1.get_default_graph().get_operations()
            ops_to_use_for_cost = []
            for op in ops:
                if (op.type in ['Conv2D', 'MatMul']):
                    ops_to_use_for_cost.append(op)
            layer_attributes_list = s.Svd._create_layer_attributes_list(ops_to_use_for_cost, sess)
            (mem_cost, mac_cost) = s.Svd._compute_network_cost(layer_attributes_list)
            self.assertEqual(mem_cost, (((800 + 51200) + 3211264) + 10240))
            # NOTE(review): SYNTAX ERROR below — `627200 + )` has lost its
            # right operand (likely the second layer's MAC cost). Must be
            # restored from the original source; not guessable from here.
            self.assertEqual(mac_cost, (((627200 + ) + 3211264) + 10240))

    def test_pick_compression_layers_top_n_layers(self):
        """top_n_layers selection under both MAC and memory metrics."""
        tf.compat.v1.reset_default_graph()
        with tf.compat.v1.Session() as sess:
            x = tf.compat.v1.placeholder(tf.float32, [None, 784], 'data')
            y_hat = model(x)
            sess.run(tf.compat.v1.global_variables_initializer())
            (picked_layers, network_cost) = s.Svd._pick_compression_layers(sess, cost_metric=s.CostMetric.mac, layer_select_scheme=s.Svd.LayerSelectionScheme.top_n_layers, num_layers=2)
            self.assertEqual('Conv2D_1', picked_layers[0].layer_ref.name)
            self.assertEqual('MatMul', picked_layers[1].layer_ref.name)
            self.assertEqual(2, len(picked_layers))
            # NOTE(review): truncated tuple — network_cost is elsewhere a
            # (memory, mac) pair; 3273504 = 800 + 51200 + 3211264 + 10240.
            self.assertEqual((3273504, ), network_cost)
            (picked_layers, network_cost) = s.Svd._pick_compression_layers(sess, cost_metric=s.CostMetric.memory, layer_select_scheme=s.Svd.LayerSelectionScheme.top_n_layers, num_layers=2)
            for layer in picked_layers:
                print(layer.layer_ref.name, layer.cost)
            self.assertEqual('MatMul', picked_layers[0].layer_ref.name)
            self.assertEqual('Conv2D_1', picked_layers[1].layer_ref.name)
            self.assertEqual(2, len(picked_layers))
            self.assertEqual((3273504, ), network_cost)

    def test_pick_compression_layers_top_x_percent(self):
        """top_x_percent selection at 100%, 80% and 98.5% thresholds."""
        tf.compat.v1.reset_default_graph()
        with tf.compat.v1.Session() as sess:
            x = tf.compat.v1.placeholder(tf.float32, [None, 784], 'data')
            y_hat = model(x)
            sess.run(tf.compat.v1.global_variables_initializer())
            (picked_layers, network_cost) = s.Svd._pick_compression_layers(sess, cost_metric=s.CostMetric.memory, layer_select_scheme=s.Svd.LayerSelectionScheme.top_x_percent, percent_thresh=100)
            for layer in picked_layers:
                print(layer.layer_ref.name, layer.cost)
            self.assertEqual('MatMul', picked_layers[0].layer_ref.name)
            self.assertEqual('Conv2D_1', picked_layers[1].layer_ref.name)
            self.assertEqual('MatMul_1', picked_layers[2].layer_ref.name)
            self.assertEqual(3, len(picked_layers))
            self.assertEqual((3273504, ), network_cost)
            (picked_layers, network_cost) = s.Svd._pick_compression_layers(sess, cost_metric=s.CostMetric.memory, layer_select_scheme=s.Svd.LayerSelectionScheme.top_x_percent, percent_thresh=80)
            for layer in picked_layers:
                print(layer.layer_ref.name, layer.cost)
            self.assertEqual('Conv2D_1', picked_layers[0].layer_ref.name)
            self.assertEqual('MatMul_1', picked_layers[1].layer_ref.name)
            self.assertEqual(2, len(picked_layers))
            self.assertEqual((3273504, ), network_cost)
            (picked_layers, network_cost) = s.Svd._pick_compression_layers(sess, cost_metric=s.CostMetric.memory, layer_select_scheme=s.Svd.LayerSelectionScheme.top_x_percent, percent_thresh=98.5)
            for layer in picked_layers:
                print(layer.layer_ref.name, layer.cost)
            self.assertEqual('MatMul', picked_layers[0].layer_ref.name)
            self.assertEqual('MatMul_1', picked_layers[1].layer_ref.name)
            self.assertEqual(2, len(picked_layers))
            self.assertEqual((3273504, ), network_cost)

    def test_pick_compression_layers_manual(self):
        """Manual selection of a single named layer."""
        tf.compat.v1.reset_default_graph()
        with tf.compat.v1.Session() as sess:
            x = tf.compat.v1.placeholder(tf.float32, [None, 784], 'data')
            y_hat = model(x)
            sess.run(tf.compat.v1.global_variables_initializer())
            (picked_layers, network_cost) = s.Svd._pick_compression_layers(sess, cost_metric=s.CostMetric.memory, layer_select_scheme=s.Svd.LayerSelectionScheme.manual, layers_to_compress=['Conv2D_1'])
            for layer in picked_layers:
                print(layer.layer_ref.name, layer.cost)
            self.assertEqual('Conv2D_1', picked_layers[0].layer_ref.name)
            self.assertEqual(1, len(picked_layers))
            self.assertEqual((3273504, ), network_cost)

    def test_automatic_rank_selection(self):
        """Rank selection with the whole compression pipeline mocked out.

        NOTE(review): everything here is patched on the Svd CLASS (not an
        instance), and `self` (the unittest TestCase) is passed as the Svd
        instance at the end — presumably intentional duck-typing; confirm.
        """
        tf.compat.v1.reset_default_graph()
        with tf.compat.v1.Session() as sess:
            x = tf.compat.v1.placeholder(tf.float32, [None, 784], 'data')
            y_hat = model(x)
            sess.run(tf.compat.v1.global_variables_initializer())
            s.Svd._baseline_perf = 1
            s.Svd._svd = create_autospec(pymo.Svd, instance=True)
            s.Svd._compute_compression_ratio = create_autospec(s.Svd._compute_compression_ratio)
            s.Svd._compute_compression_ratio.side_effect = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
            s.Svd._create_compressed_network = create_autospec(s.Svd._create_compressed_network)
            s.Svd._create_compressed_network.return_value = (None, None)
            s.Svd._network_cost = (500, 500)
            run_graph_return_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
            s.Svd.run_graph = unittest.mock.Mock(side_effect=run_graph_return_values)
            s.Svd._svd.SetCandidateRanks = create_autospec(s.Svd._svd.SetCandidateRanks)
            # NOTE(review): side_effect set to the int 20 — mock.side_effect
            # normally takes a callable/iterable/exception; an int raises
            # TypeError when the mock is called. Looks like a lost literal.
            s.Svd._svd.SetCandidateRanks.side_effect = 20
            s.Svd._perform_rank_selection(self)
def test_disk_and_tensorflow_logger():
    """Run a fast local experiment with both loggers enabled and verify
    each produced exactly one artifact with the expected contents."""
    result = run_main(FAST_LOCAL_TEST_ARGS + ' --log disk tensorboard', 'results_test_loggers', clean_run=True)
    out_dir = Path(result[-1])
    assert out_dir.is_dir()
    # Disk logger: a single JSON-lines raw log covering 3 iters x 3 groups.
    disk_logs = list(out_dir.glob('raw_log-*.txt'))
    assert len(disk_logs) == 1
    frame = pd.read_json(disk_logs[0], lines=True)
    assert sorted(frame.iter.unique()) == [0, 1, 2]
    assert sorted(frame.group.unique()) == ['test', 'train', 'valid']
    assert len(frame.group.unique()) == 3
    # Tensorboard logger: a single events file.
    event_files = list(out_dir.glob('events.out.tfevents*'))
    assert len(event_files) == 1
    assert out_dir.joinpath(event_files[0]).is_file()
class Migration(migrations.Migration):
    """Initial schema: Site, Post (FK to Site), Comment (FK to Post)."""

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name='Site',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('site', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='testapp.Site')),
            ],
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField()),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='testapp.Post')),
            ],
        ),
    ]
class TestTriangulation:
    """Tests for Graph.build_triangulation across all triangulation methods,
    with integer and string indices, with and without a kernel.

    FIX: the four `.parametrize(...)` lines had lost their `@pytest.mark`
    prefix, leaving bare attribute-access syntax errors — restored.
    """

    def setup_method(self):
        gdf = gpd.read_file(geodatasets.get_path('geoda liquor_stores')).explode(ignore_index=True)
        # Drop duplicated point geometries; triangulations require unique coordinates.
        self.gdf = gdf[~gdf.geometry.duplicated()]
        self.gdf_str = self.gdf.set_index('placeid')

    @pytest.mark.parametrize('method', TRIANGULATIONS)
    def test_triangulation_intids(self, method):
        """Integer index: adjacency labels and weights are numeric."""
        g = graph.Graph.build_triangulation(self.gdf, method)
        assert pd.api.types.is_numeric_dtype(g._adjacency.index.dtypes['focal'])
        assert pd.api.types.is_numeric_dtype(g._adjacency.index.dtypes['neighbor'])
        assert pd.api.types.is_numeric_dtype(g._adjacency.dtype)

    @pytest.mark.parametrize('method', TRIANGULATIONS)
    def test_triangulation_strids(self, method):
        """String index: adjacency labels are strings, weights numeric."""
        g = graph.Graph.build_triangulation(self.gdf_str, method)
        assert pd.api.types.is_string_dtype(g._adjacency.index.dtypes['focal'])
        assert pd.api.types.is_string_dtype(g._adjacency.index.dtypes['neighbor'])
        assert pd.api.types.is_numeric_dtype(g._adjacency.dtype)

    @pytest.mark.parametrize('method', TRIANGULATIONS)
    def test_triangulation_intids_kernel(self, method):
        """Integer index with kernel-weighted edges."""
        g = graph.Graph.build_triangulation(self.gdf, method, kernel='parabolic', bandwidth=7500)
        assert pd.api.types.is_numeric_dtype(g._adjacency.index.dtypes['focal'])
        assert pd.api.types.is_numeric_dtype(g._adjacency.index.dtypes['neighbor'])
        assert pd.api.types.is_numeric_dtype(g._adjacency.dtype)

    @pytest.mark.parametrize('method', TRIANGULATIONS)
    def test_triangulation_strids_kernel(self, method):
        """String index with kernel-weighted edges."""
        g = graph.Graph.build_triangulation(self.gdf_str, method, kernel='parabolic', bandwidth=7500)
        assert pd.api.types.is_string_dtype(g._adjacency.index.dtypes['focal'])
        assert pd.api.types.is_string_dtype(g._adjacency.index.dtypes['neighbor'])
        assert pd.api.types.is_numeric_dtype(g._adjacency.dtype)

    def test_invalid_method(self):
        """An unknown method name raises a descriptive ValueError."""
        with pytest.raises(ValueError, match="Method 'invalid' is not supported"):
            graph.Graph.build_triangulation(self.gdf, method='invalid', kernel='parabolic', bandwidth=7500)
.parametrize('valuetrigger', [OSC.ParameterCondition('asdf', 1, OSC.Rule.greaterOrEqual), OSC.VariableCondition('asdf', 1, OSC.Rule.greaterOrEqual), OSC.TimeOfDayCondition(OSC.Rule.greaterOrEqual, 2023, 4, 5, 6, 4, 8), OSC.SimulationTimeCondition(2, OSC.Rule.greaterOrEqual), OSC.StoryboardElementStateCondition(OSC.StoryboardElementType.action, 'my action', OSC.StoryboardElementState.endTransition), OSC.UserDefinedValueCondition('myvalue', 2, OSC.Rule.greaterOrEqual), OSC.TrafficSignalCondition('stuffs', 'red'), OSC.TrafficSignalControllerCondition('my signal', 'myphase')])
def test_value_condition_factory(valuetrigger):
element = ET.Element('ByValueCondition')
element.append(valuetrigger.get_element())
factoryoutput = OSC.triggers._ValueConditionFactory.parse_value_condition(element)
prettyprint(valuetrigger, None)
prettyprint(factoryoutput, None)
assert (valuetrigger == factoryoutput) |
def train(train_queue, valid_queue, model, architect, criterion, optimizer, lr, epoch, analyzer):
    """Run one epoch of bi-level architecture search training.

    Alternates architect (architecture-parameter) steps on validation
    batches with weight steps on training batches, then computes the
    dominant Hessian eigenvalue norm for this epoch.

    Returns:
        (avg top-1 accuracy, avg loss, norm of the max Hessian eigenvalue).

    NOTE(review): relies on module-level `args` for warm_start_epochs,
    grad_clip, report_freq and save.
    """
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    for step, (input, target) in enumerate(train_queue):
        model.train()
        n = input.size(0)
        input = input.cuda()
        target = target.cuda(non_blocking=True)
        # Draw the next validation batch, recreating the iterator when
        # needed. FIX: was a bare `except:`; the idiom only needs
        # StopIteration (iterator exhausted) and NameError (first pass,
        # before valid_queue_iter exists — UnboundLocalError subclasses it).
        try:
            (input_search, target_search) = next(valid_queue_iter)
        except (StopIteration, NameError):
            valid_queue_iter = iter(valid_queue)
            (input_search, target_search) = next(valid_queue_iter)
        input_search = input_search.cuda()
        target_search = target_search.cuda(non_blocking=True)
        # Architecture updates only start after the warm-start phase.
        if epoch >= args.warm_start_epochs:
            architect.step(input, target, input_search, target_search, lr, optimizer)
        optimizer.zero_grad()
        # Binarize weights for the forward/backward pass, then restore the
        # full-precision copies before the optimizer step.
        model.binarization()
        logits = model(input)
        loss = criterion(logits, target)
        loss.backward()
        model.restore()
        nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
        optimizer.step()
        model.clip()
        (prec1, prec5) = utils.accuracy(logits, target, topk=(1, 5))
        objs.update(loss.data.item(), n)
        top1.update(prec1.data.item(), n)
        top5.update(prec5.data.item(), n)
        if (step % args.report_freq) == 0:
            logging.info('train %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
        if 'debug' in args.save:
            break
    # Hessian analysis on a fresh copy of the first training batch.
    _data_loader = deepcopy(train_queue)
    (input, target) = next(iter(_data_loader))
    input = Variable(input, requires_grad=False).cuda()
    target = Variable(target, requires_grad=False).cuda(non_blocking=True)
    model.binarization()
    H = analyzer.compute_Hw(input, target, input_search, target_search, lr, optimizer, False)
    model.restore()
    del _data_loader
    # Eigenvalues may be complex; take the norm of the largest one.
    ev = max(LA.eigvals(H.cpu().data.numpy()))
    ev = np.linalg.norm(ev)
    return (top1.avg, objs.avg, ev)
# FIX: the bare `(repr=False, eq=False)` line was a decorator that lost its
# `@dataclass` prefix; `from_balance_proof_signed_state` takes `cls` and is
# used as an alternate constructor, so its stripped `@classmethod` is
# restored as well.
@dataclass(repr=False, eq=False)
class SignedBlindedBalanceProof:
    """A balance proof with the balance data blinded into `balance_hash`,
    suitable for handing to a third party (e.g. a monitoring service)
    without revealing the actual balances.
    """

    channel_identifier: ChannelID
    token_network_address: TokenNetworkAddress
    nonce: Nonce
    additional_hash: AdditionalHash
    chain_id: ChainID
    balance_hash: BalanceHash
    # Signature of the original balance proof sender.
    signature: Signature
    # Filled in once the non-closing party countersigns.
    non_closing_signature: Optional[Signature] = field(default=EMPTY_SIGNATURE)

    def __post_init__(self) -> None:
        # An unsigned balance proof cannot be used by anyone.
        if (self.signature == EMPTY_SIGNATURE):
            raise ValueError('balance proof is not signed')

    @classmethod
    def from_balance_proof_signed_state(cls, balance_proof: BalanceProofSignedState) -> 'SignedBlindedBalanceProof':
        """Blind a full signed balance proof: hash its balance data and copy
        the remaining identifying fields."""
        typecheck(balance_proof, BalanceProofSignedState)
        return cls(channel_identifier=balance_proof.channel_identifier, token_network_address=balance_proof.token_network_address, nonce=balance_proof.nonce, additional_hash=balance_proof.message_hash, chain_id=balance_proof.chain_id, signature=balance_proof.signature, balance_hash=hash_balance_data(balance_proof.transferred_amount, balance_proof.locked_amount, balance_proof.locksroot))

    def _data_to_sign(self) -> bytes:
        """Pack the fields exactly as the on-chain contract expects them."""
        packed = pack_signed_balance_proof(msg_type=MessageTypeId.BALANCE_PROOF_UPDATE, nonce=self.nonce, balance_hash=self.balance_hash, additional_hash=self.additional_hash, canonical_identifier=CanonicalIdentifier(chain_identifier=self.chain_id, token_network_address=self.token_network_address, channel_identifier=self.channel_identifier), partner_signature=self.signature)
        return packed

    def _sign(self, signer: Signer) -> Signature:
        """Produce the non-closing party's signature over the packed data."""
        data = self._data_to_sign()
        return signer.sign(data)
class traindataset(data.Dataset):
def __init__(self, root, mode, transform=None, num_class=5, multitask=False):
self.root = os.path.expanduser(root)
self.transform = transform
self.mode = mode
self.train_data = []
self.train_label = []
self.test_data = []
self.test_label = []
self.name = []
self.num_class = num_class
self.multitask = multitask
files = glob.glob((self.root + '/images/trainset/*.jpg'))
self.train_root = files
if (self.mode == 'train'):
(dictLabels_DR, dictLabels_DME) = self.load_csv((self.root + '/labels/trainset.csv'))
for each_one in self.train_root:
img = Image.open(each_one)
img = img.convert('RGB')
if self.multitask:
label_DR = [k for (k, v) in dictLabels_DR.items() if (each_one.split('/')[(- 1)][:(- 4)] in v)]
label_DME = [k for (k, v) in dictLabels_DME.items() if (each_one.split('/')[(- 1)][:(- 4)] in v)]
self.train_label.append([int(label_DR[0]), int(label_DME[0])])
else:
if (self.num_class == 5):
label_DR = [k for (k, v) in dictLabels_DR.items() if (each_one.split('/')[(- 1)][:(- 4)] in v)]
else:
label_DR = [k for (k, v) in dictLabels_DME.items() if (each_one.split('/')[(- 1)][:(- 4)] in v)]
self.train_label.append(int(label_DR[0]))
assert (len(label_DR) == 1)
self.train_data.append(img)
self.name.append(each_one.split('/')[(- 1)][:(- 4)])
assert (len(self.train_label) == len(self.train_root))
if self.multitask:
print('=> Total Train: ', len(self.train_root), ' Multi-Task images ')
elif (self.num_class == 5):
print('=> Total Train: ', len(self.train_root), ' DR images ')
else:
print('=> Total Train: ', len(self.train_root), ' DME images ')
elif (self.mode == 'val'):
self.test_root = glob.glob((self.root + '/images/testset/*.jpg'))
(dictLabels_DR, dictLabels_DME) = self.load_csv((self.root + '/labels/testset.csv'))
for item in self.test_root:
img = Image.open(item)
img = img.convert('RGB')
if self.multitask:
label_DR = [k for (k, v) in dictLabels_DR.items() if (item.split('/')[(- 1)][:(- 4)] in v)]
label_DME = [k for (k, v) in dictLabels_DME.items() if (item.split('/')[(- 1)][:(- 4)] in v)]
self.test_label.append([int(label_DR[0]), int(label_DME[0])])
else:
if (self.num_class == 5):
label_DR = [k for (k, v) in dictLabels_DR.items() if (item.split('/')[(- 1)][:(- 4)] in v)]
else:
label_DR = [k for (k, v) in dictLabels_DME.items() if (item.split('/')[(- 1)][:(- 4)] in v)]
self.test_label.append(int(label_DR[0]))
assert (len(label_DR) == 1)
self.test_data.append(img)
self.name.append(item.split('/')[(- 1)][:(- 4)])
assert (len(self.test_root) == len(self.test_label))
if self.multitask:
print('=> Total Test: ', len(self.test_root), ' Multi-Task images ')
elif (self.num_class == 5):
print('=> Total Test: ', len(self.test_root), ' DR images ')
else:
print('=> Total Test: ', len(self.test_root), ' DME images ')
def load_csv(self, path):
    """Parse a label CSV into two label->filenames maps.

    The CSV is expected to have a header row followed by rows of
    ``filename, DR_label, DME_label``.

    Args:
        path: Path to the CSV label file.

    Returns:
        Tuple ``(dictLabels_DR, dictLabels_DME)`` where each dict maps a
        label string to the list of filenames carrying that label.
    """
    dictLabels_DR = {}
    dictLabels_DME = {}
    with open(path) as csvfile:
        csvreader = csv.reader(csvfile, delimiter=',')
        # Skip the header row; the None default guards an empty file.
        next(csvreader, None)
        for row in csvreader:
            filename = row[0]
            label_dr = row[1]
            label_dme = row[2]
            # Group filenames by label; setdefault replaces the original
            # `in dict.keys()` membership test (same behavior, idiomatic).
            dictLabels_DR.setdefault(label_dr, []).append(filename)
            dictLabels_DME.setdefault(label_dme, []).append(filename)
    return (dictLabels_DR, dictLabels_DME)
def __getitem__(self, index):
    """Return ``(transformed_image, label, name)`` for sample ``index``.

    The split ('train' vs 'val') selects which preloaded arrays are read.
    """
    if self.mode == 'train':
        img = self.train_data[index]
        label = self.train_label[index]
        name = self.name[index]
    elif self.mode == 'val':
        img = self.test_data[index]
        label = self.test_label[index]
        name = self.name[index]
    # The transform is applied lazily, at access time.
    img = self.transform(img)
    return (img, label, name)
def __len__(self):
    """Number of samples in the active split (train or val)."""
    if self.mode == 'train':
        return len(self.train_root)
    if self.mode == 'val':
        return len(self.test_root)
def _quantize(module: nn.Module, inplace: bool, output_type: torch.dtype=torch.float, quant_state_dict_split_scale_bias: bool=False, per_table_weight_dtypes: Optional[Dict[(str, torch.dtype)]]=None) -> nn.Module:
    """Dynamically quantize the EmbeddingBagCollections inside ``module``.

    Activations use a placeholder observer with ``output_type``; weights are
    qint8 by default, or per-table dtypes when ``per_table_weight_dtypes``
    is given.  ``quant_state_dict_split_scale_bias`` optionally splits
    scale/bias out of the quantized state dict.
    """
    if quant_state_dict_split_scale_bias:
        quant_prep_enable_quant_state_dict_split_scale_bias_for_types(module, [EmbeddingBagCollection])
    activation_observer = quant.PlaceholderObserver.with_args(dtype=output_type)
    if per_table_weight_dtypes:
        # Per-table dtypes require the extended QuantConfig.
        qconfig = QuantConfig(activation=activation_observer, weight=quant.PlaceholderObserver.with_args(dtype=torch.quint8), per_table_weight_dtype=per_table_weight_dtypes)
    else:
        qconfig = quant.QConfig(activation=activation_observer, weight=quant.PlaceholderObserver.with_args(dtype=torch.qint8))
    return quant.quantize_dynamic(module, qconfig_spec={EmbeddingBagCollection: qconfig}, mapping={EmbeddingBagCollection: QuantEmbeddingBagCollection}, inplace=inplace)
class ConfigTestCase(unittest.TestCase):
    """Unit tests for the Configuration loader."""

    def test_sections(self):
        """The default configuration has exactly SERVICE and FRONTEND sections."""
        cfg = Configuration()
        self.assertSetEqual(set(cfg._parser.sections()), {'SERVICE', 'FRONTEND'})

    def test_get_option(self):
        """Single-option and whole-section accessors return the defaults."""
        cfg = Configuration()
        self.assertEqual(cfg.get_option('SERVICE', 'name'), 'local')
        self.assertDictEqual(cfg.get_section('SERVICE'), {'name': 'local'})

    def test_invalid_config(self):
        """Semantically invalid config files raise InvalidConfigError."""
        path = f'/tmp/{int(time.time())}'
        bad_contents = ('[SERVICE]\nname = sillyservice\n', '[FRONTEND]\ndefault_category = ')
        for body in bad_contents:
            with open(path, 'w') as handle:
                handle.write(body)
            self.assertRaises(InvalidConfigError, Configuration, filepath=path)

    def test_nonexisting_config_filepath(self):
        """A missing config file raises with a descriptive message."""
        path = f'/tmp/{time.time()}'
        with self.assertRaises(InvalidConfigError) as ctx:
            Configuration(filepath=path)
        self.assertTrue(ctx.exception.args[0].endswith('Config filepath does not exist!'))
@pytest.mark.parametrize('creator', sorted((set(PythonInfo.current_system().creators().key_to_class) - {'builtin'})))
@pytest.mark.usefixtures('session_app_data')
def test_create_distutils_cfg(creator, tmp_path, monkeypatch):
    """A setup.cfg [install] section is honored when pip installs into the
    created environment via the legacy (--no-use-pep517) path.

    NOTE(review): the two decorator lines had lost their '@pytest.mark.'
    prefix in extraction (bare '.parametrize' / '.usefixtures' is a syntax
    error); restored here.
    """
    result = cli_run([str((tmp_path / 'venv')), '--activators', '', '--creator', creator, '--setuptools', 'bundle', '--wheel', 'bundle'])
    # Copy the sample console app next to the venv so we can mutate its setup.cfg.
    app = (Path(__file__).parent / 'console_app')
    dest = (tmp_path / 'console_app')
    shutil.copytree(str(app), str(dest))
    setup_cfg = (dest / 'setup.cfg')
    # Redirect every install location into tmp_path so the test is hermetic.
    conf = dedent(f'''
        [install]
        prefix={tmp_path}{os.sep}prefix
        install_purelib={tmp_path}{os.sep}purelib
        install_platlib={tmp_path}{os.sep}platlib
        install_headers={tmp_path}{os.sep}headers
        install_scripts={tmp_path}{os.sep}scripts
        install_data={tmp_path}{os.sep}data
        ''')
    setup_cfg.write_text((setup_cfg.read_text(encoding='utf-8') + conf), encoding='utf-8')
    monkeypatch.chdir(dest)
    # -vv + --no-use-pep517 forces the distutils code path that reads setup.cfg.
    install_demo_cmd = [str(result.creator.script('pip')), '--disable-pip-version-check', 'install', str(dest), '--no-use-pep517', '-vv']
    subprocess.check_call(install_demo_cmd)
    magic = result.creator.script('magic')
    assert magic.exists()
    package_folder = (result.creator.purelib / 'demo')
    assert package_folder.exists(), list_files(str(tmp_path))
@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'), reason='Requires tensorflow backend')
def test_TensorBoard(tmpdir):
    """Smoke-test the TensorBoard callback across fit, fit-with-validation
    and fit_generator (with and without histograms), then verify the log
    directory was created and clean it up.

    NOTE(review): the decorators were garbled in extraction ('_test' and a
    bare '.skipif'); restored as @keras_test / @pytest.mark.skipif.  The
    random-seed upper bound was garbled ('.0'); int(1e7) is assumed —
    confirm against upstream.
    """
    np.random.seed(np.random.randint(1, int(1e7)))
    filepath = str((tmpdir / 'logs'))
    ((X_train, y_train), (X_test, y_test)) = get_test_data(num_train=train_samples, num_test=test_samples, input_shape=(input_dim,), classification=True, num_classes=num_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    def data_generator(train):
        # Infinite generator cycling over fixed-size batches of the chosen split.
        if train:
            max_batch_index = (len(X_train) // batch_size)
        else:
            max_batch_index = (len(X_test) // batch_size)
        i = 0
        while 1:
            if train:
                (yield (X_train[(i * batch_size):((i + 1) * batch_size)], y_train[(i * batch_size):((i + 1) * batch_size)]))
            else:
                (yield (X_test[(i * batch_size):((i + 1) * batch_size)], y_test[(i * batch_size):((i + 1) * batch_size)]))
            i += 1
            i = (i % max_batch_index)

    # Tiny MLP classifier; architecture is irrelevant, only the callback matters.
    inp = Input((input_dim,))
    hidden = Dense(num_hidden, activation='relu')(inp)
    hidden = Dropout(0.1)(hidden)
    output = Dense(num_class, activation='softmax')(hidden)
    model = Model(inputs=inp, outputs=output)
    model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])

    def callbacks_factory(histogram_freq):
        # A fresh TensorBoard callback per fit call, logging embeddings of 'dense_1'.
        return [callbacks.TensorBoard(log_dir=filepath, histogram_freq=histogram_freq, write_images=True, write_grads=True, embeddings_freq=1, embeddings_layer_names=['dense_1'], batch_size=5)]

    model.fit(X_train, y_train, batch_size=batch_size, callbacks=callbacks_factory(histogram_freq=0), epochs=3)
    model.fit(X_train, y_train, batch_size=batch_size, validation_data=(X_test, y_test), callbacks=callbacks_factory(histogram_freq=0), epochs=2)
    model.fit_generator(data_generator(True), len(X_train), epochs=2, callbacks=callbacks_factory(histogram_freq=0))
    model.fit_generator(data_generator(True), len(X_train), epochs=2, validation_data=(X_test, y_test), callbacks=callbacks_factory(histogram_freq=1))
    assert os.path.isdir(filepath)
    shutil.rmtree(filepath)
    assert (not tmpdir.listdir())
class menu(page):
    """An LCD menu page: a titled, scrollable list of selectable items.

    Items may be submenus, boolean value indicators (ValueCheck) or
    slider-backed range editors (RangeEdit).  Navigation keys move the
    highlight, MENU activates the highlighted item, and AUTO jumps back
    to the control page.
    """

    def __init__(self, name, items):
        super(menu, self).__init__(name)
        self.selection = 0            # index of the currently highlighted item
        self.items = items
        self.prev = False             # parent menu; False marks the root menu
        self.last_selection = (- 1)   # sentinel so the first display() always redraws
        self.menu_values = {}         # last rendered value per pypilot path (change detection)

    def find_parents(self):
        # Propagate the lcd handle and the parent link down the item tree.
        for p in self.items:
            p.lcd = self.lcd
            p.prev = self
            if isinstance(p, menu):
                p.find_parents()

    def mainmenu(self):
        # Walk parent links up to the root menu.
        if self.prev:
            return self.prev.mainmenu()
        return self

    def display(self, refresh):
        self.lcd.menu = self
        # If neither a forced refresh nor a selection change occurred, only
        # redraw when one of the displayed values actually changed.
        if ((not refresh) and (self.selection == self.last_selection)):
            for item in self.items:
                if (isinstance(item, ValueCheck) or isinstance(item, RangeEdit)):
                    path = item.pypilot_path
                    val = self.last_val(path)
                    if (not refresh):
                        refresh = ((not (path in self.menu_values)) or (self.menu_values[path] != val))
                    self.menu_values[path] = val
            if (not refresh):
                return
        self.last_selection = self.selection
        self.fill(black)
        # Title across the top quarter of the screen.
        fit = self.fittext(rectangle(0, 0, 1, 0.25), self.name)
        sy = y = (fit[1] + 0.03)
        # Rows are 0.15 high; scroll so the highlight stays near the middle.
        items = min(int(((1 - y) / 0.15)), len(self.items))
        scroll = max((self.selection - int((items / 2))), 0)
        scroll = min(scroll, (len(self.items) - items))
        maxsizeslider = 0
        sliders = []
        for item in self.items[scroll:]:
            size = self.fittext(rectangle(0, y, 1, 0.15), item.name)
            if isinstance(item, ValueCheck):
                # Draw the check indicator when the boolean value is set.
                val = self.last_val(item.pypilot_path)
                if (val is True):
                    self.invertrectangle(rectangle(0.8, (y + 0.07), 0.1, 0.07))
            elif (isinstance(item, RangeEdit) and (size[0] < 0.8)):
                # Defer slider drawing until the widest label width is known.
                maxsizeslider = max((size[0] + 0.02), maxsizeslider)
                sliders.append((item, y))
            y += 0.15
            if (y >= 0.9):
                break
        # Draw the sliders, scaled by the value's min/max metadata.
        for (item, y) in sliders:
            sliderarea = rectangle(maxsizeslider, (y + 0.05), (1 - maxsizeslider), 0.07)
            self.rectangle(sliderarea, 0.015)
            try:
                values = self.lcd.client.get_values()
                name = item.pypilot_path
                minv = values[name]['min']
                maxv = values[name]['max']
                # Normalized position of the current value in [0, 1].
                val = ((self.last_val(name, 0, 0) - minv) / (maxv - minv))
                if (val <= 0):
                    continue
                sliderarea.width *= val
                self.rectangle(sliderarea)
            except Exception as e:
                # Value metadata may be unavailable; leave the empty outline.
                pass
        # Highlight the selected row if it is currently visible.
        if (self.selection >= 0):
            y = ((0.15 * (self.selection - scroll)) + sy)
            if (y < 0.85):
                self.invertrectangle(rectangle(0, (y + 0.01), 1, 0.14))

    def process(self):
        # AUTO exits the menu tree entirely, back to the control page.
        if self.testkeydown(AUTO):
            self.lcd.menu = self.lcd.menu.mainmenu()
            return control(self.lcd)
        # Port keys move the highlight up, starboard keys down; both wrap.
        if (self.testkeydown(SMALL_PORT) or self.testkeydown(BIG_PORT)):
            self.selection -= 1
            if (self.selection < 0):
                self.selection = (len(self.items) - 1)
        elif (self.testkeydown(SMALL_STARBOARD) or self.testkeydown(BIG_STARBOARD)):
            self.selection += 1
            if (self.selection == len(self.items)):
                self.selection = 0
        elif self.testkeydown(MENU):
            # Activate the highlighted item: it becomes the new page.
            if ((self.selection >= 0) and (self.selection < len(self.items))):
                return self.items[self.selection]
            return
        # Clamp in case the item list shrank since the last interaction.
        if (self.selection >= len(self.items)):
            self.selection = (len(self.items) - 1)
        return super(menu, self).process()
class STM32F1xxAdc(QlPeripheral):
    """Minimal STM32F1xx ADC peripheral model.

    Reads return raw little-endian bytes from the backing register struct;
    writes to CR2 emulate calibration / software-start by clearing the
    trigger bits immediately (and raising the end-of-sequence flag).
    """

    class Type(ctypes.Structure):
        # ADC register map, in memory-map order.
        _fields_ = [('SR', ctypes.c_uint32), ('CR1', ctypes.c_uint32), ('CR2', ctypes.c_uint32), ('SMPR1', ctypes.c_uint32), ('SMPR2', ctypes.c_uint32), ('JOFR1', ctypes.c_uint32), ('JOFR2', ctypes.c_uint32), ('JOFR3', ctypes.c_uint32), ('JOFR4', ctypes.c_uint32), ('HTR', ctypes.c_uint32), ('LTR', ctypes.c_uint32), ('SQR1', ctypes.c_uint32), ('SQR2', ctypes.c_uint32), ('SQR3', ctypes.c_uint32), ('JSQR', ctypes.c_uint32), ('JDR1', ctypes.c_uint32), ('JDR2', ctypes.c_uint32), ('JDR3', ctypes.c_uint32), ('JDR4', ctypes.c_uint32), ('DR', ctypes.c_uint32)]

    def __init__(self, ql, label, intn=None):
        super().__init__(ql, label)
        # Data register preset to mid-scale of a 12-bit conversion (2047).
        self.instance = self.struct(DR=2047)
        # Interrupt number, if the ADC is wired to the interrupt controller.
        self.intn = intn

    # NOTE(review): stray empty-tuple statement below — likely a decorator
    # lost during extraction; confirm against the upstream source.
    ()

    def read(self, offset: int, size: int) -> int:
        """Return `size` bytes of the register file at `offset`, little-endian."""
        buf = ctypes.create_string_buffer(size)
        ctypes.memmove(buf, (ctypes.addressof(self.instance) + offset), size)
        return int.from_bytes(buf.raw, byteorder='little')

    # NOTE(review): another stray empty tuple — likely a mangled decorator.
    ()

    def write(self, offset: int, size: int, value: int):
        """Apply a register write; CR2 trigger bits self-clear as on hardware."""
        self.raw_write(offset, size, value)
        if (offset == self.struct.CR2.offset):
            if (value & ADC_CR2.RSTCAL):
                # Reset-calibration completes instantly: clear RSTCAL.
                self.instance.CR2 = (value & (~ ADC_CR2.RSTCAL))
            if (value & ADC_CR2.CAL):
                # Calibration completes instantly: clear CAL.
                self.instance.CR2 = (value & (~ ADC_CR2.CAL))
            if (value & ADC_CR2.SWSTART):
                # Software start: conversion is instantaneous; flag end-of-sequence.
                self.instance.SR |= ADC_SR.EOS
                self.instance.CR2 = (value & (~ ADC_CR2.SWSTART))
class VertexArray:
    """RAII-style wrapper around an OpenGL Vertex Array Object.

    Fixes restored from the mangled original: ``id`` must be a property
    (``__del__`` passes ``self.id`` as a value to ``delete_vao``) and
    ``unbind`` takes no ``self`` and so must be a staticmethod — without
    the decorator, calling it on an instance raised TypeError.
    """

    def __init__(self):
        # Remember the creating context so the VAO can be freed on it later.
        self._context = pyglet.gl.current_context
        self._id = GLuint()
        glGenVertexArrays(1, self._id)

    @property
    def id(self):
        """The integer OpenGL name of this VAO."""
        return self._id.value

    def bind(self):
        """Make this VAO the active vertex array."""
        glBindVertexArray(self._id)

    @staticmethod
    def unbind():
        """Unbind whatever VAO is currently active."""
        glBindVertexArray(0)

    def delete(self):
        """Explicitly free the VAO; the wrapper becomes unusable afterwards."""
        glDeleteVertexArrays(1, self._id)
        self._id = None

    # Context-manager support: `with vao:` binds on entry, unbinds on exit.
    __enter__ = bind

    def __exit__(self, *_):
        glBindVertexArray(0)

    def __del__(self):
        # Best-effort cleanup through the owning context; interpreter
        # shutdown may have torn modules down, hence the broad guard.
        if (self._id is not None):
            try:
                self._context.delete_vao(self.id)
                self._id = None
            except (ImportError, AttributeError):
                pass

    def __repr__(self):
        return '{}(id={})'.format(self.__class__.__name__, self._id.value)
def load_model(config, ckpt, gpu, eval_mode):
    """Instantiate the configured model, optionally restoring a checkpoint.

    Args:
        config: Config object whose ``model`` section describes the model.
        ckpt: Checkpoint path, or a falsy value to build from scratch.
        gpu: Whether to move the model to GPU.
        eval_mode: Whether to put the model into eval mode.

    Returns:
        Tuple ``(model, global_step)``; ``global_step`` is None when no
        checkpoint was supplied.
    """
    state_dict = None
    global_step = None
    if ckpt:
        # Always load onto CPU first; placement happens downstream.
        checkpoint = torch.load(ckpt, map_location='cpu')
        global_step = checkpoint['global_step']
        print(f'loaded model from global step {global_step}.')
        state_dict = checkpoint['state_dict']
    model = load_model_from_config(config.model, state_dict, gpu=gpu, eval_mode=eval_mode)['model']
    return (model, global_step)
def test_context_cosine(local_client, grpc_client):
    """Pure context discovery must agree between the gRPC and local clients."""

    def run_discover(client: QdrantBase, **kwargs: Dict[(str, Any)]) -> List[models.ScoredPoint]:
        # Context-only search (no target) against the 'image' vector.
        pair = models.ContextExamplePair(positive=10, negative=19)
        return client.discover(collection_name=COLLECTION_NAME, context=[pair], with_payload=True, limit=1000, using='image')

    for client in (grpc_client, local_client):
        compare_client_results(client, run_discover, is_context_search=True)
@pytest.mark.slow
@settings(deadline=None)
@given(chunk_size=integers(min_value=1, max_value=(2 ** 12)), mode=sampled_from(list(brotlicffi.BrotliEncoderMode)), quality=integers(min_value=0, max_value=11), lgwin=integers(min_value=10, max_value=24), lgblock=one_of(integers(min_value=0, max_value=0), integers(min_value=16, max_value=24)))
def test_streaming_compression_flush(one_compressed_file, chunk_size, mode, quality, lgwin, lgblock):
    """Streaming compression with a flush() before finish() still
    round-trips to the original file contents.

    NOTE(review): the three decorator lines had lost their heads in
    extraction (bare '.slow', '(deadline=None)', '(chunk_size=...)');
    restored as @pytest.mark.slow / @settings / @given — confirm against
    the upstream test module.
    """
    compressed_chunks = []
    c = brotlicffi.Compressor(mode=mode, quality=quality, lgwin=lgwin, lgblock=lgblock)
    with open(one_compressed_file, 'rb') as f:
        while True:
            next_data = f.read(chunk_size)
            if (not next_data):
                break
            compressed_chunks.append(c.compress(next_data))
    # flush() emits pending output without ending the stream; finish() ends it.
    compressed_chunks.append(c.flush())
    compressed_chunks.append(c.finish())
    decompressed = brotlicffi.decompress(b''.join(compressed_chunks))
    with open(one_compressed_file, 'rb') as f:
        assert (decompressed == f.read())
class Image(collections.namedtuple('Image', image_fields)):
    """An image record with tab-separated and JSON serializers."""

    def to_str_row(self):
        """Tab-separated row; literal tabs in the path are escaped."""
        safe_path = self.path.replace('\t', '\\t')
        return ('%d\t%d\t%d\t%s\t%s' % (self.width, self.height, self.file_size, self.type, safe_path))

    def to_str_row_verbose(self):
        """Like to_str_row, with the full repr appended after '##'."""
        safe_path = self.path.replace('\t', '\\t')
        return ('%d\t%d\t%d\t%s\t%s\t##%s' % (self.width, self.height, self.file_size, self.type, safe_path, self))

    def to_str_json(self, indent=None):
        """Serialize the record fields as a JSON object."""
        return json.dumps(self._asdict(), indent=indent)
class ListDatatableDataTest(unittest.TestCase):
    """Tests for Data.page against a Datatable (ZACKS/FC) using mocked HTTP.

    NOTE(review): several decorators appear to have been lost in extraction:
    setUpClass/tearDownClass presumably carried @classmethod, the broken
    `re.compile(` lines look like truncated HTTP-mock registration calls,
    the bare `('quandl.connection.Connection.request')` lines were
    @patch(...), and the `(['GET', 'POST'])` lines were parameterized
    decorators.  Confirm against the upstream test suite before relying on
    this file.
    """

    def setUpClass(cls):
        # Build a fake datatable payload (data + meta) for the mocked API.
        datatable_data = {'datatable': DatatableDataFactory.build()}
        meta = {'meta': DatatableMetaFactory.build()}
        datatable_data.update(meta)
        # NOTE(review): the two lines below are syntactically broken
        # (unterminated string literal); they look like mangled HTTP-mock
        # setup that should register `body=json.dumps(datatable_data)`.
        re.compile(' body=json.dumps(datatable_data))
        re.compile(' body=json.dumps(datatable_data))
        cls.expected_raw_data = []
        cls.expected_list_values = []

    def tearDownClass(cls):
        """NOTE(review): body lost in extraction — presumably tore down the HTTP mock."""

    def tearDown(self):
        # Restore the default request type so tests don't leak POST mode.
        RequestType.USE_GET_REQUEST = True

    # NOTE(review): likely a mangled @patch('quandl.connection.Connection.request').
    ('quandl.connection.Connection.request')
    def test_data_calls_connection_get(self, mock):
        # Filters and qopts must be flattened into GET query parameters.
        datatable = Datatable('ZACKS/FC')
        Data.page(datatable, params={'ticker': ['AAPL', 'MSFT'], 'per_end_date': {'gte': '2015-01-01'}, 'qopts': {'columns': ['ticker', 'per_end_date']}})
        expected = call('get', 'datatables/ZACKS/FC', params={'ticker[]': ['AAPL', 'MSFT'], 'per_end_date.gte': '2015-01-01', 'qopts.columns[]': ['ticker', 'per_end_date']})
        self.assertEqual(mock.call_args, expected)

    # NOTE(review): likely a mangled @patch('quandl.connection.Connection.request').
    ('quandl.connection.Connection.request')
    def test_data_calls_connection_post(self, mock):
        # In POST mode the filters go into the JSON body instead.
        RequestType.USE_GET_REQUEST = False
        datatable = Datatable('ZACKS/FC')
        Data.page(datatable, params={'ticker': ['AAPL', 'MSFT'], 'per_end_date': {'gte': '2015-01-01'}, 'qopts': {'columns': ['ticker', 'per_end_date']}})
        expected = call('post', 'datatables/ZACKS/FC', json={'ticker': ['AAPL', 'MSFT'], 'per_end_date.gte': '2015-01-01', 'qopts.columns': ['ticker', 'per_end_date']})
        self.assertEqual(mock.call_args, expected)

    # NOTE(review): likely a mangled parameterized decorator over GET/POST.
    (['GET', 'POST'])
    def test_values_and_meta_exist(self, request_method):
        if (request_method == 'POST'):
            RequestType.USE_GET_REQUEST = False
        datatable = Datatable('ZACKS/FC')
        results = Data.page(datatable, params={})
        self.assertIsNotNone(results.values)
        self.assertIsNotNone(results.meta)

    (['GET', 'POST'])
    def test_to_pandas_returns_pandas_dataframe_object(self, request_method):
        if (request_method == 'POST'):
            RequestType.USE_GET_REQUEST = False
        datatable = Datatable('ZACKS/FC')
        results = Data.page(datatable, params={})
        df = results.to_pandas()
        self.assertIsInstance(df, pandas.core.frame.DataFrame)

    (['GET', 'POST'])
    def test_pandas_dataframe_index_is_none(self, request_method):
        if (request_method == 'POST'):
            RequestType.USE_GET_REQUEST = False
        datatable = Datatable('ZACKS/FC')
        results = Data.page(datatable, params={})
        df = results.to_pandas()
        self.assertEqual(df.index.name, 'None')

    (['GET', 'POST'])
    def test_pandas_dataframe_date_field_is_datetime(self, request_method):
        # NOTE(review): pandas.datetime was removed in pandas >= 1.0;
        # this assertion only works on old pandas — verify before reuse.
        if (request_method == 'POST'):
            RequestType.USE_GET_REQUEST = False
        datatable = Datatable('ZACKS/FC')
        results = Data.page(datatable, params={})
        df = results.to_pandas()
        self.assertIsInstance(df['per_end_date'][0], pandas.datetime)
        self.assertIsInstance(df['per_end_date'][1], pandas.datetime)
        self.assertIsInstance(df['per_end_date'][2], pandas.datetime)
        self.assertIsInstance(df['per_end_date'][3], pandas.datetime)

    (['GET', 'POST'])
    def test_to_numpy_returns_numpy_object(self, request_method):
        if (request_method == 'POST'):
            RequestType.USE_GET_REQUEST = False
        datatable = Datatable('ZACKS/FC')
        results = Data.page(datatable, params={})
        data = results.to_numpy()
        self.assertIsInstance(data, numpy.core.records.recarray)

    (['GET', 'POST'])
    def test_to_csv_returns_expected_csv(self, request_method):
        if (request_method == 'POST'):
            RequestType.USE_GET_REQUEST = False
        datatable = Datatable('ZACKS/FC')
        results = Data.page(datatable, params={})
        data = results.to_csv()
        expected = (((('None,per_end_date,ticker,tot_oper_exp\n' + '0,2015-07-11,AAPL,456.9\n') + '1,2015-07-13,433.3,\n') + '2,2015-07-14,AAPL,419.1\n') + '3,2015-07-15,476.5,\n')
        self.assertEqual(data, expected)
def make_transform(dataset, transform_fragments, property_info_list, rule_selection_function, substructure_pat=None, min_radius=0, min_pairs=0, min_variable_size=0, min_constant_size=0, max_variable_size=9999, pool=None, cursor=None, explain=None):
    """Run the matched-molecular-pair transform search over query fragments.

    For each fragmentation of the query structure: prune by heavy-atom size
    bounds, look up database rules whose variable part matches under
    attachment-order permutations, collect compatible environments, weld
    candidate products (optionally filtered by a required substructure),
    and aggregate the matching rule environments per product SMILES.

    Args:
        dataset: mmpdb dataset wrapper (cursors, rule lookups).
        transform_fragments: fragment records of the query structure.
        property_info_list: properties to evaluate for each product.
        rule_selection_function: selects which rule statistics to report.
        substructure_pat: optional pattern every product must contain.
        min_radius: minimum environment radius; must be 0..5.
        min_pairs: minimum pair count passed through to product generation.
        min_variable_size / min_constant_size / max_variable_size:
            heavy-atom bounds used to prune fragments before matching.
        pool: optional multiprocessing pool for the weld-and-filter step.
        cursor: optional DB cursor; a fresh one is created when None.
        explain: diagnostic reporter; defaults to a no-op reporter.

    Returns:
        A TransformResult over the aggregated transform products.
    """
    if (explain is None):
        explain = reporters.no_explain
    if (cursor is None):
        cursor = dataset.get_cursor()
    assert (min_radius in (0, 1, 2, 3, 4, 5))
    # product SMILES -> set of (rule_id, variable_smiles, rule_env_id, is_reversed)
    product_rule_environment_table = defaultdict(set)
    to_weld = []
    for frag in transform_fragments:
        explain('Processing fragment %r', frag)
        # Size pruning: skip fragments outside the configured heavy-atom bounds.
        if (min_variable_size and (frag.variable_num_heavies < min_variable_size)):
            explain('  The %d heavy atoms of variable %r is below the --min-variable-size of %d. Skipping fragment.', frag.variable_num_heavies, frag.variable_smiles, min_variable_size)
            continue
        if (frag.variable_num_heavies > max_variable_size):
            explain('  The %d heavy atoms of variable %r is above the --max-variable-size of %d. Skipping fragment.', frag.variable_num_heavies, frag.variable_smiles, max_variable_size)
            continue
        if (min_constant_size and (frag.constant_num_heavies < min_constant_size)):
            explain('  The %d heavy atoms of constant %r is below the --min-constant-size of %d. Skipping fragment.', frag.constant_num_heavies, frag.constant_smiles, min_constant_size)
            continue
        if (frag.num_cuts > 1):
            # Reorder the dot-separated constant parts to match the
            # attachment order.  NOTE: mutates the fragment record in place.
            constant_fragments = frag.constant_smiles.split('.')
            new_constant_smiles = constant_fragments[int(frag.attachment_order[0])]
            new_constant_smiles += ('.' + constant_fragments[int(frag.attachment_order[1])])
            if (frag.num_cuts == 3):
                new_constant_smiles += ('.' + constant_fragments[int(frag.attachment_order[2])])
            frag.constant_smiles = new_constant_smiles
        # Find every permutation of the variable SMILES known to the database.
        query_possibilities = []
        for (permutation, permuted_variable_smiles) in enumerate_permutations(dataset, frag.variable_smiles):
            permuted_variable_smiles_id = dataset.get_rule_smiles_id(permuted_variable_smiles, cursor=cursor)
            if (permuted_variable_smiles_id is not None):
                explain('  variable %r matches SMILES %r (id %d)', frag.variable_smiles, permuted_variable_smiles, permuted_variable_smiles_id)
                query_possibilities.append((permutation, permuted_variable_smiles, permuted_variable_smiles_id))
            else:
                explain('  variable %r not found as SMILES %r', frag.variable_smiles, permuted_variable_smiles)
        if (not query_possibilities):
            explain('  No matching rule SMILES found. Skipping fragment.')
            continue
        explain('  Evaluating %d possible rule SMILES: %s', len(query_possibilities), sorted((x[0] for x in query_possibilities)))
        # Environment fingerprints of the constant part, at all radii >= min_radius.
        all_center_smarts_list = environment.compute_constant_center_smarts_list(frag.constant_smiles, min_radius=min_radius)
        for (permutation, permuted_variable_smiles, permuted_variable_smiles_id) in query_possibilities:
            explain('  Evaluate constant %r with permutation %r against rules using SMILES %s (%d)', frag.constant_smiles, permutation, permuted_variable_smiles, permuted_variable_smiles_id)
            possible_envs = environment.get_all_possible_smarts(all_center_smarts_list, frag.variable_symmetry_class, permutation)
            rows = dataset.find_rule_environments_for_transform(permuted_variable_smiles_id, sorted(possible_envs), max_variable_size=max_variable_size, cursor=cursor)
            to_weld.extend(((frag.constant_smiles, frag.variable_smiles, substructure_pat, row) for row in rows))
    # Weld products (serially or via the pool) and filter by substructure.
    if (pool is None):
        results = map(_weld_and_filter, to_weld)
    else:
        results = pool.imap(_weld_and_filter, to_weld, 20)
    for (frag_constant_smiles, frag_variable_smiles, row, product_smiles, passed_substructure_test) in results:
        (rule_id, rule_environment_id, other_constant_smiles, is_reversed) = row
        if (not passed_substructure_test):
            explain('  Skip rule %d: %r + %r -> %r; does not contain --substructure', rule_id, frag_constant_smiles, str(other_constant_smiles), product_smiles)
            continue
        product_rule_environment_table[product_smiles].add((rule_id, frag_variable_smiles, rule_environment_id, is_reversed))
        explain('  Rule %d: %r + %r -> %r', rule_id, frag_constant_smiles, str(other_constant_smiles), product_smiles)
    explain(('== Product SMILES in database: %d ==' % (len(product_rule_environment_table),)))
    transform_products = list(iter_transform_products(dataset, product_rule_environment_table, property_info_list, min_pairs, rule_selection_function, cursor, explain))
    return TransformResult(property_info_list, transform_products)
class EasyTag(BaseDbModel):
    """Per-channel easytag configuration stored in the ``easytags`` table.

    NOTE(review): the three accessors below carried no decorators in the
    extracted source, yet ``ignorerole`` reads ``self._guild.roles`` as an
    attribute — the @property decorators they clearly require are restored.
    """

    class Meta:
        table = 'easytags'

    id = fields.BigIntField(pk=True)
    guild_id = fields.BigIntField()
    channel_id = fields.BigIntField(index=True)
    # Whether the triggering message should be deleted after tagging.
    delete_after = fields.BooleanField(default=False)

    @property
    def _guild(self) -> Optional[discord.Guild]:
        """The owning guild, if the bot can currently see it."""
        return self.bot.get_guild(self.guild_id)

    @property
    def channel(self) -> Optional[discord.TextChannel]:
        """The channel easytags are active in, if cached."""
        return self.bot.get_channel(self.channel_id)

    @property
    def ignorerole(self) -> Optional[discord.Role]:
        """The guild's 'quotient-tag-ignore' role, if guild and role exist."""
        if self._guild is not None:
            return discord.utils.get(self._guild.roles, name='quotient-tag-ignore')
class TestReadOnly():
    """Unmarshalling behaviour for schemas containing readOnly properties."""

    def test_write_a_read_only_property(self, request_unmarshaller):
        """Sending a readOnly field in a request body must be rejected."""
        payload = json.dumps({'id': 10, 'name': 'Pedro'}).encode()
        request = MockRequest(host_url='', method='POST', path='/users', data=payload)
        result = request_unmarshaller.unmarshal(request)
        assert len(result.errors) == 1
        assert type(result.errors[0]) == InvalidRequestBody
        assert result.body is None

    def test_read_only_property_response(self, response_unmarshaller):
        """readOnly fields are accepted (and typed) in responses."""
        payload = json.dumps({'id': 10, 'name': 'Pedro'}).encode()
        request = MockRequest(host_url='', method='POST', path='/users')
        response = MockResponse(payload)
        result = response_unmarshaller.unmarshal(request, response)
        assert not result.errors
        # The body unmarshals into a generated dataclass named after the schema.
        assert is_dataclass(result.data)
        assert result.data.__class__.__name__ == 'User'
        assert result.data.id == 10
        assert result.data.name == 'Pedro'
class W_ChpContinuationMarkKey(W_InterposeContinuationMarkKey):
    """Chaperone variant of an interposed continuation-mark key.

    Unlike an impersonator, a chaperone must verify that the wrapper's
    result is a chaperone of the original value, hence the
    check_chaperone_results wrapping of both continuations.
    """
    import_from_mixin(ChaperoneMixin)

    def post_get_cont(self, value, env, cont):
        boxed = values.Values.make1(value)
        inner_cont = imp_cmk_post_get_cont(self.inner, env, cont)
        return check_chaperone_results(boxed, env, inner_cont)

    def post_set_cont(self, body, value, env, cont):
        boxed = values.Values.make1(value)
        inner_cont = imp_cmk_post_set_cont(body, self.inner, env, cont)
        return check_chaperone_results(boxed, env, inner_cont)
class Dictionary(object):
    """A mapping from symbols to consecutive integer ids.

    The first ``nspecial`` ids are reserved for the special symbols
    (bos, pad, eos, unk, plus any extras), added in a fixed order so
    their ids are stable.

    NOTE(review): ``load``, ``_add_file_to_dictionary_single_worker`` and
    ``add_file_to_dictionary`` take ``cls``/no ``self`` yet carried no
    decorators in the extracted source; the ``@classmethod`` /
    ``@staticmethod`` decorators they require (both helpers are invoked
    through the class) are restored here.
    """

    def __init__(self, pad='<pad>', eos='</s>', unk='<unk>', bos='<s>', extra_special_symbols=None):
        (self.unk_word, self.pad_word, self.eos_word) = (unk, pad, eos)
        self.symbols = []   # id -> symbol string
        self.count = []     # id -> occurrence count
        self.indices = {}   # symbol string -> id
        # Special symbols claim the first ids, in this fixed order.
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)  # count of reserved leading ids

    def __eq__(self, other):
        return (self.indices == other.indices)

    def __getitem__(self, idx):
        """Return the symbol for ``idx``; out-of-range ids map to unk."""
        if (idx < len(self.symbols)):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Total number of symbols, including the special ones."""
        return len(self.symbols)

    def __contains__(self, sym):
        return (sym in self.indices)

    def index(self, sym):
        """Return the id of ``sym``, or the unk id when unknown."""
        assert isinstance(sym, str)
        if (sym in self.indices):
            return self.indices[sym]
        return self.unk_index

    def string(self, tensor, bpe_symbol=None, escape_unk=False):
        """Convert a tensor of token ids back to a string.

        A 2-D tensor yields one line per row.  eos (and bos, when defined)
        tokens are dropped, unk is rendered via :meth:`unk_string`, and BPE
        continuation markers are removed when ``bpe_symbol`` is given.
        """
        if (torch.is_tensor(tensor) and (tensor.dim() == 2)):
            return '\n'.join((self.string(t, bpe_symbol, escape_unk) for t in tensor))

        def token_string(i):
            if (i == self.unk()):
                return self.unk_string(escape_unk)
            else:
                return self[i]
        # bos_index may be absent on legacy pickled dictionaries.
        if hasattr(self, 'bos_index'):
            sent = ' '.join((token_string(i) for i in tensor if ((i != self.eos()) and (i != self.bos()))))
        else:
            sent = ' '.join((token_string(i) for i in tensor if (i != self.eos())))
        return data_utils.process_bpe_symbol(sent, bpe_symbol)

    def unk_string(self, escape=False):
        """Return the unk symbol; escaped form distinguishes a literal
        occurrence of the unk word from a genuine unknown token."""
        if escape:
            return '<{}>'.format(self.unk_word)
        else:
            return self.unk_word

    def add_symbol(self, word, n=1):
        """Add ``word`` (or bump its count by ``n``) and return its id."""
        if (word in self.indices):
            idx = self.indices[word]
            self.count[idx] = (self.count[idx] + n)
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def update(self, new_dict):
        """Merge the symbols and counts of ``new_dict`` into this one."""
        for word in new_dict.symbols:
            idx2 = new_dict.indices[word]
            if (word in self.indices):
                idx = self.indices[word]
                self.count[idx] = (self.count[idx] + new_dict.count[idx2])
            else:
                idx = len(self.symbols)
                self.indices[word] = idx
                self.symbols.append(word)
                self.count.append(new_dict.count[idx2])

    def finalize(self, threshold=(- 1), nwords=(- 1), padding_factor=8):
        """Sort non-special symbols by descending count, drop entries below
        ``threshold`` or beyond ``nwords``, then pad with dummy symbols
        until the size is a multiple of ``padding_factor`` (GPU-friendly)."""
        if (nwords <= 0):
            nwords = len(self)
        new_indices = dict(zip(self.symbols[:self.nspecial], range(self.nspecial)))
        new_symbols = self.symbols[:self.nspecial]
        new_count = self.count[:self.nspecial]
        # Pre-sorting makes count ties break deterministically.
        c = Counter(dict(sorted(zip(self.symbols[self.nspecial:], self.count[self.nspecial:]))))
        for (symbol, count) in c.most_common((nwords - self.nspecial)):
            if (count >= threshold):
                new_indices[symbol] = len(new_symbols)
                new_symbols.append(symbol)
                new_count.append(count)
            else:
                break
        threshold_nwords = len(new_symbols)
        if (padding_factor > 1):
            i = 0
            while ((threshold_nwords % padding_factor) != 0):
                symbol = 'madeupword{:04d}'.format(i)
                new_indices[symbol] = len(new_symbols)
                new_symbols.append(symbol)
                new_count.append(0)
                i += 1
                threshold_nwords += 1
        assert ((len(new_symbols) % padding_factor) == 0)
        assert (len(new_symbols) == len(new_indices))
        self.count = list(new_count)
        self.symbols = list(new_symbols)
        self.indices = new_indices

    def bos(self):
        """Id of the beginning-of-sentence symbol."""
        return self.bos_index

    def pad(self):
        """Id of the padding symbol."""
        return self.pad_index

    def eos(self):
        """Id of the end-of-sentence symbol."""
        return self.eos_index

    def unk(self):
        """Id of the unknown-word symbol."""
        return self.unk_index

    @classmethod
    def load(cls, f):
        """Build a dictionary from a ``<symbol> <count>``-per-line file."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_from_file(self, f):
        """Add symbols and counts from ``f`` (a path or an open handle)."""
        if isinstance(f, str):
            try:
                with open(f, 'r', encoding='utf-8') as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(f))
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines)
        for line in lines[indices_start_line:]:
            # Split on the LAST space: symbols may themselves contain spaces.
            idx = line.rfind(' ')
            if (idx == (- 1)):
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            count = int(line[(idx + 1):])
            self.indices[word] = len(self.symbols)
            self.symbols.append(word)
            self.count.append(count)

    def _save(self, f, kv_iterator):
        # Given a path, create parent dirs and re-enter save() with a handle;
        # given a handle, write one "key value" pair per line.
        if isinstance(f, str):
            os.makedirs(os.path.dirname(f), exist_ok=True)
            with open(f, 'w', encoding='utf-8') as fd:
                return self.save(fd)
        for (k, v) in kv_iterator:
            print('{} {}'.format(k, v), file=f)

    def _get_meta(self):
        # Subclass hook: extra (keys, values) written before the symbols.
        return ([], [])

    def _load_meta(self, lines):
        # Subclass hook: number of leading metadata lines to skip on load.
        return 0

    def save(self, f):
        """Write the non-special symbols and their counts to ``f``."""
        (ex_keys, ex_vals) = self._get_meta()
        self._save(f, zip((ex_keys + self.symbols[self.nspecial:]), (ex_vals + self.count[self.nspecial:])))

    def dummy_sentence(self, length):
        """Random non-special token ids of ``length``, terminated with eos."""
        t = torch.Tensor(length).uniform_((self.nspecial + 1), len(self)).long()
        t[(- 1)] = self.eos()
        return t

    def encode_line(self, line, line_tokenizer=tokenize_line, add_if_not_exist=True, consumer=None, append_eos=True, reverse_order=False):
        """Tokenize ``line`` and return an IntTensor of token ids.

        Unknown words either receive fresh ids (``add_if_not_exist``) or map
        to unk; ``consumer`` is called with each (word, id) pair when given.
        """
        words = line_tokenizer(line)
        if reverse_order:
            words = list(reversed(words))
        nwords = len(words)
        ids = torch.IntTensor(((nwords + 1) if append_eos else nwords))
        for (i, word) in enumerate(words):
            if add_if_not_exist:
                idx = self.add_symbol(word)
            else:
                idx = self.index(word)
            if (consumer is not None):
                consumer(word, idx)
            ids[i] = idx
        if append_eos:
            ids[nwords] = self.eos_index
        return ids

    @staticmethod
    def _add_file_to_dictionary_single_worker(filename, tokenize, eos_word, worker_id=0, num_workers=1):
        """Count tokens (plus one eos per line) in one byte-range chunk of
        ``filename``; chunks are split evenly across ``num_workers``."""
        counter = Counter()
        with open(filename, 'r', encoding='utf-8') as f:
            size = os.fstat(f.fileno()).st_size
            chunk_size = (size // num_workers)
            offset = (worker_id * chunk_size)
            end = (offset + chunk_size)
            f.seek(offset)
            if (offset > 0):
                # Skip the (possibly partial) line at the seek point; the
                # previous worker's chunk covers it.
                safe_readline(f)
            line = f.readline()
            while line:
                for word in tokenize(line):
                    counter.update([word])
                counter.update([eos_word])
                if (f.tell() > end):
                    break
                line = f.readline()
        return counter

    @staticmethod
    def add_file_to_dictionary(filename, dict, tokenize, num_workers):
        """Count every token in ``filename`` (optionally across worker
        processes) and add the counts to ``dict``."""
        def merge_result(counter):
            for (w, c) in sorted(counter.items()):
                dict.add_symbol(w, c)
        if (num_workers > 1):
            pool = Pool(processes=num_workers)
            results = []
            for worker_id in range(num_workers):
                results.append(pool.apply_async(Dictionary._add_file_to_dictionary_single_worker, (filename, tokenize, dict.eos_word, worker_id, num_workers)))
            pool.close()
            pool.join()
            for r in results:
                merge_result(r.get())
        else:
            merge_result(Dictionary._add_file_to_dictionary_single_worker(filename, tokenize, dict.eos_word))
class KnownValues(unittest.TestCase):
    """Regression tests for semi-numerical exchange (SGX) J/K builds."""

    @staticmethod
    def _water_ccpvdz():
        """H2O / cc-pVDZ molecule shared by several tests."""
        mol = gto.Mole()
        mol.build(verbose=0, atom=[['O', (0.0, 0.0, 0.0)], [1, (0.0, (- 0.757), 0.587)], [1, (0.0, 0.757, 0.587)]], basis='ccpvdz')
        return mol

    def test_sgx_jk(self):
        """debug and non-debug SGX paths agree, and both approximate exact J/K."""
        mol = self._water_ccpvdz()
        dm = scf.UHF(mol).get_init_guess()
        (vjref, vkref) = scf.hf.get_jk(mol, dm)
        sgxobj = sgx.SGX(mol)
        sgxobj.grids = sgx_jk.get_gridss(mol, 0, 1e-10)
        # Exercise both J/K build strategies with identical expectations.
        for builder in (sgx_jk.get_jk_favork, sgx_jk.get_jk_favorj):
            with lib.temporary_env(sgxobj, debug=False):
                (vj, vk) = builder(sgxobj, dm)
            with lib.temporary_env(sgxobj, debug=True):
                (vj1, vk1) = builder(sgxobj, dm)
            # debug vs fast path: tight agreement; vs exact: loose agreement.
            self.assertAlmostEqual(abs(vj1 - vj).max(), 0, 9)
            self.assertAlmostEqual(abs(vk1 - vk).max(), 0, 9)
            self.assertAlmostEqual(abs(vjref - vj).max(), 0, 2)
            self.assertAlmostEqual(abs(vkref - vk).max(), 0, 2)

    def test_dfj(self):
        """SGX exchange combined with density-fitted Coulomb (dfj=True)."""
        mol = self._water_ccpvdz()
        nao = mol.nao
        numpy.random.seed(1)
        dm = numpy.random.random((nao, nao))
        dm = dm + dm.T
        mf = sgx.sgx_fit(scf.RHF(mol), 'weigend')
        mf.with_df.dfj = True
        mf.build()
        (vj, vk) = mf.get_jk(mol, dm)
        # NOTE(review): these reference fingerprints look truncated
        # (-19., -16.); confirm the full-precision values upstream.
        self.assertAlmostEqual(lib.finger(vj), (- 19.), 9)
        self.assertAlmostEqual(lib.finger(vk), (- 16.), 9)

    def test_rsh_get_jk(self):
        """Range-separated (omega != 0) J/K agrees with the exact builder."""
        mol = gto.M(verbose=0, atom='H 0 0 0; H 0 0 1', basis='ccpvdz')
        nao = mol.nao_nr()
        numpy.random.seed(1)
        dm = numpy.random.random((2, nao, nao))
        sgxobj = sgx.SGX(mol)
        sgxobj.grids = sgx_jk.get_gridss(mol, 0, 1e-07)
        (vj, vk) = sgxobj.get_jk(dm, hermi=0, omega=1.1)
        # NOTE(review): fingerprints below also look truncated (4., 8.).
        self.assertAlmostEqual(lib.finger(vj), 4., 9)
        self.assertAlmostEqual(lib.finger(vk), 8., 9)
        (vj1, vk1) = scf.hf.get_jk(mol, dm, hermi=0, omega=1.1)
        self.assertAlmostEqual(abs(vj - vj1).max(), 0, 2)
        self.assertAlmostEqual(abs(vk - vk1).max(), 0, 2)
def test_delete_user_policy(initialized_db, app):
    """Deleting a user autoprune policy removes it and writes an audit log."""
    policies = model.autoprune.get_namespace_autoprune_policies_by_orgname('devtable')
    assert len(policies) == 1
    policy_uuid = policies[0].uuid
    params = {'orgname': 'devtable', 'policy_uuid': policy_uuid}
    with client_with_identity('devtable', app) as cl:
        conduct_api_call(cl, UserAutoPrunePolicy, 'DELETE', params, expected_code=200)
        # The policy must be gone after deletion.
        conduct_api_call(cl, UserAutoPrunePolicy, 'GET', params, expected_code=404)
    logs = list(get_latest_logs_query(performer='devtable', namespace='devtable'))
    log_kinds = get_log_entry_kinds()
    wanted_kind = log_kinds['delete_namespace_autoprune_policy']
    # Find the audit entry recorded for the deletion.
    log = next((entry for entry in logs if entry.kind == wanted_kind), None)
    assert log is not None
    metadata = json.loads(log.metadata_json)
    assert metadata['policy_uuid'] == policy_uuid
    assert metadata['namespace'] == 'devtable'
def _parse_readme(lns):
    """Extract recognized key/value bullet entries from README lines.

    Bullet lines ('* ...') beginning with one of the known keys contribute
    an entry: 'download' takes the text inside the final parenthesized
    group (a markdown link target); 'dataset', 'model' and 'pre-processing'
    take everything after the first colon.  Later bullets overwrite earlier
    ones for the same key.

    Args:
        lns: Iterable of README lines.

    Returns:
        Dict mapping each recognized key to its extracted value.
    """
    subres = {}
    for ln in (x.strip() for x in lns):
        if not ln.startswith('*'):
            continue
        ln = ln[1:].strip()
        for k in ['download', 'dataset', 'models', 'model', 'pre-processing']:
            if ln.startswith(k):
                break
        else:
            # Bullet without a recognized key prefix.
            continue
        if k in ['dataset', 'model', 'pre-processing']:
            # split(':', 1) tolerates colons inside the value (e.g. URLs),
            # where the original two-way unpack raised ValueError.
            (_, v) = ln.split(':', 1)
            subres[k] = v
        elif k == 'download':
            # Text inside the trailing '(...)' of a markdown link.
            v = ln.split('(')[(- 1)][:(- 1)]
            subres[k] = v
        # NOTE(review): a 'models' prefix matches the key scan but neither
        # branch stores it (and it shadows 'model' lines) — preserved as-is;
        # confirm whether that is intentional.
    return subres
class TestVectorizedSymbolLookup(WithAssetFinder, ZiplineTestCase):
    """Tests for AssetFinder.lookup_symbols (vectorized symbol resolution).

    NOTE(review): decorators appear to have been stripped from this chunk —
    `make_equity_info` takes `cls` with no visible `@classmethod`, and the
    bare `_space(...)` statement looks like the remains of a
    `@parameter_space(...)` decorator for `test_lookup_symbols`; confirm
    against the original file.
    """
    def make_equity_info(cls):
        # Helper for UTC timestamps to keep the asset records compact.
        T = partial(pd.Timestamp, tz='UTC')
        def asset(sid, symbol, start_date, end_date):
            return dict(sid=sid, symbol=symbol, start_date=T(start_date), end_date=T(end_date), exchange='NYSE')
        # Symbols 'A' and 'B' are reused across disjoint date ranges so the
        # as-of date disambiguates which sid a symbol resolves to.
        records = [asset(1, 'A', '2014-01-02', '2014-01-31'), asset(2, 'A', '2014-02-03', '2015-01-02'), asset(3, 'B', '2014-01-02', '2014-01-15'), asset(4, 'B', '2014-01-17', '2015-01-02'), asset(5, 'C', '2001-01-02', '2015-01-02'), asset(6, 'D', '2001-01-02', '2015-01-02'), asset(7, 'FUZZY', '2001-01-02', '2015-01-02')]
        return pd.DataFrame.from_records(records)
    _space(as_of=pd.to_datetime(['2014-01-02', '2014-01-15', '2014-01-17', '2015-01-02'], utc=True), symbols=[[], ['A'], ['B'], ['C'], ['D'], list('ABCD'), list('ABCDDCBA'), list('AABBAABBACABD')])
    def test_lookup_symbols(self, as_of, symbols):
        """Vectorized lookup_symbols must agree element-wise with lookup_symbol."""
        af = self.asset_finder
        expected = [af.lookup_symbol(symbol, as_of) for symbol in symbols]
        result = af.lookup_symbols(symbols, as_of)
        assert_equal(result, expected)
    def test_fuzzy(self):
        """fuzzy=True resolves 'FUZZ.Y' to the 'FUZZY' asset; without it the
        lookup raises SymbolNotFound."""
        af = self.asset_finder
        syms = ['A', 'B', 'FUZZ.Y']
        dt = pd.Timestamp('2014-01-15', tz='UTC')
        with self.assertRaises(SymbolNotFound):
            af.lookup_symbols(syms, pd.Timestamp('2014-01-15', tz='UTC'))
        with self.assertRaises(SymbolNotFound):
            af.lookup_symbols(syms, pd.Timestamp('2014-01-15', tz='UTC'), fuzzy=False)
        results = af.lookup_symbols(syms, dt, fuzzy=True)
        assert_equal(results, af.retrieve_all([1, 3, 7]))
        # Must also match the scalar fuzzy lookup per symbol.
        assert_equal(results, [af.lookup_symbol(sym, dt, fuzzy=True) for sym in syms])
def nr_e2(eri, mo_coeff, orbs_slice, aosym='s1', mosym='s1', out=None, ao_loc=None):
    """Half-transform the second (ket) AO index pair of `eri` to the MO basis.

    Computes out[i, kl] = sum_{pq} eri[i, pq] * C[p, k] * C[q, l] for the MO
    range given by `orbs_slice` = (k0, k1, l0, l1), dispatching to a C driver
    (`AO2MOnr_e2_drv`) with the fmmm/ftrans routines selected from the AO
    (`aosym`) and MO (`mosym`) permutation symmetries.

    NOTE(review): `mosym='s2'` stores only the lower-triangular kl pairs and
    requires a square MO block (kc == lc).  When `ao_loc` is given, the
    "sortrans" variants are used — presumably for shell-sorted integrals;
    confirm against the libao2mo documentation.
    """
    assert eri.flags.c_contiguous
    assert (aosym in ('s4', 's2ij', 's2kl', 's2', 's1'))
    assert (mosym in ('s2', 's1'))
    # The C kernels expect column-major (Fortran) MO coefficients of doubles.
    mo_coeff = numpy.asfortranarray(mo_coeff)
    assert (mo_coeff.dtype == numpy.double)
    nao = mo_coeff.shape[0]
    (k0, k1, l0, l1) = orbs_slice
    kc = (k1 - k0)
    lc = (l1 - l0)
    kl_count = (kc * lc)
    if (aosym in ('s4', 's2', 's2kl')):
        if (mosym == 's2'):
            fmmm = _fpointer('AO2MOmmm_nr_s2_s2')
            assert (kc == lc)
            # Triangular storage: only kl pairs with k >= l are kept.
            kl_count = ((kc * (kc + 1)) // 2)
        elif (kc <= lc):
            fmmm = _fpointer('AO2MOmmm_nr_s2_iltj')
        else:
            fmmm = _fpointer('AO2MOmmm_nr_s2_igtj')
    elif (kc <= lc):
        fmmm = _fpointer('AO2MOmmm_nr_s1_iltj')
    else:
        fmmm = _fpointer('AO2MOmmm_nr_s1_igtj')
    nrow = eri.shape[0]
    # Reuse caller-provided memory when `out` is given (ndarray with buffer).
    out = numpy.ndarray((nrow, kl_count), buffer=out)
    if (out.size == 0):
        return out
    if (ao_loc is None):
        # Null pointer / zero nbas: the driver ignores shell information.
        pao_loc = ctypes.POINTER(ctypes.c_void_p)()
        c_nbas = ctypes.c_int(0)
        ftrans = _fpointer(('AO2MOtranse2_nr_' + aosym))
    else:
        ao_loc = numpy.asarray(ao_loc, dtype=numpy.int32)
        c_nbas = ctypes.c_int((ao_loc.shape[0] - 1))
        pao_loc = ao_loc.ctypes.data_as(ctypes.c_void_p)
        ftrans = _fpointer(('AO2MOsortranse2_nr_' + aosym))
    fdrv = getattr(libao2mo, 'AO2MOnr_e2_drv')
    fdrv(ftrans, fmmm, out.ctypes.data_as(ctypes.c_void_p), eri.ctypes.data_as(ctypes.c_void_p), mo_coeff.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(nrow), ctypes.c_int(nao), (ctypes.c_int * 4)(*orbs_slice), pao_loc, c_nbas)
    return out
def copy_files_from_dict(all_files, target_dir, dest_dir):
    """Copy downloaded task files into dest_dir under task/file-id prefixed names.

    Files whose extension is not in SAFE_EXTS are given an extra '.r' suffix
    so they are not opened by file association.  Existing destinations are
    left untouched and copy failures are silently skipped (best effort).
    """
    for task_id, files in all_files.items():
        for file_id, names in files.items():
            src = os.path.join(target_dir, 'GetFiles', names[LOCAL_NAME_KEY])
            base = os.path.basename(names[REMOTE_NAME_KEY])
            ext = os.path.splitext(base)[1]
            if ext != '' and ext.lower() not in SAFE_EXTS:
                base += '.r'
            dest = os.path.join(dest_dir, '%s_%s_%s' % (task_id, file_id, base))
            if os.path.exists(dest):
                continue
            try:
                shutil.copyfile(src, dest)
            except IOError:
                # Deliberate best-effort: an uncopyable file is just skipped.
                pass
class CoreClient(rpc.TCPClient):
    """TCP RPC client for the VXI-11 core channel (DEVICE_CORE program).

    Every method is a thin wrapper around ``make_call``: it pairs an RPC
    procedure id with the matching ``Vxi11Packer`` argument packer and
    ``Vxi11Unpacker`` reply unpacker.  The pack/unpack pairing per call is
    the wire contract — do not reorder the parameter tuples.
    """
    def __init__(self, host, open_timeout=5000):
        self.packer = Vxi11Packer()
        self.unpacker = Vxi11Unpacker('')
        super(CoreClient, self).__init__(host, DEVICE_CORE_PROG, DEVICE_CORE_VERS, open_timeout)
    def create_link(self, id, lock_device, lock_timeout, name):
        """Open a link to the named device; returns the create_link reply tuple."""
        params = (id, lock_device, lock_timeout, name)
        try:
            return self.make_call(CREATE_LINK, params, self.packer.pack_create_link_parms, self.unpacker.unpack_create_link_resp)
        except socket.timeout:
            # Mimic the reply shape (error, link, abort_port, max_recv_size).
            return (ErrorCodes.device_not_accessible, None, None, None)
    def device_write(self, link, io_timeout, lock_timeout, flags, data):
        """Write `data` to the device; returns (error, size)."""
        params = (link, io_timeout, lock_timeout, flags, data)
        try:
            return self.make_call(DEVICE_WRITE, params, self.packer.pack_device_write_parms, self.unpacker.unpack_device_write_resp)
        except socket.timeout as e:
            return (ErrorCodes.io_error, e.args[0])
    def device_read(self, link, request_size, io_timeout, lock_timeout, flags, term_char):
        """Read up to request_size bytes; returns (error, reason, data)."""
        params = (link, request_size, io_timeout, lock_timeout, flags, term_char)
        try:
            return self.make_call(DEVICE_READ, params, self.packer.pack_device_read_parms, self.unpacker.unpack_device_read_resp)
        except socket.timeout as e:
            return (ErrorCodes.io_error, e.args[0], '')
    def device_read_stb(self, link, flags, lock_timeout, io_timeout):
        """Read the device status byte."""
        params = (link, flags, lock_timeout, io_timeout)
        return self.make_call(DEVICE_READSTB, params, self.packer.pack_device_generic_parms, self.unpacker.unpack_device_read_stb_resp)
    def device_trigger(self, link, flags, lock_timeout, io_timeout):
        """Send a group-execute-trigger to the device."""
        params = (link, flags, lock_timeout, io_timeout)
        return self.make_call(DEVICE_TRIGGER, params, self.packer.pack_device_generic_parms, self.unpacker.unpack_device_error)
    def device_clear(self, link, flags, lock_timeout, io_timeout):
        """Send device clear."""
        params = (link, flags, lock_timeout, io_timeout)
        return self.make_call(DEVICE_CLEAR, params, self.packer.pack_device_generic_parms, self.unpacker.unpack_device_error)
    def device_remote(self, link, flags, lock_timeout, io_timeout):
        """Place the device in remote mode."""
        params = (link, flags, lock_timeout, io_timeout)
        return self.make_call(DEVICE_REMOTE, params, self.packer.pack_device_generic_parms, self.unpacker.unpack_device_error)
    def device_local(self, link, flags, lock_timeout, io_timeout):
        """Place the device in local mode."""
        params = (link, flags, lock_timeout, io_timeout)
        return self.make_call(DEVICE_LOCAL, params, self.packer.pack_device_generic_parms, self.unpacker.unpack_device_error)
    def device_lock(self, link, flags, lock_timeout):
        """Acquire the device lock."""
        params = (link, flags, lock_timeout)
        return self.make_call(DEVICE_LOCK, params, self.packer.pack_device_lock_parms, self.unpacker.unpack_device_error)
    def device_unlock(self, link):
        """Release the device lock."""
        return self.make_call(DEVICE_UNLOCK, link, self.packer.pack_device_link, self.unpacker.unpack_device_error)
    def device_enable_srq(self, link, enable, handle):
        """Enable/disable service-request notifications with an opaque handle."""
        params = (link, enable, handle)
        return self.make_call(DEVICE_ENABLE_SRQ, params, self.packer.pack_device_enable_srq_parms, self.unpacker.unpack_device_error)
    def device_docmd(self, link, flags, io_timeout, lock_timeout, cmd, network_order, datasize, data_in):
        """Execute a low-level interface command (device_docmd)."""
        params = (link, flags, io_timeout, lock_timeout, cmd, network_order, datasize, data_in)
        return self.make_call(DEVICE_DOCMD, params, self.packer.pack_device_docmd_parms, self.unpacker.unpack_device_docmd_resp)
    def destroy_link(self, link):
        """Close the device link."""
        return self.make_call(DESTROY_LINK, link, self.packer.pack_device_link, self.unpacker.unpack_device_error)
    def create_intr_chan(self, host_addr, host_port, prog_num, prog_vers, prog_family):
        """Create the interrupt channel back to this host.

        NOTE(review): this packs with pack_device_docmd_parms, whose field
        layout differs from the create_intr_chan parameter list — looks like
        it should use a dedicated remote-func packer; confirm against the
        VXI-11 specification before relying on interrupts.
        """
        params = (host_addr, host_port, prog_num, prog_vers, prog_family)
        return self.make_call(CREATE_INTR_CHAN, params, self.packer.pack_device_docmd_parms, self.unpacker.unpack_device_error)
    def destroy_intr_chan(self):
        """Tear down the interrupt channel (no arguments on the wire)."""
        return self.make_call(DESTROY_INTR_CHAN, None, None, self.unpacker.unpack_device_error)
def to_tensor(data):
    """Convert `data` to a :class:`torch.Tensor`.

    Supported inputs: torch.Tensor (returned as-is), numpy.ndarray,
    non-string sequences, int and float.  Anything else raises TypeError.
    """
    if isinstance(data, torch.Tensor):
        return data
    if isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    if isinstance(data, Sequence) and not mmcv.is_str(data):
        return torch.tensor(data)
    if isinstance(data, int):
        return torch.LongTensor([data])
    if isinstance(data, float):
        return torch.FloatTensor([data])
    raise TypeError(f'type {type(data)} cannot be converted to tensor.')
class Speech2Text2Processor(ProcessorMixin):
    """Bundles a feature extractor and a Speech2Text2 tokenizer behind one
    callable, delegating `__call__` to whichever is the "current" processor.
    """
    feature_extractor_class = 'AutoFeatureExtractor'
    tokenizer_class = 'Speech2Text2Tokenizer'
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # By default, calls process audio inputs via the feature extractor.
        self.current_processor = self.feature_extractor
    def __call__(self, *args, **kwargs):
        """Forward to the current processor (feature extractor or tokenizer)."""
        return self.current_processor(*args, **kwargs)
    def batch_decode(self, *args, **kwargs):
        """Delegate to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        """Delegate to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
    def as_target_processor(self):
        """Temporarily route `__call__` through the tokenizer (for labels).

        NOTE(review): this body uses `yield` but no `@contextmanager`
        decorator is visible here — decorators appear stripped in this chunk;
        confirm the original declares it as a context manager, otherwise
        calling it only creates a generator and never swaps the processor.
        """
        self.current_processor = self.tokenizer
        (yield)
        self.current_processor = self.feature_extractor
class Gate():
    """One element of the circuit being typeset: a gate, label, measurement,
    wire start/end, permutation, phantom spacer, etc.

    Instances classify themselves by `gate_type`, normalize their target and
    control wire lists, record pending wire color/style/type changes, are
    scheduled into a depth column by `do_gate`, and finally emit TikZ via
    `draw_gate`.  The class reads and mutates several module-level globals
    (`wires`, `level_stack`, `orientation`, `master_depth_list`, ...), so
    construction and drawing order matter.
    """
    def __init__(self, gate_type, targets, args, boxes, options={}, comments=None):
        """Classify the gate and normalize its wires.

        NOTE(review): `options={}` is a mutable default argument; it appears
        to be read-only here (deep-copied into self.options, then only
        .get/.keys lookups), but confirm no caller relies on sharing it.
        """
        global orientation
        self.type = gate_type
        self.position_list = []
        self.comments = comments
        # Pending per-wire attribute changes, applied when positioned.
        self.wire_color_changes = {}
        self.wire_style_changes = {}
        self.wire_type_changes = {}
        self.change_to_classical = 0
        self.options = copy.deepcopy(options)
        self.boxes = []
        self.already_drawn = 0
        # Per-type interpretation of `targets`/`args`/`boxes`.
        if (self.type == 'G'):
            self.boxes = boxes
            self.controls = copy.copy(args)
            self.targets = []
        elif (self.type == 'LABEL'):
            self.targets = copy.copy(targets)
            self.controls = []
            if (len(args) == 0):
                self.labels = ['']
            else:
                self.labels = copy.copy(args)
            for i in range(len(self.labels)):
                if (self.labels[i] == '...'):
                    self.labels[i] = '\\cdots'
        elif (self.type in EQUALS):
            if (len(args) == 0):
                if (orientation == 'vertical'):
                    self.label_string = '\\rotatebox{-90}{$=$}'
                else:
                    self.label_string = '$=$'
            else:
                self.label_string = args[0]
            self.targets = copy.copy(targets)
            self.controls = []
            if (self.type != '='):
                add_to_predocument('decorate')
        elif (self.type == 'PHANTOM'):
            self.targets = copy.copy(targets)
            self.controls = []
        elif (self.type in ['IN', 'OUT']):
            self.targets = copy.copy(targets)
            self.name = args[0]
            self.controls = []
        elif (self.type in ['START', 'END']):
            self.targets = copy.copy(targets)
            self.controls = []
        elif (self.type in ['M', '/']):
            if ('operator' in options.keys()):
                self.name = options['operator']
            elif (len(args) == 0):
                self.name = None
            else:
                self.name = args[0]
            self.controls = []
            self.targets = copy.copy(targets)
        else:
            self.controls = copy.copy(args)
            self.targets = copy.copy(targets)
        # De-duplicate targets, then controls (a wire that is already a
        # target is dropped from the controls).
        save_targets = self.targets
        self.targets = []
        for wn in save_targets:
            if (wn not in self.targets):
                self.targets.append(wn)
        save_controls = self.controls
        self.controls = []
        for wn in save_controls:
            if ((wn not in self.targets) and (wn not in self.controls)):
                self.controls.append(wn)
        # Inherit defaults from the innermost enclosing level, if any.
        if level_stack:
            default_color = level_stack[(- 1)]['color']
            default_style = level_stack[(- 1)]['style']
            default_fill = level_stack[(- 1)]['fill']
        else:
            default_color = None
            default_style = None
            default_fill = bgcolor
        self.color = options.get('color', default_color)
        self.style = options.get('style', default_style)
        self.fill = options.get('fill', default_fill)
        length = options.get('length', None)
        breadth = options.get('breadth', None)
        size = options.get('size', None)
        # A NOT/control dot ('N') widens to the largest per-wire dot size.
        if ((self.type == 'N') and (length == None)):
            max_size = options.get('size', 6)
            for wn in (self.controls + self.targets):
                max_size = max(max_size, options['wires'].get(wn, {}).get('size', 0))
            if (max_size > 6):
                length = max_size
        self.specified_length = length
        self.specified_breadth = breadth
        self.check_wires()
        # Measurement converts its target wires to classical unless a type
        # was explicitly set for them.
        if (self.type == 'M'):
            for w in self.targets:
                if (not options['wires'].get(w, {}).get('type', None)):
                    self.change_wires([w], dict(type='c'), maybe=1)
            self.controls = []
        if (self.type == 'IN'):
            self.change_wires(self.targets, dict(type='q'), maybe=1)
        if (self.type == 'START'):
            for wn in self.targets:
                if (wires[wn].start_at_start_of_circuit and (not wires[wn].explicit_end_seen)):
                    # Wire was implicitly started: retroactively blank its
                    # beginning ('o') and start it here with its old type.
                    start_type = wires[wn].type_info[0]
                    wires[wn].type_info[0] = 'o'
                    wires[wn].start_at_start_of_circuit = 0
                    self.change_wires([wn], dict(type=start_type), maybe=1)
                else:
                    self.change_wires([wn], dict(type='q'), maybe=1)
        if (self.type in ['OUT', 'END']):
            self.change_wires(self.targets, dict(type='o'), maybe=1)
        if (self.type == 'END'):
            for wn in self.targets:
                wires[wn].explicit_end_seen = 1
        self.pi = None
        # Remember the source line for error messages and draw-time echo.
        self.input_line = current_input_line
        self.input_line_num = line_num
        # match_type drives column sharing: N-gates match by arity, boxed
        # gates match other boxed gates, everything else matches by type.
        if (self.type == 'N'):
            self.match_type = (len(self.targets) + len(self.controls))
        elif self.boxes:
            self.match_type = 'G'
        else:
            self.match_type = self.type
    def match(self, the_depth):
        """Return 1 if this gate may share the given depth column."""
        other_type = the_depth.get_type()
        if (self.match_type == 'PHANTOM'):
            return 1
        if isinstance(self.match_type, int):
            if (other_type == self.match_type):
                return 1
            else:
                return 0
        elif (self.match_type == 'G'):
            if (other_type == 'G'):
                return 1
            else:
                return 0
        elif ((other_type == 'G') or isinstance(other_type, int)):
            return 0
        else:
            return 1
    def fix_targets(self):
        """Expand empty target lists to all wires and broadcast labels."""
        if (self.type in (['LABEL', 'PHANTOM'] + EQUALS)):
            if (not self.targets):
                self.targets = copy.copy(wires_in_order)
        if (self.type == 'LABEL'):
            if (len(self.labels) == 1):
                self.labels = ([self.labels[0]] * len(self.targets))
            if (len(self.labels) != len(self.targets)):
                sys.exit((('Error: Line %i: Wrong number of labels in: ' % self.input_line_num) + ' '.join(self.labels)))
    def change_wires(self, raw_wire_names, options, maybe=0):
        """Record pending color/style/type changes for the named wires.

        With maybe=1, an existing pending type change is not overwritten.
        """
        wire_names = [get_wire_name(wn, return_prefix=0) for wn in raw_wire_names]
        did_something = 0
        if options.get('color', None):
            for wn in wire_names:
                self.wire_color_changes[wires[wn]] = options['color']
            did_something = 1
        if options.get('style', None):
            for wn in wire_names:
                self.wire_style_changes[wires[wn]] = options['style']
            did_something = 1
        if options.get('type', None):
            any_changes = 0
            for wn in wire_names:
                if (maybe and (wires[wn] in self.wire_type_changes)):
                    pass
                else:
                    self.wire_type_changes[wires[wn]] = options['type']
                    any_changes = 1
            # A quantum->classical conversion widens the drawn gate.
            if ((options['type'] == 'c') and any_changes):
                self.change_to_classical = 1
            if any_changes:
                did_something = 1
        if (not did_something):
            pass
    def all_wires(self):
        """Return every wire this gate touches (targets, controls, box targets).

        Wire-less LABEL/TOUCH/EQUALS gates span every known wire.
        """
        wires_to_use = []
        if self.targets:
            wires_to_use.extend(self.targets)
        if self.controls:
            wires_to_use.extend(self.controls)
        if self.boxes:
            for box in self.boxes:
                wires_to_use.extend(box.targets)
        if (not wires_to_use):
            if (self.type in (['LABEL', 'TOUCH'] + EQUALS)):
                wires_to_use.extend(list(wires.keys()))
        return wires_to_use
    def any_wires(self):
        """Return 1 if any wires were explicitly given to this gate."""
        if (self.targets or self.controls or self.boxes):
            return 1
        return 0
    def max_depth(self):
        """Return the deepest current depth among this gate's wires."""
        if (self.type == 'PHANTOM'):
            return (- 1)
        return max([wires[w].get_depth() for w in self.all_wires()])
    def check_wires(self):
        """Resolve wire names, drop duplicates covered by boxes, and apply
        per-wire options from self.options['wires']."""
        global auto_wires, new_wire_depth
        input_targets = copy.copy(self.targets)
        input_controls = copy.copy(self.controls)
        box_targets = []
        for b in self.boxes:
            box_targets.extend(copy.copy(b.targets))
        self.targets = []
        self.controls = []
        for w in input_targets:
            (wname, wprefix) = get_wire_name(w)
            if (wname in box_targets):
                continue
            self.targets.append(wname)
        for w in input_controls:
            (wname, wprefix) = get_wire_name(w)
            if ((wname in box_targets) or (wname in self.targets)):
                continue
            self.controls.append(wname)
        my_wires = ((box_targets + self.controls) + self.targets)
        # Re-key options given under unresolved wire spellings onto the
        # canonical wire names.
        named_opts = copy.copy(self.options['wires'])
        for wstr in named_opts.keys():
            if (self.options['wires'][wstr] and (wstr not in my_wires)):
                wname = get_wire_name(wstr, return_prefix=0, create_wire=0)
                if (wname != None):
                    self.options['wires'][wname] = self.options['wires'][wstr]
        for g in my_wires:
            if self.options['wires'].get(g, None):
                self.change_wires([g], self.options['wires'][g])
    def min_and_max_wires(self, pos, quantum_only=0):
        """Return the (lowest, highest) wires the gate spans at `pos`.

        With quantum_only=1, non-quantum wires are skipped; if none qualify,
        the first named wire is returned for both ends.
        """
        global circuit_bottom, circuit_top
        names_to_use = (self.targets + self.controls)
        for box in self.boxes:
            names_to_use.extend(box.targets)
        minval = circuit_top
        maxval = circuit_bottom
        for wn in names_to_use:
            if (quantum_only and (wires[wn].get_type(pos) != 'q')):
                continue
            if (wires[wn].location(pos) < minval):
                minval = wires[wn].location(pos)
                argmin = wires[wn]
            if (wires[wn].location(pos) > maxval):
                maxval = wires[wn].location(pos)
                argmax = wires[wn]
        if (quantum_only and (maxval == circuit_bottom)):
            return (wires[names_to_use[0]], wires[names_to_use[0]])
        return (argmin, argmax)
    def set_depth(self, d):
        """Advance every touched wire to depth d; wire-less LABEL/TOUCH/EQUALS
        instead set the depth for wires created later."""
        global new_wire_depth
        if (self.type == 'PHANTOM'):
            return
        else:
            for w in self.all_wires():
                wires[w].set_depth(d)
        if ((self.type in (['LABEL', 'TOUCH'] + EQUALS)) and (not self.any_wires())):
            new_wire_depth = d
    def attach_to(self, g):
        """Defer drawing of this gate until gate `g` has been drawn."""
        self.options['attach_to'] = g
    def set_position(self, pos, direction=1):
        """Record a drawing position and apply/revert pending wire changes.

        direction=1 applies the recorded changes; any other value restores
        each wire's attribute from just before this gate's first position.
        """
        for w in self.wire_color_changes:
            if (direction == 1):
                new_color = self.wire_color_changes[w]
            else:
                new_color = w.get_color((self.position_list[0][0] - 0.001))
            w.change_color(pos, new_color)
        for w in self.wire_style_changes:
            if (direction == 1):
                new_style = self.wire_style_changes[w]
            else:
                new_style = w.get_style((self.position_list[0][0] - 0.001))
            w.change_style(pos, new_style)
        for w in self.wire_type_changes:
            if (direction == 1):
                new_type = self.wire_type_changes[w]
            else:
                new_type = w.get_type((self.position_list[0][0] - 0.001))
            w.change_type(pos, new_type)
        if (self.type == 'PERMUTE'):
            start_pos = (pos - (0.5 * self.get_length()))
            end_pos = (pos + (0.5 * self.get_length()))
            self.permute_wires(start_pos, end_pos, direction)
        self.position_list.append((pos, direction))
    def get_ranges(self, pos):
        """Return merged (start, end) breadth intervals covered by the gate's
        wires at `pos`; adjacent/overlapping intervals are coalesced."""
        raw_starts = []
        raw_ends = []
        for w in self.all_wires():
            loc = wires[w].location(pos)
            breadth = wires[w].get_breadth()
            raw_starts.append((loc - (0.5 * breadth)))
            raw_ends.append((loc + (0.5 * breadth)))
        raw_starts.sort()
        raw_ends.sort()
        starts = [raw_starts[0]]
        ends = []
        for i in range((len(raw_starts) - 1)):
            if (abs((raw_ends[i] - raw_starts[(i + 1)])) > 0.0001):
                starts.append(raw_starts[(i + 1)])
                ends.append(raw_ends[i])
        ends.append(raw_ends[(- 1)])
        ranges = [(starts[i], ends[i]) for i in range(len(starts))]
        return ranges
    def set_pi(self, pos):
        """Build the permutation maps pi / pi_inverse from the targets'
        breadthwise order at `pos` to their listed order."""
        global wires
        sorted_targets = copy.copy(self.targets)
        sorted_targets.sort(key=(lambda wn: wires[wn].location(pos)))
        sorted_targets.reverse()
        self.pi = {}
        self.pi_inverse = {}
        for i in range(len(sorted_targets)):
            self.pi[sorted_targets[i]] = self.targets[i]
            self.pi_inverse[self.targets[i]] = sorted_targets[i]
    def permute_wires(self, start_pos, end_pos, direction):
        """Reposition wires between start_pos and end_pos according to pi
        (direction=1) or pi_inverse (otherwise), keeping bystander wires in
        place and inserting corner points where locations change."""
        global wires
        if (not self.pi):
            self.set_pi(start_pos)
        (minw, maxw) = self.min_and_max_wires(start_pos)
        minloc = minw.location(start_pos)
        maxloc = maxw.location(start_pos)
        affected = []
        for w in wires.values():
            if ((w.location(start_pos) >= minloc) and (w.location(start_pos) <= maxloc)):
                affected.append(w)
        affected.sort(key=(lambda w: w.location(start_pos)))
        new_order = []
        for w in affected:
            if (w.name in self.targets):
                if (direction == 1):
                    new_order.append(wires[self.pi[w.name]])
                else:
                    new_order.append(wires[self.pi_inverse[w.name]])
            else:
                new_order.append(w)
        next_wire_begin = (minw.location(start_pos) - (0.5 * minw.get_breadth()))
        for w in new_order:
            w_breadth = w.get_breadth()
            old_location = w.location(start_pos)
            new_location = (next_wire_begin + (0.5 * w_breadth))
            # Three set_location calls give the wire a mid-point so the bend
            # is drawn symmetrically between start and end.
            w.set_location(old_location, pos=start_pos)
            w.set_location((0.5 * (old_location + new_location)), (0.5 * (start_pos + end_pos)))
            w.set_location(new_location, pos=end_pos)
            if (old_location != new_location):
                w.add_corner(start_pos)
                w.add_corner(end_pos)
            next_wire_begin += w_breadth
    def do_gate(self):
        """Schedule this gate into a depth column, creating a new column when
        no compatible one exists, and advance the wires' depths."""
        global master_depth_list, overall_depth, last_depth
        global allow_different_gates
        if (self.type == 'PHANTOM'):
            pass
        else:
            if (self.type == 'TOUCH'):
                my_depth = max(self.max_depth(), last_depth)
            else:
                my_depth = (1 + self.max_depth())
            if ((self.type in ['START', 'END']) and (my_depth < last_depth)):
                my_depth = last_depth
            # Walk forward until a column accepts this gate (unless mixing is
            # allowed).
            while ((my_depth < overall_depth) and (not allow_different_gates) and (not self.match(master_depth_list[my_depth]))):
                my_depth += 1
            if (my_depth >= overall_depth):
                new_depth()
            last_depth = my_depth
            depth_to_use = master_depth_list[last_depth]
            depth_to_use.add_gate(self)
        self.set_depth(last_depth)
    def get_length(self):
        """Return the gate's extent along the wire direction."""
        if (self.specified_length != None):
            return self.specified_length
        if (self.type in ['PHANTOM', 'TOUCH']):
            return 0
        if (self.type in (['LABEL', 'START', 'END', 'PERMUTE'] + EQUALS)):
            return 15
        if (self.type == '/'):
            return (GATE_SIZE * (2.0 / 3.0))
        if self.boxes:
            if self.controls:
                the_length = 6
            else:
                the_length = 0
            for box in self.boxes:
                the_length = max(the_length, box.get_length())
            return the_length
        if (self.type in ['M']):
            return GATE_SIZE
        if self.change_to_classical:
            return GATE_SIZE
        if (self.type in ['N', 'IN', 'OUT']):
            return 6
        assert 0, ('error: type is ' + self.type)
    def get_breadth(self):
        """Return the gate's extent across the wire direction."""
        if (self.specified_breadth != None):
            return self.specified_breadth
        if (self.type in ['IN', 'OUT']):
            return 6
        if ((self.type == 'M') and self.name):
            return (GATE_SIZE * (2.0 / 3.0))
        return GATE_SIZE
    def draw_comments(self, pos):
        """Draw the gate's two comments above/below the circuit at `pos`."""
        global circuit_bottom, circuit_top
        if (not self.comments):
            return
        directions = get_directions()
        (x, y) = get_x_y(pos, circuit_top)
        draw_comment(self.comments[0], x, y, directions[0])
        (x, y) = get_x_y(pos, circuit_bottom)
        draw_comment(self.comments[1], x, y, directions[1])
    def ready_to_draw(self):
        """Return 1 unless this gate waits on an attach_to gate not yet drawn."""
        if (not self.options.get('attach_to', None)):
            return 1
        return self.options['attach_to'].already_drawn
    def draw_gate(self):
        """Emit the TikZ commands for this gate at every recorded position."""
        global wires, orientation, bgcolor
        # Echo the originating source line into the output for traceability.
        print(self.input_line)
        self.already_drawn = 1
        if (self.type == 'PHANTOM'):
            return
        if (self.type == 'PERMUTE'):
            return
        scope_str = make_scope_str(color=self.color)
        if scope_str:
            print(('\\begin{scope}[%s]' % scope_str))
        (width, height) = get_w_h(self.get_length(), self.get_breadth())
        self.draw_comments(self.position_list[0][0])
        for (pos, dir) in self.position_list:
            fixed_wires = []
            if (self.type == 'TOUCH'):
                # A styled TOUCH redraws the wire segments it spans.
                if (scope_str or self.style):
                    draw_command = '\\draw'
                    if self.style:
                        draw_command += ('[%s]' % self.style)
                    wire_ranges = self.get_ranges(pos)
                    for (start, end) in wire_ranges:
                        print((draw_command + (' (%f,%f) -- (%f,%f);' % (get_x_y(pos, start) + get_x_y(pos, end)))))
            elif (self.type == 'LABEL'):
                tikz_str = ('fill=%s' % bgcolor)
                if (orientation == 'vertical'):
                    tikz_str += ', rotate around={-90:(0,0)}'
                for i in range(len(self.targets)):
                    wires[self.targets[i]].draw_label(pos, self.labels[i], tikz_str)
            elif (self.type in EQUALS):
                target_locations = [wires[t].location(pos) for t in self.targets]
                target_min = min(target_locations)
                target_max = max(target_locations)
                (x, y) = get_x_y(pos, (0.5 * (target_min + target_max)))
                (w, h) = get_w_h(self.get_length(), ((target_max - target_min) + self.get_breadth()))
                draw_equals(x, y, w, h, self.label_string)
                # Optional braces on either side, chosen by the '<'/'>'
                # characters at the ends of the type string.
                delta = BRACE_AMPLITUDE
                start_loc = (target_min - (0.5 * self.get_breadth()))
                end_loc = (target_max + (0.5 * self.get_breadth()))
                if (self.type[0] == '<'):
                    draw_breadthwise_brace(start_loc, end_loc, ((pos - (0.5 * self.get_length())) + delta), (- delta))
                elif (self.type[0] == '>'):
                    draw_breadthwise_brace(start_loc, end_loc, (pos - (0.5 * self.get_length())), delta)
                if (self.type[(- 1)] == '<'):
                    draw_breadthwise_brace(start_loc, end_loc, (pos + (0.5 * self.get_length())), (- delta))
                elif (self.type[(- 1)] == '>'):
                    draw_breadthwise_brace(start_loc, end_loc, ((pos + (0.5 * self.get_length())) - delta), delta)
            else:
                if (self.type not in ['M', '/', 'START', 'END']):
                    # Vertical connector: doubled lines over classical
                    # segments, single line over the quantum span.
                    (minw, maxw) = self.min_and_max_wires(pos)
                    top = maxw.location(pos)
                    bottom = minw.location(pos)
                    (qmin, qmax) = self.min_and_max_wires(pos, quantum_only=1)
                    qtop = qmax.location(pos)
                    qbottom = qmin.location(pos)
                    draw_command = '\\draw'
                    if self.style:
                        draw_command += ('[%s]' % self.style)
                    if (top != qtop):
                        for p in [(pos - CLASSICAL_SEP), (pos + CLASSICAL_SEP)]:
                            print((draw_command + (' (%f,%f) -- (%f,%f);' % (get_x_y(p, top) + get_x_y(p, qtop)))))
                    if (qtop != qbottom):
                        print((draw_command + (' (%f,%f) -- (%f,%f);' % (get_x_y(pos, qtop) + get_x_y(pos, qbottom)))))
                    if (qbottom != bottom):
                        for p in [(pos - CLASSICAL_SEP), (pos + CLASSICAL_SEP)]:
                            print((draw_command + (' (%f,%f) -- (%f,%f);' % (get_x_y(p, qbottom) + get_x_y(p, bottom)))))
                for box in self.boxes:
                    box.draw_box(pos, dir)
                    (minw, maxw) = box.min_and_max_wires(pos)
                    box_min = minw.location(pos)
                    box_max = maxw.location(pos)
                    pos_s = (box.get_length() * 0.5)
                    # Wires crossing the box that are not part of the gate get
                    # redrawn dashed; the gate's own controls are redrawn solid.
                    tikz_strs = {}
                    for wn in list(wires.keys()):
                        loc = wires[wn].location(pos)
                        if ((loc < box_min) or (box_max < loc)):
                            continue
                        if (wn in self.controls):
                            tikz_strs[wn] = None
                        elif (wn in self.all_wires()):
                            continue
                        else:
                            tikz_strs[wn] = 'dashed'
                        if (wn not in fixed_wires):
                            fixed_wires.append(wn)
                    fixed_wires.sort(key=get_start_loc_from_name, reverse=True)
                    for wn in fixed_wires:
                        wires[wn].fix_wire((pos - pos_s), (pos + pos_s), tikz_str=tikz_strs[wn])
                if (self.type in ['IN', 'OUT']):
                    if (self.type == 'IN'):
                        thick_side = 1
                    else:
                        thick_side = (- 1)
                    thick_side *= dir
                    target = self.targets[0]
                    (x, y) = get_x_y(pos, wires[target].location(pos))
                    draw_drop(x, y, width, height, self.name, thick_side, style=self.style)
                elif (self.type in ['START', 'END']):
                    if (self.type == 'START'):
                        forward = 1
                    else:
                        forward = (- 1)
                    forward *= dir
                    for target in self.targets:
                        if (forward == 1):
                            wires[target].draw_start_label((pos + (0.5 * self.get_length())), ('fill=%s' % bgcolor), length=self.get_length())
                        else:
                            wires[target].draw_end_label((pos - (0.5 * self.get_length())), ('fill=%s' % bgcolor), length=self.get_length())
                elif (self.type in ['N', 'M', '/']):
                    for target in self.targets:
                        (x, y) = get_x_y(pos, wires[target].location(pos))
                        if (self.type == 'N'):
                            draw_options = get_draw_options(self.options, target, '+')
                            draw_xor_or_control(x, y, draw_options)
                        elif (self.type == 'M'):
                            draw_measurement(x, y, width, height, name=self.name, style=self.style, fill=self.fill)
                        elif (self.type == '/'):
                            draw_slash(x, y, width, height, name=self.name, style=self.style)
                elif (not self.boxes):
                    assert 0, ('unknown type %s' % self.type)
                # Control dots on every control wire.
                for wn in self.controls:
                    (x, y) = get_x_y(pos, wires[wn].location(pos))
                    draw_options = get_draw_options(self.options, wn, '.')
                    draw_options['direction'] = dir
                    draw_xor_or_control(x, y, draw_options)
                # Draw a measurement symbol on wires this gate converted to
                # classical (unless already handled above).
                if ((self.type not in ['M', '/', 'IN', 'OUT', 'START', 'END']) and self.change_to_classical):
                    for w in list(self.wire_type_changes.keys()):
                        if ((self.wire_type_changes[w] == 'c') and (w.name not in fixed_wires)):
                            (x, y) = get_x_y(pos, w.location(pos))
                            draw_measurement(x, y, width, height, style=self.style, fill=self.fill)
        if scope_str:
            print('\\end{scope}')
class Migration(migrations.Migration):
    """Make Story.submitted_by optional and null it out when the user is deleted."""

    dependencies = [
        ('successstories', '0010_story_submitted_by'),
    ]

    operations = [
        migrations.AlterField(
            model_name='story',
            name='submitted_by',
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to=settings.AUTH_USER_MODEL,
            ),
        ),
    ]
def get_all_QAs(qtotal=100):
    """Fetch QA pairs from the paginated /api/v0/qa/all endpoint.

    Args:
        qtotal: stop fetching once more than this many QAs have been
            collected; pass None to retrieve every available page.

    Returns:
        list of parsed QA entries (as produced by utils.parse_QA).
    """
    qas = []
    # Cache image metadata so each image is fetched at most once across pages.
    image_map = {}
    page = 1
    while True:
        # Renamed from `next` to avoid shadowing the builtin; built once per
        # iteration instead of being duplicated before and inside the loop.
        next_url = '/api/v0/qa/all?page=' + str(page)
        data = utils.retrieve_data(next_url)
        for d in data['results']:
            if d['image'] not in image_map:
                image_map[d['image']] = get_image_data(id=d['image'])
        qas.extend(utils.parse_QA(data['results'], image_map))
        if qtotal is not None and len(qas) > qtotal:
            return qas
        if data['next'] is None:
            break
        page += 1
    return qas
def channel_open(open_queue: List[ChannelNew]) -> None:
    """Open (and fund) every channel described in `open_queue` via the REST API.

    Each queued channel must not already exist — the scheduler is responsible
    for filtering those out — and each PUT must succeed.
    """
    # Loop variable renamed from `channel_open`, which shadowed this function.
    for channel_new in open_queue:
        channel = channel_details(channel_new.endpoint, channel_new.token_address, channel_new.partner)
        assert channel is None, 'Channel already exists, the operation should not have been scheduled.'
        channel_open_request = {
            'token_address': channel_new.token_address,
            'partner_address': channel_new.partner,
            'total_deposit': channel_new.initial_deposit,
        }
        log.info(f'Opening {channel_new}')
        url_channel_open = f'{channel_new.endpoint}/api/{API_VERSION}/channels'
        response = requests.put(url_channel_open, json=channel_open_request)
        # BUG FIX: was `assert (response, response.text)` — a non-empty tuple,
        # which is always truthy, so HTTP failures were never detected.
        # `Response.__bool__` is True only for non-error status codes.
        assert response, response.text
def test_struct_with_struct_type():
    """schema() should map a Hive struct column onto a StructType whose fields
    are nullable UnionTypes wrapping the primitive member types."""
    hms = MagicMock(spec=HMS)
    class MockTable():
        name = 'dummy_table'
        columns = [
            HColumn(
                'col1',
                HStructType(
                    names=['sub_col1', 'sub_col2'],
                    types=[
                        HPrimitiveType(PrimitiveCategory.BOOLEAN),
                        HPrimitiveType(PrimitiveCategory.INT),
                    ],
                ),
            )
        ]
    hms.get_table.return_value = MockTable
    schema = HiveMetastoreClient(hms).schema('dummy_database', 'dummy_table')
    assert isinstance(schema, StructType)
    assert len(schema.fields) == 1
    outer = schema.fields[0]
    assert isinstance(outer, UnionType)
    assert isinstance(outer.types[1], StructType)
    assert outer.extra_attrs['name'] == 'col1'
    inner = outer.types[1]
    assert len(inner.fields) == 2
    assert isinstance(inner.fields[0], UnionType)
    assert isinstance(inner.fields[0].types[1], BoolType)
    assert inner.fields[0].extra_attrs['name'] == 'sub_col1'
    assert isinstance(inner.fields[1], UnionType)
    assert isinstance(inner.fields[1].types[1], IntType)
    assert inner.fields[1].types[1].bits == 32
    assert inner.fields[1].extra_attrs['name'] == 'sub_col2'
.parametrize('Type', [Bits16, Bits32])
# NOTE(review): the line above is a decorator fragment — presumably
# `@pytest.mark.parametrize(...)` with the prefix lost in extraction; the
# `add_upblk`/`construct` bodies likewise look like they lost PyMTL `@update`
# decorators and an `s.update`/`@update` registration.  Confirm against the
# original file.
def test_adder(do_test, Type):
    """Drive a simple two-input adder component through `do_test` with ten
    random test vectors."""
    def tv_in(model, tv):
        # Apply one test vector (pair of operands) to the model's inputs.
        model.in_1 = tv[0]
        model.in_2 = tv[1]
    class A(Component):
        def construct(s, Type):
            s.in_1 = InPort(Type)
            s.in_2 = InPort(Type)
            s.out = OutPort(Type)
            def add_upblk():
                # Combinational sum of the two inputs.
                s.out = (s.in_1 + s.in_2)
        def line_trace(s):
            return ('sum = ' + str(s.out))
    a = A(Type)
    a._tvs = [(randint((- 255), 255), randint((- 255), 255)) for _ in range(10)]
    a._tv_in = tv_in
    do_test(a)
class BlockListRelease(FilterReleasePlugin):
    """Release filter that drops any release matching a blocklisted
    requirement specifier from the configured 'packages' blocklist."""

    name = 'blocklist_release'
    # Default shared list kept for interface compatibility with the base class.
    blocklist_package_names: list[Requirement] = []

    def initialize_plugin(self) -> None:
        """Parse the configured blocklist once at plugin start-up."""
        if not self.blocklist_package_names:
            self.blocklist_release_requirements = self._determine_filtered_package_requirements()
            logger.info(f'Initialized release plugin {self.name}, filtering {self.blocklist_release_requirements}')

    def _determine_filtered_package_requirements(self) -> list[Requirement]:
        """Turn the blocklist's 'packages' section into canonicalized Requirements."""
        requirements: set[Requirement] = set()
        try:
            raw_lines = self.blocklist['packages'].split('\n')
        except KeyError:
            raw_lines = []
        for raw in raw_lines:
            line = raw.strip()
            # Skip blanks and comment lines.
            if not line or line.startswith('#'):
                continue
            req = Requirement(line)
            req.name = canonicalize_name(req.name)
            # Make specifiers also match pre-release versions.
            req.specifier.prereleases = True
            requirements.add(req)
        return list(requirements)

    def filter(self, metadata: dict) -> bool:
        """Return False (drop the release) when it matches the blocklist."""
        info = metadata['info']
        return not self._check_match(canonicalize_name(info['name']), metadata['version'])

    def _check_match(self, name: str, version_string: str) -> bool:
        """True when name==version_string matches any blocklisted requirement."""
        if not name or not version_string:
            return False
        try:
            version = Version(version_string)
        except InvalidVersion:
            logger.debug(f'Package {name}=={version_string} has an invalid version')
            return False
        for requirement in self.blocklist_release_requirements:
            if name != requirement.name:
                continue
            if version in requirement.specifier:
                logger.debug(f'MATCH: Release {name}=={version} matches specifier {requirement.specifier}')
                return True
        return False
class ManagedDockWindow(ManagedWindowBase):
    """Managed measurement window whose results are shown in dockable plot
    tabs, one curve set per x/y axis pair, plus an experiment log tab."""
    def __init__(self, procedure_class, x_axis=None, y_axis=None, linewidth=1, log_fmt=None, log_datefmt=None, **kwargs):
        """Build the dock and log widgets, thread them into the base window's
        widget_list, and hook the log widget into the root logger.

        x_axis/y_axis may each be a single column name or a list of names;
        the first list entry becomes the default plotted axis.
        """
        self.x_axis = x_axis
        self.y_axis = y_axis
        # Every axis column must be registered as a measured quantity below.
        measure_quantities = []
        if isinstance(self.x_axis, list):
            measure_quantities += [*self.x_axis]
            self.x_axis_labels = self.x_axis
            self.x_axis = self.x_axis[0]
        else:
            self.x_axis_labels = [self.x_axis]
            measure_quantities.append(self.x_axis)
        if isinstance(self.y_axis, list):
            measure_quantities += [*self.y_axis]
            self.y_axis_labels = self.y_axis
            self.y_axis = self.y_axis[0]
        else:
            self.y_axis_labels = [self.y_axis]
            measure_quantities.append(self.y_axis)
        self.log_widget = LogWidget('Experiment Log', fmt=log_fmt, datefmt=log_datefmt)
        self.dock_widget = DockWidget('Dock Tab', procedure_class, self.x_axis_labels, self.y_axis_labels, linewidth=linewidth)
        # Append our widgets to any caller-supplied widget_list (tuple concat,
        # so a caller-supplied value is expected to be a tuple).
        if ('widget_list' not in kwargs):
            kwargs['widget_list'] = ()
        kwargs['widget_list'] = (kwargs['widget_list'] + (self.dock_widget, self.log_widget))
        super().__init__(procedure_class, **kwargs)
        self.browser_widget.browser.measured_quantities.update(measure_quantities)
        # Route all application logging into the log tab; must happen after
        # super().__init__ so self.log_level exists.
        logging.getLogger().addHandler(self.log_widget.handler)
        log.setLevel(self.log_level)
        log.info('DockWindow connected to logging')
class PlayAdvancementTab(Packet):
    """Serverbound "Advancement Tab" packet (id 0x22 / 34): sent when the
    client opens or closes the advancements screen."""
    # Packet id within the play state.
    id = 34
    # Direction: 0 = serverbound (client -> server).
    to = 0
    def __init__(self, action: int, tab_id: int) -> None:
        super().__init__()
        # action: 0 = opened tab, 1 = closed screen; tab_id identifies the tab.
        self.action = action
        self.tab_id = tab_id
    def decode(cls, buf: Buffer) -> PlayAdvancementTab:
        """Construct the packet from a varint action and an optional varint tab id.

        NOTE(review): takes `cls` but no `@classmethod` decorator is visible —
        decorators appear stripped in this chunk; confirm against the original.
        """
        return cls(buf.unpack_varint(), buf.unpack_optional(buf.unpack_varint))
class unit_gtcn_5(nn.Module):
    """Graph convolution unit combining a fixed polynomial of the skeleton
    adjacency ``A``, a learned additive refinement (``PA``), a data-dependent
    spatial attention term, and a temporal-conv-derived attention term.

    Fix: ``nn.init.constant`` is the deprecated (and since removed) alias of
    ``nn.init.constant_`` — use the in-place variant.
    """

    def __init__(self, in_channels, out_channels, A, coff_embedding=4, num_subset=3):
        super(unit_gtcn_5, self).__init__()
        inter_channels = out_channels // coff_embedding
        self.inter_c = inter_channels
        # Learned adjacency refinement, initialized near zero.
        self.PA = nn.Parameter(torch.from_numpy(A.astype(np.float32)))
        nn.init.constant_(self.PA, 1e-06)
        self.A = Variable(torch.from_numpy(A.astype(np.float32)), requires_grad=False)
        self.num_subset = num_subset
        self.conv_a = nn.ModuleList()
        self.conv_b = nn.ModuleList()
        self.conv_d = nn.ModuleList()
        self.conv_T1 = nn.ModuleList()
        self.conv_T2 = nn.ModuleList()
        for i in range(self.num_subset):
            # 1x1 embedding pair (a, b) for spatial attention, 1x1 output
            # projection (d), and 9x1 temporal convs (T1, T2) for temporal attention.
            self.conv_a.append(nn.Conv2d(in_channels, inter_channels, 1))
            self.conv_b.append(nn.Conv2d(in_channels, inter_channels, 1))
            self.conv_d.append(nn.Conv2d(in_channels, out_channels, 1))
            self.conv_T1.append(nn.Conv2d(in_channels, inter_channels, (9, 1), padding=(4, 0)))
            self.conv_T2.append(nn.Conv2d(in_channels, inter_channels, (9, 1), padding=(4, 0)))
        # Residual path: project when channel counts differ, identity otherwise.
        if in_channels != out_channels:
            self.down = nn.Sequential(nn.Conv2d(in_channels, out_channels, 1), nn.BatchNorm2d(out_channels))
        else:
            self.down = (lambda x: x)
        self.bn = nn.BatchNorm2d(out_channels)
        self.soft = nn.Softmax(-2)
        self.relu = nn.ReLU()
        # Fixed polynomial of A: 4*A^2 - A - 2*I (assumes A is a normalized
        # adjacency — TODO confirm against the graph construction code).
        self.A_ch3 = (4 * torch.pow(self.A, 2)) - self.A - (2 * torch.eye(self.A.size(-1)))
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                conv_init(m)
            elif isinstance(m, nn.BatchNorm2d):
                bn_init(m, 1)
        # Output BN starts near zero so the residual path dominates early training.
        bn_init(self.bn, 1e-06)
        for i in range(self.num_subset):
            conv_branch_init(self.conv_d[i], self.num_subset)

    def forward(self, x):
        """x: (N, C, T, V) -> (N, out_channels, T, V)."""
        (N, C, T, V) = x.size()
        A_ch3 = self.A_ch3.cuda(x.get_device())
        A = A_ch3 + self.PA
        y = None
        for i in range(self.num_subset):
            # Data-dependent V x V spatial attention.
            A1 = self.conv_a[i](x).permute(0, 3, 1, 2).contiguous().view(N, V, self.inter_c * T)
            A2 = self.conv_b[i](x).view(N, self.inter_c * T, V)
            A1 = self.soft(torch.matmul(A1, A2) / A1.size(-1))
            # Temporal-conv-derived V x V attention.
            A_T1 = self.conv_T1[i](x).permute(0, 3, 1, 2).contiguous().view(N, V, self.inter_c * T)
            A_T2 = self.conv_T2[i](x).view(N, self.inter_c * T, V)
            A_T1 = self.soft(torch.matmul(A_T1, A_T2) / A_T1.size(-1))
            # Combine fixed+learned adjacency with both attention terms.
            A1 = A[i] + A1 + A_T1
            A2 = x.view(N, C * T, V)
            z = self.conv_d[i](torch.matmul(A2, A1).view(N, C, T, V))
            y = (z + y) if (y is not None) else z
        y = self.bn(y)
        y += self.down(x)
        return self.relu(y)
@pytest.mark.fast
def test_noplot_different_quantities(*args, **kwargs):
    """Plotting a second, different quantity on the same figure must raise ValueError.

    Fix: the decorator line was garbled to a bare ``.fast`` (a syntax error);
    restored as ``@pytest.mark.fast`` (pytest is already used below).
    """
    import matplotlib.pyplot as plt
    plt.ion()
    from radis import load_spec
    from radis.test.utils import getTestFile
    s = load_spec(getTestFile('CO_Tgas1500K_mole_fraction0.01.spec'), binary=True)
    s.update()
    s.plot('abscoeff', nfig='test_noplot_different_quantities')
    with pytest.raises(ValueError):
        # 'emisscoeff' is a different quantity than the 'abscoeff' already plotted
        s.plot('emisscoeff', nfig='same')
    plt.close('test_noplot_different_quantities')
def testCheckMethodCalls(SAMPLE_PATH_14d9f) -> None:
    """The WifiCheckTask target method must invoke Log.e in the sample APK."""
    targetMethod = [
        'Lcom/google/progress/WifiCheckTask;',
        'checkWifiCanOrNotConnectServer',
        '([Ljava/lang/String;)Z',
    ]
    checkMethods = [
        ('Landroid/util/Log;', 'e', '(Ljava/lang/String; Ljava/lang/String;)I'),
    ]
    assert checkMethodCalls('14d9f1a92dd984d6040cc41ed06e273e.apk', targetMethod, checkMethods) is True
class BASNet(nn.Module):
    """BASNet salient-object detector: ResNet-34 encoder, dilated bridge,
    U-Net-style decoder with deep supervision at every scale, and a residual
    refinement module (RefUnet) on the final map.

    forward() returns 8 sigmoid probability maps, all upsampled to input
    resolution: (refined output, d1..d6 side outputs, bridge output).

    Fix: ``F.sigmoid`` is deprecated in favor of ``torch.sigmoid``.
    """

    def __init__(self, n_channels, n_classes):
        super(BASNet, self).__init__()
        resnet = models.resnet34(pretrained=True)
        # ---- input stem (keeps full resolution, unlike resnet's 7x7/stride-2 stem) ----
        self.inconv = nn.Conv2d(n_channels, 64, 3, padding=1)
        self.inbn = nn.BatchNorm2d(64)
        self.inrelu = nn.ReLU(inplace=True)
        # ---- encoder: reuse pretrained resnet-34 stages ----
        self.encoder1 = resnet.layer1
        self.encoder2 = resnet.layer2
        self.encoder3 = resnet.layer3
        self.encoder4 = resnet.layer4
        # ---- extra encoder stages 5 and 6 (plain BasicBlocks) ----
        self.pool4 = nn.MaxPool2d(2, 2, ceil_mode=True)
        self.resb5_1 = BasicBlock(512, 512)
        self.resb5_2 = BasicBlock(512, 512)
        self.resb5_3 = BasicBlock(512, 512)
        self.pool5 = nn.MaxPool2d(2, 2, ceil_mode=True)
        self.resb6_1 = BasicBlock(512, 512)
        self.resb6_2 = BasicBlock(512, 512)
        self.resb6_3 = BasicBlock(512, 512)
        # ---- bridge: dilated convs keep resolution while growing receptive field ----
        self.convbg_1 = nn.Conv2d(512, 512, 3, dilation=2, padding=2)
        self.bnbg_1 = nn.BatchNorm2d(512)
        self.relubg_1 = nn.ReLU(inplace=True)
        self.convbg_m = nn.Conv2d(512, 512, 3, dilation=2, padding=2)
        self.bnbg_m = nn.BatchNorm2d(512)
        self.relubg_m = nn.ReLU(inplace=True)
        self.convbg_2 = nn.Conv2d(512, 512, 3, dilation=2, padding=2)
        self.bnbg_2 = nn.BatchNorm2d(512)
        self.relubg_2 = nn.ReLU(inplace=True)
        # ---- decoder stage 6 (input channels doubled by skip concatenation) ----
        self.conv6d_1 = nn.Conv2d(1024, 512, 3, padding=1)
        self.bn6d_1 = nn.BatchNorm2d(512)
        self.relu6d_1 = nn.ReLU(inplace=True)
        self.conv6d_m = nn.Conv2d(512, 512, 3, dilation=2, padding=2)
        self.bn6d_m = nn.BatchNorm2d(512)
        self.relu6d_m = nn.ReLU(inplace=True)
        self.conv6d_2 = nn.Conv2d(512, 512, 3, dilation=2, padding=2)
        self.bn6d_2 = nn.BatchNorm2d(512)
        self.relu6d_2 = nn.ReLU(inplace=True)
        # ---- decoder stage 5 ----
        self.conv5d_1 = nn.Conv2d(1024, 512, 3, padding=1)
        self.bn5d_1 = nn.BatchNorm2d(512)
        self.relu5d_1 = nn.ReLU(inplace=True)
        self.conv5d_m = nn.Conv2d(512, 512, 3, padding=1)
        self.bn5d_m = nn.BatchNorm2d(512)
        self.relu5d_m = nn.ReLU(inplace=True)
        self.conv5d_2 = nn.Conv2d(512, 512, 3, padding=1)
        self.bn5d_2 = nn.BatchNorm2d(512)
        self.relu5d_2 = nn.ReLU(inplace=True)
        # ---- decoder stage 4 ----
        self.conv4d_1 = nn.Conv2d(1024, 512, 3, padding=1)
        self.bn4d_1 = nn.BatchNorm2d(512)
        self.relu4d_1 = nn.ReLU(inplace=True)
        self.conv4d_m = nn.Conv2d(512, 512, 3, padding=1)
        self.bn4d_m = nn.BatchNorm2d(512)
        self.relu4d_m = nn.ReLU(inplace=True)
        self.conv4d_2 = nn.Conv2d(512, 256, 3, padding=1)
        self.bn4d_2 = nn.BatchNorm2d(256)
        self.relu4d_2 = nn.ReLU(inplace=True)
        # ---- decoder stage 3 ----
        self.conv3d_1 = nn.Conv2d(512, 256, 3, padding=1)
        self.bn3d_1 = nn.BatchNorm2d(256)
        self.relu3d_1 = nn.ReLU(inplace=True)
        self.conv3d_m = nn.Conv2d(256, 256, 3, padding=1)
        self.bn3d_m = nn.BatchNorm2d(256)
        self.relu3d_m = nn.ReLU(inplace=True)
        self.conv3d_2 = nn.Conv2d(256, 128, 3, padding=1)
        self.bn3d_2 = nn.BatchNorm2d(128)
        self.relu3d_2 = nn.ReLU(inplace=True)
        # ---- decoder stage 2 ----
        self.conv2d_1 = nn.Conv2d(256, 128, 3, padding=1)
        self.bn2d_1 = nn.BatchNorm2d(128)
        self.relu2d_1 = nn.ReLU(inplace=True)
        self.conv2d_m = nn.Conv2d(128, 128, 3, padding=1)
        self.bn2d_m = nn.BatchNorm2d(128)
        self.relu2d_m = nn.ReLU(inplace=True)
        self.conv2d_2 = nn.Conv2d(128, 64, 3, padding=1)
        self.bn2d_2 = nn.BatchNorm2d(64)
        self.relu2d_2 = nn.ReLU(inplace=True)
        # ---- decoder stage 1 ----
        self.conv1d_1 = nn.Conv2d(128, 64, 3, padding=1)
        self.bn1d_1 = nn.BatchNorm2d(64)
        self.relu1d_1 = nn.ReLU(inplace=True)
        self.conv1d_m = nn.Conv2d(64, 64, 3, padding=1)
        self.bn1d_m = nn.BatchNorm2d(64)
        self.relu1d_m = nn.ReLU(inplace=True)
        self.conv1d_2 = nn.Conv2d(64, 64, 3, padding=1)
        self.bn1d_2 = nn.BatchNorm2d(64)
        self.relu1d_2 = nn.ReLU(inplace=True)
        # ---- upsampling for side outputs and decoder skips ----
        self.upscore6 = nn.Upsample(scale_factor=32, mode='bilinear')
        self.upscore5 = nn.Upsample(scale_factor=16, mode='bilinear')
        self.upscore4 = nn.Upsample(scale_factor=8, mode='bilinear')
        self.upscore3 = nn.Upsample(scale_factor=4, mode='bilinear')
        self.upscore2 = nn.Upsample(scale_factor=2, mode='bilinear')
        # ---- 1-channel side-output heads ----
        self.outconvb = nn.Conv2d(512, 1, 3, padding=1)
        self.outconv6 = nn.Conv2d(512, 1, 3, padding=1)
        self.outconv5 = nn.Conv2d(512, 1, 3, padding=1)
        self.outconv4 = nn.Conv2d(256, 1, 3, padding=1)
        self.outconv3 = nn.Conv2d(128, 1, 3, padding=1)
        self.outconv2 = nn.Conv2d(64, 1, 3, padding=1)
        self.outconv1 = nn.Conv2d(64, 1, 3, padding=1)
        # Residual refinement module applied to the stage-1 map.
        self.refunet = RefUnet(1, 64)

    def forward(self, x):
        # ---- encoder ----
        hx = x
        hx = self.inconv(hx)
        hx = self.inbn(hx)
        hx = self.inrelu(hx)
        h1 = self.encoder1(hx)
        h2 = self.encoder2(h1)
        h3 = self.encoder3(h2)
        h4 = self.encoder4(h3)
        hx = self.pool4(h4)
        hx = self.resb5_1(hx)
        hx = self.resb5_2(hx)
        h5 = self.resb5_3(hx)
        hx = self.pool5(h5)
        hx = self.resb6_1(hx)
        hx = self.resb6_2(hx)
        h6 = self.resb6_3(hx)
        # ---- bridge ----
        hx = self.relubg_1(self.bnbg_1(self.convbg_1(h6)))
        hx = self.relubg_m(self.bnbg_m(self.convbg_m(hx)))
        hbg = self.relubg_2(self.bnbg_2(self.convbg_2(hx)))
        # ---- decoder: each stage concatenates the matching encoder feature ----
        hx = self.relu6d_1(self.bn6d_1(self.conv6d_1(torch.cat((hbg, h6), 1))))
        hx = self.relu6d_m(self.bn6d_m(self.conv6d_m(hx)))
        hd6 = self.relu6d_2(self.bn6d_2(self.conv6d_2(hx)))
        hx = self.upscore2(hd6)
        hx = self.relu5d_1(self.bn5d_1(self.conv5d_1(torch.cat((hx, h5), 1))))
        hx = self.relu5d_m(self.bn5d_m(self.conv5d_m(hx)))
        hd5 = self.relu5d_2(self.bn5d_2(self.conv5d_2(hx)))
        hx = self.upscore2(hd5)
        hx = self.relu4d_1(self.bn4d_1(self.conv4d_1(torch.cat((hx, h4), 1))))
        hx = self.relu4d_m(self.bn4d_m(self.conv4d_m(hx)))
        hd4 = self.relu4d_2(self.bn4d_2(self.conv4d_2(hx)))
        hx = self.upscore2(hd4)
        hx = self.relu3d_1(self.bn3d_1(self.conv3d_1(torch.cat((hx, h3), 1))))
        hx = self.relu3d_m(self.bn3d_m(self.conv3d_m(hx)))
        hd3 = self.relu3d_2(self.bn3d_2(self.conv3d_2(hx)))
        hx = self.upscore2(hd3)
        hx = self.relu2d_1(self.bn2d_1(self.conv2d_1(torch.cat((hx, h2), 1))))
        hx = self.relu2d_m(self.bn2d_m(self.conv2d_m(hx)))
        hd2 = self.relu2d_2(self.bn2d_2(self.conv2d_2(hx)))
        hx = self.upscore2(hd2)
        hx = self.relu1d_1(self.bn1d_1(self.conv1d_1(torch.cat((hx, h1), 1))))
        hx = self.relu1d_m(self.bn1d_m(self.conv1d_m(hx)))
        hd1 = self.relu1d_2(self.bn1d_2(self.conv1d_2(hx)))
        # ---- side outputs, all upsampled to input resolution ----
        db = self.outconvb(hbg)
        db = self.upscore6(db)
        d6 = self.outconv6(hd6)
        d6 = self.upscore6(d6)
        d5 = self.outconv5(hd5)
        d5 = self.upscore5(d5)
        d4 = self.outconv4(hd4)
        d4 = self.upscore4(d4)
        d3 = self.outconv3(hd3)
        d3 = self.upscore3(d3)
        d2 = self.outconv2(hd2)
        d2 = self.upscore2(d2)
        d1 = self.outconv1(hd1)
        dout = self.refunet(d1)
        # F.sigmoid is deprecated; torch.sigmoid is the supported equivalent.
        return (torch.sigmoid(dout), torch.sigmoid(d1), torch.sigmoid(d2), torch.sigmoid(d3), torch.sigmoid(d4), torch.sigmoid(d5), torch.sigmoid(d6), torch.sigmoid(db))
class GetDialogs():
    """Mixin providing :meth:`get_dialogs` for a pyrogram ``Client``."""

    async def get_dialogs(self: 'pyrogram.Client', limit: int=0) -> Optional[AsyncGenerator[('types.Dialog', None)]]:
        """Yield the user's dialogs page by page.

        ``limit=0`` means "all dialogs". Pagination is driven by the
        (offset_date, offset_id, offset_peer) triple taken from the last
        dialog of each fetched page, so statement order below matters.
        """
        current = 0
        # 0 -> unlimited; (1 << 31) - 1 acts as an effectively-infinite cap.
        total = (limit or ((1 << 31) - 1))
        # A single GetDialogs request is capped at 100 entries.
        limit = min(100, total)
        offset_date = 0
        offset_id = 0
        offset_peer = raw.types.InputPeerEmpty()
        while True:
            r = (await self.invoke(raw.functions.messages.GetDialogs(offset_date=offset_date, offset_id=offset_id, offset_peer=offset_peer, limit=limit, hash=0), sleep_threshold=60))
            # Index users/chats by id so Message/Dialog parsing can resolve peers.
            users = {i.id: i for i in r.users}
            chats = {i.id: i for i in r.chats}
            # Map chat id -> parsed top message for each dialog.
            messages = {}
            for message in r.messages:
                if isinstance(message, raw.types.MessageEmpty):
                    continue
                chat_id = utils.get_peer_id(message.peer_id)
                messages[chat_id] = (await types.Message._parse(self, message, users, chats))
            dialogs = []
            for dialog in r.dialogs:
                # Skip DialogFolder and any other non-Dialog entries.
                if (not isinstance(dialog, raw.types.Dialog)):
                    continue
                dialogs.append(types.Dialog._parse(self, dialog, messages, users, chats))
            if (not dialogs):
                return
            # Advance the pagination cursor from the last dialog of this page.
            last = dialogs[(- 1)]
            offset_id = last.top_message.id
            offset_date = utils.datetime_to_timestamp(last.top_message.date)
            offset_peer = (await self.resolve_peer(last.chat.id))
            for dialog in dialogs:
                (yield dialog)
                current += 1
                if (current >= total):
                    return
def group_hash_bucket_indices(hash_bucket_object_groups: np.ndarray, num_buckets: int, num_groups: int, object_store: Optional[IObjectStore]=None) -> Tuple[(np.ndarray, List[ObjectRef])]:
    """Regroup per-hash-bucket objects into ``num_groups`` groups and put each
    non-empty group into the object store.

    Returns a pair: (array mapping group index -> object ref, list of all refs).
    """
    object_refs = []
    hash_bucket_group_to_obj_id = np.empty([num_groups], dtype='object')
    if hash_bucket_object_groups is None:
        return (hash_bucket_group_to_obj_id, object_refs)
    # Bucket index modulo num_groups decides each bucket's group; within a
    # group, objects stay at their original bucket index.
    hb_group_to_object = np.empty([num_groups], dtype='object')
    for hb_index, bucket_obj in enumerate(hash_bucket_object_groups):
        if not bucket_obj:
            continue
        group = hb_index % num_groups
        if hb_group_to_object[group] is None:
            hb_group_to_object[group] = np.empty([num_buckets], dtype='object')
        hb_group_to_object[group][hb_index] = bucket_obj
    # NOTE(review): object_store is typed Optional but dereferenced
    # unconditionally below — verify callers always pass one.
    for group, grouped in enumerate(hb_group_to_object):
        if grouped is None:
            continue
        object_ref = object_store.put(grouped)
        object_refs.append(object_ref)
        hash_bucket_group_to_obj_id[group] = object_ref
        # Drop the local ref promptly so the store owns the only handle here.
        del object_ref
    return (hash_bucket_group_to_obj_id, object_refs)
def test_from_dict_interval_logical_type():
    """from_dict should build a BytesType carrying the Interval logical type."""
    spec = {'type': 'bytes', 'logical': 'build.recap.Interval', 'bytes': 12, 'variable': False}
    result = from_dict(spec)
    assert isinstance(result, BytesType)
    # Every logical-type attribute must round-trip from the input dict.
    assert result.logical == spec['logical']
    assert result.bytes_ == spec['bytes']
    assert result.variable == spec['variable']
class RHEL7_LogVol(F21_LogVol):
    """RHEL7 ``logvol`` command: F21 behaviour plus a ``--mkfsoptions`` flag."""

    removedKeywords = F21_LogVol.removedKeywords
    removedAttrs = F21_LogVol.removedAttrs

    def _getParser(self):
        """Extend the F21 parser with --mkfsoptions."""
        op = F21_LogVol._getParser(self)
        op.add_argument('--mkfsoptions', dest='mkfsopts', version=RHEL7, help='\n Specifies additional parameters to be passed to the\n program that makes a filesystem on this partition. No\n processing is done on the list of arguments, so they\n must be supplied in a format that can be passed directly\n to the mkfs program. This means multiple options should\n be comma-separated or surrounded by double quotes,\n depending on the filesystem.')
        return op

    def parse(self, args):
        """Parse, then reject option combinations that conflict with --mkfsoptions."""
        retval = F21_LogVol.parse(self, args)
        # --mkfsoptions makes no sense without formatting, and clashes with --fsprofile.
        if retval.mkfsopts and (not retval.format):
            raise KickstartParseError(_('--mkfsoptions with --noformat has no effect.'), lineno=self.lineno)
        if retval.mkfsopts and retval.fsprofile:
            raise KickstartParseError(_('--mkfsoptions and --fsprofile cannot be used together.'), lineno=self.lineno)
        return retval
class Renderer(object):
    """Minimal AST renderer.

    ``render`` walks the tree and dispatches each event to the method named
    after the node type (if defined), accumulating output in ``self.buf``.
    """

    def render(self, ast):
        """Walk ``ast`` and return the rendered string."""
        walker = ast.walker()
        self.buf = ''
        self.last_out = '\n'
        event = walker.nxt()
        while event is not None:
            node = event['node']
            handler = getattr(self, node.t, None)
            if handler is not None:
                handler(node, event['entering'])
            event = walker.nxt()
        return self.buf

    def lit(self, s):
        """Append ``s`` verbatim and remember it for cr()."""
        self.buf += s
        self.last_out = s

    def cr(self):
        """Emit a newline unless the previous output already was one."""
        if self.last_out != '\n':
            self.lit('\n')

    def out(self, s):
        """Default output hook; subclasses may override to escape."""
        self.lit(s)
class CmdLineHandler(HardwareHandlerBase):
    """Plain command-line UI for hardware-wallet prompts (no GUI)."""

    def get_passphrase(self, msg, confirm):
        import getpass
        print_stderr(msg)
        return getpass.getpass('')

    def get_pin(self, msg, *, show_strength=True):
        # Keyboard letters map onto the device's scrambled PIN pad digits.
        matrix = {'a': '7', 'b': '8', 'c': '9', 'd': '4', 'e': '5', 'f': '6', 'g': '1', 'h': '2', 'i': '3'}
        print_stderr(msg)
        print_stderr('a b c\nd e f\ng h i\n-----')
        # NOTE(review): `raw_input` is presumably defined at module level
        # (builtin raw_input does not exist on Python 3) — confirm.
        entered = raw_input()
        try:
            return ''.join(matrix[ch] for ch in entered)
        except KeyError as e:
            raise Exception('Character {} not in matrix!'.format(e)) from e

    def prompt_auth(self, msg):
        import getpass
        print_stderr(msg)
        response = getpass.getpass('')
        # Treat an empty entry as "no auth provided".
        return response if response else None

    def yes_no_question(self, msg):
        print_stderr(msg)
        return raw_input() in 'yY'

    def stop(self):
        pass

    def show_message(self, msg, on_cancel=None):
        print_stderr(msg)

    def show_error(self, msg, blocking=False):
        print_stderr(msg)

    def update_status(self, b):
        _logger.info(f'hw device status {b}')

    def finished(self):
        pass
class TestDOTAFCOS(TestDOTA):
    """DOTA evaluation runner for the batch-quad FCOS detection network."""

    def eval(self):
        """Build the FCOS net, run DOTA evaluation, and clean up the result file."""
        txt_name = '{}.txt'.format(self.cfgs.VERSION)
        images = self.get_test_image()
        det_net = build_whole_network_batch_quad.DetectionNetworkFCOS(cfgs=self.cfgs, is_training=False)
        self.test_dota(det_net=det_net, real_test_img_list=images, txt_name=txt_name)
        # Keep the result file only when boxes are being displayed.
        if not self.args.show_box:
            os.remove(txt_name)
def main():
    """Meta-learning driver: build TSCAN, optionally load pretrained weights,
    select train/test person file lists by dataset flag, then run
    meta-train/eval epochs and save the final epoch's predictions.

    Fixes: the MMSE branch raised an error message naming 'UBFC', and the
    user-facing messages misspelled 'training' as 'traing'.
    """
    checkpoint_folder = str(os.path.join(args.save_dir, args.exp_name))
    if not os.path.exists(checkpoint_folder):
        os.makedirs(checkpoint_folder)
    model = TSCAN()
    # ---- pretrained-weight selection (any other value = from scratch) ----
    if args.pre_trained == 1:
        print('Using pre-trained on all ALL AFRL!')
        model.load_state_dict(torch.load('./checkpoints/AFRL_pretrained/meta_pretrained_all_AFRL.pth'))
    elif args.pre_trained == 2:
        print('Using pre-trained on all 15 subjs in AFRL!')
        model.load_state_dict(torch.load('./checkpoints/AFRL_pretrained/pre_trained_AFRL_15subj_23.pth'))
    elif args.pre_trained == 3:
        print('Using filtered pre-trained on all ALL AFRL!')
        model.load_state_dict(torch.load('./checkpoints/AFRL_pretrained/AFRL_full_filtered_23.pth'))
    elif args.pre_trained == 4:
        print('Using pre-trained on first 10 subjs in AFRL')
        model.load_state_dict(torch.load('./checkpoints/Pretrained_AFRL_10/Pretrained_AFRL_10_23.pth'))
    elif args.pre_trained == 5:
        # NOTE(review): message duplicates case 4 but this loads the *_resp
        # checkpoint — probably should mention "resp"; left as-is pending confirmation.
        print('Using pre-trained on first 10 subjs in AFRL')
        model.load_state_dict(torch.load('./checkpoints/Pretrained_AFRL_10_resp/Pretrained_AFRL_10_resp_23.pth'))
    else:
        print('Not using any pretrained models!')
    # ---- optionally freeze the motion branch's four conv layers ----
    if args.freeze == 1:
        print('Freezing the motion branch!')
        model.motion_conv1.weight.requires_grad = False
        model.motion_conv1.bias.requires_grad = False
        model.motion_conv2.weight.requires_grad = False
        model.motion_conv2.bias.requires_grad = False
        model.motion_conv3.weight.requires_grad = False
        model.motion_conv3.bias.requires_grad = False
        model.motion_conv4.weight.requires_grad = False
        model.motion_conv4.bias.requires_grad = False
    model.to(device=args.device)
    meta_optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    transform = ToTensor1D()
    # ---- per-person file lists for each corpus ----
    AFRL_filelists_path = sorted(glob.glob('./meta_filelists/AFRL/person/*.txt'))
    UBFC_filelists_path = sorted(glob.glob('./meta_filelists/UBFC/person/*.txt'))
    UBFC_filelists_path_unsupervised = sorted(glob.glob('./meta_filelists/UBFC/person_unsupervised/*.txt'))
    MMSE_filelists_path_M = sorted(glob.glob('./meta_filelists/MMSE/person/male/*.txt'))
    MMSE_filelists_path_F = sorted(glob.glob('./meta_filelists/MMSE/person/female/*.txt'))
    MMSE_filelists_path_M_unsupervised = sorted(glob.glob('./meta_filelists/MMSE/person_unsupervised/male/*.txt'))
    MMSE_filelists_path_F_unsupervised = sorted(glob.glob('./meta_filelists/MMSE/person_unsupervised/female/*.txt'))
    # ---- train/test split selection by dataset flag ----
    if args.dataset == 'AFRL':
        if args.task_type == 'person':
            train_list = AFRL_filelists_path[10:20]
            test_list = AFRL_filelists_path[20:]
        else:
            raise ValueError('AFRL Task training not ready.')
    elif args.dataset == 'UBFC':
        if args.task_type == 'person':
            train_list = UBFC_filelists_path[:20]
            test_list = UBFC_filelists_path[20:]
        else:
            raise ValueError('UBFC Task training not ready.')
    elif args.dataset == 'MMSE':
        if args.task_type == 'person':
            train_list = (MMSE_filelists_path_M[:10] + MMSE_filelists_path_F[:10])
            test_list = (MMSE_filelists_path_M[10:] + MMSE_filelists_path_F[10:])
        else:
            # Fixed: previously said 'UBFC Task traing not ready.' in this MMSE branch.
            raise ValueError('MMSE Task training not ready.')
    elif args.dataset == 'Meta_AFRL_UBFC_All':
        if args.task_type == 'person':
            train_list = AFRL_filelists_path
            test_list = UBFC_filelists_path
    elif args.dataset == 'Meta_AFRL_MMSE_All':
        if args.task_type == 'person':
            train_list = AFRL_filelists_path
            test_list = (MMSE_filelists_path_M + MMSE_filelists_path_F)
    elif args.dataset == 'Meta_AFRL_UBFC':
        if args.task_type == 'person':
            train_list = (AFRL_filelists_path + UBFC_filelists_path[:20])
            test_list = UBFC_filelists_path[20:]
    elif args.dataset == 'Meta_AFRL_UBFC_cv':
        if args.task_type == 'person':
            train_list = AFRL_filelists_path
            test_list = UBFC_filelists_path[21:]
    elif args.dataset == 'Meta_AFRL_MMSE':
        if args.task_type == 'person':
            train_list = ((AFRL_filelists_path + MMSE_filelists_path_M[:10]) + MMSE_filelists_path_F[:10])
            test_list = (MMSE_filelists_path_M[10:] + MMSE_filelists_path_F[10:])
    elif args.dataset == 'Meta_10_AFRL_UBFC':
        if args.task_type == 'person':
            train_list = AFRL_filelists_path[15:]
            test_list = UBFC_filelists_path
    elif args.dataset == 'Meta_10_AFRL_MMSE':
        if args.task_type == 'person':
            train_list = AFRL_filelists_path[15:]
            test_list = (MMSE_filelists_path_M + MMSE_filelists_path_F)
    elif args.dataset == 'Pre_AFRL_Meta_MMSE_MTest_UBFC':
        train_list = (MMSE_filelists_path_M + MMSE_filelists_path_F)
        test_list = UBFC_filelists_path
    elif args.dataset == 'Pre_AFRL_Meta_MMSE_MTest_UBFC_unsupervised':
        train_list = (MMSE_filelists_path_M_unsupervised + MMSE_filelists_path_F_unsupervised)
        test_list = UBFC_filelists_path_unsupervised
    elif args.dataset == 'Pre_AFRL_Meta_UBFC_MTest_MMSE':
        train_list = UBFC_filelists_path
        test_list = (MMSE_filelists_path_M + MMSE_filelists_path_F)
    elif args.dataset == 'Pre_AFRL_Meta_UBFC_MTest_MMSE_unsupervised':
        train_list = UBFC_filelists_path_unsupervised
        test_list = (MMSE_filelists_path_M_unsupervised + MMSE_filelists_path_F_unsupervised)
    elif args.dataset == 'MTrain_AFRL_UBFC_MTest_MMSE':
        train_list = (AFRL_filelists_path + UBFC_filelists_path)
        test_list = (MMSE_filelists_path_M + MMSE_filelists_path_F)
    elif args.dataset == 'MTrain_AFRL_MMSE_MTest_UBFC':
        train_list = ((AFRL_filelists_path + MMSE_filelists_path_M) + MMSE_filelists_path_F)
        test_list = UBFC_filelists_path
    elif args.dataset == 'Pre_AFRL_10_MTrain_AFRL_10_MTest_AFRL_5':
        train_list = AFRL_filelists_path[10:20]
        test_list = AFRL_filelists_path[20:]
    elif args.dataset == 'MTrain_AFRL_20_MTest_AFRL_5':
        train_list = AFRL_filelists_path[:20]
        test_list = AFRL_filelists_path[20:]
    else:
        raise ValueError('Dataset is not supported!')
    # ---- datasets and loaders ----
    train_path = read_txt(train_list, args.folder)
    test_path = read_txt(test_list, args.folder)
    meta_train_dataset = RPPG_DATASET(args.dataset, args.num_shots, args.num_test_shots, train_path, num_tasks=len(train_list), state='train', transform=transform, target_transform=transform, sample_type='task', frame_depth=20, fs=args.tr_fs, signal=args.signal, unsupervised=args.unsupervised)
    train_dataloader = BatchMetaDataLoader(meta_train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
    meta_test_dataset = RPPG_DATASET(args.dataset, args.num_shots, args.num_test_shots, test_path, num_tasks=len(test_list), state='test', transform=transform, target_transform=transform, sample_type='task', frame_depth=20, fs=args.ts_fs, signal=args.signal, unsupervised=args.unsupervised)
    test_dataloader = BatchMetaDataLoader(meta_test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
    # ---- meta-train / evaluate loop ----
    for i in range(args.num_epochs):
        print('Epoch: ', i)
        train(args, model, meta_optimizer, train_dataloader)
        print('Finish training!')
        if (i % 1) == 0:
            (model, preds, labels, final_HR0, final_HR) = test(args, model, test_dataloader, checkpoint_folder, epoch=i)
            print('Finish Eval')
        if (i + 1) == args.num_epochs:
            # Persist the last epoch's predictions, labels, and HR estimates.
            print('Saving the final outputs from the last epoch!')
            pred_path = str(os.path.join(checkpoint_folder, (((str(args.exp_name) + '_') + str(i)) + '_pred_all')))
            label_path = str(os.path.join(checkpoint_folder, (((str(args.exp_name) + '_') + str(i)) + '_label_all')))
            final_HR_path = str(os.path.join(checkpoint_folder, (((str(args.exp_name) + '_') + str(i)) + '_HR_all')))
            final_HR0_path = str(os.path.join(checkpoint_folder, (((str(args.exp_name) + '_') + str(i)) + '_HR0_all')))
            np.save(pred_path, preds)
            np.save(label_path, labels)
            np.save(final_HR_path, final_HR)
            np.save(final_HR0_path, final_HR0)
            print('Pearson Results')
            print('Pearson: ', abs(np.corrcoef(final_HR, final_HR0)[(1, 0)]))
            print('')
class BERT2VQ(nn.Module):
    """Text-to-3D-latent encoder: BERT pools the prompt into one vector,
    which is expanded, reshaped into an 8x8x8 volume, and run through 3D
    resnet blocks up to ``ntoken`` channels.

    Fix: the CPU branch evaluated ``(self.device + 'cpu')`` — a no-op
    expression that also left ``self.device`` unset, so forward() would
    raise AttributeError on CPU. It now assigns ``self.device = 'cpu'``.
    """

    def __init__(self, opt) -> None:
        super().__init__()
        self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        self.bertmodel = BertModel.from_pretrained('bert-base-uncased')
        if opt.gpu_ids[0] != (-1):
            self.device = f'cuda:{opt.gpu_ids[0]}'
        else:
            self.device = 'cpu'
        ntoken = 512
        nblocks = 2
        use_attn = False
        convt_layers = []
        in_c = 64
        # Latent volume is dz x hz x wz = 8x8x8.
        self.dz = self.hz = self.wz = 8
        self.linear_expand = nn.Linear(768, 1024)
        self.linear_to3d = nn.Linear(1024, ((self.hz * self.wz) * self.dz))
        self.linear3d_to_conv = torch.nn.Conv3d(1, in_c, 3, 1, 1)
        # Each block doubles the channel count.
        for _ in range(nblocks):
            out_c = in_c * 2
            convt_layers.append(PVQVAEResnetBlock(in_channels=in_c, out_channels=out_c, temb_channels=0, dropout=0.1))
            if use_attn:
                convt_layers.append(AttnBlock(out_c))
            in_c = out_c
        self.convt_layers = nn.Sequential(*convt_layers)
        self.convt3 = PVQVAEResnetBlock(in_channels=in_c, out_channels=in_c, temb_channels=0, dropout=0.1)
        if use_attn:
            self.attn3 = AttnBlock(in_c)
        self.norm_out = Normalize(in_c)
        self.conv_out = torch.nn.Conv3d(in_c, ntoken, 3, 1, 1)

    def forward(self, x):
        """x: list/str of prompts -> (B, ntoken, 8, 8, 8) logits volume."""
        tokenized = self.tokenizer(x, return_tensors='pt', padding=True).to(self.device)
        x = self.bertmodel(**tokenized).pooler_output
        x = self.linear_expand(x)
        x = self.linear_to3d(x).unsqueeze(1)
        # d/h/w here must match self.dz/self.hz/self.wz (all 8).
        x = rearrange(x, 'b c (d h w) -> b c d h w', d=8, h=8, w=8)
        x = self.linear3d_to_conv(x)
        temb = None
        x = self.convt_layers(x)
        x = self.convt3(x, temb)
        if hasattr(self, 'attn3'):
            x = self.attn3(x)
        x = self.norm_out(x)
        x = self.conv_out(x)
        return x
class ContentManageableAdminTests(unittest.TestCase):
    """Verify ContentManageableModelAdmin augments admin options with the
    created/updated/creator metadata and stamps users on save."""

    def make_admin(self, **kwargs):
        """Build a throwaway admin subclass carrying the given attributes."""
        admin_cls = type('TestAdmin', (ContentManageableModelAdmin,), kwargs)
        return admin_cls(mock.Mock(), mock.Mock())

    def test_readonly_fields(self):
        admin = self.make_admin(readonly_fields=['f1'])
        expected = ['f1', 'created', 'updated', 'creator', 'last_modified_by']
        self.assertEqual(admin.get_readonly_fields(request=mock.Mock()), expected)

    def test_list_filter(self):
        admin = self.make_admin(list_filter=['f1'])
        self.assertEqual(admin.get_list_filter(request=mock.Mock()), ['f1', 'created', 'updated'])

    def test_list_display(self):
        admin = self.make_admin(list_display=['f1'])
        self.assertEqual(admin.get_list_display(request=mock.Mock()), ['f1', 'created', 'updated'])

    def test_get_fieldsets(self):
        admin = self.make_admin(fieldsets=[(None, {'fields': ['foo', 'created']})])
        fieldsets = admin.get_fieldsets(request=mock.Mock())
        expected = [
            (None, {'fields': ['foo']}),
            ('CMS metadata', {'fields': [('creator', 'created'), ('last_modified_by', 'updated')], 'classes': ('collapse',)}),
        ]
        self.assertEqual(fieldsets, expected)

    def test_save_model(self):
        # On creation (change=False), creator must be stamped from the request.
        admin = self.make_admin()
        request = mock.Mock()
        obj = mock.Mock()
        admin.save_model(request=request, obj=obj, form=None, change=False)
        self.assertEqual(obj.creator, request.user, "save_model didn't set obj.creator to request.user")

    def test_update_model(self):
        # On update (change=True), last_modified_by must be stamped instead.
        admin = self.make_admin()
        request = mock.Mock()
        obj = mock.Mock()
        admin.save_model(request=request, obj=obj, form=None, change=True)
        self.assertEqual(obj.last_modified_by, request.user, "save_model didn't set obj.last_modified_by to request.user")
def load_model_ensemble_and_task(filenames, arg_overrides=None, task=None):
    """Load every checkpoint in ``filenames`` into a model.

    The task is built from the first checkpoint's args unless one is given.
    Returns (list of models, args of the last checkpoint, task).
    """
    from fairseq import tasks
    ensemble = []
    for filename in filenames:
        if not os.path.exists(filename):
            raise IOError('Model file not found: {}'.format(filename))
        state = load_checkpoint_to_cpu(filename, arg_overrides)
        args = state['args']
        # Lazily create the task from the first checkpoint's args.
        if task is None:
            task = tasks.setup_task(args)
        model = task.build_model(args)
        model.load_state_dict(state['model'], strict=True)
        ensemble.append(model)
    return (ensemble, args, task)
def aggregate_wbg(prob, keep_bg=False):
    """Aggregate per-object probability maps with a derived background channel.

    Args:
        prob: tensor of per-object probabilities, objects along dim 0.
        keep_bg: include the background channel in the output when True.

    The background probability is the product of (1 - p) over all objects;
    the stacked maps are clamped away from 0/1, converted to logits, and
    re-normalized with a softmax over the object axis.

    (Fix: removed the unused local ``k = prob.shape``.)
    """
    new_prob = torch.cat(
        [torch.prod(1 - prob, dim=0, keepdim=True), prob], 0
    ).clamp(1e-07, 1 - 1e-07)
    logits = torch.log(new_prob / (1 - new_prob))
    normalized = F.softmax(logits, dim=0)
    return normalized if keep_bg else normalized[1:]
def test_segmented_only_catches_404(cipher_signature):
    """A 403 from the stream request must propagate out of download()."""
    stream = cipher_signature.streams.filter(adaptive=True)[0]
    forbidden = HTTPError('', 403, 'Forbidden', '', '')
    with mock.patch('pytube.request.stream', side_effect=forbidden):
        with mock.patch('pytube.streams.open', mock.mock_open(), create=True):
            with pytest.raises(HTTPError):
                stream.download()
def run_experiment(variant):
    """Build environments, actors, replay buffer, and the P3S-TD3 algorithm
    from a ``variant`` config dict, then train.

    Construction order matters: placeholders are created from the env before
    actors, and the "best" actor shares the same pool/placeholders.
    """
    env_params = variant['env_params']
    policy_params = variant['policy_params']
    value_fn_params = variant['value_fn_params']
    algorithm_params = variant['algorithm_params']
    replay_buffer_params = variant['replay_buffer_params']
    sampler_params = variant['sampler_params']
    noise_params = variant['noise_params']
    task = variant['task']
    domain = variant['domain']
    print('domain : ', domain)
    num_actors = algorithm_params['num_actors']
    num_q = algorithm_params['num_q']
    # Always train with a shared "best" actor (P3S transfer target).
    with_best = True
    target_range = algorithm_params['target_range']
    target_ratio = algorithm_params['target_ratio']
    best_update_interval = algorithm_params['best_update_interval']
    # Delayed-reward variants wrap each env in a reward-delay wrapper.
    if ('delayed' in domain):
        print('Delayed')
        env = dummy([delay(normalize(ENVIRONMENTS[domain][task](**env_params)), DELAY_FREQ) for _ in range(num_actors)])
    else:
        env = dummy([normalize(ENVIRONMENTS[domain][task](**env_params)) for _ in range(num_actors)])
    dict_ph = _init_placeholder(env)
    # Initial exploration length doubles as the sampler's minimum pool size.
    sampler_params['min_pool_size'] = algorithm_params['base_kwargs']['n_initial_exploration_steps']
    sampler = DummySampler(num_envs=num_actors, **sampler_params)
    base_kwargs = dict(algorithm_params['base_kwargs'], sampler=sampler)
    # Single replay buffer shared by all actors.
    pool = SimpleReplayBuffer(env_spec=env.spec, **replay_buffer_params)
    arr_initial_exploration_policy = [UniformPolicy(env_spec=env.spec) for _ in range(num_actors)]
    arr_actor = [Actor(actor_num=i) for i in range(num_actors)]
    for actor in arr_actor:
        init_actor(actor, pool, dict_ph, env, num_q, value_fn_params, noise_params)
    if with_best:
        # The best actor gets the next actor index after the regular ones.
        best_actor = Actor(actor_num=num_actors)
        init_actor(best_actor, pool, dict_ph, env, num_q, value_fn_params, noise_params)
    else:
        best_actor = None
    algorithm = P3S_TD3(base_kwargs=base_kwargs, env=env, arr_actor=arr_actor, best_actor=best_actor, dict_ph=dict_ph, arr_initial_exploration_policy=arr_initial_exploration_policy, with_best=with_best, target_ratio=target_ratio, target_range=target_range, lr=algorithm_params['lr'], discount=algorithm_params['discount'], tau=algorithm_params['tau'], reparameterize=algorithm_params['reparameterize'], policy_update_interval=algorithm_params['policy_update_interval'], best_update_interval=best_update_interval, save_full_state=False)
    algorithm._sess.run(tf.global_variables_initializer())
    algorithm.train()
def test_phase(opt, net, testloader, log_save_path=None):
    """Evaluate a counting network on ``testloader``.

    Accumulates MAE, RMSE and mean error over predicted vs ground-truth
    counts, printing a running frame rate, and optionally writes a summary
    table to ``log_save_path``. Returns (mae, rmse, me) averaged per image.
    """
    with torch.no_grad():
        net.eval()
        start = time()
        avg_frame_rate = 0
        mae = 0.0
        rmse = 0.0
        me = 0.0
        for (j, data) in enumerate(testloader):
            (inputs, labels) = (data['image'], data['target'])
            (inputs, labels) = (inputs.type(torch.float32), labels.unsqueeze(1).type(torch.float32))
            (inputs, labels) = (inputs.cuda(), labels.cuda())
            # Multi-stage forward: features -> resampled divisions -> merged map.
            features = net(inputs)
            div_res = net.resample(features)
            merge_res = net.parse_merge(div_res)
            # Use the deepest division level's merged output as the density map.
            outputs = merge_res[('div' + str(net.args['div_times']))]
            del merge_res
            # Counts are the sums of the density maps.
            pre = outputs.sum()
            gt = labels.sum()
            mae += abs((pre - gt))
            rmse += ((pre - gt) * (pre - gt))
            me += (pre - gt)
            end = time()
            running_frame_rate = (opt['test_batch_size'] * float((1 / (end - start))))
            # Running mean of the frame rate over the j+1 batches seen so far.
            avg_frame_rate = (((avg_frame_rate * j) + running_frame_rate) / (j + 1))
            if ((j % 1) == 0):
                print(('Test:[%5d/%5d] pre: %.3f gt:%.3f err:%.3f frame: %.2fHz/%.2fHz' % ((j + 1), len(testloader), pre, gt, (pre - gt), running_frame_rate, avg_frame_rate)))
                start = time()
        # NOTE: `j` deliberately leaks from the loop; the log uses j+1 == #batches.
        log_str = (('%10s\t %8s\t &%8s\t &%8s\t\\\\' % (' ', 'mae', 'rmse', 'me')) + '\n')
        log_str += (('%-10s\t %8.3f\t %8.3f\t %8.3f\t' % ('test', (mae / (j + 1)), math.sqrt((rmse / (j + 1))), (me / (j + 1)))) + '\n')
        if log_save_path:
            txt_write(log_save_path, log_str, mode='w')
        im_num = len(testloader)
        return ((mae / im_num), math.sqrt((rmse / im_num)), (me / im_num))
class SawyerPushWallEnvV2(SawyerXYZEnv):
OBJ_RADIUS = 0.02
def __init__(self):
    """Configure hand/object/goal ranges and the default reset state."""
    hand_low = (-0.5, 0.4, 0.05)
    hand_high = (0.5, 1, 0.5)
    obj_low = (-0.05, 0.6, 0.015)
    obj_high = (0.05, 0.65, 0.015)
    goal_low = (-0.05, 0.85, 0.01)
    goal_high = (0.05, 0.9, 0.02)
    super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
    self.init_config = {
        'obj_init_angle': 0.3,
        'obj_init_pos': np.array([0, 0.6, 0.02]),
        'hand_init_pos': np.array([0, 0.6, 0.2]),
    }
    self.goal = np.array([0.05, 0.8, 0.015])
    self.obj_init_angle = self.init_config['obj_init_angle']
    self.obj_init_pos = self.init_config['obj_init_pos']
    self.hand_init_pos = self.init_config['hand_init_pos']
    # Random-reset vector is object position followed by goal position.
    self._random_reset_space = Box(np.hstack((obj_low, goal_low)), np.hstack((obj_high, goal_high)))
    self.goal_space = Box(np.array(goal_low), np.array(goal_high))
    self.num_resets = 0
@property
def model_name(self):
    """Path to the MuJoCo XML for this task.

    Fix: __init__ reads ``self.model_name`` as an attribute (no call), so
    this must be a property — the decorator appears to have been dropped.
    """
    return full_v2_path_for('sawyer_xyz/sawyer_push_wall_v2.xml')
@_assert_task_is_set
def evaluate_state(self, obs, action):
    """Compute the reward and per-step metrics for the current observation.

    Fix: the line above the def was a bare ``_assert_task_is_set`` (a
    garbled decorator, a syntax-level no-op); restored as ``@``-decorator.
    """
    obj = obs[4:7]
    (reward, tcp_to_obj, tcp_open, obj_to_target, grasp_reward, in_place_reward) = self.compute_reward(action, obs)
    success = float(obj_to_target <= 0.07)
    near_object = float(tcp_to_obj <= 0.03)
    # Grasp counts only if touching, gripper open enough, and object lifted.
    grasp_success = float(self.touching_main_object and (tcp_open > 0) and ((obj[2] - 0.02) > self.obj_init_pos[2]))
    info = {'success': success, 'near_object': near_object, 'grasp_success': grasp_success, 'grasp_reward': grasp_reward, 'in_place_reward': in_place_reward, 'obj_to_target': obj_to_target, 'unscaled_reward': reward}
    return (reward, info)
def _get_pos_objects(self):
    """Position of the push object's geom."""
    obj_pos = self.data.get_geom_xpos('objGeom')
    return obj_pos
def _get_quat_objects(self):
    """Orientation of the push object's geom as a quaternion."""
    rot = Rotation.from_matrix(self.data.get_geom_xmat('objGeom'))
    return rot.as_quat()
def adjust_initObjPos(self, orig_init_pos):
diff = (self.get_body_com('obj')[:2] - self.data.get_geom_xpos('objGeom')[:2])
adjustedPos = (orig_init_pos[:2] + diff)
return [adjustedPos[0], adjustedPos[1], self.data.get_geom_xpos('objGeom')[(- 1)]]
def reset_model(self):
self._reset_hand()
self._target_pos = self.goal.copy()
self.obj_init_pos = self.adjust_initObjPos(self.init_config['obj_init_pos'])
self.obj_init_angle = self.init_config['obj_init_angle']
if self.random_init:
goal_pos = self._get_state_rand_vec()
self._target_pos = goal_pos[3:]
while (np.linalg.norm((goal_pos[:2] - self._target_pos[:2])) < 0.15):
goal_pos = self._get_state_rand_vec()
self._target_pos = goal_pos[3:]
self._target_pos = np.concatenate((goal_pos[(- 3):(- 1)], [self.obj_init_pos[(- 1)]]))
self.obj_init_pos = np.concatenate((goal_pos[:2], [self.obj_init_pos[(- 1)]]))
self._set_obj_xyz(self.obj_init_pos)
self.num_resets += 1
return self._get_obs()
def compute_reward(self, action, obs):
_TARGET_RADIUS = 0.05
tcp = self.tcp_center
obj = obs[4:7]
tcp_opened = obs[3]
midpoint = np.array([(- 0.05), 0.77, obj[2]])
target = self._target_pos
tcp_to_obj = np.linalg.norm((obj - tcp))
in_place_scaling = np.array([3.0, 1.0, 1.0])
obj_to_midpoint = np.linalg.norm(((obj - midpoint) * in_place_scaling))
obj_to_midpoint_init = np.linalg.norm(((self.obj_init_pos - midpoint) * in_place_scaling))
obj_to_target = np.linalg.norm((obj - target))
obj_to_target_init = np.linalg.norm((self.obj_init_pos - target))
in_place_part1 = reward_utils.tolerance(obj_to_midpoint, bounds=(0, _TARGET_RADIUS), margin=obj_to_midpoint_init, sigmoid='long_tail')
in_place_part2 = reward_utils.tolerance(obj_to_target, bounds=(0, _TARGET_RADIUS), margin=obj_to_target_init, sigmoid='long_tail')
object_grasped = self._gripper_caging_reward(action, obj, object_reach_radius=0.01, obj_radius=0.015, pad_success_thresh=0.05, xz_thresh=0.005, high_density=True)
reward = (2 * object_grasped)
if ((tcp_to_obj < 0.02) and (tcp_opened > 0)):
reward = (((2 * object_grasped) + 1.0) + (4.0 * in_place_part1))
if (obj[1] > 0.75):
reward = ((((2 * object_grasped) + 1.0) + 4.0) + (3.0 * in_place_part2))
if (obj_to_target < _TARGET_RADIUS):
reward = 10.0
return [reward, tcp_to_obj, tcp_opened, np.linalg.norm((obj - target)), object_grasped, in_place_part2] |
# NOTE(review): this bare tuple looks like the remnant of a stripped decorator
# (likely ``@pytest.fixture()``) -- confirm against the original file.
()
def initialized_db(appconfig):
    """Fixture generator: yield with the test database configured and primed.

    Behavior depends on the environment:
    - With ``TEST_DATABASE_URI`` set (a real database), each test runs inside a
      transaction savepoint that is rolled back afterwards.
    - Otherwise (SQLite), foreign keys and UTF-8 encoding are enforced via
      PRAGMAs, and with ``DISALLOW_AUTO_JOINS=true`` peewee's lazy foreign-key
      resolution is patched to raise unless the caller is on an allow-list.
    """
    under_test_real_database = bool(os.environ.get('TEST_DATABASE_URI'))
    configure(appconfig)
    # Warm internal caches so their lazy first-use queries don't run mid-test.
    model._basequery._lookup_team_roles()
    model._basequery.get_public_repo_visibility()
    model.log.get_log_entry_kinds()
    if (not under_test_real_database):
        # SQLite only: PRAGMAs are per-connection, so set and verify them here.
        db.obj.execute_sql('PRAGMA foreign_keys = ON;')
        db.obj.execute_sql('PRAGMA encoding="UTF-8";')
        assert (db.obj.execute_sql('PRAGMA foreign_keys;').fetchone()[0] == 1)
        assert (db.obj.execute_sql('PRAGMA encoding;').fetchone()[0] == 'UTF-8')
    if under_test_real_database:
        with db.transaction():
            # Enter the savepoint manually so the test body (the yield) runs
            # inside it; rollback afterwards discards all test writes.
            test_savepoint = db.savepoint()
            test_savepoint.__enter__()
            (yield)
            try:
                test_savepoint.rollback()
                test_savepoint.__exit__(None, None, None)
            except InternalError:
                # The savepoint may already be gone (e.g. aborted transaction);
                # best-effort cleanup.
                pass
    elif (os.environ.get('DISALLOW_AUTO_JOINS', 'false').lower() == 'true'):
        # Replacement for peewee.ForeignKeyAccessor.get_rel_instance that
        # forbids implicit lazy FK lookups (hidden N+1 queries) unless the
        # calling stack matches an entry in ALLOWED_AUTO_JOINS.
        def get_rel_instance(self, instance):
            value = instance.__data__.get(self.name)
            if ((value is not None) or (self.name in instance.__rel__)):
                if (self.name not in instance.__rel__):
                    # Not cached: this would trigger a query. Check whether the
                    # caller's frames match an allowed pattern prefix chain.
                    lookup_allowed = False
                    try:
                        outerframes = inspect.getouterframes(inspect.currentframe())
                    except IndexError:
                        # Some frames may be missing under certain interpreters.
                        outerframes = []
                    for allowed_auto_join in ALLOWED_AUTO_JOINS:
                        if lookup_allowed:
                            break
                        if (len(outerframes) >= (allowed_auto_join.frame_start_index + CALLER_FRAMES_OFFSET)):
                            found_match = True
                            for (index, pattern_prefix) in enumerate(allowed_auto_join.pattern_prefixes):
                                frame_info = outerframes[(index + CALLER_FRAMES_OFFSET)]
                                if (not frame_info[FRAME_NAME_INDEX].startswith(pattern_prefix)):
                                    found_match = False
                                    break
                            if found_match:
                                lookup_allowed = True
                                break
                    if (not lookup_allowed):
                        raise Exception('Missing join on instance `%s` for field `%s`', instance, self.name)
                    # Allowed: perform the lookup and cache it like peewee does.
                    obj = self.rel_model.get((self.field.rel_field == value))
                    instance.__rel__[self.name] = obj
                return instance.__rel__[self.name]
            elif (not self.field.null):
                raise self.rel_model.DoesNotExist
            return value
        with patch('peewee.ForeignKeyAccessor.get_rel_instance', get_rel_instance):
            (yield)
    else:
        (yield)
# NOTE(review): this bare tuple looks like the remnant of a stripped skip
# decorator (e.g. unittest.skipIf on root access) -- confirm against the
# original file.
(os.getuid(), 'test requires non-root access')
class FailsToOpenOutputFile_TestCase(TestCase):
    """ksflatten.main should report failure when the output file is unwritable."""

    def setUp(self):
        # Kickstart file that %includes a second temp file.
        self._include_path = mktempfile('text', prefix='ks-include')
        self._ks_path = mktempfile('autopart\n%%include %s' % self._include_path)
        # Strip all permissions from the output path so opening it fails.
        self._output_path = mktempfile()
        os.chmod(self._output_path, 0)

    def tearDown(self):
        # Remove every temp file created in setUp.
        for path in (self._ks_path, self._include_path, self._output_path):
            os.unlink(path)

    def runTest(self):
        retval, msg = ksflatten.main(
            ['--version', 'F26', '--config', self._ks_path, '--output', self._output_path]
        )
        self.assertEqual(retval, 1)
        self.assertTrue(msg.find('Failed to open output file') > -1)
def test_makefile() -> None:
    """Run the install target of misc/Makefile against a scratch DESTDIR."""
    utils.print_title('Testing makefile')
    # a2x is expected to be installed next to the current Python interpreter.
    a2x_path = pathlib.Path(sys.executable).parent / 'a2x'
    assert a2x_path.exists(), a2x_path
    with tempfile.TemporaryDirectory() as destdir:
        cmd = [
            'make',
            '-f',
            'misc/Makefile',
            f'DESTDIR={destdir}',
            f'A2X={a2x_path}',
            'install',
        ]
        subprocess.run(cmd, check=True)
def find(root: (Path | str), dirs: bool = True) -> str:
    """Recursively list everything under *root*, like ``find(1)``.

    Args:
        root: Directory to walk (``str`` or ``Path``).
        dirs: When True (the default), include directories in the listing;
            otherwise list files only.

    Returns:
        Newline-joined paths relative to *root*, sorted by path components.
    """
    if isinstance(root, str):
        root = Path(root)
    results: list[Path] = []
    for (dirpath, dirnames, filenames) in os.walk(root):
        # FIX: the original aliased `names = filenames` and then did
        # `names += dirnames`, extending the list yielded by os.walk in
        # place. Build a fresh list instead of mutating walk's list.
        names = list(filenames)
        if dirs:
            names.extend(dirnames)
        for name in names:
            results.append(Path(dirpath) / name)
    results.sort()
    return '\n'.join(str(result.relative_to(root)) for result in results)
def start_training():
    """Parse options, build datasets, train, and return checkpoint metadata.

    Returns a tuple of
    (best checkpoint path, eval split name, eval path, debug flag, opt).
    """
    logger.info('Setup config, data and model...')
    opt = BaseOptions().parse()
    set_seed(opt.seed)
    if opt.debug:
        # Deterministic cudnn for reproducible debugging runs.
        cudnn.benchmark = False
        cudnn.deterministic = True
    dataset_config = dict(dset_name=opt.dset_name, data_path=opt.train_path, v_feat_dirs=opt.v_feat_dirs, q_feat_dir=opt.t_feat_dir, q_feat_type='last_hidden_state', max_q_l=opt.max_q_l, max_v_l=opt.max_v_l, ctx_mode=opt.ctx_mode, data_ratio=opt.data_ratio, normalize_v=(not opt.no_norm_vfeat), normalize_t=(not opt.no_norm_tfeat), clip_len=opt.clip_length, max_windows=opt.max_windows, span_loss_type=opt.span_loss_type, txt_drop_ratio=opt.txt_drop_ratio, dset_domain=opt.dset_domain)
    # NOTE(review): redundant -- data_path is already opt.train_path above.
    dataset_config['data_path'] = opt.train_path
    train_dataset = StartEndDataset(**dataset_config)
    if (opt.eval_path is not None):
        # Reuse the train config for eval, but disable text dropout and point
        # the query features at the text-feature variant of the directory.
        dataset_config['data_path'] = opt.eval_path
        dataset_config['txt_drop_ratio'] = 0
        dataset_config['q_feat_dir'] = opt.t_feat_dir.replace('sub_features', 'text_features')
        eval_dataset = StartEndDataset(**dataset_config)
    else:
        eval_dataset = None
    (model, criterion, optimizer, lr_scheduler) = setup_model(opt)
    logger.info(f'Model {model}')
    count_parameters(model)
    logger.info('Start Training...')
    # Highlight-detection datasets use a dedicated training loop.
    if (opt.dset_name in ['tvsum', 'youtube_uni']):
        train_hl(model, criterion, optimizer, lr_scheduler, train_dataset, eval_dataset, opt)
    else:
        train(model, criterion, optimizer, lr_scheduler, train_dataset, eval_dataset, opt)
    return (opt.ckpt_filepath.replace('.ckpt', '_best.ckpt'), opt.eval_split_name, opt.eval_path, opt.debug, opt)
def minmax(iterable_or_value, *others, key=None, default=_marker):
    """Return a ``(minimum, maximum)`` pair from an iterable or several values.

    Accepts either one iterable or multiple positional values. *key* selects
    the comparison key, as in :func:`min`/:func:`max`. For an empty input,
    *default* is returned if supplied; otherwise ``ValueError`` is raised.

    Items are consumed in pairs so roughly 1.5 comparisons are made per item
    instead of 2.
    """
    source = (iterable_or_value, *others) if others else iterable_or_value
    stream = iter(source)
    try:
        smallest = largest = next(stream)
    except StopIteration as exc:
        if default is _marker:
            raise ValueError('`minmax()` argument is an empty iterable. Provide a `default` value to suppress this error.') from exc
        return default
    if key is None:
        # An odd trailing item is paired with the first item seen, which can
        # never perturb the running bounds.
        for a, b in zip_longest(stream, stream, fillvalue=smallest):
            if b < a:
                a, b = b, a
            if a < smallest:
                smallest = a
            if largest < b:
                largest = b
    else:
        low_key = high_key = key(smallest)
        for a, b in zip_longest(stream, stream, fillvalue=smallest):
            a_key, b_key = key(a), key(b)
            if b_key < a_key:
                a, b, a_key, b_key = b, a, b_key, a_key
            if a_key < low_key:
                smallest, low_key = a, a_key
            if high_key < b_key:
                largest, high_key = b, b_key
    return (smallest, largest)
class WorkuploadComFolder(SimpleDecrypter):
    """Decrypter for workupload.com folder/archive pages: collects the
    contained file links into one package."""

    __name__ = 'WorkuploadComFolder'
    __type__ = 'decrypter'
    __version__ = '0.01'
    __status__ = 'testing'
    # FIX: the original line was an unterminated string literal (the pattern
    # text had been stripped). NOTE(review): reconstructed from the plugin's
    # conventions -- confirm against the upstream pyload plugin.
    __pattern__ = r'https?://(?:www\.)?workupload\.com/(?:archive|folder)/\w+'
    __config__ = [('enabled', 'bool', 'Activated', True), ('use_premium', 'bool', 'Use premium account if available', True), ('folder_per_package', 'Default;Yes;No', 'Create folder for each package', 'Default'), ('max_wait', 'int', 'Reconnect if waiting time is greater than minutes', 10)]
    __description__ = 'Workupload.com folder decrypter plugin'
    __license__ = 'GPLv3'
    __authors__ = [('GammaC0de', 'nitzo2001[AT]yahoo[DOT]com')]

    # Matches the relative /file/<id> hrefs inside the folder page.
    LINK_PATTERN = r'<a href="(/file/\w+?)"'

    def decrypt(self, pyfile):
        """Load the folder page and package all unique file links found."""
        html = self.load(pyfile.url)
        links = uniquify(re.findall(self.LINK_PATTERN, html))
        if links:
            # FIX: the original comprehension had its URL-prefix string literal
            # stripped, leaving invalid syntax. NOTE(review): the absolute-URL
            # prefix is reconstructed -- confirm against the upstream plugin.
            self.packages = [
                (
                    pyfile.package().folder,
                    ['https://workupload.com' + link for link in links],
                    pyfile.package().name,
                )
            ]
def evolve_function_sig_callback(ctx: mypy.plugin.FunctionSigContext) -> CallableType:
    """mypy function-signature hook: give ``attrs.evolve(inst, **changes)`` a
    precise per-call signature.

    Replaces the generic ``**changes`` with one named-optional keyword
    argument per attrs field of the instance's type, and sets the return type
    to the instance type. Falls back to the default signature whenever the
    call shape or the instance type cannot be analyzed.
    """
    if (len(ctx.args) != 2):
        # The stub is expected to declare exactly (inst, **changes).
        ctx.api.fail(f'"{ctx.default_signature.name}" has unexpected type annotation', ctx.context)
        return ctx.default_signature
    if (len(ctx.args[0]) != 1):
        # Not exactly one positional instance argument; leave signature alone.
        return ctx.default_signature
    inst_arg = ctx.args[0][0]
    inst_type = get_proper_type(ctx.api.get_expression_type(inst_arg))
    inst_type_str = format_type_bare(inst_type, ctx.api.options)
    attr_types = _get_expanded_attr_types(ctx, inst_type, inst_type, inst_type)
    if (attr_types is None):
        # Not an attrs class (or not resolvable); keep the default signature.
        return ctx.default_signature
    # Meet the per-component field types (handles unions of attrs classes).
    fields = _meet_fields(attr_types)
    return CallableType(arg_names=['inst', *fields.keys()], arg_kinds=([ARG_POS] + ([ARG_NAMED_OPT] * len(fields))), arg_types=[inst_type, *fields.values()], ret_type=inst_type, fallback=ctx.default_signature.fallback, name=f'{ctx.default_signature.name} of {inst_type_str}')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.