code stringlengths 281 23.7M |
|---|
class _FunctionChangers():
def __init__(self, pyfunction, definition_info, changers=None):
self.pyfunction = pyfunction
self.definition_info = definition_info
self.changers = changers
self.changed_definition_infos = self._get_changed_definition_infos()
def _get_changed_definition_infos(self):
definition_info = self.definition_info
result = [definition_info]
for changer in self.changers:
definition_info = copy.deepcopy(definition_info)
changer.change_definition_info(definition_info)
result.append(definition_info)
return result
def change_definition(self, call):
return self.changed_definition_infos[(- 1)].to_string()
def change_call(self, primary, pyname, call):
call_info = functionutils.CallInfo.read(primary, pyname, self.definition_info, call)
mapping = functionutils.ArgumentMapping(self.definition_info, call_info)
for (definition_info, changer) in zip(self.changed_definition_infos, self.changers):
changer.change_argument_mapping(definition_info, mapping)
return mapping.to_call_info(self.changed_definition_infos[(- 1)]).to_string() |
def quote_type_string(type_string: str) -> str:
    """Wrap a rendered type in double quotes, unless it is a special form.

    Special forms (module references, overload markers, tuple/union summaries,
    and trailing-'?' optionals) are returned unchanged.
    """
    unquoted = ('Module', 'overloaded function', 'Never', '<deleted>')
    if type_string in unquoted:
        return type_string
    if type_string.startswith('Module ') or type_string.endswith('?'):
        return type_string
    # Summarized aggregates like "<tuple: 3 items>" stay bare as well.
    if re.match('^<(tuple|union): \\d+ items>$', type_string) is not None:
        return type_string
    return f'"{type_string}"'
# NOTE(review): the two lines below read like the remains of stripped
# decorators (likely '@pytest.mark.parametrize(...)' and a '@requires_gdal33'
# style marker whose prefixes were lost in extraction) — confirm against the
# upstream rasterio test suite before running.
.parametrize('ovr_levels', [[2], [3], [2, 4, 8]])
_gdal33
def test_ignore_overviews(data, ovr_levels):
    """Opening with OVERVIEW_LEVEL=-1 must hide all overviews, even after
    overviews have been built on the file."""
    inputfile = str(data.join('RGB.byte.tif'))
    # Build real overviews on the writable fixture copy first.
    with rasterio.open(inputfile, 'r+') as src:
        src.build_overviews(ovr_levels, resampling=Resampling.nearest)
    # OVERVIEW_LEVEL=-1 asks GDAL to ignore overviews entirely, so every band
    # must report an empty overview list.
    with rasterio.open(inputfile, OVERVIEW_LEVEL=(- 1)) as src:
        assert (src.overviews(1) == [])
        assert (src.overviews(2) == [])
        assert (src.overviews(3) == [])
class FC3_TestCase(CommandTest):
    """Parse tests for the FC3-era 'vnc' kickstart command."""
    command = 'vnc'

    def runTest(self):
        # A bare 'vnc' round-trips; disabling the parsed object suppresses
        # its string form entirely.
        obj = self.assert_parse('vnc', 'vnc\n')
        obj.enabled = False
        self.assertEqual(str(obj), '')
        # Every supported option combination must round-trip unchanged.
        for command_line in ('vnc --connect=HOSTNAME',
                             'vnc --connect=HOSTNAME:PORT',
                             'vnc --password=PASSWORD',
                             'vnc --connect=HOSTNAME --password=PASSWORD'):
            self.assert_parse(command_line, command_line + '\n')
        # Options that require a value must fail to parse without one.
        for bad_line in ('vnc --connect', 'vnc --password'):
            self.assert_parse_error(bad_line)
def test_model_before_dream(trained_data_prop, computed_data_prop, directory, prop_name='logP'):
    """Scatter-plot modelled vs. computed property values and save the figure.

    The plot is written to '<directory>/test_model_before_dreaming' and also
    shown interactively before the figure is closed.
    """
    plt.figure()
    plt.scatter(trained_data_prop, computed_data_prop)
    plt.xlabel(f'Modelled {prop_name}')
    plt.ylabel(f'Computed {prop_name}')
    output_path = f'{directory}/test_model_before_dreaming'
    plt.savefig(output_path)
    plt.show()
    closefig()
def register_model_architecture(model_name, arch_name):
    """Return a decorator that registers `fn` as architecture `arch_name` of
    the already-registered model `model_name`.

    Raises ValueError for unknown models, duplicate architectures, or
    non-callable registrands.
    """

    def arch_override_from_yaml(args, arch):
        # Load config/model/<arch>.yaml from the fairseq checkout root and
        # fill in every attribute that is not already set on `args`.
        root_dir = os.path.dirname(os.path.dirname(fairseq.__file__))
        yaml_path = os.path.join(root_dir, 'config/model/{}.yaml'.format(arch))
        if not os.path.exists(yaml_path):
            raise RuntimeError(f'yaml file {yaml_path} does not exist!')
        arch_cfg = OmegaConf.load(yaml_path)
        for key, value in arch_cfg.items():
            # Existing attributes win; the yaml value is only a default.
            setattr(args, key, getattr(args, key, value))

    def register_model_arch_fn(fn):
        # Validate before touching any registry so failures leave no state.
        if model_name not in MODEL_REGISTRY:
            raise ValueError('Cannot register model architecture for unknown model type ({})'.format(model_name))
        if arch_name in ARCH_MODEL_REGISTRY:
            raise ValueError('Cannot register duplicate model architecture ({})'.format(arch_name))
        if not callable(fn):
            raise ValueError('Model architecture must be callable ({})'.format(arch_name))
        ARCH_MODEL_REGISTRY[arch_name] = MODEL_REGISTRY[model_name]
        ARCH_MODEL_NAME_REGISTRY[arch_name] = model_name
        ARCH_MODEL_INV_REGISTRY.setdefault(model_name, []).append(arch_name)
        if type(fn) is type and issubclass(fn, BaseFairseqModel):
            # Class registrands get their defaults from the yaml config.
            ARCH_CONFIG_REGISTRY[arch_name] = lambda args: arch_override_from_yaml(args, arch=arch_name)
        else:
            ARCH_CONFIG_REGISTRY[arch_name] = fn
        return fn

    return register_model_arch_fn
class QCBasisSet(_QCBase):
    """In-memory representation of a QCSchema basis set with dict and HDF5
    (de)serialization helpers."""

    # Per-center basis data, keyed by a center label.
    center_data: Mapping[(str, QCCenterData)]
    # For each atom, the key into `center_data` describing its basis.
    atom_map: Sequence[str]
    name: str
    # Optional QCSchema metadata; omitted from HDF5 output when None.
    schema_version: (int | None) = None
    schema_name: (str | None) = None
    description: (str | None) = None

    # NOTE(review): the `cls` first parameter suggests this was decorated with
    # @classmethod upstream; the decorator appears lost in extraction — confirm.
    def from_dict(cls, data: dict[(str, Any)]) -> QCBasisSet:
        """Build a QCBasisSet from a plain dict, converting nested center data.

        Mutates `data` (pops 'center_data') before forwarding the rest as
        keyword arguments.
        """
        center_data = {k: QCCenterData.from_dict(v) for (k, v) in data.pop('center_data').items()}
        return cls(**data, center_data=center_data)

    def to_hdf5(self, group: h5py.Group) -> None:
        """Write this basis set into an HDF5 group.

        Center data becomes nested subgroups; scalar fields become group
        attributes, with the optional ones written only when set.
        """
        center_data = group.require_group('center_data')
        for (key, value) in self.center_data.items():
            key_group = center_data.require_group(key)
            value.to_hdf5(key_group)
        group.attrs['atom_map'] = self.atom_map
        group.attrs['name'] = self.name
        if (self.schema_version is not None):
            group.attrs['schema_version'] = self.schema_version
        if (self.schema_name is not None):
            group.attrs['schema_name'] = self.schema_name
        if (self.description is not None):
            group.attrs['description'] = self.description

    # NOTE(review): also reads like a @classmethod (see from_dict) — confirm.
    def _from_hdf5_group(cls, h5py_group: h5py.Group) -> QCBasisSet:
        """Rebuild a QCBasisSet from an HDF5 group written by `to_hdf5`."""
        center_data: dict[(str, QCCenterData)] = {}
        for (name, group) in h5py_group['center_data'].items():
            center_data[name] = cast(QCCenterData, QCCenterData.from_hdf5(group))
        return cls(center_data=center_data, atom_map=h5py_group.attrs['atom_map'], name=h5py_group.attrs['name'], schema_version=h5py_group.attrs.get('schema_version', None), schema_name=h5py_group.attrs.get('schema_name', None), description=h5py_group.attrs.get('description', None))
def _set_ie_mode():
    """Configure the Windows registry so the embedded IE control uses the
    newest available document mode and correct 96-DPI rendering for this
    executable.

    Writes per-user FEATURE_BROWSER_EMULATION and FEATURE_96DPI_PIXEL values
    keyed by the current interpreter's executable name. Windows-only.
    """
    import winreg

    def get_ie_mode():
        """Return the FEATURE_BROWSER_EMULATION DWORD for the installed IE."""
        ie_key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, 'Software\\Microsoft\\Internet Explorer')
        try:
            version, _value_type = winreg.QueryValueEx(ie_key, 'svcVersion')
        except OSError:
            # Older IE releases store the version under 'Version' instead of
            # 'svcVersion'. (Previously a bare `except:` swallowed everything,
            # including KeyboardInterrupt.)
            version, _value_type = winreg.QueryValueEx(ie_key, 'Version')
        winreg.CloseKey(ie_key)
        if version.startswith('11'):
            return 11001
        if version.startswith('10'):
            return 10001
        if version.startswith('9'):
            return 9999
        if version.startswith('8'):
            return 8888
        # Unknown/newer versions: assume IE11 semantics.
        return 11001

    def _open_or_create(path):
        # Open the per-user feature-control key, creating it on first run.
        try:
            return winreg.OpenKey(winreg.HKEY_CURRENT_USER, path, 0, winreg.KEY_ALL_ACCESS)
        except OSError:
            return winreg.CreateKeyEx(winreg.HKEY_CURRENT_USER, path, 0, winreg.KEY_ALL_ACCESS)

    browser_emulation = _open_or_create(
        'Software\\Microsoft\\Internet Explorer\\Main\\FeatureControl\\FEATURE_BROWSER_EMULATION')
    dpi_support = _open_or_create(
        'Software\\Microsoft\\Internet Explorer\\Main\\FeatureControl\\FEATURE_96DPI_PIXEL')
    mode = get_ie_mode()
    executable_name = sys.executable.split('\\')[-1]
    winreg.SetValueEx(browser_emulation, executable_name, 0, winreg.REG_DWORD, mode)
    winreg.CloseKey(browser_emulation)
    winreg.SetValueEx(dpi_support, executable_name, 0, winreg.REG_DWORD, 1)
    winreg.CloseKey(dpi_support)
def write_shell_script(dir: str, name: str, content: List[str]) -> str:
    """Create an executable bash script at dir/name containing `content`.

    The file begins with a '#! /bin/bash' shebang, holds one entry of
    `content` per line, ends with a trailing blank line, and is marked
    rwxr-xr-x. Returns the script path.
    """
    script_path = os.path.join(dir, name)
    with open(script_path, 'w') as f:
        f.write('#! /bin/bash\n')
        for line in content:
            f.write(line + '\n')
        f.write('\n')
    # 0o755 (== 493 decimal): rwxr-xr-x.
    os.chmod(script_path, 0o755)
    return script_path
class InfLineLabel(TextItem):
    """Text label attached to an InfiniteLine.

    The label tracks the line's value, positions itself at a fractional
    position `orthoPos` along the visible span of the line, and can be
    dragged along the line when `movable` is True.
    """

    def __init__(self, line, text='', movable=False, position=0.5, anchors=None, **kwds):
        # `text` is a format string rendered with {value} on every change.
        self.line = line
        self.movable = movable
        self.moving = False
        # Fractional position (0..1) along the line's visible span.
        self.orthoPos = position
        self.format = text
        self.line.sigPositionChanged.connect(self.valueChanged)
        # Cached endpoints of the visible line segment; invalidated on update.
        self._endpoints = (None, None)
        if (anchors is None):
            # Pick anchors so the label flips to stay inside the view:
            # horizontal lines anchor above/below, vertical lines left/right.
            rax = kwds.get('rotateAxis', None)
            if (rax is not None):
                if (tuple(rax) == (1, 0)):
                    anchors = [(0.5, 0), (0.5, 1)]
                else:
                    anchors = [(0, 0.5), (1, 0.5)]
            elif ((line.angle % 180) == 0):
                anchors = [(0.5, 0), (0.5, 1)]
            else:
                anchors = [(0, 0.5), (1, 0.5)]
        self.anchors = anchors
        TextItem.__init__(self, **kwds)
        self.setParentItem(line)
        self.valueChanged()

    def valueChanged(self):
        """Re-render the label text from the line's current value."""
        if (not self.isVisible()):
            return
        value = self.line.value()
        self.setText(self.format.format(value=value))
        self.updatePosition()

    def getEndpoints(self):
        """Return (and cache) the endpoints of the line's visible segment,
        in the line's coordinate system; (None, None) if not computable."""
        if (self._endpoints[0] is None):
            lr = self.line.boundingRect()
            pt1 = Point(lr.left(), 0)
            pt2 = Point(lr.right(), 0)
            if ((self.line.angle % 90) != 0):
                # Diagonal lines: clip the segment against the view rect.
                view = self.getViewBox()
                if ((not self.isVisible()) or (not isinstance(view, ViewBox))):
                    # Not in a ViewBox yet; leave the cache empty.
                    return (None, None)
                p = QtGui.QPainterPath()
                p.moveTo(pt1)
                p.lineTo(pt2)
                p = self.line.itemTransform(view)[0].map(p)
                vr = QtGui.QPainterPath()
                vr.addRect(view.boundingRect())
                paths = vr.intersected(p).toSubpathPolygons(QtGui.QTransform())
                if (len(paths) > 0):
                    l = list(paths[0])
                    pt1 = self.line.mapFromItem(view, l[0])
                    pt2 = self.line.mapFromItem(view, l[1])
            self._endpoints = (pt1, pt2)
        return self._endpoints

    def updatePosition(self):
        """Move the label to `orthoPos` along the visible segment and pick
        the anchor that keeps it on-screen."""
        # Invalidate the cache; the view or line may have moved.
        self._endpoints = (None, None)
        (pt1, pt2) = self.getEndpoints()
        if (pt1 is None):
            return
        # Linear interpolation between the two endpoints.
        pt = ((pt2 * self.orthoPos) + (pt1 * (1 - self.orthoPos)))
        self.setPos(pt)
        vr = self.line.viewRect()
        if (vr is not None):
            self.setAnchor(self.anchors[(0 if (vr.center().y() < 0) else 1)])

    def setVisible(self, v):
        TextItem.setVisible(self, v)
        if v:
            # Text/position updates are skipped while hidden; refresh now.
            self.valueChanged()

    def setMovable(self, m):
        """Enable/disable dragging the label along the line."""
        self.movable = m
        self.setAcceptHoverEvents(m)

    def setPosition(self, p):
        """Set the fractional (0..1) position of the label along the line."""
        self.orthoPos = p
        self.updatePosition()

    def setFormat(self, text):
        """Set the label format string; rendered with {value} substituted."""
        self.format = text
        self.valueChanged()

    def mouseDragEvent(self, ev):
        if (self.movable and (ev.button() == QtCore.Qt.MouseButton.LeftButton)):
            if ev.isStart():
                # NOTE(review): drag state is tracked in `_moving` here, but
                # __init__ and mouseClickEvent use `moving` — these never
                # agree, so right-click cancel below looks unreachable.
                # Confirm against upstream pyqtgraph.
                self._moving = True
                self._cursorOffset = self._posToRel(ev.buttonDownPos())
                self._startPosition = self.orthoPos
            ev.accept()
            if (not self._moving):
                return
            rel = self._posToRel(ev.pos())
            self.orthoPos = fn.clip_scalar(((self._startPosition + rel) - self._cursorOffset), 0.0, 1.0)
            self.updatePosition()
            if ev.isFinish():
                self._moving = False

    def mouseClickEvent(self, ev):
        # Right-click during a drag restores the starting position.
        if (self.moving and (ev.button() == QtCore.Qt.MouseButton.RightButton)):
            ev.accept()
            self.orthoPos = self._startPosition
            self.moving = False

    def hoverEvent(self, ev):
        if ((not ev.isExit()) and self.movable):
            ev.acceptDrags(QtCore.Qt.MouseButton.LeftButton)

    def viewTransformChanged(self):
        GraphicsItem.viewTransformChanged(self)
        # Endpoints depend on the view transform; recompute before repaint.
        self.updatePosition()
        TextItem.viewTransformChanged(self)

    def _posToRel(self, pos):
        """Convert a point (label coords) to a 0..1 fraction along the line's
        visible segment; 0 when the endpoints are unknown."""
        (pt1, pt2) = self.getEndpoints()
        if (pt1 is None):
            return 0
        pos = self.mapToParent(pos)
        return ((pos.x() - pt1.x()) / (pt2.x() - pt1.x()))
# NOTE(review): the line below looks like the argument list of a stripped
# decorator (plausibly pyvisa-sys's resource registration for the PXI INSTR
# resource class); as written it is a no-op tuple expression — confirm
# against the upstream module.
(constants.InterfaceType.pxi, 'INSTR')
class PXIInstrument(PXICommon):
    """PXI INSTR resource exposing instrument identification attributes."""
    # Manufacturer name reported by the device (VI_ATTR_MANF_NAME).
    manufacturer_name: Attribute[str] = attributes.AttrVI_ATTR_MANF_NAME()
    # Numeric manufacturer id (VI_ATTR_MANF_ID).
    manufacturer_id: Attribute[int] = attributes.AttrVI_ATTR_MANF_ID()
    # Model name reported by the device (VI_ATTR_MODEL_NAME).
    model_name: Attribute[str] = attributes.AttrVI_ATTR_MODEL_NAME()
    # Numeric model code (VI_ATTR_MODEL_CODE).
    model_code: Attribute[int] = attributes.AttrVI_ATTR_MODEL_CODE()
class TestWordInformationPreserved(MetricClassTester):
    """Class-implementation tests for the WordInformationPreserved metric."""

    def test_word_information_preserved_with_valid_input(self) -> None:
        # The same prediction/reference pair is fed in for all four updates.
        predictions = ['hello world', 'welcome to the facebook']
        references = ['hello metaverse', 'welcome to meta']
        self.run_class_implementation_tests(
            metric=WordInformationPreserved(),
            state_names={'correct_total', 'input_total', 'target_total'},
            update_kwargs={
                'input': [predictions[:] for _ in range(4)],
                'target': [references[:] for _ in range(4)],
            },
            compute_result=torch.tensor(0.3, dtype=torch.float64),
            num_total_updates=4,
        )

    def test_word_information_preserved_with_invalid_input(self) -> None:
        metric = WordInformationPreserved()
        # Mixing a list input with a bare string target must be rejected.
        with self.assertRaisesRegex(ValueError, 'input and target should have the same type'):
            metric.update(['hello metaverse', 'welcome to meta'], 'hello world')
        # Mismatched list lengths must be rejected as well.
        with self.assertRaisesRegex(ValueError, 'input and target lists should have the same length'):
            metric.update(['hello metaverse', 'welcome to meta'], ['welcome to meta', 'this is the prediction', 'there is an other sample'])
def unwrap_assert_methods() -> None:
    """Stop every assert-method patcher installed by this module and reset
    the bookkeeping containers.

    Patchers that were never started are ignored; any other RuntimeError
    from `stop()` propagates.
    """
    for patcher in _mock_module_patches:
        try:
            patcher.stop()
        except RuntimeError as err:
            # mock raises exactly this message for an unstarted patcher;
            # anything else is a genuine failure.
            if str(err) != 'stop called on unstarted patcher':
                raise
    # Mutate in place so external references observe the reset.
    _mock_module_patches[:] = []
    _mock_module_originals.clear()
def runUsernameLikePassword(args):
    """Run the UsernameLikePassword module: test whether MSSQL accounts use
    their username as password, then report findings via args['print'].

    `args` is the tool's shared option dict (host, port, run flag, printer).
    Returns the connection status from connect().
    """
    usernameLikePassword = UsernameLikePassword(args)
    # Fixed: removed the dead `status = True` that was immediately overwritten.
    status = usernameLikePassword.connect(stopIfError=True)
    if args['run'] is not None:
        args['print'].title('MSSQL users have not the password identical to the username ?')
        usernameLikePassword.tryUsernameLikePassword()
        if usernameLikePassword.validAccountsList == {}:
            args['print'].badNews('No found a valid account on {0}:{1} in UsernameLikePassword module'.format(args['host'], args['port']))
        else:
            args['print'].goodNews('Accounts found on {0}:{1}: {2}'.format(args['host'], args['port'], getCredentialsFormated(usernameLikePassword.validAccountsList)))
# NOTE(review): the line below is invalid as written — it reads like the
# keyword arguments of a stripped decorator (plausibly a Django template-tag
# registration such as '@register.simple_tag(name=...)'); confirm upstream.
(name='get_reviewers_vote_details')
def get_reviewers_vote_details(proposal, user):
    """Collect (voter_nick, vote_value, vote_comment) for every reviewer of
    the proposal's section within the proposal's conference.

    Missing votes/comments are reported as None. The `user` parameter is
    currently unused by the body.
    """
    v_detail = collections.namedtuple('v_detail', 'voter_nick vote_value vote_comment')
    reviewers = ProposalSectionReviewer.objects.filter(proposal_section=proposal.proposal_section, conference_reviewer__conference=proposal.conference)
    vote_details = []
    for reviewer in reviewers:
        voter_nick = reviewer.conference_reviewer.nick
        rv_qs = ProposalSectionReviewerVote.objects.filter(proposal=proposal, voter=reviewer)
        # Queryset truthiness: empty queryset means no recorded vote.
        if rv_qs:
            vote_value = rv_qs[0].vote_value.description
        else:
            vote_value = None
        vc_qs = ProposalComment.objects.filter(proposal=proposal, commenter=reviewer.conference_reviewer.reviewer, vote=True)
        if vc_qs:
            vote_comment = vc_qs[0].comment
        else:
            vote_comment = None
        vote_details.append(v_detail(voter_nick, vote_value, vote_comment))
    return vote_details
class S_VGG11(nn.Module):
    """Spiking VGG-11 for 32x32 inputs: eight spiking conv stages, a flatten,
    two spiking FC stages, and a final readout layer, each unrolled over T
    timesteps."""

    def __init__(self, num_classes: int=10, T: int=3) -> None:
        super().__init__()
        # Convolutional stages; stride-2 convs replace pooling for downsampling.
        self.layer1 = Spiking(nn.Sequential(first_conv(3, 64, 3, stride=1, padding=1), nn.BatchNorm2d(64), IF()), T)
        self.layer2 = Spiking(nn.Sequential(QuantConv2d(64, 128, 3, stride=2, padding=1), nn.BatchNorm2d(128), IF()), T)
        self.layer3 = Spiking(nn.Sequential(QuantConv2d(128, 256, 3, stride=1, padding=1), nn.BatchNorm2d(256), IF()), T)
        self.layer4 = Spiking(nn.Sequential(QuantConv2d(256, 256, 3, stride=2, padding=1), nn.BatchNorm2d(256), IF()), T)
        self.layer5 = Spiking(nn.Sequential(QuantConv2d(256, 512, 3, stride=1, padding=1), nn.BatchNorm2d(512), IF()), T)
        self.layer6 = Spiking(nn.Sequential(QuantConv2d(512, 512, 3, stride=2, padding=1), nn.BatchNorm2d(512), IF()), T)
        self.layer7 = Spiking(nn.Sequential(QuantConv2d(512, 512, 3, stride=1, padding=1), nn.BatchNorm2d(512), IF()), T)
        self.layer8 = Spiking(nn.Sequential(QuantConv2d(512, 512, 3, stride=2, padding=1), nn.BatchNorm2d(512), IF()), T)
        # Fully-connected head; spatial size is 2x2 after four stride-2 stages.
        self.layer9 = Spiking(nn.Sequential(QuantLinear(2 * 2 * 512, 4096), nn.BatchNorm1d(4096), IF()), T)
        self.layer10 = Spiking(nn.Sequential(QuantLinear(4096, 4096), nn.BatchNorm1d(4096), IF()), T)
        self.layer11 = last_Spiking(last_fc(4096, 10), T)
        self.flat = Dummy(nn.Flatten(2))
        # The first stage encodes the static image into spikes.
        self.layer1.is_first = True

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4,
                      self.layer5, self.layer6, self.layer7, self.layer8):
            x = stage(x)
        x = self.flat(x)
        for stage in (self.layer9, self.layer10, self.layer11):
            x = stage(x)
        return x

    def show_params(self):
        """Print quantization parameters of every quantized submodule."""
        for module in self.modules():
            if isinstance(module, (QuantConv2d, QuantLinear, QuantReLU)):
                module.show_params()
def get_system_metadata(repo_root):
    """Return provenance metadata for a ported model: the git SHA of the
    Helsinki repo at `repo_root`, the SHA of the current (transformers)
    checkout, plus the porting host name and timestamp."""
    import git
    helsinki_sha = git.Repo(path=repo_root, search_parent_directories=True).head.object.hexsha
    transformers_sha = git.Repo(path='.', search_parent_directories=True).head.object.hexsha
    return {
        'helsinki_git_sha': helsinki_sha,
        'transformers_git_sha': transformers_sha,
        'port_machine': socket.gethostname(),
        'port_time': time.strftime('%Y-%m-%d-%H:%M'),
    }
class Test_average_gate_fidelity():
    """Property tests for qutip's average_gate_fidelity."""

    # NOTE(review): this method takes `dimension` but no parametrize marker is
    # visible — the '.parametrize' lines below suggest decorators were
    # stripped in extraction; confirm against the upstream qutip tests.
    def test_identity(self, dimension):
        """The identity channel has unit average gate fidelity."""
        id = qeye(dimension)
        assert (average_gate_fidelity(id) == pytest.approx(1, abs=1e-12))
    # NOTE(review): remnant of a stripped '@pytest.mark.parametrize' decorator.
    .parametrize('dimension', [2, 5, 10, 20])
    def test_bounded(self, dimension):
        """Fidelity of a random CPTP channel lies within [0, 1] up to tol."""
        tol = 1e-07
        channel = rand_super_bcsz(dimension)
        assert ((- tol) <= average_gate_fidelity(channel) <= (1 + tol))
    # NOTE(review): remnant of a stripped '@pytest.mark.parametrize' decorator.
    .parametrize('dimension', [2, 5, 10, 20])
    def test_unitaries_equal_1(self, dimension):
        """A unitary channel compared against its own unitary scores 1."""
        tol = 1e-07
        U = rand_unitary(dimension)
        SU = to_super(U)
        assert (average_gate_fidelity(SU, target=U) == pytest.approx(1, abs=tol))

    def test_average_gate_fidelity_against_legacy_implementation(self):
        """Current implementation must agree with the pre-5.0 Kraus formula."""
        def agf_pre_50(oper, target=None):
            # Legacy formula: (d + sum_k |Tr(A_k target^dag)|^2) / (d^2 + d).
            kraus = to_kraus(oper)
            d = kraus[0].shape[0]
            if (kraus[0].shape[1] != d):
                # NOTE(review): `return` of a TypeError (not `raise`) — kept
                # byte-identical; looks like a latent bug upstream.
                return TypeError('Average gate fidelity only implemented for square superoperators.')
            if (target is None):
                return ((d + np.sum([(np.abs(A_k.tr()) ** 2) for A_k in kraus])) / ((d * d) + d))
            return ((d + np.sum([(np.abs((A_k * target.dag()).tr()) ** 2) for A_k in kraus])) / ((d * d) + d))
        oper = rand_super_bcsz(16)
        target = rand_unitary(16)
        np.testing.assert_almost_equal(average_gate_fidelity(oper, target), agf_pre_50(oper, target))
def downloadSample(tmp_path_factory: pytest.TempPathFactory, sample: Dict[str, str]):
    """Download `sample['sourceUrl']` into a fresh temp folder named after
    this test module and return the downloaded file's path as a string.

    `sample` must provide 'fileName' and 'sourceUrl' keys.
    """
    folder = os.path.splitext(os.path.basename(__file__))[0]
    folder = tmp_path_factory.mktemp(folder)
    sample_path = folder / sample['fileName']
    response = requests.get(sample['sourceUrl'], allow_redirects=True)
    # Fixed: use a context manager so the handle is closed even if the
    # write raises (previously open/close with no try/finally).
    with open(sample_path, 'wb') as sample_file:
        sample_file.write(response.content)
    return str(sample_path)
class Registry(object):
    """A simple name -> object registry.

    Objects can be registered either directly (`registry.register(obj)`) or
    via decorator (`@registry.register()`), optionally under an explicit name;
    otherwise the object's __name__ is used.
    """

    def __init__(self, name):
        self._name = name
        self._obj_map = {}

    def _do_register(self, name, obj):
        """Insert `obj` under `name`; duplicate names are a programming error."""
        assert (name not in self._obj_map), "An object named '{}' was already registered in '{}' registry!".format(name, self._name)
        self._obj_map[name] = obj

    def register(self, obj=None, name=None):
        """Register `obj`, or return a decorator when called without one."""
        if obj is None:
            # Decorator form: @registry.register() / @registry.register(name=...)
            def deco(func_or_class, name=name):
                key = func_or_class.__name__ if name is None else name
                self._do_register(key, func_or_class)
                return func_or_class
            return deco
        # Direct-call form.
        key = obj.__name__ if name is None else name
        self._do_register(key, obj)

    def get(self, name):
        """Look up a registered object; KeyError with context when missing."""
        found = self._obj_map.get(name)
        if found is None:
            raise KeyError("No object named '{}' found in '{}' registry!".format(name, self._name))
        return found

    def get_list(self):
        """Return all registered names."""
        return list(self._obj_map.keys())
def main_worker(gpu, ngpus_per_node, args):
    """Per-process training entry point (one process per GPU when
    distributed): builds the model, CIFAR-10/100 loaders, optimizer and
    schedulers, then runs the train/validate loop for args.epochs, tracking
    the best top-1 accuracy in the module-global `return_acc`.
    """
    global return_acc
    args.gpu = gpu
    if (args.gpu is not None):
        print('Use GPU: {} for training'.format(args.gpu))
    if args.distributed:
        # Resolve this process's global rank before joining the process group.
        if ((args.dist_url == 'env://') and (args.rank == (- 1))):
            args.rank = int(os.environ['RANK'])
        if args.multiprocessing_distributed:
            # One process per GPU: global rank = node rank * gpus + local gpu.
            args.rank = ((args.rank * ngpus_per_node) + gpu)
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
    num_classes = (10 if (args.dataset == 'CIFAR10') else 100)
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True, num_classes=num_classes)
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch](num_classes=num_classes)
    if args.distributed:
        if (args.gpu is not None):
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # Split the global batch and workers across this node's GPUs.
            args.batch_size = int((args.batch_size / ngpus_per_node))
            args.workers = int((((args.workers + ngpus_per_node) - 1) / ngpus_per_node))
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif (args.gpu is not None):
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        model = torch.nn.DataParallel(model).cuda()
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    cudnn.benchmark = args.benchmark
    # Dataset selection; note the CIFAR10 normalization uses zero means —
    # presumably intentional for this codebase, but worth confirming.
    if (args.dataset == 'CIFAR10'):
        data_root = args.cifar10_path
        train_dataset = datasets.CIFAR10(root=data_root, train=True, download=True, transform=transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(mean=[0., 0., 0.], std=[0.2023, 0.1994, 0.201])]))
        test_dataset = datasets.CIFAR10(root=data_root, train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0., 0., 0.], std=[0.2023, 0.1994, 0.201])]))
    else:
        data_root = args.cifar100_path
        train_dataset = datasets.CIFAR100(root=data_root, train=True, download=True, transform=transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(mean=[0.5071, 0.4867, 0.4408], std=[0.2675, 0.2565, 0.2761])]))
        test_dataset = datasets.CIFAR100(root=data_root, train=False, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.5071, 0.4867, 0.4408], std=[0.2675, 0.2565, 0.2761])]))
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    # Shuffle only when no sampler owns the ordering.
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
    log_dir = os.path.dirname(args.log_path)
    print('tensorboard dir {}'.format(log_dir))
    tensor_writer = SummaryWriter(log_dir)
    (optimizer, base_optimizer, lr_scheduler, grad_rho_scheduler, grad_norm_rho_scheduler) = get_optim_and_schedulers(model, args)
    for epoch in range(args.epochs):
        if args.distributed:
            # Re-seed the sampler so each epoch sees a different shard order.
            train_sampler.set_epoch(epoch)
        train_epoch(gpu, train_loader, model, base_optimizer, epoch, args, lr_scheduler=lr_scheduler, grad_rho_scheduler=grad_rho_scheduler, grad_norm_rho_scheduler=grad_norm_rho_scheduler, optimizer=optimizer)
        lr_scheduler.step()
        acc1 = validate(gpu, val_loader, model, criterion, True, args)
        # Track the best top-1 accuracy seen so far.
        return_acc = max(return_acc, acc1)
        tensor_writer.add_scalar('return_/test', return_acc, epoch)
        print('Test top-1 acc: ', return_acc)
def test_zip():
    """zip pairs one element from each upstream, via both the module-level
    sz.zip and the Stream.zip method."""
    source_a = Stream()
    source_b = Stream()
    zipped = sz.zip(source_a, source_b)
    collected = zipped.sink_to_list()
    # Elements are matched in arrival order, one per upstream.
    source_a.emit(1)
    source_b.emit('a')
    source_a.emit(2)
    source_b.emit('b')
    assert collected == [(1, 'a'), (2, 'b')]
    # Method form with three upstreams.
    source_d = Stream()
    zipped3 = source_a.zip(source_b, source_d)
    collected3 = zipped3.sink_to_list()
    source_a.emit(1)
    source_b.emit(2)
    source_d.emit(3)
    assert collected3 == [(1, 2, 3)]
def test_license_by_id() -> None:
    """license_by_id resolves SPDX ids to License records with correct
    name, OSI approval, and deprecation flags."""
    cases = [
        ('MIT', 'MIT License'),
        ('LGPL-3.0-or-later', 'GNU Lesser General Public License v3.0 or later'),
    ]
    for spdx_id, expected_name in cases:
        license = license_by_id(spdx_id)
        assert (license.id == spdx_id)
        assert (license.name == expected_name)
        assert license.is_osi_approved
        assert (not license.is_deprecated)
def test_info_no_setup_pkg_info_no_deps(fixture_dir: FixtureDirGetter) -> None:
    """A PKG-INFO-only project without setup.py yields name/version metadata
    but no dependency list."""
    project_path = fixture_dir('inspection') / 'demo_no_setup_pkg_info_no_deps'
    info = PackageInfo.from_directory(project_path, disable_build=True)
    assert (info.name == 'demo')
    assert (info.version == '0.1.0')
    # No requirement metadata is available at all for this fixture.
    assert (info.requires_dist is None)
def read_test_dataset(model_name):
    """Build a per-language G2P test set from the last 25 lexicon entries of
    every language, skipping entries containing out-of-vocabulary symbols.

    Returns (grapheme_lists, phoneme_lists, language_ids), aligned by index.
    """
    model_path = TransphoneConfig.data_path / 'model' / model_name
    grapheme_vocab = Vocab.read(model_path / 'grapheme.vocab')
    phoneme_vocab = Vocab.read(model_path / 'phoneme.vocab')
    test_phoneme_lst = []
    test_grapheme_lst = []
    lang_lst = []
    for lang_id in tqdm(read_all_langs()):
        try:
            lexicon = read_lexicon(lang_id)
        except Exception:
            # Fixed: the original fell through without `continue`, reusing the
            # previous iteration's `lexicon` (or raising NameError on the
            # first failure). Also narrowed the bare `except:`.
            print('skip ', lang_id)
            continue
        # Require a reasonably sized lexicon to sample a test set from.
        if len(lexicon.word2phoneme) <= 50:
            continue
        lang_phoneme_lst = []
        lang_grapheme_lst = []
        # Hold out the last 25 entries of each lexicon.
        for grapheme_str, phonemes in list(lexicon.word2phoneme.items())[-25:]:
            graphemes = list(grapheme_str)
            # Skip entries with any symbol outside the model's vocabularies.
            if any(phoneme not in phoneme_vocab for phoneme in phonemes):
                continue
            if any(grapheme not in grapheme_vocab for grapheme in graphemes):
                continue
            lang_phoneme_lst.append(phonemes)
            lang_grapheme_lst.append(grapheme_str)
        if len(lang_phoneme_lst) > 0:
            test_phoneme_lst.append(lang_phoneme_lst)
            test_grapheme_lst.append(lang_grapheme_lst)
            lang_lst.append(lang_id)
    return (test_grapheme_lst, test_phoneme_lst, lang_lst)
def get_logger(task, model_dir):
    """Create a logger named `task` that writes INFO+ records to
    <model_dir>/<task>.log and DEBUG+ records to the console, both using a
    bare '%(message)s' format."""
    logger = logging.getLogger(task)
    logger.setLevel(logging.DEBUG)
    plain = logging.Formatter('%(message)s')
    # File handler: persistent INFO-level log next to the model.
    file_handler = logging.FileHandler(f'{model_dir}/{task}.log')
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(plain)
    logger.addHandler(file_handler)
    # Console handler: everything, for interactive runs.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.DEBUG)
    console_handler.setFormatter(plain)
    logger.addHandler(console_handler)
    return logger
def test_ellipsis_replacing_str_key():
    """An Ellipsis in a name-mapping path ('data', ...) must nest every field
    under the literal 'data' key while the ... slot takes the (UPPER-styled)
    field name, for both the input and output crowns."""
    layouts = make_layouts(TestField('a_'), TestField('b'), name_mapping(name_style=NameStyle.UPPER, map=[('.*', ('data', ...))]), DEFAULT_NAME_MAPPING)
    # Expected: {'data': {'A': a_, 'B': b}} on both sides; trailing underscore
    # of 'a_' is stripped by the UPPER name style before mapping.
    assert (layouts == Layouts(InputNameLayout(crown=InpDictCrown(map={'data': InpDictCrown(map={'A': InpFieldCrown('a_'), 'B': InpFieldCrown('b')}, extra_policy=ExtraSkip())}, extra_policy=ExtraSkip()), extra_move=None), OutputNameLayout(crown=OutDictCrown(map={'data': OutDictCrown(map={'A': OutFieldCrown('a_'), 'B': OutFieldCrown('b')}, sieves={})}, sieves={}), extra_move=None)))
class OneFormerConfig(PretrainedConfig):
    """Configuration for the OneFormer universal segmentation model.

    Holds backbone selection, loss weights, point-sampling settings, text
    encoder sizes, and transformer decoder hyperparameters. Falls back to a
    default Swin backbone when `backbone_config` is not given.
    """
    model_type = 'oneformer'
    attribute_map = {'hidden_size': 'hidden_dim'}

    def __init__(self, backbone_config: Optional[Dict]=None, ignore_value: int=255, num_queries: int=150, no_object_weight: float=0.1, class_weight: float=2.0, mask_weight: float=5.0, dice_weight: float=5.0, contrastive_weight: float=0.5, contrastive_temperature: float=0.07, train_num_points: int=12544, oversample_ratio: float=3.0, importance_sample_ratio: float=0.75, init_std: float=0.02, init_xavier_std: float=1.0, layer_norm_eps: float=1e-05, is_training: bool=False, use_auxiliary_loss: bool=True, output_auxiliary_logits: bool=True, strides: Optional[list]=None, task_seq_len: int=77, text_encoder_width: int=256, text_encoder_context_length: int=77, text_encoder_num_layers: int=6, text_encoder_vocab_size: int=49408, text_encoder_proj_layers: int=2, text_encoder_n_ctx: int=16, conv_dim: int=256, mask_dim: int=256, hidden_dim: int=256, encoder_feedforward_dim: int=1024, norm: str='GN', encoder_layers: int=6, decoder_layers: int=10, use_task_norm: bool=True, num_attention_heads: int=8, dropout: float=0.1, dim_feedforward: int=2048, pre_norm: bool=False, enforce_input_proj: bool=False, query_dec_layers: int=2, common_stride: int=4, **kwargs):
        if backbone_config is None:
            logger.info('`backbone_config` is unset. Initializing the config with the default `Swin` backbone.')
            backbone_config = CONFIG_MAPPING['swin'](image_size=224, in_channels=3, patch_size=4, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, drop_path_rate=0.3, use_absolute_embeddings=False, out_features=['stage1', 'stage2', 'stage3', 'stage4'])
        elif isinstance(backbone_config, dict):
            # Re-hydrate a serialized backbone config into its config class.
            backbone_model_type = backbone_config.get('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        # Loss weighting.
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.contrastive_weight = contrastive_weight
        self.contrastive_temperature = contrastive_temperature
        # Point-sampling settings for the mask loss.
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.layer_norm_eps = layer_norm_eps
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.output_auxiliary_logits = output_auxiliary_logits
        # Fixed: the default used to be a shared mutable list literal in the
        # signature; None now stands in for the canonical feature strides.
        self.strides = [4, 8, 16, 32] if strides is None else strides
        # Text encoder settings.
        self.task_seq_len = task_seq_len
        self.text_encoder_width = text_encoder_width
        self.text_encoder_context_length = text_encoder_context_length
        self.text_encoder_num_layers = text_encoder_num_layers
        self.text_encoder_vocab_size = text_encoder_vocab_size
        self.text_encoder_proj_layers = text_encoder_proj_layers
        self.text_encoder_n_ctx = text_encoder_n_ctx
        # Pixel decoder / transformer decoder settings.
        self.conv_dim = conv_dim
        self.mask_dim = mask_dim
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.norm = norm
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.use_task_norm = use_task_norm
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_proj = enforce_input_proj
        self.query_dec_layers = query_dec_layers
        self.common_stride = common_stride
        # Mirror decoder depth under the standard transformers attribute name.
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs)

    # NOTE(review): the annotation uses builtin `any`, not typing.Any — kept
    # as-is because typing.Any may not be imported at file level; harmless at
    # runtime but worth fixing upstream.
    def to_dict(self) -> Dict[(str, any)]:
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
def main(argv):
    """Parse --job_type and submit SLURM jobs: 'M' packs several parallel
    jobs into one allocation, 'S' submits one job per allocation.

    Note: `argv` is accepted for symmetry with the caller but parse_args()
    reads sys.argv, matching the original behavior.
    """
    parser = argparse.ArgumentParser(description='Submit jobs')
    parser.add_argument('--job_type', type=str, default='S', help='Run single (S) or multiple (M) jobs in one experiment: S, M')
    args = parser.parse_args()
    sbatch_cfg = {
        'account': 'rrg-ashique',
        'job-name': 'MERL_mc_dqn',
        'time': '0-01:00:00',
        'mail-user': '',
    }
    general_cfg = {
        'user': 'qlan3',
        'check-time-interval': 5,
        'cluster_name': 'Narval',
        'cluster_capacity': 996,
        'job-list': list(range(1, 10 + 1)),
    }
    make_dir(f"output/{sbatch_cfg['job-name']}")
    if args.job_type == 'M':
        # Several jobs share one GPU allocation; 'procfile' tells GNU
        # parallel how many to run at once.
        max_parallel_jobs = 4
        mem_per_job = 16
        cpu_per_job = 2
        mem_per_cpu = int(ceil(mem_per_job / cpu_per_job))
        with open('procfile', 'w') as f:
            f.write(str(max_parallel_jobs))
        sbatch_cfg['gres'] = 'gpu:1'
        sbatch_cfg['cpus-per-task'] = cpu_per_job * max_parallel_jobs
        sbatch_cfg['mem-per-cpu'] = f'{mem_per_cpu}G'
        general_cfg['script-path'] = './sbatch_m.sh'
        general_cfg['max_parallel_jobs'] = max_parallel_jobs
        Submitter(general_cfg, sbatch_cfg).multiple_submit()
    elif args.job_type == 'S':
        mem_per_cpu = 1500
        sbatch_cfg['cpus-per-task'] = 1
        sbatch_cfg['mem-per-cpu'] = f'{mem_per_cpu}M'
        general_cfg['script-path'] = './sbatch_s.sh'
        Submitter(general_cfg, sbatch_cfg).single_submit()
def test_solver_direct_origin_dependency_with_extras_requested_by_other_package(solver: Solver, repo: Repository, package: ProjectPackage, fixture_dir: FixtureDirGetter) -> None:
    """A path (directory) dependency must be installed from its directory
    source even when another repo package requests it with extras."""
    pendulum = get_package('pendulum', '2.0.3')
    cleo = get_package('cleo', '1.0.0')
    # demo-foo pulls in demo[foo] from the repository side.
    demo_foo = get_package('demo-foo', '1.2.3')
    demo_foo.add_dependency(Factory.create_dependency('demo', {'version': '>=0.1', 'extras': ['foo']}))
    repo.add_package(demo_foo)
    repo.add_package(pendulum)
    repo.add_package(cleo)
    # The root package depends on demo via a local directory path.
    path = (((fixture_dir('git') / 'github.com') / 'demo') / 'demo').as_posix()
    package.add_dependency(Factory.create_dependency('demo', {'path': path}))
    package.add_dependency(Factory.create_dependency('demo-foo', '^1.2.3'))
    transaction = solver.solve()
    demo = Package('demo', '0.1.2', source_type='directory', source_url=path)
    ops = check_solver_result(transaction, [{'job': 'install', 'package': cleo}, {'job': 'install', 'package': pendulum}, {'job': 'install', 'package': demo}, {'job': 'install', 'package': demo_foo}])
    # The resolved 'demo' must keep its directory origin, not the repo one.
    op = ops[2]
    assert (op.package.name == 'demo')
    assert (op.package.version.text == '0.1.2')
    assert (op.package.source_type == 'directory')
    assert (op.package.source_url == path)
def parse_repository_name(include_tag=False, ns_kwarg_name='namespace_name', repo_kwarg_name='repo_name', tag_kwarg_name='tag_name', incoming_repo_kwarg='repository'):
    """View decorator that splits the `repository` kwarg into namespace,
    repo name, and (optionally) tag kwargs before calling the view.

    Aborts with HTTP 400 when the namespace is implicit but library
    namespaces are not allowed.
    """
    def inner(func):
        # NOTE(review): the bare '(func)' below looks like the remains of a
        # stripped '@wraps(func)' decorator lost in extraction — confirm
        # against the upstream module.
        (func)
        def wrapper(*args, **kwargs):
            try:
                repo_name_components = parse_namespace_repository(kwargs[incoming_repo_kwarg], app.config['LIBRARY_NAMESPACE'], include_tag=include_tag, allow_library=features.LIBRARY_SUPPORT)
            except ImplicitLibraryNamespaceNotAllowed:
                abort(400, message='A namespace must be specified explicitly')
            # Replace the combined kwarg with its parsed components.
            del kwargs[incoming_repo_kwarg]
            kwargs[ns_kwarg_name] = repo_name_components[0]
            kwargs[repo_kwarg_name] = repo_name_components[1]
            if include_tag:
                kwargs[tag_kwarg_name] = repo_name_components[2]
            return func(*args, **kwargs)
        return wrapper
    return inner
class FlowchartWidget(dockarea.DockArea):
    """Dock area hosting a flowchart: the graphics view plus hover-info and
    selected-node docks, with a context menu for adding nodes."""

    def __init__(self, chart, ctrl):
        # `chart` is the Flowchart model; `ctrl` its control widget.
        dockarea.DockArea.__init__(self)
        self.chart = chart
        self.ctrl = ctrl
        self.hoverItem = None
        # Main flowchart canvas.
        self.view = FlowchartGraphicsView.FlowchartGraphicsView(self)
        self.viewDock = dockarea.Dock('view', size=(1000, 600))
        self.viewDock.addWidget(self.view)
        self.viewDock.hideTitleBar()
        self.addDock(self.viewDock)
        # Read-only hover information panel at the bottom.
        self.hoverText = QtWidgets.QTextEdit()
        self.hoverText.setReadOnly(True)
        self.hoverDock = dockarea.Dock('Hover Info', size=(1000, 20))
        self.hoverDock.addWidget(self.hoverText)
        self.addDock(self.hoverDock, 'bottom')
        # Selected-node panel: description label plus a data tree.
        self.selInfo = QtWidgets.QWidget()
        self.selInfoLayout = QtWidgets.QGridLayout()
        self.selInfo.setLayout(self.selInfoLayout)
        self.selDescLabel = QtWidgets.QLabel()
        self.selNameLabel = QtWidgets.QLabel()
        self.selDescLabel.setWordWrap(True)
        self.selectedTree = DataTreeWidget()
        self.selInfoLayout.addWidget(self.selDescLabel)
        self.selInfoLayout.addWidget(self.selectedTree)
        self.selDock = dockarea.Dock('Selected Node', size=(1000, 200))
        self.selDock.addWidget(self.selInfo)
        self.addDock(self.selDock, 'bottom')
        self._scene = self.view.scene()
        self._viewBox = self.view.viewBox()
        # Build the node menu and wire scene signals to the info panels.
        self.buildMenu()
        self._scene.selectionChanged.connect(self.selectionChanged)
        self._scene.sigMouseHover.connect(self.hoverOver)
def reloadLibrary(self):
self.nodeMenu.triggered.disconnect(self.nodeMenuTriggered)
self.nodeMenu = None
self.subMenus = []
self.chart.library.reload()
self.buildMenu()
def buildMenu(self, pos=None):
def buildSubMenu(node, rootMenu, subMenus, pos=None):
for (section, node) in node.items():
if isinstance(node, OrderedDict):
menu = QtWidgets.QMenu(section)
rootMenu.addMenu(menu)
buildSubMenu(node, menu, subMenus, pos=pos)
subMenus.append(menu)
else:
act = rootMenu.addAction(section)
act.nodeType = section
act.pos = pos
self.nodeMenu = QtWidgets.QMenu()
self.subMenus = []
buildSubMenu(self.chart.library.getNodeTree(), self.nodeMenu, self.subMenus, pos=pos)
self.nodeMenu.triggered.connect(self.nodeMenuTriggered)
return self.nodeMenu
def menuPosChanged(self, pos):
self.menuPos = pos
def showViewMenu(self, ev):
self.buildMenu(ev.scenePos())
self.nodeMenu.popup(ev.screenPos())
def scene(self):
return self._scene
def viewBox(self):
return self._viewBox
def nodeMenuTriggered(self, action):
nodeType = action.nodeType
if (action.pos is not None):
pos = action.pos
else:
pos = self.menuPos
pos = self.viewBox().mapSceneToView(pos)
self.chart.createNode(nodeType, pos=pos)
def selectionChanged(self):
items = self._scene.selectedItems()
if (len(items) == 0):
data = None
else:
item = items[0]
if (hasattr(item, 'node') and isinstance(item.node, Node)):
n = item.node
if (n in self.ctrl.items):
self.ctrl.select(n)
else:
self.ctrl.clearSelection()
data = {'outputs': n.outputValues(), 'inputs': n.inputValues()}
self.selNameLabel.setText(n.name())
if hasattr(n, 'nodeName'):
self.selDescLabel.setText(('<b>%s</b>: %s' % (n.nodeName, n.__class__.__doc__)))
else:
self.selDescLabel.setText('')
if (n.exception is not None):
data['exception'] = n.exception
else:
data = None
self.selectedTree.setData(data, hideRoot=True)
def hoverOver(self, items):
term = None
for item in items:
if (item is self.hoverItem):
return
self.hoverItem = item
if (hasattr(item, 'term') and isinstance(item.term, Terminal)):
term = item.term
break
if (term is None):
self.hoverText.setPlainText('')
else:
val = term.value()
if isinstance(val, ndarray):
val = ('%s %s %s' % (type(val).__name__, str(val.shape), str(val.dtype)))
else:
val = str(val)
if (len(val) > 400):
val = (val[:400] + '...')
self.hoverText.setPlainText(('%s.%s = %s' % (term.node().name(), term.name(), val)))
def clear(self):
self.selectedTree.setData(None)
self.hoverText.setPlainText('')
self.selNameLabel.setText('')
self.selDescLabel.setText('') |
class ClusterUtilization():
    """Snapshot of how much of a cluster's resources are currently in use.

    Derived from two resource maps (totals and currently-available amounts):
    used = total - available, computed only for numeric resources present in
    both maps.
    """

    def __init__(self, cluster_resources: Dict[str, Any], available_resources: Dict[str, Any]):
        # Only numeric resources that appear in both maps contribute.
        used_resources = {
            name: total - available_resources[name]
            for name, total in cluster_resources.items()
            if isinstance(total, (float, int)) and name in available_resources
        }
        self.total_memory_bytes = cluster_resources.get('memory')
        self.used_memory_bytes = used_resources.get('memory')
        self.total_cpu = cluster_resources.get('CPU')
        self.used_cpu = used_resources.get('CPU')
        self.total_object_store_memory_bytes = cluster_resources.get('object_store_memory')
        self.used_object_store_memory_bytes = used_resources.get('object_store_memory')
        # Percentages of each tracked resource currently in use.
        self.used_memory_percent = (self.used_memory_bytes / self.total_memory_bytes) * 100
        self.used_object_store_memory_percent = (self.used_object_store_memory_bytes / self.total_object_store_memory_bytes) * 100
        self.used_cpu_percent = (self.used_cpu / self.total_cpu) * 100
        self.used_resources = used_resources
def get_current_cluster_utilization() -> ClusterUtilization:
    """Build a ClusterUtilization snapshot from the live Ray cluster state."""
    return ClusterUtilization(
        cluster_resources=ray.cluster_resources(),
        available_resources=ray.available_resources(),
    )
class TestMatrixTransport(MatrixTransport):
    """Matrix transport that records every broadcast/queued message for assertions."""

    # Not a pytest test class despite the Test- prefix.
    __test__ = False

    def __init__(self, config: MatrixTransportConfig, environment: Environment) -> None:
        super().__init__(config, environment)
        # device-id value -> messages broadcast to that device
        self.broadcast_messages: DefaultDict[str, List[Message]] = defaultdict(list)
        # queue identifier -> messages sent through that queue
        self.send_messages: DefaultDict[QueueIdentifier, List[Message]] = defaultdict(list)

    def broadcast(self, message: Message, device_id: DeviceIDs) -> None:
        # Record first, then delegate to the real transport.
        self.broadcast_messages[device_id.value].append(message)
        super().broadcast(message, device_id=device_id)

    def send_async(self, message_queues: List[MessagesQueue]) -> None:
        for message_queue in message_queues:
            self.send_messages[message_queue.queue_identifier].extend(message_queue.messages)
        super().send_async(message_queues)
class RandomAugmentation(nn.Module):
    """Apply `augmentation` to each input with probability `p`.

    For a batched input (4-D: B, C, H, W) with `same_on_batch=False`, the
    keep/augment decision is made per sample; otherwise a single decision is
    made for the whole input.
    """

    def __init__(self, augmentation: nn.Module, p: float = 0.5, same_on_batch: bool = False):
        super().__init__()
        self.prob = p  # probability of applying the augmentation
        self.augmentation = augmentation
        self.same_on_batch = same_on_batch

    def forward(self, images: Tensor) -> Tensor:
        # BUGFIX: a batched input is identified by rank (4-D: B, C, H, W).
        # The original `len(images) < 4` tested the size of dim 0 instead, so
        # batches of fewer than 4 samples got one coin flip for the whole
        # batch and single 3-channel images hit the per-sample path.
        is_batch = (images.dim() == 4)
        if (not is_batch) or self.same_on_batch:
            # One coin flip decides for the entire input.
            if random() <= self.prob:
                out = self.augmentation(images)
            else:
                out = images
        else:
            # Per-sample decision: augment everything, then restore the
            # samples whose coin flip failed (rand > prob).
            out = self.augmentation(images)
            batch_size = len(images)
            keep = torch.where(torch.rand(batch_size) > self.prob)
            out[keep] = images[keep]
        return out
def dwsconv3x3_block(in_channels, out_channels, stride=1, padding=1, dilation=1, bias=False, bn_eps=1e-05, activation=(lambda : nn.ReLU(inplace=True))):
    """Build a 3x3 depthwise-separable convolution block.

    Thin convenience wrapper around DwsConvBlock with kernel_size fixed to 3;
    all other arguments are forwarded unchanged.
    """
    return DwsConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=bias,
        bn_eps=bn_eps,
        activation=activation,
    )
class TestFmtCols(object):
    """Tests for utils._core.fmt_output_in_cols (formatting values into columns).

    NOTE(review): the `.parametrize(...)` lines below look like stripped
    `@pytest.mark.parametrize` decorators — restore the `@pytest.mark` prefix
    before running.
    """

    def setup_method(self):
        """Build a 40-element string array and default formatting kwargs."""
        self.in_str = np.arange(0, 40, 1).astype(str)
        self.in_kwargs = {'ncols': 5, 'max_num': 40, 'lpad': None}
        self.out_str = None
        # Row index expected to contain the '...' truncation filler (-1 = none).
        self.filler_row = (- 1)
        self.ncols = None
        self.nrows = None
        # Default left-pad: width of the widest element plus one space.
        self.lpad = (len(self.in_str[(- 1)]) + 1)
        return

    def teardown_method(self):
        del self.in_str, self.in_kwargs, self.out_str, self.filler_row
        del self.ncols, self.nrows, self.lpad
        return

    def eval_output(self):
        """Check the formatted output's row count, column count, and padding."""
        # Drop the trailing empty string produced by the final newline.
        out_rows = self.out_str.split('\n')[:(- 1)]
        assert (len(out_rows) == self.nrows)
        for (i, row) in enumerate(out_rows):
            split_row = row.split()
            if (i == self.filler_row):
                # The truncation row contains only the '...' marker.
                assert ('...' in split_row)
                if (i > 0):
                    assert (len(split_row) == 1)
                assert (len(row) == (self.lpad * self.ncols))
            else:
                assert (len(row) == (self.lpad * len(split_row)))
                if (i == (len(out_rows) - 1)):
                    # The last row may be partially filled.
                    assert (len(split_row) <= self.ncols)
                else:
                    assert (len(split_row) == self.ncols)
        return

    def test_neg_ncols(self):
        """A negative column count yields an empty output string."""
        self.in_kwargs['ncols'] = (- 5)
        self.out_str = utils._core.fmt_output_in_cols(self.in_str, **self.in_kwargs)
        assert (len(self.out_str) == 0)
        return

    .parametrize('key,val,raise_type,err_msg', [('ncols', 0, ZeroDivisionError, 'integer division or modulo by zero'), ('max_num', (- 10), ValueError, 'max() arg is an empty sequence')])
    def test_fmt_raises(self, key, val, raise_type, err_msg):
        """Invalid kwargs raise the documented exception types."""
        self.in_kwargs[key] = val
        utils.testing.eval_bad_input(utils._core.fmt_output_in_cols, raise_type, err_msg, [self.in_str], self.in_kwargs)
        return

    .parametrize('ncol', [3, 5, 10])
    def test_ncols(self, ncol):
        """Output honors the requested number of columns."""
        self.in_kwargs['ncols'] = ncol
        self.ncols = ncol
        self.nrows = int(np.ceil((self.in_kwargs['max_num'] / ncol)))
        self.out_str = utils._core.fmt_output_in_cols(self.in_str, **self.in_kwargs)
        self.eval_output()
        return

    .parametrize('max_num,filler,nrow', [(0, 0, 1), (1, 0, 1), (10, 1, 3), (50, (- 1), 8)])
    def test_max_num(self, max_num, filler, nrow):
        """Truncation via max_num inserts a '...' filler row where expected."""
        self.in_kwargs['max_num'] = max_num
        self.filler_row = filler
        self.ncols = self.in_kwargs['ncols']
        self.nrows = nrow
        self.out_str = utils._core.fmt_output_in_cols(self.in_str, **self.in_kwargs)
        self.eval_output()
        return

    .parametrize('in_pad', [5, 30])
    def test_lpad(self, in_pad):
        """An explicit lpad overrides the computed element width."""
        self.in_kwargs['lpad'] = in_pad
        self.ncols = self.in_kwargs['ncols']
        self.nrows = int(np.ceil((self.in_kwargs['max_num'] / self.ncols)))
        self.lpad = in_pad
        self.out_str = utils._core.fmt_output_in_cols(self.in_str, **self.in_kwargs)
        self.eval_output()
        return
class ApplicationsTab(QWidget):
    """Property-page tab listing the applications that can open a file.

    `fileInfo` is a QFileInfo-like object; only its `suffix()` is read, to
    tailor the "always use this application" checkbox label.
    """

    def __init__(self, fileInfo, parent=None):
        super(ApplicationsTab, self).__init__(parent)
        topLabel = QLabel('Open with:')
        applicationsListBox = QListWidget()
        # Placeholder entries "Application 1" .. "Application 30".
        applications = []
        for i in range(1, 31):
            applications.append(('Application %d' % i))
        applicationsListBox.insertItems(0, applications)
        # BUGFIX: removed a dead `alwaysCheckBox = QCheckBox()` assignment that
        # was immediately overwritten by both branches below.
        if fileInfo.suffix():
            alwaysCheckBox = QCheckBox(("Always use this application to open files with the extension '%s'" % fileInfo.suffix()))
        else:
            alwaysCheckBox = QCheckBox('Always use this application to open this type of file')
        layout = QVBoxLayout()
        layout.addWidget(topLabel)
        layout.addWidget(applicationsListBox)
        layout.addWidget(alwaysCheckBox)
        self.setLayout(layout)
def parse_hitran_file(fname, columns, count=(- 1), output='pandas'):
    """Parse a HITRAN-format file into a DataFrame.

    Args:
        fname: path to the HITRAN file.
        columns: column specification forwarded to the low-level readers.
        count: number of records to read (-1 = all).
        output: 'pandas' (default) or 'vaex'.

    Returns:
        A pandas DataFrame, converted to a vaex DataFrame when output == 'vaex'.
    """
    # Read a single record first to determine the line-return format,
    # then re-read the whole file with the detected format.
    data = _read_hitran_file(fname, columns, count=1, linereturnformat='a2')
    linereturnformat = _get_linereturnformat(data, columns, fname)
    data = _read_hitran_file(fname, columns, count, linereturnformat)
    df = _ndarray2df(data, columns, linereturnformat)
    if (output == 'vaex'):
        # BUGFIX: import vaex lazily so it is only required when a vaex
        # frame is actually requested (it was previously imported
        # unconditionally, making vaex a hard dependency of every call).
        import vaex
        df = vaex.from_pandas(df)
    return df
def ql_syscall_socket(ql: Qiling, domain: int, socktype: int, protocol: int):
    """Emulate the `socket` syscall: open a host socket bound to the first free
    emulated file-descriptor slot.

    Returns the new fd index, or -1 when the fd table is full or the host
    socket cannot be opened.
    """
    # Find the first unused slot in the emulated fd table (-1 when full).
    idx = next((i for i in range(NR_OPEN) if (ql.os.fd[i] is None)), (- 1))
    if (idx != (- 1)):
        vsock_type = socktype
        # Translate the emulated architecture's socket-type constant into the
        # host's equivalent before opening a real socket.
        hsock_type = __host_socket_type(vsock_type, ql.arch.type)
        ql.log.debug(f'Converted emulated socket type {vsock_type} to host socket type {hsock_type}')
        try:
            sock = ql_socket.open(domain, hsock_type, protocol)
            # NOTE(review): SO_REUSEADDR is set only under debug verbosity —
            # presumably to ease quick re-runs while debugging; confirm this
            # asymmetry is intentional.
            if (ql.verbose >= QL_VERBOSE.DEBUG):
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        except OSError as e:
            # Opening failed: report -1, leaving the fd slot unused.
            ql.log.debug(f'Error opening socket: {e}')
            idx = (- 1)
        else:
            ql.os.fd[idx] = sock
    # Resolve symbolic constant names purely for the debug log line.
    s_domain = socket_domain_mapping(domain, ql.arch.type, ql.os.type)
    s_socktype = socket_type_mapping(socktype, ql.arch.type)
    ql.log.debug(('socket(%s, %s, %s) = %d' % (s_domain, s_socktype, protocol, idx)))
    return idx
def masked_mae_loss(scaler, null_val):
    """Return a loss closure computing masked MAE between predictions and labels.

    When `scaler` is truthy, both tensors are inverse-transformed back to the
    original data scale before the MAE is computed; `null_val` marks entries
    to be excluded from the mean.
    """
    def loss(preds, labels):
        if scaler:
            preds, labels = (scaler.inverse_transform(t) for t in (preds, labels))
        return masked_mae_torch(preds=preds, labels=labels, null_val=null_val)
    return loss
def simus(matrix, objectives, b=None, rank_by=1, solver='pulp'):
    """Run the SIMUS multi-criteria method over a decision `matrix`.

    Args:
        matrix: alternatives x criteria values.
        objectives: per-criterion optimization senses (Objective.MIN/MAX values).
        b: right-hand-side bounds per criterion; None entries are auto-filled.
        rank_by: which scoring method (1 or 2) drives the final ranking.
        solver: LP backend name passed through to the stage solver.

    Returns a tuple of (ranking, stages, stages_results, method 1 & 2 scores,
    tita_j_p, tita_j_d, dominance, dominance_by_criteria).
    """
    # Criteria become rows so each LP stage optimizes one criterion over alternatives.
    transposed_matrix = matrix.T
    b = np.asarray(b)
    # NOTE(review): with the default b=None this becomes a 0-d object array and
    # `None in b` raises TypeError — presumably callers always pass a sequence
    # (possibly containing None entries); confirm against the public wrapper.
    if (None in b):
        # Auto-fill missing bounds: per-criterion min for MIN objectives,
        # per-criterion max otherwise.
        mins = np.min(transposed_matrix, axis=1)
        maxs = np.max(transposed_matrix, axis=1)
        auto_b = np.where((objectives == Objective.MIN.value), mins, maxs)
        # Elementwise `b != None` keeps user-specified entries, fills the rest.
        b = np.where((b != None), b, auto_b)
    (stages, stages_results) = _solve_stages(transposed_matrix=transposed_matrix, b=b, objectives=objectives, solver=solver)
    method_1_score = _first_method(stages_results=stages_results)
    (method_2_score, tita_j_p, tita_j_d, dominance, dominance_by_criteria) = _second_method(stages_results=stages_results)
    # rank_by selects which scoring method produces the final ranking (1-based).
    score = [method_1_score, method_2_score][(rank_by - 1)]
    ranking = rank.rank_values(score, reverse=True)
    return (ranking, stages, stages_results, method_1_score, method_2_score, tita_j_p, tita_j_d, dominance, dominance_by_criteria)
def setup_optimizer(rc_explainer, pro_flag=False):
    """Create an Adam optimizer over the explainer's trainable parameters.

    Always includes the edge-action representation generator; when `pro_flag`
    is True, `edge_action_prob_generator` is a collection of generators and
    every one of them is included, otherwise it is a single module.
    """
    params = list(rc_explainer.edge_action_rep_generator.parameters())
    if pro_flag:
        for prob_generator in rc_explainer.edge_action_prob_generator:
            params.extend(prob_generator.parameters())
    else:
        params.extend(rc_explainer.edge_action_prob_generator.parameters())
    return torch.optim.Adam(params, lr=0.01, weight_decay=1e-05)
def plot_waveform_to_numpy(waveform):
    """Render a 1-D waveform to an RGB image array for logging/visualization.

    Args:
        waveform: 1-D sequence of amplitude samples.

    Returns:
        A numpy array produced by save_figure_to_numpy from the drawn figure.
    """
    (fig, ax) = plt.subplots(figsize=(12, 3))
    # BUGFIX: removed a no-op `ax.plot()` call that drew nothing.
    ax.plot(range(len(waveform)), waveform, linewidth=0.1, alpha=0.7, color='blue')
    plt.xlabel('Samples')
    plt.ylabel('Amplitude')
    # Amplitudes are displayed in the fixed [-1, 1] range.
    plt.ylim((- 1), 1)
    plt.tight_layout()
    fig.canvas.draw()
    data = save_figure_to_numpy(fig)
    # Close the figure to avoid leaking matplotlib state across calls.
    plt.close()
    return data
class SSLCertificate(object):
    """Thin wrapper around a pyOpenSSL X509 certificate object."""

    def __init__(self, openssl_cert):
        self.openssl_cert = openssl_cert

    def validate_private_key(self, private_key_path):
        """Raise KeyInvalidException if the key at `private_key_path` does not
        match this certificate.
        """
        context = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD)
        context.use_certificate(self.openssl_cert)
        try:
            context.use_privatekey_file(private_key_path)
            context.check_privatekey()
        except OpenSSL.SSL.Error as ex:
            # Surface only the human-readable reason string from the error tuple.
            raise KeyInvalidException(ex.args[0][0][2])

    def matches_name(self, check_name):
        """Return True if `check_name` matches any of the certificate's DNS
        names (fnmatch-style wildcards such as *.example.com are honored).
        """
        for dns_name in self.names:
            if fnmatch(check_name, dns_name):
                return True
        return False

    def expired(self):
        """Return True if the certificate has expired."""
        return self.openssl_cert.has_expired()

    # BUGFIX: `common_name` and `names` are read as attributes elsewhere in
    # this class (`self.names` is iterated in matches_name, `self.common_name`
    # is read in names), so both must be properties; the @property decorators
    # had been lost, which would make matches_name iterate a bound method.
    @property
    def common_name(self):
        """The certificate subject's common name (may be None)."""
        return self.openssl_cert.get_subject().commonName

    @property
    def names(self):
        """Set of DNS names covered by the certificate: the common name plus
        any DNS entries in the SubjectAltName extension.
        """
        dns_names = set()
        common_name = self.common_name
        if (common_name is not None):
            dns_names.add(common_name)
        for i in range(0, self.openssl_cert.get_extension_count()):
            ext = self.openssl_cert.get_extension(i)
            if (ext.get_short_name() == _SUBJECT_ALT_NAME):
                value = str(ext)
                # SAN extension renders as a comma-separated "DNS:..." list.
                for san_name in value.split(','):
                    san_name_trimmed = san_name.strip()
                    if san_name_trimmed.startswith('DNS:'):
                        dns_names.add(san_name_trimmed[4:])
        return dns_names
class TruncatingMemoryHandler(logging.handlers.MemoryHandler):
    """Buffers up to `max_size` log records in memory so they can be replayed
    into a real handler later (via dump_to_target), dropping any excess.
    """
    target: Optional['logging.Handler']

    def __init__(self):
        # capacity=1 with flushLevel=DEBUG makes the base class call flush()
        # after every emit; flush() is a no-op until a target is attached, so
        # records simply accumulate in self.buffer.
        logging.handlers.MemoryHandler.__init__(self, capacity=1, flushLevel=logging.DEBUG)
        # Maximum number of buffered records; later records are counted but dropped.
        self.max_size = 100
        # Total number of records ever seen, including dropped ones.
        self.num_messages_seen = 0
        # True until dump_to_target() has been called at least once.
        self.__never_dumped = True

    def flush(self):
        """Forward buffered records to the target, honoring the target's level.

        Unlike the base class, the buffer is intentionally NOT cleared so the
        same records can be replayed when a target is attached later.
        """
        self.acquire()
        try:
            if self.target:
                for record in self.buffer:
                    if (record.levelno >= self.target.level):
                        self.target.handle(record)
        finally:
            self.release()

    def dump_to_target(self, target: 'logging.Handler'):
        """Replay all buffered records into `target`, then detach it again."""
        self.acquire()
        try:
            self.setTarget(target)
            self.flush()
            self.setTarget(None)
        finally:
            self.__never_dumped = False
            self.release()

    def emit(self, record):
        # Count every record, but only buffer the first max_size of them.
        self.num_messages_seen += 1
        if (len(self.buffer) < self.max_size):
            super().emit(record)

    def close(self) -> None:
        # If nothing ever consumed the buffer, fall back to stderr logging so
        # the collected records are not silently lost.
        if self.__never_dumped:
            _configure_stderr_logging()
        super().close()
def _get_test_content_areadef():
    """Build fake file-handle content describing an MTG geostationary projection.

    Returns a dict mapping the projection variable path to a 0-d DataArray
    carrying the projection attributes, plus one '<path>/attr/<name>' entry
    per attribute value.
    """
    proj = 'data/mtg_geos_projection'
    attrs = {'sweep_angle_axis': 'y', 'perspective_point_height': '.0', 'semi_major_axis': '6378137.0', 'longitude_of_projection_origin': '0.0', 'inverse_flattening': '298.', 'units': 'm'}
    data = {proj: xr.DataArray(0, dims=(), attrs=attrs)}
    for name, value in attrs.items():
        data[proj + '/attr/' + name] = value
    return data
class AttModule(nn.Module):
    """Pairs a forward and a backward attention block, selected by `rev`."""

    def __init__(self, N):
        super(AttModule, self).__init__()
        self.forw_att = AttentionBlock(N)
        self.back_att = AttentionBlock(N)

    def forward(self, x, rev=False):
        # rev=False -> forward block; rev=True -> backward block.
        block = self.back_att if rev else self.forw_att
        return block(x)
def download_voc(path, overwrite=False):
    """Download and extract the Pascal VOC archives into `path`.

    NOTE(review): the download URLs appear to have been stripped from the
    entries below — each element should be a (url, sha1_checksum) pair but
    only the checksum fragments remain, so the tuple unpacking in the loop
    cannot work as written. Restore the original URL/checksum pairs.
    """
    _DOWNLOAD_URLS = [(' '34ed68851bce2a36e2a223fa52c661d592c66b3c'), (' '41a8d6e12baa5ab18ee7f8f8029b9e11805b4ef1'), (' '4e443f8a2eca6b1dac8a6c57641b67dd40621a49')]
    makedirs(path)
    for (url, checksum) in _DOWNLOAD_URLS:
        # Each archive is verified against its sha1 checksum before extraction.
        filename = download(url, path=path, overwrite=overwrite, sha1_hash=checksum)
        # NOTE(review): extractall on an untrusted archive is vulnerable to
        # path traversal; consider tarfile's extraction filters (Python 3.12+).
        with tarfile.open(filename) as tar:
            tar.extractall(path=path)
def test_switch_last():
    """Only the last switch case is truthy, so only its group ('sg2') is called."""
    # switch signals control-of-flow by raising Call; capture it for inspection.
    with pytest.raises(Call) as err:
        switch(context=Context({'list': 'sg1', 'case1': False, 'case2': True, 'fg': 'fgv', 'switch': [{'case': '{case1}', 'call': '{list}'}, {'case': '{case2}', 'call': 'sg2'}]}), name='blah')
    cof = err.value
    assert isinstance(cof, Call)
    # case1 is False so '{list}' is skipped; case2 is True so 'sg2' runs.
    assert (cof.groups == ['sg2'])
    assert (cof.success_group is None)
    assert (cof.failure_group is None)
    # The raised Call carries the original (unformatted) switch configuration.
    assert (cof.original_config == ('switch', [{'case': '{case1}', 'call': '{list}'}, {'case': '{case2}', 'call': 'sg2'}]))
class DMA_CR(IntEnum):
    """Bit definitions for a DMA channel configuration register.

    NOTE(review): the names match the STM32 DMA_CCR register layout —
    confirm against the target MCU's reference manual.
    """
    EN = (1 << 0)        # channel enable
    TCIE = (1 << 1)      # transfer-complete interrupt enable
    HTIE = (1 << 2)      # half-transfer interrupt enable
    TEIE = (1 << 3)      # transfer-error interrupt enable
    DIR = (1 << 4)       # data transfer direction
    CIRC = (1 << 5)      # circular mode
    PINC = (1 << 6)      # peripheral increment mode
    MINC = (1 << 7)      # memory increment mode
    PSIZE_0 = (1 << 8)   # peripheral size, bit 0
    PSIZE_1 = (2 << 8)   # peripheral size, bit 1
    PSIZE = (3 << 8)     # peripheral size field mask (bits 9:8)
    MSIZE_0 = (1 << 10)  # memory size, bit 0
    MSIZE_1 = (2 << 10)  # memory size, bit 1
    MSIZE = (3 << 10)    # memory size field mask (bits 11:10)
    PL = (3 << 12)       # priority level field mask (bits 13:12)
    MEM2MEM = (1 << 14)  # memory-to-memory mode
class LatexyzPreviewMath(sublime_plugin.EventListener):
    """Keeps the `preview_math_template_preamble` view setting in sync with the
    \\newcommand / \\DeclareMathOperator definitions found in a LaTeX buffer,
    refreshing on view activation and after saves.
    """

    def on_activated_async(self, view):
        self.set_template_preamble(view)

    def on_post_save_async(self, view):
        self.set_template_preamble(view)

    def set_template_preamble(self, view):
        """Collect user macro definitions and store them as the preview preamble."""
        try:
            pt = view.sel()[0].end()
        # BUGFIX: narrowed a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit) to the expected failure: an empty
        # selection list raises IndexError.
        except IndexError:
            pt = 0
        # Only act inside LaTeX buffers.
        if (not view.match_selector(pt, 'text.tex.latex')):
            return
        lz_settings = sublime.load_settings(lz_settings_file)
        if (not lz_settings.get('auto_set_preview_math_template_preamble')):
            return
        # Gather every macro definition region in the buffer.
        newcommand_regions = view.find_by_selector('meta.function.newcommand.latex, meta.function.declare-math-operator.latex')
        newcommands = []
        for region in newcommand_regions:
            newcommands.append(view.substr(region))
        view.settings().set('preview_math_template_preamble', '\n'.join(newcommands))
def get_agent_from_batch(features, device, vocab):
    """Unpack the agent-side tensors from a feature batch and move them to `device`.

    `features[3]` holds (input_ids, output_ids, lengths). Returns a tuple of
    (input_ids, padding_mask, max_len, lengths, output_ids) with all tensors
    on `device`; the mask is 1.0 for real tokens and 0.0 for padding.
    """
    agent_input_ids, agent_output_ids, agent_lens = features[3]
    pad_id = vocab.token2idx('<PAD>')
    agent_padding_mask = agent_input_ids.ne(pad_id).float().to(device)
    max_agent_len = max(agent_lens)
    return (
        agent_input_ids.to(device),
        agent_padding_mask,
        max_agent_len,
        agent_lens.to(device),
        agent_output_ids.to(device),
    )
class FetcherTestCase(WithResponses, WithMakeAlgo, ZiplineTestCase):
    """Tests for zipline's fetch_csv remote-data pipeline.

    NOTE(review): extraction appears to have stripped @classmethod decorators
    (make_equity_info / make_exchanges_info take `cls`), @parameterized.expand
    decorators (the bare `([...])` lines), and the URL string literals passed
    to `self.responses.add(...)` and embedded in the algo scripts — restore
    them before running.
    """
    START_DATE = pd.Timestamp('2006-01-03', tz='utc')
    END_DATE = pd.Timestamp('2006-12-29', tz='utc')
    SIM_PARAMS_DATA_FREQUENCY = 'daily'
    DATA_PORTAL_USE_MINUTE_DATA = False
    BENCHMARK_SID = None

    def make_equity_info(cls):
        """Equity metadata fixture: US-listed sids plus one non-US duplicate of AAPL."""
        return pd.DataFrame.from_dict({24: {'start_date': pd.Timestamp('2006-01-01', tz='UTC'), 'end_date': pd.Timestamp('2007-01-01', tz='UTC'), 'symbol': 'AAPL', 'exchange': 'nasdaq'}, 3766: {'start_date': pd.Timestamp('2006-01-01', tz='UTC'), 'end_date': pd.Timestamp('2007-01-01', tz='UTC'), 'symbol': 'IBM', 'exchange': 'nasdaq'}, 5061: {'start_date': pd.Timestamp('2006-01-01', tz='UTC'), 'end_date': pd.Timestamp('2007-01-01', tz='UTC'), 'symbol': 'MSFT', 'exchange': 'nasdaq'}, 14848: {'start_date': pd.Timestamp('2006-01-01', tz='UTC'), 'end_date': pd.Timestamp('2007-01-01', tz='UTC'), 'symbol': 'YHOO', 'exchange': 'nasdaq'}, 25317: {'start_date': pd.Timestamp('2006-01-01', tz='UTC'), 'end_date': pd.Timestamp('2007-01-01', tz='UTC'), 'symbol': 'DELL', 'exchange': 'nasdaq'}, 13: {'start_date': pd.Timestamp('2006-01-01', tz='UTC'), 'end_date': pd.Timestamp('2010-01-01', tz='UTC'), 'symbol': 'NFLX', 'exchange': 'nasdaq'}, 9999999: {'start_date': pd.Timestamp('2006-01-01', tz='UTC'), 'end_date': pd.Timestamp('2007-01-01', tz='UTC'), 'symbol': 'AAPL', 'exchange': 'non_us_exchange'}}, orient='index')

    def make_exchanges_info(cls, *args, **kwargs):
        """Exchange metadata fixture: one US and one non-US exchange."""
        return pd.DataFrame.from_records([{'exchange': 'nasdaq', 'country_code': 'US'}, {'exchange': 'non_us_exchange', 'country_code': 'CA'}])

    def run_algo(self, code, sim_params=None):
        """Run `code` through a zipline algo backed by a fetcher data portal."""
        if (sim_params is None):
            sim_params = self.sim_params
        test_algo = self.make_algo(script=code, sim_params=sim_params, data_portal=FetcherDataPortal(self.asset_finder, self.trading_calendar))
        results = test_algo.run()
        return results

    def test_minutely_fetcher(self):
        """Minute-frequency fetch_csv: fetched signal is forward-filled per minute."""
        self.responses.add(self.responses.GET, ' body=AAPL_MINUTE_CSV_DATA, content_type='text/csv')
        sim_params = factory.create_simulation_parameters(start=pd.Timestamp('2006-01-03', tz='UTC'), end=pd.Timestamp('2006-01-10', tz='UTC'), emission_rate='minute', data_frequency='minute')
        test_algo = self.make_algo(script='\nfrom zipline.api import fetch_csv, record, sid\n\ndef initialize(context):\n fetch_csv(\' handle_data(context, data):\n record(aapl_signal=data.current(sid(24), "signal"))\n', sim_params=sim_params)
        gen = test_algo.get_generator()
        perf_packets = list(gen)
        signal = [result['minute_perf']['recorded_vars']['aapl_signal'] for result in perf_packets if ('minute_perf' in result)]
        # 6 trading days x 390 minutes each.
        self.assertEqual((6 * 390), len(signal))
        np.testing.assert_array_equal(([np.NaN] * 390), signal[0:390])
        np.testing.assert_array_equal(([2] * 390), signal[390:780])
        np.testing.assert_array_equal(([3] * 780), signal[780:1560])
        np.testing.assert_array_equal(([4] * 780), signal[1560:])

    def test_fetch_csv_with_multi_symbols(self):
        """A single CSV carrying several symbols populates each sid's signal."""
        self.responses.add(self.responses.GET, ' body=MULTI_SIGNAL_CSV_DATA, content_type='text/csv')
        results = self.run_algo('\nfrom zipline.api import fetch_csv, record, sid\n\ndef initialize(context):\n fetch_csv(\' context.stocks = [sid(3766), sid(25317)]\n\ndef handle_data(context, data):\n record(ibm_signal=data.current(sid(3766), "signal"))\n record(dell_signal=data.current(sid(25317), "signal"))\n ')
        self.assertEqual(5, results['ibm_signal'].iloc[(- 1)])
        self.assertEqual(5, results['dell_signal'].iloc[(- 1)])

    def test_fetch_csv_with_pure_signal_file(self):
        """A CSV with no ticker column can be fetched under a custom symbol."""
        self.responses.add(self.responses.GET, ' body=CPIAUCSL_DATA, content_type='text/csv')
        results = self.run_algo('\nfrom zipline.api import fetch_csv, sid, record\n\ndef clean(df):\n return df.rename(columns={\'Value\':\'cpi\', \'Date\':\'date\'})\n\ndef initialize(context):\n fetch_csv(\n \' symbol=\'urban\',\n pre_func=clean,\n date_format=\'%Y-%m-%d\'\n )\n context.stocks = [sid(3766), sid(25317)]\n\ndef handle_data(context, data):\n\n cur_cpi = data.current("urban", "cpi")\n record(cpi=cur_cpi)\n ')
        self.assertEqual(results['cpi'][(- 1)], 203.1)

    def test_algo_fetch_csv(self):
        """post_func transforms (scaled column) are visible via data.current."""
        self.responses.add(self.responses.GET, ' body=AAPL_CSV_DATA, content_type='text/csv')
        results = self.run_algo('\nfrom zipline.api import fetch_csv, record, sid\n\ndef normalize(df):\n df[\'scaled\'] = df[\'signal\'] * 10\n return df\n\ndef initialize(context):\n fetch_csv(\' post_func=normalize)\n context.checked_name = False\n\ndef handle_data(context, data):\n record(\n signal=data.current(sid(24), "signal"),\n scaled=data.current(sid(24), "scaled"),\n price=data.current(sid(24), "price"))\n ')
        self.assertEqual(5, results['signal'][(- 1)])
        self.assertEqual(50, results['scaled'][(- 1)])
        self.assertEqual(24, results['price'][(- 1)])

    def test_algo_fetch_csv_with_extra_symbols(self):
        """Extra symbols in the CSV are masked out without affecting known sids."""
        self.responses.add(self.responses.GET, ' body=AAPL_IBM_CSV_DATA, content_type='text/csv')
        results = self.run_algo('\nfrom zipline.api import fetch_csv, record, sid\n\ndef normalize(df):\n df[\'scaled\'] = df[\'signal\'] * 10\n return df\n\ndef initialize(context):\n fetch_csv(\' post_func=normalize,\n mask=True)\n\ndef handle_data(context, data):\n record(\n signal=data.current(sid(24),"signal"),\n scaled=data.current(sid(24), "scaled"),\n price=data.current(sid(24), "price"))\n ')
        self.assertEqual(5, results['signal'][(- 1)])
        self.assertEqual(50, results['scaled'][(- 1)])
        self.assertEqual(24, results['price'][(- 1)])

    ([('unspecified', ''), ('none', 'usecols=None'), ('without date', "usecols=['Value']"), ('with date', "usecols=('Value', 'Date')")])
    def test_usecols(self, testname, usecols):
        """fetch_csv honors the various usecols forms without dropping rows."""
        self.responses.add(self.responses.GET, ' body=CPIAUCSL_DATA, content_type='text/csv')
        code = '\nfrom zipline.api import fetch_csv, sid, record\n\ndef clean(df):\n return df.rename(columns={{\'Value\':\'cpi\'}})\n\ndef initialize(context):\n fetch_csv(\n \' symbol=\'urban\',\n pre_func=clean,\n date_column=\'Date\',\n date_format=\'%Y-%m-%d\',{usecols}\n )\n context.stocks = [sid(3766), sid(25317)]\n\ndef handle_data(context, data):\n data.current("urban", "cpi")\n '
        results = self.run_algo(code.format(usecols=usecols))
        # 251 trading days in the simulation year.
        self.assertEqual(len(results), 251)

    def test_sources_merge_custom_ticker(self):
        """Fetched custom-ticker data merges with equity data; request kwargs are fixed."""
        requests_kwargs = {}
        def capture_kwargs(zelf, url, **kwargs):
            # Capture the kwargs fetch_url would pass to requests.
            requests_kwargs.update(mask_requests_args(url, kwargs).requests_kwargs)
            return PALLADIUM_DATA
        with patch('zipline.sources.requests_csv.PandasRequestsCSV.fetch_url', new=capture_kwargs):
            results = self.run_algo('\nfrom zipline.api import fetch_csv, record, sid\n\ndef rename_col(df):\n df = df.rename(columns={\'New York 15:00\': \'price\'})\n df = df.fillna(method=\'ffill\')\n return df[[\'price\', \'sid\']]\n\ndef initialize(context):\n fetch_csv(\' date_column=\'Date\',\n symbol=\'palladium\',\n post_func=rename_col,\n date_format=\'%Y-%m-%d\'\n )\n context.stock = sid(24)\n\ndef handle_data(context, data):\n record(palladium=data.current("palladium", "price"))\n record(aapl=data.current(context.stock, "price"))\n ')
            np.testing.assert_array_equal(([24] * 251), results['aapl'])
            self.assertEqual(337, results['palladium'].iloc[(- 1)])
        expected = {'allow_redirects': False, 'stream': True, 'timeout': 30.0}
        self.assertEqual(expected, requests_kwargs)

    ([('symbol', FETCHER_UNIVERSE_DATA, None), ('arglebargle', FETCHER_UNIVERSE_DATA_TICKER_COLUMN, FETCHER_ALTERNATE_COLUMN_HEADER)])
    def test_fetcher_universe(self, name, data, column_name):
        """data.fetcher_assets tracks the CSV-defined universe per day."""
        with patch('zipline.sources.requests_csv.PandasRequestsCSV.fetch_url', new=(lambda *a, **k: data)):
            sim_params = factory.create_simulation_parameters(start=pd.Timestamp('2006-01-09', tz='UTC'), end=pd.Timestamp('2006-01-11', tz='UTC'))
            algocode = '\nfrom pandas import Timestamp\nfrom zipline.api import fetch_csv, record, sid, get_datetime\nfrom zipline.utils.pandas_utils import normalize_date\n\ndef initialize(context):\n fetch_csv(\n \' date_format=\'%m/%d/%Y\'{token}\n )\n context.expected_sids = {{\n Timestamp(\'2006-01-09 00:00:00+0000\', tz=\'UTC\'):[24, 3766, 5061],\n Timestamp(\'2006-01-10 00:00:00+0000\', tz=\'UTC\'):[24, 3766, 5061],\n Timestamp(\'2006-01-11 00:00:00+0000\', tz=\'UTC\'):[24, 3766, 5061, 14848]\n }}\n context.bar_count = 0\n\ndef handle_data(context, data):\n expected = context.expected_sids[normalize_date(get_datetime())]\n actual = data.fetcher_assets\n for stk in expected:\n if stk not in actual:\n raise Exception(\n "{{stk}} is missing on dt={{dt}}".format(\n stk=stk, dt=get_datetime()))\n\n record(sid_count=len(actual))\n record(bar_count=context.bar_count)\n context.bar_count += 1\n '
            replacement = ''
            if column_name:
                replacement = (",symbol_column='%s'\n" % column_name)
            real_algocode = algocode.format(token=replacement)
            results = self.run_algo(real_algocode, sim_params=sim_params)
            self.assertEqual(len(results), 3)
            self.assertEqual(3, results['sid_count'].iloc[0])
            self.assertEqual(3, results['sid_count'].iloc[1])
            self.assertEqual(4, results['sid_count'].iloc[2])

    def test_fetcher_universe_non_security_return(self):
        """Non-security rows in a universe CSV must not appear in fetcher_assets."""
        self.responses.add(self.responses.GET, ' body=NON_ASSET_FETCHER_UNIVERSE_DATA, content_type='text/csv')
        sim_params = factory.create_simulation_parameters(start=pd.Timestamp('2006-01-09', tz='UTC'), end=pd.Timestamp('2006-01-10', tz='UTC'))
        self.run_algo('\nfrom zipline.api import fetch_csv\n\ndef initialize(context):\n fetch_csv(\n \' date_format=\'%m/%d/%Y\'\n )\n\ndef handle_data(context, data):\n if len(data.fetcher_assets) > 0:\n raise Exception("Shouldn\'t be any assets in fetcher_assets!")\n ', sim_params=sim_params)

    def test_order_against_data(self):
        """Ordering a fetcher-only symbol raises UnsupportedOrderParameters."""
        self.responses.add(self.responses.GET, ' body=PALLADIUM_DATA, content_type='text/csv')
        with self.assertRaises(UnsupportedOrderParameters):
            self.run_algo("\nfrom zipline.api import fetch_csv, order, sid\n\ndef rename_col(df):\n return df.rename(columns={'New York 15:00': 'price'})\n\ndef initialize(context):\n fetch_csv(' date_column='Date',\n symbol='palladium',\n post_func=rename_col,\n date_format='%Y-%m-%d'\n )\n context.stock = sid(24)\n\ndef handle_data(context, data):\n order('palladium', 100)\n ")

    def test_fetcher_universe_minute(self):
        """Universe tracking also works at minute data frequency."""
        self.responses.add(self.responses.GET, ' body=FETCHER_UNIVERSE_DATA, content_type='text/csv')
        sim_params = factory.create_simulation_parameters(start=pd.Timestamp('2006-01-09', tz='UTC'), end=pd.Timestamp('2006-01-11', tz='UTC'), data_frequency='minute')
        results = self.run_algo('\nfrom pandas import Timestamp\nfrom zipline.api import fetch_csv, record, get_datetime\n\ndef initialize(context):\n fetch_csv(\n \' date_format=\'%m/%d/%Y\'\n )\n context.expected_sids = {\n Timestamp(\'2006-01-09 00:00:00+0000\', tz=\'UTC\'):[24, 3766, 5061],\n Timestamp(\'2006-01-10 00:00:00+0000\', tz=\'UTC\'):[24, 3766, 5061],\n Timestamp(\'2006-01-11 00:00:00+0000\', tz=\'UTC\'):[24, 3766, 5061, 14848]\n }\n context.bar_count = 0\n\ndef handle_data(context, data):\n expected = context.expected_sids[get_datetime().replace(hour=0, minute=0)]\n actual = data.fetcher_assets\n for stk in expected:\n if stk not in actual:\n raise Exception("{stk} is missing".format(stk=stk))\n\n record(sid_count=len(actual))\n record(bar_count=context.bar_count)\n context.bar_count += 1\n ', sim_params=sim_params)
        self.assertEqual(3, len(results))
        self.assertEqual(3, results['sid_count'].iloc[0])
        self.assertEqual(3, results['sid_count'].iloc[1])
        self.assertEqual(4, results['sid_count'].iloc[2])

    def test_fetcher_in_before_trading_start(self):
        """Fetched data is usable from before_trading_start with forward fill."""
        self.responses.add(self.responses.GET, ' body=NFLX_DATA, content_type='text/csv')
        sim_params = factory.create_simulation_parameters(start=pd.Timestamp('2013-06-13', tz='UTC'), end=pd.Timestamp('2013-11-15', tz='UTC'), data_frequency='minute')
        results = self.run_algo("\nfrom zipline.api import fetch_csv, record, symbol\n\ndef initialize(context):\n fetch_csv(' date_column = 'Settlement Date',\n date_format = '%m/%d/%y')\n context.stock = symbol('NFLX')\n\ndef before_trading_start(context, data):\n record(Short_Interest = data.current(context.stock, 'dtc'))\n", sim_params=sim_params)
        values = results['Short_Interest']
        np.testing.assert_array_equal(values[0:33], np.full(33, np.nan))
        np.testing.assert_array_almost_equal(values[33:44], ([1.690317] * 11))
        np.testing.assert_array_almost_equal(values[44:55], ([2.811858] * 11))
        np.testing.assert_array_almost_equal(values[55:64], ([2.50233] * 9))
        np.testing.assert_array_almost_equal(values[64:75], ([2.550829] * 11))
        np.testing.assert_array_almost_equal(values[75:], ([2.64484] * 35))

    def test_fetcher_bad_data(self):
        """Missing/invalid fetched columns surface as NaN rather than raising."""
        self.responses.add(self.responses.GET, ' body=NFLX_DATA, content_type='text/csv')
        sim_params = factory.create_simulation_parameters(start=pd.Timestamp('2013-06-12', tz='UTC'), end=pd.Timestamp('2013-06-14', tz='UTC'), data_frequency='minute')
        results = self.run_algo("\nfrom zipline.api import fetch_csv, symbol\nimport numpy as np\n\ndef initialize(context):\n fetch_csv(' date_column = 'Settlement Date',\n date_format = '%m/%d/%y')\n context.nflx = symbol('NFLX')\n context.aapl = symbol('AAPL', country_code='US')\n\ndef handle_data(context, data):\n assert np.isnan(data.current(context.nflx, 'invalid_column'))\n assert np.isnan(data.current(context.aapl, 'invalid_column'))\n assert np.isnan(data.current(context.aapl, 'dtc'))\n", sim_params=sim_params)
        self.assertEqual(3, len(results))
class RatingBox(Gtk.VBox):
    """Vertical box with a title, a score label, and thumbs up/down toggle
    buttons for voting on the current item.

    NOTE(review): the empty ToggleButton('') labels look like stripped icon
    glyphs, and `super().__init__(self)` passes self as VBox's first
    constructor argument — both look unintended; confirm against upstream.
    """

    def __init__(self):
        super().__init__(self)
        # Current vote tallies for the displayed item.
        self.thumb_ups = 1
        self.thumb_downs = 1
        self.title = Gtk.Label('')
        self.title.set_line_wrap(True)
        self.title.set_lines(2)
        hbox = Gtk.HBox()
        self.upvote = ToggleButton('')
        self.downvote = ToggleButton('')
        # Make the two buttons mutually exclusive via __thumb_toggled.
        self.upvote.connect('toggled', self.__thumb_toggled)
        self.downvote.connect('toggled', self.__thumb_toggled)
        self.score_label = Gtk.Label('----')
        self.upvote.set_property('height-request', 50)
        self.downvote.set_property('height-request', 50)
        hbox.pack_start(self.upvote, True, True, 5)
        hbox.pack_start(self.downvote, True, True, 5)
        self.hbox = hbox
        self.pack_start(self.title, False, False, 10)
        self.pack_start(self.score_label, True, True, 5)
        self.pack_start(self.hbox, False, False, 5)

    def set_current_title(self, title):
        self.title.set_text(title)

    def set_current_score(self, cth_up, cth_down):
        """Store the item's vote tallies and refresh the score label."""
        self.thumb_ups = cth_up
        self.thumb_downs = cth_down
        self.__set_pending_score_value((self.thumb_ups - self.thumb_downs))

    def poll_vote(self, reset=True):
        """Return the pending (up, down) vote, optionally clearing the buttons."""
        upward = (1 if self.upvote.get_active() else 0)
        downward = (1 if self.downvote.get_active() else 0)
        vote = (upward, downward)
        if reset:
            self.downvote.set_active(False)
            self.upvote.set_active(False)
        return vote

    def __set_pending_score_value(self, score):
        """Render `score`: bold if unchanged, green if raised, red if lowered."""
        existing_score = (self.thumb_ups - self.thumb_downs)
        if (score == existing_score):
            self.score_label.set_markup(util.bold(str(int(score))))
        elif (score > existing_score):
            self.score_label.set_markup((('<b><span foreground="green">' + str(int(score))) + '</span></b>'))
        else:
            self.score_label.set_markup((('<b><span foreground="red">' + str(int(score))) + '</span></b>'))

    def __thumb_toggled(self, button):
        # Activating one button deactivates the other; then preview the score.
        if button.get_active():
            if (button == self.upvote):
                self.downvote.set_active(False)
            elif (button == self.downvote):
                self.upvote.set_active(False)
        vote = self.poll_vote(False)
        self.__set_pending_score_value((((self.thumb_ups + vote[0]) - self.thumb_downs) - vote[1]))
def get_val_transformations(p):
    """Build the validation-time transform pipeline for the configured dataset.

    ``p['val_db_name']`` selects the dataset; unsupported names raise
    NotImplementedError.  Both pipelines end with ImageNet-mean/std
    normalization.
    """
    dataset = p['val_db_name']
    if dataset == 'VOCSegmentation':
        import data.dataloaders.fblib_transforms as fblib_tr
        # Fixed 512x512 resize: bicubic for the image, nearest for the labels
        # (nearest preserves discrete class ids).
        target_size = (512, 512)
        return transforms.Compose([
            fblib_tr.FixedResize(
                resolutions={'image': target_size, 'semseg': target_size},
                flagvals={'image': cv2.INTER_CUBIC, 'semseg': cv2.INTER_NEAREST},
            ),
            fblib_tr.ToTensor(),
            fblib_tr.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])
    if dataset == 'cityscapes':
        import data.dataloaders.vanilla_transforms as tr
        # Cityscapes keeps its native resolution; only tensorize + normalize.
        return transforms.Compose([
            tr.ToTensor(),
            tr.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])
    raise NotImplementedError
def xml_parse(father, page_tree, flag=1):
    """Recursively flatten an XML element into a nested dict.

    Each child tag maps to ``[attr, text, children_dict]`` in *page_tree*.

    With ``flag`` truthy (recursive mode): ``attr`` is the child's ``xpath``
    attribute ('' when absent), ``text`` is the child's text ('' when None or
    containing a newline), and grandchildren are parsed into the third slot.

    With ``flag`` falsy (shallow mode): ``attr``/``text`` are stored raw, and
    if a direct ``<step>`` child exists its ``<elem>`` descendants are
    collected as ``[attrib, text]`` pairs under ``page_tree['step'][2]['elem']``.
    """
    if flag:
        for child in father:
            # BUGFIX: replaced a bare try/except around attrib['xpath'] with
            # dict.get -- same result, no blanket exception swallowing.
            tag_attr = child.attrib.get('xpath', '')
            # Whitespace-only "layout" text (anything containing a newline)
            # is treated as empty.
            if child.text is None or '\n' in child.text:
                tag_text = ''
            else:
                tag_text = child.text
            page_tree[child.tag] = [tag_attr, tag_text, {}]
            xml_parse(child, page_tree[child.tag][2], flag=1)
    else:
        for child in father:
            page_tree[child.tag] = [child.attrib, child.text, {}]
        # BUGFIX: replaced try/bare-except with explicit checks.  Behavior is
        # preserved: the 'elem' list is only created when a 'step' entry
        # exists, and stays empty when no <step> child is found.
        if 'step' in page_tree:
            page_tree['step'][2]['elem'] = []
            step = father.find('step')
            if step is not None:
                for elem in step.iter('elem'):
                    page_tree['step'][2]['elem'].append([elem.attrib, elem.text])
class DicomLocation(db.Model):
    """SQLAlchemy model for a remote DICOM node owned by an API key."""
    __tablename__ = 'DicomLocation'
    id = db.Column(db.Integer, primary_key=True)
    # Human-readable label for this location.
    name = db.Column(db.String(128))
    # Network endpoint of the DICOM peer.
    host = db.Column(db.String(128), nullable=False)
    port = db.Column(db.Integer, nullable=False)
    # DICOM Application Entity title used when associating with the peer.
    ae_title = db.Column(db.String(128))
    # Owning API key (FK to APIKey.key); every location must have an owner.
    owner_key = db.Column(db.String(80), db.ForeignKey('APIKey.key'), nullable=False)
    def __repr__(self):
        # e.g. "pacs.example.org 11112 MYAE"
        return '{0} {1} {2}'.format(self.host, self.port, self.ae_title)
class Effect446(BaseEffect):
    """Passive effect: boosts ship shield capacity by the item's bonus.

    When the source is a skill the bonus scales with its trained level;
    any other source contributes a single multiple of the bonus.
    """

    type = 'passive'

    def handler(fit, container, context, projectionRange, **kwargs):
        if 'skill' in context:
            multiplier = container.level
        else:
            multiplier = 1
        bonus = container.getModifiedItemAttr('shieldCapacityBonus') * multiplier
        fit.ship.boostItemAttr('shieldCapacity', bonus, **kwargs)
def _print_panoptic_results(pq_res):
    """Log a markdown-pipe table of panoptic metrics (PQ/SQ/RQ in %, category count)."""
    headers = ['', 'PQ', 'SQ', 'RQ', '#categories']
    # One row per split, metrics converted from fractions to percentages.
    data = [
        [name] + [pq_res[name][metric] * 100 for metric in ('pq', 'sq', 'rq')] + [pq_res[name]['n']]
        for name in ('All', 'Things', 'Stuff')
    ]
    table = tabulate(
        data,
        headers=headers,
        tablefmt='pipe',
        floatfmt='.3f',
        stralign='center',
        numalign='center',
    )
    logger.info('Panoptic Evaluation Results:\n' + table)
def datatype(name, static_fields):
    """Factory returning a lightweight record class named *name*.

    Instances carry a ``db_id``, optional ``inputs``, and arbitrary keyword
    fields; every name in *static_fields* must be supplied at construction.

    NOTE(review): the bare ``_ordering`` line below looks like a stripped
    decorator (probably ``functools.total_ordering`` on DataType), and
    ``from_dict`` reads like a stripped ``@classmethod`` -- confirm against
    the original source.
    """
    _ordering
    class DataType(object):
        __name__ = name
        def __init__(self, **kwargs):
            # db_id / inputs are pulled out; every remaining kwarg is a field.
            self._db_id = kwargs.pop('db_id', None)
            self._inputs = kwargs.pop('inputs', None)
            self._fields = kwargs
            for name in static_fields:
                assert (name in self._fields), ('Missing field %s' % name)
        def __eq__(self, other):
            # Identity is the database id, not the field values.
            return (self._db_id == other._db_id)
        def __lt__(self, other):
            return (self._db_id < other._db_id)
        def __getattr__(self, name):
            # Expose declared fields as attributes; anything else is an error.
            if (name in static_fields):
                return self._fields[name]
            raise AttributeError(('Unknown field `%s`' % name))
        def __repr__(self):
            return ('<%s> #%s' % (name, self._db_id))
        def __hash__(self):
            # Hash pairs the type name with the id so distinct datatypes with
            # equal ids do not collide.
            return hash((self.__name__, self._db_id))
        def from_dict(cls, dict_data):
            # Wrap construction errors in a domain-specific exception.
            try:
                return cls(**dict_data)
            except:
                raise FromDictionaryException()
        def asdict(self):
            # Serialize back to a flat dict, re-inlining db_id / inputs.
            dictionary_rep = dict(self._fields)
            assert (('db_id' not in dictionary_rep) and ('inputs' not in dictionary_rep))
            dictionary_rep['db_id'] = self._db_id
            dictionary_rep['inputs'] = self._inputs
            return dictionary_rep
    return DataType
.filterwarnings('ignore:The input coordinates to pcolor:UserWarning')
.parametrize('color, args', [('sequential', {}), ('diverging', {}), ('sequential', {'projection': '3d'}), ('sequential', {'colorbar': True})])
# NOTE(review): the two lines above look like stripped pytest.mark decorators
# ('@pytest.mark.' prefix missing) -- confirm against the original source.
def test_plot_spin_distribution(color, args):
    """Smoke-test qutip.plot_spin_distribution across colormap styles/options."""
    j = 5
    # Random spin-j coherent state sampled over the sphere.
    psi = qutip.spin_coherent(j, (np.random.rand() * np.pi), ((np.random.rand() * 2) * np.pi))
    theta = np.linspace(0, np.pi, 50)
    phi = np.linspace(0, (2 * np.pi), 50)
    (Q, THETA, PHI) = qutip.spin_q_function(psi, theta, phi)
    if (color == 'diverging'):
        # NOTE(review): multiplying by -0.0 zeroes the entire distribution;
        # a diverging-colormap branch presumably intended a sign flip such as
        # -1.0.  The constants look extraction-garbled -- confirm upstream;
        # left untouched here.
        Q *= (- .0)
        Q[(0, 0)] = (- .0)
    (fig, ax) = qutip.plot_spin_distribution(Q, THETA, PHI, **args)
    plt.close()
    # Only the return types are asserted; rendering details are not checked.
    assert isinstance(fig, mpl.figure.Figure)
    assert isinstance(ax, mpl.axes.Axes)
class Decoder(nn.Module):
    """Decode flat state embeddings into 3x64x64 images in [0, 1].

    A two-layer ReLU MLP lifts each embedding to ``hidden_size``; the result
    is treated as a ``hidden_size x 1 x 1`` feature map and upsampled by a
    DCGAN-style stack of transposed convolutions ending in a Sigmoid.
    """

    def __init__(self, state_embed_size, hidden_size):
        super().__init__()
        self.state_embed_size = state_embed_size
        self.hidden_size = hidden_size
        # MLP head: embed -> hidden -> hidden.
        self.decoder_lins = nn.Sequential(
            nn.Linear(state_embed_size, self.hidden_size),
            nn.ReLU(True),
            nn.Linear(self.hidden_size, self.hidden_size),
            nn.ReLU(True),
        )
        # Spatial upsampling: 1x1 -> 4 -> 8 -> 16 -> 32 -> 64.
        self.decoder_generator = nn.Sequential(
            nn.ConvTranspose2d(self.hidden_size, 32 * 8, 4, 1, 0),
            nn.BatchNorm2d(32 * 8),
            nn.ReLU(True),
            nn.ConvTranspose2d(32 * 8, 32 * 4, 4, 2, 1),
            nn.BatchNorm2d(32 * 4),
            nn.ReLU(True),
            nn.ConvTranspose2d(32 * 4, 32 * 2, 4, 2, 1),
            nn.BatchNorm2d(32 * 2),
            nn.ReLU(True),
            nn.ConvTranspose2d(32 * 2, 32, 4, 2, 1),
            nn.BatchNorm2d(32),
            nn.ReLU(True),
            nn.ConvTranspose2d(32, 3, 4, 2, 1),
            nn.Sigmoid(),
        )

    def forward(self, state_z):
        """Map (..., state_embed_size) embeddings to (B, 3, 64, 64) images."""
        flat = state_z.contiguous().view(-1, self.state_embed_size)
        features = self.decoder_lins(flat)
        # Add trailing singleton spatial dims: (B, H) -> (B, H, 1, 1).
        features = features.view(features.size(0), features.size(1), 1, 1)
        return self.decoder_generator(features)
def get_doc(project, source_code, offset, resource=None, maxfixes=1):
    """Return documentation for the object at *offset*, or None if unresolvable.

    Syntax errors in *source_code* are repaired (up to *maxfixes* attempts)
    before resolving the name under the cursor.
    """
    fixer = fixsyntax.FixSyntax(project, source_code, resource, maxfixes)
    resolved = fixer.pyname_at(offset)
    if resolved is None:
        return None
    return PyDocExtractor().get_doc(resolved.get_object())
class RerankingEvaluator(SentenceEvaluator):
    """Evaluate a sentence-embedding model on a reranking task.

    Each sample is a dict ``{'query': str, 'positive': [...], 'negative': [...]}``.
    Documents are ranked by similarity to the query; the evaluator reports
    mean MRR@k and mean average precision (MAP), returning MAP.
    """

    def __init__(self, samples, mrr_at_k: int=10, name: str='', write_csv: bool=True, similarity_fct=cos_sim):
        self.samples = samples
        self.name = name
        self.mrr_at_k = mrr_at_k
        # BUGFIX: the original assigned the module-level ``cos_sim`` here,
        # silently ignoring the ``similarity_fct`` argument.
        self.similarity_fct = similarity_fct
        # Accept either a list of samples or a dict of them.
        if isinstance(self.samples, dict):
            self.samples = list(self.samples.values())
        self.csv_file = (('RerankingEvaluator' + (('_' + name) if name else '')) + '_results.csv')
        # NOTE(review): this header is just the bare k value; upstream uses
        # 'MRR@{}' -- the literal may have been truncated.  Left as found.
        self.csv_headers = ['epoch', 'steps', 'MAP', '{}'.format(mrr_at_k)]
        self.write_csv = write_csv

    def __call__(self, model, output_path: str=None, epoch: int=(- 1), steps: int=(- 1)) -> float:
        """Run the evaluation; optionally append results to a CSV.

        Returns the mean average precision over all valid queries.
        """
        if (epoch != (- 1)):
            if (steps == (- 1)):
                out_txt = ' after epoch {}:'.format(epoch)
            else:
                out_txt = ' in epoch {} after {} steps:'.format(epoch, steps)
        else:
            out_txt = ':'
        logger.info(((('RerankingEvaluator: Evaluating the model on ' + self.name) + ' dataset') + out_txt))
        all_mrr_scores = []
        all_ap_scores = []
        num_queries = 0
        num_positives = []
        num_negatives = []
        for instance in self.samples:
            query = instance['query']
            positive = list(instance['positive'])
            negative = list(instance['negative'])
            docs = (positive + negative)
            is_relevant = (([True] * len(positive)) + ([False] * len(negative)))
            # Metrics are undefined without both relevant and irrelevant docs.
            if ((len(positive) == 0) or (len(negative) == 0)):
                continue
            num_queries += 1
            num_positives.append(len(positive))
            num_negatives.append(len(negative))
            query_emb = model.encode(query, convert_to_tensor=True, show_progress_bar=False)
            docs_emb = model.encode(docs, convert_to_tensor=True, show_progress_bar=False)
            pred_scores = self.similarity_fct(query_emb, docs_emb)
            # Some similarity functions return a (1, n) matrix; take row 0.
            if (len(pred_scores.shape) > 1):
                pred_scores = pred_scores[0]
            # Rank by descending similarity.
            pred_scores_argsort = torch.argsort((- pred_scores))
            # MRR@k: reciprocal rank of the first relevant doc in the top k.
            mrr_score = 0
            for (rank, index) in enumerate(pred_scores_argsort[0:self.mrr_at_k]):
                if is_relevant[index]:
                    mrr_score = (1 / (rank + 1))
                    break
            all_mrr_scores.append(mrr_score)
            all_ap_scores.append(average_precision_score(is_relevant, pred_scores.cpu().tolist()))
        mean_ap = np.mean(all_ap_scores)
        mean_mrr = np.mean(all_mrr_scores)
        logger.info('Queries: {} \t Positives: Min {:.1f}, Mean {:.1f}, Max {:.1f} \t Negatives: Min {:.1f}, Mean {:.1f}, Max {:.1f}'.format(num_queries, np.min(num_positives), np.mean(num_positives), np.max(num_positives), np.min(num_negatives), np.mean(num_negatives), np.max(num_negatives)))
        logger.info('MAP: {:.2f}'.format((mean_ap * 100)))
        logger.info('{}: {:.2f}'.format(self.mrr_at_k, (mean_mrr * 100)))
        if ((output_path is not None) and self.write_csv):
            csv_path = os.path.join(output_path, self.csv_file)
            output_file_exists = os.path.isfile(csv_path)
            # Append when the file exists so repeated evaluations accumulate rows.
            with open(csv_path, mode=('a' if output_file_exists else 'w'), encoding='utf-8') as f:
                writer = csv.writer(f)
                if (not output_file_exists):
                    writer.writerow(self.csv_headers)
                writer.writerow([epoch, steps, mean_ap, mean_mrr])
        return mean_ap
class TuckER(Virtue_Triple):
    """TuckER-style trilinear scorer over (p, q, r) triples.

    Scores are the core tensor W contracted with the three entity/relation
    embeddings; an L2 regularization term over the looked-up embeddings is
    returned alongside the scores.
    """

    def __init__(self, num_ps, num_qs, num_rs, embedding_dim, reg):
        super(TuckER, self).__init__(num_ps, num_qs, num_rs, embedding_dim, reg)
        # Core tensor W of shape (d, d, d), Xavier-initialized.
        w = torch.empty(embedding_dim, embedding_dim, embedding_dim)
        nn.init.xavier_uniform_(w)
        # BUGFIX: was torch.tensor(w, ..., requires_grad=True) on an existing
        # tensor, which copies it and emits a UserWarning; clone().detach()
        # is the documented equivalent.  nn.Parameter implies requires_grad.
        # NOTE(review): device is hard-coded to 'cuda' as in the original.
        self._W = torch.nn.Parameter(w.clone().detach().to(dtype=torch.float, device='cuda'))

    def forward(self, ps, qs, rs):
        """Return (scores, reg_term) for index tensors ps, qs, rs."""
        ps_embedding = self._PsEmbedding(ps)
        qs_embedding = self._QsEmbedding(qs)
        rs_embedding = self._RsEmbedding(rs)
        # Contract W with the p embeddings: (B, d) x (d, d*d) -> (B, d, d).
        W_after_p = torch.mm(ps_embedding, self._W.view(ps_embedding.size(1), (- 1)))
        W_after_p = W_after_p.view((- 1), rs_embedding.size(1), qs_embedding.size(1))
        # Contract with r then q, leaving one scalar score per triple.
        W_after_r = torch.bmm(rs_embedding.view((- 1), 1, rs_embedding.size(1)), W_after_p)
        W_after_q = torch.bmm(W_after_r, qs_embedding.view((- 1), qs_embedding.size(1), 1))
        inferences = W_after_q.view((- 1), 1)
        # L2 penalty on the embeddings actually used in this batch.
        regs = (self.reg * ((torch.norm(ps_embedding) + torch.norm(qs_embedding)) + torch.norm(rs_embedding)))
        return (inferences, regs)
def get_features(commit_hash, return_dict=False):
    """Collect commit metadata (title, body, PR number, files, labels).

    GitHub labels are fetched only when a PR number can be parsed from the
    commit.  Returns a Features record, or its dict form when *return_dict*
    is true.
    """
    title = commit_title(commit_hash)
    body = commit_body(commit_hash)
    files_changed = commit_files_changed(commit_hash)
    pr_number = parse_pr_number(body, commit_hash, title)
    labels = gh_labels(pr_number) if pr_number is not None else []
    features = Features(title, body, pr_number, files_changed, labels)
    if return_dict:
        return features_to_dict(features)
    return features
class CRLNumber(ExtensionType):
    """X.509 CRLNumber extension: the CRL's monotonically increasing sequence number.

    NOTE(review): ``crl_number`` below is accessed as ``self.crl_number`` in
    __eq__/__hash__/__repr__, which only works if it is a property -- the
    ``@property`` decorator appears to have been stripped; confirm against
    the original source.
    """
    oid = ExtensionOID.CRL_NUMBER
    def __init__(self, crl_number: int) -> None:
        # Reject non-integers early; the DER encoding requires an INTEGER.
        if (not isinstance(crl_number, int)):
            raise TypeError('crl_number must be an integer')
        self._crl_number = crl_number
    def __eq__(self, other: object) -> bool:
        if (not isinstance(other, CRLNumber)):
            return NotImplemented
        return (self.crl_number == other.crl_number)
    def __hash__(self) -> int:
        return hash(self.crl_number)
    def __repr__(self) -> str:
        return f'<CRLNumber({self.crl_number})>'
    def crl_number(self) -> int:
        # Read-only accessor for the wrapped integer.
        return self._crl_number
    def public_bytes(self) -> bytes:
        # DER-encode the extension value via the Rust backend.
        return rust_x509.encode_extension_value(self)
def test_default(hatch, helpers, temp_dir, config_file):
    """Custom builder defined in the project's build script renames built wheels.

    NOTE(review): ``CustomBuilder`` is not defined in this chunk (presumably
    imported at the top of the file), while the build script below defines
    ``CustomWheelBuilder`` -- confirm how the two are wired together.
    """
    # Use a flat (non-src) layout for the generated project.
    config_file.model.template.plugins['default']['src-layout'] = False
    config_file.save()
    project_name = 'My.App'
    with temp_dir.as_cwd():
        result = hatch('new', project_name)
    assert (result.exit_code == 0), result.output
    project_path = (temp_dir / 'my-app')
    # Project metadata enabling the 'custom' build target with a dynamic version.
    config = {'project': {'name': project_name, 'dynamic': ['version']}, 'tool': {'hatch': {'version': {'path': 'my_app/__about__.py'}, 'build': {'targets': {'custom': {}}}}}}
    file_path = (project_path / DEFAULT_BUILD_SCRIPT)
    # Build script: subclass WheelBuilder and rename each built artifact to
    # '<plugin-name>-<index>.whl'.
    file_path.write_text(helpers.dedent("\n    import os\n\n    from hatchling.builders.wheel import WheelBuilder\n\n    def get_builder():\n        return CustomWheelBuilder\n\n    class CustomWheelBuilder(WheelBuilder):\n        def build(self, **kwargs):\n            for i, artifact in enumerate(super().build(**kwargs)):\n                build_dir = os.path.dirname(artifact)\n                new_path = os.path.join(build_dir, f'{self.PLUGIN_NAME}-{i}.whl')\n                os.replace(artifact, new_path)\n                yield new_path\n    "))
    builder = CustomBuilder(str(project_path), config=config)
    build_path = (project_path / 'dist')
    with project_path.as_cwd():
        artifacts = list(builder.build())
    # Exactly one renamed wheel is produced and reported back.
    assert (len(artifacts) == 1)
    expected_artifact = artifacts[0]
    build_artifacts = list(build_path.iterdir())
    assert (len(build_artifacts) == 1)
    assert (expected_artifact == str(build_artifacts[0]))
    assert (expected_artifact == str((build_path / 'custom-0.whl')))
    # Unpack the wheel and compare its contents to the expected template files.
    extraction_directory = (temp_dir / '_archive')
    extraction_directory.mkdir()
    with zipfile.ZipFile(str(expected_artifact), 'r') as zip_archive:
        zip_archive.extractall(str(extraction_directory))
    metadata_directory = f'{builder.project_id}.dist-info'
    expected_files = helpers.get_template_files('wheel.standard_default_license_single', project_name, metadata_directory=metadata_directory)
    helpers.assert_files(extraction_directory, expected_files)
    # Archive timestamps are pinned for reproducible builds.
    with zipfile.ZipFile(str(expected_artifact), 'r') as zip_archive:
        zip_info = zip_archive.getinfo(f'{metadata_directory}/WHEEL')
        assert (zip_info.date_time == (2020, 2, 2, 0, 0, 0))
.xfail(reason="doesn't match, since pivot implicitly sorts")
# NOTE(review): the line above looks like a stripped @pytest.mark.xfail
# decorator -- confirm against the original source.
def test_pivot_wide_long_wide():
    """Round-trip pivot_longer -> pivot_wider should reproduce the input.

    Expected to fail: pivot_wider sorts implicitly, changing row order.
    """
    df = pd.DataFrame({'name': ['Wilbur', 'Petunia', 'Gregory'], 'a': [67, 80, 64], 'b': [56, 90, 50]})
    # Wide -> long: melt columns a/b into (drug, heartrate) pairs.
    result = df.pivot_longer(column_names=['a', 'b'], names_to='drug', values_to='heartrate')
    # Long -> wide: spread back to one column per drug, keyed by name.
    result = result.pivot_wider(index='name', names_from='drug', values_from='heartrate')
    assert_frame_equal(result, df)
class TRECEval(object):
    """SentEval transfer task: TREC question-type classification (6 classes)."""

    def __init__(self, task_path, seed=1111):
        logging.info('***** Transfer task : TREC *****\n\n')
        self.seed = seed
        self.train = self.loadFile(os.path.join(task_path, 'train_5500.label'))
        self.test = self.loadFile(os.path.join(task_path, 'TREC_10.label'))

    def do_prepare(self, params, prepare):
        # Hand the caller's prepare() hook the full vocabulary of sentences.
        samples = (self.train['X'] + self.test['X'])
        return prepare(params, samples)

    def loadFile(self, fpath):
        """Parse a TREC label file into {'X': [tokens...], 'y': [class ids]}.

        Lines look like 'DESC:manner How did ...'; only the coarse category
        before the ':' is kept as the label.
        """
        trec_data = {'X': [], 'y': []}
        tgt2idx = {'ABBR': 0, 'DESC': 1, 'ENTY': 2, 'HUM': 3, 'LOC': 4, 'NUM': 5}
        with io.open(fpath, 'r', encoding='latin-1') as f:
            for line in f:
                (target, sample) = line.strip().split(':', 1)
                # Drop the fine-grained subcategory token, keep the question words.
                sample = sample.split(' ', 1)[1].split()
                assert (target in tgt2idx), target
                trec_data['X'].append(sample)
                trec_data['y'].append(tgt2idx[target])
        return trec_data

    def run(self, params, batcher):
        """Embed train/test questions with *batcher* and fit a k-fold classifier.

        Returns dev/test accuracy plus dataset sizes.
        """
        (train_embeddings, test_embeddings) = ([], [])
        # Sort by (length, label) so batches contain similar-length inputs.
        sorted_corpus_train = sorted(zip(self.train['X'], self.train['y']), key=(lambda z: (len(z[0]), z[1])))
        train_samples = [x for (x, y) in sorted_corpus_train]
        train_labels = [y for (x, y) in sorted_corpus_train]
        sorted_corpus_test = sorted(zip(self.test['X'], self.test['y']), key=(lambda z: (len(z[0]), z[1])))
        test_samples = [x for (x, y) in sorted_corpus_test]
        test_labels = [y for (x, y) in sorted_corpus_test]
        # Encode in fixed-size batches; labels stay aligned via the sort above.
        for ii in range(0, len(train_labels), params.batch_size):
            batch = train_samples[ii:(ii + params.batch_size)]
            embeddings = batcher(params, batch)
            train_embeddings.append(embeddings)
        train_embeddings = np.vstack(train_embeddings)
        logging.info('Computed train embeddings')
        for ii in range(0, len(test_labels), params.batch_size):
            batch = test_samples[ii:(ii + params.batch_size)]
            embeddings = batcher(params, batch)
            test_embeddings.append(embeddings)
        test_embeddings = np.vstack(test_embeddings)
        logging.info('Computed test embeddings')
        config_classifier = {'nclasses': 6, 'seed': self.seed, 'usepytorch': params.usepytorch, 'classifier': params.classifier, 'kfold': params.kfold}
        clf = KFoldClassifier({'X': train_embeddings, 'y': np.array(train_labels)}, {'X': test_embeddings, 'y': np.array(test_labels)}, config_classifier)
        (devacc, testacc, _) = clf.run()
        logging.debug('\nDev acc : {0} Test acc : {1} for TREC\n'.format(devacc, testacc))
        return {'devacc': devacc, 'acc': testacc, 'ndev': len(self.train['X']), 'ntest': len(self.test['X'])}
def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    """Convert a T5X checkpoint into a HF Flax seq2seq model and save it.

    Supports ``model_type='t5'`` and ``model_type='longt5'`` with 'local' or
    'transient-global' encoder attention.

    Parameters:
        t5x_checkpoint_path: path to the T5X checkpoint directory.
        config_name: HF config identifier used to build the Flax model.
        flax_dump_folder_path: output directory for ``save_pretrained``.
    """
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    # Newer checkpoints split the MLP input projection into wi_0 / wi_1.
    split_mlp_wi = ('wi_0' in t5x_model['target']['encoder']['layers_0']['mlp'])
    if (config.model_type == 't5'):
        encoder_attn_name = 'SelfAttention'
    # BUGFIX: these branches were written as a standalone if/elif/else after
    # the 't5' check, so a plain t5 config always fell through to the
    # ValueError below (whose own message says t5 is valid).  They must be
    # elif branches of one chain.
    elif ((config.model_type == 'longt5') and (config.encoder_attention_type == 'local')):
        encoder_attn_name = 'LocalSelfAttention'
    elif ((config.model_type == 'longt5') and (config.encoder_attention_type == 'transient-global')):
        encoder_attn_name = 'TransientGlobalSelfAttention'
    else:
        raise ValueError("Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type` attribute with a value from ['local', 'transient-global].")
    # ---- Encoder layers ----
    for layer_index in range(config.num_layers):
        layer_name = f'layers_{str(layer_index)}'
        # Self-attention projection kernels.
        t5x_attention_key = t5x_model['target']['encoder'][layer_name]['attention']['key']['kernel']
        t5x_attention_out = t5x_model['target']['encoder'][layer_name]['attention']['out']['kernel']
        t5x_attention_query = t5x_model['target']['encoder'][layer_name]['attention']['query']['kernel']
        t5x_attention_value = t5x_model['target']['encoder'][layer_name]['attention']['value']['kernel']
        if ((config.model_type == 'longt5') and (config.encoder_attention_type == 'transient-global')):
            t5x_global_layer_norm = t5x_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
        t5x_attention_layer_norm = t5x_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
            t5x_mlp_wi_1 = t5x_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
        else:
            t5x_mlp_wi = t5x_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
        t5x_mlp_wo = t5x_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
        t5x_mlp_layer_norm = t5x_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
        # Copy into the Flax parameter tree (layer 0 = attention, 1 = MLP).
        flax_model_encoder_layer_block = flax_model.params['encoder']['block'][str(layer_index)]['layer']
        flax_model_encoder_layer_block['0'][encoder_attn_name]['k']['kernel'] = t5x_attention_key
        flax_model_encoder_layer_block['0'][encoder_attn_name]['o']['kernel'] = t5x_attention_out
        flax_model_encoder_layer_block['0'][encoder_attn_name]['q']['kernel'] = t5x_attention_query
        flax_model_encoder_layer_block['0'][encoder_attn_name]['v']['kernel'] = t5x_attention_value
        flax_model_encoder_layer_block['0']['layer_norm']['weight'] = t5x_attention_layer_norm
        if ((config.model_type == 'longt5') and (config.encoder_attention_type == 'transient-global')):
            flax_model_encoder_layer_block['0'][encoder_attn_name]['global_input_layer_norm']['weight'] = t5x_global_layer_norm
        if split_mlp_wi:
            flax_model_encoder_layer_block['1']['DenseReluDense']['wi_0']['kernel'] = t5x_mlp_wi_0
            flax_model_encoder_layer_block['1']['DenseReluDense']['wi_1']['kernel'] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block['1']['DenseReluDense']['wi']['kernel'] = t5x_mlp_wi
        flax_model_encoder_layer_block['1']['DenseReluDense']['wo']['kernel'] = t5x_mlp_wo
        flax_model_encoder_layer_block['1']['layer_norm']['weight'] = t5x_mlp_layer_norm
        flax_model.params['encoder']['block'][str(layer_index)]['layer'] = flax_model_encoder_layer_block
    # Relative position bias lives only in block 0 and is transposed.
    t5x_encoder_rel_embedding = t5x_model['target']['encoder']['relpos_bias']['rel_embedding'].T
    flax_model.params['encoder']['block']['0']['layer']['0'][encoder_attn_name]['relative_attention_bias']['embedding'] = t5x_encoder_rel_embedding
    if ((config.model_type == 'longt5') and (config.encoder_attention_type == 'transient-global')):
        t5x_encoder_global_rel_embedding = t5x_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
        flax_model.params['encoder']['block']['0']['layer']['0'][encoder_attn_name]['global_relative_attention_bias']['embedding'] = t5x_encoder_global_rel_embedding
    t5x_encoder_norm = t5x_model['target']['encoder']['encoder_norm']['scale']
    flax_model.params['encoder']['final_layer_norm']['weight'] = t5x_encoder_norm
    # ---- Decoder layers ----
    # NOTE(review): this loop reuses config.num_layers; if encoder and decoder
    # depths ever differ (config.num_decoder_layers) it would mis-iterate --
    # confirm against upstream before changing.
    for layer_index in range(config.num_layers):
        layer_name = f'layers_{str(layer_index)}'
        t5x_attention_key = t5x_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
        t5x_attention_out = t5x_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
        t5x_attention_query = t5x_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
        t5x_attention_value = t5x_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
        t5x_pre_attention_layer_norm = t5x_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm']['scale']
        # Cross-attention (encoder-decoder) projections.
        t5x_enc_dec_attention_module = t5x_model['target']['decoder'][layer_name]['encoder_decoder_attention']
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module['key']['kernel']
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module['out']['kernel']
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module['query']['kernel']
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module['value']['kernel']
        t5x_cross_layer_norm = t5x_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
            t5x_mlp_wi_1 = t5x_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
        else:
            t5x_mlp_wi = t5x_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
        t5x_mlp_wo = t5x_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
        tx5_mlp_layer_norm = t5x_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
        # Decoder layer tree: 0 = self-attn, 1 = cross-attn, 2 = MLP.
        flax_model_decoder_layer_block = flax_model.params['decoder']['block'][str(layer_index)]['layer']
        flax_model_decoder_layer_block['0']['SelfAttention']['k']['kernel'] = t5x_attention_key
        flax_model_decoder_layer_block['0']['SelfAttention']['o']['kernel'] = t5x_attention_out
        flax_model_decoder_layer_block['0']['SelfAttention']['q']['kernel'] = t5x_attention_query
        flax_model_decoder_layer_block['0']['SelfAttention']['v']['kernel'] = t5x_attention_value
        flax_model_decoder_layer_block['0']['layer_norm']['weight'] = t5x_pre_attention_layer_norm
        flax_model_decoder_layer_block['1']['EncDecAttention']['k']['kernel'] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block['1']['EncDecAttention']['o']['kernel'] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block['1']['EncDecAttention']['q']['kernel'] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block['1']['EncDecAttention']['v']['kernel'] = t5x_enc_dec_attention_value
        flax_model_decoder_layer_block['1']['layer_norm']['weight'] = t5x_cross_layer_norm
        if split_mlp_wi:
            flax_model_decoder_layer_block['2']['DenseReluDense']['wi_0']['kernel'] = t5x_mlp_wi_0
            flax_model_decoder_layer_block['2']['DenseReluDense']['wi_1']['kernel'] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block['2']['DenseReluDense']['wi']['kernel'] = t5x_mlp_wi
        flax_model_decoder_layer_block['2']['DenseReluDense']['wo']['kernel'] = t5x_mlp_wo
        flax_model_decoder_layer_block['2']['layer_norm']['weight'] = tx5_mlp_layer_norm
        flax_model.params['decoder']['block'][str(layer_index)]['layer'] = flax_model_decoder_layer_block
    tx5_decoder_norm = t5x_model['target']['decoder']['decoder_norm']['scale']
    flax_model.params['decoder']['final_layer_norm']['weight'] = tx5_decoder_norm
    t5x_decoder_rel_embedding = t5x_model['target']['decoder']['relpos_bias']['rel_embedding'].T
    flax_model.params['decoder']['block']['0']['layer']['0']['SelfAttention']['relative_attention_bias']['embedding'] = t5x_decoder_rel_embedding
    # Shared token embeddings; LM head only exists for untied checkpoints.
    tx5_token_embeddings = t5x_model['target']['token_embedder']['embedding']
    flax_model.params['shared']['embedding'] = tx5_token_embeddings
    if ('logits_dense' in t5x_model['target']['decoder']):
        flax_model.params['lm_head']['kernel'] = t5x_model['target']['decoder']['logits_dense']['kernel']
    flax_model.save_pretrained(flax_dump_folder_path)
    print('T5X Model was sucessfully converted!')
class TestGlobalVariablesChecker(pylint.testutils.CheckerTestCase):
    """Tests for the custom forbidden-global-variables pylint checker."""
    CHECKER_CLASS = GlobalVariablesChecker

    def test_no_message_global_type_alias_assignment_builtin(self):
        """A nested builtin-generic alias should not be flagged.

        NOTE(review): the module node (not an AssignName node) is passed to
        visit_assignname here and below -- confirm the checker accepts that.
        """
        src = '\n    MyType = list[list[list[int]]]\n    '
        mod = astroid.parse(src)
        with self.assertNoMessages():
            self.checker.visit_assignname(mod)

    def test_message_global_type_alias_assignment_builtin(self):
        """A flat builtin-generic alias is reported as a global variable."""
        src = '\n    TypeName = list[int]\n    '
        mod = astroid.parse(src)
        with self.assertAddsMessages(pylint.testutils.MessageTest(msg_id='forbidden-global-variables', node=mod.globals['TypeName'][0], args="a global variable 'TypeName' is assigned to on line 2"), ignore_position=True):
            self.checker.visit_assignname(mod)

    def test_no_message_global_type_alias_assignment_import(self):
        """Aliasing an imported attribute (datetime.date) is not flagged."""
        src = '\n    import datetime\n    MyType = datetime.date\n    '
        mod = astroid.parse(src)
        # Dig out the Name node under the Attribute on the assignment's RHS.
        attribute_node = [child for child in mod.body[1].get_children()][1]
        attribute_name_node = [child for child in attribute_node.get_children()][0]
        with self.assertNoMessages():
            # The import must be visited first so the checker records it.
            self.checker.visit_import(mod.body[0])
            self.checker.visit_assignname(attribute_name_node)
def add_spectra(s1, s2, var=None, force=False):
    """Return a new Spectrum whose *var* array is the pointwise sum of s1 and s2.

    s2 is resampled onto s1's spectral grid; units and waveunit are taken
    from s1.  When *var* is None it is inferred (s2's unique quantity first,
    then s1's).

    Raises:
        KeyError: if *var* is missing from either spectrum.
        ValueError: when summing transmittances without ``force=True``.
    """
    if (var is None):
        # Infer the variable: prefer s2's unique quantity, fall back to s1's.
        try:
            var = _get_unique_var(s2, var, inplace=False)
        except KeyError:
            var = _get_unique_var(s1, var, inplace=False)
    if (var not in s1.get_vars()):
        raise KeyError('Variable {0} not in Spectrum {1}'.format(var, s1.get_name()))
    if (var not in s2.get_vars()):
        # BUGFIX: the original message reported s1's name here (copy-paste);
        # it is s2 that is missing the variable.
        raise KeyError('Variable {0} not in Spectrum {1}'.format(var, s2.get_name()))
    # Summing transmittances is usually a physics error; require an override.
    if ((var in ['transmittance_noslit', 'transmittance']) and (not force)):
        raise ValueError((('It does not make much physical sense to sum transmittances. Are ' + 'you sure of what you are doing? See also `//` (MergeSlabs), `>` ') + "(SerialSlabs) and `concat_spectra`. If you're sure, use `force=True`"))
    Iunit1 = s1.units[var]
    wunit1 = s1.get_waveunit()
    # Align s2 on s1's grid without mutating the caller's spectrum.
    s2 = s2.resample(s1, inplace=False)
    (w1, I1) = s1.get(var=var, Iunit=Iunit1, wunit=wunit1)
    (w2, I2) = s2.get(var=var, Iunit=Iunit1, wunit=wunit1)
    name = ((s1.get_name() + '+') + s2.get_name())
    sub = Spectrum.from_array(w1, (I1 + I2), var, wunit=wunit1, Iunit=Iunit1, name=name)
    return sub
def test_assert_child_key_has_value_raises_no_child():
    """A missing child key must raise KeyNotInContextError with a precise message."""
    context = Context({'parent': {'child': 1}})
    with pytest.raises(KeyNotInContextError) as err:
        context.assert_child_key_has_value('parent', 'XchildX', 'arb')
    expected = "context['parent']['XchildX'] doesn't exist. It must exist for arb."
    assert str(err.value) == expected
class TestPredictionInterpolation():
    """Tests for navigation prediction interpolators (continuous/angular/nearest).

    NOTE(review): the bare `.parametrize(...)` and `()` lines below look like
    stripped decorators (@pytest.mark.parametrize and @pytest.fixture) --
    confirm against the original source.
    """
    .parametrize(('obs_time', 'expected'), [((- 1), np.nan), (1.5, 2.5), (5, np.nan)])
    def test_interpolate_continuous(self, obs_time, expected):
        """Linear interpolation inside the range; NaN outside it."""
        prediction_times = np.array([0, 1, 2, 3])
        predicted_values = np.array([1, 2, 3, 4])
        res = nav.interpolate_continuous(obs_time, prediction_times, predicted_values)
        np.testing.assert_allclose(res, expected)
    .parametrize(('obs_time', 'expected'), [((- 1), np.nan), (1.5, (0.75 * np.pi)), (2.5, ((- 0.75) * np.pi)), (3.5, ((- 0.25) * np.pi)), (5, np.nan)])
    def test_interpolate_angles(self, obs_time, expected):
        """Angle interpolation wraps into (-pi, pi]; NaN outside the range."""
        prediction_times = np.array([0, 1, 2, 3, 4])
        predicted_angles = np.array([0, (0.5 * np.pi), np.pi, (1.5 * np.pi), (2 * np.pi)])
        res = nav.interpolate_angles(obs_time, prediction_times, predicted_angles)
        np.testing.assert_allclose(res, expected)
    .parametrize(('obs_time', 'expected'), [((- 1), (np.nan * np.ones((2, 2)))), (1.5, [[1, 0], [0, 2]]), (3, (np.nan * np.ones((2, 2))))])
    def test_interpolate_nearest(self, obs_time, expected):
        """Nearest-neighbour lookup for matrix-valued predictions; NaN outside."""
        prediction_times = np.array([0, 1, 2])
        predicted_angles = np.array([np.zeros((2, 2)), np.diag((1, 2)), np.zeros((2, 2))])
        res = nav.interpolate_nearest(obs_time, prediction_times, predicted_angles)
        np.testing.assert_allclose(res, expected)
    def test_interpolate_orbit_prediction(self, obs_time, orbit_prediction, orbit_expected):
        """Orbit prediction interpolated at obs_time matches the fixture."""
        orbit_prediction = orbit_prediction.to_numba()
        orbit = nav.interpolate_orbit_prediction(orbit_prediction, obs_time)
        _assert_namedtuple_close(orbit, orbit_expected)
    def test_interpolate_attitude_prediction(self, obs_time, attitude_prediction, attitude_expected):
        """Attitude prediction interpolated at obs_time matches the fixture."""
        attitude_prediction = attitude_prediction.to_numba()
        attitude = nav.interpolate_attitude_prediction(attitude_prediction, obs_time)
        _assert_namedtuple_close(attitude, attitude_expected)
    ()
    def obs_time(self):
        # Observation timestamp used by the fixture-based tests above.
        return 2.5
    ()
    def orbit_expected(self):
        # Expected orbit state at obs_time=2.5 (midpoint values).
        return nav.Orbit(angles=nav.OrbitAngles(greenwich_sidereal_time=1.5, declination_from_sat_to_sun=1.6, right_ascension_from_sat_to_sun=1.7), sat_position=nav.Vector3D(x=1.8, y=1.9, z=2.0), nutation_precession=(1.6 * np.identity(3)))
    ()
    def attitude_expected(self):
        # Expected attitude state at obs_time=2.5.
        return nav.Attitude(angle_between_earth_and_sun=1.5, angle_between_sat_spin_and_z_axis=1.6, angle_between_sat_spin_and_yz_plane=1.7)
class ClassNameFieldTest(StringTestMixin, BaseFieldTestMixin, FieldTestCase):
    """Tests for fields.ClassName, which marshals an object's class name."""
    field_class = fields.ClassName

    def test_simple_string_field(self):
        # ClassName serializes as a plain string in the schema.
        field = fields.ClassName()
        assert (not field.required)
        assert (not field.discriminator)
        assert (field.__schema__ == {'type': 'string'})

    def test_default_output_classname(self, api):
        """By default the literal class name is emitted."""
        model = api.model('Test', {'name': fields.ClassName()})
        # The inner class name itself is the expected marshalled value.
        class FakeClass(object):
            pass
        data = api.marshal(FakeClass(), model)
        assert (data == {'name': 'FakeClass'})

    def test_output_dash(self, api):
        """With dash=True the class name is converted to snake_case."""
        model = api.model('Test', {'name': fields.ClassName(dash=True)})
        class FakeClass(object):
            pass
        data = api.marshal(FakeClass(), model)
        assert (data == {'name': 'fake_class'})

    def test_with_dict(self, api):
        """Plain dicts marshal to their type name, 'object'.

        NOTE(review): 'object' rather than 'dict' -- presumably the field
        walks the MRO or special-cases dict; confirm in the field impl.
        """
        model = api.model('Test', {'name': fields.ClassName()})
        data = api.marshal({}, model)
        assert (data == {'name': 'object'})
class Time2DistanceGetter(SmoothPointGetter):
    """Graph getter: distance travelled from standstill after x seconds of acceleration."""

    def _getCommonData(self, miscParams, src, tgt):
        # Cache the ship attributes that are constant across sampled points.
        return {
            'maxSpeed': src.getMaxVelocity(),
            'mass': src.item.ship.getModifiedItemAttr('mass'),
            'agility': src.item.ship.getModifiedItemAttr('agility'),
        }

    def _calculatePoint(self, x, miscParams, src, tgt, commonData):
        time = x
        maxSpeed = commonData['maxSpeed']
        mass = commonData['mass']
        agility = commonData['agility']

        def travelled(t):
            # Closed-form distance under exponential approach to max speed;
            # the 1e6 factors keep the original attribute scaling.
            return maxSpeed * t + maxSpeed * agility * mass * math.exp(-t * 1000000 / (agility * mass)) / 1000000

        # Subtract the t=0 offset so the curve starts at zero distance.
        return travelled(time) - travelled(0)
_optimizer('adamax')
# NOTE(review): the line above and the bare signatures below suggest stripped
# decorators (likely @register_optimizer('adamax') on the class,
# @staticmethod on add_args, @property on optimizer_config) -- confirm
# against the original source.
class FairseqAdamax(LegacyFairseqOptimizer):
    """Fairseq wrapper exposing the Adamax optimizer and its CLI flags."""
    def __init__(self, args, params):
        super().__init__(args)
        self._optimizer = Adamax(params, **self.optimizer_config)
    def add_args(parser):
        """Register Adamax-specific command-line arguments."""
        parser.add_argument('--adamax-betas', default='(0.9, 0.999)', metavar='B', help='betas for Adam optimizer')
        parser.add_argument('--adamax-eps', type=float, default=1e-08, metavar='D', help='epsilon for Adam optimizer')
        parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay')
        parser.add_argument('--no-bias-correction', default=False, action='store_true', help='disable bias correction')
    def optimizer_config(self):
        """Kwargs forwarded to the Adamax constructor.

        NOTE: eval() parses the --adamax-betas tuple string, so it executes
        user-supplied text from the command line.
        """
        return {'lr': self.args.lr[0], 'betas': eval(self.args.adamax_betas), 'eps': self.args.adamax_eps, 'weight_decay': self.args.weight_decay, 'bias_correction': (not self.args.no_bias_correction)}
def driver_kwargs(request, capabilities, chrome_options, chrome_service, driver_args, driver_class, driver_log, driver_path, firefox_options, firefox_service, ie_options, ie_service, edge_options, edge_service, safari_options, safari_service, remote_options, pytestconfig):
    """Collect the keyword arguments used to construct the selenium driver.

    Delegates to the driver-specific ``driver_kwargs`` helper selected by the
    ``--driver`` option, then copies any extra capabilities onto the options
    object and stashes the log path on the pytest config.
    """
    driver_name = pytestconfig.getoption('driver').lower()
    driver_module = getattr(drivers, driver_name)
    test_name = '.'.join(split_class_and_test_names(request.node.nodeid))
    kwargs = dict(driver_module.driver_kwargs(
        capabilities=capabilities, chrome_options=chrome_options,
        chrome_service=chrome_service, driver_args=driver_args,
        driver_log=driver_log, driver_path=driver_path,
        firefox_options=firefox_options, firefox_service=firefox_service,
        ie_options=ie_options, ie_service=ie_service,
        edge_options=edge_options, edge_service=edge_service,
        safari_options=safari_options, safari_service=safari_service,
        remote_options=remote_options,
        host=pytestconfig.getoption('selenium_host'),
        port=pytestconfig.getoption('selenium_port'),
        service_log_path=None, request=request, test=test_name))
    # Mirror any explicitly supplied capabilities onto the options object.
    for cap_name, cap_value in capabilities.items():
        kwargs['options'].set_capability(cap_name, cap_value)
    pytestconfig._driver_log = driver_log
    return kwargs
class MobileNetV3Features(nn.Module):
    """MobileNetV3 feature-extraction backbone.

    Builds the stem plus inverted-residual blocks and returns a list of
    intermediate feature maps instead of classification logits.  Features
    are taken directly from block outputs when ``feature_location`` is
    'bottleneck'; otherwise forward hooks on inner modules collect them.
    """

    def __init__(self, block_args, out_indices=(0, 1, 2, 3, 4), feature_location='bottleneck', in_chans=3, stem_size=16, fix_stem=False, output_stride=32, pad_type='', round_chs_fn=round_channels, se_from_exp=True, act_layer=None, norm_layer=None, se_layer=None, drop_rate=0.0, drop_path_rate=0.0):
        super(MobileNetV3Features, self).__init__()
        act_layer = (act_layer or nn.ReLU)
        norm_layer = (norm_layer or nn.BatchNorm2d)
        se_layer = (se_layer or SqueezeExcite)
        self.drop_rate = drop_rate
        # Unless fixed, round the stem width with the same rule as the blocks.
        if (not fix_stem):
            stem_size = round_chs_fn(stem_size)
        self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type)
        self.bn1 = norm_layer(stem_size)
        self.act1 = act_layer(inplace=True)
        builder = EfficientNetBuilder(output_stride=output_stride, pad_type=pad_type, round_chs_fn=round_chs_fn, se_from_exp=se_from_exp, act_layer=act_layer, norm_layer=norm_layer, se_layer=se_layer, drop_path_rate=drop_path_rate, feature_location=feature_location)
        self.blocks = nn.Sequential(*builder(stem_size, block_args))
        self.feature_info = FeatureInfo(builder.features, out_indices)
        # Map stage index -> feature-info position, for direct collection.
        self._stage_out_idx = {v['stage']: i for (i, v) in enumerate(self.feature_info) if (i in out_indices)}
        efficientnet_init_weights(self)
        self.feature_hooks = None
        if (feature_location != 'bottleneck'):
            # Non-bottleneck locations need hooks on the named inner modules.
            hooks = self.feature_info.get_dicts(keys=('module', 'hook_type'))
            self.feature_hooks = FeatureHooks(hooks, self.named_modules())

    def forward(self, x) -> List[torch.Tensor]:
        """Return the selected intermediate feature maps for input ``x``."""
        x = self.conv_stem(x)
        x = self.bn1(x)
        x = self.act1(x)
        if (self.feature_hooks is None):
            features = []
            if (0 in self._stage_out_idx):
                # Stage 0 corresponds to the stem output.
                features.append(x)
            for (i, b) in enumerate(self.blocks):
                x = b(x)
                if ((i + 1) in self._stage_out_idx):
                    features.append(x)
            return features
        else:
            self.blocks(x)
            # The hooks captured the outputs during the forward pass above.
            out = self.feature_hooks.get_output(x.device)
            return list(out.values())
(HAS_TV_TUPLE)
.parametrize('tpl', [tuple, Tuple])
def test_type_var_tuple_generic_post(tpl):
    # TypeVarTuple placed BEFORE a regular TypeVar in the Generic parameter
    # list: unparameterized use defaults the variadic part to Unpack[Tuple[Any, ...]].
    from typing import TypeVarTuple, Unpack
    ShapeT = TypeVarTuple('ShapeT')
    DType = TypeVar('DType')

    class PostArray(Generic[(Unpack[ShapeT], DType)]):
        pass
    assert_normalize(PostArray, PostArray, [normalize_type(Unpack[Tuple[(Any, ...)]]), nt_zero(Any)])
    # With explicit args the variadic part absorbs everything but the last arg.
    assert_normalize(PostArray[int], PostArray, [nt_zero(int)])
    assert_normalize(PostArray[(int, str)], PostArray, [nt_zero(int), nt_zero(str)])
    # Unpacking an empty tuple contributes no normalized parameters.
    assert_normalize(PostArray[(Unpack[tpl[()]], int)], PostArray, [nt_zero(int)])
def preformat_MalNetTiny(dataset_dir, feature_set):
    """Load MalNetTiny, attach node features and standard split indices.

    ``feature_set`` selects the node-feature transform; unknown names raise
    ValueError before any dataset work is done.
    """
    transform_by_name = {
        'none': T.Constant,
        'Constant': T.Constant,
        'OneHotDegree': T.OneHotDegree,
        'LocalDegreeProfile': T.LocalDegreeProfile,
    }
    if feature_set not in transform_by_name:
        raise ValueError(f'Unexpected transform function: {feature_set}')
    tf = transform_by_name[feature_set]()
    dataset = MalNetTiny(dataset_dir)
    dataset.name = 'MalNetTiny'
    logging.info(f'Computing "{feature_set}" node features for MalNetTiny.')
    # Apply the transform eagerly so downstream code sees ready features.
    pre_transform_in_memory(dataset, tf)
    split_dict = dataset.get_idx_split()
    dataset.split_idxs = [split_dict['train'], split_dict['valid'], split_dict['test']]
    return dataset
((pty is None), 'pty module not supported on platform')
class Test_Pty_Serial_Open(unittest.TestCase):
    """Exercise serial.Serial against a pseudo-terminal pair."""
    # NOTE(review): the bare tuple line above looks like a stripped
    # @unittest.skipIf decorator -- confirm against the original file.

    def setUp(self):
        # Fresh pty pair per test; master is a raw fd, slave has a tty name.
        (self.master, self.slave) = pty.openpty()

    def test_pty_serial_open_slave(self):
        # Opening the slave side must not raise.
        with serial.Serial(os.ttyname(self.slave), timeout=1) as slave:
            pass

    def test_pty_serial_write(self):
        # Data written to the master fd is readable from the serial slave.
        with serial.Serial(os.ttyname(self.slave), timeout=1) as slave:
            with os.fdopen(self.master, 'wb') as fd:
                fd.write(DATA)
                fd.flush()
                out = slave.read(len(DATA))
                self.assertEqual(DATA, out)

    def test_pty_serial_read(self):
        # Data written to the serial slave is readable from the master fd.
        with serial.Serial(os.ttyname(self.slave), timeout=1) as slave:
            with os.fdopen(self.master, 'rb') as fd:
                slave.write(DATA)
                slave.flush()
                out = fd.read(len(DATA))
                self.assertEqual(DATA, out)
class TestOutermorphismMatrix():
    """Tests for transformations.OutermorphismMatrix (linear maps on blades)."""

    def test_invariants(self, g2):
        # A 90-degree rotation matrix on the G2 basis vectors.
        (e1, e2) = g2.basis_vectors_lst
        matrix = np.array([[0, 1], [(- 1), 0]])
        f = transformations.OutermorphismMatrix(matrix, g2)
        assert (f(e1) == (- e2))
        assert (f(e2) == e1)
        # Outermorphism property: f distributes over the outer product.
        assert (f((e1 ^ e2)) == (f(e1) ^ f(e2)))
        # Scalars are fixed, and f is linear.
        assert (f(g2.scalar) == g2.scalar)
        assert (f(((g2.scalar + (2 * e1)) + (3 * (e1 ^ e2)))) == ((f(g2.scalar) + (2 * f(e1))) + (3 * f((e1 ^ e2)))))
        # Pretty-printed form is pinned exactly.
        assert (pretty.pretty(f) == textwrap.dedent(" OutermorphismMatrix(array([[ 0, 1],\n [-1, 0]]),\n Layout([1, 1],\n ids=BasisVectorIds(['u', 'v']),\n order=BasisBladeOrder.shortlex(2),\n names=['', 'eu', 'ev', 'euv']))"))

    def test_between_layouts(self, g2, g3):
        # A 3x2 embedding matrix from G2 into G3.
        matrix = np.array([[1, 0], [0, 1], [0, 0]])
        # Wrong (src, dst) order must be rejected.
        with pytest.raises(ValueError):
            transformations.OutermorphismMatrix(matrix, g3, g2)
        (e1, e2) = g2.basis_vectors_lst
        (ex, ey, ez) = g3.basis_vectors_lst
        f = transformations.OutermorphismMatrix(matrix, g2, g3)
        assert (f(e1) == ex)
        assert (f(e2) == ey)
        assert (pretty.pretty(f) == textwrap.dedent(" OutermorphismMatrix(array([[1, 0],\n [0, 1],\n [0, 0]]),\n layout_src=Layout([1, 1],\n ids=BasisVectorIds(['u', 'v']),\n order=BasisBladeOrder.shortlex(2),\n names=['', 'eu', 'ev', 'euv']),\n layout_dst=Layout([1, 1, 1],\n ids=BasisVectorIds(['x', 'y', 'z']),\n order=BasisBladeOrder.shortlex(3),\n names=['', 'ex', 'ey', 'ez', 'exy', 'exz', 'eyz', 'exyz']))"))
class AutoConfigTest(unittest.TestCase):
    """Tests for AutoConfig resolution, registration and error reporting."""

    def test_module_spec(self):
        # The auto module must be importable and have a module spec.
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec('transformers.models.auto'))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained('bert-base-uncased')
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model('roberta')
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        # No model-type key may be a substring of a later key, otherwise
        # prefix-based pattern matching could resolve the wrong config.
        keys = list(CONFIG_MAPPING.keys())
        for (i, key) in enumerate(keys):
            self.assertFalse(any(((key in later_key) for later_key in keys[(i + 1):])))

    def test_new_config_registration(self):
        try:
            AutoConfig.register('custom', CustomConfig)
            # Re-registering with a mismatched or existing name must fail.
            with self.assertRaises(ValueError):
                AutoConfig.register('model', CustomConfig)
            with self.assertRaises(ValueError):
                AutoConfig.register('bert', BertConfig)
            # Round-trip: a registered config survives save/load via AutoConfig.
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            # Always undo the registration so other tests see a clean mapping.
            if ('custom' in CONFIG_MAPPING._extra_content):
                del CONFIG_MAPPING._extra_content['custom']

    def test_repo_not_found(self):
        with self.assertRaisesRegex(EnvironmentError, 'bert-base is not a local folder and is not a valid model identifier'):
            _ = AutoConfig.from_pretrained('bert-base')

    def test_revision_not_found(self):
        with self.assertRaisesRegex(EnvironmentError, 'aaaaaa is not a valid git identifier \\(branch name, tag name or commit id\\)'):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='aaaaaa')

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(EnvironmentError, 'hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.'):
            _ = AutoConfig.from_pretrained('hf-internal-testing/no-config-test-repo')

    def test_from_pretrained_dynamic_config(self):
        config = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model', trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, 'NewModelConfig')
def create_db(filename: str, create_file: bool=True) -> None:
    """Create an empty JSON database file at ``filename`` if needed.

    Only paths ending in '.json' are acted on.  The file is written with
    ``json.dumps(EMPTY_DATA)`` when ``create_file`` is true and the file
    does not already exist; in every other case this is a no-op.

    NOTE: the return annotation was fixed from ``-> True`` (a value, not a
    type) to ``-> None``; the function has never returned a value.
    """
    def create(filename: str, data: str) -> None:
        # Write *data* to *filename*, truncating any existing content.
        with open(filename, 'w') as db_file:
            db_file.write(data)
    if filename.endswith('.json'):
        if (create_file and (not os.path.exists(filename))):
            create(filename, json.dumps(EMPTY_DATA))
def test_nested_process_search_unsupported_field(cortex_product: CortexXDR):
    """An unsupported search field yields exactly one empty query list."""
    search_criteria = {'foo': 'bar'}
    cortex_product._queries = {}
    cortex_product.log = logging.getLogger('pytest_surveyor')
    cortex_product.nested_process_search(Tag('unsupported_field'), search_criteria, {})
    # The unsupported tag is registered, but no query is built for it.
    assert (len(cortex_product._queries) == 1)
    assert (cortex_product._queries[Tag('unsupported_field')] == [])
class VersionTest(unittest.TestCase):
    """Version metadata and kfp compatibility gating for torchx.pipelines.kfp."""

    def test_can_get_version(self) -> None:
        import torchx.pipelines.kfp
        self.assertIsNotNone(torchx.pipelines.kfp.__version__)

    def test_kfp_1x(self) -> None:
        # The package must reject kfp 2.x at import time and accept 1.x.
        import torchx.pipelines.kfp
        with patch('kfp.__version__', '2.0.1'):
            with self.assertRaisesRegex(ImportError, 'Only kfp version'):
                importlib.reload(torchx.pipelines.kfp)
        with patch('kfp.__version__', '1.5.0'):
            importlib.reload(torchx.pipelines.kfp)
def parse(experiment_path, run, max_steps):
    """Aggregate metrics from every tfevents file of one experiment run.

    Files are processed in numerical order; from each file only samples with
    timestamps in (last_time, max_steps] are kept, so overlapping event files
    do not double-count.  Summed costs are converted to a running
    cost-per-step rate.  Returns (rl_objective, cost_objective, sum_costs,
    timesteps) as four parallel lists.
    """
    (run_rl_objective, run_cost_objective, run_sum_costs, run_timesteps) = ([], [], [], [])
    files = list(Path(experiment_path).glob(os.path.join(run, 'events.out.tfevents.*')))
    last_time = (- 1)
    all_sum_costs = 0
    for file in sorted(files, key=numerical_sort):
        (objective, cost_objective, sum_costs, timestamps) = parse_tf_event_file(str(file))
        # Skip event files that are missing any of the four metric streams.
        if (not all([objective, cost_objective, sum_costs, timestamps])):
            print('Not all metrics are available!')
            continue
        run_rl_objective += [obj for (obj, stamp) in zip(objective, timestamps) if (last_time < stamp <= max_steps)]
        run_cost_objective += [obj for (obj, stamp) in zip(cost_objective, timestamps) if (last_time < stamp <= max_steps)]
        # Carry costs accumulated in earlier files, then normalize by timestep.
        run_sum_costs += [((cost + all_sum_costs) / stamp) for (cost, stamp) in zip(sum_costs, timestamps) if (last_time < stamp <= max_steps)]
        run_timesteps += [stamp for stamp in timestamps if (last_time < stamp <= max_steps)]
        last_time = timestamps[(- 1)]
        # NOTE(review): raises IndexError if no sample of this file fell inside
        # the (last_time, max_steps] window -- confirm inputs guarantee overlap.
        all_sum_costs = (run_sum_costs[(- 1)] * last_time)
    return (run_rl_objective, run_cost_objective, run_sum_costs, run_timesteps)
class QRangeSlider(QtWidgets.QWidget, Ui_Form):
    """Slider widget exposing a (start, end) range between min and max.

    Implemented as a three-section splitter (head | span handle | tail);
    dragging the splitter grips moves the range endpoints and emits the
    corresponding value-changed signals.

    Fixes vs. the previous version: the duplicated signal declarations were
    removed (they rebound the same class attributes), dead no-op expressions
    in _handleMoveSplitter were deleted, and _unlockWidth now passes the
    required width argument to setMaximumWidth (it was called with no
    argument, which raises TypeError on every splitter move).
    """

    endValueChanged = QtCore.pyqtSignal(int)
    maxValueChanged = QtCore.pyqtSignal(int)
    minValueChanged = QtCore.pyqtSignal(int)
    startValueChanged = QtCore.pyqtSignal(int)

    # Splitter handle indices for the start and end grips.
    _SPLIT_START = 1
    _SPLIT_END = 2

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setupUi(self)
        self.setMouseTracking(False)
        self._splitter.splitterMoved.connect(self._handleMoveSplitter)
        # Head section (area left of the selected span).
        self._head_layout = QtWidgets.QHBoxLayout()
        self._head_layout.setSpacing(0)
        self._head.setLayout(self._head_layout)
        self.head = Head(self._head, main=self)
        self._head_layout.addWidget(self.head)
        # Handle section (the selected span itself).
        self._handle_layout = QtWidgets.QHBoxLayout()
        self._handle_layout.setSpacing(0)
        self._handle.setLayout(self._handle_layout)
        self.handle = Handle(self._handle, main=self)
        self.handle.setTextColor((150, 255, 150))
        self._handle_layout.addWidget(self.handle)
        # Tail section (area right of the selected span).
        self._tail_layout = QtWidgets.QHBoxLayout()
        self._tail_layout.setSpacing(0)
        self._tail.setLayout(self._tail_layout)
        self.tail = Tail(self._tail, main=self)
        self._tail_layout.addWidget(self.tail)
        self._formatter = None
        self.setMin(0)
        self.setMax(99)
        self.setStart(0)
        self.setEnd(99)
        self.setDrawValues(True)

    def min(self):
        """Lower bound of the slider range."""
        return getattr(self, '__min', None)

    def max(self):
        """Upper bound of the slider range."""
        return getattr(self, '__max', None)

    def setMin(self, value):
        assert (type(value) is int)
        setattr(self, '__min', value)
        self.minValueChanged.emit(value)

    def setMax(self, value):
        assert (type(value) is int)
        setattr(self, '__max', value)
        self.maxValueChanged.emit(value)

    def start(self):
        """Currently selected start value."""
        return getattr(self, '__start', None)

    def end(self):
        """Currently selected end value."""
        return getattr(self, '__end', None)

    def _setStart(self, value):
        # Internal setter: updates state and emits, without moving the splitter.
        setattr(self, '__start', value)
        self.startValueChanged.emit(value)

    def setFormatter(self, func):
        self._formatter = func

    def setStart(self, value):
        assert (type(value) is int)
        v = self._valueToPos(value)
        # Temporarily disconnect so the programmatic move does not re-enter
        # _handleMoveSplitter.
        self._splitter.splitterMoved.disconnect()
        self._splitter.moveSplitter(v, self._SPLIT_START)
        self._splitter.splitterMoved.connect(self._handleMoveSplitter)
        self._setStart(value)

    def _setEnd(self, value):
        # Internal setter: updates state and emits, without moving the splitter.
        setattr(self, '__end', value)
        self.endValueChanged.emit(value)

    def setEnd(self, value):
        assert (type(value) is int)
        v = self._valueToPos(value)
        self._splitter.splitterMoved.disconnect()
        self._splitter.moveSplitter(v, self._SPLIT_END)
        self._splitter.splitterMoved.connect(self._handleMoveSplitter)
        self._setEnd(value)

    def drawValues(self):
        return getattr(self, '__drawValues', None)

    def setDrawValues(self, draw):
        assert (type(draw) is bool)
        setattr(self, '__drawValues', draw)

    def getRange(self):
        return (self.start(), self.end())

    def setRange(self, start, end):
        self.setStart(start)
        self.setEnd(end)

    def keyPressEvent(self, event):
        """Shift the whole range by one step with the left/right arrow keys."""
        key = event.key()
        if (key == QtCore.Qt.Key_Left):
            s = (self.start() - 1)
            e = (self.end() - 1)
        elif (key == QtCore.Qt.Key_Right):
            s = (self.start() + 1)
            e = (self.end() + 1)
        else:
            event.ignore()
            return
        event.accept()
        # Only move when the shifted range still fits within [min, max].
        if ((s >= self.min()) and (e <= self.max())):
            self.setRange(s, e)

    def setBackgroundStyle(self, style):
        self._tail.setStyleSheet(style)
        self._head.setStyleSheet(style)

    def setSpanStyle(self, style):
        self._handle.setStyleSheet(style)

    def _valueToPos(self, value):
        # Map a slider value to a pixel x position.
        return scale(value, (self.min(), self.max()), (0, self.width()))

    def _posToValue(self, xpos):
        # Map a pixel x position back to a slider value.
        return scale(xpos, (0, self.width()), (self.min(), self.max()))

    def _handleMoveSplitter(self, xpos, index):
        """Translate an interactive splitter drag into a new start/end value."""
        def _lockWidth(widget):
            # Freeze *widget* at its current width while the grip moves.
            width = widget.size().width()
            widget.setMinimumWidth(width)
            widget.setMaximumWidth(width)

        def _unlockWidth(widget):
            widget.setMinimumWidth(0)
            # 16777215 is Qt's QWIDGETSIZE_MAX, i.e. "no maximum".  The old
            # code called setMaximumWidth() with no argument (TypeError).
            widget.setMaximumWidth(16777215)

        v = self._posToValue(xpos)
        if (index == self._SPLIT_START):
            _lockWidth(self._tail)
            if (v >= self.end()):
                # Refuse to drag the start past the end.  NOTE(review): this
                # early return leaves the tail width locked until the next
                # valid move -- behavior kept from the original code.
                return
            self._setStart(v)
        elif (index == self._SPLIT_END):
            _lockWidth(self._head)
            if (v <= self.start()):
                return
            self._setEnd(v)
        _unlockWidth(self._tail)
        _unlockWidth(self._head)
        _unlockWidth(self._handle)
.parametrize('args', [['dir1', 'dir2', '-v'], ['dir1', '-v', 'dir2'], ['dir2', '-v', 'dir1'], ['-v', 'dir2', 'dir1']])
def test_consider_args_after_options_for_rootdir(pytester: Pytester, args: List[str]) -> None:
    # Rootdir determination must use positional path arguments regardless of
    # where option flags appear among them.
    root = pytester.mkdir('myroot')
    d1 = root.joinpath('dir1')
    d1.mkdir()
    d2 = root.joinpath('dir2')
    d2.mkdir()
    # Substitute the placeholder names with the real directory paths.
    for (i, arg) in enumerate(args):
        if (arg == 'dir1'):
            args[i] = str(d1)
        elif (arg == 'dir2'):
            args[i] = str(d2)
    with MonkeyPatch.context() as mp:
        mp.chdir(root)
        result = pytester.runpytest(*args)
    result.stdout.fnmatch_lines(['*rootdir: *myroot'])
class TouchKeyboard(object):
    """On-screen touch keyboard for a 320px-wide display.

    The keyboard bitmap occupies y >= 47; typed text is echoed in a band at
    y = 11..35.  Four layouts (lowercase, uppercase, symbols 1, symbols 2)
    are switched via control characters in KEYS: '\\t'/'\\x0c' toggle
    case/symbol pages, '\\x07' returns to lowercase, '\\n' jumps to symbols,
    '\\x08' is backspace and '\\r' submits the current text.
    """

    YELLOW = const(65504)
    GREEN = const(2016)
    # One tuple per layout page; each page is 4 rows of key labels.
    KEYS = ((('q', 'w', 'e', 'r', 't', 'y', 'u', 'i', 'o', 'p'), ('a', 's', 'd', 'f', 'g', 'h', 'j', 'k', 'l'), ('\t', 'z', 'x', 'c', 'v', 'b', 'n', 'm', '\x08', '\x08'), ('\n', ' ', '\r')), (('Q', 'W', 'E', 'R', 'T', 'Y', 'U', 'I', 'O', 'P'), ('A', 'S', 'D', 'F', 'G', 'H', 'J', 'K', 'L'), ('\t', 'Z', 'X', 'C', 'V', 'B', 'N', 'M', '\x08', '\x08'), ('\n', ' ', '\r')), (('1', '2', '3', '4', '5', '6', '7', '8', '9', '0'), ('', '#', '$', '%', '^', '&', '*', '(', ')'), ('\x0c', '+', ',', '.', '-', '_', '!', '?', '\x08', '\x08'), ('\x07', ' ', '\r')), (('1', '2', '3', '4', '5', '6', '7', '8', '9', '0'), ('<', '>', '|', '\\', '/', '{', '}', '[', ']'), ('\x0c', '=', '"', "'", ';', ':', '`', '~', '\x08', '\x08'), ('\x07', ' ', '\r')))

    def __init__(self, display, font):
        self.display = display
        self.font = font
        self.kb_screen = 0     # index into KEYS (current layout page)
        self.kb_text = ''      # text typed so far
        self.load_keyboard()
        self.waiting = False   # next keypress clears the text band
        self.locked = False    # ignore touches entirely

    def clear_text(self):
        """Erase the typed-text band and reset the buffer."""
        self.display.fill_hrect(0, 11, self.display.width, 24, 0)
        self.kb_text = ''

    def handle_keypress(self, x, y, debug=False):
        """Resolve a touch at (x, y) to a key and apply it.

        Returns True when '\\r' is pressed with non-empty text (submit),
        False otherwise.
        """
        if (self.locked is True):
            return
        if (self.waiting is True):
            # First touch after show_message(): just clear the message.
            self.clear_text()
            self.waiting = False
            return
        # Touch controller axes are swapped relative to the display.
        (x, y) = (y, x)
        if debug:
            self.display.fill_circle(x, y, 5, self.GREEN)
        if (y >= 47):
            # Rows are 47px tall starting at y = 47.
            row = (int((y / 47)) - 1)
            if (row == 0):
                # Top row: 10 keys of 32px each.
                column = int((x / 32))
            elif ((row == 1) or (row == 2)):
                # Middle rows are inset by half a key (16px).
                column = max(0, int(((x - 16) / 32)))
            elif (x < 80):
                # Bottom row: three wide keys (mode / space / enter).
                column = 0
            elif (x > 240):
                column = 2
            else:
                column = 1
            key = self.KEYS[self.kb_screen][row][column]
            if ((key == '\t') or (key == '\x0c')):
                # Toggle lower/upper case or symbol page 1/2.
                self.kb_screen ^= 1
                self.load_keyboard()
            elif (key == '\x07'):
                # Back to the lowercase layout.
                self.kb_screen = 0
                self.load_keyboard()
            elif (key == '\n'):
                # Switch to the first symbol layout.
                self.kb_screen = 2
                self.load_keyboard()
            elif (key == '\x08'):
                # Backspace: drop the last char and blank its pixels.
                self.kb_text = self.kb_text[:(- 1)]
                margin = self.font.measure_text(self.kb_text)
                self.display.fill_vrect(margin, 11, 12, 24, 0)
            elif (key == '\r'):
                if (self.kb_text != ''):
                    return True
            else:
                # Regular character: append and echo at the current margin.
                margin = self.font.measure_text(self.kb_text)
                self.kb_text += key
                self.display.draw_letter(margin, 11, key, self.font, self.YELLOW)
        return False

    def load_keyboard(self):
        """Blit the bitmap for the current layout page."""
        self.display.draw_image('images/kb{0}.raw'.format(self.kb_screen), 0, 47, 320, 192)

    def show_message(self, msg, color):
        """Replace the text band with a centered message."""
        self.clear_text()
        msg_length = self.font.measure_text(msg)
        margin = ((self.display.width - msg_length) // 2)
        self.display.draw_text(margin, 11, msg, self.font, color)
.skipif((not HAVE_DEPS_FOR_RESOURCE_ESTIMATES), reason='pyscf and/or jax not installed.')
.slow
def test_kpoint_thc_reg_gamma():
    """At the Gamma point (1x1x1 k-mesh), k-point THC factorization and its
    regularized optimization must reproduce the molecular THC code path:
    pack/unpack round-trips, L-BFGS-B-optimized factors and objective values
    all agree between the two implementations."""
    # Minimal diamond-like 2-carbon cell with a small mesh to keep it fast.
    cell = gto.Cell()
    cell.atom = '\n C 0. 0. 0.\n C 1. 1. 1.\n '
    cell.basis = 'gth-szv'
    cell.pseudo = 'gth-hf-rev'
    cell.a = '\n 0., 3., 3.\n 3., 0., 3.\n 3., 3., 0.'
    cell.unit = 'B'
    cell.mesh = ([11] * 3)
    cell.verbose = 0
    cell.build(parse_arg=False)
    kmesh = [1, 1, 1]
    kpts = cell.make_kpts(kmesh)
    num_kpts = len(kpts)
    mf = scf.KRHF(cell, kpts)
    mf.kernel()
    num_mo = mf.mo_coeff[0].shape[(- 1)]
    num_interp_points = (10 * mf.mo_coeff[0].shape[(- 1)])
    # ISDF initial guess for the THC factors.
    kpt_thc = solve_kmeans_kpisdf(mf, num_interp_points, single_translation=False, verbose=False)
    (chi, zeta, g_mapping) = (kpt_thc.chi, kpt_thc.zeta, kpt_thc.g_mapping)
    momentum_map = build_momentum_transfer_mapping(cell, kpts)
    # Pack/unpack of (chi, zeta) into a flat real buffer must round-trip.
    buffer = np.zeros((2 * (chi.size + get_zeta_size(zeta))), dtype=np.float64)
    pack_thc_factors(chi, zeta, buffer)
    num_G_per_Q = [z.shape[0] for z in zeta]
    (chi_unpacked, zeta_unpacked) = unpack_thc_factors(buffer, num_interp_points, num_mo, num_kpts, num_G_per_Q)
    assert np.allclose(chi_unpacked, chi)
    for iq in range(num_kpts):
        assert np.allclose(zeta[iq], zeta_unpacked[iq])
    # Build Cholesky-factorized ERIs via range-separated density fitting.
    rsmf = scf.KRHF(mf.cell, mf.kpts).rs_density_fit()
    rsmf.verbose = 0
    rsmf.mo_occ = mf.mo_occ
    rsmf.mo_coeff = mf.mo_coeff
    rsmf.mo_energy = mf.mo_energy
    rsmf.with_df.mesh = mf.cell.mesh
    mymp = mp.KMP2(rsmf)
    Luv = cholesky_from_df_ints(mymp)
    Luv_cont = make_contiguous_cholesky(Luv)
    eri = np.einsum('npq,nrs->pqrs', Luv_cont[(0, 0)], Luv_cont[(0, 0)]).real
    buffer = np.zeros((chi.size + get_zeta_size(zeta)), dtype=np.float64)
    buffer[:chi.size] = chi.T.real.ravel()
    # NOTE(review): iq here is the leftover variable from the loop above;
    # with a 1x1x1 mesh it equals 0, which is the intended Gamma-point zeta.
    buffer[chi.size:] = zeta[iq].real.ravel()
    np.random.seed(7)
    # Molecular-code reference optimization.
    opt_param = lbfgsb_opt_thc_l2reg(eri, num_interp_points, maxiter=10, initial_guess=buffer, penalty_param=None)
    chi_unpacked_mol = opt_param[:chi.size].reshape((num_interp_points, num_mo)).T
    zeta_unpacked_mol = opt_param[chi.size:].reshape(zeta[0].shape)
    # K-point-code optimization must match the molecular result at Gamma.
    (opt_param, _) = lbfgsb_opt_kpthc_l2reg(chi, zeta, momentum_map, g_mapping, jnp.array(Luv_cont), maxiter=10, penalty_param=None, disp_freq=(- 1))
    (chi_unpacked, zeta_unpacked) = unpack_thc_factors(opt_param, num_interp_points, num_mo, num_kpts, num_G_per_Q)
    assert np.allclose(chi_unpacked[0], chi_unpacked_mol)
    assert np.allclose(zeta_unpacked[0], zeta_unpacked_mol)
    # Objective values of the two implementations must also agree.
    mol_obj = thc_obj_mol(buffer, num_mo, num_interp_points, eri, 0.001)
    buffer = np.zeros((2 * (chi.size + get_zeta_size(zeta))), dtype=np.float64)
    pack_thc_factors(chi, zeta, buffer)
    gam_obj = thc_objective_regularized(buffer, num_mo, num_interp_points, momentum_map, g_mapping, Luv_cont, 0.001)
    assert ((mol_obj - gam_obj) < 1e-12)
class TestData(unittest.TestCase):
    """Sanity checks on the bundled vulnerability/specifier data."""

    # NOTE(review): the bare parenthesized name below looks like a stripped
    # decorator that parametrizes (pkg, spec) pairs -- confirm against the
    # original file.
    (generatePackageSpecifiers)
    def test_using_valid_specifier_sets(self, pkg, spec):
        """Every specifier string in the data must parse as a SpecifierSet."""
        message = f'Bad specifier for {pkg}: {spec!r}'
        try:
            specifier_set = SpecifierSet(spec)
        except InvalidSpecifier:
            specifier_set = None
        # assertTrue replaces the deprecated failUnless alias, which was
        # removed from unittest in Python 3.12.
        self.assertTrue(specifier_set, msg=message)
        self.assertIsInstance(specifier_set, SpecifierSet, message)

    def test_main_module(self):
        """The insecure-package lists must not be empty."""
        self.assertTrue((len(INSECURE) > 0))
        self.assertTrue((len(INSECURE_FULL) > 0))
def work_block(args):
    """Worker-pool entry: process one block, caching builders per (store_dir, step).

    Builders are memoized in the module-level g_builders dict so repeated
    blocks for the same (store_dir, step) reuse one instance per worker.
    KeyboardInterrupt and EINTR are converted to Interrupted so the pool can
    shut down cleanly.
    """
    try:
        (cls, store_dir, step, iblock, shared, force) = args
        key = (store_dir, step)
        builder = g_builders.get(key)
        if builder is None:
            builder = cls(store_dir, step, shared, force=force)
            g_builders[key] = builder
        builder.work_block(iblock)
    except KeyboardInterrupt:
        raise Interrupted()
    except IOError as exc:
        if (exc.errno != errno.EINTR):
            raise
        # Interrupted system call: treat like a keyboard interrupt.
        raise Interrupted()
    return (store_dir, step, iblock)
class Seq_User_Act(Seq_User):
def __init__(self, nlg_sample, nlg_template):
    """Build the supervised user-act simulator: init state/goal, load the
    'usr_act' model and its entity data."""
    super().__init__(nlg_sample=nlg_sample, nlg_template=nlg_template)
    self._set_initial_state()
    self._set_initial_goal_dic()
    cfg.init_handler('tsdf-usr_act')
    cfg.dataset = 'usr_act'
    if cfg.cuda:
        torch.cuda.set_device(cfg.cuda_device)
        logging.info('Device: {}'.format(torch.cuda.current_device()))
    self.m = Model('usr_act')
    self.m.count_params()
    self.m.load_model()
    self.entity = self.m.reader.entity
    # Per-turn success/fail flags collected by success_or_not().
    self.state_list = []
    # Current dialog act label and the last generated user utterance.
    self.act = ''
    self.prev_usr = ''
    self._set_initial_model_parameters()
def _set_initial_state(self):
    """Reset all dialog-tracking state to its start-of-dialog values."""
    # Flags per slot: what has been informed/asked/answered so far, plus
    # presented-result bookkeeping and the running act sequences.
    self.state = {'informed': {k: 0 for k in self.entity_type['informable_slots']}, 'asked': {k: 0 for k in self.entity_type['requestable_slots']}, 'asked_answered': {k: 0 for k in (self.entity_type['requestable_slots'] + ['name'])}, 'reservation_informed': {k: 0 for k in self.entity_type['reservation_slots']}, 'results': [], 'no_match_presented': 0, 'asked_anything_else': 0, 'no_other_presented': 0, 'match_presented': 0, 'book_fail': 0, 'usr_act_sequence': [], 'sys_act_sequence': [], 'inform': {k: None for k in self.entity_type['informable_slots']}, 'book': {k: None for k in self.entity_type['reservation_slots']}, 'reqt': []}
    self.check_constrain = []
    self.check_info = dialog_config.INFO_CHECK_NOTYET
    self.check_reservation = []
    self.dialog_status = dialog_config.NO_OUTCOME_YET
def _set_initial_goal_dic(self):
    """Flatten self.goal into goal_dic (slot -> values) and goal_list (slot names)."""
    self.goal_dic = defaultdict(list)
    # Collect slot values from both the primary and fallback info/book goals.
    for key in ['cur_info', 'info_second_choice', 'cur_book', 'book_second_choice']:
        if (key in self.goal):
            for slot_name in self.goal[key]:
                self.goal_dic[slot_name] += [self.goal[key][slot_name]]
    # Requestable slots map to their own name rather than a value.
    if ('reqt' in self.goal):
        for slot_name in self.goal['reqt']:
            self.goal_dic[slot_name] = [slot_name]
    # goal_list feeds the model's goal encoding, in section order.
    self.goal_list = list(self.goal['cur_info'].keys())
    if ('info_second_choice' in self.goal):
        self.goal_list += list(self.goal['info_second_choice'].keys())
    if ('reqt' in self.goal):
        self.goal_list += list(self.goal['reqt'])
    if ('cur_book' in self.goal):
        self.goal_list += list(self.goal['cur_book'].keys())
    if ('book_second_choice' in self.goal):
        self.goal_list += list(self.goal['book_second_choice'].keys())
def _set_initial_model_parameters(self):
    """Prepare the first-turn model batch (dummy tokens + encoded goal)."""
    self.turn_batch = {'dial_id': [0], 'turn_num': [0], 'user': [[0]], 'response': [[0]], 'bspan': [[0]], 'u_len': [0], 'm_len': [0], 'degree': [[1]], 'supervised': [True], 'goal': [self.m.reader.vocab.sentence_encode((word_tokenize(' '.join(self.goal_list)) + ['EOS_Z0']))]}
    # No previous belief span or dialog act before the first turn.
    self.prev_z = None
    self.prev_act = None
def respond(self, sys_act, prev_sys=None):
    """Generate the next user utterance given the system act and utterance.

    Runs the supervised user-act model to pick a dialog act, fills in slot
    values from the goal, renders an utterance via the configured NLG
    (templates, sampled templates, or seq2seq), updates dialog state and
    returns (None, utterance).
    """
    mode = 'test'
    turn_states = {}
    turn_num = self.turn_batch['turn_num'][0]
    # Index order must match the model's act-classification head.
    act_list = ['inform_type', 'inform_type_change', 'ask_info', 'make_reservation', 'make_reservation_change_time', 'anything_else', 'goodbye']
    if (turn_num != 0):
        self.update_states_from_sys(sys_act)
    if (prev_sys is None):
        prev_sys = 'Hello! What can I help you?'.lower()
    else:
        prev_sys = prev_sys.lower()
    utt_tokenized = (word_tokenize(prev_sys) + ['EOS_U'])
    utt_encoded = self.m.reader.vocab.sentence_encode(utt_tokenized)
    if (self.turn_batch['turn_num'] == [0]):
        self.turn_batch['user'] = [utt_encoded]
    else:
        # Later turns prepend the previous user act plus an EOS_M separator.
        self.turn_batch['user'] = [((self.m.reader.vocab.sentence_encode(word_tokenize(self.prev_act)) + [self.m.reader.vocab.encode('EOS_M')]) + utt_encoded)]
    self.turn_batch['u_len'] = [len(i) for i in self.turn_batch['user']]
    self.turn_batch['m_len'] = [len(i) for i in self.turn_batch['response']]
    (u_input, u_input_np, z_input, m_input, m_input_np, u_len, m_len, degree_input, kw_ret) = self.m._convert_batch(self.turn_batch, self.prev_z)
    (m_idx, z_idx, turn_states) = self.m.m(mode=mode, u_input=u_input, u_len=u_len, z_input=z_input, m_input=m_input, degree_input=degree_input, u_input_np=u_input_np, m_input_np=m_input_np, m_len=m_len, turn_states=turn_states, dial_id=self.turn_batch['dial_id'], **kw_ret)
    self.act = act_list[m_idx[(0, 0, 0)]]
    # The first user turn is always an inform.
    if (turn_num == 0):
        self.act = 'inform_type'
    slot_dict = self.generate_dial_act_slots(sys_act, prev_sys)
    usr_act = Action(self.act, slot_dict)
    if ((self.act == 'inform_type') and (slot_dict == {}) and (sys_act.act == SystemAct.ASK_TYPE)):
        # Nothing left to inform but the system asked: answer "don't care".
        usr_response_sent = 'i do not care.'
    elif self.nlg_sample:
        # NOTE(review): this branch checks self.nlg_templates while the
        # branch below checks self.nlg_template -- presumably the template
        # store vs. the mode flag; confirm against the base class.
        assert self.nlg_templates
        assert self.generator
        print('supervised nlg_sample')
        if (prev_sys is None):
            prev_sys = '<start>'
        (usr_response_sent, lexicalized_usr_act) = self.nlg.generate_sent(usr_act, templates=self.nlg_templates, generator=self.generator, context=prev_sys, seq2seq=None)
    else:
        if (self.seq2seq is None):
            # Pure template-based NLG.
            print('supervised templates')
            assert self.nlg_template
            assert (not self.nlg_sample)
            assert (self.generator is None)
            (usr_response_sent, lexicalized_usr_act) = self.nlg.generate_sent(usr_act, turn_num=(len(self.state['usr_act_sequence']) - 1), generator=None, seq2seq=None)
        else:
            # Seq2seq NLG.
            print(' supervised seq2seq')
            assert (not self.nlg_sample)
            assert (not self.nlg_template)
            assert self.seq2seq
            (usr_response_sent, lexicalized_usr_act) = self.nlg.generate_sent(usr_act, generator=None, seq2seq=self.seq2seq)
    usr_response_sent = usr_response_sent.replace('<eos>', '')
    usr_response_sent = usr_response_sent.lower()
    if (turn_num != 0):
        self.success_or_not(self.prev_usr, prev_sys, usr_response_sent, sys_act)
    self.update_states_from_user(slot_dict)
    # Carry belief span, act and utterance over to the next turn.
    self.prev_z = z_idx
    self.prev_act = self.act
    self.prev_usr = usr_response_sent
    turn_num += 1
    self.turn_batch['turn_num'] = [turn_num]
    return (None, usr_response_sent)
def interact(self):
    """Interactive console loop: play the system role against the simulator.

    Reads system utterances from stdin, prints the simulated user response
    and the goal; stops after 10 turns or on input 'close'.
    """
    mode = 'test'
    turn_states = {}
    turn_num = self.turn_batch['turn_num'][0]
    utterance = 'Hello! What can I help you?'.lower()
    print(('Sys: ' + utterance))
    while True:
        if ((self.turn_batch['turn_num'][0] > 10) or (utterance == 'close')):
            break
        utt_tokenized = (word_tokenize(utterance) + ['EOS_U'])
        utt_encoded = self.m.reader.vocab.sentence_encode(utt_tokenized)
        if (self.turn_batch['turn_num'] == [0]):
            self.turn_batch['user'] = [utt_encoded]
        else:
            # Later turns prepend the previous user act plus an EOS_M separator.
            self.turn_batch['user'] = [((self.m.reader.vocab.sentence_encode(word_tokenize(self.prev_act)) + [self.m.reader.vocab.encode('EOS_M')]) + utt_encoded)]
        self.turn_batch['u_len'] = [len(i) for i in self.turn_batch['user']]
        self.turn_batch['m_len'] = [len(i) for i in self.turn_batch['response']]
        (u_input, u_input_np, z_input, m_input, m_input_np, u_len, m_len, degree_input, kw_ret) = self.m._convert_batch(self.turn_batch, self.prev_z)
        (m_idx, z_idx, turn_states) = self.m.m(mode=mode, u_input=u_input, u_len=u_len, z_input=z_input, m_input=m_input, degree_input=degree_input, u_input_np=u_input_np, m_input_np=m_input_np, m_len=m_len, turn_states=turn_states, dial_id=self.turn_batch['dial_id'], **kw_ret)
        sent = self.m.reader.vocab.sentence_decode(m_idx[0], eos='EOS_M')
        filled_sent = self.fill_sentence(sent)
        print(('Usr Simu: ' + filled_sent))
        print(('Goal:' + ' '.join(self.goal_list)))
        print('\n')
        # NOTE(review): debugger breakpoint fires every turn -- looks like a
        # development leftover; remove if this loop is meant to run unattended.
        pdb.set_trace()
        self.prev_z = z_idx
        self.prev_act = filled_sent
        turn_num += 1
        self.turn_batch['turn_num'] = [turn_num]
        utterance = input('Sys:').lower()
def generate_dial_act_slots(self, sys_act, prev_sys):
    """Select slot/value pairs for the current act, adjusting the act when needed.

    Given the model-chosen self.act, the last system act and utterance,
    returns the slot dict to verbalize.  May rewrite self.act when the chosen
    act is not applicable (e.g. nothing left to inform, no results yet, no
    second-choice goal available).
    """
    slot_dict = {}
    if (self.act == 'inform_type'):
        avail_slot = []
        if (self.turn_batch['turn_num'][0] == 0):
            # First turn: inform a random non-empty subset of the info goal.
            avail_slot = random.sample(self.goal['cur_info'].keys(), k=random.choice(range(1, (len(self.goal['cur_info'].keys()) + 1))))
            for slot in avail_slot:
                slot_dict[slot] = self.goal['cur_info'][slot]
        elif (sys_act.act == SystemAct.ASK_TYPE):
            # Answer exactly the slots the system asked about.
            for slot in ['area', 'food', 'pricerange']:
                if (slot in prev_sys):
                    avail_slot.append(slot)
                    if (slot in self.goal['cur_info']):
                        slot_dict[slot] = self.goal['cur_info'][slot]
            if (avail_slot == []):
                slot_dict = self.goal['cur_info']
        else:
            # Otherwise inform whatever goal slots have not been given yet.
            avail_slot = [slot_name for slot_name in self.state['inform'] if (self.state['inform'][slot_name] is None)]
            if avail_slot:
                for slot in avail_slot:
                    if (slot in self.goal['cur_info']):
                        slot_dict[slot] = self.goal['cur_info'][slot]
            if (not slot_dict):
                # Nothing left to inform: pick a follow-up act instead.
                if (sys_act.act == SystemAct.NOMATCH_RESULT):
                    if ('info_second_choice' in self.goal):
                        self.act = 'inform_type_change'
                    else:
                        self.act = 'goodbye'
                elif self.state['results']:
                    if ('reqt' in self.goal):
                        self.act = 'ask_info'
                    else:
                        self.act = 'make_reservation'
    if (self.act == 'inform_type_change'):
        # Fall back to the primary info goal when no second choice exists.
        if ('info_second_choice' not in self.goal):
            self.act = 'inform_type'
            slot_dict = self.goal['cur_info']
        else:
            slot_dict = self.goal['info_second_choice']
    if (self.act == 'ask_info'):
        if self.state['results']:
            if ('reqt' not in self.goal):
                # No explicit request goal: ask for a random contact subset.
                avail_slot = sorted(random.sample(['address', 'postcode', 'phone'], k=random.choice(range(1, 4))))
            else:
                # Ask only for requested slots not yet answered.
                avail_slot = list((set(self.goal['reqt']) - set(self.state['reqt'])))
            for slot in avail_slot:
                slot_dict[slot] = None
            if (slot_dict == {}):
                self.act = 'goodbye'
        else:
            # Cannot ask before any result was presented.
            self.act = 'inform_type'
            slot_dict = self.goal['cur_info']
    if (self.act == 'make_reservation'):
        avail_slot = []
        if self.state['results']:
            if ('cur_book' in self.goal):
                slot_dict = self.goal['cur_book']
            else:
                # No booking goal: improvise one and remember it.
                if (sys_act.act == SystemAct.ASK_RESERVATION_INFO):
                    for slot in ['time', 'day', 'people']:
                        if (slot in prev_sys):
                            avail_slot.append(slot)
                if (avail_slot == []):
                    avail_slot = sorted(random.sample(['time', 'day', 'people'], k=random.choice(range(1, 4))))
                for slot in avail_slot:
                    slot_dict[slot] = random.choice(self.entity['informable'][slot])
                self.goal['cur_book'] = slot_dict
        else:
            # Cannot book before any result was presented.
            self.act = 'inform_type'
            slot_dict = self.goal['cur_info']
    if (self.act == 'make_reservation_change_time'):
        if ('book_second_choice' in self.goal):
            slot_dict = self.goal['book_second_choice']
        elif ('make_reservation' in self.goal):
            self.act = 'make_reservation'
            slot_dict = self.goal['cur_book']
        else:
            # No alternative booking goal: improvise one and remember it.
            self.act = 'make_reservation'
            avail_slot = sorted(random.sample(['time', 'day', 'people'], k=random.choice(range(1, 4))))
            for slot in avail_slot:
                slot_dict[slot] = random.choice(self.entity['informable'][slot])
            self.goal['cur_book'] = slot_dict
    else:
        avail_slot = []
    return slot_dict
def success_or_not(self, prev_usr, prev_sys, cur_usr, sys_act):
    """Label the latest system turn and decide whether the dialog ends.

    Appends TURN_SUCCESS_FOR_SL / TURN_FAIL_FOR_SL to ``self.state_list``
    according to how well ``sys_act`` matches what the user asked for in
    ``prev_usr``/``prev_sys``, sets ``self.dialog_status`` to the latest
    turn label, and — when a stop condition is detected in ``cur_usr`` —
    marks the whole dialog SUCCESS_DIALOG only if no turn ever failed.

    Fix: the original assigned ``stop_flag = 0`` twice in a row; the
    redundant duplicate assignment is removed (no behavior change).
    """
    stop_flag = 0
    # Mention of any of these means the user still wants something, so a
    # generic closing phrase alone must not end the dialog.
    non_stop_pat = re.compile('number|phone|post|address|name|information|value_|restaurant_')
    # --- stop detection: does the current user utterance close the dialog? ---
    if (('bye' in cur_usr) and ('?' not in cur_usr)):
        stop_flag = 1
    elif (('thank' in cur_usr) and ('[' not in cur_usr) and ('?' not in cur_usr)):
        stop_flag = 1
    elif (re.match('.*have a (good|nice|lovely).*', cur_usr) and ('?' not in cur_usr)):
        stop_flag = 1
    elif re.match('.*(that is|thats|that s|that will be) all.*', cur_usr):
        stop_flag = 1
    elif (not re.findall(non_stop_pat, cur_usr)):
        if ('all set' in cur_usr):
            stop_flag = 1
        elif ('i am all i need' in cur_usr):
            stop_flag = 1
        elif ('that s it' in cur_usr):
            stop_flag = 1
    # Hard stops: turn budget exhausted, or "no match" with no fallback goal.
    if (self.turn_batch['turn_num'][0] > dialog_config.MAX_TURN):
        stop_flag = 1
    if ((sys_act.act == SystemAct.NOMATCH_RESULT) and ('info_second_choice' not in self.goal)):
        stop_flag = 1
    # --- per-turn supervision label for the system act ---
    if (sys_act.act == SystemAct.GOODBYE):
        # System hanging up is always scored as a failed turn.
        self.state_list.append(dialog_config.TURN_FAIL_FOR_SL)
    elif (self.prev_act == 'ask_info'):
        # User asked for info last turn: only PROVIDE_INFO is correct.
        if (sys_act.act == SystemAct.PROVIDE_INFO):
            self.state_list.append(dialog_config.TURN_SUCCESS_FOR_SL)
        else:
            self.state_list.append(dialog_config.TURN_FAIL_FOR_SL)
    elif ((re.search('value_time|value_day|value_people', self.m.reader.delex_sent(prev_usr)) is not None) or (re.search('reference number|reservation number', prev_usr) is not None)):
        # User was in a booking exchange (gave time/day/people or asked for a
        # reference number).
        if (sys_act.act == SystemAct.ASK_RESERVATION_INFO):
            # Asking again for a slot the user already supplied is a failure.
            tmp_flag = 1
            for slot_name in ['time', 'day', 'people']:
                if ((slot_name in prev_sys) and (self.state['book'][slot_name] is not None)):
                    tmp_flag = 0
            if tmp_flag:
                self.state_list.append(dialog_config.TURN_SUCCESS_FOR_SL)
            else:
                self.state_list.append(dialog_config.TURN_FAIL_FOR_SL)
        elif (sys_act.act in [SystemAct.BOOKING_SUCCESS, SystemAct.BOOKING_FAIL]):
            self.state_list.append(dialog_config.TURN_SUCCESS_FOR_SL)
        elif ((sys_act.act == SystemAct.PRESENT_RESULT) and (self.state['results'] == [])):
            # Presenting a first result is fine only if it matches every
            # constraint the user has informed so far (and no name was given).
            prev_sys_slot = self.m.reader.delex_sent(prev_sys)
            constraints = [slot[1:(- 1)].split('|')[1] for slot in re.findall('\\[.*?\\]', prev_sys_slot)]
            tmp_flag = 1
            if (self.state['inform']['name'] is not None):
                tmp_flag = 0
            for slot_name in self.state['inform']:
                if ((self.state['inform'][slot_name] is not None) and (self.state['inform'][slot_name] not in constraints)):
                    tmp_flag = 0
            if tmp_flag:
                self.state_list.append(dialog_config.TURN_SUCCESS_FOR_SL)
            else:
                self.state_list.append(dialog_config.TURN_FAIL_FOR_SL)
        else:
            self.state_list.append(dialog_config.TURN_FAIL_FOR_SL)
    elif (sys_act.act in [SystemAct.BOOKING_SUCCESS, SystemAct.BOOKING_FAIL]):
        # Booking outcome without a preceding booking exchange: failure.
        self.state_list.append(dialog_config.TURN_FAIL_FOR_SL)
    elif (sys_act.act == SystemAct.ASK_RESERVATION_INFO):
        # Asking for reservation info is correct only if the user mentioned
        # booking/reserving.
        if (('book' in prev_usr) or ('reserv' in prev_usr)):
            self.state_list.append(dialog_config.TURN_SUCCESS_FOR_SL)
        else:
            self.state_list.append(dialog_config.TURN_FAIL_FOR_SL)
    elif (sys_act.act == SystemAct.NOMATCH_RESULT):
        # "No match" is correct only when the DB really has no entry for the
        # user's current constraints.
        cur_info = {slot_name: slot_val for (slot_name, slot_val) in self.state['inform'].items() if (slot_val is not None)}
        match_list = self.query_in_DB(cur_info)
        if (not match_list):
            self.state_list.append(dialog_config.TURN_SUCCESS_FOR_SL)
        else:
            self.state_list.append(dialog_config.TURN_FAIL_FOR_SL)
    elif (sys_act.act == SystemAct.NO_OTHER):
        # "No other option" is correct only when the DB has no alternative
        # beyond the results already presented.
        cur_info = {slot_name: slot_val for (slot_name, slot_val) in self.state['inform'].items() if (slot_val is not None)}
        match_list = self.query_in_DB(cur_info, skip=self.state['results'])
        if (not match_list):
            self.state_list.append(dialog_config.TURN_SUCCESS_FOR_SL)
        else:
            self.state_list.append(dialog_config.TURN_FAIL_FOR_SL)
    elif (re.search('value_area|value_food|value_pricerange', self.m.reader.delex_sent(prev_usr)) is not None):
        # User informed search constraints last turn.
        if (sys_act.act == SystemAct.PRESENT_RESULT):
            # Presented result must honor every informed constraint.
            prev_sys_slot = self.m.reader.delex_sent(prev_sys)
            constraints = [slot[1:(- 1)].split('|')[1] for slot in re.findall('\\[.*?\\]', prev_sys_slot)]
            tmp_flag = 1
            for slot_name in self.state['inform']:
                if ((self.state['inform'][slot_name] is not None) and (self.state['inform'][slot_name] not in constraints)):
                    tmp_flag = 0
            if tmp_flag:
                self.state_list.append(dialog_config.TURN_SUCCESS_FOR_SL)
            else:
                self.state_list.append(dialog_config.TURN_FAIL_FOR_SL)
        elif (sys_act.act == SystemAct.ASK_TYPE):
            # Re-asking a constraint the user already gave is a failure.
            tmp_flag = 1
            for slot_name in ['area', 'food', 'pricerange']:
                if ((slot_name in prev_sys) and (self.state['inform'][slot_name] is not None)):
                    tmp_flag = 0
            if tmp_flag:
                self.state_list.append(dialog_config.TURN_SUCCESS_FOR_SL)
            else:
                self.state_list.append(dialog_config.TURN_FAIL_FOR_SL)
        else:
            self.state_list.append(dialog_config.TURN_FAIL_FOR_SL)
    elif (re.search('restaurant_name', self.m.reader.delex_sent(prev_usr)) is not None):
        # User named a restaurant: the system should either present it or
        # report no match.
        if ((sys_act.act == SystemAct.NOMATCH_RESULT) or (sys_act.act == SystemAct.PRESENT_RESULT)):
            self.state_list.append(dialog_config.TURN_SUCCESS_FOR_SL)
        else:
            self.state_list.append(dialog_config.TURN_FAIL_FOR_SL)
    elif (sys_act.act == SystemAct.ASK_TYPE):
        # ASK_TYPE is fine if the user gave a name but constraints are
        # still missing.
        if ((self.state['inform']['name'] is not None) and ((self.state['inform']['area'] is None) or (self.state['inform']['food'] is None) or (self.state['inform']['pricerange'] is None))):
            self.state_list.append(dialog_config.TURN_SUCCESS_FOR_SL)
        else:
            self.state_list.append(dialog_config.TURN_FAIL_FOR_SL)
    else:
        self.state_list.append(dialog_config.TURN_SUCCESS_FOR_SL)
    # The dialog status mirrors the most recent turn label...
    self.dialog_status = self.state_list[(- 1)]
    if stop_flag:
        # ...unless the dialog is over: then success requires that no turn
        # ever failed.
        if (dialog_config.TURN_FAIL_FOR_SL not in self.state_list):
            self.dialog_status = dialog_config.SUCCESS_DIALOG
        else:
            self.dialog_status = dialog_config.FAILED_DIALOG
def update_states_from_user(self, slot_dic):
    """Fold user-provided slots into the tracked dialog state.

    Known informable slots land in ``state['inform']``, bookable slots in
    ``state['book']``; any other slot name is treated as a requested slot
    and its name is appended to ``state['reqt']``.
    """
    inform = self.state['inform']
    book = self.state['book']
    for name, value in slot_dic.items():
        if name in inform:
            inform[name] = value
        elif name in book:
            book[name] = value
        else:
            self.state['reqt'].append(name)
def update_states_from_sys(self, sys_act):
    """Track results the system presented to the user.

    Only PRESENT_RESULT acts carry a result; everything else is ignored.
    """
    if sys_act.act != SystemAct.PRESENT_RESULT:
        return
    self.state['results'].append(sys_act.parameters)
def reset(self):
    """Reset the simulator to a fresh-episode configuration."""
    # Let the base class reset its own bookkeeping first.
    super().reset()
    # Re-initialize dialog state, user goal, and model parameters.
    self._set_initial_state()
    self._set_initial_goal_dic()
    self._set_initial_model_parameters()
    # Per-episode bookkeeping: per-turn outcome labels, the current user
    # act, and the previous user utterance.
    self.state_list = []
    self.act = ''
    self.prev_usr = ''
def normalize(x):
    """Normalize a string (or utf-8 bytes) for lenient comparison.

    Steps: strip accents via NFKD decomposition, canonicalize typographic
    quote/dash variants to ASCII, repeatedly trim trailing wiki-style
    footnotes ('[1]', '[citation needed]', '*#+') and parentheticals
    (' (disambiguation)') and unwrap fully-quoted strings, drop a single
    trailing period, collapse whitespace, and lowercase.

    Fix: the quote/dash character classes had lost their Unicode literals
    in an encoding round-trip — '[]' is an invalid regex that raises
    re.error, and a stray space in the single-quote class turned every
    space into an apostrophe. The classes are restored with explicit
    escapes.
    """
    if not isinstance(x, str):
        # Assume non-str inputs are utf-8 encoded bytes; drop undecodable bytes.
        x = x.decode('utf8', errors='ignore')
    # Strip combining marks (accents): e.g. 'é' -> 'e'.
    x = ''.join(c for c in unicodedata.normalize('NFKD', x)
                if unicodedata.category(c) != 'Mn')
    # Map typographic quotes/dashes onto their ASCII equivalents:
    # ´ ′ ’ ‘ ` -> ' ;  “ ” -> " ;  hyphen/dash/minus variants -> -
    x = re.sub('[\u00b4\u2032\u2019\u2018`]', "'", x)
    x = re.sub('[\u201c\u201d]', '"', x)
    x = re.sub('[\u2010\u2011\u2012\u2013\u2014\u2212]', '-', x)
    # Iterate the suffix-trimming rules until a fixed point is reached,
    # since stripping one suffix can expose another.
    while True:
        old_x = x
        # Trailing bracketed footnotes and stray '*#+' markers.
        x = re.sub(r'((?<!^)\[[^\]]*\]|\[\d+\]|[*#+])*$', '', x.strip())
        # Trailing parentheticals, e.g. ' (disambiguation)'.
        x = re.sub(r'(?<!^)( \([^)]*\))*$', '', x.strip())
        # Unwrap a string that is entirely enclosed in double quotes.
        x = re.sub(r'^"([^"]*)"$', r'\1', x.strip())
        if x == old_x:
            break
    # Drop at most one trailing period.
    if x and x[-1] == '.':
        x = x[:-1]
    # Collapse runs of whitespace and lowercase.
    x = re.sub(r'\s+', ' ', x, flags=re.U).lower().strip()
    return x
def fund_node(token_result: Callable[[], Contract], proxy_manager: ProxyManager, to_address: Address, amount: TokenAmount) -> None:
    """Transfer ``amount`` tokens to ``to_address``.

    Resolves the token contract from ``token_result``, obtains the matching
    token proxy at the latest block, and performs the transfer.
    """
    contract = token_result()
    canonical = TokenAddress(to_canonical_address(contract.address))
    proxy = proxy_manager.token(canonical, BLOCK_ID_LATEST)
    proxy.transfer(to_address=to_address, amount=amount)
def bind_texture(texture):
    """Bind ``texture`` for OpenGL rendering, loading its image on first use.

    Sets both S and T wrap modes: clamp-to-edge when the texture's clamp
    option is 'on', otherwise repeat.
    """
    if not getattr(texture, 'image', None):
        # Lazy-load the image the first time this texture is bound.
        texture.image = load_image(texture.find())
    image = texture.image
    glEnable(image.target)
    glBindTexture(image.target, image.id)
    wrap_mode = GL_CLAMP_TO_EDGE if texture.options.clamp == 'on' else GL_REPEAT
    glTexParameterf(image.target, GL_TEXTURE_WRAP_S, wrap_mode)
    glTexParameterf(image.target, GL_TEXTURE_WRAP_T, wrap_mode)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.